content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
import numpy as np
import torch.nn as nn
from torch.nn.modules.loss import _Loss
from torch.optim.optimizer import Optimizer
import torch.optim as optim
| [
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
6738,
28034,
13,
20471,
13,
18170,
13,
22462,
1330,
4808,
43,
793,
198,
6738,
28034,
13,
40085,
13,
40085,
7509,
1330,
30011,
7509,
198,
11748,
28034,
13,
... | 3.285714 | 49 |
"""Compute correlation between access history and next prefetch,
using load traces.
Need to run from above corr/ directory. If you still get an error,
try export PYTHONPATH=.
"""
import argparse
import time
from utils.load import get_open_function
from utils.load_trace import get_instructions
from utils.logging import log_progress
def gather_correlation_data(f, cd, pcd):
"""Wrapper function to gather correlation data
from each address in the load trace."""
# Count number of lines
nlines = 0
for _ in f:
nlines += 1
f.seek(0)
start_time = time.time()
for lnum, inst in enumerate(get_instructions(f)):
# Periodically log progress
log_progress(lnum, nlines, start_time, interval=50000)
# Add load to correlation tracker
addr = inst.addr
cd.add_addr(addr)
pcd.add_addr(addr)
# Print time to run
print('Time to run:', (time.time() - start_time) / 60, 'min')
class CorrelationData(object):
"""Track correlation between address histories (triggers) and the next prefetch address.
depth : how many prefetches to look ahead
- e.g. 1 = next prefetch, 2 = the second prefetch ahead, etc.
max_hist_len : number of prior global load addresses to consider as part of the trigger.
- Track all triggers of length 1 to max_hist_len (inclusive)
shift : number of bits to cut-off for tracking
- 0 : cache line temporal correlation
- 6 : page temporal correlation
"""
def compute_correlation(load_trace, depth, max_hist_len):
"""Main temporal correlation computation"""
correlation_data = CorrelationData(depth, max_hist_len)
page_correlation_data = CorrelationData(depth, max_hist_len, shift=6)
start = time.time()
l_open = get_open_function(load_trace)
with l_open(load_trace, mode='rt', encoding='utf-8') as f:
gather_correlation_data(f, correlation_data, page_correlation_data)
print_freqs(correlation_data.compute_freqs(), 'Cache Lines')
print_freqs(page_correlation_data.compute_freqs(), 'Pages')
print_freqs(correlation_data.compute_freqs(weighted=True), 'Weighted Cache Lines')
print_freqs(page_correlation_data.compute_freqs(weighted=True), 'Weighted Pages')
print('Time to run:', (time.time() - start) / 60, 'min')
if __name__ == '__main__':
args = get_argument_parser()
compute_correlation(args.load_trace, args.depth, args.max_hist_len)
| [
37811,
7293,
1133,
16096,
1022,
1895,
2106,
290,
1306,
7694,
7569,
11,
198,
3500,
3440,
20675,
13,
198,
198,
23037,
284,
1057,
422,
2029,
1162,
81,
14,
8619,
13,
1002,
345,
991,
651,
281,
4049,
11,
198,
28311,
10784,
350,
56,
4221,
... | 2.709817 | 927 |
import argparse
import os
from typing import Iterable, Tuple, List, Optional
from .module_definition import Module, Method, ParameterDocumentation
from .module_definition.exceptions import MockGeneratorError
from .util import CodeBuilder, TemplateFormatter, read_lines
if __name__ == '__main__':
main()
| [
11748,
1822,
29572,
198,
11748,
28686,
198,
6738,
19720,
1330,
40806,
540,
11,
309,
29291,
11,
7343,
11,
32233,
198,
198,
6738,
764,
21412,
62,
46758,
1330,
19937,
11,
11789,
11,
25139,
2357,
24941,
341,
198,
6738,
764,
21412,
62,
46758... | 3.625 | 88 |
"""
Copyright 2017-present, Airbnb Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from datetime import datetime
import os
import boto3
from botocore.exceptions import ClientError
from mock import Mock, patch
from moto import mock_ssm
from nose.tools import (
assert_equal,
raises
)
from stream_alert.shared.config import load_config
from stream_alert.threat_intel_downloader.exceptions import (
ThreatStreamCredsError,
ThreatStreamLambdaInvokeError,
ThreatStreamRequestsError
)
from stream_alert.threat_intel_downloader.main import ThreatStream
from tests.unit.stream_alert_apps.test_helpers import MockLambdaClient
from tests.unit.threat_intel_downloader.test_helpers import get_mock_context, put_mock_params
@patch('time.sleep', Mock())
class TestThreatStream(object):
"""Test class to test ThreatStream functionalities"""
# pylint: disable=protected-access
@patch('stream_alert.threat_intel_downloader.main.load_config',
Mock(return_value=load_config('tests/unit/conf/')))
def setup(self):
"""Setup TestThreatStream"""
# pylint: disable=attribute-defined-outside-init
context = get_mock_context(100000)
self.threatstream = ThreatStream(context.invoked_function_arn,
context.get_remaining_time_in_millis)
@staticmethod
@staticmethod
@patch('stream_alert.threat_intel_downloader.main.load_config',
Mock(return_value=load_config('tests/unit/conf/')))
def test_load_config(self):
"""ThreatStream - Load Config"""
arn = 'arn:aws:lambda:region:123456789012:function:name:development'
expected_config = {
'account_id': '123456789012',
'function_name': 'name',
'qualifier': 'development',
'region': 'region',
'enabled': True,
'excluded_sub_types': [
'bot_ip',
'brute_ip',
'scan_ip',
'spam_ip',
'tor_ip'
],
'ioc_filters': [
'crowdstrike',
'@airbnb.com'
],
'ioc_keys': [
'expiration_ts',
'itype',
'source',
'type',
'value'
],
'ioc_types': [
'domain',
'ip',
'md5'
],
'memory': '128',
'timeout': '60'
}
assert_equal(self.threatstream._load_config(arn), expected_config)
def test_process_data(self):
"""ThreatStream - Process Raw IOC Data"""
raw_data = [
self._get_fake_intel('malicious_domain.com', 'ioc_source'),
self._get_fake_intel('malicious_domain2.com', 'ioc_source2'),
# this will get filtered out
self._get_fake_intel('malicious_domain3.com', 'bad_source_ioc'),
]
self.threatstream._config['ioc_filters'] = {'ioc_source'}
processed_data = self.threatstream._process_data(raw_data)
expected_result = [
{
'value': 'malicious_domain.com',
'itype': 'c2_domain',
'source': 'ioc_source',
'type': 'domain',
'expiration_ts': 1512000062
},
{
'value': 'malicious_domain2.com',
'itype': 'c2_domain',
'source': 'ioc_source2',
'type': 'domain',
'expiration_ts': 1512000062
}
]
assert_equal(processed_data, expected_result)
@mock_ssm
@patch.dict(os.environ, {'AWS_DEFAULT_REGION': 'us-east-1'})
def test_load_api_creds(self):
"""ThreatStream - Load API creds from SSM"""
value = {'api_user': 'test_user', 'api_key': 'test_key'}
put_mock_params(ThreatStream.CRED_PARAMETER_NAME, value)
self.threatstream._load_api_creds()
assert_equal(self.threatstream.api_user, 'test_user')
assert_equal(self.threatstream.api_key, 'test_key')
@mock_ssm
@patch.dict(os.environ, {'AWS_DEFAULT_REGION': 'us-east-1'})
def test_load_api_creds_cached(self):
"""ThreatStream - Load API creds from SSM, Cached"""
value = {'api_user': 'test_user', 'api_key': 'test_key'}
put_mock_params(ThreatStream.CRED_PARAMETER_NAME, value)
self.threatstream._load_api_creds()
assert_equal(self.threatstream.api_user, 'test_user')
assert_equal(self.threatstream.api_key, 'test_key')
self.threatstream._load_api_creds()
@mock_ssm
@raises(ClientError)
def test_load_api_creds_client_errors(self):
"""ThreatStream - Load API creds from SSM, ClientError"""
self.threatstream._load_api_creds()
@patch('boto3.client')
@raises(ThreatStreamCredsError)
def test_load_api_creds_empty_response(self, boto_mock):
"""ThreatStream - Load API creds from SSM, Empty Response"""
boto_mock.return_value.get_parameter.return_value = None
self.threatstream._load_api_creds()
@mock_ssm
@raises(ThreatStreamCredsError)
@patch.dict(os.environ, {'AWS_DEFAULT_REGION': 'us-east-1'})
def test_load_api_creds_invalid_json(self):
"""ThreatStream - Load API creds from SSM with invalid JSON"""
boto3.client('ssm').put_parameter(
Name=ThreatStream.CRED_PARAMETER_NAME,
Value='invalid_value',
Type='SecureString',
Overwrite=True
)
self.threatstream._load_api_creds()
@mock_ssm
@raises(ThreatStreamCredsError)
@patch.dict(os.environ, {'AWS_DEFAULT_REGION': 'us-east-1'})
def test_load_api_creds_no_api_key(self):
"""ThreatStream - Load API creds from SSM, No API Key"""
value = {'api_user': 'test_user', 'api_key': ''}
put_mock_params(ThreatStream.CRED_PARAMETER_NAME, value)
self.threatstream._load_api_creds()
@patch('stream_alert.threat_intel_downloader.main.datetime')
def test_epoch_now(self, date_mock):
"""ThreatStream - Epoch, Now"""
fake_date_now = datetime(year=2017, month=9, day=1)
date_mock.utcnow.return_value = fake_date_now
date_mock.utcfromtimestamp = datetime.utcfromtimestamp
expected_value = datetime(year=2017, month=11, day=30)
value = self.threatstream._epoch_time(None)
assert_equal(datetime.utcfromtimestamp(value), expected_value)
def test_epoch_from_time(self):
"""ThreatStream - Epoch, From Timestamp"""
expected_value = datetime(year=2017, month=11, day=30)
value = self.threatstream._epoch_time('2017-11-30T00:00:00.000Z')
assert_equal(datetime.utcfromtimestamp(value), expected_value)
@raises(ValueError)
def test_epoch_from_bad_time(self):
"""ThreatStream - Epoch, Error"""
self.threatstream._epoch_time('20171130T00:00:00.000Z')
def test_excluded_sub_types(self):
"""ThreatStream - Excluded Sub Types Property"""
expected_value = ['bot_ip', 'brute_ip', 'scan_ip', 'spam_ip', 'tor_ip']
assert_equal(self.threatstream.excluded_sub_types, expected_value)
def test_ioc_keys(self):
"""ThreatStream - IOC Keys Property"""
expected_value = ['expiration_ts', 'itype', 'source', 'type', 'value']
assert_equal(self.threatstream.ioc_keys, expected_value)
def test_ioc_sources(self):
"""ThreatStream - IOC Sources Property"""
expected_value = ['crowdstrike', '@airbnb.com']
assert_equal(self.threatstream.ioc_sources, expected_value)
def test_ioc_types(self):
"""ThreatStream - IOC Types Property"""
expected_value = ['domain', 'ip', 'md5']
assert_equal(self.threatstream.ioc_types, expected_value)
def test_threshold(self):
"""ThreatStream - Threshold Property"""
assert_equal(self.threatstream.threshold, 499000)
@patch('stream_alert.threat_intel_downloader.main.ThreatStream._finalize')
@patch('stream_alert.threat_intel_downloader.main.requests.get')
def test_connect(self, get_mock, finalize_mock):
"""ThreatStream - Connection to ThreatStream.com"""
get_mock.return_value.json.return_value = self._get_http_response()
get_mock.return_value.status_code = 200
self.threatstream._config['ioc_filters'] = {'test_source'}
self.threatstream._connect('previous_url')
expected_intel = [
{
'value': 'malicious_domain2.com',
'itype': 'c2_domain',
'source': 'test_source',
'type': 'domain',
'expiration_ts': 1512000062
}
]
finalize_mock.assert_called_with(expected_intel, None)
@patch('stream_alert.threat_intel_downloader.main.ThreatStream._finalize')
@patch('stream_alert.threat_intel_downloader.main.requests.get')
def test_connect_with_next(self, get_mock, finalize_mock):
"""ThreatStream - Connection to ThreatStream.com, with Continuation"""
next_url = 'this_url'
get_mock.return_value.json.return_value = self._get_http_response(next_url)
get_mock.return_value.status_code = 200
self.threatstream._config['ioc_filters'] = {'test_source'}
self.threatstream._connect('previous_url')
expected_intel = [
{
'value': 'malicious_domain2.com',
'itype': 'c2_domain',
'source': 'test_source',
'type': 'domain',
'expiration_ts': 1512000062
}
]
finalize_mock.assert_called_with(expected_intel, next_url)
@raises(ThreatStreamRequestsError)
@patch('stream_alert.threat_intel_downloader.main.requests.get')
def test_connect_with_unauthed(self, get_mock):
"""ThreatStream - Connection to ThreatStream.com, Unauthorized Error"""
get_mock.return_value.json.return_value = self._get_http_response()
get_mock.return_value.status_code = 401
self.threatstream._connect('previous_url')
@raises(ThreatStreamRequestsError)
@patch('stream_alert.threat_intel_downloader.main.requests.get')
def test_connect_with_retry_error(self, get_mock):
"""ThreatStream - Connection to ThreatStream.com, Retry Error"""
get_mock.return_value.status_code = 500
self.threatstream._connect('previous_url')
@raises(ThreatStreamRequestsError)
@patch('stream_alert.threat_intel_downloader.main.requests.get')
def test_connect_with_unknown_error(self, get_mock):
"""ThreatStream - Connection to ThreatStream.com, Unknown Error"""
get_mock.return_value.status_code = 404
self.threatstream._connect('previous_url')
@patch('stream_alert.threat_intel_downloader.main.ThreatStream._load_api_creds')
@patch('stream_alert.threat_intel_downloader.main.ThreatStream._connect')
def test_runner(self, connect_mock, _):
"""ThreatStream - Runner"""
expected_url = ('/api/v2/intelligence/?username=user&api_key=key&limit=1000&q='
'(status="active")+AND+(type="domain"+OR+type="ip"+OR+type="md5")+'
'AND+NOT+(itype="bot_ip"+OR+itype="brute_ip"+OR+itype="scan_ip"+'
'OR+itype="spam_ip"+OR+itype="tor_ip")')
self.threatstream.api_key = 'key'
self.threatstream.api_user = 'user'
self.threatstream.runner({'none': 'test'})
connect_mock.assert_called_with(expected_url)
@patch('stream_alert.threat_intel_downloader.main.ThreatStream._write_to_dynamodb_table')
@patch('stream_alert.threat_intel_downloader.main.ThreatStream._invoke_lambda_function')
def test_finalize(self, invoke_mock, write_mock):
"""ThreatStream - Finalize with Intel"""
intel = ['foo', 'bar']
self.threatstream._finalize(intel, None)
write_mock.assert_called_with(intel)
invoke_mock.assert_not_called()
@patch('stream_alert.threat_intel_downloader.main.ThreatStream._write_to_dynamodb_table')
@patch('stream_alert.threat_intel_downloader.main.ThreatStream._invoke_lambda_function')
def test_finalize_next_url(self, invoke_mock, write_mock):
"""ThreatStream - Finalize with Next URL"""
intel = ['foo', 'bar']
self.threatstream._finalize(intel, 'next')
write_mock.assert_called_with(intel)
invoke_mock.assert_called_with('next')
@patch('boto3.resource')
def test_write_to_dynamodb_table(self, boto_mock):
"""ThreatStream - Write Intel to DynamoDB Table"""
intel = [self._get_fake_intel('malicious_domain.com', 'test_source')]
expected_intel = {
'expiration_ts': '2017-11-30T00:01:02.123Z',
'source': 'test_source',
'ioc_type': 'domain',
'sub_type': 'c2_domain',
'ioc_value': 'malicious_domain.com'
}
self.threatstream._write_to_dynamodb_table(intel)
batch_writer = boto_mock.return_value.Table.return_value.batch_writer.return_value
batch_writer.__enter__.return_value.put_item.assert_called_with(Item=expected_intel)
@patch('boto3.resource')
@raises(ClientError)
def test_write_to_dynamodb_table_error(self, boto_mock):
"""ThreatStream - Write Intel to DynamoDB Table, Error"""
intel = [self._get_fake_intel('malicious_domain.com', 'test_source')]
err = ClientError({'Error': {'Code': 404}}, 'PutItem')
batch_writer = boto_mock.return_value.Table.return_value.batch_writer.return_value
batch_writer.__enter__.return_value.put_item.side_effect = err
self.threatstream._write_to_dynamodb_table(intel)
@patch('boto3.client')
def test_invoke_lambda_function(self, boto_mock):
"""ThreatStream - Invoke Lambda Function"""
boto_mock.return_value = MockLambdaClient()
self.threatstream._invoke_lambda_function('next_token')
boto_mock.assert_called_once()
@patch('boto3.client', Mock(return_value=MockLambdaClient()))
@raises(ThreatStreamLambdaInvokeError)
def test_invoke_lambda_function_error(self):
"""ThreatStream - Invoke Lambda Function, Error"""
MockLambdaClient._raise_exception = True
self.threatstream._invoke_lambda_function('next_token')
| [
37811,
198,
15269,
2177,
12,
25579,
11,
35079,
3457,
13,
198,
198,
26656,
15385,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
5832,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,... | 2.248115 | 6,630 |
"""
Implements the global range check used in the EN quality control
system.
"""
def test(p, parameters):
"""
Runs the quality control check on profile p and returns a numpy array
of quality control decisions with False where the data value has
passed the check and True where it failed.
"""
# Get temperature values from the profile.
t = p.t()
# Make the quality control decisions. This should
# return true if the temperature is outside -4 deg C
# and 40 deg C.
qc = (t.mask == False) & ((t.data < -4.0) | (t.data > 40.0))
return qc
| [
37811,
220,
198,
3546,
1154,
902,
262,
3298,
2837,
2198,
973,
287,
262,
12964,
3081,
1630,
220,
198,
10057,
13,
220,
198,
37811,
198,
198,
4299,
1332,
7,
79,
11,
10007,
2599,
198,
220,
220,
220,
37227,
220,
198,
220,
220,
220,
44743... | 3.025381 | 197 |
import logging
import threading
import time
module_logger = logging.getLogger(__name__)
__all__ = ["iterativeRun", "Pause", "PausableThread", "PausableThreadCallback"]
def iterativeRun(run_fn):
"""
A decorator for running functions repeatedly inside a PausableThread.
Allows one to pause and stop the thread while its repeatedly calling
the overriden run function.
Args:
run_fn (callable): the overridden run function from PausableThread
Returns:
callable: wrapped function
"""
return wrapper
class Pause(object):
"""
A context manager for pausing threads.
This starts by pausing an input thread or threads and unpausing them when
code inside block has been called.
This makes sure that when we unpause the thread when we're done
doing whatever task we needed.
Attributes:
thread (dict): A collection of threads to pause and unpause.
init_pause_status (dict): The initial state of the threads in
the thread attribute.
"""
def __init__(self, pausable_thread):
"""
Args:
pausable_thread (list, PausableThread): An instance, or list of
instances of PausableThread. If we pass ``None``, then this gets
dealt with properly down stream.
"""
self.thread = pausable_thread
if not isinstance(self.thread, dict):
# if the argument is not a dict, make it one
self.thread = {'thread': self.thread}
self.init_pause_status = {}
for name in list(self.thread.keys()):
if self.thread[name]:
self.init_pause_status[name] = self.thread[name].paused()
else:
self.init_pause_status[name] = None
# self.init_pause_status = {name: self.thread[name].paused() for name in self.thread.keys()}
def __enter__(self):
"""
Pause the thread in question, and make sure that whatever
functionality is being performing is actually stopped.
"""
for name in list(self.thread.keys()):
t = self.thread[name]
if t:
# if there really is a thread
if not self.init_pause_status[name]:
# and it is not already paused
t.pause()
else:
pass
# now make sure that they're actually paused.
for name in list(self.thread.keys()):
t = self.thread[name]
if t:
# if there really is a thread
while self.thread[name].running():
# wait until it is no longer running
time.sleep(0.001)
else:
pass
def __exit__(self, *args):
"""
Unpause the thread
"""
for name in list(self.thread.keys()):
t = self.thread[name]
if t:
# if there really is a thread
if not self.init_pause_status[name]:
self.thread[name].unpause()
else:
pass
class PausableThread(threading.Thread):
"""
A pausable stoppable thread.
It also has a running flag that can be used to determine if the process is
still running.
Attributes:
_running ():
name (str): name of thread, if any
logger (logging.getLogger): logging instance.
_lock (threading.Lock): thread's internal lock
_pause (threading.Event): setting and clearing this indicates to
pause or unpause thread.
_stop (threading.Event): setting this stops thread.
_running (threading.Event): setting this indicates thread is
currently executing "run" method.
"""
def __init__(self, name=None, **kwargs):
"""
create a pausable thread
Args:
name (str): name of thread.
**kwargs: To be passed to
"""
threading.Thread.__init__(self)
self.name = name
self.logger = logging.getLogger(__name__)
self._lock = threading.Lock()
# create events for the thread states
self._pause = threading.Event()
self._stop = threading.Event()
self._running = threading.Event()
def stop(self):
"""
Stop the thread from running all together. Make
sure to join this up with threading.Thread.join()
"""
self._stop.set()
class PausableThreadCallback(threading.Thread):
"""
A thread that runs the same callback over an over again, with some
predetermined wait time.
This thread can be paused, unpaused, and stopped in a thread-safe manner.
"""
| [
11748,
18931,
198,
11748,
4704,
278,
198,
11748,
640,
198,
198,
21412,
62,
6404,
1362,
796,
18931,
13,
1136,
11187,
1362,
7,
834,
3672,
834,
8,
198,
198,
834,
439,
834,
796,
14631,
2676,
876,
10987,
1600,
366,
49991,
1600,
366,
47,
... | 2.32872 | 2,023 |
#!/bin/env python3
"""
If gene name is simply totally missing, use the gene ID for the gene name.
(Don't confuse this with the case in which there is actualy a gene name
embeded in the gene id, for which you would want to use the utility
'extract_name_embedded_in_gene_id.py'.)
"""
import argparse
import gtfez
# command line interface (making this a modulino)
if __name__ == '__main__':
main(parse_args())
| [
2,
48443,
8800,
14,
24330,
21015,
18,
198,
37811,
198,
1532,
9779,
1438,
318,
2391,
6635,
4814,
11,
779,
262,
9779,
4522,
329,
262,
9779,
1438,
13,
198,
7,
3987,
470,
27531,
428,
351,
262,
1339,
287,
543,
612,
318,
4036,
88,
257,
... | 3.151515 | 132 |
from model.Control import Control
from model.Sensor import Sensor
from apscheduler.schedulers.background import BackgroundScheduler
# The "apscheduler." prefix is hard coded
scheduler = BackgroundScheduler({
'apscheduler.executors.default': {
'class': 'apscheduler.executors.pool:ThreadPoolExecutor',
'max_workers': '60'
},
'apscheduler.executors.processpool': {
'type': 'processpool',
'max_workers': '60'
},
'apscheduler.job_defaults.max_instances': '60',
'apscheduler.timezone': 'UTC',
})
class Schedule():
"""
Kelas untuk set scheduler dan fungsi yang berkaitan
"""
def getListControl(self):
"""
Fungsi untuk mengecek secara berkala Control pada tiap Nodes ke Main Web
"""
print("memulai getlistcontrol")
try:
a = Control().read_controls()
if a!=None:#cek local ada nodes atau tidak
for control in a:
# print("Satuan control")
#Membuat obj control tiap baris pada table control
c = Control(id_arduino=control['id_arduino'], id_user=control['id_user'])
c.getControl() #Ngambil data terbaru
else:
print("KOSONG")
except Exception as e:
print(e)
| [
6738,
2746,
13,
15988,
1330,
6779,
198,
6738,
2746,
13,
47864,
1330,
35367,
198,
6738,
257,
862,
1740,
18173,
13,
1416,
704,
377,
364,
13,
25249,
1330,
25353,
50,
1740,
18173,
628,
198,
2,
383,
366,
499,
1416,
704,
18173,
526,
21231,
... | 2.1344 | 625 |
import sys
import os
size = os.get_terminal_size()
rows, cols = size.lines, size.columns
data_to_add = b'Hello, world!'
data = bytes((rows * cols) * b' ')
data = data_to_add + data[len(data_to_add) : ]
for row in range(rows):
start = row * cols
end = start + cols
write(str(data[start : end], 'utf-8'))
write('\n')
| [
11748,
25064,
198,
11748,
28686,
198,
198,
7857,
796,
28686,
13,
1136,
62,
23705,
282,
62,
7857,
3419,
198,
8516,
11,
951,
82,
796,
2546,
13,
6615,
11,
2546,
13,
28665,
82,
198,
198,
7890,
62,
1462,
62,
2860,
796,
275,
6,
15496,
1... | 2.41791 | 134 |
from __future__ import unicode_literals
from builtins import super
import numpy as np
from django.db import models
from django.urls import reverse
from django.utils.text import slugify
# what this moddule does is displaying the price median price, minunum and maximum price and displaying the companies that offer the services
# it goes this way a model to display the activity and the prices
# what is the price of printing and a banner 1 meter squared on roland
# or what is the price of printing a banner 1 meter squared on flora
# what is the price of hosting a meeting for and hour in a hotel
# what is the price of repairing a phone nokia
from accounts.models import User
from company.models import Company
from location.models import Location
STATUS_CHOICES = (
('draft', 'Draft'),
('published', 'Published'),
)
NEGOTIABLE = (
('yes', 'Yes'),
('no', 'No'),
)
| [
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
3170,
1040,
1330,
2208,
198,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
957... | 3.659836 | 244 |
from __future__ import print_function
from os import path
from blazeweb.config import DefaultSettings
basedir = path.dirname(path.dirname(__file__))
app_package = path.basename(basedir)
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
6738,
28686,
1330,
3108,
198,
198,
6738,
698,
1031,
413,
1765,
13,
11250,
1330,
15161,
26232,
198,
198,
3106,
343,
796,
3108,
13,
15908,
3672,
7,
6978,
13,
15908,
3672,
7,
834,
7753,... | 3.177419 | 62 |
import urllib2
| [
11748,
2956,
297,
571,
17,
628
] | 2.666667 | 6 |
lines = []
with open('Time.txt') as f:
lines = f.readlines()
f.close()
total_lines = len(lines)
TimeTakenInDays = dict()
DifficultLevelInEachDays = dict()
day = 1
for index in range(total_lines):
if(index!=0):
difficultLevel, Time = lines[index].split(" ")
TimeTakenInDays["Day "+str(day-1)] = int(Time)
DifficultLevelInEachDays["Day "+str(day-1)] = difficultLevel
day+=1
easyAverage = 0
# 20 minutes
easyMin = 1200
easyMax = 0
easyCount = 0
mediumAverage = 0
# 60 minutes
mediumMin = 3600
mediumMax = 0
mediumCount = 0
hardAverage = 0
# 2 hours
hardMin = 7200
hardMax = 0
hardCount = 0
for index in range(total_lines-1):
key = "Day {}".format(index+1)
if(DifficultLevelInEachDays[key]=="Easy"):
easyAverage+=TimeTakenInDays[key]
if(TimeTakenInDays[key]>easyMax):
easyMax = TimeTakenInDays[key]
if(TimeTakenInDays[key]<easyMin):
easyMin = TimeTakenInDays[key]
easyCount+=1
elif(DifficultLevelInEachDays[key]=="Medium"):
mediumAverage+=TimeTakenInDays[key]
if(TimeTakenInDays[key]>mediumMax):
mediumMax = TimeTakenInDays[key]
if(TimeTakenInDays[key]<mediumMin):
mediumMin = TimeTakenInDays[key]
mediumCount+=1
elif(DifficultLevelInEachDays[key]=="Hard"):
hardAverage+=TimeTakenInDays[key]
if(TimeTakenInDays[key]>hardMax):
hardMax = TimeTakenInDays[key]
if(TimeTakenInDays[key]<hardMin):
hardMin = TimeTakenInDays[key]
hardCount+=1
lines = []
with open("README.md", "r") as f:
lines = f.readlines()
f.close()
del lines[8:len(lines)]
with open("README.md", "w") as f:
for line in lines:
f.write(line)
f.close()
with open("README.md", "a") as f:
for index in range(total_lines-1):
key = "Day {}".format(index+1)
f.write("\n"+key+" ")
f.write(DifficultLevelInEachDays[key]+" ")
f.write(str(TimeTakenInDays[key]))
f.write("<br />")
f.write("<br /><br />")
if(easyCount!=0):
f.write("\nEasy Level\n")
f.write("\nMinimum Time Taken - "+str(easyMin))
f.write("\n\nMaximum Time Taken - "+str(easyMax))
f.write("\n\nAverage Time Taken - "+str(easyAverage//easyCount))
f.write("<br /><br />")
if(mediumCount!=0):
f.write("\n\nMedium Level\n")
f.write("\nMinimum Time Taken - "+str(mediumMin))
f.write("\n\nMaximum Time Taken - "+str(mediumMax))
f.write("\n\nAverage Time Taken - "+str(mediumAverage//mediumCount))
f.write("<br /><br />")
if(hardCount!=0):
f.write("\n\nHard Level\n")
f.write("\n\nMinimum Time Taken - "+str(hardMin))
f.write("\n\nMaximum Time Taken - "+str(hardMax))
f.write("\n\nAverage Time Taken - "+str(hardAverage//hardCount))
f.close()
| [
6615,
796,
17635,
198,
4480,
1280,
10786,
7575,
13,
14116,
11537,
355,
277,
25,
198,
220,
220,
220,
3951,
796,
277,
13,
961,
6615,
3419,
198,
69,
13,
19836,
3419,
198,
198,
23350,
62,
6615,
796,
18896,
7,
6615,
8,
198,
198,
7575,
... | 2.144412 | 1,378 |
from os import environ
from loguru import logger
from sentry_sdk import capture_exception
| [
6738,
28686,
1330,
551,
2268,
198,
198,
6738,
2604,
14717,
1330,
49706,
198,
6738,
1908,
563,
62,
21282,
74,
1330,
8006,
62,
1069,
4516,
628,
198
] | 3.576923 | 26 |
from datetime import datetime
from flask_login import current_user
from flask_wtf import FlaskForm
from sqlalchemy import asc, or_
from wtforms import StringField, FloatField, SelectField, PasswordField, TextAreaField, HiddenField
from wtforms.validators import DataRequired, Length, Regexp, InputRequired, ValidationError, length
from agil.Chef.utils import validation, verifDate, days_between, date_check, days_calc
from agil.Main.utils import FormatString
from agil.models.Absence import Absence
from agil.models.Carburant import Carburant
from agil.models.Citerne import Citerne
from agil.models.Conge import TypeConge, Conge
from agil.models.Employee import Employee
from agil.models.Groupe import Groupe
from agil.models.Lavage import Lavage
from agil.models.Pompe import Pompe
from agil.models.PompeCiterne import PompeCiterne
from agil.models.Role import Role
from agil.models.Station import Station
from agil.models.Voie import Voie
| [
6738,
4818,
8079,
1330,
4818,
8079,
198,
198,
6738,
42903,
62,
38235,
1330,
1459,
62,
7220,
198,
6738,
42903,
62,
86,
27110,
1330,
46947,
8479,
198,
6738,
44161,
282,
26599,
1330,
10570,
11,
393,
62,
198,
6738,
266,
83,
23914,
1330,
1... | 3.302721 | 294 |
from datetime import datetime
from django.db import models
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from django.contrib.auth.models import User, Group
from django.utils.translation import ugettext_lazy as _
from authority.managers import PermissionManager
class Permission(models.Model):
"""
A granular permission model, per-object permission in other words.
This kind of permission is associated with a user/group and an object
of any content type.
"""
codename = models.CharField(_('codename'), max_length=100)
content_type = models.ForeignKey(ContentType, related_name="row_permissions")
object_id = models.PositiveIntegerField()
content_object = generic.GenericForeignKey('content_type', 'object_id')
user = models.ForeignKey(User, null=True, blank=True, related_name='granted_permissions')
group = models.ForeignKey(Group, null=True, blank=True)
creator = models.ForeignKey(User, null=True, blank=True, related_name='created_permissions')
approved = models.BooleanField(_('approved'), default=False, help_text=_("Designates whether the permission has been approved and treated as active. Unselect this instead of deleting permissions."))
date_requested = models.DateTimeField(_('date requested'), default=datetime.now)
date_approved = models.DateTimeField(_('date approved'), blank=True, null=True)
objects = PermissionManager()
def approve(self, creator):
"""
Approve granular permission request setting a Permission entry as
approved=True for a specific action from an user on an object instance.
"""
self.approved = True
self.creator = creator
self.save()
| [
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
11299,
19199,
13,
27530,
1330,
14041,
6030,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
11299,
19199,
1330,
1... | 3.285448 | 536 |
import unittest
from classes import *
if __name__ == '__main__':
unittest.main()
| [
11748,
555,
715,
395,
198,
6738,
6097,
1330,
1635,
628,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
555,
715,
395,
13,
12417,
3419,
198
] | 2.69697 | 33 |
import time
import numpy as np
| [
11748,
640,
198,
11748,
299,
32152,
355,
45941,
198
] | 3.444444 | 9 |
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
from mmcv.utils import print_log
from mmdet.datasets.builder import DATASETS
from mmdet.datasets.pipelines import Compose
from torch.utils.data import Dataset
from mmocr.datasets.builder import build_loader
@DATASETS.register_module()
class BaseDataset(Dataset):
"""Custom dataset for text detection, text recognition, and their
downstream tasks.
1. The text detection annotation format is as follows:
The `annotations` field is optional for testing
(this is one line of anno_file, with line-json-str
converted to dict for visualizing only).
{
"file_name": "sample.jpg",
"height": 1080,
"width": 960,
"annotations":
[
{
"iscrowd": 0,
"category_id": 1,
"bbox": [357.0, 667.0, 804.0, 100.0],
"segmentation": [[361, 667, 710, 670,
72, 767, 357, 763]]
}
]
}
2. The two text recognition annotation formats are as follows:
The `x1,y1,x2,y2,x3,y3,x4,y4` field is used for online crop
augmentation during training.
format1: sample.jpg hello
format2: sample.jpg 20 20 100 20 100 40 20 40 hello
Args:
ann_file (str): Annotation file path.
pipeline (list[dict]): Processing pipeline.
loader (dict): Dictionary to construct loader
to load annotation infos.
img_prefix (str, optional): Image prefix to generate full
image path.
test_mode (bool, optional): If set True, try...except will
be turned off in __getitem__.
"""
def _set_group_flag(self):
"""Set flag."""
self.flag = np.zeros(len(self), dtype=np.uint8)
def pre_pipeline(self, results):
"""Prepare results dict for pipeline."""
results['img_prefix'] = self.img_prefix
def prepare_train_img(self, index):
"""Get training data and annotations from pipeline.
Args:
index (int): Index of data.
Returns:
dict: Training data and annotation after pipeline with new keys
introduced by pipeline.
"""
img_info = self.data_infos[index]
results = dict(img_info=img_info)
self.pre_pipeline(results)
return self.pipeline(results)
def prepare_test_img(self, img_info):
"""Get testing data from pipeline.
Args:
idx (int): Index of data.
Returns:
dict: Testing data after pipeline with new keys introduced by
pipeline.
"""
return self.prepare_train_img(img_info)
def _log_error_index(self, index):
"""Logging data info of bad index."""
try:
data_info = self.data_infos[index]
img_prefix = self.img_prefix
print_log(f'Warning: skip broken file {data_info} '
f'with img_prefix {img_prefix}')
except Exception as e:
print_log(f'load index {index} with error {e}')
def _get_next_index(self, index):
"""Get next index from dataset."""
self._log_error_index(index)
index = (index + 1) % len(self)
return index
def __getitem__(self, index):
"""Get training/test data from pipeline.
Args:
index (int): Index of data.
Returns:
dict: Training/test data.
"""
if self.test_mode:
return self.prepare_test_img(index)
while True:
try:
data = self.prepare_train_img(index)
if data is None:
raise Exception('prepared train data empty')
break
except Exception as e:
print_log(f'prepare index {index} with error {e}')
index = self._get_next_index(index)
return data
def format_results(self, results, **kwargs):
"""Placeholder to format result to dataset-specific output."""
pass
def evaluate(self, results, metric=None, logger=None, **kwargs):
"""Evaluate the dataset.
Args:
results (list): Testing results of the dataset.
metric (str | list[str]): Metrics to be evaluated.
logger (logging.Logger | str | None): Logger used for printing
related information during evaluation. Default: None.
Returns:
dict[str: float]
"""
raise NotImplementedError
| [
2,
15069,
357,
66,
8,
4946,
44,
5805,
397,
13,
1439,
2489,
10395,
13,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
8085,
33967,
13,
26791,
1330,
3601,
62,
6404,
198,
6738,
8085,
15255,
13,
19608,
292,
1039,
13,
38272,
1330,
360,
1... | 2.161365 | 2,169 |
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, List, Optional, Tuple
if TYPE_CHECKING:
from .file import HashFile
ADD = "add"
MODIFY = "modify"
DELETE = "delete"
UNCHANGED = "unchanged"
@dataclass
@dataclass
@dataclass
ROOT = ("",)
| [
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
11,
2214,
198,
6738,
19720,
1330,
41876,
62,
50084,
2751,
11,
7343,
11,
32233,
11,
309,
29291,
198,
198,
361,
41876,
62,
50084,
2751,
25,
198,
220,
220,
220,
422,
764,
7753,
1330,
21059,... | 2.669903 | 103 |
import io
import re
from setuptools import find_packages, setup
with io.open("int_rew/__init__.py", "rt", encoding="utf8") as f:
version = re.search(r"__version__ = \"(.*?)\"", f.read()).group(1)
setup(
name="intrinsic_rewards",
version=version,
url="https://github.com/kngwyu/intrinsic_rewards",
project_urls={
"Code": "https://github.com/kngwyu/intrinsic_rewards",
"Issue tracker": "https://github.com/kngwyu/intrinsic_rewards/issues",
},
author="Yuji Kanagawa",
author_email="yuji.kngw.80s.revive@gmail.com",
description="A collection of DRL algorithms with intrinsic rewards",
packages=find_packages(),
python_requires=">=3.6",
classifiers=[
"License :: OSI Approved :: Apache Software License",
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
],
)
| [
11748,
33245,
198,
11748,
302,
198,
198,
6738,
900,
37623,
10141,
1330,
1064,
62,
43789,
11,
9058,
198,
198,
4480,
33245,
13,
9654,
7203,
600,
62,
1809,
14,
834,
15003,
834,
13,
9078,
1600,
366,
17034,
1600,
21004,
2625,
40477,
23,
49... | 2.54 | 450 |
# Generated by Django 3.1.2 on 2020-10-17 14:58
import django.contrib.gis.db.models.fields
from django.db import migrations, models
import django.db.models.deletion
| [
2,
2980,
515,
416,
37770,
513,
13,
16,
13,
17,
319,
12131,
12,
940,
12,
1558,
1478,
25,
3365,
198,
198,
11748,
42625,
14208,
13,
3642,
822,
13,
70,
271,
13,
9945,
13,
27530,
13,
25747,
198,
6738,
42625,
14208,
13,
9945,
1330,
1572... | 2.783333 | 60 |
import chorecore
| [
11748,
30569,
7295,
628
] | 4.5 | 4 |
# Desafio069 quantas pessoas tem mais de 18, quantos homens, quantas mulheres com menos de 20.
maiorDeIdade = int()
homens = int()
mulheresMenor = int()
while True:
idade = int(input('Qual a idade? '))
sexo = input('M para masculino, e F para feminino ').strip().lower()
if idade > 18:
maiorDeIdade += 1
if sexo == 'm':
homens += 1
if sexo == 'f' and idade > 20:
mulheresMenor += 1
saida = input('Deseja continuar? \033[2;32mS\033[m ou \033[2;32mN\033[m ')
if saida == 'n':
break
print(f'Tivemos cadastrados {maiorDeIdade} Maiores de 18 anos.')
print(f'Tivemos cadastrados {homens} homens.')
print(f'Tivemos cadastrados {mulheresMenor} mulheres com menos de 20 anos.') | [
2,
2935,
1878,
952,
3312,
24,
5554,
292,
279,
408,
78,
292,
2169,
285,
15152,
390,
1248,
11,
5554,
418,
3488,
641,
11,
5554,
292,
35971,
19079,
401,
1450,
418,
390,
1160,
13,
628,
198,
2611,
1504,
5005,
7390,
671,
796,
493,
3419,
... | 2.261538 | 325 |
import builtins
import sys
import unittest
import hebrew_python.hook as hepy
from io import StringIO
from contextlib import contextmanager
import re
# for debug the test:
true_stdout = sys.stdout
true_stderr = sys.stderr
DEBUG = False
if DEBUG:
from ddebug import dd
dd.add_output_folder(with_errors=False)
try:
import friendly_traceback
except ImportError:
friendly_traceback = None
@contextmanager
if __name__ == '__main__':
unittest.main()
| [
11748,
3170,
1040,
198,
11748,
25064,
198,
11748,
555,
715,
395,
198,
198,
11748,
339,
11269,
62,
29412,
13,
25480,
355,
339,
9078,
198,
6738,
33245,
1330,
10903,
9399,
198,
6738,
4732,
8019,
1330,
4732,
37153,
198,
11748,
302,
198,
2,
... | 2.9375 | 160 |
# Your LRUCache object will be instantiated and called as such:
# obj = LRUCache(capacity)
# param_1 = obj.get(key)
# obj.put(key,value) | [
201,
198,
2,
3406,
37491,
9598,
4891,
2134,
481,
307,
9113,
12931,
290,
1444,
355,
884,
25,
201,
198,
2,
26181,
796,
37491,
9598,
4891,
7,
42404,
8,
201,
198,
2,
5772,
62,
16,
796,
26181,
13,
1136,
7,
2539,
8,
201,
198,
2,
26181... | 2.711538 | 52 |
# python code for KMTNET data astrometry (specially CTIO data)
# read kmtnet_astrom.txt first to understand the order and process
# 2015.09.17 Changsu Choi
from astropy.io import ascii
import numpy as np
import os,sys
from astropy.io import fits
import astropy.units as u
import astropy.coordinates as coord
import astropy.units as u
import subprocess
import pp
#os.system('gethead kmtc.20150218.00503*.fits ra dec filter object exptime date-obs > info.txt')
info=ascii.read('info.txt')
addlist=info['col1']
ra=info['col2']
dec=info['col3']
filters=info['col4']
obj=info['col5']
exptime=info['col6']
dateobs=info['col7']
'''
def mefcr :
num=addlist[n][14:-5]
rad=coord.Angle(ra[n],unit=u.hour)
radd=rad.degree
decd=coord.Angle(dec[n],unit=u.deg)
decdd=decd.degree
# python
makemef='python kmtn_makemef.py '+num
resetcrval='python kmtn_resetcrval.py '+ num+'.fits -c '+str(radd)+','+str(decdd)
os.system(makemef)
os.system(resetcrval)
'''
for n in range(len(addlist)):
num=addlist[n][14:-5]
rad=coord.Angle(ra[n],unit=u.hour)
radd=rad.degree
decd=coord.Angle(dec[n],unit=u.deg)
decdd=decd.degree
# python
makemef='python kmtn_makemef.py '+num
resetcrval='python kmtn_resetcrval.py '+ num+'.fits -c '+str(radd)+','+str(decdd)
os.system(makemef)
os.system(resetcrval)
# sextractor
#sexcom= 'sex '+num+'.fits -c kmtnet.sex -CATALOG_NAME '+num+'.cat -HEADER_SUFFIX NONE -DETECT_THRESH 50.0 -ANALYSIS_THRESH 50.0 -SATUR_LEVEL 60000.0 -WEIGHT_TYPE MAP_WEIGHT -WEIGHT_IMAGE weight.fits'
#scampcom='scamp '+num+'.cat -c kmtnet.scamp -ASTREF_CATALOG 2MASS -POSITION_MAXERR 20.0 -CROSSID_RADIUS 5.0 -DISTORT_DEGREES 3 -PROJECTION_TYPE TPV -AHEADER_GLOBAL kmtnet_global_ctio.ahead -CHECKPLOT_TYPE NONE'
## sextractor and scamp
for n in range(len(addlist)):
sexscamp(addlist[n])
## final file making
for files in addlist : set4astrom(files)
'''
## header edition
for n in range(len(addlist)):
num=addlist[n][14:-5]
hdr=fits.getheader(addlist[n])
data=fits.getdata(num+'.fits')
hdr.fromTxtFile('006022.head')
newfile='a'+addlist[n]
fits.writeto(newfile,data,hdr,clobber=True)
'''
| [
2,
21015,
2438,
329,
509,
13752,
12884,
1366,
6468,
398,
11973,
357,
82,
2333,
16356,
9399,
1366,
8,
198,
2,
1100,
10571,
83,
3262,
62,
459,
398,
13,
14116,
717,
284,
1833,
262,
1502,
290,
1429,
198,
2,
1853,
13,
2931,
13,
1558,
2... | 2.301087 | 920 |
# coding=utf-8
"""Command line processing"""
import argparse
from sksurgeryspeech import __version__
from sksurgeryspeech.ui import sksurgeryspeech_demo
def main(args=None):
"""Entry point for scikit-surgeryspeech application"""
parser = argparse.ArgumentParser(description='scikit-surgeryspeech')
version_string = __version__
friendly_version_string = version_string if version_string else 'unknown'
parser.add_argument(
"--version",
action='version',
version='scikit-surgeryspeech version ' + friendly_version_string)
parser.add_argument(
"-c", "--config",
required=True,
type=str,
help="Configuration file")
args = parser.parse_args(args)
demo = sksurgeryspeech_demo.SpeechRecognitionDemo(args.config)
demo.run_demo()
| [
2,
19617,
28,
40477,
12,
23,
198,
198,
37811,
21575,
1627,
7587,
37811,
628,
198,
11748,
1822,
29572,
198,
6738,
264,
591,
32650,
893,
431,
3055,
1330,
11593,
9641,
834,
198,
6738,
264,
591,
32650,
893,
431,
3055,
13,
9019,
1330,
264,... | 2.646302 | 311 |
import numpy as np
import pandas as pd
import torch
from torch import nn
from torch.utils.data import TensorDataset,DataLoader
X = np.random.normal(5,2,(10000,30))
Y = X@np.random.normal(1,2,30)
model = NeuralNetwork(
configuration= nn.Sequential(
nn.Linear(30,10),
nn.Sigmoid(),
nn.Linear(10,1)
),
loss_fn = torch.nn.modules.loss.L1Loss(),
# loss_fn=torch.nn.modules.loss.MSELoss(),
optimizer = torch.optim.SGD,
lr = 1e-2,
batch_size = 200,
epochs=100
).to('cpu')
model.fit(X,Y)
pred = model.predict(X)
Y_torch = torch.from_numpy(Y.astype(np.float32))
Y_torch = Y_torch.reshape(pred.shape)
1-sum((Y_torch-pred)**2)/sum((Y_torch-torch.mean(Y_torch))**2)
###
training_data = pd.read_csv("c:/users/jliv/downloads/mnist_train.csv")
testing_data = pd.read_csv("c:/users/jliv/downloads/mnist_test.csv")
cols = ['label']+['col_'+str(i) for i in range(len(training_data.columns)-1)]
training_data.columns = cols
testing_data.columns = cols
training_labels=training_data['label']
testing_labels=testing_data['label']
training_data.drop(['label'],inplace=True,axis=1)
testing_data.drop(['label'],inplace=True,axis=1)
training_data=np.array(training_data).reshape(59999,1,28,28)
testing_data=np.array(testing_data).reshape(9999,1,28,28)
import matplotlib.pyplot as plt
plt.imshow(training_data[0][0])
plt.show()
training_labels=np.array(training_labels)
testing_labels=np.array(testing_labels)
model = NeuralNetwork(
configuration= nn.Sequential(
nn.Conv2d(1, 1, kernel_size=4, stride=2, padding=2),
nn.ReLU(),
nn.AdaptiveAvgPool2d(16),
nn.Flatten(),
nn.Linear(16*16, 10),
nn.Sigmoid()
),
loss_fn = torch.nn.modules.loss.CrossEntropyLoss(),
optimizer = torch.optim.SGD,
lr = 1e-2,
batch_size = 200,
epochs=1
).to('cpu')
model.fit(training_data,training_labels)
pred=np.argmax(model.predict(training_data),axis=1)
Y_torch = torch.from_numpy(training_labels.astype(np.float32))
Y_torch = Y_torch.reshape(pred.shape)
np.mean(np.where(Y_torch==pred,1,0))
pred=np.argmax(model.predict(testing_data),axis=1)
Y_torch = torch.from_numpy(testing_labels.astype(np.float32))
Y_torch = Y_torch.reshape(pred.shape)
np.mean(np.where(Y_torch==pred,1,0))
| [
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
28034,
198,
6738,
28034,
1330,
299,
77,
198,
6738,
28034,
13,
26791,
13,
7890,
1330,
309,
22854,
27354,
292,
316,
11,
6601,
17401,
198,
220,
220,
22... | 2.018608 | 1,236 |
# Auto generated by generator.py. Delete this line if you make modification.
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor
XPATH = {
'name' : "//div[@class='mo-ta']/p[1]",
'price' : "//span[@class='item_price']",
'category' : "//div[@class='tieu-de']/h1/a",
'description' : "//div[@id='my-cls-ajax']/table//tr[2]/td[3]",
'images' : "//div[@id='picture']/img[@id='large_image']/@src",
'canonical' : "",
'base_url' : "",
'brand' : ""
}
name = 'kkfashion.vn'
allowed_domains = ['kkfashion.vn']
start_urls = ['http://kkfashion.vn/']
tracking_url = ''
sitemap_urls = ['']
sitemap_rules = [('', 'parse_item')]
sitemap_follow = []
rules = [
Rule(LinkExtractor(allow = ['/shop-online+/[a-zA-Z0-9_-]+\.html',''], deny = ['Huong_Dan']), 'parse_item'),
Rule(LinkExtractor(allow = ['/[a-zA-Z0-9-_]+\.html'], deny = ['Huong_Dan']), 'parse'),
#Rule(LinkExtractor(), 'parse_item_and_links'),
]
| [
2,
11160,
7560,
416,
17301,
13,
9078,
13,
23520,
428,
1627,
611,
345,
787,
17613,
13,
198,
6738,
15881,
88,
13,
2777,
4157,
1330,
14330,
198,
6738,
15881,
88,
13,
2815,
365,
742,
974,
669,
1330,
7502,
11627,
40450,
198,
198,
27481,
... | 2.297362 | 417 |
from time import time
from tdw.controller import Controller
from tdw.tdw_utils import TDWUtils
"""
Benchmark the speed of deserializing structs (such as Vector3 and Quaternion).
"""
if __name__ == "__main__":
o_id = 0
cmds = [{"$type": "teleport_object",
"position": {"x": 0, "y": 0, "z": 0},
"id": o_id},
{"$type": "rotate_object_to",
"rotation": {"w": 1, "x": 0, "y": 0, "z": 0},
"id": o_id}]
c = Controller()
c.start()
c.communicate([TDWUtils.create_empty_room(12, 12),
c.get_add_object("rh10", object_id=o_id)])
num_trials = 5000
t0 = time()
for i in range(num_trials):
c.communicate(cmds)
fps = (num_trials / (time() - t0))
print(f"FPS: {round(fps)}")
c.communicate({"$type": "terminate"})
| [
6738,
640,
1330,
640,
198,
6738,
41560,
86,
13,
36500,
1330,
22741,
198,
6738,
41560,
86,
13,
8671,
86,
62,
26791,
1330,
13320,
54,
18274,
4487,
628,
198,
37811,
198,
44199,
4102,
262,
2866,
286,
748,
48499,
2890,
2878,
82,
357,
10508... | 2.051597 | 407 |
import numpy as np
q=__
l=__
x = np.linspace(0,l,__)
M = q/2*(l*x-x**2)
V = q*(1/2-x)
print("Moment")
print(__)
print("Shear")
print(__) | [
11748,
299,
32152,
355,
45941,
198,
198,
80,
28,
834,
220,
198,
75,
28,
834,
198,
198,
87,
796,
45941,
13,
21602,
10223,
7,
15,
11,
75,
11,
834,
8,
198,
198,
44,
796,
10662,
14,
17,
9,
7,
75,
9,
87,
12,
87,
1174,
17,
8,
19... | 1.719512 | 82 |
from datetime import datetime, timedelta
from collections import defaultdict
from flask import render_template, request
from flask.ext.login import login_required
from ybk.models import Collection
from ybk.settings import get_conf
from .views import frontend
@frontend.route('/calendar/')
@login_required
| [
6738,
4818,
8079,
1330,
4818,
8079,
11,
28805,
12514,
198,
6738,
17268,
1330,
4277,
11600,
198,
198,
6738,
42903,
1330,
8543,
62,
28243,
11,
2581,
198,
6738,
42903,
13,
2302,
13,
38235,
1330,
17594,
62,
35827,
198,
198,
6738,
331,
65,
... | 3.690476 | 84 |
for c in range(1, 50):
print(c, ":", fibonacci(c))
| [
198,
1640,
269,
287,
2837,
7,
16,
11,
2026,
2599,
198,
220,
220,
220,
3601,
7,
66,
11,
366,
25,
1600,
12900,
261,
44456,
7,
66,
4008,
198
] | 2 | 28 |
#
# This is the code for plotting the figures for RQ1. It is optimized towards plotting exactly those figures
# Use data_analysis.py for explorative data analysis
#
import matplotlib
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
from plotting_utils import load_data_from_folder, create_custom_palette, \
filter_data_and_plot_as_boxplots, filter_data_by_tag, store_figure_to_paper_folder
import matplotlib.pyplot as plt
if __name__ == "__main__":
main()
| [
2,
198,
2,
770,
318,
262,
2438,
329,
29353,
262,
5538,
329,
371,
48,
16,
13,
632,
318,
23392,
3371,
29353,
3446,
883,
5538,
198,
2,
5765,
1366,
62,
20930,
13,
9078,
329,
1193,
36478,
1366,
3781,
198,
2,
198,
11748,
2603,
29487,
80... | 3.042683 | 164 |
# -*- coding: utf-8 -*-
# Example code for telegrambot.py module
from telegram.ext import CommandHandler, MessageHandler, Filters
from django_telegrambot.apps import DjangoTelegramBot
import logging
logger = logging.getLogger(__name__)
# Define a few command handlers. These usually take the two arguments bot and
# update. Error handlers also receive the raised TelegramError object in error.
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
17934,
2438,
329,
573,
30536,
13645,
13,
9078,
8265,
198,
6738,
573,
30536,
13,
2302,
1330,
9455,
25060,
11,
16000,
25060,
11,
7066,
1010,
198,
6738,
42625,
14208,
62... | 3.59292 | 113 |
import os
import dlib
from pathlib import Path
root_path = Path(__file__).parent
landmarks_model_path = os.path.join(root_path, 'shape_predictor_68_face_landmarks.dat')
landmarks_detector = LandmarksDetector(landmarks_model_path)
| [
11748,
28686,
201,
198,
11748,
288,
8019,
201,
198,
6738,
3108,
8019,
1330,
10644,
201,
198,
15763,
62,
6978,
796,
10644,
7,
834,
7753,
834,
737,
8000,
201,
198,
201,
198,
201,
198,
201,
198,
1044,
14306,
62,
19849,
62,
6978,
796,
2... | 2.688889 | 90 |
import os
import rnnSMAP
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import imp
imp.reload(rnnSMAP)
rnnSMAP.reload()
figTitleLst = ['Temporal Test', 'Spatial Test']
figNameLst = ['temporal', 'spatial']
matplotlib.rcParams.update({'font.size': 14})
matplotlib.rcParams.update({'lines.linewidth': 2})
matplotlib.rcParams.update({'lines.markersize': 6})
for iFig in range(0, 2):
# iFig = 0
figTitle = figTitleLst[iFig]
if iFig == 0:
testName = 'CONUSv2f1'
yr = [2017]
if iFig == 1:
testName = 'CONUSv2f2'
yr = [2015]
trainName = 'CONUSv2f1'
out = trainName+'_y15_Forcing_dr60'
rootDB = rnnSMAP.kPath['DB_L3_NA']
rootOut = rnnSMAP.kPath['OutSigma_L3_NA']
caseStrLst = ['sigmaMC', 'sigmaX', 'sigma']
nCase = len(caseStrLst)
saveFolder = os.path.join(rnnSMAP.kPath['dirResult'], 'paperSigma')
#################################################
# test
predField = 'LSTM'
targetField = 'SMAP'
ds = rnnSMAP.classDB.DatasetPost(
rootDB=rootDB, subsetName=testName, yrLst=yr)
ds.readData(var='SMAP_AM', field='SMAP')
ds.readPred(rootOut=rootOut, out=out, drMC=100, field='LSTM')
statErr = ds.statCalError(predField='LSTM', targetField='SMAP')
statSigma = ds.statCalSigma(field='LSTM')
statConf = ds.statCalConf(predField='LSTM', targetField='SMAP')
statNorm = rnnSMAP.classPost.statNorm(
statSigma=statSigma, dataPred=ds.LSTM, dataTarget=ds.SMAP)
#################################################
# plot figure
fig = plt.figure(figsize=[12, 3])
gs = gridspec.GridSpec(
1, 3, width_ratios=[1, 1, 0.5], height_ratios=[1])
dataErr = getattr(statErr, 'ubRMSE')
dataSigma = getattr(statSigma, 'sigma')
cRange = [0, 0.06]
cRange2 = [0, 0.03]
# plot map RMSE
ax = fig.add_subplot(gs[0, 0])
grid = ds.data2grid(data=dataErr)
titleStr = 'ubRMSE of '+figTitle
rnnSMAP.funPost.plotMap(grid, crd=ds.crdGrid, ax=ax,
cRange=cRange, title=titleStr)
# plot map sigma
ax = fig.add_subplot(gs[0, 1])
grid = ds.data2grid(data=dataSigma)
titleStr = r'$\sigma_{comb}$'+' of '+figTitle
rnnSMAP.funPost.plotMap(grid, crd=ds.crdGrid, ax=ax,
cRange=cRange, title=titleStr)
fig.show()
# plot map sigma vs RMSE
ax = fig.add_subplot(gs[0, 2])
ax.set_aspect('equal', 'box')
y = dataErr
x = dataSigma
rnnSMAP.funPost.plotVS(
x, y, ax=ax, xlabel=r'$\sigma_{comb}$', ylabel='ubRMSE')
fig.tight_layout()
fig.show()
saveFile = os.path.join(saveFolder, 'map_'+figNameLst[iFig])
fig.savefig(saveFile, dpi=100)
fig.savefig(saveFile+'.eps')
print(scipy.stats.pearsonr(x, y))
#################################################
# plot sigmaX vs sigmaMC
plotSigma = 1
if plotSigma == 1:
fig = plt.figure(figsize=[12, 3])
gs = gridspec.GridSpec(
1, 3, width_ratios=[1, 1, 0.5], height_ratios=[1])
dataSigmaX = getattr(statSigma, 'sigmaX')
dataSigmaMC = getattr(statSigma, 'sigmaMC')
# plot map RMSE
ax = fig.add_subplot(gs[0, 0])
grid = ds.data2grid(data=dataSigmaX)
titleStr = r'$\sigma_{x}$ '+figTitle
rnnSMAP.funPost.plotMap(grid, crd=ds.crdGrid, ax=ax,
cRange=cRange, title=titleStr)
# plot map sigma
ax = fig.add_subplot(gs[0, 1])
grid = ds.data2grid(data=dataSigmaMC)
titleStr = r'$\sigma_{MC}$'+' of '+figTitle
rnnSMAP.funPost.plotMap(grid, crd=ds.crdGrid, ax=ax,
cRange=cRange2, title=titleStr)
# plot map sigma vs RMSE
ax = fig.add_subplot(gs[0, 2])
ax.set_aspect('equal', 'box')
y = dataSigmaMC
x = dataSigmaX
rnnSMAP.funPost.plotVS(
x, y, ax=ax, xlabel=r'$\sigma_{x}$', ylabel=r'$\sigma_{MC}$')
fig.tight_layout()
fig.show()
saveFile = os.path.join(saveFolder, 'map_'+figNameLst[iFig]+'_sigma')
fig.savefig(saveFile, dpi=100)
fig.savefig(saveFile+'.eps')
| [
11748,
28686,
198,
11748,
374,
20471,
12310,
2969,
198,
11748,
2603,
29487,
8019,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
2603,
29487,
8019,
13,
2164,
2340,
43106,
355,
50000,
43106,
198,
198,
11748,
848,... | 2.027938 | 2,076 |
import pandas as pd
import geopandas as gpd
import re
import textdistance
import numpy as np
import math
if __name__ == "__main__":
main() | [
11748,
19798,
292,
355,
279,
67,
198,
11748,
30324,
392,
292,
355,
27809,
67,
198,
11748,
302,
198,
11748,
2420,
30246,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
10688,
628,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
... | 3 | 48 |
from setuptools import setup
setup(install_requires=open("requirements.txt").readlines())
| [
6738,
900,
37623,
10141,
1330,
9058,
198,
198,
40406,
7,
17350,
62,
47911,
28,
9654,
7203,
8897,
18883,
13,
14116,
11074,
961,
6615,
28955,
198
] | 3.64 | 25 |
# This script handles the SIP configuration and generates the Makefiles.
#
# Copyright (c) 2019 Riverbank Computing Limited <info@riverbankcomputing.com>
#
# This file is part of SIP.
#
# This copy of SIP is licensed for use under the terms of the SIP License
# Agreement. See the file LICENSE for more details.
#
# This copy of SIP may also used under the terms of the GNU General Public
# License v2 or v3 as published by the Free Software Foundation which can be
# found in the files LICENSE-GPL2 and LICENSE-GPL3 included in this package.
#
# SIP is supplied WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
import keyword
import sys
import os
import glob
import optparse
from distutils import sysconfig
try:
from importlib import invalidate_caches
except ImportError:
invalidate_caches = lambda: None
import siputils
# Initialise the globals.
sip_version = 0x041315
sip_version_str = "4.19.21"
py_version = sys.hexversion >> 8
py_platform = sys.platform
plat_py_site_dir = None
plat_py_inc_dir = None
plat_py_venv_inc_dir = None
plat_py_conf_inc_dir = None
plat_py_lib_dir = None
plat_sip_dir = None
plat_bin_dir = None
platform_specs = []
sip_bin_dir = ''
sip_inc_dir = ''
sip_root_dir = ''
sip_module_dir = ''
sip_module_dest_dir = ''
sip_sip_dir = ''
pyi_dir = ''
sysroot = ''
src_dir = os.path.dirname(os.path.abspath(__file__))
sip_module_name = None
build_platform = None
# Constants.
DEFAULT_MACOSX_ARCH = 'i386 ppc'
MACOSX_SDK_DIRS = ('/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs', '/Developer/SDKs')
# The names of build macros extracted from the platform specific configuration
# files.
build_macro_names = [
"DEFINES", "CONFIG",
"CC",
"CFLAGS",
"CFLAGS_RELEASE", "CFLAGS_DEBUG",
"CFLAGS_CONSOLE", "CFLAGS_SHLIB", "CFLAGS_APP", "CFLAGS_THREAD",
"CFLAGS_MT", "CFLAGS_MT_DBG", "CFLAGS_MT_DLL", "CFLAGS_MT_DLLDBG",
"CFLAGS_EXCEPTIONS_ON", "CFLAGS_EXCEPTIONS_OFF",
"CFLAGS_RTTI_ON", "CFLAGS_RTTI_OFF",
"CFLAGS_STL_ON", "CFLAGS_STL_OFF",
"CFLAGS_WARN_ON", "CFLAGS_WARN_OFF",
"CHK_DIR_EXISTS", "COPY",
"CXX",
"CXXFLAGS",
"CXXFLAGS_RELEASE", "CXXFLAGS_DEBUG",
"CXXFLAGS_CONSOLE", "CXXFLAGS_SHLIB", "CXXFLAGS_APP", "CXXFLAGS_THREAD",
"CXXFLAGS_MT", "CXXFLAGS_MT_DBG", "CXXFLAGS_MT_DLL", "CXXFLAGS_MT_DLLDBG",
"CXXFLAGS_EXCEPTIONS_ON", "CXXFLAGS_EXCEPTIONS_OFF",
"CXXFLAGS_RTTI_ON", "CXXFLAGS_RTTI_OFF",
"CXXFLAGS_STL_ON", "CXXFLAGS_STL_OFF",
"CXXFLAGS_WARN_ON", "CXXFLAGS_WARN_OFF",
"DEL_FILE",
"EXTENSION_SHLIB", "EXTENSION_PLUGIN",
"INCDIR", "INCDIR_X11", "INCDIR_OPENGL",
"LIBS_CORE", "LIBS_GUI", "LIBS_NETWORK", "LIBS_OPENGL", "LIBS_WEBKIT",
"LINK", "LINK_SHLIB", "AIX_SHLIB", "LINK_SHLIB_CMD",
"LFLAGS", "LFLAGS_CONSOLE", "LFLAGS_CONSOLE_DLL", "LFLAGS_DEBUG",
"LFLAGS_DLL",
"LFLAGS_PLUGIN", "LFLAGS_RELEASE", "LFLAGS_SHLIB", "LFLAGS_SONAME",
"LFLAGS_THREAD", "LFLAGS_WINDOWS", "LFLAGS_WINDOWS_DLL", "LFLAGS_OPENGL",
"LIBDIR", "LIBDIR_X11", "LIBDIR_OPENGL",
"LIBS", "LIBS_CONSOLE", "LIBS_RT",
"LIBS_RTMT", "LIBS_THREAD", "LIBS_WINDOWS", "LIBS_X11",
"MAKEFILE_GENERATOR",
"MKDIR",
"RPATH", "LFLAGS_RPATH",
"AR", "RANLIB", "LIB", "STRIP"
]
def show_platforms():
"""Display the different platform/compilers.
"""
sys.stdout.write("""
The following platform/compiler configurations are supported:
""")
platform_specs.sort()
sys.stdout.write(siputils.format(", ".join(platform_specs), leftmargin=2))
sys.stdout.write("\n\n")
def show_macros():
"""Display the different build macros.
"""
sys.stdout.write("""
The following options may be used to adjust the compiler configuration:
""")
build_macro_names.sort()
sys.stdout.write(siputils.format(", ".join(build_macro_names), leftmargin=2))
sys.stdout.write("\n\n")
def set_build_platform():
""" Initialise the build platform. """
global build_platform
# Set the platform specific default specification.
platdefaults = {
"aix": "aix-xlc",
"bsd": "bsdi-g++",
"cygwin": "cygwin-g++",
"darwin": "macx-g++",
"dgux": "dgux-g++",
"freebsd": "freebsd-g++",
"gnu": "hurd-g++",
"hp-ux": "hpux-acc",
"irix": "irix-cc",
"linux": "linux-g++",
"lynxos": "lynxos-g++",
"netbsd": "netbsd-g++",
"openbsd": "openbsd-g++",
"openunix": "unixware-cc",
"osf1": "tru64-cxx",
"qnx": "qnx-g++",
"reliantunix": "reliant-cds",
"sco_sv": "sco-cc",
"sinix": "reliant-cds",
"sunos5": "solaris-cc",
"ultrix": "ultrix-g++",
"unix_sv": "unixware-g++",
"unixware": "unixware-cc",
"haiku1": "haiku-g++"
}
build_platform = "none"
if py_platform == "win32":
if py_version >= 0x030500:
build_platform = "win32-msvc2015"
elif py_version >= 0x030300:
build_platform = "win32-msvc2010"
elif py_version >= 0x020600:
build_platform = "win32-msvc2008"
elif py_version >= 0x020400:
build_platform = "win32-msvc.net"
else:
build_platform = "win32-msvc"
else:
for pd in list(platdefaults.keys()):
if py_platform[:len(pd)] == pd:
build_platform = platdefaults[pd]
break
def inform_user():
""" Tell the user the option values that are going to be used. """
if not opts.no_tools:
siputils.inform("The SIP code generator will be installed in %s." % sip_bin_dir)
siputils.inform("The sip.h header file will be installed in %s." % sip_inc_dir)
if not opts.no_module:
siputils.inform("The %s module will be installed in %s." % (sip_module_name, sip_module_dest_dir))
if opts.pyi:
siputils.inform("The sip.pyi stub file will be installed in %s." % pyi_dir)
if opts.static:
siputils.inform("The %s module will be built as a static library." % sip_module_name)
siputils.inform("The default directory to install .sip files in is %s." % sip_sip_dir)
if opts.use_qmake is None:
siputils.inform("The platform/compiler configuration is %s." % build_platform)
if opts.arch:
siputils.inform("MacOS/X binaries will be created for %s." % (", ".join(opts.arch.split())))
if opts.universal:
siputils.inform("MacOS/X universal binaries will be created using %s." % opts.universal)
if opts.deployment_target:
siputils.inform("MacOS/X deployment target is %s." % opts.deployment_target)
def set_platform_directories():
""" Initialise the global variables relating to platform-specific
directories.
"""
global plat_py_site_dir, plat_py_inc_dir, plat_py_venv_inc_dir
global plat_py_conf_inc_dir, plat_bin_dir, plat_py_lib_dir, plat_sip_dir
# We trust distutils for some stuff.
plat_py_site_dir = sysconfig.get_python_lib(plat_specific=1)
plat_py_inc_dir = sysconfig.get_python_inc()
plat_py_venv_inc_dir = sysconfig.get_python_inc(prefix=sys.prefix)
plat_py_conf_inc_dir = os.path.dirname(sysconfig.get_config_h_filename())
if sys.platform == "win32":
bin_dir = sys.exec_prefix
try:
# Python v3.3 and later.
base_prefix = sys.base_prefix
if sys.exec_prefix != sys.base_exec_prefix:
bin_dir += '\\Scripts'
except AttributeError:
try:
# virtualenv for Python v2.
base_prefix = sys.real_prefix
bin_dir += '\\Scripts'
except AttributeError:
# We can't detect the base prefix in Python v3 prior to v3.3.
base_prefix = sys.prefix
plat_py_lib_dir = base_prefix + "\\libs"
plat_bin_dir = bin_dir
plat_sip_dir = sys.prefix + "\\sip"
else:
lib_dir = sysconfig.get_python_lib(plat_specific=1, standard_lib=1)
plat_py_lib_dir = lib_dir + "/config"
plat_bin_dir = sys.exec_prefix + "/bin"
plat_sip_dir = sys.prefix + "/share/sip"
def create_config(module, template, macros):
"""Create the SIP configuration module so that it can be imported by build
scripts.
module is the module file name.
template is the template file name.
macros is the dictionary of build macros.
"""
siputils.inform("Creating %s..." % module)
content = {
"sip_config_args": sys.argv[1:],
"sip_version": sip_version,
"sip_version_str": sip_version_str,
"platform": build_platform,
"sip_bin": os.path.join(sip_bin_dir, "sip"),
"sip_inc_dir": sip_inc_dir,
"sip_root_dir": sip_root_dir,
"sip_module_dir": sip_module_dir,
"default_bin_dir": plat_bin_dir,
"default_mod_dir": plat_py_site_dir,
"default_sip_dir": sip_sip_dir,
"py_version": py_version,
"py_inc_dir": plat_py_inc_dir,
"py_conf_inc_dir": plat_py_conf_inc_dir,
"py_lib_dir": plat_py_lib_dir,
"universal": opts.universal,
"arch": opts.arch,
"deployment_target": opts.deployment_target,
"qt_framework": 0
}
siputils.create_config_module(module, template, content, macros)
def create_makefiles(macros):
"""Create the Makefiles.
macros is the dictionary of platform specific build macros.
"""
# Bootstrap. Make sure we get the right one.
sys.path.insert(0, os.path.curdir)
invalidate_caches()
import sipconfig
cfg = sipconfig.Configuration()
cfg.set_build_macros(macros)
all_installs = []
top_installs = []
gen_installs = []
subdirs = []
if not opts.no_tools:
subdirs.append('sipgen')
top_installs.append(
(["sipconfig.py", os.path.join(src_dir, "sipdistutils.py")],
cfg.sip_root_dir))
gen_installs.append(
(os.path.join(src_dir, "siplib", "sip.h"), cfg.sip_inc_dir))
if not opts.no_module:
subdirs.append('siplib')
all_installs.extend(top_installs)
all_installs.extend(gen_installs)
# The command to run to generate the dist-info directory.
mk_distinfo = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'mk_distinfo.py')
distinfo_dir = os.path.join(cfg.sip_module_dir,
'%s-%s.dist-info' % (sip_module_name.replace('.', '_'),
sip_version_str))
if opts.use_qmake:
run_mk_distinfo = '%s %s \\\"$(INSTALL_ROOT)\\\" %s installed.txt' % (
sys.executable, mk_distinfo, distinfo_dir)
sipconfig.inform("Creating top level .pro file...")
pro = open("sip.pro", "w")
pro.write("TEMPLATE = subdirs\n")
pro.write("SUBDIRS = %s\n" % " ".join(subdirs))
if top_installs:
# There will only be one element.
files, path = top_installs[0]
pro.write("\n")
pro.write("build_system.files = %s\n" % " ".join(files))
pro.write("build_system.path = %s\n" % quote(path))
pro.write("INSTALLS += build_system\n")
if opts.distinfo:
pro.write("\n")
pro.write("distinfo.extra = %s\n" % run_mk_distinfo)
pro.write("distinfo.path = %s\n" % quote(cfg.sip_module_dir))
pro.write("INSTALLS += distinfo\n")
pro.close()
else:
run_mk_distinfo = '%s %s "$(DESTDIR)" %s installed.txt' % (
sys.executable, mk_distinfo, distinfo_dir)
sipconfig.inform("Creating top level Makefile...")
# Note that mk_distinfo.py won't exist if we are building from the
# repository.
if opts.distinfo and os.path.isfile(mk_distinfo):
top_installs.append((run_mk_distinfo, None))
sipconfig.ParentMakefile(
configuration=cfg,
subdirs=subdirs,
installs=top_installs
).generate()
if opts.use_qmake:
sipconfig.inform("Creating sip code generator .pro file...")
pro = open(os.path.join("sipgen", "sipgen.pro"), "w")
pro.write("TEMPLATE = app\n")
pro.write("TARGET = sip\n")
pro.write("CONFIG -= qt app_bundle\n")
pro.write("CONFIG += warn_on exceptions_off console %s\n" % (
("debug" if opts.debug else "release")))
pro.write("\n")
pro.write("# Work around QTBUG-39300.\n")
pro.write("CONFIG -= android_install\n")
pro.write("\n")
pro.write("target.path = %s\n" % os.path.dirname(cfg.sip_bin))
pro.write("INSTALLS += target\n")
c_sources = get_sources("sipgen", "*.c")
pro.write("\n")
pro.write("SOURCES = %s\n" % " ".join(
[qmake_quote(s) for s in c_sources]))
headers = get_sources("sipgen", "*.h")
pro.write("\n")
pro.write("HEADERS = %s\n" % " ".join(
[qmake_quote(h) for h in headers]))
if gen_installs:
# There will only be one element.
files, path = gen_installs[0]
pro.write("\n")
pro.write("sip_h.files = %s\n" % " ".join(files))
pro.write("sip_h.path = %s\n" % quote(path))
pro.write("INSTALLS += sip_h\n")
pro.close()
else:
sipconfig.inform("Creating sip code generator Makefile...")
sipconfig.ProgramMakefile(
configuration=cfg,
build_file=os.path.join(src_dir, "sipgen", "sipgen.sbf"),
dir="sipgen",
install_dir=os.path.dirname(cfg.sip_bin),
installs=gen_installs,
console=1,
warnings=1,
universal=opts.universal,
arch=opts.arch,
deployment_target=opts.deployment_target
).generate()
# The implied code generator installs.
if not opts.no_tools:
sip_dir, sip_exe = os.path.split(cfg.sip_bin)
if sys.platform == 'win32':
sip_exe += '.exe'
all_installs.append((sip_exe, sip_dir))
# The module installs.
module_installs=[]
if opts.pyi:
module_installs.append((os.path.join(src_dir, 'sip.pyi'), pyi_dir))
all_installs.extend(module_installs)
if not opts.no_module:
if sys.platform == 'win32':
mod = 'sip.lib' if opts.static else 'sip.pyd'
else:
mod = 'libsip.a' if opts.static else 'sip.so'
all_installs.append((mod, sip_module_dest_dir))
if opts.use_qmake:
sipconfig.inform("Creating sip module .pro file...")
pro = open(os.path.join("siplib", "siplib.pro"), "w")
pro.write("TEMPLATE = lib\n")
pro.write("TARGET = sip\n")
pro.write("CONFIG -= qt\n")
pro.write("CONFIG += warn_on exceptions_off %s %s\n" % (
("staticlib" if opts.static else "plugin plugin_bundle"),
("debug" if opts.debug else "release")))
pro.write("\n")
pro.write("# Work around QTBUG-39300.\n")
pro.write("CONFIG -= android_install\n")
pro.write("\n")
pro.write("INCLUDEPATH += %s\n" % cfg.py_inc_dir)
if cfg.py_conf_inc_dir != cfg.py_inc_dir:
pro.write("INCLUDEPATH += %s\n" % cfg.py_conf_inc_dir)
if sip_module_name != 'sip':
pro.write("\n")
pro.write('DEFINES += SIP_MODULE_NAME=%s\n' % sip_module_name)
base_name = sip_module_name.split('.')[-1]
if base_name != 'sip':
pro.write('DEFINES += SIP_MODULE_BASENAME=%s\n' % base_name)
if not opts.static:
# These only need to be correct for Windows.
debug_suffix = "_d" if opts.debug else ""
link_lib_dir = quote("-L" + cfg.py_lib_dir)
pro.write("""
win32 {
PY_MODULE = sip%s.pyd
PY_MODULE_SRC = $(DESTDIR_TARGET)
LIBS += %s
} else {
PY_MODULE = sip.so
macx {
PY_MODULE_SRC = $(TARGET).plugin/Contents/MacOS/$(TARGET)
QMAKE_LFLAGS += "-undefined dynamic_lookup"
} else {
PY_MODULE_SRC = $(TARGET)
}
}
QMAKE_POST_LINK = $(COPY_FILE) $$PY_MODULE_SRC $$PY_MODULE
target.CONFIG = no_check_exist
target.files = $$PY_MODULE
""" % (debug_suffix, link_lib_dir))
pro.write("\n")
pro.write("target.path = %s\n" % sip_module_dest_dir)
pro.write("INSTALLS += target\n")
if opts.pyi:
pro.write("\n")
pro.write("sip_pyi.files = sip.pyi\n")
pro.write("sip_pyi.path = %s\n" % pyi_dir)
pro.write("INSTALLS += sip_pyi\n")
c_sources = get_sources("siplib", "*.c")
cpp_sources = get_sources("siplib", "*.cpp")
pro.write("\n")
pro.write("SOURCES = %s\n" % " ".join(
[qmake_quote(s) for s in c_sources + cpp_sources]))
headers = get_sources("siplib", "*.h")
pro.write("\n")
pro.write("HEADERS = %s\n" % " ".join(
[qmake_quote(h) for h in headers]))
pro.close()
else:
sipconfig.inform("Creating sip module Makefile...")
build_dir = os.getcwd()
makefile = sipconfig.ModuleMakefile(
configuration=cfg,
build_file=os.path.join(src_dir, "siplib", "siplib.sbf"),
dir="siplib",
install_dir=sip_module_dest_dir,
installs=module_installs,
console=1,
warnings=1,
static=opts.static,
debug=opts.debug,
universal=opts.universal,
arch=opts.arch,
deployment_target=opts.deployment_target
)
if sip_module_name != 'sip':
makefile.DEFINES.append('SIP_MODULE_NAME=%s' % sip_module_name)
base_name = sip_module_name.split('.')[-1]
if base_name != 'sip':
makefile.DEFINES.append('SIP_MODULE_BASENAME=%s' % base_name)
if src_dir != build_dir:
src_siplib_dir = os.path.join(src_dir, "siplib")
makefile.extra_include_dirs.append(src_siplib_dir)
makefile.extra_source_dirs.append(src_siplib_dir)
makefile.generate()
# Create the file containing all installed files.
if opts.distinfo:
installed = open('installed.txt', 'w')
for sources, dst in all_installs:
if not isinstance(sources, (list, tuple)):
sources = [sources]
for src in sources:
installed.write(
os.path.join(dst, os.path.basename(src)) + '\n')
installed.close()
def get_sources(sources_dir, ext):
""" Get the quoted files with the specified extension from a directory. """
return [quote(f) for f in glob.glob(os.path.join(src_dir, sources_dir, ext))]
def quote(path):
""" Return a path that is quoted if necessary. """
if ' ' in path:
path = '"' + path + '"'
return path
def qmake_quote(path):
""" Return a path quoted for qmake if it contains spaces. path is the
path.
"""
if ' ' in path:
path = '$$quote(%s)' % path
return path
# Look out for recursive definitions.
_extrapolating = []
def _get_configuration_value(config, name, default=None):
""" Get a configuration value while extrapolating. """
value = config.get(name)
if value is None:
if default is None:
siputils.error("Configuration file references non-existent name '%s'." % name)
return default
parts = value.split('%(', 1)
while len(parts) == 2:
prefix, tail = parts
parts = tail.split(')', 1)
if len(parts) != 2:
siputils.error("Configuration file contains unterminated extrapolated name '%s'." % tail)
xtra_name, suffix = parts
if xtra_name in _extrapolating:
siputils.error("Configuration file contains a recursive reference to '%s'." % xtra_name)
_extrapolating.append(xtra_name)
xtra_value = _get_configuration_value(config, xtra_name)
_extrapolating.pop()
value = prefix + xtra_value + suffix
parts = value.split('%(', 1)
return value
def update_from_configuration_file(config_file):
""" Update a number of globals from values read from a configuration file.
"""
siputils.inform("Reading configuration from %s..." % config_file)
config = {}
# Read the file into the dict.
cfg = open(config_file)
line_nr = 0
for l in cfg:
line_nr += 1
# Strip comments and blank lines.
l = l.split('#')[0].strip()
if l == '':
continue
parts = l.split('=', 1)
if len(parts) == 2:
name = parts[0].strip()
value = parts[1].strip()
else:
name = value = ''
if name == '' or value == '':
siputils.error("%s:%d: Invalid line." % (config_file, line_nr))
config[name] = value
last_name = name
cfg.close()
# Enforce the presets.
version = siputils.version_to_string(py_version).split('.')
config['py_major'] = version[0]
config['py_minor'] = version[1]
config['sysroot'] = sysroot
# Override the relevant values.
global py_platform, plat_py_conf_inc_dir, plat_py_inc_dir, plat_py_lib_dir
global sip_bin_dir, sip_inc_dir, sip_module_dir, sip_sip_dir
py_platform = _get_configuration_value(config, 'py_platform', py_platform)
plat_py_inc_dir = _get_configuration_value(config, 'py_inc_dir',
plat_py_inc_dir)
plat_py_lib_dir = _get_configuration_value(config, 'py_pylib_dir',
plat_py_lib_dir)
# The pyconfig.h directory defaults to the Python.h directory.
plat_py_conf_inc_dir = _get_configuration_value(config, 'py_conf_inc_dir',
plat_py_inc_dir)
sip_bin_dir = _get_configuration_value(config, 'sip_bin_dir', sip_bin_dir)
sip_module_dir = _get_configuration_value(config, 'sip_module_dir',
sip_module_dir)
# Note that this defaults to any 'py_inc_dir' specified in the
# configuration file.
sip_inc_dir = _get_configuration_value(config, 'sip_inc_dir',
plat_py_inc_dir)
# Note that this is only used when creating sipconfig.py.
sip_sip_dir = _get_configuration_value(config, 'sip_sip_dir', sip_sip_dir)
def create_optparser(sdk_dir):
"""Create the parser for the command line.
"""
p = optparse.OptionParser(usage="python %prog [opts] [macro=value] "
"[macro+=value]", version=sip_version_str)
# Note: we don't use %default to be compatible with Python 2.3.
p.add_option("-k", "--static", action="store_true", default=False,
dest="static", help="build the SIP module as a static library")
p.add_option("-p", "--platform", action="store", type="string",
metavar="PLATFORM", dest="platform", help="the platform/compiler "
"configuration [default: %s]" % build_platform)
p.add_option("-u", "--debug", action="store_true", default=False,
help="build with debugging symbols")
p.add_option("--sip-module", action="store", default="sip", type="string",
metavar="NAME", dest="sip_module", help="the package.module name "
"of the sip module [default: sip]")
p.add_option("--configuration", dest='config_file', type='string',
action='callback', callback=store_abspath_file, metavar="FILE",
help="FILE contains the target configuration")
p.add_option("--target-py-version", dest='target_py_version',
type='string', action='callback', callback=store_version,
metavar="VERSION",
help="the major.minor version of the target Python [default: "
"%s]" % siputils.version_to_string(py_version, parts=2))
p.add_option("--sysroot", dest='sysroot', type='string', action='callback',
callback=store_abspath_dir, metavar="DIR",
help="DIR is the target system root directory")
p.add_option("--no-module", action="store_true", default=False,
dest="no_module", help="disable the installation of the sip "
"module [default: enabled]")
p.add_option("--no-tools", action="store_true", default=False,
dest="no_tools", help="disable the building of the code generator "
"and the installation of the build system [default: enabled]")
p.add_option("--use-qmake", action="store_true", default=False,
dest="use_qmake", help="generate qmake .pro files instead of "
"Makefiles")
if sys.platform == 'darwin':
# Get the latest SDK to use as the default.
sdks = glob.glob(sdk_dir + '/MacOSX*.sdk')
if len(sdks) > 0:
sdks.sort()
_, default_sdk = os.path.split(sdks[-1])
else:
default_sdk = 'MacOSX10.4u.sdk'
g = optparse.OptionGroup(p, title="MacOS X Configuration")
g.add_option("--arch", action="append", default=[], dest="arch",
choices=["i386", "x86_64", "ppc"],
help="build for architecture ARCH")
g.add_option("--deployment-target", action="store", default='',
metavar="VERSION", dest="deployment_target",
help="set the value of the MACOSX_DEPLOYMENT_TARGET "
"environment variable in generated Makefiles")
g.add_option("-n", "--universal", action="store_true", default=False,
dest="universal",
help="build the SIP code generator and module as universal "
"binaries")
g.add_option("-s", "--sdk", action="store", default=default_sdk,
type="string", metavar="SDK", dest="sdk",
help="the name of the SDK used when building universal "
"binaries [default: %s]" % default_sdk)
p.add_option_group(g)
# Querying.
g = optparse.OptionGroup(p, title="Query")
g.add_option("--show-platforms", action="store_true", default=False,
dest="show_platforms", help="show the list of supported "
"platform/compiler configurations")
g.add_option("--show-build-macros", action="store_true", default=False,
dest="show_build_macros", help="show the list of supported build "
"macros")
p.add_option_group(g)
# Installation.
g = optparse.OptionGroup(p, title="Installation")
g.add_option("-b", "--bindir", action="callback", type="string",
metavar="DIR", dest="sipbindir", callback=store_abspath,
help="where the SIP code generator will be installed [default: "
"%s]" % plat_bin_dir)
g.add_option("-d", "--destdir", action="callback", type="string",
metavar="DIR", dest="destdir", callback=store_abspath,
help="where the SIP module will be installed [default: "
"%s]" % plat_py_site_dir)
g.add_option("-e", "--incdir", action="callback", type="string",
metavar="DIR", dest="sipincdir", callback=store_abspath,
help="where the SIP header file will be installed [default: "
"%s]" % plat_py_venv_inc_dir)
g.add_option("-v", "--sipdir", action="callback", type="string",
metavar="DIR", dest="sipsipdir", callback=store_abspath,
help="where .sip files are normally installed [default: "
"%s]" % plat_sip_dir)
g.add_option("--no-dist-info", action="store_false", default=True,
dest="distinfo",
help="do not install the dist-info directory")
g.add_option("--no-stubs", "--no-pyi", action="store_false", default=True,
dest="pyi",
help="do not install the sip.pyi stub file")
g.add_option("--stubsdir", "--pyidir", action="callback", type="string",
metavar="DIR", dest="pyidir", callback=store_abspath,
help="where the sip.pyi stub file will be installed [default: "
"%s]" % plat_py_site_dir)
p.add_option_group(g)
return p
def main(argv):
"""Create the configuration module module.
argv is the list of command line arguments.
"""
siputils.inform("This is SIP %s for Python %s on %s." % (sip_version_str, sys.version.split()[0], sys.platform))
global py_version, build_platform
if py_version < 0x020300:
siputils.error("This version of SIP requires Python v2.3 or later.")
# Basic initialisation.
set_platform_directories()
set_build_platform()
# Build up the list of valid specs.
for s in os.listdir(os.path.join(src_dir, "specs")):
platform_specs.append(s)
# Determine the directory containing the default OS/X SDK.
if sys.platform == 'darwin':
for sdk_dir in MACOSX_SDK_DIRS:
if os.path.isdir(sdk_dir):
break
else:
sdk_dir = MACOSX_SDK_DIRS[0]
else:
sdk_dir = ''
# Parse the command line.
global opts
p = create_optparser(sdk_dir)
opts, args = p.parse_args()
# Override defaults that affect subsequent configuration.
if opts.target_py_version is not None:
py_version = opts.target_py_version
if opts.sysroot is not None:
global sysroot
sysroot = opts.sysroot
# Make sure MacOS specific options get initialised.
if sys.platform != 'darwin':
opts.universal = ''
opts.arch = []
opts.sdk = ''
opts.deployment_target = ''
# Handle the query options.
if opts.show_platforms or opts.show_build_macros:
if opts.show_platforms:
show_platforms()
if opts.show_build_macros:
show_macros()
sys.exit()
# Convert the list 'arch' option to a string. Multiple architectures
# imply a universal binary.
if len(opts.arch) > 1:
opts.universal = True
opts.arch = ' '.join(opts.arch)
# Convert the boolean 'universal' option to a string.
if opts.universal:
if '/' in opts.sdk:
opts.universal = os.path.abspath(opts.sdk)
else:
opts.universal = sdk_dir + '/' + opts.sdk
if not os.path.isdir(opts.universal):
siputils.error("Unable to find the SDK directory %s. Use the --sdk flag to specify the name of the SDK or its full path." % opts.universal)
if opts.arch == '':
opts.arch = DEFAULT_MACOSX_ARCH
else:
opts.universal = ''
# No sip module also implies no stubs.
if opts.no_module:
opts.pyi = False
# Apply the overrides from any configuration file.
global plat_bin_dir, plat_py_conf_inc_dir, plat_py_inc_dir
global plat_py_lib_dir, plat_py_site_dir, plat_sip_dir
global sip_bin_dir, sip_inc_dir, sip_root_dir, sip_module_dir, sip_sip_dir
global sip_module_dest_dir, sip_module_name, pyi_dir
# Set defaults.
sip_bin_dir = plat_bin_dir
sip_inc_dir = plat_py_venv_inc_dir
sip_root_dir = plat_py_site_dir
sip_sip_dir = plat_sip_dir
if opts.config_file is not None:
update_from_configuration_file(opts.config_file)
elif sysroot != '':
plat_bin_dir = apply_sysroot(plat_bin_dir)
plat_py_conf_inc_dir = apply_sysroot(plat_py_conf_inc_dir)
plat_py_inc_dir = apply_sysroot(plat_py_inc_dir)
plat_py_lib_dir = apply_sysroot(plat_py_lib_dir)
plat_py_site_dir = apply_sysroot(plat_py_site_dir)
plat_sip_dir = apply_sysroot(plat_sip_dir)
sip_bin_dir = apply_sysroot(sip_bin_dir)
sip_inc_dir = apply_sysroot(sip_inc_dir)
sip_root_dir = apply_sysroot(sip_root_dir)
sip_sip_dir = apply_sysroot(sip_sip_dir)
# Fix the name of the sip module.
if opts.destdir is not None:
sip_root_dir = opts.destdir
# The module directory might have been set in a configuration file.
if not sip_module_dir:
sip_module_dir = sip_root_dir
sip_module_name = opts.sip_module
module_path = sip_module_name.split(".")
# Check the module name is valid.
for m in module_path:
# Python v2 doesn't have isidentifier() but we don't bother to provide
# an alternative.
try:
if keyword.iskeyword(m) or not m.isidentifier():
siputils.error(
"'%s' is an invalid Python module name." % sip_module_name)
except AttributeError:
pass
if len(module_path) > 1:
del module_path[-1]
module_path.insert(0, sip_module_dir)
sip_module_dest_dir = os.path.join(*module_path)
else:
sip_module_dest_dir = sip_module_dir
# Override from the command line.
if opts.platform is not None:
build_platform = opts.platform
if opts.sipbindir is not None:
sip_bin_dir = opts.sipbindir
if opts.sipincdir is not None:
sip_inc_dir = opts.sipincdir
if opts.sipsipdir is not None:
sip_sip_dir = opts.sipsipdir
if opts.pyidir is not None:
pyi_dir = opts.pyidir
else:
pyi_dir = sip_module_dest_dir
# Get the platform specific macros for building.
macros = siputils.parse_build_macros(
os.path.join(src_dir, "specs", build_platform), build_macro_names,
args)
if macros is None:
siputils.error("Unsupported macro name specified. Use the --show-build-macros flag to see a list of supported macros.")
sys.exit(2)
# Tell the user what's been found.
inform_user()
# Install the configuration module.
create_config("sipconfig.py", os.path.join(src_dir, "siputils.py"),
macros)
# Create the Makefiles.
create_makefiles(macros)
###############################################################################
# The script starts here.
###############################################################################
if __name__ == "__main__":
try:
main(sys.argv)
except SystemExit:
raise
except:
sys.stderr.write(
"""An internal error occured. Please report all the output from the program,
including the following traceback, to support@riverbankcomputing.com.
""")
raise
| [
2,
770,
4226,
17105,
262,
311,
4061,
8398,
290,
18616,
262,
6889,
16624,
13,
198,
2,
198,
2,
15069,
357,
66,
8,
13130,
5866,
17796,
38589,
15302,
1279,
10951,
31,
38291,
17796,
785,
48074,
13,
785,
29,
198,
2,
198,
2,
770,
2393,
3... | 2.165177 | 15,995 |
import pytest
from stix2patterns.pattern import ParseException
from stix2matcher.matcher import match
_observations = [
{
"type": "observed-data",
"first_observed": "2004-10-11T21:44:58Z",
"last_observed": "2004-10-11T21:44:58Z",
"number_observed": 1,
"objects": {
"0": {
"type": u"some-type",
"has-hyphen": 1,
"has.dot": 2,
"has-hyphen.dot": 3
}
}
},
]
@pytest.mark.parametrize("pattern", [
"[some-type:'has-hyphen' = 1]",
"[some-type:'has.dot' = 2]",
"[some-type:'has-hyphen.dot' = 3]"
])
@pytest.mark.parametrize("pattern", [
"[some-type:needs-quotes = 1]"
])
| [
11748,
12972,
9288,
198,
6738,
336,
844,
17,
33279,
82,
13,
33279,
1330,
2547,
325,
16922,
198,
198,
6738,
336,
844,
17,
6759,
2044,
13,
6759,
2044,
1330,
2872,
198,
198,
62,
672,
3168,
602,
796,
685,
198,
220,
220,
220,
1391,
198,
... | 1.859694 | 392 |
#reference: https://www.tutorialspoint.com/python/python_database_access.htm
import MySQLdb as mysqldb
if __name__ == '__main__':
store_reldb()
| [
2,
35790,
25,
3740,
1378,
2503,
13,
83,
44917,
2777,
1563,
13,
785,
14,
29412,
14,
29412,
62,
48806,
62,
15526,
13,
19211,
198,
198,
11748,
33476,
9945,
355,
616,
31166,
335,
65,
198,
220,
220,
220,
220,
198,
361,
11593,
3672,
834,
... | 2.566667 | 60 |
import json
from typing import Any, Dict
from ..request import Request
from ..response import Response
from ..router import Router
router = Router()
@router.get("/users/{user_id}")
| [
11748,
33918,
198,
6738,
19720,
1330,
4377,
11,
360,
713,
198,
198,
6738,
11485,
25927,
1330,
19390,
198,
6738,
11485,
26209,
1330,
18261,
198,
6738,
11485,
472,
353,
1330,
48538,
198,
198,
472,
353,
796,
48538,
3419,
628,
198,
31,
472,... | 3.381818 | 55 |
import os
filename = os.path.join(os.getcwd(), "data.json")
# f = open(filename)
with open(filename, mode="w") as f:
print("name :", f.name)
print("mode :", f.mode)
print("buffer :", f.buffer)
print("encoding :", f.encoding)
print("closed :", f.closed)
print("errors :", f.errors)
print("closed :", f.closed)
| [
11748,
28686,
201,
198,
201,
198,
34345,
796,
28686,
13,
6978,
13,
22179,
7,
418,
13,
1136,
66,
16993,
22784,
366,
7890,
13,
17752,
4943,
201,
198,
201,
198,
2,
277,
796,
1280,
7,
34345,
8,
201,
198,
4480,
1280,
7,
34345,
11,
4235... | 2.317881 | 151 |
#!/usr/bin/env python3
from setuptools import setup
setup(
name='notcologger',
version='0.1.2',
description='Not CO Logger, a cloud logging library.',
long_description=
'''This library is aimed at helping produce consistent searchable log
entries to stdout in a cloud/container environment.''',
keywords='logging',
url='https://github.com/jmtapio/not-co-logger',
author='Juha-Matti Tapio',
author_email='jmtapio@verkkotelakka.net',
license='MIT',
packages=['notcologger'],
classifiers = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Topic :: Software Development :: Libraries',
'Topic :: System :: Logging',
],
python_requires='>=3',
test_suite='tests.test_logger',
include_package_data=True,
zip_safe=True)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
6738,
900,
37623,
10141,
1330,
9058,
628,
198,
40406,
7,
198,
220,
220,
220,
1438,
11639,
1662,
66,
928,
1362,
3256,
198,
220,
220,
220,
2196,
11639,
15,
13,
16,
13,
17,
3... | 2.644886 | 352 |
from tensorflow import keras
from .tf_base import KerasVectorRegressionModel
from .. import normalisation
| [
6738,
11192,
273,
11125,
1330,
41927,
292,
198,
198,
6738,
764,
27110,
62,
8692,
1330,
17337,
292,
38469,
8081,
2234,
17633,
198,
6738,
11485,
1330,
3487,
5612,
628,
628
] | 3.793103 | 29 |
#!/usr/bin/env python
from __future__ import print_function
import sys
from PyAnalysisTools.base import get_default_argparser, default_init
from PyAnalysisTools.base.YAMLHandle import YAMLLoader, YAMLDumper
from PyAnalysisTools.AnalysisTools.XSHandle import Dataset
if __name__ == '__main__':
main(sys.argv[1:])
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
198,
11748,
25064,
198,
198,
6738,
9485,
32750,
33637,
13,
8692,
1330,
651,
62,
12286,
62,
853,
48610,
11,
4277,
62,
15003,
198... | 2.954128 | 109 |
from nuclear import *
from nuclear.parser.error import CliDefinitionError
from tests.asserts import MockIO, assert_error
from functools import reduce
import base64
cli = CliBuilder()
@cli.add_command('hello')
def say_hello(name: str, decode: bool = False, repeat: int = 1):
"""
Say hello to someone
:param name: Name to say hello to
:param decode: Decode name as base64
"""
if decode:
name = base64.b64decode(name).decode('utf-8')
print(' '.join([f"I'm a {name}!"] * repeat))
@cli.add_command('calculate', 'factorial')
def calculate_factorial(n: int):
"""Calculate factorial"""
result = reduce(lambda x, y: x * y, range(1, n + 1))
print(result)
return result
@cli.add_command('calculate', 'primes')
def calculate_primes(n: int = 100):
"""
List prime numbers using Sieve of Eratosthenes
:param n: maximum number to check
"""
print(sorted(reduce((lambda r, x: r - set(range(x**2, n, x)) if (x in r) else r),
range(2, n), set(range(2, n)))))
| [
6738,
4523,
1330,
1635,
198,
6738,
4523,
13,
48610,
13,
18224,
1330,
1012,
72,
36621,
12331,
198,
6738,
5254,
13,
30493,
82,
1330,
44123,
9399,
11,
6818,
62,
18224,
198,
6738,
1257,
310,
10141,
1330,
4646,
198,
11748,
2779,
2414,
628,
... | 2.529976 | 417 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-08-22 18:16
from __future__ import unicode_literals
from django.db import migrations, models
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
940,
13,
22,
319,
2177,
12,
2919,
12,
1828,
1248,
25,
1433,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
... | 2.736842 | 57 |
n = int(input())
for i in range(0, n+1):
if n % i == 0:
print(i) | [
77,
796,
493,
7,
15414,
28955,
198,
1640,
1312,
287,
2837,
7,
15,
11,
299,
10,
16,
2599,
198,
197,
361,
299,
4064,
1312,
6624,
657,
25,
198,
197,
197,
4798,
7,
72,
8
] | 1.970588 | 34 |
# Copyright 2021 Victor Guimarães
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Handles the examples.
"""
import collections
import logging
import sys
from abc import abstractmethod, ABC
from collections import OrderedDict, deque
from functools import partial
from typing import List, Tuple
import numpy as np
import tensorflow as tf
from bert.tokenization.bert_tokenization import FullTokenizer
from neurallog.knowledge.examples import Examples
from neurallog.knowledge.program import NeuralLogProgram, NO_EXAMPLE_SET, \
get_predicate_from_string
from neurallog.language.language import AtomClause, Atom, Predicate, \
get_constant_from_string, get_term_from_string, TermType, Constant, Quote
from neurallog.network import registry
PARTIAL_WORD_PREFIX = "##"
logger = logging.getLogger(__name__)
dataset_classes = dict()
def neural_log_dataset(identifier):
"""
A decorator for NeuralLog dataset.
:param identifier: the identifier of the function
:type identifier: str
:return: the decorated function
:rtype: function
"""
return lambda x: registry(x, identifier, dataset_classes)
def get_dataset_class(identifier):
"""
Returns the class of the dataset based on the `identifier`.
:param identifier: the identifier
:type identifier: str
:return: the dataset class
:rtype: function
"""
return dataset_classes.get(identifier, DefaultDataset)
# IMPROVE: create a parameter to specify whether or not to print prediction
# that are not in the dataset
# noinspection PyTypeChecker,DuplicatedCode
def print_neural_log_predictions(model, neural_program, neural_dataset, dataset,
writer=sys.stdout, dataset_name=None,
print_batch_header=False):
"""
Prints the predictions of `model` to `writer`.
:param model: the model
:type model: NeuralLogNetwork
:param neural_dataset: the NeuralLog dataset
:type neural_dataset: NeuralLogDataset
:param neural_program: the neural program
:type neural_program: NeuralLogProgram
:param dataset: the dataset
:type dataset: tf.data.Dataset
:param writer: the writer. Default is to print to the standard output
:type writer: Any
:param dataset_name: the name of the dataset
:type dataset_name: str
:param print_batch_header: if `True`, prints a commented line before each
batch
:type print_batch_header: bool
"""
count = 0
batches = None
empty_entry = None
fix = 0
if isinstance(neural_dataset, SequenceDataset):
if print_batch_header and dataset_name is not None:
batches = list(neural_program.mega_examples[dataset_name].keys())
empty_entry = neural_dataset.empty_word_index
for features, _ in dataset:
if print_batch_header:
batch = batches[count] if batches is not None else count
print("%% Batch:", batch, file=writer, sep="\t")
count += 1
y_scores = model.predict(features)
if len(model.predicates) == 1:
# if not isinstance(neural_dataset, WordCharDataset):
y_scores = [y_scores]
if model.predicates[0][0].arity < 3:
features = [features]
for i in range(len(model.predicates)):
predicate, inverted = model.predicates[i]
if isinstance(neural_dataset, WordCharDataset):
predicate = Predicate(predicate.name, predicate.arity - 1)
if inverted:
continue
row_scores = y_scores[i]
if len(row_scores.shape) == 3:
row_scores = np.squeeze(row_scores, axis=1)
for j in range(len(row_scores)):
y_score = row_scores[j]
x = []
subjects = []
stop = False
offset = sum(model.input_sizes[:i])
for k in range(model.input_sizes[i] + fix):
x_k = features[offset + k][j].numpy()
if x_k.dtype == np.float32:
if np.max(x_k) == 0:
stop = True
break
arg_max = np.argmax(x_k)
if arg_max == empty_entry:
stop = True
break
else:
arg_max = x_k[0]
if arg_max < 0 or arg_max == empty_entry:
stop = True
break
subjects.append(neural_program.get_constant_by_index(
predicate, k, arg_max))
x.append(x_k)
offset += model.input_sizes[i]
if stop:
continue
if predicate.arity == 1:
clause = AtomClause(Atom(predicate, subjects[0],
weight=float(y_score)))
print(clause, file=writer)
else:
clauses = []
for index in range(len(y_score)):
object_term = neural_program.get_constant_by_index(
predicate, -1, index)
prediction = Atom(predicate, *subjects, object_term,
weight=float(y_score[index]))
if dataset_name is not None and \
not neural_dataset.has_example_key(
prediction.simple_key()):
continue
clauses.append(AtomClause(prediction))
if len(clauses) > 0:
clause = AtomClause(Atom(predicate, *subjects, "X"))
print("%%", clause, file=writer, sep=" ")
for clause in sorted(
clauses,
key=lambda c: c.atom.weight,
reverse=True):
print(clause, file=writer)
print(file=writer)
# print(file=writer)
# noinspection DuplicatedCode
def _print_word_char_predictions(model, neural_program, neural_dataset, dataset,
                                 writer=sys.stdout, dataset_name=None,
                                 print_batch_header=False):
    """
    Prints the predictions of `model` to `writer`.

    :param model: the model
    :type model: NeuralLogNetwork
    :param neural_dataset: the NeuralLog dataset
    :type neural_dataset: WordCharDataset
    :param neural_program: the neural program
    :type neural_program: NeuralLogProgram
    :param dataset: the dataset
    :type dataset: tf.data.Dataset
    :param writer: the writer. Default is to print to the standard output
    :type writer: Any
    :param dataset_name: the name of the dataset
    :type dataset_name: str
    :param print_batch_header: if `True`, prints a commented line before each
    batch
    :type print_batch_header: bool
    """
    count = 0
    batches = None
    # The last input slot of each predicate holds the char sequence, which is
    # decoded separately below; `fix` skips it while reading the word inputs.
    fix = -1
    if print_batch_header and dataset_name is not None:
        batches = list(neural_program.mega_examples[dataset_name].keys())
    # Index marking an empty (padding) word entry in the features.
    empty_entry = neural_dataset.empty_word_index
    for features, _ in dataset:
        if print_batch_header:
            batch = batches[count] if batches is not None else count
            print("%% Batch:", batch, file=writer, sep="\t")
            count += 1
        y_scores = model.predict(features)
        if len(model.predicates) == 1:
            # With a single target predicate the network returns bare
            # tensors; wrap them so the loop below is uniform.
            # if not isinstance(neural_dataset, WordCharDataset):
            y_scores = [y_scores]
            if model.predicates[0][0].arity < 3:
                features = [features]
        for i in range(len(model.predicates)):
            predicate, inverted = model.predicates[i]
            # The word/char dataset strips the char term from the predicate.
            predicate = Predicate(predicate.name, predicate.arity - 1)
            row_scores = y_scores[i]
            if len(row_scores.shape) == 3:
                row_scores = np.squeeze(row_scores, axis=1)
            for j in range(len(row_scores)):
                y_score = row_scores[j]
                x = []
                subjects = []
                stop = False
                # Features of predicate i start after the inputs of all
                # previous predicates.
                offset = sum(model.input_sizes[:i])
                for k in range(model.input_sizes[i] + fix):
                    x_k = features[offset + k][j].numpy()
                    if x_k.dtype == np.float32:
                        # One-hot encoded input: an all-zero vector or the
                        # empty-word index means a padding entry.
                        if np.max(x_k) == 0:
                            stop = True
                            break
                        arg_max = np.argmax(x_k)
                        if arg_max == empty_entry:
                            stop = True
                            break
                    else:
                        # Index-encoded input.
                        arg_max = x_k[0]
                        if arg_max < 0 or arg_max == empty_entry:
                            stop = True
                            break
                    subjects.append(neural_program.get_constant_by_index(
                        predicate, k, arg_max))
                    x.append(x_k)
                offset += model.input_sizes[i]
                if stop:
                    # Padding row: no actual example here.
                    continue
                # Decode the char-sequence input (the last feature of this
                # predicate) back into a quoted string term.
                last_feature = features[offset - 1][j].numpy()
                subject_string = "\""
                for k in last_feature:
                    if k == neural_dataset.empty_char_index:
                        break
                    subject_string += neural_program.get_constant_by_index(
                        neural_dataset.character_predicate,
                        neural_dataset.character_predicate_index,
                        k
                    ).value
                subject_string += "\""
                subjects[-1] = get_term_from_string(subject_string)
                if predicate.arity == 1:
                    clause = AtomClause(Atom(predicate, subjects[0],
                                             weight=float(y_score)))
                    print(clause, file=writer)
                else:
                    clauses = []
                    for index in range(len(y_score)):
                        object_term = neural_program.get_constant_by_index(
                            predicate, -1, index)
                        prediction = Atom(predicate, *subjects, object_term,
                                          weight=float(y_score[index]))
                        if dataset_name is not None and \
                                not neural_dataset.has_example_key(
                                    prediction.simple_key()):
                            continue
                        clauses.append(AtomClause(prediction))
                    if len(clauses) > 0:
                        # Header atom with a free variable in the output
                        # position, then the candidates sorted by weight.
                        clause = AtomClause(Atom(predicate, *subjects, "X"))
                        print("%%", clause, file=writer, sep=" ")
                        for clause in sorted(
                                clauses,
                                key=lambda c: c.atom.weight,
                                reverse=True):
                            print(clause, file=writer)
                        print(file=writer)
                        # print(file=writer)
def get_predicate_indices(predicate, inverted):
    """
    Gets the indices of the predicate's input and output.

    :param predicate: the predicate
    :type predicate: Predicate
    :param inverted: if the predicate is inverted
    :type inverted: bool
    :return: the input and output indices
    :rtype: (list[int], int)
    """
    arity = predicate.arity
    if arity == 1:
        # A unary predicate reads and scores the same single term.
        return [0], 0
    if arity == 2:
        # A binary predicate maps its first term to its second; inverting
        # it swaps the roles of the two terms.
        return ([1], 0) if inverted else ([0], 1)
    # Higher arities: every term but the last is an input, the last one is
    # the output (inversion has no effect here).
    return list(range(arity - 1)), arity - 1
# noinspection DuplicatedCode
def viterbi(potentials, transition_matrix, initial_distribution=None):
    """
    Computes the best path given based on the Viterbi algorithm.

    :param potentials: the emission of the neural network
    :type potentials: np.ndarray
    :param transition_matrix: the transition matrix
    :type transition_matrix: np.ndarray
    :param initial_distribution: the probabilities of the first state,
    it assumes an uniform probability, if `None`.
    :type initial_distribution: np.ndarray
    :return: the best path
    :rtype: np.ndarray
    """
    steps, tags = potentials.shape
    if initial_distribution is None:
        # A constant vector leaves the first step's ranking untouched,
        # exactly as a uniform prior would.
        initial_distribution = np.full(tags, 1.0)
    # trellis[t, s]: score of the best path ending at tag `s` on step `t`.
    trellis = np.zeros((steps, tags), dtype=np.float64)
    # backpointers[t, s]: tag at step t - 1 on that best path.
    backpointers = np.zeros((steps, tags), dtype=np.int32)
    trellis[0, :] = potentials[0, :] * initial_distribution
    incoming = transition_matrix.transpose()
    for step in range(1, steps):
        # candidate[s, s']: score of reaching tag `s` from previous tag `s'`.
        candidate = trellis[step - 1, :] * incoming
        backpointers[step, :] = np.argmax(candidate, axis=1)
        trellis[step] = np.max(candidate, axis=1) * potentials[step, :]
    # Recover the path by walking the backpointers from the best final tag.
    best_path = np.zeros(steps, dtype=np.int32)
    best_path[-1] = np.argmax(trellis[-1, :])
    for step in range(steps - 1, 0, -1):
        best_path[step - 1] = backpointers[step, best_path[step]]
    return best_path
# noinspection DuplicatedCode
def log_viterbi(potentials, transition_matrix, initial_distribution=None):
    """
    Computes the best path given based on the Viterbi algorithm.

    This version uses sum instead of multiplication and assumes that both
    `transition_matrix` and `emission` are the log of the probabilities.

    :param potentials: the emission of the neural network
    :type potentials: np.ndarray
    :param transition_matrix: the transition matrix
    :type transition_matrix: np.ndarray
    :param initial_distribution: the probabilities of the first state,
    it assumes an uniform probability, if `None`.
    :type initial_distribution: np.ndarray
    :return: the best path
    :rtype: np.ndarray
    """
    steps, tags = potentials.shape
    if initial_distribution is None:
        # In the log domain an additive constant does not change the
        # ranking of the first step, mirroring a uniform prior.
        initial_distribution = np.full(tags, 1.0)
    # trellis[t, s]: log-score of the best path ending at tag `s`, step `t`.
    trellis = np.zeros((steps, tags), dtype=np.float64)
    # backpointers[t, s]: tag at step t - 1 on that best path.
    backpointers = np.zeros((steps, tags), dtype=np.int32)
    trellis[0, :] = potentials[0, :] + initial_distribution
    incoming = transition_matrix.transpose()
    for step in range(1, steps):
        # candidate[s, s']: log-score of reaching `s` from previous tag `s'`.
        candidate = trellis[step - 1, :] + incoming
        backpointers[step, :] = np.argmax(candidate, axis=1)
        trellis[step] = np.max(candidate, axis=1) + potentials[step, :]
    # Recover the path by walking the backpointers from the best final tag.
    best_path = np.zeros(steps, dtype=np.int32)
    best_path[-1] = np.argmax(trellis[-1, :])
    for step in range(steps - 1, 0, -1):
        best_path[step - 1] = backpointers[step, best_path[step]]
    return best_path
class NeuralLogDataset(ABC):
    """
    Represents a NeuralLog dataset to train a NeuralLog network.
    """

    program: NeuralLogProgram
    "The NeuralLog program"

    def __init__(self, program, inverse_relations=True):
        """
        Creates a NeuralLogNetwork.

        :param program: the NeuralLog program
        :type program: NeuralLogProgram
        :param inverse_relations: whether the dataset must consider the
        inverse relations
        :type inverse_relations: bool
        """
        self.program = program
        self.inverse_relations = inverse_relations
        # Populated by subclasses with (Predicate, inverted) pairs.
        self._target_predicates = None

    @property
    def target_predicates(self):
        """
        The target predicates, as a list of `(Predicate, inverted)` pairs.

        :return: the target predicates
        :rtype: List[Tuple[Predicate, bool]]
        """
        return self._target_predicates

    @target_predicates.setter
    def target_predicates(self, value):
        """
        Replaces the target predicates.

        :param value: the target predicates
        :type value: List[Tuple[Predicate, bool]]
        """
        self._target_predicates = value

    @abstractmethod
    def has_example_key(self, key):
        """
        Checks if the dataset contains the example key.

        :param key: the example key
        :type key: Any
        :return: if the dataset contains the atom example
        :rtype: bool
        """

    @abstractmethod
    def get_dataset(self, example_set=NO_EXAMPLE_SET,
                    batch_size=1, shuffle=False):
        """
        Gets the data set for the example set.

        :param example_set: the name of the example set
        :type example_set: str
        :param batch_size: the batch size
        :type batch_size: int
        :param shuffle: if `True`, shuffles the dataset.
        :type shuffle: bool
        :return: the dataset
        :rtype: tf.data.Dataset
        """

    @abstractmethod
    def build(self, example_set=NO_EXAMPLE_SET):
        """
        Builds the features and label to train the neural network based on
        the `example_set`.

        :param example_set: the name of the set of examples
        :type example_set: str
        :return: the features and labels
        :rtype: (tuple[tf.SparseTensor], tuple[tf.SparseTensor])
        """

    def get_target_predicates(self):
        """
        Gets a list of tuples containing the target predicates and whether it
        is inverted or not.

        :return: the list of target predicates
        :rtype: list[tuple[Predicate, bool]]
        """
        return self.target_predicates

    @abstractmethod
    def print_predictions(self, model, program, dataset, writer=sys.stdout,
                          dataset_name=None, print_batch_header=False):
        """
        Prints the predictions of `model` to `writer`.

        :param model: the model
        :type model: NeuralLogNetwork
        :param program: the neural program
        :type program: NeuralLogProgram
        :param dataset: the dataset
        :type dataset: tf.data.Dataset
        :param writer: the writer. Default is to write to the standard output
        :type writer: Any
        :param dataset_name: the name of the dataset
        :type dataset_name: str
        :param print_batch_header: if `True`, prints a commented line before
        each batch
        :type print_batch_header: bool
        """
@neural_log_dataset("default_dataset")
class DefaultDataset(NeuralLogDataset):
    """
    The default NeuralLog dataset.
    """

    def __init__(self, program, inverse_relations=True):
        """
        Creates a DefaultDataset.

        :param program: the NeuralLog program
        :type program: NeuralLogProgram
        :param inverse_relations: whether the dataset must consider the
        inverse relations
        :type inverse_relations: bool
        """
        super(DefaultDataset, self).__init__(program, inverse_relations)
        # NOTE(review): neither `_compute_target_predicates` nor
        # `_load_example_keys` is defined in this class or in the visible
        # base class — presumably provided elsewhere; confirm against the
        # full source.
        self._target_predicates = self._compute_target_predicates()
        self.example_keys = self._load_example_keys()

    # noinspection PyMissingOrEmptyDocstring
    # noinspection PyUnusedLocal,DuplicatedCode
    def call(self, features, labels, *args, **kwargs):
        """
        Used to transform the features and examples from the sparse
        representation to dense in order to train the network.

        :param features: A dense index tensor of the features
        :type features: tuple[tf.SparseTensor]
        :param labels: A tuple sparse tensor of labels
        :type labels: tuple[tf.SparseTensor]
        :param args: additional arguments
        :type args: list
        :param kwargs: additional arguments
        :type kwargs: dict
        :return: the features and label tensors
        :rtype: (tf.Tensor or tuple[tf.Tensor], tuple[tf.Tensor])
        """
        dense_features = []
        count = 0
        for i in range(len(self._target_predicates)):
            predicate, inverted = self._target_predicates[i]
            indices, _ = get_predicate_indices(predicate, inverted)
            for index in indices:
                # Expand each index feature into a one-hot vector sized by
                # the number of constants of that predicate term.
                feature = tf.one_hot(
                    features[count],
                    self.program.get_constant_size(predicate, index))
                dense_features.append(feature)
                count += 1
        labels = tuple(map(lambda x: tf.sparse.to_dense(x), labels))
        if len(dense_features) > 1:
            dense_features = tuple(dense_features)
        else:
            # A single input is passed as a bare tensor, not a 1-tuple.
            dense_features = dense_features[0]
        # all_dense_features = tuple(all_dense_features)
        return dense_features, labels

    __call__ = call

    # noinspection PyMissingOrEmptyDocstring
    def are_features_empty(self, features):
        """
        Checks if the features are empty.

        :param features: the features
        :type features: List[List[int]] or Tuple[List[int]]
        :return: `True`, if the features are empty
        :rtype: bool
        """
        size = len(features[0])
        if not size:
            return True
        if size > 1:
            return False
        # A single row is "empty" when every input position holds the
        # out-of-vocabulary index of its predicate term.
        index = 0
        for i in range(len(self._target_predicates)):
            in_indices, out_index = \
                get_predicate_indices(*self._target_predicates[i])
            for j in range(len(in_indices)):
                empty_value = self._get_out_of_vocabulary_index(
                    self._target_predicates[i][0], in_indices[j])
                if empty_value != features[index][0]:
                    return False
                index += 1
        return True

    def build(self, example_set=NO_EXAMPLE_SET):
        """
        Builds the features and label to train the neural network based on
        the `example_set`.

        The labels are always a sparse tensor.

        :param example_set: the name of the set of examples
        :type example_set: str
        :return: the features and labels
        :rtype: (tuple[tf.SparseTensor], tuple[tf.SparseTensor])
        """
        examples = self.program.examples.get(example_set, OrderedDict())
        return self._build(examples)

    def ground_atom(self, example):
        """
        Grounds the example by replacing the value of the variables for each
        possible value found in the program.

        :param example: the example
        :type example: Atom
        :return: the grounded atoms
        :rtype: collections.Iterable[Atom]
        """
        if example.is_grounded():
            return example,
        # Expand one variable position at a time, fanning out the partial
        # groundings accumulated so far.
        current_atoms = deque([example])
        predicate = example.predicate
        term_types: Tuple[TermType] = self.program.predicates[predicate]
        for i in range(example.arity()):
            if example.terms[i].is_constant():
                continue
            next_atoms = deque()
            for atom in current_atoms:
                if term_types[i].number:
                    # Numeric terms are grounded with a default value.
                    terms = list(atom.terms)
                    terms[i] = 0.0
                    next_atoms.append(
                        Atom(predicate, *terms, weight=example.weight))
                else:
                    possible_terms = \
                        self.program.iterable_constants_per_term[(predicate, i)]
                    for constant in possible_terms.values():
                        terms = list(atom.terms)
                        terms[i] = constant
                        next_atoms.append(
                            Atom(predicate, *terms, weight=example.weight))
            current_atoms = next_atoms
        return current_atoms

    def _build(self, examples):
        """
        Builds the features and label to train the neural network based on
        the `example_set`.

        The labels are always a sparse tensor.

        :param examples: the set of examples
        :type examples: Examples
        :return: the features and labels
        :rtype: (tuple[tf.SparseTensor], tuple[tf.SparseTensor])
        """
        # First pass: group, per input tuple, the observed outputs of each
        # (predicate, inverted) target; `input_terms` keeps row order.
        output_by_term = OrderedDict()
        input_terms = []
        for predicate, inverted in self._target_predicates:
            facts = examples.get(predicate, dict())
            facts = facts.values()
            for example in facts:
                for fact in self.ground_atom(example):
                    if predicate.arity < 3:
                        input_term = (fact.terms[-1 if inverted else 0],)
                    else:
                        input_term = tuple(fact.terms[0:predicate.arity - 1])
                    if input_term not in output_by_term:
                        output = dict()
                        output_by_term[input_term] = output
                        input_terms.append(input_term)
                    else:
                        output = output_by_term[input_term]
                    if predicate.arity == 1:
                        output[(predicate, inverted)] = fact.weight
                    else:
                        output_term = fact.terms[0 if inverted else -1]
                        # noinspection PyTypeChecker
                        output.setdefault((predicate, inverted), []).append(
                            (output_term, fact.weight))
        # Second pass: one feature column per input position and one sparse
        # label tensor per target predicate, over the shared input rows.
        all_features = []
        all_labels = []
        for predicate, inverted in self._target_predicates:
            features = [[] for _ in range(max(1, predicate.arity - 1))]
            label_values = []
            label_indices = []
            in_indices, out_index = get_predicate_indices(predicate, inverted)
            for i in range(len(input_terms)):
                outputs = output_by_term[input_terms[i]].get(
                    (predicate, inverted), None)
                constant_index = 0
                for input_index in in_indices:
                    index = None
                    if outputs is not None:
                        index = self.program.get_index_of_constant(
                            predicate, input_index,
                            input_terms[i][constant_index])
                    if index is None:
                        # Rows without outputs for this target (or unknown
                        # constants) get the out-of-vocabulary index.
                        index = self._get_out_of_vocabulary_index(
                            predicate, input_index)
                    features[constant_index].append(index)
                    constant_index += 1
                if outputs is not None:
                    if predicate.arity == 1:
                        label_indices.append([i, 0])
                        label_values.append(outputs)
                    else:
                        # noinspection PyTypeChecker
                        for output_term, output_value in outputs:
                            output_term_index = \
                                self.program.get_index_of_constant(
                                    predicate, out_index, output_term)
                            label_indices.append([i, output_term_index])
                            label_values.append(output_value)
            all_features += features
            if predicate.arity == 1:
                dense_shape = [len(input_terms), 1]
                empty_index = [[0, 0]]
            else:
                dense_shape = [
                    len(input_terms),
                    self.program.get_constant_size(predicate, out_index)]
                empty_index = [[0, 0]]
            if len(label_values) == 0:
                # No labels for this target: emit a single zero entry so the
                # tensor still has the expected dense shape.
                sparse_tensor = tf.SparseTensor(indices=empty_index,
                                                values=[0.0],
                                                dense_shape=dense_shape)
            else:
                sparse_tensor = tf.SparseTensor(indices=label_indices,
                                                values=label_values,
                                                dense_shape=dense_shape)
                sparse_tensor = tf.sparse.reorder(sparse_tensor)
            all_labels.append(sparse_tensor)
        return tuple(all_features), tuple(all_labels)

    def _get_out_of_vocabulary_index(self, predicate, term_index):
        """
        Returns the index of the entity to replace the not found entity.

        :param predicate: the predicate
        :type predicate: Predicate
        :param term_index: the index of the term
        :type term_index: int
        :return: the index of entity to replace the not found one
        :rtype: int
        """
        # Base behavior: a sentinel index; subclasses map this to a real
        # out-of-vocabulary constant.
        return -1
# noinspection PyMissingOrEmptyDocstring
class AbstractSequenceDataset(DefaultDataset, ABC):
    """
    Represents an Abstract Sequence Dataset.
    """

    # noinspection PyUnusedLocal,DuplicatedCode
    def call(self, features, labels, *args, **kwargs):
        """
        Used to transform the features and examples from the sparse
        representation to dense in order to train the network.

        :param features: A dense index tensor of the features
        :type features: tuple[tf.SparseTensor]
        :param labels: A tuple sparse tensor of labels
        :type labels: tuple[tf.SparseTensor]
        :param args: additional arguments
        :type args: list
        :param kwargs: additional arguments
        :type kwargs: dict
        :return: the features and label tensors
        :rtype: (tf.Tensor or tuple[tf.Tensor], tuple[tf.Tensor])
        """
        dense_features = []
        count = 0
        for i in range(len(self._target_predicates)):
            predicate, inverted = self._target_predicates[i]
            indices, _ = get_predicate_indices(predicate, inverted)
            for index in indices:
                # NOTE(review): `self.expand_one_hot` is not assigned by any
                # __init__ visible here — presumably set by a concrete
                # subclass; confirm against the full source.
                if self.expand_one_hot:
                    feature = tf.one_hot(
                        features[count],
                        self.program.get_constant_size(predicate, index))
                else:
                    # Keep the raw indices, reshaped to a column vector.
                    feature = tf.reshape(features[count], [-1, 1])
                dense_features.append(feature)
                count += 1
        if len(dense_features) > 1:
            dense_features = tuple(dense_features)
        else:
            dense_features = dense_features[0]
        return dense_features, labels

    __call__ = call

    # noinspection PyMissingOrEmptyDocstring
    def build(self, example_set=NO_EXAMPLE_SET):
        """
        Builds the features and label to train the neural network based on
        the `example_set`, yielding one (features, labels) pair per mega
        example.

        The labels are densified before being yielded.

        :param example_set: the name of the set of examples
        :type example_set: str
        :return: the features and labels
        :rtype: (tuple[tf.SparseTensor], tuple[tf.SparseTensor])
        """
        mega_examples = self.program.mega_examples.get(
            example_set, OrderedDict())
        # Iterate the mega examples in deterministic (key-sorted) order.
        for _, examples in sorted(mega_examples.items(), key=lambda x: x[0]):
            features, labels = self._build(examples)
            labels = tuple(map(lambda x: tf.sparse.to_dense(x), labels))
            yield features, labels

    @abstractmethod
    def _build(self, examples):
        """
        Builds the features and label to train the neural network based on
        the `example_set`.

        The labels are always a sparse tensor.

        :param examples: the set of examples
        :type examples: dict[Predicate, List[Atom]]
        :return: the features and labels
        :rtype: (tuple[list[int]], tuple[tf.SparseTensor])
        """
        pass
@neural_log_dataset("sequence_dataset")
class SequenceDataset(AbstractSequenceDataset):
    """
    The sequence dataset.
    """

    def __init__(self, program, empty_word_index, inverse_relations=True,
                 oov_word="<OOV>", expand_one_hot=True):
        """
        Creates a SequenceDataset.

        :param program: the NeuralLog program
        :type program: NeuralLogProgram
        :param empty_word_index: the index of an entity that is not found in any
        example, to represent an empty entry
        :type empty_word_index: int
        :param inverse_relations: whether the dataset must consider the
        inverse relations
        :type inverse_relations: bool
        :param oov_word: the value to replace out of the vocabulary
        entities
        :type oov_word: str
        :param expand_one_hot: if `True`, expands the indices of the input
        into one hot tensors
        :type expand_one_hot: bool
        """
        # NOTE(review): the visible DefaultDataset.__init__ accepts only
        # (program, inverse_relations); this 4-argument super() call implies
        # an intermediate __init__ storing `empty_word_index` and
        # `expand_one_hot` was elided from this view — confirm.
        super(SequenceDataset, self).__init__(
            program, empty_word_index, inverse_relations, expand_one_hot)
        self.oov_word = get_constant_from_string(oov_word)

    # noinspection DuplicatedCode
    def _get_out_of_vocabulary_index(self, predicate, term_index):
        """
        Returns the index of the entity to replace the not found entity.

        :param predicate: the predicate
        :type predicate: Predicate
        :param term_index: the index of the term
        :type term_index: int
        :return: the index of entity to replace the not found one
        :rtype: int
        """
        # Map unknown entities to the dedicated out-of-vocabulary constant.
        return self.program.get_index_of_constant(predicate, term_index,
                                                  self.oov_word)
# noinspection PyUnusedLocal
@neural_log_dataset("language_dataset")
class LanguageDataset(AbstractSequenceDataset):
    """
    Class to process mega examples as phrases.
    """

    # NOTE(review): `call` must resolve to a name visible at class-creation
    # time (a module-level function or one defined earlier in this class);
    # the stray "noinspection" comments below suggest method bodies were
    # elided from this view — confirm against the full source.
    # noinspection PyMissingOrEmptyDocstring
    __call__ = call
    # noinspection PyMissingOrEmptyDocstring
    # noinspection DuplicatedCode
    # noinspection DuplicatedCode
    # noinspection PyMissingOrEmptyDocstring,DuplicatedCode
@neural_log_dataset("word_char_dataset")
class WordCharDataset(SequenceDataset):
    """
    Class to represent a Word and Char dataset.

    This class considers ternary predicates as being composed by an word,
    a sequence of characters (represented as a string) and a a class.
    The word and the class will be treated as usual. The sequence of
    characters will be transformed into a vector of index of the character
    entity in a given predicate. The vector will have the size of the
    largest sequence in the batch.
    """

    def __init__(self, program, empty_word_index, empty_char_index,
                 character_predicate, character_predicate_index=0,
                 inverse_relations=True, oov_word="<OOV>", oov_char="<OOV>",
                 expand_one_hot=True, char_pad=0):
        """
        Creates a word char dataset.

        :param program: the NeuralLog program
        :type program: NeuralLogProgram
        :param empty_word_index: the index of an entity that is not found in any
        example, to represent an empty entry
        :type empty_word_index: int
        :param empty_char_index: the index that represents an empty (padding)
        character entry
        :type empty_char_index: int
        :param character_predicate: the predicate to get the index of the
        characters
        :type character_predicate: str
        :param character_predicate_index: the index of the term in the character
        predicate, to get the index of the characters
        :type character_predicate_index: int
        :param inverse_relations: whether the dataset must consider the
        inverse relations
        :type inverse_relations: bool
        :param oov_word: the value to replace out of the vocabulary words
        :type oov_word: str
        :param oov_char: the value to replace out of the vocabulary chars
        :type oov_char: str
        :param expand_one_hot: if `True`, expands the indices of the input
        into one hot tensors
        :type expand_one_hot: bool
        :param char_pad: the number of empty elements to append at the end of
        the char sequence
        :type char_pad: int
        """
        super(WordCharDataset, self).__init__(
            program, empty_word_index, inverse_relations,
            oov_word, expand_one_hot)
        self.empty_char_index = empty_char_index
        self.character_predicate = \
            get_predicate_from_string(character_predicate)
        self.character_predicate_index = character_predicate_index
        self.oov_char = get_constant_from_string(oov_char)
        # Index used for characters missing from the vocabulary.
        self._ooc_char_index = self._get_out_of_vocabulary_index(
            get_predicate_from_string(character_predicate),
            character_predicate_index
        )
        # Negative paddings are clamped to zero.
        self.char_pad = max(char_pad, 0)

    # noinspection PyMissingOrEmptyDocstring
    # noinspection PyMissingOrEmptyDocstring
    def call(self, features, labels, *args, **kwargs):
        """
        Used to transform the features and examples from the sparse
        representation to dense in order to train the network.

        :param features: A dense index tensor of the features
        :type features: tuple[tf.SparseTensor]
        :param labels: A tuple sparse tensor of labels
        :type labels: tuple[tf.SparseTensor]
        :param args: additional arguments
        :type args: list
        :param kwargs: additional arguments
        :type kwargs: dict
        :return: the features and label tensors
        :rtype: (tf.Tensor or tuple[tf.Tensor], tuple[tf.Tensor])
        """
        dense_features = []
        count = 0
        for i in range(len(self._target_predicates)):
            predicate, inverted = self._target_predicates[i]
            indices, _ = get_predicate_indices(predicate, inverted)
            for index in indices:
                if self.expand_one_hot:
                    feature = tf.one_hot(
                        features[count],
                        self.program.get_constant_size(predicate, index))
                else:
                    # Keep the raw index lists as a constant tensor.
                    feature = tf.constant(features[count])
                dense_features.append(feature)
                count += 1
        if len(dense_features) > 1:
            dense_features = tuple(dense_features)
        else:
            dense_features = dense_features[0]
        if len(labels) == 1:
            # A single target is passed as a bare label, not a 1-tuple.
            labels = labels[0]
        return dense_features, labels

    __call__ = call

    # noinspection DuplicatedCode
    def _build(self, examples):
        """
        Builds the features and label to train the neural network based on
        the `example_set`.

        The labels are always a sparse tensor.

        :param examples: the set of examples
        :type examples: dict[Predicate, List[Atom]]
        :return: the features and labels
        :rtype: (tuple[list[int]], tuple[tf.SparseTensor])
        """
        # First pass: per target predicate, collect word features, per-word
        # char index sequences and label entries; rows are shared across
        # targets via `row_index`.
        all_features = []  # type: List[int] or List[List[int]]
        all_label_indices = []
        all_label_values = []
        row_index = 0
        # Longest char sequence seen per target, used for padding below.
        max_lengths = []
        for predicate, inverted in self._target_predicates:
            input_indices, output_index = get_predicate_indices(
                predicate, inverted)
            output_index -= 1
            # The examples are stored under the predicate without the char
            # term, hence arity - 1.
            real_predicate = Predicate(predicate.name, predicate.arity - 1)
            feature = [[] for _ in range(max(1, predicate.arity - 1))]
            label_indices = []
            label_values = []
            facts = examples.get(real_predicate, [])
            max_length = -1
            for example in facts:
                for fact in self.ground_atom(example):
                    input_terms = tuple(fact.terms[0:predicate.arity - 2])
                    count = 0
                    for input_index, in_term in zip(input_indices, input_terms):
                        # Words are matched case-insensitively.
                        in_term = get_term_from_string(str(in_term).lower())
                        input_value = self.program.get_index_of_constant(
                            real_predicate, input_index, in_term)
                        if input_value is None:
                            input_value = self._get_out_of_vocabulary_index(
                                real_predicate, input_index)
                        feature[count].append([input_value])
                        count += 1
                    if predicate.arity > 2:
                        # Encode the last input term, character by
                        # character, into a list of char indices.
                        char_features = []
                        last_term = input_terms[-1].value
                        max_length = max(len(last_term), max_length)
                        for char in last_term:
                            input_value = self.program.get_index_of_constant(
                                self.character_predicate,
                                self.character_predicate_index,
                                get_constant_from_string(char)
                            )
                            if input_value is None:
                                input_value = self._ooc_char_index
                            char_features.append(input_value)
                        feature[-1].append(char_features)
                    output_term = fact.terms[output_index]
                    output_value = self.program.get_index_of_constant(
                        real_predicate, output_index, output_term)
                    label_indices.append([row_index, output_value])
                    label_values.append(fact.weight)
                    row_index += 1
            max_lengths.append(max_length + self.char_pad)
            all_label_indices.append(label_indices)
            all_label_values.append(label_values)
            all_features += feature
        # Second pass: pad every feature column to the total number of rows
        # (`row_index`) — empty-word rows before/after each target's own
        # block — and pad char sequences to the target's max length; then
        # build one sparse label tensor per target.
        all_labels = []
        examples_offset = 0
        features_offset = 0
        for i in range(len(self._target_predicates)):
            # Features
            arity = self._target_predicates[i][0].arity
            number_of_features = max(arity - 1, 1)
            length = len(all_features[features_offset])
            if arity > 2:
                # The char column is padded separately below.
                number_of_features -= 1
            for j in range(number_of_features):
                all_features[features_offset + j] = \
                    ([self.empty_word_index] * examples_offset) + \
                    all_features[features_offset + j]
                all_features[features_offset + j] += \
                    [self.empty_word_index] * (
                            row_index - examples_offset - length)
            if arity > 2:
                j = number_of_features
                adjusted_features = []
                for current in all_features[features_offset + j]:
                    # Right-pad each char sequence to the longest one.
                    # noinspection PyTypeChecker
                    adjusted_features.append(
                        current +
                        ([self.empty_char_index] *
                         (max_lengths[i] - len(current))))
                all_features[features_offset + j] = \
                    ([[self.empty_char_index] * max_lengths[i]] *
                     examples_offset) + adjusted_features
                all_features[features_offset + j] += \
                    [[self.empty_char_index] * max_lengths[i]] * (
                            row_index - examples_offset - length)
                number_of_features += 1
            examples_offset += length
            features_offset += number_of_features
            # Labels
            predicate, index = self._target_predicates[i]
            real_predicate = Predicate(predicate.name, predicate.arity - 1)
            _, output_index = get_predicate_indices(predicate, index)
            output_index -= 1
            if predicate.arity == 1:
                dense_shape = [row_index, 1]
                empty_index = [[0, 0]]
            else:
                dense_shape = [
                    row_index,
                    self.program.get_constant_size(
                        real_predicate, output_index)]
                empty_index = [[0, 0]]
            if len(all_label_values[i]) == 0:
                # No labels for this target: emit a single zero entry so
                # the tensor still has the expected dense shape.
                sparse_tensor = tf.SparseTensor(
                    indices=empty_index, values=[0.0], dense_shape=dense_shape)
            else:
                sparse_tensor = tf.SparseTensor(
                    indices=all_label_indices[i], values=all_label_values[i],
                    dense_shape=dense_shape)
            all_labels.append(sparse_tensor)
        return tuple(all_features), tuple(all_labels)
# noinspection PyMissingOrEmptyDocstring
| [
2,
220,
15069,
33448,
12622,
1962,
49399,
26102,
274,
198,
2,
198,
2,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
220,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
... | 2.101155 | 21,818 |
# Generated by Django 2.0 on 2018-12-07 11:05
from django.conf import settings
import django.contrib.postgres.fields
import django.contrib.postgres.fields.citext
from django.db import migrations, models
import django.db.models.deletion
import uuid
| [
2,
2980,
515,
416,
37770,
362,
13,
15,
319,
2864,
12,
1065,
12,
2998,
1367,
25,
2713,
198,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
11748,
42625,
14208,
13,
3642,
822,
13,
7353,
34239,
13,
25747,
198,
11748,
42625,
14208,
... | 3.04878 | 82 |
########################################################################################################################
# #
# This file is part of kAIvy #
# #
# Copyright (c) 2019-2021 by the kAIvy team and contributors #
# #
########################################################################################################################
"""
Implements a window title bar view
"""
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.label import Label
from kivy.uix.behaviors import ButtonBehavior
from kivy.graphics import Color, Rectangle, InstructionGroup
import time
from kivymd.uix.button import MDIconButton
class VirtualWindowTitleBar(ButtonBehavior, FloatLayout):
    """
    Display's the title bar of a window
    """

    # NOTE(review): this view reads `self.title_label`, `self.paint_group`,
    # `self.button`, `self.window`, `self.last_tap` and
    # `self.double_tap_time`, none of which are assigned in this chunk —
    # presumably set by an __init__ outside this view; confirm.

    TITLE_BAR_MIN_WIDTH = 320  # Default title bar width
    TITLE_BAR_HEIGHT = 32  # Default title bar height

    def set_text(self, text):
        """
        Sets a new title text

        :param text: the new title text
        :return:
        """
        self.text = text
        # Keep the label widget in sync with the stored title.
        self.title_label.text = self.text

    def update_title(self, _=None, __=None):
        """
        Updates the title bar's bounding

        Repaints the background rectangle at the current position/size and
        re-anchors the maximize/restore button at the right edge.
        """
        self.paint_group.clear()
        self.paint_group.add(Color(0.5, 0.5, 0.7, 1.0))
        self.paint_group.add(Rectangle(pos=self.pos, size=self.size))
        self.button.pos_hint = {'right': 1.0, 'center_y': 0.5}
        # NOTE(review): bind() here registers a new handler on every
        # repaint, which can accumulate duplicate callbacks — confirm this
        # is intended.
        self.button.bind(on_release=self.handle_resize_click)
        self.do_layout()

    def get_required_space(self):
        """
        Returns the minimum required space of the view

        :return: Width, Height tuple
        """
        return (self.TITLE_BAR_MIN_WIDTH, self.TITLE_BAR_HEIGHT)

    def handle_resize_click(self, btn):
        """
        Triggered when the resize button was pressed

        :param btn: the button which fired the event
        """
        self.window.handle_maximize_restore()

    def handle_release(self, evt):
        """
        Called on click on title bar

        :param evt: The event source
        """
        cur_time = time.time()
        # A second release within the double-tap window toggles
        # maximize/restore.
        if cur_time - self.last_tap < self.double_tap_time:
            self.window.handle_maximize_restore()
        self.last_tap = cur_time
29113,
29113,
29113,
14468,
7804,
198,
2,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
2... | 1.963316 | 1,363 |
# -*- coding: utf-8 -*-
import os
import re
import warnings
from setuptools.dist import Distribution
from setuptools.extension import Extension
from pip.commands.wheel import WheelCommand
from pip.status_codes import SUCCESS
from semantic_version import Spec, Version
from .._compat import Path, PY2, encode
from ..utils.helpers import template
class Builder(object):
    """
    Tool to transform a poet file to a setup() instruction.
    It also creates the MANIFEST.in file if necessary.

    The generated setup.py/MANIFEST.in are temporary: build() writes them,
    runs the sdist (and optionally wheel) commands, then deletes them.
    """

    # Parses "Author Name <email@host>" entries into name/email groups.
    AUTHOR_REGEX = re.compile('(?u)^(?P<name>[- .,\w\d\'’"()]+) <(?P<email>.+?)>$')

    # Known interpreter versions per major release; used by _classifiers to
    # expand version constraints into trove classifiers.
    PYTHON_VERSIONS = {
        2: ['2.6', '2.7'],
        3: ['3.0', '3.1', '3.2', '3.3', '3.4', '3.5', '3.6']
    }
def build(self, poet, **options):
"""
Builds a package from a Poet instance
:param poet: The poet to build.
:type poet: poet.poet.Poet
"""
setup_kwargs = self._setup(poet, **options)
setup = os.path.join(poet.base_dir, 'setup.py')
manifest = os.path.join(poet.base_dir, 'MANIFEST.in')
self._write_setup(setup_kwargs, setup)
readme = None
if poet.has_markdown_readme():
readme = os.path.join(poet.base_dir, 'README.rst')
if os.path.exists(readme):
readme = None
else:
self._write_readme(readme, setup_kwargs['long_description'])
self._manifest.append('include README.rst')
self._write_manifest(manifest)
try:
dist = Distribution(setup_kwargs)
dist.run_command('sdist')
except Exception:
raise
finally:
os.unlink(setup)
os.unlink(manifest)
if readme:
os.unlink(readme)
# Building wheel if necessary
if not options.get('no_wheels'):
command = WheelCommand()
command_args = [
'--no-index',
'--no-deps',
'-q',
'--wheel-dir', 'dist',
'dist/{}'.format(poet.archive)
]
if options.get('universal', True):
command_args.append('--build-option=--universal')
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=UserWarning)
status = command.main(command_args)
if status != SUCCESS:
raise Exception('An error occurred while executing command.')
    def _setup(self, poet, **options):
        """
        Builds the setup() kwargs based on the Poet instance.

        Each key of the returned dict maps directly to a setuptools
        ``setup()`` keyword argument.

        :param poet: The Poet instance for which to build.
        :type poet: poet.poet.Poet
        :rtype: dict
        """
        # Base metadata taken verbatim from the poet file.
        setup_kwargs = {
            'name': poet.name,
            'version': poet.normalized_version,
            'description': poet.description,
            'long_description': poet.readme,
            'include_package_data': True,
            'script_name': 'setup.py'
        }
        # Derived sections, each computed by a dedicated helper.
        setup_kwargs.update(self._author(poet))
        setup_kwargs['url'] = self._url(poet)
        setup_kwargs['license'] = poet.license
        setup_kwargs['keywords'] = self._keywords(poet)
        setup_kwargs['classifiers'] = self._classifiers(poet)
        setup_kwargs['entry_points'] = self._entry_points(poet)
        setup_kwargs['install_requires'] = self._install_requires(poet)
        setup_kwargs['tests_require'] = self._tests_require(poet)
        setup_kwargs['extras_require'] = self._extras_require(poet)
        # packages / py_modules / package_dir
        setup_kwargs.update(self._packages(poet))
        # Extensions
        setup_kwargs.update(self._ext_modules(poet))
        return setup_kwargs
def _author(self, poet):
"""
Build the author information from a Poet instance.
Transforms a author in the form "name <email>" into
a proper dictionary.
:param poet: The Poet instance for which to build.
:type poet: poet.poet.Poet
:rtype: dict
"""
m = self.AUTHOR_REGEX.match(poet.authors[0])
name = m.group('name')
email = m.group('email')
if PY2:
name = encode(name)
email = encode(email)
return {
'author': name,
'author_email': email
}
def _classifiers(self, poet):
"""
Builds the classifiers list from the
specified Python versions.
:param poet: The Poet instance for which to build.
:type poet: poet.poet.Poet
:rtype: list
"""
classifers = ['Programming Language :: Python']
compatible_versions = {}
for python in poet.python_versions:
constraint = Spec(python)
for major in [2, 3]:
available_versions = self.PYTHON_VERSIONS[major]
for version in available_versions:
if Version.coerce(version) in constraint:
if major not in compatible_versions:
compatible_versions[major] = []
compatible_versions[major].append(version)
for major in sorted(list(compatible_versions.keys())):
versions = compatible_versions[major]
classifer_template = 'Programming Language :: Python :: {}'
classifers.append(classifer_template.format(major))
for version in versions:
classifers.append(classifer_template.format(version))
return classifers
def _entry_points(self, poet):
"""
Builds the entry points
:param poet: The Poet instance for which to build.
:type poet: poet.poet.Poet
:rtype: list
"""
entry_points = {
'console_scripts': []
}
# Generic entry points
for category, entry_points in poet.entry_points.items():
entry_points[category] = []
for name, script in entry_points.items():
entry_points[category].append('{} = {}'.format(name, script))
# Console scripts entry points
for name, script in poet.scripts.items():
entry_points['console_scripts'].append('{} = {}'.format(name, script))
return entry_points
def _install_requires(self, poet):
"""
Builds the dependencies list.
:param poet: The Poet instance for which to build.
:type poet: poet.poet.Poet
:rtype: dict
"""
requires = []
dependencies = poet.dependencies
for dependency in dependencies:
if dependency.optional:
continue
requires.append(dependency.normalized_name)
return requires
def _tests_require(self, poet):
"""
Builds the dev dependencies list.
:param poet: The Poet instance for which to build.
:type poet: poet.poet.Poet
:rtype: dict
"""
requires = []
dependencies = poet.dev_dependencies
for dependency in dependencies:
if dependency.optional:
continue
requires.append(dependency.normalized_name)
return requires
def _extras_require(self, poet):
"""
Builds the extras dictionary from
the configured features.
:param poet: The Poet instance for which to build.
:type poet: poet.poet.Poet
:rtype: dict
"""
if not poet.features:
return {}
extras = {}
for feature_name, featured_packages in poet.features.items():
extras[feature_name] = []
for package in featured_packages:
for dep in poet.dependencies:
if dep.name == package:
extras[feature_name].append(dep.normalized_name)
return extras
    def _packages(self, poet):
        """
        Builds the packages and modules list
        based on the include and exclude sections.
        It will also register files that need to be put
        in the MANIFEST.in file.

        :param poet: The Poet instance for which to build.
        :type poet: poet.poet.Poet
        :rtype: dict with 'packages', 'py_modules' and optionally
            'package_dir' keys, ready for setup().
        """
        # NOTE(review): the docstring mentions MANIFEST registration, but no
        # self._manifest update is visible in this method -- presumably done
        # inside _find_packages_from; confirm.
        includes = poet.include
        packages = []
        modules = []
        package_dirs = {}
        crawled = []
        excluded = []
        root = Path(poet.base_dir)
        # Expand exclude/ignore globs into dotted module names (e.g.
        # "pkg/sub/mod.py" -> "pkg.sub.mod") so they can be filtered below.
        for exclude in poet.exclude + poet.ignore:
            if not exclude:
                continue
            if exclude.startswith('/'):
                exclude = exclude[1:]
            for exc in root.glob(exclude):
                if exc.suffix == '.py':
                    exc = exc.relative_to(root)
                    excluded.append('.'.join(exc.with_suffix('').parts))
        if not isinstance(includes, list):
            includes = [includes]
        for include in includes:
            if isinstance(include, dict):
                # Mapping form: {'from': ..., 'include': ..., 'as': ...}
                settings = self._find_packages_from(
                    root,
                    include['from'],
                    include['include'],
                    include.get('as', ''),
                    excluded=excluded,
                    crawled=crawled
                )
            else:
                # Plain string/glob form.
                settings = self._find_packages_from(
                    root,
                    '',
                    include,
                    excluded=excluded,
                    crawled=crawled
                )
            packages += settings['packages']
            modules += settings['modules']
            package_dirs.update(settings.get('package_dirs', {}))
        # Drop anything matched by the exclude section.
        packages = [p for p in packages if p not in excluded]
        modules = [m for m in modules if m not in excluded]
        settings = {
            'packages': packages,
            'py_modules': modules
        }
        package_dir = {}
        for package_name, directory in package_dirs.items():
            package_dir[package_name] = directory.as_posix()
        if package_dir:
            settings['package_dir'] = package_dir
        return settings
def _ext_modules(self, poet):
"""
Builds the extension modules.
Transforms the extensions section:
[extensions]
"my.module" = "my/module.c"
to a proper extension:
Extension('my.module', 'my/module.c')
:param poet: The Poet instance for which to build.
:type poet: poet.poet.Poet
:rtype: dict
"""
extensions = []
for module, source in poet.extensions.items():
if not isinstance(source, list):
source = [source]
extensions.append(Extension(module, source))
return {
'ext_modules': extensions
}
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
28686,
198,
11748,
302,
198,
11748,
14601,
198,
198,
6738,
900,
37623,
10141,
13,
17080,
1330,
27484,
198,
6738,
900,
37623,
10141,
13,
2302,
3004,
1330,
27995... | 2.060839 | 5,342 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper class for implementing a beam search decoder.
Individual models just need to provide a few callback functions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import tensorflow as tf
from lingvo.core import base_layer
from lingvo.core import py_utils
from lingvo.core.ops import py_x_ops
# TODO(yonghui):
# 1) Change the tensor shape [max_decoder_time_steps, batch_size *
# num_hyps_per_beam] to [max_decoder_time_steps, num_hyps_per_beam,
# batch_size] to avoid confusing and mis-interpretation of the results.
# Defines a namedtuple to store the results of BeamSearchDecode. It contains
# the following entries:
# done_hyps: A string Tensor of shape
# [max_decoder_time_steps, batch_size * num_hyps_per_beam] which can be
# either an empty string, or a serialized Hypothesis proto. The non-empty
# hyps in done_hyps are terminated hypotheses. The 'h'-th hyp for sample
# 'b' at time step 't' can be found at done_hyps[t, batch_size * h + b].
# topk_hyps: A string Tensor of shape [batch_size, num_hyps_per_beam].
# topk_hyps[b, h] is the h-th hypothesis for the sample 'b' in the
# batch, which can either be an empty string or a serialized Hypothesis
# proto.
# topk_ids: Int32 Tensor of shape [batch_size * num_hyps_per_beam,
# target_seq_len] which contains the IDs of the targets in each of the
# hypotheses in the beam for the samples in the batch. For sample
# 'b' in the batch, the h-th hypothesis for this sample can be found at
# position [b * num_hyps_per_beam + h, :].
# topk_lens: Int32 Tensor of shape [batch_size * num_hyps_per_beam] which
# indicates the length (>=0) of each of the hypotheses.
# topk_scores: Float32 Tensor of shape [batch_size * num_hyps_per_beam]
# containing the scores (negative log probabilities) of each of the
# hypotheses in the beam.
# topk_decoded: A string Tensor of shape [batch_size * num_hyps_per_beam] which
# contains the decoded target strings in each of the hypotheses in the
# beam for the samples in the batch. The 'h'-th hyp for sample 'b' can
# be found at topk_decoded[b * num_hyps_per_beam + h]
# Container for the final results of BeamSearchDecode; the individual fields
# are described in the comment block above.
BeamSearchDecodeOutput = collections.namedtuple(
    'BeamSearchDecodeOutput',
    [
        'done_hyps', 'topk_hyps', 'topk_ids', 'topk_lens', 'topk_scores',
        'topk_decoded', 'other_states'
    ],
)
# Make the last attribute ('other_states') default to None.
BeamSearchDecodeOutput.__new__.__defaults__ = (None,)
class BeamSearchHelper(base_layer.BaseLayer):
"""Helper class for performing beam search.
The user of this helper class needs to implement three callbacks.
This callback is called once only at the beginning of beam search:
.. code-block:: none
def InitBeamSearchState(theta, encoder_outputs, num_hyps_per_beam):
Args:
theta: A NestedMap object containing weights' values of this
layer and its children layers.
encoder_outputs: A NestedMap computed by encoder.
num_hyps_per_beam: An int, number hyps to keep for source sentence.
Returns:
initial_results: a `.NestedMap` of initial results. It should contain
the following tensors at the minimum.
.log_probs: The initial log probs for each of the tokens in
the target vocab, of shape [num_hyps_per_beam * src_batch,
vocab_size]. src_batch "b" and hyp_per_beam "h" is
represented at index (h * src_batch + b).
.atten_probs: The initial attention probs, of shape [
num_hyps_per_beam * src_batch, src_len]. src_batch "b"
and hyp_per_beam "h" is represented at index
(h * src_batch + b).
states: a `.NestedMap` of tensors representing states that the client
would like to keep track of for each hyp.
This callback is called once every decoding time step before beam_search_step
is called:
.. code-block:: none
def PreBeamSearchStepCallback(theta,
encoder_outputs,
step_ids,
in_states,
num_hyps_per_beam):
Args:
theta: A NestedMap object containing weights' values of this
layer and its children layers.
encoder_outputs: A NestedMap computed by encoder.
step_ids: A tensor of shape [num_hyps_per_beam * src_batch, 1].
in_states: A `.NestedMap` of tensors representing states that the
clients would like to keep track of for each of the active hyps.
Returns:
results: A `.NestedMap` of beam search results. It should contain
the 'atten_probs' and 'log_probs' tensors at the minimal.
Optionally it may contain 'is_last_chunk' if it is decoding a
neural transducer model.
.atten_probs: The updated attention probs, of shape
[num_hyps_per_beam * src_batch, src_len]. src_batch "b" and
hyp_per_beam "h" is represented at index (h * src_batch + b).
.log_probs: Log prob for each of the tokens in the target vocab.
This is of shape [num_hyps_per_beam * src_batch, vocab_size].
src_batch "b" and hyp_per_beam "h" is represented at index
(h * src_batch + b).
.is_last_chunk: Whether or not each of the hyp is at the end of a
chunk. If non-empty, it is of shape
[num_hyps_per_beam * src_batch, 1].
out_states: A `.NestedMap`. The updated states. This 'out_states'
should be of the exact same structure as 'in_states'
This callback is called once every decoding time step after beam_search_step
is called:
.. code-block:: none
def PostBeamSearchStepCallback(theta,
encoder_outputs,
new_step_ids,
other_states):
Args:
theta: A NestedMap object containing weights' values of this
layer and its children layers.
encoder_outputs: A NestedMap computed by encoder.
new_step_ids: Token ids for the next beam search step.
other_states: A `.NestedMap`.
Returns:
final_states, A `.NestedMap`.
"""
  # NOTE(review): the two decorators below look out of place on an instance
  # method that takes `self`; they presumably belonged to `Params`/`__init__`
  # definitions that are missing from this copy of the file -- confirm
  # against the original source before relying on them.
  @classmethod
  @base_layer.initializer
  def _BeamSearchStep(self, theta, encoder_outputs, cur_step, step_ids,
                      core_bs_states, other_states, num_hyps_per_beam,
                      pre_beam_search_step_callback,
                      post_beam_search_step_callback):
    """Extend beam search hyps for one step.

    | num_beams = Number of source sequences to be decoded.
    | num_hyps_per_beam = Number of hyps to keep per source sequence.
    | num_hyps = num_beams * num_hyps_per_beam
    | src_seq_len = Number of time steps in the source sequence.
    | src_batch = Number of examples in the source sequence.
    | tgt_seq_len = Maximum allowed time steps in the target sequence.
    | tgt_batch = num_hyps_per_beam * src_batch

    Args:
      theta: A `.NestedMap` object containing weights' values of the decoder
        layer and its children layers.
      encoder_outputs: A `.NestedMap` containing encoder outputs to be passed
        to the callbacks.
      cur_step: A scalar int tensor, the current time step, 0-based.
      step_ids: An int tensor of shape [num_hyps, 1]. The input ids to the
        current search step.
      core_bs_states: A tuple of core beam search states. This list is
        maintained by this helper class.
      other_states: A `.NestedMap` of other beam search states.
        This `.NestedMap` is managed and updated by the client. It is
        expected that each of its member tensors are of rank >= 1. t[i, ...]
        is the state of the i-th hyp at the beginning of this search step.
      num_hyps_per_beam: Num of hyps to keep per beam.
      pre_beam_search_step_callback: The `PreBeamSearchStepCallback` callback.
        See class header comments for more details.
      post_beam_search_step_callback: The `PostBeamSearchStepCallback` callback.
        See class header comments for more details.

    Returns:
      A tuple of following elements for the next beam search step,
      (next step, all_done, step_ids, core_bs_states, other_states)
    """
    p = self.params

    # Ask the model for this step's log-probs / attention, updating the
    # client-owned states in the process.
    bs_results, other_states = pre_beam_search_step_callback(
        theta, encoder_outputs, step_ids, other_states, num_hyps_per_beam)

    (best_scores, cumulative_scores, in_scores, in_hyps, in_prev_hyps,
     in_done_hyps, in_atten_probs) = core_bs_states

    # Core hyp extension/pruning is delegated to the py_x_ops custom op.
    (out_best_scores, out_cumulative_scores, out_scores, out_hyps,
     out_prev_hyps, out_done_hyps, out_atten_probs,
     all_done) = py_x_ops.beam_search_step(
         bs_results.log_probs,
         bs_results.atten_probs,
         best_scores,
         cumulative_scores,
         in_scores,
         in_hyps,
         in_prev_hyps,
         in_done_hyps,
         in_atten_probs,
         bs_results.is_last_chunk if self._model_uses_eoc_id else [],
         cur_step,
         eoc_id=p.target_eoc_id,
         eos_id=p.target_eos_id,
         beam_size=p.beam_size,
         num_hyps_per_beam=num_hyps_per_beam,
         valid_eos_max_logit_delta=p.valid_eos_max_logit_delta,
         merge_paths=p.merge_paths,
         allow_empty_terminated_hyp=p.allow_empty_terminated_hyp,
         ensure_full_beam=p.ensure_full_beam,
         force_eos_in_last_step=p.force_eos_in_last_step)

    # Token ids selected for each surviving hyp become the next step's input.
    new_step_ids = tf.reshape(out_hyps[cur_step, :], tf.shape(step_ids))
    new_step_ids.set_shape(step_ids.get_shape())
    # Parent hyp index of each surviving hyp, used to reorder client states.
    old_hyp_ids = tf.reshape(
        tf.slice(out_prev_hyps, begin=[cur_step, 0], size=[1, -1]), [-1])
    new_bs_states = (out_best_scores, out_cumulative_scores, out_scores,
                     out_hyps, out_prev_hyps, out_done_hyps, out_atten_probs)

    def ReOrderHyps(x_in):
      """Reorders x_in based on prev hyp ids."""
      if (isinstance(x_in, tf.Tensor) and x_in.shape.ndims and
          x_in.shape.ndims > 0):
        if x_in.shape.ndims > 2 and not p.batch_major_state:
          # Time-major states keep the hyp dimension on axis 1.
          x_out = tf.gather(x_in, old_hyp_ids, axis=1)
        else:
          x_out = tf.gather(x_in, old_hyp_ids)
        x_out.set_shape(x_in.get_shape())
        return x_out
      else:
        return x_in

    new_other_states = other_states.Transform(ReOrderHyps)

    final_other_states = post_beam_search_step_callback(
        theta, encoder_outputs, new_step_ids, new_other_states)

    return (cur_step + 1, all_done, new_step_ids, new_bs_states,
            final_other_states)
def BeamSearchDecode(self,
theta,
encoder_outputs,
num_hyps_per_beam_override=0,
init_beam_search_state=None,
pre_beam_search_step_callback=None,
post_beam_search_step_callback=None,
max_steps=None):
"""Performs beam-search based decoding.
Args:
theta: A NestedMap object containing weights' values of the decoder
layer and its children layers.
encoder_outputs: A NestedMap containing encoder outputs to be passed
to the callbacks.
num_hyps_per_beam_override: If set to a value <= 0, this parameter is
ignored. If set to a value > 0, then this value will be used to
override `p.num_hyps_per_beam`.
init_beam_search_state: The `InitBeamSearchState` callback. Please refer
to the class header comments for more details.
pre_beam_search_step_callback: The `PreBeamSearchStepCallback` callback.
Please refer to the class header comments for more details.
post_beam_search_step_callback: The `PostBeamSearchStepCallback` callback.
Please refer to the class header comments for more details.
max_steps: maximum beam search steps. If None,
use self.params.target_seq_len.
Returns:
A `BeamSearchDecodeOutput`.
"""
p = self.params
num_hyps_per_beam = p.num_hyps_per_beam
if num_hyps_per_beam_override > 0:
num_hyps_per_beam = num_hyps_per_beam_override
if max_steps is None:
max_steps = p.target_seq_len
initial_results, other_states = init_beam_search_state(
theta, encoder_outputs, num_hyps_per_beam)
num_hyps = tf.shape(initial_results.log_probs)[0]
num_beams = num_hyps // num_hyps_per_beam
step_ids = tf.fill([num_hyps, 1],
tf.constant(p.target_sos_id, dtype=tf.int32))
min_score = -1e36
best_scores = (tf.zeros(shape=[num_beams], dtype=p.dtype) + min_score)
cumulative_scores = tf.zeros(shape=[num_hyps], dtype=p.dtype)
in_scores = tf.zeros([max_steps, num_hyps], dtype=p.dtype)
in_hyps = tf.zeros([max_steps, num_hyps], dtype=tf.int32)
in_prev_hyps = tf.zeros([max_steps, num_hyps], dtype=tf.int32)
in_done_hyps = tf.zeros([max_steps, num_hyps], dtype=tf.string)
bs_atten_probs = tf.zeros(
[max_steps, num_hyps,
tf.shape(initial_results.atten_probs)[1]],
dtype=p.dtype)
cur_step = tf.constant(0, dtype=tf.int32)
all_done = tf.constant(False, dtype=tf.bool)
core_bs_states = (best_scores, cumulative_scores, in_scores, in_hyps,
in_prev_hyps, in_done_hyps, bs_atten_probs)
flat_other_states = other_states.Flatten()
_, _, _, final_bs_states, flat_final_other_states = tf.while_loop(
LoopContinue,
LoopBody,
loop_vars=(cur_step, all_done, step_ids, core_bs_states,
flat_other_states),
parallel_iterations=10,
back_prop=False,
swap_memory=False,
shape_invariants=(tf.TensorShape(cur_step.get_shape()),
tf.TensorShape(all_done.get_shape()),
tf.TensorShape(step_ids.get_shape()),
_GetShapes(core_bs_states),
_GetShapes(flat_other_states, none_shapes=True)))
# [target_seq_len, num_beams * num_hyps_per_beam].
final_done_hyps = final_bs_states[5]
final_other_states = other_states.Pack(flat_final_other_states)
# TODO(rpang): avoid inspecting 'encoder_outputs'.
source_paddings = encoder_outputs.padding
if isinstance(source_paddings, py_utils.NestedMap):
source_seq_lengths = tf.to_int32(
tf.reduce_sum(1.0 - tf.transpose(source_paddings.Flatten()[0]), 1))
else:
source_seq_lengths = tf.to_int32(
tf.reduce_sum(1.0 - tf.transpose(source_paddings), 1))
# [num_beams, num_hyps_per_beam].
topk_hyps = py_x_ops.top_k_terminated_hyps(
final_done_hyps,
source_seq_lengths,
k=num_hyps_per_beam,
num_hyps_per_beam=num_hyps_per_beam,
length_normalization=p.length_normalization,
coverage_penalty=p.coverage_penalty,
target_seq_length_ratio=p.target_seq_length_ratio,
eoc_id=p.target_eoc_id,
merge_paths=p.merge_paths)
# [num_beams * num_hyps_per_beam, ...].
max_seq_length = 0 if isinstance(max_steps, tf.Tensor) else max_steps
topk_ids, topk_lens, topk_scores = py_x_ops.unpack_hyp(
tf.reshape(topk_hyps, [-1]), max_seq_length=max_seq_length)
# [num_beams, num_hyps_per_beam].
topk_scores = tf.reshape(topk_scores, tf.shape(topk_hyps))
return BeamSearchDecodeOutput(final_done_hyps, topk_hyps, topk_ids,
topk_lens, topk_scores, None,
final_other_states)
def _GetShapes(tensors, none_shapes=False):
  """Util for getting nested structure of shapes from structure of tensors.

  Args:
    tensors: Structure of Tensors to get shapes for.
    none_shapes: Returns None shapes if true.

  Returns:
    The same structure as tensors but of corresponding `TensorShape` objects.
  """

  def _ToShape(tensor):
    """Maps a single leaf to its (possibly anonymized) TensorShape."""
    static_shape = tensor.get_shape() if isinstance(tensor, tf.Tensor) else None
    if not none_shapes:
      return tf.TensorShape(static_shape)
    # Anonymize: keep the rank but drop every dimension size; fall back to
    # a fully unknown shape for non-tensors and rank-0 shapes.
    if static_shape:
      return tf.TensorShape([None] * len(static_shape))
    return tf.TensorShape(None)

  flat_shapes = [
      _ToShape(t) for t in tf.contrib.framework.nest.flatten(tensors)
  ]
  return type(tensors)(
      tf.contrib.framework.nest.pack_sequence_as(tensors, flat_shapes))
def MergeBeamSearchOutputs(max_hyps_per_beam, beam_search_outputs):
  """Merges beam search hyps from multiple decoders.

  Args:
    max_hyps_per_beam: the number of top hyps in the merged results. Must be
      less than or equal to total number of input hyps.
    beam_search_outputs: a list of BeamSearchDecodeOutput objects. Must share
      the same source_batch and max sequence length.

  Returns:
    A BeamSearchDecodeOutput object containing max_hyps_per_beam hypotheses per
    beam.
  """
  source_batch = tf.shape(beam_search_outputs[0].topk_hyps)[0]
  value_dict = {}
  for output in beam_search_outputs:
    hyps_per_beam = py_utils.with_dependencies([
        py_utils.assert_equal(source_batch,
                              tf.shape(output.topk_hyps)[0]),
    ],
                                               tf.shape(output.topk_hyps)[1])
    # .items() instead of the Python-2-only .iteritems(), which raises
    # AttributeError on Python 3.
    for k, v in output._asdict().items():
      if v is None:
        continue
      if k == 'done_hyps':
        v = tf.transpose(v)
      if k not in value_dict:
        value_dict[k] = []
      value_dict[k].append(tf.reshape(v, [source_batch, hyps_per_beam, -1]))

  # Concatenate the tensors along the 'num_hyps_per_beam' dimension.
  concatenated = {}
  for k, values in value_dict.items():
    if len(values) != len(beam_search_outputs):
      raise ValueError(
          'Incomplete values for %s: %s' % (k, beam_search_outputs))
    concatenated[k] = tf.concat(values, axis=1)

  scores = concatenated['topk_scores']
  # Push empty hyps (length 0) to the bottom of the ranking.
  scores = tf.where(
      tf.equal(concatenated['topk_lens'], 0), tf.fill(tf.shape(scores), -1e6),
      scores)
  scores = tf.squeeze(scores, -1)

  # Select top max_hyps_per_beam indices per beam.
  _, top_indices = tf.nn.top_k(scores, max_hyps_per_beam)
  batch_ids = tf.tile(
      tf.expand_dims(tf.range(source_batch), -1), [1, max_hyps_per_beam])
  # [source_batch, max_hyps_per_beam, 2]
  gather_indices = tf.stack([batch_ids, top_indices], axis=-1)

  # Gather the merged top hyps according to 'gather_indices'.
  top = beam_search_outputs[0]._asdict()
  total_hyps = source_batch * max_hyps_per_beam
  for k, v in concatenated.items():
    v = tf.gather_nd(v, gather_indices)
    if k == 'done_hyps':
      v = tf.transpose(tf.reshape(v, [total_hyps, -1]))
    elif k == 'topk_hyps':
      v = tf.reshape(v, [source_batch, max_hyps_per_beam])
    elif k == 'topk_ids':
      v = tf.reshape(v, [total_hyps, -1])
    elif k in ('topk_lens', 'topk_scores', 'topk_decoded'):
      v = tf.reshape(v, [total_hyps])
    else:
      raise ValueError('Unexpected field: %s' % k)
    top[k] = v
  return BeamSearchDecodeOutput(**top)
| [
2,
15069,
2864,
383,
309,
22854,
37535,
46665,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
... | 2.292143 | 8,667 |
"""
* Created by Synerty Pty Ltd
*
* This software is open source, the MIT license applies.
*
* Website : http://www.synerty.com
* Support : support@synerty.com
"""
import logging
from abc import ABCMeta, abstractmethod
from collections import deque
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks
from twisted.internet.error import ConnectionDone, ConnectionLost
from twisted.internet.protocol import Protocol, connectionDone
from vortex.DeferUtil import nonConcurrentMethod
from vortex.PayloadEnvelope import PayloadEnvelope
from vortex.PayloadIO import PayloadIO
# Module-level logger, named after this module.
logger = logging.getLogger(__name__)
| [
37811,
198,
1635,
15622,
416,
1632,
1008,
774,
350,
774,
12052,
198,
1635,
198,
1635,
770,
3788,
318,
1280,
2723,
11,
262,
17168,
5964,
8991,
13,
198,
1635,
198,
1635,
15887,
1058,
2638,
1378,
2503,
13,
1837,
1008,
774,
13,
785,
198,
... | 3.717514 | 177 |
# -*- coding: utf-8 -*-
import matplotlib
# Select the non-interactive Agg backend (must happen before pyplot is
# imported) so figures can be rendered without a display, e.g. on headless
# servers or in batch jobs.
matplotlib.use("Agg")
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
11748,
2603,
29487,
8019,
201,
198,
6759,
29487,
8019,
13,
1904,
7203,
46384,
4943,
201,
198,
201,
198
] | 2.090909 | 33 |
import numpy as np
import tensorflow as tf
from src.etc import audio
from src.tac.core.wavenet_vocoder import util
from src.tac.core.wavenet_vocoder.models.gaussian import sample_from_gaussian
from src.tac.core.wavenet_vocoder.models.mixture import \
sample_from_discretized_mix_logistic
from src.tac.core.wavenet_vocoder.models.modules import (
Conv1D1x1, ConvTranspose1D, ConvTranspose2D,
DiscretizedMixtureLogisticLoss, Embedding,
GaussianMaximumLikelihoodEstimation, LeakyReluActivation,
MaskedCrossEntropyLoss, MaskedMeanSquaredError, NearestNeighborUpsample,
ReluActivation, ResidualConv1DGLU, ResizeConvolution, SubPixelConvolution,
WeightNorm)
from src.tac.core.wavenet_vocoder.util import *
from src.tac.infolog import log
def _expand_global_features(batch_size, time_length, global_features, data_format='BCT'):
    """Expand global conditioning features to all time steps.

    Args:
        batch_size: int (kept for interface compatibility; the batch size is
            read from the tensor itself).
        time_length: int, number of time steps to broadcast to.
        global_features: Tensor of shape [batch_size, channels] or
            [batch_size, channels, 1], or None when global conditioning is
            disabled.
        data_format: string, 'BCT' to get output of shape
            [batch_size, channels, time_length], or 'BTC' to get output of
            shape [batch_size, time_length, channels].

    Returns:
        None if global_features is None, otherwise a Tensor of shape
        [batch_size, channels, time_length] ('BCT') or
        [batch_size, time_length, channels] ('BTC').

    Raises:
        ValueError: if data_format is neither 'BCT' nor 'BTC'.
    """
    accepted_formats = ['BCT', 'BTC']
    if data_format not in accepted_formats:
        # Typo fixed: the message used to read "unknow".
        raise ValueError('{} is an unknown data format, accepted formats are "BCT" and "BTC"'.format(data_format))

    if global_features is None:
        return None

    # [batch_size, channels] ==> [batch_size, channels, 1]
    g = tf.reshape(global_features, [tf.shape(global_features)[0], tf.shape(global_features)[1], 1])

    # [batch_size, channels, 1] ==> [batch_size, channels, time_length]
    g = tf.tile(g, [1, 1, time_length])

    if data_format == 'BCT':
        return g

    # [batch_size, channels, time_length] ==> [batch_size, time_length, channels]
    return tf.transpose(g, [0, 2, 1])
def receptive_field_size(total_layers, num_cycles, kernel_size, dilation=lambda x: 2**x):
    """Compute receptive field size.

    Args:
        total_layers: int, total number of dilated conv layers. Must be a
            multiple of num_cycles.
        num_cycles: int, number of dilation cycles; the dilation schedule
            restarts at the beginning of each cycle.
        kernel_size: int, convolution kernel size.
        dilation: callable, maps the layer index within a cycle to its
            dilation factor. Use "lambda x: 1" to disable dilated
            convolutions.

    Returns:
        int: receptive field size in samples.

    Raises:
        ValueError: if total_layers is not a multiple of num_cycles.
    """
    # A plain `assert` would be stripped under `python -O`; validate
    # explicitly so bad configs always fail loudly.
    if total_layers % num_cycles != 0:
        raise ValueError(
            'total_layers ({}) must be a multiple of num_cycles ({})'.format(
                total_layers, num_cycles))
    layers_per_cycle = total_layers // num_cycles
    dilations = [dilation(i % layers_per_cycle) for i in range(total_layers)]
    return (kernel_size - 1) * sum(dilations) + 1
def maybe_Normalize_weights(layer, weight_normalization=True, init=False, init_scale=1.):
    """Optionally wrap a layer in a Weight Normalization wrapper.

    Args:
        layer: tf layers instance, the layer candidate for normalization.
        weight_normalization: Boolean, whether to apply weight normalization.
        init: Boolean, True during the data dependent initialization run.
        init_scale: Float, initialisation scale of the data dependent
            initialization. Usually 1.

    Returns:
        The layer wrapped in WeightNorm when weight_normalization is True,
        otherwise the layer unchanged.
    """
    if not weight_normalization:
        return layer
    return WeightNorm(layer, init, init_scale)
class WaveNet():
"""Tacotron-2 Wavenet Vocoder model.
"""
def initialize(self, y, c, g, input_lengths, x=None, synthesis_length=None, test_inputs=None, split_infos=None):
'''Initialize wavenet graph for train, eval and test cases.
'''
hparams = self._hparams
self.is_training = x is not None
self.is_evaluating = not self.is_training and y is not None
#Set all convolutions to corresponding mode
self.set_mode(self.is_training)
split_device = '/cpu:0' if self._hparams.wavenet_num_gpus > 1 or self._hparams.split_on_cpu else '/gpu:0'
with tf.device(split_device):
hp = self._hparams
lout_int = [tf.int32] * hp.wavenet_num_gpus
lout_float = [tf.float32] * hp.wavenet_num_gpus
tower_input_lengths = tf.split(input_lengths, num_or_size_splits=hp.wavenet_num_gpus, axis=0) if input_lengths is not None else [input_lengths] * hp.wavenet_num_gpus
tower_y = tf.split(y, num_or_size_splits=hp.wavenet_num_gpus, axis=0) if y is not None else [y] * hp.wavenet_num_gpus
tower_x = tf.split(x, num_or_size_splits=hp.wavenet_num_gpus, axis=0) if x is not None else [x] * hp.wavenet_num_gpus
tower_c = tf.split(c, num_or_size_splits=hp.wavenet_num_gpus, axis=0) if self.local_conditioning_enabled() else [None] * hp.wavenet_num_gpus
tower_g = tf.split(g, num_or_size_splits=hp.wavenet_num_gpus, axis=0) if self.global_conditioning_enabled() else [None] * hp.wavenet_num_gpus
tower_test_inputs = tf.split(test_inputs, num_or_size_splits=hp.wavenet_num_gpus, axis=0) if test_inputs is not None else [test_inputs] * hp.wavenet_num_gpus
self.tower_y_hat_q = []
self.tower_y_hat_train = []
self.tower_y = []
self.tower_input_lengths = []
self.tower_means = []
self.tower_log_scales = []
self.tower_y_hat_log = []
self.tower_y_log = []
self.tower_c = []
self.tower_y_eval = []
self.tower_eval_length = []
self.tower_y_hat = []
self.tower_y_target = []
self.tower_eval_c = []
self.tower_mask = []
self.tower_upsampled_local_features = []
self.tower_eval_upsampled_local_features = []
self.tower_synth_upsampled_local_features = []
log('Initializing Wavenet model. Dimensions (? = dynamic shape): ')
log(' Train mode: {}'.format(self.is_training))
log(' Eval mode: {}'.format(self.is_evaluating))
log(' Synthesis mode: {}'.format(not (self.is_training or self.is_evaluating)))
#1. Declare GPU devices
gpus = ['/gpu:{}'.format(i) for i in range(hp.wavenet_num_gpus)]
for i in range(hp.wavenet_num_gpus):
with tf.device(tf.train.replica_device_setter(ps_tasks=1, ps_device='/cpu:0', worker_device=gpus[i])):
with tf.variable_scope('inference') as scope:
log(' device: {}'.format(i))
#Training
if self.is_training:
batch_size = tf.shape(x)[0]
#[batch_size, time_length, 1]
self.tower_mask.append(self.get_mask(tower_input_lengths[i], maxlen=tf.shape(tower_x[i])[-1])) #To be used in loss computation
#[batch_size, channels, time_length]
y_hat_train = self.step(tower_x[i], tower_c[i], tower_g[i], softmax=False) #softmax is automatically computed inside softmax_cross_entropy if needed
if is_mulaw_quantize(hparams.input_type):
#[batch_size, time_length, channels]
self.tower_y_hat_q.append(tf.transpose(y_hat_train, [0, 2, 1]))
self.tower_y_hat_train.append(y_hat_train)
self.tower_y.append(tower_y[i])
self.tower_input_lengths.append(tower_input_lengths[i])
#Add mean and scale stats if using Guassian distribution output (there would be too many logistics if using MoL)
if self._hparams.out_channels == 2:
self.tower_means.append(y_hat_train[:, 0, :])
self.tower_log_scales.append(y_hat_train[:, 1, :])
else:
self.tower_means.append(None)
#Graph extension for log saving
#[batch_size, time_length]
shape_control = (batch_size, tf.shape(tower_x[i])[-1], 1)
with tf.control_dependencies([tf.assert_equal(tf.shape(tower_y[i]), shape_control)]):
y_log = tf.squeeze(tower_y[i], [-1])
if is_mulaw_quantize(hparams.input_type):
self.tower_y[i] = y_log
y_hat_log = tf.cond(tf.equal(tf.rank(y_hat_train), 4),
lambda: tf.squeeze(y_hat_train, [-1]),
lambda: y_hat_train)
y_hat_log = tf.reshape(y_hat_log, [batch_size, hparams.out_channels, -1])
if is_mulaw_quantize(hparams.input_type):
#[batch_size, time_length]
y_hat_log = tf.argmax(tf.nn.softmax(y_hat_log, axis=1), 1)
y_hat_log = util.inv_mulaw_quantize(y_hat_log, hparams.quantize_channels)
y_log = util.inv_mulaw_quantize(y_log, hparams.quantize_channels)
else:
#[batch_size, time_length]
if hparams.out_channels == 2:
y_hat_log = sample_from_gaussian(
y_hat_log, log_scale_min_gauss=hparams.log_scale_min_gauss)
else:
y_hat_log = sample_from_discretized_mix_logistic(
y_hat_log, log_scale_min=hparams.log_scale_min)
if is_mulaw(hparams.input_type):
y_hat_log = util.inv_mulaw(y_hat_log, hparams.quantize_channels)
y_log = util.inv_mulaw(y_log, hparams.quantize_channels)
self.tower_y_hat_log.append(y_hat_log)
self.tower_y_log.append(y_log)
self.tower_c.append(tower_c[i])
self.tower_upsampled_local_features.append(self.upsampled_local_features)
log(' inputs: {}'.format(tower_x[i].shape))
if self.local_conditioning_enabled():
log(' local_condition: {}'.format(tower_c[i].shape))
if self.has_speaker_embedding():
log(' global_condition: {}'.format(tower_g[i].shape))
log(' targets: {}'.format(y_log.shape))
log(' outputs: {}'.format(y_hat_log.shape))
#evaluating
elif self.is_evaluating:
#[time_length, ]
idx = 0
length = tower_input_lengths[i][idx]
y_target = tf.reshape(tower_y[i][idx], [-1])[:length]
test_inputs = tf.reshape(y_target, [1, -1, 1]) if not hparams.wavenet_natural_eval else None
if tower_c[i] is not None:
tower_c[i] = tf.expand_dims(tower_c[i][idx, :, :length], axis=0)
with tf.control_dependencies([tf.assert_equal(tf.rank(tower_c[i]), 3)]):
tower_c[i] = tf.identity(tower_c[i], name='eval_assert_c_rank_op')
if tower_g[i] is not None:
tower_g[i] = tf.expand_dims(tower_g[i][idx], axis=0)
batch_size = tf.shape(tower_c[i])[0]
#Start silence frame
if is_mulaw_quantize(hparams.input_type):
initial_value = mulaw_quantize(0, hparams.quantize_channels)
elif is_mulaw(hparams.input_type):
initial_value = mulaw(0.0, hparams.quantize_channels)
else:
initial_value = 0.0
#[channels, ]
if is_mulaw_quantize(hparams.input_type):
initial_input = tf.one_hot(indices=initial_value, depth=hparams.quantize_channels, dtype=tf.float32)
initial_input = tf.tile(tf.reshape(initial_input, [1, 1, hparams.quantize_channels]), [batch_size, 1, 1])
else:
initial_input = tf.ones([batch_size, 1, 1], tf.float32) * initial_value
#Fast eval
y_hat = self.incremental(initial_input, c=tower_c[i], g=tower_g[i], time_length=length, test_inputs=test_inputs,
softmax=False, quantize=True, log_scale_min=hparams.log_scale_min, log_scale_min_gauss=hparams.log_scale_min_gauss)
#Save targets and length for eval loss computation
if is_mulaw_quantize(hparams.input_type):
self.tower_y_eval.append(tf.reshape(y[idx], [1, -1])[:, :length])
else:
self.tower_y_eval.append(tf.expand_dims(y[idx], axis=0)[:, :length, :])
self.tower_eval_length.append(length)
if is_mulaw_quantize(hparams.input_type):
y_hat = tf.reshape(tf.argmax(y_hat, axis=1), [-1])
y_hat = inv_mulaw_quantize(y_hat, hparams.quantize_channels)
y_target = inv_mulaw_quantize(y_target, hparams.quantize_channels)
elif is_mulaw(hparams.input_type):
y_hat = inv_mulaw(tf.reshape(y_hat, [-1]), hparams.quantize_channels)
y_target = inv_mulaw(y_target, hparams.quantize_channels)
else:
y_hat = tf.reshape(y_hat, [-1])
self.tower_y_hat.append(y_hat)
self.tower_y_target.append(y_target)
self.tower_eval_c.append(tower_c[i][idx])
self.tower_eval_upsampled_local_features.append(self.upsampled_local_features[idx])
if self.local_conditioning_enabled():
log(' local_condition: {}'.format(tower_c[i].shape))
if self.has_speaker_embedding():
log(' global_condition: {}'.format(tower_g[i].shape))
log(' targets: {}'.format(y_target.shape))
log(' outputs: {}'.format(y_hat.shape))
#synthesizing
else:
batch_size = tf.shape(tower_c[i])[0]
if c is None:
assert synthesis_length is not None
else:
#[batch_size, local_condition_time, local_condition_dimension(num_mels)]
message = ('Expected 3 dimension shape [batch_size(1), time_length, {}] for local condition features but found {}'.format(
hparams.cin_channels, tower_c[i].shape))
with tf.control_dependencies([tf.assert_equal(tf.rank(tower_c[i]), 3, message=message)]):
tower_c[i] = tf.identity(tower_c[i], name='synthesis_assert_c_rank_op')
Tc = tf.shape(tower_c[i])[1]
upsample_factor = audio.get_hop_size(self._hparams)
#Overwrite length with respect to local condition features
synthesis_length = Tc * upsample_factor
#[batch_size, local_condition_dimension, local_condition_time]
#time_length will be corrected using the upsample network
tower_c[i] = tf.transpose(tower_c[i], [0, 2, 1])
if tower_g[i] is not None:
assert tower_g[i].shape == (batch_size, 1)
#Start silence frame
if is_mulaw_quantize(hparams.input_type):
initial_value = mulaw_quantize(0, hparams.quantize_channels)
elif is_mulaw(hparams.input_type):
initial_value = mulaw(0.0, hparams.quantize_channels)
else:
initial_value = 0.0
if is_mulaw_quantize(hparams.input_type):
assert initial_value >= 0 and initial_value < hparams.quantize_channels
initial_input = tf.one_hot(indices=initial_value, depth=hparams.quantize_channels, dtype=tf.float32)
initial_input = tf.tile(tf.reshape(initial_input, [1, 1, hparams.quantize_channels]), [batch_size, 1, 1])
else:
initial_input = tf.ones([batch_size, 1, 1], tf.float32) * initial_value
y_hat = self.incremental(initial_input, c=tower_c[i], g=tower_g[i], time_length=synthesis_length, test_inputs=tower_test_inputs[i],
softmax=False, quantize=True, log_scale_min=hparams.log_scale_min, log_scale_min_gauss=hparams.log_scale_min_gauss)
if is_mulaw_quantize(hparams.input_type):
y_hat = tf.reshape(tf.argmax(y_hat, axis=1), [batch_size, -1])
y_hat = util.inv_mulaw_quantize(y_hat, hparams.quantize_channels)
elif is_mulaw(hparams.input_type):
y_hat = util.inv_mulaw(tf.reshape(y_hat, [batch_size, -1]), hparams.quantize_channels)
else:
y_hat = tf.reshape(y_hat, [batch_size, -1])
self.tower_y_hat.append(y_hat)
self.tower_synth_upsampled_local_features.append(self.upsampled_local_features)
if self.local_conditioning_enabled():
log(' local_condition: {}'.format(tower_c[i].shape))
if self.has_speaker_embedding():
log(' global_condition: {}'.format(tower_g[i].shape))
log(' outputs: {}'.format(y_hat.shape))
self.variables = tf.trainable_variables()
log(' Receptive Field: ({} samples / {:.1f} ms)'.format(self.receptive_field, self.receptive_field / hparams.sample_rate * 1000.))
#1_000_000 is causing syntax problems for some people?! Python please :)
log(' WaveNet Parameters: {:.3f} Million.'.format(np.sum([np.prod(v.get_shape().as_list()) for v in self.variables]) / 1000000))
self.ema = tf.train.ExponentialMovingAverage(decay=hparams.wavenet_ema_decay)
	def add_loss(self):
		'''Adds loss computation to the graph. Supposes that initialize function has already been called.

		Builds one loss sub-graph per GPU tower, collects the per-tower losses in
		self.tower_loss and exposes their mean as self.loss (training) or
		self.eval_loss (evaluation).
		'''
		self.tower_loss = []
		total_loss = 0

		# One loss sub-graph per GPU tower; variables are served from the CPU.
		gpus = ['/gpu:{}'.format(i) for i in range(self._hparams.wavenet_num_gpus)]
		for i in range(self._hparams.wavenet_num_gpus):
			with tf.device(tf.train.replica_device_setter(ps_tasks=1, ps_device='/cpu:0', worker_device=gpus[i])):
				with tf.variable_scope('loss') as scope:
					if self.is_training:
						# Autoregressive shift: predictions at step t are scored
						# against targets at step t+1 (outputs drop the last frame,
						# targets drop the first).
						if is_mulaw_quantize(self._hparams.input_type):
							tower_loss = MaskedCrossEntropyLoss(self.tower_y_hat_q[i][:, :-1, :], self.tower_y[i][:, 1:], mask=self.tower_mask[i])
						else:
							if self._hparams.out_channels == 2:
								# 2 output channels <=> single Gaussian (mean, log-scale).
								tower_loss = GaussianMaximumLikelihoodEstimation(self.tower_y_hat_train[i][:, :, :-1], self.tower_y[i][:, 1:, :],
									hparams=self._hparams, mask=self.tower_mask[i])
							else:
								# Otherwise a discretized mixture of logistics output.
								tower_loss = DiscretizedMixtureLogisticLoss(self.tower_y_hat_train[i][:, :, :-1], self.tower_y[i][:, 1:, :],
									hparams=self._hparams, mask=self.tower_mask[i])

					elif self.is_evaluating:
						# Eval predictions come from the incremental decoder; they are
						# already aligned with the targets, so no shifting here.
						if is_mulaw_quantize(self._hparams.input_type):
							tower_loss = MaskedCrossEntropyLoss(self.tower_y_hat_eval[i], self.tower_y_eval[i], lengths=[self.tower_eval_length[i]])
						else:
							if self._hparams.out_channels == 2:
								tower_loss = GaussianMaximumLikelihoodEstimation(self.tower_y_hat_eval[i], self.tower_y_eval[i],
									hparams=self._hparams, lengths=[self.tower_eval_length[i]])
							else:
								tower_loss = DiscretizedMixtureLogisticLoss(self.tower_y_hat_eval[i], self.tower_y_eval[i],
									hparams=self._hparams, lengths=[self.tower_eval_length[i]])

					else:
						raise RuntimeError('Model not in train/eval mode but computing loss: Where did this go wrong?')

			#Compute final loss
			self.tower_loss.append(tower_loss)
			total_loss += tower_loss

		# Expose the mean loss across towers under the mode-appropriate name.
		if self.is_training:
			self.loss = total_loss / self._hparams.wavenet_num_gpus

		else:
			self.eval_loss = total_loss / self._hparams.wavenet_num_gpus
def add_optimizer(self, global_step):
'''Adds optimizer to the graph. Supposes that initialize function has already been called.
'''
hp = self._hparams
tower_gradients = []
# 1. Declare GPU devices
gpus = ['/gpu:{}'.format(i) for i in range(hp.wavenet_num_gpus)]
grad_device = '/cpu:0' if hp.tacotron_num_gpus > 1 else gpus[0]
with tf.device(grad_device):
with tf.variable_scope('optimizer'):
#Create lr schedule
if hp.wavenet_lr_schedule == 'noam':
learning_rate = self._noam_learning_rate_decay(hp.wavenet_learning_rate,
global_step,
warmup_steps=hp.wavenet_warmup)
else:
assert hp.wavenet_lr_schedule == 'exponential'
learning_rate = self._exponential_learning_rate_decay(hp.wavenet_learning_rate,
global_step,
hp.wavenet_decay_rate,
hp.wavenet_decay_steps)
#Adam optimization
self.learning_rate = learning_rate
optimizer = tf.train.AdamOptimizer(learning_rate, hp.wavenet_adam_beta1,
hp.wavenet_adam_beta2, hp.wavenet_adam_epsilon)
# 2. Compute Gradient
for i in range(hp.wavenet_num_gpus):
#Device placemenet
with tf.device(tf.train.replica_device_setter(ps_tasks=1, ps_device='/cpu:0', worker_device=gpus[i])):
with tf.variable_scope('optimizer') as scope:
gradients = optimizer.compute_gradients(self.tower_loss[i])
tower_gradients.append(gradients)
# 3. Average Gradient
with tf.device(grad_device):
avg_grads = []
variables = []
for grad_and_vars in zip(*tower_gradients):
# each_grads_vars = ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
if grad_and_vars[0][0] is not None:
grads = []
for g, _ in grad_and_vars:
expanded_g = tf.expand_dims(g, 0)
#Append on a "tower" dimension which we will average over below.
grads.append(expanded_g)
#Average over the 'tower' dimension.
grad = tf.concat(axis=0, values=grads)
grad = tf.reduce_mean(grad, 0)
else:
grad = grad_and_vars[0][0]
v = grad_and_vars[0][1]
avg_grads.append(grad)
variables.append(v)
self.gradients = avg_grads
#Gradients clipping
if hp.wavenet_clip_gradients:
#Clip each gradient by a [min, max] range of values and its norm by [0, max_norm_value]
clipped_grads = []
for g in avg_grads:
if g is not None:
clipped_g = tf.clip_by_norm(g, hp.wavenet_gradient_max_norm)
clipped_g = tf.clip_by_value(clipped_g, -hp.wavenet_gradient_max_value, hp.wavenet_gradient_max_value)
clipped_grads.append(clipped_g)
else:
clipped_grads.append(g)
else:
clipped_grads = avg_grads
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
adam_optimize = optimizer.apply_gradients(zip(clipped_grads, variables),
global_step=global_step)
#Add exponential moving average
#https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage
#Use adam optimization process as a dependency
with tf.control_dependencies([adam_optimize]):
#Create the shadow variables and add ops to maintain moving averages
#Also updates moving averages after each update step
#This is the optimize call instead of traditional adam_optimize one.
assert set(self.variables) == set(variables) #Verify all trainable variables are being averaged
self.optimize = self.ema.apply(variables)
#Sanity check functions
	def step(self, x, c=None, g=None, softmax=False):
		"""Forward step

		Args:
			x: Tensor of shape [batch_size, channels, time_length], One-hot encoded audio signal.
			c: Tensor of shape [batch_size, cin_channels, time_length], Local conditioning features.
			g: Tensor of shape [batch_size, gin_channels, 1] or Ids of shape [batch_size, 1],
				Global conditioning features.
				Note: set hparams.use_speaker_embedding to False to disable embedding layer and
				use external One-hot encoded features.
			softmax: Boolean, Whether to apply softmax.

		Returns:
			a Tensor of shape [batch_size, out_channels, time_length]
		"""
		#[batch_size, channels, time_length] -> [batch_size, time_length, channels]
		batch_size = tf.shape(x)[0]
		time_length = tf.shape(x)[-1]

		if g is not None:
			if self.embed_speakers is not None:
				# Speaker ids -> learned embeddings.
				#[batch_size, 1] ==> [batch_size, 1, gin_channels]
				g = self.embed_speakers(tf.reshape(g, [batch_size, -1]))
				#[batch_size, gin_channels, 1]
				with tf.control_dependencies([tf.assert_equal(tf.rank(g), 3)]):
					g = tf.transpose(g, [0, 2, 1])

		#Expand global conditioning features to all time steps
		g_bct = _expand_global_features(batch_size, time_length, g, data_format='BCT')

		if c is not None:
			# Upsample local conditioning (e.g. mel frames) to the audio sample rate.
			# The extra axis to insert depends on the chosen upsampling layer layout.
			if self._hparams.upsample_type == '2D':
				#[batch_size, 1, cin_channels, time_length]
				expand_dim = 1
			elif self._hparams.upsample_type == '1D':
				#[batch_size, cin_channels, 1, time_length]
				expand_dim = 2
			else:
				assert self._hparams.upsample_type in ('Resize', 'SubPixel', 'NearestNeighbor')
				#[batch_size, cin_channels, time_length, 1]
				expand_dim = 3

			c = tf.expand_dims(c, axis=expand_dim)

			for transposed_conv in self.upsample_conv:
				c = transposed_conv(c)

			#[batch_size, cin_channels, time_length]
			c = tf.squeeze(c, [expand_dim])

			# Upsampled condition length must match the audio length exactly.
			with tf.control_dependencies([tf.assert_equal(tf.shape(c)[-1], tf.shape(x)[-1])]):
				c = tf.identity(c, name='control_c_and_x_shape')

			# Kept for debug/visualization by callers.
			self.upsampled_local_features = c

		#Feed data to network
		x = self.first_conv(x)
		skips = None
		# Sum skip connections from every residual block.
		for conv in self.residual_layers:
			x, h = conv(x, c=c, g=g_bct)
			if skips is None:
				skips = h
			else:
				skips = skips + h
				if self._hparams.legacy:
					# Legacy mode rescales the running skip sum to keep variance stable.
					skips = skips * np.sqrt(0.5)
		x = skips

		for conv in self.last_conv_layers:
			x = conv(x)

		return tf.nn.softmax(x, axis=1) if softmax else x
	def incremental(self, initial_input, c=None, g=None,
		time_length=100, test_inputs=None,
		softmax=True, quantize=True, log_scale_min=-7.0, log_scale_min_gauss=-7.0):
		"""Incremental forward step

		Inputs of shape [batch_size, channels, time_length] are reshaped to [batch_size, time_length, channels]
		Input of each time step is of shape [batch_size, 1, channels]

		Args:
			initial_input: Tensor of shape [batch_size, channels, 1], initial recurrence input.
			c: Tensor of shape [batch_size, cin_channels, time_length], Local conditioning features
			g: Tensor of shape [batch_size, gin_channels, time_length] or [batch_size, gin_channels, 1]
				global conditioning features
			time_length: int, number of timesteps to generate
			test_inputs: Tensor, teacher forcing inputs (debug)
			softmax: Boolean, whether to apply softmax activation
			quantize: Whether to quantize softmax output before feeding to
				next time step input
			log_scale_min: float, log scale minimum value.

		Returns:
			Tensor of shape [batch_size, channels, time_length] or [batch_size, channels, 1]
				Generated one_hot encoded samples
		"""
		batch_size = tf.shape(initial_input)[0]

		#Note: should reshape to [batch_size, time_length, channels]
		#not [batch_size, channels, time_length]
		if test_inputs is not None:
			if self.scalar_input:
				# NOTE(review): `tf.shape(...)[1] == 1` compares a Tensor to a Python
				# int; in TF 1.x this is plain object equality (always False), so
				# these transposes likely never run — verify against intended layout.
				if tf.shape(test_inputs)[1] == 1:
					test_inputs = tf.transpose(test_inputs, [0, 2, 1])
			else:
				test_inputs = tf.cast(test_inputs, tf.int32)
				# Categorical teacher-forcing inputs -> one-hot vectors.
				test_inputs = tf.one_hot(indices=test_inputs, depth=self._hparams.quantize_channels, dtype=tf.float32)
				test_inputs = tf.squeeze(test_inputs, [2])

				if tf.shape(test_inputs)[1] == self._hparams.out_channels:
					test_inputs = tf.transpose(test_inputs, [0, 2, 1])

			batch_size = tf.shape(test_inputs)[0]
			if time_length is None:
				time_length = tf.shape(test_inputs)[1]
			else:
				time_length = tf.maximum(time_length, tf.shape(test_inputs)[1])

		#Global conditioning
		if g is not None:
			if self.embed_speakers is not None:
				g = self.embed_speakers(tf.reshape(g, [batch_size, -1]))
				#[batch_size, channels, 1]
				with tf.control_dependencies([tf.assert_equal(tf.rank(g), 3)]):
					g = tf.transpose(g, [0, 2, 1])

		# Broadcast (possibly None) global features across all generated steps.
		self.g_btc = _expand_global_features(batch_size, time_length, g, data_format='BTC')

		#Local conditioning
		if c is not None:
			# Same upsampling scheme as in step(); axis depends on upsample layout.
			if self._hparams.upsample_type == '2D':
				#[batch_size, 1, cin_channels, time_length]
				expand_dim = 1
			elif self._hparams.upsample_type == '1D':
				#[batch_size, cin_channels, 1, time_length]
				expand_dim = 2
			else:
				assert self._hparams.upsample_type in ('Resize', 'SubPixel', 'NearestNeighbor')
				#[batch_size, cin_channels, time_length, 1]
				expand_dim = 3

			c = tf.expand_dims(c, axis=expand_dim)

			for upsample_conv in self.upsample_conv:
				c = upsample_conv(c)

			#[batch_size, channels, time_length]
			c = tf.squeeze(c, [expand_dim])

			with tf.control_dependencies([tf.assert_equal(tf.shape(c)[-1], time_length)]):
				# Time-major-ish layout [batch, time, channels] for per-step slicing.
				self.c = tf.transpose(c, [0, 2, 1])

			self.upsampled_local_features = c

		#Initialize loop variables
		if initial_input.shape[1] == self._hparams.out_channels:
			initial_input = tf.transpose(initial_input, [0, 2, 1])

		initial_time = tf.constant(0, dtype=tf.int32)
		# if test_inputs is not None:
		# 	initial_input = tf.expand_dims(test_inputs[:, 0, :], axis=1)
		initial_outputs_ta = tf.TensorArray(dtype=tf.float32, size=0, dynamic_size=True)
		initial_loss_outputs_ta = tf.TensorArray(dtype=tf.float32, size=0, dynamic_size=True)
		#Only use convolutions queues for Residual Blocks main convolutions (only ones with kernel size 3 and dilations, all others are 1x1)
		# Each queue holds the receptive window of one dilated conv:
		# kw + (kw - 1) * (dilation - 1) past samples.
		initial_queues = [tf.zeros((batch_size, res_conv.layer.kw + (res_conv.layer.kw - 1) * (res_conv.layer.dilation_rate[0] - 1), self._hparams.residual_channels),
			name='convolution_queue_{}'.format(i+1)) for i, res_conv in enumerate(self.residual_layers)]

		# NOTE(review): `condition` and `body` are not defined anywhere in this
		# excerpt — presumably nested while_loop helpers were defined above this
		# point in the full file; verify before relying on this code.
		res = tf.while_loop(
			condition,
			body,
			loop_vars=[
				initial_time, initial_outputs_ta, initial_input, initial_loss_outputs_ta, initial_queues
			],
			parallel_iterations=32,
			swap_memory=self._hparams.wavenet_swap_with_cpu)

		outputs_ta = res[1]
		#[time_length, batch_size, channels]
		outputs = outputs_ta.stack()

		#Save eval prediction for eval loss computation
		eval_outputs = res[3].stack()
		self.tower_y_hat_eval = []
		if is_mulaw_quantize(self._hparams.input_type):
			self.tower_y_hat_eval.append(tf.transpose(eval_outputs, [1, 0, 2]))
		else:
			self.tower_y_hat_eval.append(tf.transpose(eval_outputs, [1, 2, 0]))

		#[batch_size, channels, time_length]
		return tf.transpose(outputs, [1, 2, 0])
| [
11748,
299,
32152,
355,
45941,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
198,
6738,
12351,
13,
14784,
1330,
6597,
198,
6738,
12351,
13,
83,
330,
13,
7295,
13,
86,
4005,
316,
62,
18893,
12342,
1330,
7736,
198,
6738,
12351,
13,
83... | 2.312933 | 12,124 |
"""Custom User Model"""
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, \
PermissionsMixin
from django.db import models
class CustomUserManager(BaseUserManager):
    """Manager for CustomUser that creates accounts keyed by email address."""

    def _create_user(self, email, password=None, **extra_fields):
        """Create, persist and return a new user identified by *email*."""
        # Guard clause: an account cannot exist without an email address.
        if not email:
            raise ValueError("User must set an email address")

        # Lower-case the domain part so lookups are case-consistent.
        normalized = self.normalize_email(email)

        new_user = self.model(email=normalized, **extra_fields)
        new_user.set_password(password)  # stores a salted hash, never plaintext
        new_user.save(using=self._db)  # honours multi-database routing
        return new_user
class CustomUser(AbstractBaseUser, PermissionsMixin):
    """Custom User model that supports using email instead of username"""

    # Login identifier (see USERNAME_FIELD below).
    # NOTE(review): null=True together with unique=True lets most databases
    # store multiple NULL rows for email — confirm this is intended.
    email = models.EmailField(max_length=255, unique=True, blank=False,
                              null=True)
    name = models.CharField(max_length=255, blank=True, null=True)
    is_staff = models.BooleanField('Staff status', default=False, null=True)
    is_active = models.BooleanField('Active', default=True, null=True)
    date_joined = models.DateTimeField(auto_now_add=True, null=True)

    objects = CustomUserManager()  # uses the custom manager

    USERNAME_FIELD = 'email'  # overrides username to email field
| [
37811,
15022,
11787,
9104,
37811,
198,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
27741,
14881,
12982,
11,
7308,
12982,
13511,
11,
3467,
198,
220,
220,
220,
2448,
8481,
35608,
259,
198,
6738,
42625,
14208,
13,
9... | 2.889961 | 518 |
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
import torchvision.utils
import torch.optim as optim
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter
# for mixed precision
import torch.nn.utils as torch_utils
from torch.cuda.amp import autocast
from torch.cuda.amp import GradScaler
import matplotlib.pyplot as plt
import os
import numpy as np
import argparse
import csv
import random
from tqdm import tqdm
from utils import *
from kpcn import *
from kpal import *
from multiscale import *
from decomp import *
from path import *
from losses import *
from dataset import MSDenoiseDataset, init_data
# from test_cython import *
# L = 9 # number of convolutional layers
# n_kernels = 100 # number of kernels in each layer
# kernel_size = 5 # size of kernel (square)
# # input_channels = dataset[0]['X_diff'].shape[-1]
# hidden_channels = 100
# Axis permutation, presumably NHWC -> NCHW for feeding conv layers — TODO confirm.
permutation = [0, 3, 1, 2]
# Small constant, duplicated as the --eps default below.
eps = 0.00316

parser = argparse.ArgumentParser(description='Train the model')

'''
Needed parameters
1. Data & Model specifications
device : which device will the data & model should be loaded
mode : which kind of model should it train
input_channel : input channel
hidden_channel : hidden channel
num_layer : number of layers / depth of models
'''
parser.add_argument('--device', default='cuda:0')
parser.add_argument('--mode', default='kpcn')
parser.add_argument('--num_layers', default=9, type=int)
parser.add_argument('--input_channels', default=34, type=int)
parser.add_argument('--hidden_channels', default=100, type=int)
parser.add_argument('--kernel_size', default=5, type=int)

'''
2. Preprocessing specifications
eps
'''
parser.add_argument('--eps', default=0.00316, type=float)

'''
3. Training Specification
val : should it perform validation
early_stopping : should it perform early stopping
trainset : dataset for training
valset : dataset for validation
lr : learning rate
epoch : epoch
criterion : which loss function should it use
'''
parser.set_defaults(do_feature_dropout=False)
parser.add_argument('--do_feature_dropout', dest='do_feature_dropout', action='store_true')
parser.set_defaults(do_finetune=False)
parser.add_argument('--do_finetune', dest='do_finetune', action='store_true')
# NOTE(review): argparse's type=bool does not parse booleans — any non-empty
# string (including 'False') becomes True. action='store_true' (as used for
# the other flags) would be correct, but changes the CLI; flagged only.
parser.add_argument('--use_llpm_buf', default=False, type=bool)
parser.set_defaults(do_val=False)
parser.add_argument('--do_val', dest='do_val', action='store_true')
parser.set_defaults(do_early_stopping=False)
parser.add_argument('--do_early_stopping', dest='do_early_stopping', action='store_true')
parser.add_argument('--data_dir')
parser.add_argument('--batch_size', default=8, type=int)
parser.add_argument('--lr', default=1e-4, type=float)
parser.add_argument('--epochs', default=20, type=int)
parser.add_argument('--manif_w', default=0.1, type=float)
parser.add_argument('--loss', default='L1')
# TensorBoard run directory for this experiment.
save_dir = 'kpcn_manif_valid_fix'
writer = SummaryWriter('kpcn/'+save_dir)

if __name__ == '__main__':
    # NOTE(review): main() is not defined anywhere in this excerpt — running
    # the script as-is would raise NameError; verify against the full file.
    main()
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
11748,
28034,
10178,
198,
11748,
28034,
10178,
13,
7645,
23914,
355,
31408,
198,
11748,
28034,
10178,
13,
26791,
198,
11748,... | 3.092308 | 975 |
from django.conf.urls import url
from tenant_tutorial.views import HomeView
# Route the site root to the tenant tutorial home page.
# NOTE(review): django.conf.urls.url() is deprecated since Django 2.0 and
# removed in 4.0 in favour of django.urls.re_path — confirm Django version.
urlpatterns = [
    url(r'^$', HomeView.as_view()),
]
| [
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
19016,
198,
6738,
18285,
62,
83,
44917,
13,
33571,
1330,
5995,
7680,
628,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
19016,
7,
81,
6,
61,
3,
3256,
5995,
7680,
13,
292,
62... | 2.64 | 50 |
from django.views.generic import ListView, CreateView, DetailView
from events.models import Talk
from . import forms
from . import models
| [
6738,
42625,
14208,
13,
33571,
13,
41357,
1330,
7343,
7680,
11,
13610,
7680,
11,
42585,
7680,
198,
198,
6738,
2995,
13,
27530,
1330,
12167,
198,
6738,
764,
1330,
5107,
198,
6738,
764,
1330,
4981,
628,
628
] | 3.944444 | 36 |
from discord.ext import commands
from random import choice, shuffle
import aiohttp
import asyncio
import discord
import urllib.request, json
import random
import requests
# Cog of miscellaneous fun commands. The docstrings below are user-facing help
# text rendered (in French) by discord.py's help command, so they are left as-is.
class Funs:
    """Commandes funs."""

    @commands.command()
    async def avatar(self, ctx, user : discord.Member):
        """Récuperer l'avatar de ..."""
        # Embed with the avatar as thumbnail plus a full-size link.
        embed = discord.Embed(title="Avatar de : " + user.name, url=user.avatar_url, description="[Voir en plus grand]({})".format(user.avatar_url))
        embed.set_thumbnail(url=user.avatar_url)
        await ctx.send(embed=embed)

    @commands.command(pass_context=True)
    async def poke(self, ctx, user : discord.Member):
        """Poke quelqu'un"""
        await ctx.send(":clap: Hey {0} tu t'es fait poker par {1} !".format(user.mention, ctx.message.author.name))
        # Remove the invoking message to keep the channel clean.
        await ctx.message.delete()

    @commands.command()
    async def btcprice(self, ctx):
        """Le prix du BTC"""
        loading = await ctx.send("_réfléchis..._")
        # NOTE(review): urllib here blocks the event loop; the bare except below
        # hides every failure (network, JSON, schema) behind the sentinel btc = 1.
        try:
            with urllib.request.urlopen("http://api.coindesk.com/v1/bpi/currentprice/EUR.json") as url:
                data = json.loads(url.read().decode())
                btc = data['bpi']['EUR']['rate']
                btc = btc.split(".")
        except:
            btc = 1
        if btc == 1:
            await ctx.send("Impossible d'accèder à l'API coindesk.com, veuillez réessayer ultérieurment !")
        else:
            await loading.edit(content="Un bitcoin est égal à : " + btc[0] + " €")

    @commands.command()
    async def joke(self, ctx):
        """Print a random joke in a json file"""
        with open('texts/jokes.json') as js:
            jk = json.load(js)
        # Jokes are stored under string keys "1".."13".
        clef = str(random.randint(1,13))
        joke = jk["{}".format(clef)]
        embed = discord.Embed(title="Blague _{}_ : ".format(clef), description=joke['content'], colour=0x03C9A9)
        embed.set_footer(text="Par " + joke['author'])
        embed.set_thumbnail(url='https://outout.tech/tuxbot/blobjoy.png')
        await ctx.send(embed=embed)

    @commands.command()
    async def ethylotest(self, ctx):
        """Ethylotest simulator 2018"""
        # Canned replies: one list for the officer, one for the driver.
        results_poulet = ["Désolé mais mon ethylotest est sous Windows Vista, merci de patienter...", "_(ethylotest)_ ``Une erreur est survenue. Windows cherche une solution à se problème...``", "Mais j'l'ai foutu où ce p*** d'ethylotest de m*** bordel fait ch*** tab***", "C'est pas possible z'avez cassé l'ethylotest !"]
        results_client = ["D'accord, il n'y a pas de problème à cela je suis complètement clean", "Bien sur si c'est votre devoir !", "Suce bi** !", "J'ai l'air d'être bourré ?", "_laissez moi prendre un bonbon à la menthe..._"]
        result_p = random.choice(results_poulet)
        result_c = random.choice(results_client)
        await ctx.send(":oncoming_police_car: Bonjour bonjour, controle d'alcoolémie !")
        await asyncio.sleep(0.5)
        await ctx.send(":man: " + result_c)
        await asyncio.sleep(1)
        await ctx.send(":police_car: " + result_p)

    @commands.command()
    async def coin(self, ctx):
        """Coin flip simulator 2025"""
        starts_msg = ["Je lance la pièce !", "C'est parti !", "C'est une pièce d'un cent faut pas la perdre", "C'est une pièce d'un euro faut pas la perdre", "Je lance !"]
        results_coin = ["{0} pile", "{0} face", "{1} Heu c'est quoi pile c'est quoi face enfaite ?", "{1} Oh shit, je crois que je l'ai perdue", "{1} Et bim je te vol ta pièce !", "{0} Oh une erreur d'impression il n'y a ni pile ni face !"]
        start = random.choice(starts_msg)
        result = random.choice(results_coin)
        await ctx.send(start)
        await asyncio.sleep(0.6)
        # {0}/{1} in the template picks which prefix (money bag vs robot) is used.
        await ctx.send(result.format(":moneybag: Et la pièce retombe sur ...", ":robot:"))

    @commands.command()
    async def pokemon(self, ctx):
        """Random pokemon fight"""
        with open('texts/pokemons.json') as js:
            jk = json.load(js)
        poke1 = jk[random.randint(1, 150)]
        poke2 = jk[random.randint(1, 150)]
        # Winner is the higher MaxHP; on any lookup error fall back to poke1.
        try:
            if poke1['MaxHP'] > poke2['MaxHP']:
                winer = poke1
            else:
                winer = poke2
        except:
            winer = poke1
        await ctx.send(":flag_white: **Le combat commence !**")
        await asyncio.sleep(1)
        await ctx.send(":loudspeaker: Les concurants sont {} contre {} ! Bonne chance à eux !".format(poke1["Name"], poke2["Name"]))
        await asyncio.sleep(0.5)
        await ctx.send(":boom: {} commence et utilise {}".format(poke1["Name"], poke1["Fast Attack(s)"][0]["Name"]))
        await asyncio.sleep(1)
        await ctx.send(":dash: {} réplique avec {}".format(poke2["Name"], poke2["Fast Attack(s)"][0]["Name"]))
        await asyncio.sleep(1.2)
        await ctx.send("_le combat continue de se dérouler..._")
        await asyncio.sleep(1.5)
        await ctx.send(":trophy: Le gagnant est **{}** !".format(winer["Name"]))

    @commands.command()
    async def randomcat(self, ctx):
        """Display a random cat"""
        # NOTE(review): requests.get blocks the event loop inside an async
        # command — the aiohttp import at the top of this file suggests an
        # async client was intended; verify.
        r = requests.get('http://random.cat/meow.php')
        cat = str(r.json()['file'])
        embed = discord.Embed(title="Meow", description="[Voir le chat plus grand]({})".format(cat), colour=0x03C9A9)
        embed.set_thumbnail(url=cat)
        embed.set_author(name="Random.cat", url='https://random.cat/', icon_url='http://outout.tech/tuxbot/nyancat2.gif')
        await ctx.send(embed=embed)
| [
6738,
36446,
13,
2302,
1330,
9729,
201,
198,
6738,
4738,
1330,
3572,
11,
36273,
201,
198,
11748,
257,
952,
4023,
201,
198,
11748,
30351,
952,
201,
198,
11748,
36446,
201,
198,
11748,
2956,
297,
571,
13,
25927,
11,
33918,
201,
198,
117... | 2.143951 | 2,612 |
from django.db import models
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
628
] | 3.75 | 8 |
"""
Create a Marker table from an AnnData Object.
"""
from statsmodels.stats.proportion import proportions_ztest
from scipy.stats import ttest_ind
import pandas as pd
import numpy as np
from ctwingest.scanpyapi import proportion_expressed_cluster, centroids, get_expression, std_gt_0_genes
def scale_centroids(centers, max=2.5, min=-2.5):
    """Z-score each gene (row) across clusters and clamp to [min, max].

    Mirrors the "avg.exp.scaled" column of a Seurat-style marker table:
    subtract the per-row mean, divide by the per-row standard deviation,
    then cap extreme values at the given bounds.
    """
    row_means = centers.mean(axis=1)
    row_stds = centers.std(axis=1)
    z_scores = centers.subtract(row_means, axis=0).divide(row_stds, axis=0)
    # Cap the upper tail first, then the lower tail (same order as before).
    z_scores = z_scores.where(z_scores <= max, other=max)
    z_scores = z_scores.where(z_scores >= min, other=min)
    return z_scores
def run_pipe(ad, cluster_solution_name="louvain", use_raw=True):
"""Returns a markers table from an anndata object. Looks for anndata.raw to
make metrics directly from counts. If .raw is not there then proceeds with whatever is in anndata.expression_matrix.
Metrics are t-statistic a proportions z-statistic, their pvalues and log2fc."""
# Grab the expression matrix and get ready for processing.
expression_matrix = get_expression(ad, use_raw=use_raw)
expression_matrix = expression_matrix.transpose()
expression_matrix = expression_matrix.dropna(axis='columns', how='all')
# A cluster solution is a mapping from cell->cluster.name
cluster_solution = ad.obs[cluster_solution_name]
cluster_solution = cluster_solution.dropna()
clusters = cluster_solution.unique()
print("Calculating centroids and proportions of %d samples and %d genes with %d clusters" % (
expression_matrix.shape[0], expression_matrix.shape[1], len(clusters)
))
proportions = proportion_expressed_cluster(ad, cluster_solution, use_raw=use_raw)
centroid_df = centroids(ad, cs_name=cluster_solution_name, use_raw=use_raw)
# Filter to genes that have some standard deviation across thier means
# Weak filtering intended to prevent downstream errors.
marker_genes = std_gt_0_genes(centroid_df)
centroid_df = centroid_df.loc[marker_genes]
scaled_centroid_df = scale_centroids(centroid_df)
print(
"Removing %d genes because standard deviation across means is 0"
% (expression_matrix.shape[1] - len(marker_genes))
)
print(scaled_centroid_df.head())
expression_matrix = expression_matrix[marker_genes]
# Current implementation builds one dataframe for each cluster and then concats them together.
dfs = []
for cluster_name in clusters:
print("Calculating Cluster ", cluster_name)
df = pd.DataFrame(
index=expression_matrix.columns,
#columns=["tstat", "pct.exp", "zstat", "log2fc", "zpval", "tpval", "cluster"]
#columns=["gene", "avg.exp.scaled", "pct.exp", "t-statistic", "p-value", "cluster"]
columns=["gene", "avg.exp.scaled", "pct.exp", "u-statistic", "p-value", "cluster"]
)
df['cluster'] = cluster_name
cell_names = cluster_solution.index[(cluster_solution == cluster_name).tolist()]
other_cell_names = cluster_solution.index[(cluster_solution != cluster_name).tolist()]
#pseudocount = .1
#df['log2fc'] = np.log2(expression_matrix.loc[cell_names].mean() + pseudocount) - np.log2(
# expression_matrix.loc[other_cell_names].mean() + pseudocount)
# set up for proportions z test
# expressed_in_cluster = (expression_matrix.loc[cell_names] > 0).sum()
# expressed_out_cluster = (expression_matrix.loc[other_cell_names] > 0).sum()
#out_size = len(other_cell_names)
#cluster_size = len(cell_names)
#ztest_df = pd.DataFrame([expressed_in_cluster, expressed_out_cluster])
#ztest = lambda x: proportions_ztest(
# count=[x[0], x[1]],
# nobs=[cluster_size, out_size],
# alternative='larger'
#)
#zstat_zpval = ztest_df.apply(ztest, axis='index')
#zstat = zstat_zpval.apply(lambda x: x[0])
#zpval = zstat_zpval.apply(lambda x: x[1])
from scipy.stats import mannwhitneyu
#test = lambda x: ttest_ind(x[cell_names], x[other_cell_names])
test = lambda x: mannwhitneyu(x[cell_names], x[other_cell_names])
stat_pval = expression_matrix.apply(test, axis="index")
stat = stat_pval.apply(lambda x: x[0])
pval = stat_pval.apply(lambda x: x[1])
rownames = df.index.tolist()
df["u-statistic"] = stat
df['p-value'] = pval
#df["zstat"] = zstat
#df["zpval"] = zpval
df['gene'] = rownames
df['pct.exp'] = proportions.loc[rownames, str(cluster_name)]
df['avg.exp'] = centroid_df.loc[rownames, str(cluster_name)]
df['avg.exp.scaled'] = scaled_centroid_df.loc[rownames, str(cluster_name)]
dfs.append(df)
markers_table = pd.concat(dfs, axis=0)
return markers_table
DEFAULT_LEGEND_METRICS = pd.Series(["avg.exp", "avg.exp.scaled", "pct.exp"])
| [
37811,
198,
16447,
257,
2940,
263,
3084,
422,
281,
5506,
6601,
9515,
13,
198,
37811,
198,
6738,
9756,
27530,
13,
34242,
13,
1676,
16864,
1330,
23250,
62,
89,
9288,
198,
6738,
629,
541,
88,
13,
34242,
1330,
256,
9288,
62,
521,
198,
1... | 2.415846 | 2,032 |
#################################################################
# Copyright (C) #
# 2019 Qiskit Team #
# Permission given to modify the code as long as you keep this #
# declaration at the top #
#################################################################
import numpy as np
import tensorflow as tf
from tensorflow.keras import datasets, layers, models, optimizers
from typing import Dict, Tuple, Sequence, List
import copy
from Agent.network.nets import *
class dqn:
"""
Deep Q Network
Action Space: {x1, x2, y1, y2, z1, z2, h1, h2, c12, c21}
Attribute
self.num_qubits:
self.input_dim:
Methods
parse_action: convert 0 to 9 to specific gate and its argument
"""
# convert 1 * 2^n array into 2 * 2^n array
| [
29113,
29113,
2,
198,
2,
15069,
357,
34,
8,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
22... | 2.504298 | 349 |
import numpy as np, time
from keras.models import Model, load_model
from keras.layers import Input, BatchNormalization, Activation, Dropout, concatenate
from keras.layers.convolutional import Conv2D, Conv2DTranspose, Conv3D, Conv3DTranspose
from keras.layers.pooling import MaxPooling2D, GlobalMaxPool2D, MaxPooling3D
from keras.layers.merge import concatenate
from keras.utils import plot_model
| [
11748,
299,
32152,
355,
45941,
11,
640,
198,
198,
6738,
41927,
292,
13,
27530,
1330,
9104,
11,
3440,
62,
19849,
198,
6738,
41927,
292,
13,
75,
6962,
1330,
23412,
11,
347,
963,
26447,
1634,
11,
13144,
341,
11,
14258,
448,
11,
1673,
3... | 3.101563 | 128 |
from django.contrib import admin
from . import models
admin.site.register(models.Address, AddressAdmin)
admin.site.register(models.CreditCard, CreditCardAdmin)
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
764,
1330,
4981,
198,
198,
28482,
13,
15654,
13,
30238,
7,
27530,
13,
20231,
11,
17917,
46787,
8,
198,
28482,
13,
15654,
13,
30238,
7,
27530,
13,
23690,
16962,
11,
10504,
1696... | 3.577778 | 45 |
'''Autogenerated by get_gl_extensions script, do not edit!'''
from OpenGL import platform as _p, constants as _cs, arrays
from OpenGL.GL import glget
import ctypes
EXTENSION_NAME = 'GL_ARB_base_instance'
@_f
@_p.types(None,_cs.GLenum,_cs.GLint,_cs.GLsizei,_cs.GLsizei,_cs.GLuint)
@_f
@_p.types(None,_cs.GLenum,_cs.GLsizei,_cs.GLenum,ctypes.POINTER(_cs.void),_cs.GLsizei,_cs.GLuint)
@_f
@_p.types(None,_cs.GLenum,_cs.GLsizei,_cs.GLenum,ctypes.POINTER(_cs.void),_cs.GLsizei,_cs.GLint,_cs.GLuint)
def glInitBaseInstanceARB():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( EXTENSION_NAME )
| [
7061,
6,
16541,
519,
877,
515,
416,
651,
62,
4743,
62,
2302,
5736,
4226,
11,
466,
407,
4370,
0,
7061,
6,
198,
6738,
30672,
1330,
3859,
355,
4808,
79,
11,
38491,
355,
4808,
6359,
11,
26515,
198,
6738,
30672,
13,
8763,
1330,
1278,
1... | 2.462366 | 279 |
import configparser
import requests
import time
import numpy as np
from PIL import Image
from io import BytesIO
import configparser
from code import CodeRecognizer
from utils import *
if __name__ == "__main__":
cf = configparser.ConfigParser()
cf.read("info.conf")
while process_orders(cf):
time.sleep(2) | [
11748,
4566,
48610,
198,
11748,
7007,
198,
11748,
640,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
350,
4146,
1330,
7412,
198,
6738,
33245,
1330,
2750,
4879,
9399,
198,
11748,
4566,
48610,
198,
198,
6738,
2438,
1330,
6127,
6690,
2360,
... | 2.938053 | 113 |
#!/usr/bin/env python
'''
A solution to a ROSALIND bioinformatics problem.
Problem Title: Finding a Motif in DNA
Rosalind ID: SUBS
Rosalind #: 009
URL: http://rosalind.info/problems/subs/
'''
if __name__ == "__main__":
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
7061,
6,
198,
32,
4610,
284,
257,
48263,
1847,
12115,
13401,
259,
18982,
873,
1917,
13,
198,
40781,
11851,
25,
27063,
257,
6543,
361,
287,
7446,
198,
35740,
282,
521,
4522,
25,
13558,
... | 2.566667 | 90 |
from .wrapper import CDataReader | [
6738,
764,
48553,
1330,
6458,
1045,
33634
] | 4.571429 | 7 |
from django.db import models
from .timestamp import TimeStamp
# from .user import UserProfile
from customer.models import UserProfile
from .item import Item
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
198,
6738,
764,
16514,
27823,
1330,
3862,
1273,
696,
198,
2,
422,
764,
7220,
1330,
11787,
37046,
198,
6738,
6491,
13,
27530,
1330,
11787,
37046,
198,
6738,
764,
9186,
1330,
9097,
198
] | 3.95 | 40 |
#!/usr/bin/env python
import os
import random
from google.protobuf import text_format
from google.protobuf.descriptor import Descriptor, FieldDescriptor
from .document_pb2 import Document
from dremel.consts import *
from dremel.simple import create_simple_storage
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
28686,
198,
11748,
4738,
198,
198,
6738,
23645,
13,
11235,
672,
3046,
1330,
2420,
62,
18982,
198,
6738,
23645,
13,
11235,
672,
3046,
13,
20147,
1968,
273,
1330,
2935,
6519,
... | 3.308642 | 81 |
print("Way to transform two lists into one dict")
purchases = ["rice", "beans", "pasta"]
prices = ["2.00", "3.80", "4.90"]
new_list = {}
# for x in range(len(purchases)):
# new_list[purchases[x]] = prices[x]
# for id, item in enumerate(compras):
# new_list[purchases[id]] = prices[id]
new_list = {item: prices[purchases.index(item)] for item in purchases}
# new_list = dict(zip(purchases, prices))
print(new_list)
| [
4798,
7203,
25309,
284,
6121,
734,
8341,
656,
530,
8633,
4943,
198,
198,
79,
2575,
1386,
796,
14631,
20970,
1600,
366,
44749,
1600,
366,
30119,
64,
8973,
198,
1050,
1063,
796,
14631,
17,
13,
405,
1600,
366,
18,
13,
1795,
1600,
366,
... | 2.468208 | 173 |
'''Plotting Utility.
Grad-CAM implementation in Pytorch
Reference:
[1] xyz
[2] xyz
'''
import matplotlib.pyplot as plt
import numpy as np
import torch
def denormalize(tensor, mean, std):
"""Denormalize the image for given mean and standard deviation.
Args:
tensor: Image tensor
mean: Dataset mean
std: Dataset standard deviation
Returns:
tensor
Raises:
No Exception
"""
if not tensor.ndimension() == 4:
raise TypeError('tensor should be 4D')
mean = torch.FloatTensor(mean).view(1, 3, 1, 1).expand_as(tensor).to(tensor.device)
std = torch.FloatTensor(std).view(1, 3, 1, 1).expand_as(tensor).to(tensor.device)
return tensor.mul(std).add(mean)
def imshow(inp, title=None):
"""Imshow for Tensor."""
inp = inp.numpy().transpose((1, 2, 0))
mean = np.array([0.5404, 0.5918, 0.6219])
std = np.array([0.2771, 0.2576, 0.2998])
inp = std * inp + mean
inp = np.clip(inp, 0, 1)
plt.imshow(inp)
if title is not None:
plt.title(title[:4])
plt.pause(0.001) # pause a bit so that plots are updated
| [
7061,
6,
43328,
889,
34030,
13,
198,
198,
42731,
12,
34,
2390,
7822,
287,
9485,
13165,
354,
198,
198,
26687,
25,
198,
58,
16,
60,
2124,
45579,
198,
58,
17,
60,
2124,
45579,
198,
7061,
6,
198,
198,
11748,
2603,
29487,
8019,
13,
907... | 2.242 | 500 |
# coding:utf-8
import os
import subprocess
if __name__ == "__main__":
j = JavaImage(codeDir=os.path.abspath(os.curdir), shell_file="javapack.sh",
imageTag="harbor.dev.21vianet.com/cmdb/cmdb_javatopo:latest")
j.begin()
# print()
| [
2,
19617,
25,
40477,
12,
23,
198,
11748,
28686,
198,
11748,
850,
14681,
628,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
474,
796,
7349,
5159,
7,
8189,
35277,
28,
418,
13,
6978,
13,
397,
... | 2.071429 | 126 |
from django import forms
from uploads.core.models import Document
#from uploads.core.models import File
# 創造一個依照model的form,會繼承欄位description document
# class FileForm(forms.ModelForm):
# class Meta:
# model = File
# fields = ('filename',)
# file = forms.FileField()
# pid = forms.CharField(max_length=20)
# name = forms.CharField(max_length=20)
# sex = forms.CharField()
# age = forms.IntegerField()
# mp = forms.IntegerField()
# scanType = forms.CharField(max_length=10)
# fracture = forms.IntegerField()
# tscore = forms.CharField()
# zscore = forms.CharField()
# region = forms.CharField()
# lva = forms.CharField()
# apspine = forms.CharField()
# dualfemur = forms.CharField()
# combination = forms.CharField() | [
6738,
42625,
14208,
1330,
5107,
198,
198,
6738,
9516,
82,
13,
7295,
13,
27530,
1330,
16854,
198,
2,
6738,
9516,
82,
13,
7295,
13,
27530,
1330,
9220,
198,
198,
2,
10263,
231,
113,
34460,
254,
31660,
161,
222,
233,
160,
122,
251,
163,... | 2.47205 | 322 |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
pathway_file = '../../drp-data/pathways/9606.enrichr_pathway.edge'
pathway = pd.read_csv(pathway_file, sep='\t', header=None)
print("pathways:", pathway[0].nunique())
print("pathway genes:", pathway[1].nunique())
gsc_filtered = '../../KnowEng_GSC/GSC_10mod/drawr_filtered/DraWR_GSC_Enrichr_STRINGExp.xlsx'
ppi_file = '../../drp-data/pathways/9606.STRING_experimental.edge'
ppi = pd.read_csv(ppi_file, sep='\t', header=None)
print("PPI original edges:", len(ppi))
ppi['norm_score'] = ppi[2]/ppi[2].max()
ppi = ppi.loc[ppi['norm_score'] > 0.5]
print("PPI filtered edges:", len(ppi))
nodes = list(set(ppi[0]).union(set(ppi[1])))
print("PPI nodes:", len(nodes) )
folder = 'CX_ens10'
mean_attribution_file = 'results/CX_ens10/all_attributions.csv'
feature_attr = pd.read_csv(mean_attribution_file, index_col=0)
top_genes_file = 'results/CX_ens10/top_genes_mean_aggregation_info.xlsx'
writer_a = pd.ExcelWriter('results/%s/one_hop.xlsx'%folder, engine='xlsxwriter')
drugs = [
'bleomycin',
'cisplatin',
'cyclophosphamide',
'docetaxel',
'doxorubicin',
'etoposide',
'gemcitabine',
'irinotecan',
'oxaliplatin',
'paclitaxel',
'pemetrexed',
'tamoxifen',
'temozolomide',
'vinorelbine']
# use dictionary coz it's faster
conv_file = '../../drp-data/lists/hgnc2ensembl.txt'
f = open(conv_file, 'r')
conv_table = {}
for line in f:
line = line.strip().split(',')
if line[1] != "":
conv_table[line[0]] = line[1]
# print(conv_table)
for drug in drugs:
gsc_pathways = pd.read_excel(gsc_filtered, sheet_name=drug, index_col='property_gene_set_id')
pathway_genes = pathway.loc[pathway[0].isin(gsc_pathways.index)][1].unique()
top_features = pd.read_excel(top_genes_file, sheet_name=drug, index_col='ensembl')
one_hop_from_top_feats_left = ppi.loc[ppi[0].isin(top_features.index)][1]
one_hop_from_top_feats_right = ppi.loc[ppi[1].isin(top_features.index)][0]
one_hop_from_top_feats = set(one_hop_from_top_feats_left).union(set(one_hop_from_top_feats_right))
one_hop_from_pathway_left = ppi.loc[ppi[0].isin(pathway_genes)][1]
one_hop_from_pathway_right = ppi.loc[ppi[1].isin(pathway_genes)][0]
one_hop_from_pathway = set(one_hop_from_pathway_left).union(set(one_hop_from_pathway_right))
one_hop = one_hop_from_top_feats.union(one_hop_from_pathway)
nodes_of_interest = set(top_features.index).union(set(pathway_genes)).union(one_hop)
features = feature_attr[drug].sort_values(ascending=False).index
ranks = pd.Series(range(1, len(features) + 1), index=features)
paths = list(gsc_pathways.index)
cols = ['hgnc', 'is_feature', 'attribution', 'rank',
'is_top_feat', 'is_1H_from_pathway',
'is_1H_from_top_feat'] + paths
df = pd.DataFrame(columns=cols)
print(drug)
print('nodes of interest:', len(nodes_of_interest))
for node in nodes_of_interest:
info = {"hgnc": node}
if node in conv_table:
info['hgnc'] = conv_table[node]
if node in features:
info['attribution'] = feature_attr.loc[node][drug]
info['rank'] = ranks[node]
info['is_feature'] = 1
else:
info['attribution'] = np.nan
info['rank'] = np.nan
info['is_feature'] = 0
info['is_1H_from_pathway'] = 1*(node in one_hop_from_pathway)
info['is_1H_from_top_feat'] = 1*(node in one_hop_from_top_feats)
info['is_top_feat'] = 1*(node in top_features.index)
for path in paths:
info[path] = 1*(node in (pathway.loc[pathway[0] == path][1].unique()))
df.loc[node] = info
df['score'] = 0.5*(df['is_1H_from_pathway'] + df['is_1H_from_top_feat']) + df['is_top_feat'] + 1*(df[paths].sum(axis=1) > 0)
# df['score'] = df['is_1H_from_top_feat']*0.5*(df['is_1H_from_top_feat']==0) + df['is_1H_from_top_feat'] \
# + 1*(df[paths].sum(axis=1) > 0) + (df[paths].sum(axis=1) == 0)*0.5*df['is_1H_from_pathway']
df = df.sort_values(['score', 'rank'],ascending=[False, True])
# df = df.sort_values('rank')
df.to_excel(writer_a, sheet_name=drug)
desc = {
'hgnc':'HGNC gene name',
'is_feature': 'gene is used as a feature by the model',
'attribution': 'attribution value for the feature/gene',
'rank': 'ranking of the attribution value for the feature/gene',
'is_top_feat': '1 if the feature/gene is in the top features found by kneedle method',
'is_1H_from_pathway': '1 if the gene is an immediate neighbor of a member of any of our pathways-of-interest',
'is_1H_from_top_feat': '1 if the gene is an immediate neighbor of a top feature/gene',
'score': 'arbitrary scoring for sorting (0.5*(is_1H_from_pathway+is_1H_from_top_feat) + is_top_feat + is_a_pathway_member)',
'other columns': '1 if the gene is a member of the specific pathway'
}
# df = pd.Series(df)
df = pd.DataFrame(index=desc.keys())
df['description'] = desc.values()
df.to_excel(writer_a, sheet_name='legend')
writer_a.save()
| [
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
628,
198,
6978,
1014,
62,
7753,
796,
705,
40720,
40720,
7109,
79,
12,
7890,
14,
6978,
1322,
14,
24,
3... | 2.210276 | 2,316 |
import os
import platform
if(platform.system() == 'Windows'):
FILENAME = 'terminateFile.bat'
elif(platform.system() == 'Linux'):
FILENAME = 'idle.sh'
TIME = 900
PATH = os.getcwd()
# print(platform.system()) | [
11748,
28686,
198,
11748,
3859,
198,
198,
361,
7,
24254,
13,
10057,
3419,
6624,
705,
11209,
6,
2599,
198,
220,
220,
220,
34020,
1677,
10067,
796,
705,
23705,
378,
8979,
13,
8664,
6,
198,
417,
361,
7,
24254,
13,
10057,
3419,
6624,
70... | 2.654321 | 81 |
import sys
import numpy as np
import itertools
import deeptrack as dt
import pytest
u = dt.units
@pytest.mark.parametrize(
"size,gpu",
[
*itertools.product(
(64, 256, 512),
[True, False],
)
],
)
| [
11748,
25064,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
340,
861,
10141,
198,
11748,
390,
19598,
39638,
355,
288,
83,
198,
11748,
12972,
9288,
628,
198,
84,
796,
288,
83,
13,
41667,
628,
198,
198,
31,
9078,
9288,
13,
4102,
13,
... | 1.953488 | 129 |
# helper functions for Euca2ools
import subprocess
import os
import time
| [
2,
31904,
5499,
329,
412,
43120,
17,
10141,
198,
198,
11748,
850,
14681,
198,
11748,
28686,
198,
11748,
640,
198
] | 3.7 | 20 |
"""ClaudiusIrae song logic."""
import OSC
from Psc2.songs import song
from Psc2.modes import bass_doubler
from Psc2.modes import looper
class ClaudiusIrae(song.Song):
"""This defines the logic for ClaudiusIrae.
For most of the song it is in bass-doubling mode, except for the solo section
where the bass is automated.
"""
def __init__(self, client):
"""Initialize the ClaudiusIrae Song.
Args:
client: OSCClient, used to send messages for playback.
"""
self.client = client
self.eighth_note_duration = 0.5
self.avg_velocity = 60
self.modes = {
'doubler': bass_doubler.BassDoubler(client, highest_bass_note=54),
'solo': looper.Looper(client,
[[(45, 5, 5), (52, 3, 3), (50, 5, 5), (57, 3, 3),
(52, 5, 5), (59, 3, 3), (61, 5, 5), (57, 3, 3),
(50, 5, 5), (57, 3, 3), (53, 5, 5), (57, 3, 3),
(56, 5, 5), (52, 3, 3), (49, 5, 5), (52, 3, 3)
]],
eigths_per_tap=4)
}
self.current_mode = 'doubler'
self.modes_to_process = ['doubler'] # Add 'solo' to auto-detect solo sect.
self.mode_detected = None
def process_program(self, program):
"""Process program hits (footpedal)."""
if self.current_mode == 'solo':
self.modes['solo'].increment_loop()
elif program == 0: # Tap to set tempo.
self.modes['solo'].set_tempo()
else: # Start bass for solo.
msg = OSC.OSCMessage()
msg.setAddress('/allnotesoff')
self.client.send(msg)
self.modes_to_process = []
self.current_mode = 'solo'
self.modes['solo'].start_looper_thread()
| [
37811,
2601,
3885,
3754,
40,
430,
68,
3496,
9156,
526,
15931,
198,
198,
11748,
440,
6173,
198,
198,
6738,
350,
1416,
17,
13,
82,
28079,
1330,
3496,
198,
6738,
350,
1416,
17,
13,
76,
4147,
1330,
12702,
62,
67,
12944,
1754,
198,
6738,... | 2.02439 | 861 |
import random
from emulators.Device import Device
from emulators.Medium import Medium
from emulators.MessageStub import MessageStub
# We extend the MessageStub here for the message-types we wish to communicate
# the constructor-function takes the source and destination as arguments. These are used for "routing" but also
# for pretty-printing. Here we also take the specific flag of "is_ping"
# remember to implement the __str__ method such that the debug of the framework works!
# This class extends on the basic Device class. We will implement the protocol in the run method
# The constructor must have exactly this form.
# this method implements the actual algorithm
# for pretty-printing and debugging, implement this function
| [
11748,
4738,
198,
198,
6738,
795,
24325,
13,
24728,
1330,
16232,
198,
6738,
795,
24325,
13,
31205,
1330,
13398,
198,
6738,
795,
24325,
13,
12837,
1273,
549,
1330,
16000,
1273,
549,
628,
198,
2,
775,
9117,
262,
16000,
1273,
549,
994,
3... | 4.140541 | 185 |
#!/usr/bin/env python
import setpath
import unittest
from rename import rename
import compiler
from bike import testdata
from bike.testutils import*
from bike.transformer.save import save
# Generic tests. These tests are designed to be run in the context of a ui
# and in a package hierarchy structure
# tests that cover stuff not renamed automatically
# (I.e. are renamed after user manually expresses desire to do so)
# template method
MethodTestdata = trimLines("""
class TheClass:
def theMethod(self):
pass
def differentMethod(self):
pass
class DifferentClass:
def theMethod(self):
pass
""")
if __name__ == "__main__":
unittest.main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
11748,
900,
6978,
198,
11748,
555,
715,
395,
198,
6738,
36265,
1330,
36265,
198,
11748,
17050,
198,
6738,
7161,
1330,
1332,
7890,
198,
198,
6738,
7161,
13,
9288,
26791,
1330,
9,
198,
1... | 3.064378 | 233 |
from rest_framework import serializers
from care.users.models import District, LocalBody, State, Ward, Block
| [
6738,
1334,
62,
30604,
1330,
11389,
11341,
198,
198,
6738,
1337,
13,
18417,
13,
27530,
1330,
5665,
11,
10714,
25842,
11,
1812,
11,
12150,
11,
9726,
628,
628,
628,
198
] | 3.866667 | 30 |
import ijson, csv, json, datetime
import sys
sys.path.append('../lib')
from accounts.company import Company
if __name__ == '__main__':
main() | [
11748,
1312,
17752,
11,
269,
21370,
11,
33918,
11,
4818,
8079,
198,
198,
11748,
25064,
198,
198,
17597,
13,
6978,
13,
33295,
10786,
40720,
8019,
11537,
198,
198,
6738,
5504,
13,
39722,
1330,
5834,
628,
628,
198,
361,
11593,
3672,
834,
... | 2.921569 | 51 |
import numpy as np
import random
import re
import copy
| [
11748,
299,
32152,
355,
45941,
198,
11748,
4738,
198,
11748,
302,
198,
11748,
4866,
628
] | 3.733333 | 15 |
# -*- coding: utf-8 -*-
__version__ = '1.1.15'
from .filemaker import create_files # noqa
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
834,
9641,
834,
796,
705,
16,
13,
16,
13,
1314,
6,
198,
198,
6738,
764,
7753,
10297,
1330,
2251,
62,
16624,
220,
220,
220,
220,
1303,
645,
20402,
198
] | 2.181818 | 44 |
from dataclasses import dataclass
from functools import partial
from typing import Callable, List, Optional
from colassigner.constants import PREFIX_SEP
from ...utils import chainmap
from .artifact_metadata import ArtifactMetadata
from .column import Column
from .feature_types import CompositeFeature, ForeignKey, PrimitiveFeature
from .namespace_metadata import NamespaceMetadata
from .namespaced_id import NamespacedId
@dataclass
| [
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
198,
6738,
1257,
310,
10141,
1330,
13027,
198,
6738,
19720,
1330,
4889,
540,
11,
7343,
11,
32233,
198,
198,
6738,
951,
562,
570,
263,
13,
9979,
1187,
1330,
22814,
47084,
62,
5188,
47,
19... | 3.901786 | 112 |
from jinfo.utils.one_hot_dna import one_hot_dna
from jinfo.utils.random_DNASeq import random_DNASeq
from jinfo.utils.DNASeq_from_NCBI import DNASeq_from_NCBI
from jinfo.utils.seq_list_to_fasta import seq_list_to_fasta
from jinfo.utils.seq_list_from_fasta import seq_list_from_fasta
from jinfo.utils.seq_from_fasta import seq_from_fasta
from jinfo.utils.alignment_from_fasta import alignment_from_fasta
from jinfo.utils.multialign import multialign
from jinfo.utils.calc_phylo_tree import calc_phylo_tree
from jinfo.utils.percentage_identity import percentage_identity
from jinfo.utils.remove_degenerate_seqs import remove_degenerate_seqs | [
6738,
474,
10951,
13,
26791,
13,
505,
62,
8940,
62,
67,
2616,
1330,
530,
62,
8940,
62,
67,
2616,
198,
6738,
474,
10951,
13,
26791,
13,
25120,
62,
35,
18293,
27363,
1330,
4738,
62,
35,
18293,
27363,
198,
6738,
474,
10951,
13,
26791,
... | 2.79386 | 228 |
from multiprocessing import Pool
from random import randrange
from absl import logging
from patterns.command.action import Action
from patterns.command.callback.handler import Callback
| [
6738,
18540,
305,
919,
278,
1330,
19850,
198,
6738,
4738,
1330,
43720,
9521,
198,
198,
6738,
2352,
75,
1330,
18931,
198,
198,
6738,
7572,
13,
21812,
13,
2673,
1330,
7561,
198,
6738,
7572,
13,
21812,
13,
47423,
13,
30281,
1330,
4889,
1... | 4.318182 | 44 |
'''
Created on February 25, 2019
This file is subject to the terms and conditions defined in the
file 'LICENSE.txt', which is part of this source code package.
@author: David Moss
'''
from intelligence.intelligence import Intelligence
class LocationMidnightMicroservice(Intelligence):
"""
Announce midnight throughout the microservices framework
"""
def schedule_fired(self, botengine, schedule_id):
"""
The bot executed on a hard coded schedule specified by our runtime.json file
:param botengine: BotEngine environment
:param schedule_id: Schedule ID that is executing from our list of runtime schedules
"""
if schedule_id == "MIDNIGHT":
self.parent.distribute_datastream_message(botengine, "midnight_fired", None, internal=True, external=False)
| [
7061,
6,
198,
41972,
319,
3945,
1679,
11,
13130,
198,
198,
1212,
2393,
318,
2426,
284,
262,
2846,
290,
3403,
5447,
287,
262,
198,
7753,
705,
43,
2149,
24290,
13,
14116,
3256,
543,
318,
636,
286,
428,
2723,
2438,
5301,
13,
198,
198,
... | 3.238281 | 256 |
import hashlib
from typing import List
| [
11748,
12234,
8019,
198,
6738,
19720,
1330,
7343,
628
] | 4.444444 | 9 |
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on Jan 20, 2015
@author: senrs
based on alfoa design
"""
from __future__ import division, print_function, unicode_literals, absolute_import
#External Modules------------------------------------------------------------------------------------
import abc
#External Modules End--------------------------------------------------------------------------------
#Internal Modules------------------------------------------------------------------------------------
from ..utils import utils, InputData
from ..BaseClasses import MessageUser
from .. import MessageHandler
#Internal Modules End--------------------------------------------------------------------------------
class Assembler(MessageUser):
"""
Assembler class is used as base class for all the objects that need, for initialization purposes,
to get pointers (links) of other objects at the Simulation stage (Simulation.run() method)
"""
def __init__(self):
"""
Constructor
@ In, None
@ Out, None
"""
super().__init__()
self.type = self.__class__.__name__ # type
self.name = self.__class__.__name__ # name
self.assemblerObjects = {} # {MainClassName(e.g.Distributions):[class(e.g.Models),type(e.g.ROM),objectName]}
# where name_list is the tokens required (if check_number is True)
# and number_list is a list of InputData.Quantity for the number required
self._requiredAsmbObject = [False, [], []]
self.assemblerDict = {} # {'class':[['class','type','name',instance]]}}
# list. first entry boolean flag. True if the XML parser must look for objects;
# second entry tuple.first entry list of object can be retrieved, second entry multiplicity (-1,-2,-n means optional (max 1 object,2 object, no number limit))
def whatDoINeed(self):
"""
This method is used mainly by the Simulation class at the Step construction stage.
It is used for inquiring the class, which is implementing the method, about the kind of objects the class needs to
be initialize.
@ In, None
@ Out, needDict, dict, dictionary of objects needed (class:tuple(object type{if None, Simulation does not check the type}, object name))
"""
if '_localWhatDoINeed' in dir(self):
needDict = self._localWhatDoINeed()
else:
needDict = {}
for val in self.assemblerObjects.values():
for value in val:
if value[0] not in needDict.keys():
needDict[value[0]] = []
needDict[value[0]].append((value[1],value[2]))
return needDict
def generateAssembler(self, initDict):
"""
This method is used mainly by the Simulation class at the Step construction stage.
It is used for sending to the instanciated class, which is implementing the method, the objects that have been requested through "whatDoINeed" method
It is an abstract method -> It must be implemented in the derived class!
@ In, initDict, dict, dictionary ({'mainClassName(e.g., Databases):{specializedObjectName(e.g.,DatabaseForSystemCodeNamedWolf):ObjectInstance}'})
@ Out, None
"""
if '_localGenerateAssembler' in dir(self):
self._localGenerateAssembler(initDict)
for key, value in self.assemblerObjects.items():
self.assemblerDict[key] = []
for entity, etype, name in value:
self.assemblerDict[key].append([entity, etype, name, initDict[entity][name]])
def _readAssemblerObjects(self, subXmlNode, found, testObjects):
"""
This method is used to look for the assemble objects in an subNodes of an xmlNode
@ In, subXmlNode, ET, the XML node that needs to be inquired
@ In, found, dict, a dictionary that check if all the tokens (requested) are found
@ In, testObjects, dict, a dictionary that contains the number of time a token (requested) has been found
@ Out, returnObject, tuple, tuple(found, testObjects) containing in [0], found -> a dictionary that check if all the tokens (requested) are found ;
[1], testObjects -> a dictionary that contains the number of time a token (requested) has been found
"""
for subNode in subXmlNode:
for token in self._requiredAsmbObject[1]:
if subNode.tag == token:
found[token] = True
if 'class' not in subNode.attrib.keys():
self.raiseAnError(IOError, 'In '+self.type+' Object ' + self.name+ ', block ' + subNode.tag + ' does not have the attribute class!!')
tag = subNode.tag.strip()
if tag not in self.assemblerObjects:
self.assemblerObjects[tag] = []
# check if already present
entry = [subNode.attrib['class'],subNode.attrib['type'],subNode.text.strip()]
if entry not in self.assemblerObjects.get(tag, []):
self.assemblerObjects[tag].append(entry)
testObjects[token] += 1
returnObject = found, testObjects
return returnObject
  def _readMoreXML(self, xmlNode):
    """
      Function to read the portion of the xml input that belongs to this specialized class
      and initialize some variables based on the inputs got. This method is used to automatically generate the Assembler 'request'
      based on the input of the daughter class.
      @ In, self, Any, an instance of the class to read into this.
      @ In, xmlNode, xml.etree.ElementTree.Element, XML element node that represents the portion of the input that belongs to this class
      @ Out, None
    """
    # record the node tag as this entity's type and pick up common attributes when present
    self.type = xmlNode.tag
    if 'name' in xmlNode.attrib:
      self.name = xmlNode.attrib['name']
    if 'verbosity' in xmlNode.attrib.keys():
      self.verbosity = xmlNode.attrib['verbosity'].lower()
    #XXX Once InputData checks numbers of subnodes, everything in this
    # if block can be removed
    if self._requiredAsmbObject[0]:
      # one counter per requested token, plus a found/not-found flag for each
      testObjects = {}
      for token in self._requiredAsmbObject[1]:
        testObjects[token] = 0
      found = dict.fromkeys(testObjects.keys(),False)
      # search both the node itself and each of its direct children for declarations
      found, testObjects = self._readAssemblerObjects(xmlNode, found, testObjects)
      for subNode in xmlNode:
        found, testObjects = self._readAssemblerObjects(subNode, found, testObjects)
      # verify each requested token appeared the expected number of times
      for i,token in enumerate(self._requiredAsmbObject[1]):
        quantity = self._requiredAsmbObject[2][i]
        if not InputData.checkQuantity(quantity, testObjects[token]):
          self.raiseAnError(IOError, 'the object '+token+' has wrong quantity Expected: '+str(quantity)+' Found: '+str(testObjects[token])+ ' in block '+self.name)
    # dispatch input parsing: prefer a _handleInput defined on the concrete class itself,
    # otherwise fall back to the legacy _localReadMoreXML hook if one exists.
    # NOTE(review): the __qualname__ comparison detects whether _handleInput was defined
    # directly on this class (not inherited) -- confirm this holds for decorated methods.
    if '_handleInput' in dir(self) and self._handleInput.__func__.__qualname__.split(".")[0] == self.__class__.__name__:
      #_handleInput in class and not from superclass
      #print(self, self.getInputSpecification, self.getInputSpecification.__func__.__qualname__, self._handleInput, self._handleInput.__func__.__qualname__)
      # getInputSpecification() returns a spec class; instantiate it, then parse the node
      paramInput = self.getInputSpecification()()
      paramInput.parseNode(xmlNode)
      self._handleInput(paramInput)
    elif '_localReadMoreXML' in dir(self):
      self._localReadMoreXML(xmlNode)
def addAssemblerObject(self, name, flag):
"""
Method to add required assembler objects to the _requiredAsmbObject dictionary.
@ In, name, string, the node name to search for (e.g. Function, Model)
@ In, flag, InputData.Quantity, the number of nodes to look for
@ Out, None
"""
self._requiredAsmbObject[0] = True
self._requiredAsmbObject[1].append(name)
self._requiredAsmbObject[2].append(flag)
def retrieveObjectFromAssemblerDict(self, objectMainClass, objectName, pop=False):
"""
Method to retrieve an object from the assembler
@ In, objectName, str, the object name that needs to be retrieved
@ In, objectMainClass, str, the object main Class name (e.g. Input, Model, etc.) of the object that needs to be retrieved
@ In, pop, bool, optional, if found, pop it out (i.e. remove it from the self.assemblerDict?). Default = False
@ Out, assemblerObject, instance, the instance requested (None if not found)
"""
assemblerObject = None
if objectMainClass in self.assemblerDict.keys():
for assemblerObj in self.assemblerDict[objectMainClass]:
if objectName == assemblerObj[2]:
assemblerObject = assemblerObj[3]
break
if pop and assemblerObject is not None:
self.assemblerDict[objectMainClass].remove(assemblerObj)
if assemblerObject is None:
self.raiseAnError(IOError, 'Required Object: ', objectName, 'is not found among', objectMainClass)
return assemblerObject
| [
2,
15069,
2177,
12350,
13485,
6682,
10302,
11,
11419,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,... | 2.929184 | 3,149 |
from rest_framework import serializers
from goods.models import SKU
from orders.models import OrderInfo, OrderGoods
class SKUSerializer(serializers.ModelSerializer):
    '''Serializer for SKU (stock keeping unit) model instances.

    NOTE(review): no Meta/fields declaration is visible in this snippet, so the
    serialized field set cannot be documented here -- confirm against the full file.
    '''
class OrderGoodsSerialzier(serializers.ModelSerializer):
    """
    Serializer for the order-goods (order line item) table.

    NOTE(review): the class name misspells "Serializer" as "Serialzier"; it is
    kept as-is because renaming would break existing references.
    """
    # nested serializer exposing the SKU attached to this order line
    sku = SKUSerializer()
class OrderSerializer(serializers.ModelSerializer):
    """
    Serializer for the order table: renders an order with its line items.
    """
    # read-only string form of the related user
    user = serializers.StringRelatedField(read_only=True)
    # read-only string form of the related delivery address
    address = serializers.StringRelatedField(read_only=True)
    # nested order lines; presumably 'skus' is the related_name on OrderGoods -- confirm
    skus = OrderGoodsSerialzier(many=True)
6738,
1334,
62,
30604,
1330,
11389,
11341,
198,
198,
6738,
7017,
13,
27530,
1330,
14277,
52,
198,
6738,
6266,
13,
27530,
1330,
8284,
12360,
11,
8284,
10248,
82,
198,
198,
4871,
14277,
2937,
48499,
7509,
7,
46911,
11341,
13,
17633,
32634... | 2.426087 | 230 |
#!/usr/bin/env python
#
# This application creates a Name Server, Event Server,
# Pyro server, and clients, and uses a custom event loop to keep them
# all running in parallel.
# The custom loop runs in its own server thread otherwise we
# can't run client invocations, obviously.
# The main loop calls Pyro objects to set some artificial
# properties. Those objects publish those events on a ES channel,
# on which an event listener is subscribed. That listener prints
# the events that it receives.
#
import time
import random
import string
import Pyro.naming
import Pyro.EventService.Server
from Pyro.EventService.Clients import Publisher, Subscriber
from Pyro.errors import *
import Pyro.util
import select
from threading import Thread
####################### EVENT SERVER LISTENER & PUBLISHER #################
################ Multi-purpose monolithic server. #####################
# handles all socket events from NS, ES, Pyro daemon.
############################# MAIN LOOP #############################
# Script entry point: builds the servers/clients and runs the custom event loop
# (main() is defined earlier in this example -- not visible in this excerpt).
if __name__=="__main__":
    main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
198,
2,
770,
3586,
8075,
257,
6530,
9652,
11,
8558,
9652,
11,
198,
2,
44954,
4382,
11,
290,
7534,
11,
290,
3544,
257,
2183,
1785,
9052,
284,
1394,
606,
198,
2,
477,
2491,
287,
... | 4.081081 | 259 |
# Make it run from the examples directory
import sys
sys.path.append("..")
from liquer import *
@first_command
@command
# Render the template with liquer's default delimiters ($query$ placeholders).
print (evaluate_template("""
Template example [[]]
- $hello$
- $hello/greet$
- $hello/greet-everybody$
"""))

# Render the same template with custom delimiters ([[query]] instead of $query$),
# passed as the optional start/end delimiter arguments.
print (evaluate_template("""
Template example $$$
- [[hello]]
- [[hello/greet]]
- [[hello/greet-everybody]]
""","[[","]]"))
2,
6889,
340,
1057,
422,
262,
6096,
8619,
198,
11748,
25064,
198,
17597,
13,
6978,
13,
33295,
7203,
492,
4943,
198,
198,
6738,
14756,
263,
1330,
1635,
198,
198,
31,
11085,
62,
21812,
198,
198,
31,
21812,
198,
198,
2,
351,
4277,
4672... | 2.921986 | 141 |
import numpy as np
| [
11748,
299,
32152,
355,
45941,
628
] | 3.333333 | 6 |