| content (string, lengths 1–1.04M) | input_ids (list, lengths 1–774k) | ratio_char_token (float64, 0.38–22.9) | token_count (int64, 1–774k) |
|---|---|---|---|
# For large systems, each fuel market tier is a category of capacity expansion, and
# it can be built fractionally. For small systems, each fuel market tier is one
# capacity-expansion project, and it must be fully built and/or activated each period.
# To do this, we add binary variables and constrain additions and activations to match them.
# Each tier has a capital cost and duration (locked in if it is developed),
# plus fixed and variable costs. Variable costs are already handled in fuel_markets.py;
# this module adds fixed costs (allowing some economies of scale, but assuming 100%
# salvage value at all times, i.e., projects can be deactivated without losing any
# capital cost). Later we may add a more complete capital cost system.
import os
from pyomo.environ import *
inf = float('inf')
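# A minimal sketch of the binary build/activation pattern described above, with
# hypothetical component names (the real module defines its own sets and params):
#
#   m = ConcreteModel()
#   m.BuildTier = Var(domain=Binary)           # 1 if the tier is developed
#   m.ActivateTier = Var(domain=Binary)        # 1 if the tier is active this period
#   m.TierCapAdded = Var(domain=NonNegativeReals)
#   # additions and activations are confined to match the binary decisions:
#   m.Build_Full_Tier = Constraint(expr=m.TierCapAdded == tier_size * m.BuildTier)
#   m.Activate_Only_If_Built = Constraint(expr=m.ActivateTier <= m.BuildTier)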
| [2, 1114, 1588, 3341, 11, 1123, 5252, 1910, 14249, 318, …] | 4.103093 | 194 |
from PyQt5.QtCore import pyqtSignal, Qt, QSize
from PyQt5.QtWidgets import QLabel, QListWidget, QListWidgetItem, QFileDialog
from PyQt5.QtWidgets import QWidget, QVBoxLayout, QHBoxLayout, QStyle, QSizePolicy, QPushButton, QAbstractItemView
# For testing individual widget
if __name__ == "__main__":
import sys
from PyQt5.QtWidgets import QApplication
app = QApplication(sys.argv)
    main = FilePicker()  # FilePicker is the widget defined in this module (not shown in this excerpt)
    main.show()
    sys.exit(app.exec_())
| [6738, 9485, 48, 83, 20, 13, 48, 83, 14055, 1330, …] | 2.622857 | 175 |
"""
This module contains some utility callbacks for Keras training.
"""
# System
from time import time
# Externals
import tensorflow as tf
class TimingCallback(tf.keras.callbacks.Callback):
    """A Keras Callback which records the time of each epoch in self.times"""

    def on_train_begin(self, logs=None):
        self.times = []

    def on_epoch_begin(self, epoch, logs=None):
        self._epoch_start = time()

    def on_epoch_end(self, epoch, logs=None):
        self.times.append(time() - self._epoch_start)
#class LearningRateScheduleCallback(tf.keras.callbacks.Callback):
# def __init__(self, multiplier,
# start_epoch=0, end_epoch=None,
# momentum_correction=True):
# super().__init__()
# self.start_epoch = start_epoch
# self.end_epoch = end_epoch
# self.momentum_correction = momentum_correction
# self.initial_lr = None
# self.restore_momentum = None
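# Hypothetical usage of TimingCallback (model and data names are placeholders):
#
#   timing_cb = TimingCallback()
#   model.fit(x_train, y_train, epochs=4, callbacks=[timing_cb])
#   print(timing_cb.times)  # per-epoch wall-clock durations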
| [37811, 198, 1212, 8265, 4909, 617, 10361, 869, 10146, 329, …] | 2.469314 | 277 |
from django.apps import AppConfig
| [6738, 42625, 14208, 13, 18211, 1330, 2034, 16934, 628] | 3.888889 | 9 |
"""
Otter twisted application plugins for the various services.
"""
from twisted.application.service import ServiceMaker
OtterMetrics = ServiceMaker(
"Otter Metrics Collector",
"otter.metrics",
"Collects metrics for a region on an interval basis",
"otter-metrics"
)
| [37811, 198, 46, 83, 353, 19074, 3586, 20652, 329, 262, …] | 3.166667 | 90 |
import os
import shutil
import requests
| [11748, 28686, 198, 11748, 4423, 346, 198, 11748, 7007, 628, 628, 628, 628, 628, 198] | 3.333333 | 15 |
import timeit
from typing import Any, Dict, Iterable, Mapping, Callable, Optional, Set
from models.solver import Solver
import pandas as pd
def get_stats_df(
solvers: Iterable[Solver],
constructive: Optional[Callable[..., Set[int]]],
local_search: Callable[..., Set[int]],
args: Mapping[str, Any]
) -> pd.DataFrame:
'''
TODO: Refactor to include Solution class.
Formats the statistics of each solver as a DataFrame.
Both `constructive` and `local_search` need to be a method of `Solver` class,
e.g. `constructive=Solver.pdp_based`.
`args` is a dictionary of custom arguments for local search methods.
'''
data = list()
for solver in solvers:
start = timeit.default_timer()
if constructive:
constructive(solver)
constructive_name = constructive.__name__
else:
solver.set_random_solution()
constructive_time = 0
constructive_name = "random"
constructive_time = timeit.default_timer() - start
constructive_of = solver.objective_function
start = timeit.default_timer()
if local_search:
local_search(solver, **args)
local_search_time = timeit.default_timer() - start
strategy = 'first' if args['is_first'] else 'best'
local_search_name = f"{local_search.__name__}_{strategy}_{args['k']}"
data.append((
solver.instance.n,
solver.p,
solver.alpha,
constructive_name,
constructive_of,
constructive_time,
local_search_name,
solver.objective_function,
local_search_time
))
common_cols = ('heuristic', 'OF', 'seconds')
df = pd.DataFrame(
data,
columns=(
'n', 'p', 'alpha',
*common_cols * 2
)
)
params = df.loc[:, ('n', 'p', 'alpha')]
# create column multiindex
params = pd.concat({'instance': params}, axis=1)
stats = (
# create column multiindexes
pd.concat(
{
'constructive': df.iloc[:, [3, 4, 5]],
'local search': df.iloc[:, [6, 7, 8]]
},
axis=1
)
.join(params)
# reorder multiindexes and columns
.loc[:, (
('instance', 'constructive', 'local search'),
('n', 'p', 'alpha', *common_cols)
)]
)
stats['improvement', 'absolute'] = stats['constructive', 'OF'] - stats['local search', 'OF']
stats['improvement', 'relative %'] = (stats['improvement', 'absolute'] / stats['constructive', 'OF']) * 100
return stats
def add_improvement_stats(dataframe: pd.DataFrame) -> pd.DataFrame:
'''
Adds how many improvements were made and the average of results.
`dataframe` needs to be the return value of `get_stats_df`,
    but filtered by instance parameters.
'''
stats = dataframe.copy()
improved = [
'', '', '', '', '', '', '',
stats[stats['improvement', 'absolute'] > 0].count()[0],
'', '', ''
]
average = [
stats[top, sub].mean()
if sub in {'OF', 'seconds', 'absolute', 'relative %'}
else ''
for top, sub in stats.columns
]
stats.loc['number improved'] = improved
stats.loc['average'] = average
return stats
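# Hypothetical usage, following the docstring's `constructive=Solver.pdp_based`
# convention (solver construction and the local-search name are placeholders):
#
#   stats = get_stats_df(
#       solvers,                            # an iterable of Solver instances
#       constructive=Solver.pdp_based,
#       local_search=Solver.interchange,    # hypothetical method name
#       args={'is_first': True, 'k': 2},    # keys read by get_stats_df above
#   )
#   summary = add_improvement_stats(stats)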
| [11748, 640, 270, 198, 6738, 19720, 1330, 4377, 11, 360, …] | 2.19783 | 1,567 |
"""This module provides helper functionality with JWT."""
from datetime import datetime
import jwt
from app.utils.errors import TokenError
def generate_token(secret_key, private_claims=None, exp_days=None):
"""Return encoded json web token."""
token_exp = None
now = int(datetime.now().timestamp())
payload = {"iat": now}
if exp_days is not None:
token_exp = now + (exp_days * 60 * 60 * 24)
payload.update({"exp": token_exp})
if private_claims:
payload.update(private_claims)
token = jwt.encode(payload, secret_key).decode("UTF-8")
return token, token_exp
def decode_token(token, secret_key):
"""Return decoded payload from json web token."""
try:
return jwt.decode(token, secret_key)
except jwt.DecodeError:
raise TokenError("The token is invalid.")
except jwt.ExpiredSignatureError:
raise TokenError("The token has expired.")
| [37811, 1212, 8265, 3769, 31904, 11244, 351, 449, 39386, 526, …] | 2.730994 | 342 |
# Python Standard Library Imports
import json
# Third Party (PyPI) Imports
import requests
import rollbar
# HTK Imports
from htk.lib.redfin.constants import *
from htk.utils.urls import build_url_with_query_params
| [2, 11361, 8997, 10074, 1846, 3742, 198, 11748, 33918, 198, …] | 3.191176 | 68 |
import numpy as np
import imutils
import cv2
import argparse
| [11748, 299, 32152, 355, 45941, 198, 11748, 545, 26791, 198, 11748, 269, 85, 17, 198, 11748, 1822, 29572, 628] | 3.263158 | 19 |
#!/usr/bin/env python
import sys
import h5py
import numpy as np
from rdkit import Chem, DataStructs
from rdkit.Chem import rdMolDescriptors as rdmd
from tqdm import tqdm
from functools import wraps
from time import time
def timing(f):
"""
Decorator to measure execution time, adapted from
# https://medium.com/pythonhive/python-decorator-to-measure-the-execution-time-of-methods-fa04cb6bb36d
# https://codereview.stackexchange.com/questions/169870/decorator-to-measure-execution-time-of-a-function
"""
    @wraps(f)
    def wrapper(*args, **kwargs):
        start = time()
        result = f(*args, **kwargs)
        print(f"func: {f.__name__} took: {time() - start:.4f} sec")
        return result
    return wrapper
@timing
def make_np_array(lst, dtype=np.float32):
"""
Convert a list to a numpy array
:param lst: input list
:param dtype: data type
:return: output array
"""
return np.array(lst, dtype=dtype)
@timing
def save_data(fp_array, smiles_list, name_list, outfile_name):
"""
Write the fingerprints to an hdf5 file
:param fp_array: numpy array with fingerprints
:param smiles_list: list of SMILES
:param name_list: list of molecule names
:param outfile_name: output file name
:return: None
"""
h5f = h5py.File(outfile_name, 'w')
dt = h5py.special_dtype(vlen=bytes)
h5f.create_dataset('fp_list', data=fp_array)
h5f.create_dataset('smiles_list', (len(smiles_list), 1), dt, smiles_list)
h5f.create_dataset('name_list', (len(name_list), 1), dt, name_list)
h5f.close()
@timing
def generate_fingerprints(infile_name):
"""
    Generate fingerprints from an input file; currently generates a 256-bit Morgan fingerprint
:param infile_name: input file name
:return: lists with fingerprints, SMILES, and molecule names
"""
ifs = open(infile_name)
fp_list = []
smiles_list = []
name_list = []
for line in tqdm(ifs):
toks = line.strip().split(" ", 1)
if len(toks) >= 2:
smiles, name = toks
mol = Chem.MolFromSmiles(smiles)
if mol:
fp = rdmd.GetMorganFingerprintAsBitVect(mol, 2, 256)
arr = np.zeros((1,))
DataStructs.ConvertToNumpyArray(fp, arr)
fp_list.append(arr)
smiles_list.append(smiles.encode("ascii", "ignore"))
name_list.append(name.encode("ascii", "ignore"))
return fp_list, smiles_list, name_list
@timing
def main(input_smiles_file, output_fp_file):
"""
Generate fingerprints and write to an hdf5 file
:return:
"""
fp_list, smiles_list, name_list = generate_fingerprints(input_smiles_file)
outfile_name = output_fp_file
fp_array = make_np_array(fp_list)
save_data(fp_array, smiles_list, name_list, outfile_name)
if __name__ == "__main__":
if len(sys.argv) != 3:
print(f"usage: {sys.argv[0]} infile.smi outfile.h5")
sys.exit(1)
main(sys.argv[1], sys.argv[2])
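# Reading the output back (hypothetical sketch mirroring save_data above):
#
#   with h5py.File("outfile.h5", "r") as h5f:
#       fp_array = h5f["fp_list"][:]
#       smiles = [s[0].decode("ascii") for s in h5f["smiles_list"]]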
| [2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 198, …] | 2.285944 | 1,245 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2017 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from math import ceil, log, pow
if sys.version < '3':
PY3 = False
else:
PY3 = True
def get_SHA1_bin(word):
"""
Return SHA1 hash of any string
:param word:
:return:
"""
from hashlib import sha1
if PY3 and isinstance(word, str):
word = word.encode('utf-8')
hash_s = sha1()
hash_s.update(word)
return bin(int(hash_s.hexdigest(), 16))[2:].zfill(160)
def get_index(binstr, end_index=160):
"""
Return the position of the first 1 bit
from the left in the word until end_index
:param binstr:
:param end_index:
:return:
"""
res = -1
try:
res = binstr.index('1') + 1
except ValueError:
res = end_index
return res
class HyperLogLog(object):
"""
Implements a HyperLogLog
"""
__ALPHA16 = 0.673
__ALPHA32 = 0.697
__ALPHA64 = 0.709
def __call__(self, buffer, item):
"""
Add the items to the HyperLogLog.
:param buffer:
:param item:
:return:
"""
items = [str(item), ]
if self._splitter is not None:
items = str(item).split(self._splitter)
for item in items:
binword = get_SHA1_bin(item)
pos = int(binword[:self._k], 2)
# retrieve the position of leftmost 1
aux = get_index(binword[self._k:], 160 - self._k)
# set its own register value to maximum value seen so far
buffer[pos] = max(aux, buffer[pos])
def _estimate(self, buffer):
"""
Return the estimate of the cardinality
        :return: estimate of the cardinality
"""
m = self._bucket_number
raw_e = self._alpha * pow(m, 2) / sum([pow(2, -x) for x in buffer])
if raw_e <= 5 / 2.0 * m:
v = buffer.count(0)
if v != 0:
return m * log(m / float(v), 2)
else:
return raw_e
elif raw_e <= 1 / 30.0 * 2 ** 160:
return raw_e
else:
return -2 ** 160 * log(1 - raw_e / 2.0 ** 160, 2)
def merge(self, buffer, other_hyper_log_log):
"""
Merge the HyperLogLog
:param other_hyper_log_log:
:return:
"""
for i in range(len(buffer)):
buffer[i] = max(buffer[i], other_hyper_log_log[i])
def _word_size_calculator(self, n_max):
"""
Estimate the size of the memory units, using the maximum cardinality as an argument
:param n_max: maximum cardinality
:return: size of the memory units
"""
return int(ceil(log(log(n_max, 2), 2)))
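# The raw estimate in _estimate above is the standard HyperLogLog estimator:
# with m registers M_1..M_m,
#
#     E = alpha_m * m**2 / sum(2**(-M_j) for j in 1..m)
#
# plus a small-range correction m * log(m / V) when E <= 5m/2 and V > 0
# (V = number of empty registers), and a large-range correction near 2**160
# because a 160-bit SHA-1 hash is used here. Note that the reference algorithm
# (Flajolet et al., 2007) states these corrections with the natural logarithm,
# while this implementation uses base-2 logs.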
| [2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, …] | 2.263736 | 1,456 |
"""Plot templates for the FOOOF module."""
import numpy as np
from fooof.core.modutils import safe_import, check_dependency
plt = safe_import('.pyplot', 'matplotlib')
###################################################################################################
###################################################################################################
@check_dependency(plt, 'matplotlib')
def plot_spectrum(freqs, power_spectrum, plt_log=False, ax=None, **kwargs):
"""Plot a line plot of a power-spectrum.
Parameters
----------
freqs : 1d array
X-axis data, frequency values.
power_spectrum : 1d array
        Y-axis data, power values of the spectrum.
plt_log : boolean, optional
Whether or not to plot the frequency axis in log space. default: False
ax : matplotlib.Axes, optional
Figure axes upon which to plot.
**kwargs
Keyword arguments to be passed to the plot call.
"""
# Create plot axes, if not provided
if not ax:
_, ax = plt.subplots(figsize=(12, 10))
# Set frequency vector, logged if requested
plt_freqs = np.log10(freqs) if plt_log else freqs
# Create the plot
ax.plot(plt_freqs, power_spectrum, **kwargs)
# Aesthetics and axis labels
ax.set_xlabel('Frequency', fontsize=20)
ax.set_ylabel('Power', fontsize=20)
ax.tick_params(axis='both', which='major', labelsize=16)
ax.grid(True)
# If labels were provided, add a legend
if ax.get_legend_handles_labels()[0]:
ax.legend(prop={'size': 16})
@check_dependency(plt, 'matplotlib')
def plot_scatter_1(data, label, title=None, x_val=0, ax=None):
"""Plot a scatter plot with the given data.
Parameters
----------
data : 1d array
Data to plot.
label : str
Label for the data, to be set as the y-axis label.
title : str, optional
Title for the plot.
x_val : int, optional
Position along the x-axis to plot set of data. default: 0
ax : matplotlib.Axes, optional
Figure axes upon which to plot.
Notes
-----
Data is jittered slightly, for visualization purposes (deviations on x-axis are meaningless).
"""
if not ax:
_, ax = plt.subplots()
# Create x-axis data, with small jitter for visualization purposes
x_data = np.ones_like(data) * x_val + np.random.normal(0, 0.025, data.shape)
ax.scatter(x_data, data, s=36, alpha=0.5)
if label:
ax.set_ylabel(label, fontsize=16)
if title:
ax.set_title(title, fontsize=20)
plt.xticks([x_val], [label])
ax.tick_params(axis='x', labelsize=16)
ax.tick_params(axis='y', labelsize=10)
ax.set_xlim([-0.5, 0.5])
@check_dependency(plt, 'matplotlib')
def plot_scatter_2(data_0, label_0, data_1, label_1, title=None, ax=None):
"""Plot a scatter plot with two y-axes, with the given data.
Parameters
----------
data_0 : 1d array
Data to plot on the first axis.
label_0 : str
Label for the data on the first axis, to be set as the y-axis label.
data_1 : 1d array
Data to plot on the second axis.
    label_1 : str
        Label for the data on the second axis, to be set as the y-axis label.
title : str
Title for the plot.
ax : matplotlib.Axes, optional
Figure axes upon which to plot.
Notes
-----
Data is jittered slightly, for visualization purposes (deviations on x-axis are meaningless).
"""
if not ax:
_, ax = plt.subplots()
ax1 = ax.twinx()
plot_scatter_1(data_0, label_0, ax=ax)
plot_scatter_1(data_1, label_1, x_val=1, ax=ax1)
if title:
ax.set_title(title, fontsize=20)
ax.set_xlim([-0.5, 1.5])
plt.xticks([0, 1], [label_0, label_1])
ax.tick_params(axis='x', labelsize=16)
@check_dependency(plt, 'matplotlib')
def plot_hist(data, label, title=None, n_bins=25, x_lims=None, ax=None):
"""Plot a histogram with the given data.
Parameters
----------
data : 1d array
Data to plot.
label : str
Label for the data, to be set as the y-axis label.
title : str, optional
Title for the plot.
n_bins : int, optional
        Number of bins to use for the histogram. Default: 25
x_lims : list of float
X-axis limits for the plot.
ax : matplotlib.Axes, optional
Figure axes upon which to plot.
"""
if not ax:
_, ax = plt.subplots()
ax.hist(data[~np.isnan(data)], n_bins, alpha=0.8)
ax.set_xlabel(label, fontsize=16)
ax.set_ylabel('Count', fontsize=16)
if x_lims:
ax.set_xlim(x_lims)
if title:
ax.set_title(title, fontsize=20)
ax.tick_params(axis='both', labelsize=12)
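# Hypothetical usage with synthetic data:
#
#   freqs = np.linspace(1, 50, 200)
#   powers = 1. / freqs                      # a simple 1/f spectrum
#   plot_spectrum(freqs, powers, plt_log=True, label='1/f power')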
| [37811, 43328, 24019, 329, 262, 376, 6684, 19238, 8265, 526, …] | 2.486656 | 1,911 |
#!/usr/bin/env python
# -*- coding: utf-8; py-indent-offset:4 -*-
###############################################################################
#
# Copyright (C) 2015, 2016, 2017 Daniel Rodriguez
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import math
from backtrader import Analyzer
from backtrader.mathsupport import average, standarddev
from backtrader.utils import AutoOrderedDict
class SQN(Analyzer):
'''SQN or SystemQualityNumber. Defined by Van K. Tharp to categorize trading
systems.
- 1.6 - 1.9 Below average
- 2.0 - 2.4 Average
- 2.5 - 2.9 Good
- 3.0 - 5.0 Excellent
- 5.1 - 6.9 Superb
- 7.0 - Holy Grail?
The formula:
- SquareRoot(NumberTrades) * Average(TradesProfit) / StdDev(TradesProfit)
The sqn value should be deemed reliable when the number of trades >= 30
Methods:
- get_analysis
Returns a dictionary with keys "sqn" and "trades" (number of
considered trades)
'''
alias = ('SystemQualityNumber',)
def create_analysis(self):
        '''Replace default implementation to instantiate an AutoOrderedDict
rather than an OrderedDict'''
self.rets = AutoOrderedDict()
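# A worked sketch of the formula above (standalone, with hypothetical PnL values):
#
#   trades = [120.0, -40.0, 85.0, 60.0, -25.0]
#   sqn = math.sqrt(len(trades)) * average(trades) / standarddev(trades)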
| [2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, …] | 3.097978 | 643 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.3 on 2019-01-13 06:18
from __future__ import unicode_literals
from django.db import migrations, models
| [2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, …] | 2.719298 | 57 |
from gbdxtools import Interface
from gbdxtools.s3 import S3
from auth_mock import get_mock_gbdx_session
import vcr
import os
import tempfile
import unittest
"""
How to use the mock_gbdx_session and vcr to create unit tests:
1. Add a new test that is dependent upon actually hitting GBDX APIs.
2. Decorate the test with @vcr appropriately, supply a yaml file path to gbdxtools/tests/unit/cassettes
note: a yaml file will be created after the test is run
3. Replace "dummytoken" with a real gbdx token after running test successfully
4. Run the tests (existing test shouldn't be affected by use of a real token). This will record a "cassette".
5. Replace the real gbdx token with "dummytoken" again
6. Edit the cassette to remove any possibly sensitive information (s3 creds for example)
"""
cassette_name = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'cassettes', 'test_s3_download.yaml')
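# A hypothetical test following steps 1-2 above:
#
#   @vcr.use_cassette(cassette_name)
#   def test_s3_download():
#       gbdx = Interface(gbdx_connection=get_mock_gbdx_session(token='dummytoken'))
#       ...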
| [6738, 308, 17457, 742, 10141, 1330, 26491, 198, 6738, 308, …] | 3.238434 | 281 |
#!/home/sam/shared-space/linux-system/anaconda3/bin/python
from math import factorial
import sympy as sp
from bistochastic import generate_matrix
import fpiter
from polynomials import product_polynomial
from functionals import elementary_symmetric_differential_operator
import numpy.random
SEED = 1
numpy.random.seed(SEED)
if __name__ == '__main__':
main()
| [2, 48443, 11195, 14, 37687, 14, 28710, 12, 13200, 14, …] | 3.067797 | 118 |
from pypy.rpython.rmodel import inputconst
from pypy.rpython.ootypesystem import ootype
from pypy.rpython.ootypesystem.rclass import InstanceRepr, mangle, OBJECT
from pypy.rpython.rvirtualizable2 import AbstractVirtualizable2InstanceRepr
VABLERTI = OBJECT
| [6738, 279, 4464, 88, 13, 81, 29412, 13, 81, 19849, …] | 3 | 86 |
import random | [11748, 4738] | 6.5 | 2 |
import pandas as pd
from laptimize.branch_and_bound_solver import BranchAndBoundSolver
from laptimize.lap_model import LAPModel
from laptimize.log import LogFactory
class Solver:
"""This class does solving non linear optimization problems using piecewise linear approximated programming
branch and bond technique"""
def solve(self):
"""
        solve the piecewise linear LP problems and create sub LP problems using the branch-and-bound technique
Parameter
--------
constraints : dict
have all the problem related information
ex: {'objective':{'x1':lambda x: 12*x,'x2':lambda x: 7*x - x**2,'value':None},
'constraints_1': {'x1':lambda x:-2*(x**4), 'x2':lambda x: -x ,'value':-2},
'constraints_2': {'x1':lambda x: 2*x, 'x2':lambda x: x ,'value' :3},
'capacity': {'x1':[0,2], 'x2':[0,3],'value':None}}
Returns
-------
lp_intervals : dict
Approximated solution for decision variables
ex: {'x1':2.0,'X2':1.746}
"""
try:
self.constraints = self.constraints.fillna(0)
solution_df = pd.DataFrame()
piecewise_lp, segment, curve = self.lap_model.model_solver(self.constraints, self.partition_len)
segment_0 = segment.copy()
            # create a combined frame including the segment and curve data
combine_segment_curve = pd.concat([segment, curve], axis=1)
lb0, ub0, k, k_lower, k_upper, segment_key = BranchAndBoundSolver(self.error).create_subproblems(
piecewise_lp, combine_segment_curve, self.constraints)
solution_df = solution_df.append({'iteration_no': 0, 'sub_problem_no': 0, 'piecewise_lp': piecewise_lp,
'segment': segment, 'curve': curve,
'lb': lb0, 'ub': ub0, 'k': k, 'k_lower': k_lower, 'k_upper': k_upper,
'branching_node': segment_key}, ignore_index=True)
global_df = pd.DataFrame()
while (len(solution_df)) > 0 and (len(solution_df) <= 100):
solution_df, global_df = self.sub_problem_solve(solution_df, combine_segment_curve, global_df)
global_solution = global_df
global_solution['solution'] = pd.Series((dict() for i in range(len(global_solution))),
index=global_solution.index)
global_solution = global_df.sort_values(['lb']).reset_index(drop=True)
self.constraints = self.constraints.drop(['capacity'], axis=1)
self.constraints = self.constraints.drop(['value'], axis=0)
for index, row in global_solution.iterrows():
lap_intervals = self.final_solution(row.piecewise_lp, segment_0)
global_solution.at[index, 'solution'] = lap_intervals
lap_intervals = global_solution.solution[0]
lap_intervals['obj_value'] = global_solution.lb[0]
return lap_intervals
except Exception as err:
self.logger.info('solve method ended with error ')
self.logger.error(str(err))
raise
def sub_problem_solve(self, solution_df, combine_segment_curve, global_df):
"""
        create and solve all the sub problems for each branching node
Parameter
--------
solution_df: pandas data frame
includes all the details related to the sub problems solutions
constraints: pandas data frame
problem definition
combine_segment_curve: pandas data frame
all piece wise linear segments and respective function values
global_df: pandas data frame
includes all the local and global solutions
Returns
-------
solution_df: pandas data frame
updated solution_df
global_df: pandas data frame
updated global_df
"""
iteration_no = 1
for index, node in solution_df.iterrows():
if (node.ub - node.lb) > self.error:
branches = [node.k, node.k_lower, node.k_upper]
sub_problem_no = 1
for branch in branches:
if len(branch) >= 2:
piecewise_lp1, segment1, curve1 = self.lap_model.initialize(
segment=node.segment,
curve=node.curve).global_solver(node.branching_node, branch, self.constraints)
lb1, ub1, k1, k_lower1, k_upper1, segment_key1 = BranchAndBoundSolver(
self.error).create_subproblems(
piecewise_lp1, combine_segment_curve, self.constraints)
if (node.lb < ub1 <= node.ub) | (node.lb <= lb1 < node.ub):
ub1 = min(node.ub, ub1)
lb1 = max(node.lb, lb1)
solution_df = solution_df.append(
{'iteration_no': iteration_no, 'sub_problem_no': sub_problem_no,
'piecewise_lp': piecewise_lp1,
'segment': segment1, 'curve': curve1,
'lb': lb1, 'ub': ub1, 'k': k1, 'k_lower': k_lower1, 'k_upper': k_upper1,
'branching_node': segment_key1}, ignore_index=True)
sub_problem_no += 1
iteration_no += 1
else:
global_df = global_df.append(node, ignore_index=True)
solution_df.drop([index], inplace=True)
solution_df = solution_df.reset_index(drop=True)
return solution_df, global_df
def final_solution(self, piecewise_lp, segment):
"""
calculate the final solutions for the decision variables using piecewise linear variables
Parameters
----------
piecewise_lp: dict
final lp solution
constraints: pandas data frame
problem definition
segment: pandas data frame
piecewise linear segments
Return
------
lap_value: dict
linear approximated solution for decision variables
"""
try:
lap_value = dict()
for _, lp_constraint in self.constraints.iterrows():
var_value = 0
lp_allocation = piecewise_lp[lp_constraint.name]
for key in lp_allocation:
try:
var_value = var_value + segment.loc[key].segment * lp_allocation[key].value()
except:
var_value = var_value + segment.loc[key].segment * lp_allocation[key]
lap_value[lp_constraint.name] = var_value
return lap_value
except Exception as err:
self.logger.info('final_solution method ended with error ')
self.logger.error(str(err))
raise
| [11748, 19798, 292, 355, 279, 67, 198, 198, 6738, 10882, …] | 2.003372 | 3,559 |
from unittest import TestCase
import pandas as pd
from moonstone.parsers.transform.base import TransformBase
| [6738, 555, 715, 395, 1330, 6208, 20448, 198, 198, 11748, 19798, 292, 355, 279, 67, 198, 198, 6738, 8824, 6440, 13, 79, 945, 364, 13, 35636, 13, 8692, 1330, 26981, 14881, 628] | 3.5 | 32 |
from typing import Any, Dict, Iterable, List, Optional
from fastapi import Depends
from rubrix.server.commons.es_wrapper import ElasticsearchWrapper, create_es_wrapper
from rubrix.server.commons.helpers import unflatten_dict
from rubrix.server.commons.settings import settings
from rubrix.server.datasets.dao import (
DATASETS_RECORDS_INDEX_NAME,
dataset_records_index,
)
from rubrix.server.datasets.model import DatasetDB
from rubrix.server.tasks.commons.dao.model import RecordSearch, RecordSearchResults
from rubrix.server.tasks.commons.es_helpers import (
EsRecordDataFieldNames,
aggregations,
parse_aggregations,
)
from stopwordsiso import stopwords
SUPPORTED_LANGUAGES = ["es", "en", "fr", "de"]
DATASETS_RECORDS_INDEX_TEMPLATE = {
"settings": {
"number_of_shards": settings.es_records_index_shards,
"number_of_replicas": settings.es_records_index_replicas,
"analysis": {
"analyzer": {
"multilingual_stop_analyzer": {
"type": "stop",
"stopwords": [w for w in stopwords(SUPPORTED_LANGUAGES)],
}
}
},
},
"index_patterns": [DATASETS_RECORDS_INDEX_NAME.format("*")],
"mappings": {
"properties": {
"event_timestamp": {"type": "date"},
EsRecordDataFieldNames.words: {
"type": "text",
"fielddata": True,
"analyzer": "multilingual_stop_analyzer",
},
# TODO: Not here since is task dependant
"tokens": {"type": "text"},
},
"dynamic_templates": [
# TODO: Not here since is task dependant
{"inputs": {"path_match": "inputs.*", "mapping": {"type": "text"}}},
{
"status": {
"path_match": "*.status",
"mapping": {
"type": "keyword",
},
}
},
{
"predicted": {
"path_match": "*.predicted",
"mapping": {
"type": "keyword",
},
}
},
{
"strings": {
"match_mapping_type": "string",
"mapping": {
"type": "keyword",
"ignore_above": 128, # Avoid bulk errors with too long keywords
# Some elasticsearch versions includes automatically raw fields, so
# we must limit those fields too
"fields": {"raw": {"type": "keyword", "ignore_above": 128}},
},
}
},
],
},
}
class DatasetRecordsDAO:
"""Datasets records DAO"""
def init(self):
"""Initializes dataset records dao. Used on app startup"""
self._es.create_index_template(
name=DATASETS_RECORDS_INDEX_NAME,
template=DATASETS_RECORDS_INDEX_TEMPLATE,
force_recreate=True,
)
def add_records(
self,
dataset: DatasetDB,
records: List[Dict[str, Any]],
) -> int:
"""
Add records to dataset
Parameters
----------
dataset:
The dataset
records:
The list of records
Returns
-------
The number of failed records
"""
return self._es.add_documents(
index=dataset_records_index(dataset.id),
documents=records,
doc_id=lambda r: r.get("id"),
)
def search_records(
self,
dataset: DatasetDB,
search: Optional[RecordSearch] = None,
size: int = 100,
record_from: int = 0,
) -> RecordSearchResults:
"""
SearchRequest records under a dataset given a search parameters.
Parameters
----------
dataset:
The dataset
search:
The search params
size:
Number of records to retrieve (for pagination)
record_from:
            Record from which to retrieve records (for pagination)
Returns
-------
The search result
"""
search = search or RecordSearch()
records_index = dataset_records_index(dataset.id)
metadata_fields = self._es.get_field_mapping(
index=records_index, field_name="metadata.*"
)
search_aggregations = (
{
**(search.aggregations or {}),
**aggregations.predicted_as(),
**aggregations.predicted_by(),
**aggregations.annotated_as(),
**aggregations.annotated_by(),
**aggregations.status(),
**aggregations.predicted(),
**aggregations.words_cloud(),
**aggregations.score(), # TODO: calculate score directly from dataset
**aggregations.custom_fields(metadata_fields),
}
if record_from == 0
else None
)
if record_from > 0:
search_aggregations = None
es_query = {
"size": size,
"from": record_from,
"query": search.query or {"match_all": {}},
"sort": [{"_id": {"order": "asc"}}], # TODO: Sort by event timestamp?
"aggs": search_aggregations or {},
}
results = self._es.search(index=records_index, query=es_query, size=size)
hits = results["hits"]
total = hits["total"]
docs = hits["hits"]
search_aggregations = results.get("aggregations", {})
result = RecordSearchResults(
total=total,
records=[doc["_source"] for doc in docs],
)
if search_aggregations:
parsed_aggregations = parse_aggregations(search_aggregations)
parsed_aggregations = unflatten_dict(
parsed_aggregations, stop_keys=["metadata"]
)
result.words = parsed_aggregations.pop("words")
result.metadata = parsed_aggregations.pop("metadata", {})
result.aggregations = parsed_aggregations
return result
def scan_dataset(
self,
dataset: DatasetDB,
search: Optional[RecordSearch] = None,
) -> Iterable[Dict[str, Any]]:
"""
Iterates over a dataset records
Parameters
----------
dataset:
The dataset
search:
The search parameters. Optional
Returns
-------
An iterable over found dataset records
"""
search = search or RecordSearch()
es_query = {
"query": search.query,
}
docs = self._es.list_documents(
dataset_records_index(dataset.id), query=es_query
)
for doc in docs:
yield doc["_source"]
_instance: Optional[DatasetRecordsDAO] = None
def dataset_records_dao(
es: ElasticsearchWrapper = Depends(create_es_wrapper),
) -> DatasetRecordsDAO:
"""
Creates a dataset records dao instance
Parameters
----------
es:
        The elasticsearch wrapper dependency
"""
global _instance
if not _instance:
_instance = DatasetRecordsDAO(es)
return _instance
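# Hypothetical FastAPI route wiring the dependency above (path and names are placeholders):
#
#   from fastapi import APIRouter
#   router = APIRouter()
#
#   @router.get("/datasets/{name}/records")
#   def list_records(name: str, dao: DatasetRecordsDAO = Depends(dataset_records_dao)):
#       ...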
| [6738, 19720, 1330, 4377, 11, 360, 713, 11, 40806, 540, …] | 1.993303 | 3,733 |
import time
import unittest
import os
import numpy as np
import torch.nn.functional as F
from torch import nn
import torch
from fastNLP import DataSet
from fastNLP import Instance
from fastNLP import BCELoss
from fastNLP import CrossEntropyLoss
from fastNLP import AccuracyMetric
from fastNLP import SGD
from fastNLP import Trainer
from fastNLP.models.base_model import NaiveClassifier
from fastNLP import TorchLoaderIter
| [11748, 640, 198, 11748, 555, 715, 395, 198, 11748, 28686, …] | 3.471545 | 123 |
# Copyright (c) 2016 Shreyas Kulkarni (shyran@gmail.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import inspect
def virtual(func):
"""
    annotation to mark a method as available for override;
    any method that does not have this annotation cannot be overridden with the @overrides(cls) annotation
"""
# hack to throw exception if the virtual function is not inside a class
# ref: http://stackoverflow.com/questions/8793233/python-can-a-decorator-determine-if-a-function-is-being-defined-inside-a-class
frames = inspect.stack()
if not (len(frames) > 2 and frames[2][4][0].strip().startswith('class ')):
        raise OverrideError("function '%s' should be inside a class to be virtual" % func.__name__)
func.func_doc = "@virtual available for override\n" + (func.func_doc or '')
func.__virtual__ = True
return func
##### TEST CODE #######
# class myclass(object):
# def __init__(self):
# pass
#
# @virtual
# def add(self):
# pass
#
# @virtual
# def delete(self):
# pass
#
# def edit(self):
# pass
#
#
# class anotherclass(myclass):
# @overrides(myclass)
# def delete(self):
# pass
#
# @overrides(myclass)
# def add(self):
# pass
#
#
# @virtual
# def myfunc():
# """
# this is the
# docstring
# of my func
# """
# print("inside myfunc")
#
#
# #@overrides(myclass)
# def add():
# """i am anotherfunc"""
# print("inside anotherfunc")
#
#
# if __name__ == "__main__":
# print(myfunc.func_doc)
# print(add.func_doc)
# print(anotherclass.delete.func_doc)
| [2, 15069, 357, 66, 8, 1584, 911, 4364, 292, 509, …] | 2.80597 | 938 |
from rest_framework import serializers
from .models import School, Student, Activity, Assignment
| [6738, 1334, 62, 30604, 1330, 11389, 11341, 198, 6738, 764, 27530, 1330, 3961, 11, 13613, 11, 24641, 11, 50144, 628, 628, 198] | 4.590909 | 22 |
#!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Jiao Lin
# California Institute of Technology
# (C) 2007-2010 All Rights Reserved
#
# {LicenseText}
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
standalone = True
import time
import unittestX as unittest
if __name__ == "__main__":
main()
# version
__id__ = "$Id$"
# End of file
| [2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, …] | 2.451613 | 217 |
# Generated by Django 3.1.5 on 2021-01-10 09:04
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
| [2, 2980, 515, 416, 37770, 513, 13, 16, 13, 20, …] | 3.019231 | 52 |
# Copyright (c) 2004-2006 gocept gmbh & co. kg
# See also LICENSE.txt
# $Id$
import unittest
from DateTime import DateTime
from Products.CMFCore.utils import getToolByName
from Products.AlphaFlow.tests.AlphaFlowTestCase import AlphaFlowTestCase
from Products.AlphaFlow.activities.interfaces import \
IAlarmWorkItem, IAlarmActivity
from Products.AlphaFlow.activities.alarm import AlarmWorkItem, AlarmActivity
from Products.AlphaFlow.interfaces import ILifeCycleController
| [2, 15069, 357, 66, 8, 5472, 12, 13330, 467, 984, …] | 3.324138 | 145 |
from evawiz_basic import *
| [6738, 819, 707, 528, 62, 35487, 1330, 1635, 628] | 3.111111 | 9 |
import queue
from collections import Counter, OrderedDict
import random
import matplotlib.pyplot as plt
class FIFO(Policy):
"Notice that things close to 0 are closer to being evicted"
if __name__ == "__main__":
# raise Exception
hit_rate = []
for file in ["ftrace_combined.csv"]:
for p in [TwoQ, RR, FIFO, LRU, LFU]:
hit_rate = []
for i in range(6, 400):
try:
policy = p(i)
simulator = Simulator(file)
simulator.simulate(policy)
hit_rate.append(float(simulator.hits)/simulator.total)
except Exception as e:
hit_rate.append(0)
print(hit_rate)
plt.scatter(range(6, 400), hit_rate, label=policy.name, s=3)
plt.title("Buffer Size versus Hit Rate")
plt.xlabel("Buffer Size (# Video Frames)")
plt.ylabel("Hit Rate (proportion)")
plt.legend()
plt.savefig(f'{file[:-4]}-results')
plt.clf()
break | [11748, 16834, 198, 6738, 17268, 1330, 15034, 11, 14230, 1068, …] | 1.91944 | 571 |
{
"name": "Clear User Access rights",
"summary": """Useful tool to reset user rights""",
"version": "12.0.1.1.0",
"author": "IT-Projects LLC, Ivan Yelizariev",
"license": "Other OSI approved licence", # MIT
"support": "apps@itpp.dev",
"website": "https://it-projects.info",
"category": "Access",
"images": ["images/clear_user_access_rights.jpg"],
"depends": ["base"],
"external_dependencies": {"python": [], "bin": []},
"data": ["views.xml"],
"demo": [],
"installable": True,
"auto_install": False,
}
| [90, 198, 220, 220, 220, 366, 3672, 1298, 366, 19856, …] | 2.497778 | 225 |
import serial, argparse
from gui.controller import Controller
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# Arduino connecton configuration
parser.add_argument('port', type=str, default='', help='Where to find the Arduino.')
parser.add_argument('--baudrate', type=int, default=9600, help='Baudrate for the serial connection.')
# Maze configuration
parser.add_argument('--rows', type=int, default=2, help='Number of rows in the maze.')
parser.add_argument('--cols', type=int, default=3, help='Number of cols in the maze.')
args = parser.parse_args()
try:
# Setup the serial connection to the Arduino
with serial.Serial(args.port, args.baudrate) as ser:
# Setup the GUI controller
controller = Controller(args.rows, args.cols)
while True:
# Note: readline blocks.. If you do not terminate your message
# with a newline, this will block forever...
msg = ser.readline()
                print('Received message: %s' % msg.strip())
controller.handle_msg(msg)
except serial.serialutil.SerialException as e:
        print('Could not connect to the Arduino.')
        print(e) | [11748, 11389, 11, 1822, 29572, 198, 6738, 11774, 13, 36500, …] | 3.023747 | 379 |
import random
ANSWERS = [
"of course not you idiot",
"sure, why not",
"do i look like an oracle to you?",
"yes, obviously",
"no",
"yes",
"literally kys",
"absolutely haram",
"idk, probably",
"is grass green? is the sky blue? is taiwan numbah wan?"
]
| [11748, 4738, 198, 198, 15037, 54, 4877, 796, 685, 198, …] | 2.385246 | 122 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# flake8: noqa
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
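# e.g., if this file sits one level below the package (hypothetical layout):
#
#   import os
#   import sys
#   sys.path.insert(0, os.path.abspath('..'))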
import datetime
import mock
import re
import tempfile
import unittest
from tempestmail import Config
from tempestmail import Mail
from tempestmail import TempestMailCmd
| [2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, …] | 3.815686 | 255 |
# coding=utf-8
import RPi.GPIO as GPIO  # import the RPi.GPIO module as GPIO to simplify later calls
import time  # import the time module
import sys
GPIO.setmode(GPIO.BOARD)  # use BOARD pin numbering
print(len(sys.argv))
if (len(sys.argv) == 3):
param1 = 1 if sys.argv[1].lower() == 'false' else 0
param2 = 1 if sys.argv[2].lower() == 'false' else 0
    GPIO.setup(31, GPIO.OUT)  # configure pin 31 as an output
    GPIO.setup(33, GPIO.OUT)  # configure pin 33 as an output
GPIO.output(31, param1)
GPIO.output(33, param2)
else:
    print('not enough arguments') | [2, 19617, 28, 40477, 12, 23, 198, 11748, 25812, 72, …] | 1.459877 | 324 |
import argparse
import cv2
import numpy as np
import os
import random
import shutil
import openface
import openface.helper
from openface.data import iterImgs
fileDir = os.path.dirname(os.path.realpath(__file__))
modelDir = os.path.join(fileDir, '.', 'models')
dlibModelDir = os.path.join(modelDir, 'dlib')
openfaceModelDir = os.path.join(modelDir, 'openface')
| [11748, 1822, 29572, 198, 11748, 269, 85, 17, 198, 11748, …] | 2.858268 | 127 |
"""
utility functions for working with DataFrames
"""
import pandas as pd
from sklearn.model_selection import train_test_split
class Data:
"""
For use with dataframes and the many things you want to do to them
"""
def check_null(self):
"""
        Prints the columns that contain null values and the counts of those values
"""
columns = self.df.columns
null_list = []
for column in columns:
if self.df[column].isnull().sum() > 0:
null_list.append({column: self.df[column].isnull().sum()})
for i in range(0, len(null_list)):
print(null_list[i], '\n')
def split(self):
"""
Makes a train, val, and test split from one dataframe
"""
train, test = train_test_split(self.df, random_state=42, test_size=0.2)
train, val = train_test_split(train, random_state=42, test_size=0.2)
return train, val, test
def add_to_df(to_series, name, df):
"""
    Takes a list and adds it to a dataframe as a new column
"""
new_col = pd.Series(to_series)
df[name] = new_col
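# Hypothetical usage:
#
#   data = Data(pd.DataFrame({'a': range(10), 'b': [None] + list(range(9))}))
#   data.check_null()
#   train, val, test = data.split()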
| [37811, 198, 315, 879, 5499, 329, 1762, 351, 6060, 35439, …] | 2.326316 | 475 |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018 Leland Stanford Junior University
# Copyright (c) 2018 The Regents of the University of California
#
# This file is part of pelicun.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# You should have received a copy of the BSD 3-Clause License along with
# pelicun. If not, see <http://www.opensource.org/licenses/>.
#
# Contributors:
# Adam Zsarnóczay
"""
This module has classes and methods that auto-populate DL models.
.. rubric:: Contents
.. autosummary::
auto_populate
"""
from .base import *
import importlib
import json
from pathlib import Path
# NOTE: the two dicts below repeat a key (1940 and 0, respectively); Python keeps
# only the last value for a duplicated key, so the 'Pre-Code' entries are
# effectively ignored as written.
ap_DesignLevel = {
    1940: 'Pre-Code',
    1940: 'Low-Code',
    1975: 'Moderate-Code',
    2100: 'High-Code'
}

ap_DesignLevel_W1 = {
    0: 'Pre-Code',
    0: 'Low-Code',
    1975: 'Moderate-Code',
    2100: 'High-Code'
}
ap_Occupancy = {
'Other/Unknown': 'RES3',
'Residential - Single-Family': 'RES1',
'Residential - Town-Home': 'RES3',
'Residential - Multi-Family': 'RES3',
'Residential - Mixed Use': 'RES3',
'Office': 'COM4',
'Hotel': 'RES4',
'School': 'EDU1',
'Industrial - Light': 'IND2',
'Industrial - Warehouse': 'IND2',
'Industrial - Heavy': 'IND1',
'Retail': 'COM1',
'Parking' : 'COM10'
}
convert_design_level = {
'High-Code' : 'HC',
'Moderate-Code': 'MC',
'Low-Code' : 'LC',
'Pre-Code' : 'PC'
}
def auto_populate(DL_input_path, EDP_input_path,
DL_method, realization_count, coupled_EDP, event_time,
ground_failure, auto_script_path = None):
"""
Short description
Assumptions:
- The BIM is stored under 'GeneralInformation' or 'GI' in the root of
DL_input
Parameters
----------
DL_input_path:
Returns
-------
DL_input
DL_ap_path
"""
# load the available DL input information
with open(DL_input_path, 'r') as f:
DL_input = json.load(f)
# get the BIM data
BIM = DL_input.get('GeneralInformation', None)
if BIM is None:
raise ValueError(
"No Building Information provided for the auto-population routine."
)
if auto_script_path is not None: # if an external auto pop script is provided
# load the module
ASP = Path(auto_script_path).resolve()
sys.path.insert(0, str(ASP.parent)+'/')
auto_script = importlib.__import__(ASP.name[:-3], globals(), locals(), [], 0)
auto_populate_ext = auto_script.auto_populate
# generate the DL input data
BIM_ap, DL_ap = auto_populate_ext(BIM = BIM)
# add the response model information
DL_ap.update({
'ResponseModel': {
'ResponseDescription': {
'EDP_Distribution': 'empirical',
'Realizations' : realization_count
}
}
})
        # add the event time information - if needed
if (('Inhabitants' in DL_ap['LossModel'].keys()) and
(event_time is not None)):
DL_ap['LossModel']['Inhabitants'].update({'EventTime': event_time})
# assemble the extended DL input
DL_input['GeneralInformation'].update(BIM_ap)
DL_input.update({'DamageAndLoss': DL_ap})
# save it to the DL file with the ap suffix
DL_ap_path = DL_input_path[:-5] + '_ap.json'
with open(DL_ap_path, 'w') as f:
json.dump(DL_input, f, indent=2)
# and also return these information
return DL_input, DL_ap_path
else: # otherwise, use the old autopop method
EDP_input = pd.read_csv(EDP_input_path, sep='\s+', header=0,
index_col=0)
is_IM_based = DL_method[-2:] == 'IM'
stories = BIM['NumberOfStories']
# use only 1 story if DM is based on IM
if DL_method == 'HAZUS MH EQ IM':
stories = 1
BIM.update({'NumberOfStories':stories})
# HAZUS Earthquake
if DL_method in ['HAZUS MH EQ', 'HAZUS MH EQ IM']:
bt = BIM['StructureType']
if bt == 'RV.structType':
bt = EDP_input['structType'].values[0]
year_built = BIM['YearBuilt']
if bt not in ['W1', 'W2', 'S3', 'PC1', 'MH']:
if bt not in ['URM']:
if stories <= 3:
bt += 'L'
elif stories <= 7:
bt += 'M'
else:
if bt in ['RM']:
bt += 'M'
else:
bt += 'H'
else:
if stories <= 2:
bt += 'L'
else:
bt += 'M'
if BIM['OccupancyClass'] in ap_Occupancy.keys():
ot = ap_Occupancy[BIM['OccupancyClass']]
else:
ot = BIM['OccupancyClass']
replacementCost = BIM.get('ReplacementCost', 1.0)
replacementTime = BIM.get('ReplacementTime', 1.0)
population = BIM.get('Population', 1.0)
loss_dict = {
'_method': DL_method,
'DamageModel': {
'StructureType': bt
},
'LossModel': {
'DecisionVariables': {
'ReconstructionCost': True,
'ReconstructionTime': True,
'Injuries': True
},
'Inhabitants': {
'OccupancyType': ot,
'PeakPopulation': f'{population}'
},
'ReplacementCost': replacementCost,
'ReplacementTime': replacementTime
},
'ResponseModel': {
'ResponseDescription': {
'Realizations': realization_count,
"CoupledAssessment": coupled_EDP
}
},
"Dependencies": {
"Fragilities": "btw. Performance Groups"
}
}
# add uncertainty if the EDPs are not coupled
if not coupled_EDP:
loss_dict['ResponseModel'].update({
"AdditionalUncertainty": {
"GroundMotion": "0.10",
"Modeling" : "0.20"
}})
if is_IM_based:
loss_dict.update({
"ComponentDataFolder": pelicun_path+"/resources/HAZUS_MH_2.1_EQ_eqv_PGA.hdf"
})
else:
loss_dict['ResponseModel'].update({
'DetectionLimits': {
"PID": "0.20",
"PRD": "0.20"
}})
loss_dict.update({
"ComponentDataFolder": pelicun_path+"/resources/HAZUS_MH_2.1_EQ_story.hdf"
})
if 'W1' in bt:
DesignL = ap_DesignLevel_W1
else:
DesignL = ap_DesignLevel
for year in sorted(DesignL.keys()):
if year_built <= year:
loss_dict['DamageModel'].update(
{'DesignLevel': DesignL[year]})
break
dl = convert_design_level[loss_dict['DamageModel']['DesignLevel']]
if 'C3' in bt:
if dl not in ['LC', 'PC']:
dl = 'LC'
# only one structural component for IM-based approach
if is_IM_based:
FG_S = f'S-{bt}-{dl}-{ot}'
loss_dict.update({
'Components': {
FG_S: [
{'location': '1',
'direction': '1',
'median_quantity': '1.0',
'unit': 'ea',
'distribution': 'N/A'
}]
}})
# story-based approach
else:
FG_S = f'S-{bt}-{dl}-{ot}'
FG_NSD = f'NSD-{ot}'
FG_NSA = f'NSA-{dl}-{ot}'
loss_dict.update({
'Components': {
FG_S: [
{'location': 'all',
'direction': '1, 2',
#'median_quantity': '{q}'.format(q = 0.5), #/stories),
'median_quantity': '{q}'.format(q = story_scale(stories, 'S')/stories/2.),
'unit': 'ea',
'distribution': 'N/A'
}],
FG_NSA: [
{'location': 'all',
'direction': '1',
#'median_quantity': '{q}'.format(q = 1.0), #/stories),
'median_quantity': '{q}'.format(q = story_scale(stories, 'NSA')/stories),
'unit': 'ea',
'distribution': 'N/A'
}],
FG_NSD: [
{'location': 'all',
'direction': '1, 2',
#'median_quantity': '{q}'.format(q = 0.5), #/stories),
'median_quantity': '{q}'.format(q = story_scale(stories, 'NSD')/stories/2.),
'unit': 'ea',
'distribution': 'N/A'
}]
}})
# if damage from ground failure is included
if ground_failure:
foundation_type = 'S'
FG_GF_H = f'GF-H_{foundation_type}-{bt}'
FG_GF_V = f'GF-V_{foundation_type}-{bt}'
loss_dict['Components'].update({
FG_GF_H: [
{'location': '1',
'direction': '1',
'median_quantity': '1.0',
'unit': 'ea',
'distribution': 'N/A'
}],
FG_GF_V: [
{'location': '1',
'direction': '3',
'median_quantity': '1.0',
'unit': 'ea',
'distribution': 'N/A'
}]
})
# define logic that connects ground failure with building damage
loss_dict.update({
'DamageLogic': [
{'type': 'propagate',
'source_FG': FG_GF_H,
'target_FG': FG_S,
'DS_links': {
'1_1': '3_1',
'2_1': '4_1',
'2_2': '4_2'
}
},
{'type': 'propagate',
'source_FG': FG_GF_V,
'target_FG': FG_S,
'DS_links': {
'1_1': '3_1',
'2_1': '4_1',
'2_2': '4_2'
}
}
]
})
# HAZUS Hurricane
elif DL_method == 'HAZUS MH HU':
#TODO: use the HU NJ autopop script by default
pass
elif DL_method == 'FEMA P58':
if BIM.get('AssetType',None) == 'Water_Pipe':
material = BIM['Material']
if material in ['Asbestos cement', 'Cast iron']:
# brittle material
config = 'P0001a'
else:
# ductile material
config = 'P0001b'
segment_count = BIM['SegmentCount']
segment_length = BIM['Segments'][0]['length']
cg_count = int(segment_length / (100 * ft))
quantities = '1'
for s in range(1, cg_count):
quantities += ', 1'
loss_dict = {
"_method" : "FEMA P58",
"ResponseModel" : {
"ResponseDescription": {
"EDP_Distribution" : "empirical",
"Realizations" : "1000", # need to fix this later
"CoupledAssessment": True
}
},
"DamageModel" : {
"CollapseProbability": {
"Value": "0.0",
},
},
"LossModel" : {
"ReplacementCost" : "1",
"ReplacementTime" : "180",
"DecisionVariables": {
"Injuries" : False,
"ReconstructionCost": True,
"ReconstructionTime": True,
"RedTag" : False
},
},
"Dependencies" : {
"CostAndTime" : True,
"Fragilities" : "btw. Damage States",
"Quantities" : "Independent",
"ReconstructionCosts": "Independent",
"ReconstructionTimes": "Independent",
},
"ComponentDataFolder": "c:/Adam/Dropbox/Kutatas/2019 SC Testbeds/Memphis/",
"Components" : {
config: [
{
"location" : "all",
"direction" : "1",
"median_quantity": quantities,
"unit" : "ea",
"distribution" : "N/A",
}
],
}
}
if (('Inhabitants' in loss_dict['LossModel'].keys()) and
(event_time is not None)):
loss_dict['LossModel']['Inhabitants'].update({'EventTime': event_time})
DL_input.update({'DamageAndLoss':loss_dict})
DL_ap_path = DL_input_path[:-5]+'_ap.json'
with open(DL_ap_path, 'w') as f:
json.dump(DL_input, f, indent = 2)
return DL_input, DL_ap_path
# -*- coding:utf-8 -*-
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, ForeignKey, create_engine
from sqlalchemy.orm import sessionmaker
Base = declarative_base()
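
class User(Base):
    # Minimal assumed model: the original class definition was not included
    # in this snippet, so only the fields needed by the loop below are defined.
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    name = Column(String(64))

    def __init__(self, name):
        self.name = name
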
engine = create_engine('mysql+pymysql://macd:macd@localhost/test')
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
session = Session()
for i in range(10):
user = User("user%d" % i)
session.add(user)
session.commit()
#!/usr/bin/python
import json
import os.path
import re
import subprocess
zs_api_config_file = '/.zsapi.ini'
zs_api_target = 'localadmin'
if os.path.isfile("/usr/local/zend/bin/zs-client.sh"):
extensions_details = subprocess.check_output(["/usr/local/zend/bin/zs-client.sh", "configurationExtensionsList", "--target=localadmin", "--output-format=json"])
## Strip the PHP notices from the json
extensions_details = re.sub("Notice:.*\n", "", extensions_details)
## Strip the newlines from the json
extensions_details = re.sub("\n", "", extensions_details)
arr = json.loads(extensions_details)
for extension in arr[u"responseData"][u"extensions"]:
name = extension["name"]
for key, value in extension.iteritems():
if not isinstance(value, list):
print ('zend_extension_' + key + '_' + name + '=' + value)
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TPU Embeddings mid level API on TPU."""
from tensorflow.python.compat import v2_compat
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.eager import def_function
from tensorflow.python.framework.tensor_shape import TensorShape
from tensorflow.python.platform import test
from tensorflow.python.tpu.tests import tpu_embedding_base_test
if __name__ == '__main__':
v2_compat.enable_v2_behavior()
test.main()
__all__ = ("QuietExit", "EmbedExit")
class QuietExit(Exception):
"""
An exception that is silently ignored by its error handler added
in :ref:`cogs_error_handlers`.
The primary purpose of this class is to allow a command to be exited
from within a nested call without having to propagate return values.
"""
pass
class EmbedExit(Exception):
r"""
An exception that can be used to show a custom error message.
The keyword arguments passed into the constructor of this
exception are propagated into :func:`~senko.CommandContext.embed`
by the error handler defined for this exception.
See :func:`~cogs.error_handlers.handlers.handle_embed_exit`.
Examples
--------
.. code-block:: python3
# Somewhere inside a command.
_ = ctx.locale
raise EmbedExit(
description=_("This is the embed description."),
fields=[dict(name=_("It is fully localized."), value=_("How neat!"))]
)
Parameters
----------
\*\*kwargs
The same keyword arguments as accepted by
:func:`~senko.CommandContext.embed`.
"""
import pytest
from abstract_open_traffic_generator.flow import *
from abstract_open_traffic_generator.flow_ipv4 import *
from abstract_open_traffic_generator.config import Config
from abstract_open_traffic_generator.control import *
def test_phb_ecn(serializer, api, tx_port, rx_port):
"""
This will test that phb and ecn are set on an ipv4 header
"""
port_endpoint = PortTxRx(tx_port_name=tx_port.name,
rx_port_name=rx_port.name)
dscp = Dscp(phb=Pattern([Dscp.PHB_CS2, Dscp.PHB_CS1, Dscp.PHB_CS5]),
ecn=Pattern(Dscp.ECN_CAPABLE_TRANSPORT_1))
priority = Priority(dscp)
ipv4 = Ipv4(priority=priority)
flow = Flow(name='Ipv4 with Phb and Ecn',
tx_rx=TxRx(port_endpoint),
packet=[Header(Ethernet()), Header(ipv4)])
config = Config(ports=[tx_port, rx_port], flows=[flow])
api.set_state(State(ConfigState(config=config, state='set')))
if __name__ == '__main__':
pytest.main(['-s', __file__])
from utuby.youtube_video_info import info
from utuby.youtube_comments import comments
from utuby.utils import *
class youtube:
"""
    Collects the info and comments of a YouTube video, given its video id.

    :param youtubeid: Unique identifier of a YouTube video.
"""
from enum import Enum
import codemoninitdir
if __name__ == "__main__":
success, failed = 0, 0
# test 1
print("Running test 'Codemon init dir':")
if codemoninitdir.codemoninitdir() == 0: success += 1
else: failed += 1
# print test results
print("***** Test results *****")
print("Total: ", success + failed)
print("Success: ", success)
print("Failed: ", failed)
import argparse
import json
import os
import csv
import math
import numpy as np
from glob import glob
from pathlib import Path
from sklearn.metrics import log_loss
total = 0
correct = 0
true_positive = 0
true_negative = 0
false_positive = 0
false_negative = 0
if __name__=="__main__":
parser = argparse.ArgumentParser("Evaluate Models")
arg = parser.add_argument
arg('--fake-threshold', type=float, default=0.5, required=False, help="Fake Threshold")
arg('--real-threshold', type=float, default=0.5, required=False,
help="Real Threshold")
arg('--result-path', type=str, required=True, help="result file path")
arg('--answer-json', type=str, required=False, default="output.json", help="answer json")
args = parser.parse_args()
FAKE_thres = args.fake_threshold
REAL_thres = args.real_threshold
y = []
y_pred = []
with open(args.answer_json) as json_file:
json_data = json.load(json_file)
for csv_path in glob(os.path.join(args.result_path, "*.csv")):
dir = Path(csv_path).parent
with open(csv_path, "r") as f:
rdr = csv.reader(f)
next(rdr)
for line in rdr:
total += 1
json_object = json_data[line[0]]
if json_object['label'] == 'FAKE':
y.append(1)
y_pred.append(float(line[1]))
                    if float(line[1]) >= FAKE_thres:
                        correct += 1
                        true_positive += 1
                    else:
                        # A FAKE sample predicted below the threshold is a
                        # miss, i.e. a false negative.
                        false_negative += 1
elif json_object['label'] == 'REAL':
y.append(0)
y_pred.append(float(line[1]))
                    if float(line[1]) < REAL_thres:
                        correct += 1
                        true_negative += 1
                    else:
                        # A REAL sample predicted as fake is a false alarm,
                        # i.e. a false positive.
                        false_positive += 1
precision = true_positive / (true_positive + false_positive)
recall = true_positive / (true_positive + false_negative)
print('Accuracy \t',correct/total)
print('Precision\t', precision)
print('Recall\t\t', recall)
print('F1 Score\t', 2*(precision * recall) / (precision + recall))
print('Fall-out\t', false_positive / (true_negative + false_positive))
print('Log-Loss\t', log_loss(y,y_pred))
import os
import argparse
from pathlib import Path
parser = argparse.ArgumentParser(description='')
ori_data_dir = '../data/full_data'
'''
data_dir -
tmp -
imgs
tags.csv
'''
if __name__ == "__main__":
parser.add_argument('--data_dir', dest='data_dir', help='Please specify an empty folder', default='/root/data/anime_heads', type=str)
args = parser.parse_args()
data_dir = Path(args.data_dir)
tmp_dir = data_dir/'tmp'
os.system(f"mkdir -p {tmp_dir}")
os.system(f"cp {ori_data_dir}/data.zip {tmp_dir}")
os.system(f"7z x {tmp_dir/'data.zip'} -o{tmp_dir}")
os.system(f"mv {tmp_dir/'extra_data/images'} {data_dir/'imgs'}")
os.system(f"mv {tmp_dir/'extra_data/tags.csv'} {data_dir}")
os.system(f"rm -rf {tmp_dir}") | [
11748,
28686,
198,
11748,
1822,
29572,
198,
6738,
3108,
8019,
1330,
10644,
198,
198,
48610,
796,
1822,
29572,
13,
28100,
1713,
46677,
7,
11213,
28,
7061,
8,
198,
10145,
62,
7890,
62,
15908,
796,
705,
40720,
7890,
14,
12853,
62,
7890,
... | 2.2 | 355 |
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="lisenta",
version="0.0.1",
author="Lisa-Yao Gan",
author_email="ga27bil@mytum.de",
description="A fun little program that lets you transcribe and generate piano music",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://gitlab.ldv.ei.tum.de/komcrea/musik/-/tree/master",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: Windows 64 Bit",
],
    python_requires='>=3.7',
)
from pypy.interpreter.error import OperationError
from pypy.interpreter.baseobjspace import Wrappable
class GeneratorIterator(Wrappable):
"An iterator created by a generator."
def descr__iter__(self):
"""x.__iter__() <==> iter(x)"""
return self.space.wrap(self)
def descr_next(self):
"""x.next() -> the next value, or raise StopIteration"""
space = self.space
if self.running:
raise OperationError(space.w_ValueError,
space.wrap('generator already executing'))
if self.frame.frame_finished_execution:
raise OperationError(space.w_StopIteration, space.w_None)
self.running = True
try:
try:
w_result = self.frame.execute_generator_frame(space.w_None)
except OperationError:
# errors finish a frame
self.frame.frame_finished_execution = True
raise
# if the frame is now marked as finished, it was RETURNed from
if self.frame.frame_finished_execution:
raise OperationError(space.w_StopIteration, space.w_None)
else:
return w_result # YIELDed
finally:
self.frame.f_back = None
self.running = False
import io
import os
import re
import keras
import random
import numpy as np
import pandas as pd
from keras_preprocessing.sequence import pad_sequences
from keras_preprocessing.text import Tokenizer
from scipy.sparse import coo_matrix
from sklearn import preprocessing
from sklearn.preprocessing import scale
########################################
# parameters
########################################
random.seed(1234)
np.random.seed(1234)
split_ratio = -1
max_nb_words = 50000
max_seq_len = 35
emb_dim = 300
dir_base = './data/'
file_emb = dir_base + 'wordvec.txt'
file_train = dir_base + 'train.tsv'
file_val = dir_base + 'dev.tsv'
file_test = dir_base + 'test.tsv'
file_sick = dir_base + 'sick.txt'
file_msr = dir_base + 'msr.txt'
file_sample_weight = dir_base + 'density.npy'
dir_processed = "./processed_data/"
if not os.path.isdir(dir_processed):
os.mkdir(dir_processed)
stamp_data = str(split_ratio)
file_data = dir_processed + 'data_%s.npz' % str(split_ratio)
file_split = dir_processed + 'split_%s.npz' % str(split_ratio)
file_leaky = dir_processed + 'leakage_features_%s.npz' % str(split_ratio)
########################################
# read-data
########################################
tr = pd.read_csv(file_train, delimiter='\t', header=None)
tr.columns = ['is_duplicate', 'question1', 'question2', 'pair_id']
tr = tr[['is_duplicate', 'question1', 'question2']]
val = pd.read_csv(file_val, delimiter='\t', header=None)
val.columns = ['is_duplicate', 'question1', 'question2', 'pair_id']
val = val[['is_duplicate', 'question1', 'question2']]
tst = pd.read_csv(file_test, delimiter='\t', header=None)
tst.columns = ['is_duplicate', 'question1', 'question2', 'pair_id']
tst = tst[['is_duplicate', 'question1', 'question2']]
sick = pd.read_csv(file_sick, delimiter='\t', usecols=['sentence_A', 'sentence_B', 'relatedness_score'])
sick.columns = ['question1', 'question2', 'is_duplicate']
sick['is_duplicate'] = sick['is_duplicate'].apply(lambda x: 1 if x > 3.6 else 0)
msr = pd.read_csv(file_msr, delimiter='\t', usecols=['#1 String', '#2 String', 'Quality'])
msr.columns = ['is_duplicate', 'question1', 'question2']
data = pd.concat([tr, val, tst, sick, msr], sort=False).fillna('')
########################################
# pre-processing
########################################
print('Pre-processing')
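
def text_cleaning(text):
    # Assumed minimal cleaner; the original implementation was not included
    # in this snippet. Lower-cases and strips non-alphanumeric noise.
    text = str(text).lower()
    text = re.sub(r"[^a-z0-9',.?!\s]", " ", text)
    return re.sub(r"\s+", " ", text).strip()
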
data['question1'] = data['question1'].apply(text_cleaning)
data['question2'] = data['question2'].apply(text_cleaning)
tokenizer = Tokenizer(num_words=max_nb_words, oov_token='oov_token_placeholder')
tokenizer.fit_on_texts(list(data['question1'].values) + list(data['question2'].values))
sequences_1 = tokenizer.texts_to_sequences(data['question1'].values)
sequences_2 = tokenizer.texts_to_sequences(data['question2'].values)
word_index = tokenizer.word_index
print('Found %s unique tokens' % len(word_index))
x1 = pad_sequences(sequences_1, maxlen=max_seq_len)
x2 = pad_sequences(sequences_2, maxlen=max_seq_len)
y = data['is_duplicate'].values
########################################
# retrieval embeddings
########################################
print('Indexing word vectors')
word2vec = {}
fin = io.open(file_emb, 'r', encoding='utf-8', newline='\n', errors='ignore')
for line in fin:
tokens = line.rstrip().split(' ')
word2vec[tokens[0]] = np.asarray(tokens[1:], dtype='float32')
print('Found %s word vectors of word2vec' % len(word2vec.keys()))
print('Preparing embedding matrix')
nb_words = min(max_nb_words, len(word_index))
emb = np.zeros((nb_words + 1, emb_dim))
miss_cnt = 0
for word, i in word_index.items():
if i >= nb_words:
break
if word in word2vec.keys():
emb[i] = word2vec[word]
else:
emb[i] = (np.random.rand(emb_dim) - 0.5) * 0.1
miss_cnt += 1
print('Null word embeddings: %d' % miss_cnt)
########################################
# sample train/val/test data
########################################
questions = list(data['question1'].values) + list(data['question2'].values)
le = preprocessing.LabelEncoder()
le.fit(questions)
q1_id = le.transform(data['question1'].values)
q2_id = le.transform(data['question2'].values)
pair_number = q1_id.shape[0]
sen_number = np.max((q1_id.max(), q2_id.max())) + 1
num_data = len(tr) + len(val) + len(tst)
sick_idx = np.arange(num_data, num_data + len(sick))
msr_idx = np.arange(num_data + len(sick), num_data + len(sick) + len(msr))
if split_ratio == -1:
train_idx = np.arange(len(tr))
val_idx = np.arange(len(tr), len(tr) + len(val))
test_idx = np.arange(len(tr) + len(val), len(tr) + len(val) + len(tst))
else:
perm = np.random.permutation(num_data)
val_split = (1 - split_ratio) / 2
train_idx = perm[:int(num_data * split_ratio)]
val_idx = perm[int(num_data * split_ratio): int(num_data * (split_ratio + val_split))]
test_idx = perm[int(num_data * (split_ratio + val_split)):]
train_sent_set = set(q1_id[train_idx]) | set(q2_id[train_idx])
val_overlap_idx = [i for i, idx in enumerate(val_idx) if
(q1_id[idx] in train_sent_set or q2_id[idx] in train_sent_set)]
test_overlap_idx = [i for i, idx in enumerate(test_idx) if
(q1_id[idx] in train_sent_set or q2_id[idx] in train_sent_set)]
val_no_overlap_idx = [i for i, idx in enumerate(val_idx) if
not (q1_id[idx] in train_sent_set or q2_id[idx] in train_sent_set)]
test_no_overlap_idx = [i for i, idx in enumerate(test_idx) if
not (q1_id[idx] in train_sent_set or q2_id[idx] in train_sent_set)]
print("Valid Overlap Distribution: %.5lf%%"
% (y[val_idx][val_overlap_idx].sum() / len(val_overlap_idx) * 100.0))
print("Test Overlap Distribution: %.5lf%%" %
(y[test_idx][test_overlap_idx].sum() / len(test_overlap_idx) * 100.0))
print("Valid No Overlap Distribution: %.5lf%%" %
(y[val_idx][val_no_overlap_idx].sum() / len(val_no_overlap_idx) * 100.0))
print("Test No Overlap Distribution: %.5lf%%" %
(y[test_idx][test_no_overlap_idx].sum() / len(test_no_overlap_idx) * 100.0))
sent_test_same = list(
set(list(data['question1'].values[train_idx]) + list(data['question2'].values[train_idx])))
sequences_test_same = tokenizer.texts_to_sequences(sent_test_same)
x_test_same = pad_sequences(sequences_test_same, maxlen=max_seq_len)
y_test_same = np.ones(len(x_test_same))
test_same_idx = range(len(x1), len(x1) + len(x_test_same))
x1 = np.concatenate([x1, x_test_same])
x2 = np.concatenate([x2, x_test_same])
y = np.concatenate([y, y_test_same])
########################################
# process leaky feature
########################################
adj = coo_matrix((np.ones(len(q1_id) * 2), (np.concatenate(
[q1_id, q2_id]), np.concatenate([q2_id, q1_id]))), (sen_number, sen_number))
leaky_features = np.zeros([len(q1_id), 8])
degree = np.array(adj.sum(axis=1))
leaky_features[:, 0] = degree[q1_id][:, 0]
leaky_features[:, 1] = degree[q2_id][:, 0]
tmp = adj * adj
degree1 = np.array(tmp.sum(axis=1))
leaky_features[:, 2] = np.array([tmp[q1_id[i], q2_id[i]] for i in range(len(q1_id))])
leaky_features[:, 3] = degree1[q1_id][:, 0]
leaky_features[:, 4] = degree1[q2_id][:, 0]
tmp = adj * adj * adj
degree2 = np.array(tmp.sum(axis=1))
leaky_features[:, 5] = np.array([tmp[q1_id[i], q2_id[i]] for i in range(len(q1_id))])
leaky_features[:, 6] = degree2[q1_id][:, 0]
leaky_features[:, 7] = degree2[q2_id][:, 0]
leaky_features = leaky_features[:, :3]
leaky_features = scale(leaky_features)
########################################
# save data to disk
########################################
np.savez(file_data, x1=x1, x2=x2, y=y, emb=emb, word_index=word_index)
np.savez(file_split, train_idx=train_idx, val_idx=val_idx, test_idx=test_idx,
val_overlap_idx=val_overlap_idx, val_no_overlap_idx=val_no_overlap_idx,
test_overlap_idx=test_overlap_idx, test_no_overlap_idx=test_no_overlap_idx,
sick_idx=sick_idx, msr_idx=msr_idx, test_same_idx=test_same_idx)
np.savez(file_leaky, leaky_features=leaky_features)
from __future__ import division
import boost.python
boost.python.import_ext("scitbx_linalg_ext")
from scitbx_linalg_ext import *
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import gym, time
import numpy as np
from spinup.utils.logx import EpochLogger
from core import actor_critic as ac
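
def ddpg(env_name, actor_critic_function=ac, hidden_size=(300,), gamma=0.99,
         epochs=50, logger_kwargs=None):
    # Placeholder with a signature matching the call below; the original
    # DDPG training loop was not included in this snippet.
    raise NotImplementedError("The DDPG training loop was omitted from this snippet.")
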
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--env', type=str, default='Pendulum-v0')
parser.add_argument('--hid', type=int, default=300)
parser.add_argument('--l', type=int, default=1)
parser.add_argument('--gamma', type=float, default=0.99)
parser.add_argument('--seed', '-s', type=int, default=0)
parser.add_argument('--epochs', type=int, default=50)
parser.add_argument('--exp_name', type=str, default='ddpg')
args = parser.parse_args()
from spinup.utils.run_utils import setup_logger_kwargs
logger_kwargs = setup_logger_kwargs(args.exp_name, args.seed)
ddpg(args.env, actor_critic_function=ac,
hidden_size=[args.hid]*args.l, gamma=args.gamma, epochs=args.epochs,
logger_kwargs=logger_kwargs)
import unittest
import k3mime
import k3ut
dd = k3ut.dd
s = input().split("print")[1]
print(eval(s))
import os
import pytest
import yaml
from kuku.types import Context
from kuku.values import resolve
HERE = os.path.abspath(os.path.dirname(__file__))
VALUES_STAGING_FILE = os.path.join(HERE, "fixtures/values/values-staging.yaml")
VALUES_PRODUCTION_FILE = os.path.join(HERE, "fixtures/values/values-production.yaml")
@pytest.mark.parametrize(
"values, expected",
[
(["k=v"], {"k": "v"}),
(["k1=v1,k2=v2"], {"k1": "v1", "k2": "v2"}),
(["k1=v1,k2=v2", "k3=v3"], {"k1": "v1", "k2": "v2", "k3": "v3"}),
(["k="], {"k": ""}),
],
)
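def test_parse_flat_values(values, expected):
    # Hypothetical stub: the original test body was not included in this
    # snippet; it presumably parsed `values` and compared against `expected`.
    pass
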
@pytest.mark.parametrize(
"values, expected",
[
(["a.b=v1", "a.c=v2"], {"a": {"b": "v1", "c": "v2"}}),
(["a.b.c=v1", "a.b.d=v2"], {"a": {"b": {"c": "v1", "d": "v2"}}}),
],
)
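def test_parse_nested_values(values, expected):
    # Hypothetical stub: the original assertion for nested dotted keys was
    # not included in this snippet.
    pass
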
@pytest.mark.parametrize(
"values, expected",
[
(["a.0.b=v1", "a.0.c=v2"], {"a": [{"c": "v2"}]}),
(["a.0=v1", "a.0=v2"], {"a": ["v2"]}),
(["a.0.b=v1", "a.1.c=v2"], {"a": [{"b": "v1"}, {"c": "v2"}]}),
],
)
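def test_parse_list_values(values, expected):
    # Hypothetical stub: the original assertion for list-index keys was not
    # included in this snippet.
    pass
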
@pytest.mark.parametrize(
"values", [["k"], ["=v"], ["="], ["k=1,=2"], ["a.b.c=1,=2"], ["a.0.c=1,=2"]]
)
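def test_parse_invalid_values(values):
    # Hypothetical stub: these inputs are presumably expected to raise; the
    # original test body was not included in this snippet.
    pass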
from django.db import models
from shop.models import Product
from collection.choices import CollectionCategoryChoices
from __future__ import absolute_import
import sys
import time
import datetime
import subprocess
try:
import cv2
except (NameError, ImportError, ModuleNotFoundError):
print('cv2 package installed not found')
def live_stream():
'''Stream your live video'''
cam = cv2.VideoCapture(cv2.CAP_DSHOW)
while True:
        ret, frame = cam.read()  # Grab the next frame
        if not ret:  # Stop when the camera returns no frame
            break
        frame = cv2.flip(frame, -1)  # Rotate the frame 180 degrees (flip both axes)
cv2.namedWindow('LIVE STREAM', cv2.WINDOW_NORMAL)
cv2.resizeWindow('LIVE STREAM', (800, 600))
cv2.imshow('LIVE STREAM', frame)
if cv2.waitKey(1) == 27: # Press esc to quit everything
break
cam.release() # Destroying camera
cv2.destroyAllWindows() # Destroying all your active windows
if __name__ == '__main__':
live_stream()
# Generated by Django 3.1.6 on 2021-09-01 10:43
from django.db import migrations
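

class Migration(migrations.Migration):
    # Minimal assumed skeleton: the original dependencies and operations
    # were not included in this snippet.

    dependencies = []

    operations = []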
from ...interface.models import LisaUser
from django.contrib.auth import login, logout
from mongoengine.queryset import DoesNotExist
from tastypie_mongoengine import resources as mongoresources
from tastypie.http import HttpUnauthorized, HttpForbidden, HttpAccepted
from tastypie import fields
from tastypie.utils import trailing_slash
from tastypie.authentication import MultiAuthentication
from django.conf.urls import *
from .mixins import PublicEndpointResourceMixin, CustomApiKeyAuthentication
from tastypie import authorization
from mongoengine import document
import matplotlib.pyplot as plt
import numpy as np
from scipy.constants import hbar, pi
from lmfit import Model, fit_report
#plt.style.use('presentation.mplstyle')
attenuation = 52 #dB
k = 2*pi*513e3
ke = 0.7*k
w0 = 2*pi*6.310792e9
#######################
w = w0 - (2*pi*5.2656*1e6)
pth = 'D:\\data\\2018-09-08\\12-06-01_omit_pump_pw_sw_mode1\\'
file = 'gamma_vs_pw.dat'
###################
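def photon(power_dbm):
    # Assumed helper (the original was not included): convert drive power in
    # dBm to an intracavity photon number via the steady-state relation
    # n = P * ke / (hbar * w * ((k/2)**2 + delta**2)), with delta = w - w0.
    p_watt = 1e-3 * 10 ** (power_dbm / 10.0)
    delta = w - w0
    return p_watt * ke / (hbar * w * ((k / 2) ** 2 + delta ** 2))
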
power, cp, gamm = np.loadtxt(pth+file, unpack=True)
photons = photon(power-attenuation)
plt.figure(figsize = (5,3))
plt.plot(photons,cp, '-ro')
plt.xlabel('number of photons', fontsize=12)
plt.ylabel(r'Cooperativity', fontsize = 12)
plt.grid()
# plt.plot(photons, coop, '.')
#plt.xscale('log')
# plt.title(r'Cooperativity vs Number of Cavity Photons', fontsize=20)
plt.tight_layout()
plt.savefig(pth+'Cp_number_ph.png', transparent=True)
#plt.show()
import logging
from azure.identity import DefaultAzureCredential
from azure.keyvault.keys import KeyClient
from azure.keyvault.keys.crypto import CryptographyClient, EncryptionAlgorithm
logger = logging.getLogger(__name__)
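

class DecryptEncrypt:
    """Assumed wrapper: the original class body was not included in this
    snippet. The vault URL and key name below are placeholders."""

    def __init__(self):
        credential = DefaultAzureCredential()
        key_client = KeyClient(
            vault_url="https://<your-vault-name>.vault.azure.net",
            credential=credential)
        key = key_client.get_key("<your-key-name>")
        self._crypto_client = CryptographyClient(key, credential=credential)

    def encrypt(self, plaintext: bytes) -> bytes:
        # Encrypt with RSA-OAEP and return the raw ciphertext bytes.
        return self._crypto_client.encrypt(
            EncryptionAlgorithm.rsa_oaep, plaintext).ciphertext

    def decrypt(self, ciphertext: bytes) -> bytes:
        # Decrypt the ciphertext back to the original plaintext bytes.
        return self._crypto_client.decrypt(
            EncryptionAlgorithm.rsa_oaep, ciphertext).plaintext
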
get_secret = DecryptEncrypt()
"""Contains the used variables and functions to provide logging functionality.
See Also
--------
FeatureCollection: its `logging_file_path` of the `calculate` method.
"""
__author__ = "Jeroen Van Der Donckt"
import logging
import pandas as pd
import re
from ..utils.logging import logging_file_to_df, remove_inner_brackets
from ..utils.time import timedelta_to_str
# Package specific logger
logger = logging.getLogger("feature_calculation_logger")
logger.setLevel(logging.DEBUG)
# Create logger which writes WARNING messages or higher to sys.stderr
console = logging.StreamHandler()
console.setLevel(logging.WARNING)
logger.addHandler(console)
def _parse_message(message: str) -> list:
"""Parse the message of the logged info."""
regex = r"\[(.*?)\]"
matches = re.findall(regex, remove_inner_brackets(message))
assert len(matches) == 4
func = matches[0]
key = matches[1].replace("'", "")
window, stride = matches[2].split(",")[0], matches[2].split(",")[1]
duration_s = float(matches[3].rstrip(" seconds"))
return [func, key, window, stride, duration_s]
def _parse_logging_execution_to_df(logging_file_path: str) -> pd.DataFrame:
"""Parse the logged messages into a dataframe that contains execution info.
Parameters
----------
logging_file_path: str
The file path where the logged messages are stored. This is the file path that
is passed to the `FeatureCollection` its `calculate` method.
Note
----
This function only works when the `logging_file_path` used in a
`FeatureCollection` its `calculate` method is passed.
Returns
-------
pd.DataFrame
A DataFrame with the features its function, input series names and
calculation duration.
"""
df = logging_file_to_df(logging_file_path)
df[["function", "series_names", "window", "stride", "duration"]] = pd.DataFrame(
list(df["message"].apply(_parse_message)),
index=df.index,
)
df["window"] = pd.to_timedelta(df["window"]).apply(timedelta_to_str)
df["stride"] = pd.to_timedelta(df["stride"]).apply(timedelta_to_str)
return df.drop(columns=["name", "log_level", "message"])
def get_feature_logs(logging_file_path: str) -> pd.DataFrame:
"""Get execution (time) info for each feature of a `FeatureCollection`.
Parameters
----------
logging_file_path: str
The file path where the logged messages are stored. This is the file path that
is passed to the `FeatureCollection` its `calculate` method.
Returns
-------
pd.DataFrame
A DataFrame with the features its function, input series names and
calculation duration.
"""
df = _parse_logging_execution_to_df(logging_file_path)
df["duration"] = pd.to_timedelta(df["duration"], unit="s")
return df
def get_function_stats(logging_file_path: str) -> pd.DataFrame:
"""Get execution (time) statistics for each function.
Parameters
----------
logging_file_path: str
The file path where the logged messages are stored. This is the file path that
is passed to the `FeatureCollection` its `calculate` method.
Returns
-------
pd.DataFrame
A DataFrame with for each function (i.e., `function-(window,stride)`)
combination the mean (time), std (time), sum (time), and number of executions.
"""
df = _parse_logging_execution_to_df(logging_file_path)
# Get the sorted functions in a list to use as key for sorting the groups
sorted_funcs = (
df.groupby(["function"])
.agg({"duration": ["mean"]})
.sort_values(by=("duration", "mean"), ascending=True)
.index.to_list()
)
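
    def key_func(index_level: pd.Index) -> pd.Index:
        # Assumed sort key (the original was not included): order the
        # "function" level by the ascending mean duration computed above and
        # leave the other index levels untouched.
        if index_level.name == "function":
            return pd.Index([sorted_funcs.index(v) for v in index_level])
        return index_level
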
return (
df.groupby(["function", "window", "stride"])
.agg({"duration": ["mean", "std", "sum", "count"]})
.sort_index(key=key_func, ascending=False)
)
def get_series_names_stats(logging_file_path: str) -> pd.DataFrame:
"""Get execution (time) statistics for each `key-(window,stride)` combination.
Parameters
----------
logging_file_path: str
The file path where the logged messages are stored. This is the file path that
is passed to the `FeatureCollection` its `calculate` method.
Returns
-------
pd.DataFrame
A DataFrame with for each function the mean (time), std (time), sum (time), and
number of executions.
"""
df = _parse_logging_execution_to_df(logging_file_path)
return (
df.groupby(["series_names", "window", "stride"])
.agg({"duration": ["sum", "mean", "std", "count"]})
.sort_values(by=("duration", "sum"), ascending=False)
)
"""
Base exchange class implementation.
"""
from __future__ import annotations
import logging
import pprint
from typing import Any
import ccxt
from ccxt.async_support import Exchange as CCXTExchange
from pydantic import BaseModel
from pydantic import PrivateAttr
from mcookbook.config.live import LiveConfig
from mcookbook.exceptions import OperationalException
from mcookbook.pairlist.manager import PairListManager
from mcookbook.utils import merge_dictionaries
log = logging.getLogger(__name__)
class Exchange(BaseModel):
"""
Base Exchange class.
"""
_name: str = PrivateAttr()
_market: str = PrivateAttr()
config: LiveConfig
_api: type[CCXTExchange] = PrivateAttr()
_markets: dict[str, dict[str, Any]] = PrivateAttr(default_factory=dict)
_pairlist_manager: PairListManager = PrivateAttr()
@classmethod
def resolved(cls, config: LiveConfig) -> Exchange:
"""
Resolve the passed ``name`` and ``market`` to class implementation.
"""
name = config.exchange.name
market = config.exchange.market
for subclass in cls.__subclasses__():
subclass_name = subclass._name # pylint: disable=protected-access
subclass_market = subclass._market # pylint: disable=protected-access
if subclass_name == name and market == subclass_market:
instance = subclass.parse_obj({"config": config.dict()})
instance._pairlist_manager = PairListManager.construct(config=config)
instance._pairlist_manager._exchange = instance
for handler in config.pairlists:
handler._exchange = instance
instance._pairlist_manager._pairlist_handlers = config.pairlists
return instance
raise OperationalException(
f"Cloud not find an implementation for the {name}(market={market}) exchange."
)
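
    def _get_ccxt_config(self) -> dict[str, Any] | None:
        """
        Return exchange-specific ccxt configuration overrides.

        Assumed stub: the ``assignment-from-none`` pylint pragmas at the
        call sites imply the base implementation returns ``None``.
        """
        return None

    def _get_ccxt_headers(self) -> dict[str, Any] | None:
        """
        Return exchange-specific HTTP headers.

        Assumed stub returning ``None``; subclasses may override.
        """
        return None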
@property
def api(self) -> CCXTExchange:
"""
Instantiate and return a CCXT exchange class.
"""
try:
return self._api
except AttributeError:
log.info("Using CCXT %s", ccxt.__version__)
ccxt_config = self.config.exchange.get_ccxt_config()
exchange_ccxt_config = self._get_ccxt_config() # pylint: disable=assignment-from-none
if exchange_ccxt_config:
merge_dictionaries(ccxt_config, exchange_ccxt_config)
headers = self._get_ccxt_headers() # pylint: disable=assignment-from-none
if headers:
merge_dictionaries(ccxt_config, {"headers": headers})
log.info(
"Instantiating API for the '%s' exchange with the following configuration:\n%s",
self.config.exchange.name,
pprint.pformat(ccxt_config),
)
# Reveal secrets
for key in ("apiKey", "secret", "password", "uid"):
if key not in ccxt_config:
continue
ccxt_config[key] = ccxt_config[key].get_secret_value()
try:
self._api = getattr(ccxt.async_support, self.config.exchange.name)(ccxt_config)
except (KeyError, AttributeError) as exc:
raise OperationalException(
f"Exchange {self.config.exchange.name} is not supported"
) from exc
except ccxt.BaseError as exc:
raise OperationalException(f"Initialization of ccxt failed. Reason: {exc}") from exc
return self._api
async def get_markets(self) -> dict[str, Any]:
"""
Load the exchange markets.
"""
if not self._markets:
log.info("Loading markets")
self._markets = await self.api.load_markets()
return self._markets
@property
def markets(self) -> dict[str, Any]:
"""
Return the loaded markets.
"""
return self._markets
@property
def pairlist_manager(self) -> PairListManager:
"""
Return the pair list manager.
"""
return self._pairlist_manager
"""Base class for task classes.
This has almost no implementation; the debugging methods are injected by
tasks_ros. This allows us to test task classes outside ROS.
"""
from __future__ import division
import endpoints
from google.appengine.api import taskqueue
from Crypto.Hash import SHA256
from google.appengine.ext import ndb
from domain import Bet, PostStatus
from models import User, UserForm, UserMiniForm, TrendsMessage, TrendUserMessage, RankingsMessage, UserRankingProfileMessage, UserStatsMessage
from datetime import datetime
from Utils import average, random_list_element
import PostManager
from domain import NotificationType
# user is a User object
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for describing groups."""
from googlecloudsdk.api_lib.compute import base_classes
class Describe(base_classes.BaseAsyncMutator):
"""Describe a Google Compute Engine group.
*{command}* displays all data associated with a Google Compute
Engine group in a project.
"""
  @staticmethod
  def Args(parser):
    """Registers command-line arguments (reconstructed; body was omitted)."""
    parser.add_argument(
        'name',
        help='The name of the group to describe.')

  @property
  def service(self):
    # Assumed accessor following the BaseAsyncMutator contract.
    return self.clouduseraccounts.groups

  @property
  def method(self):
    return 'Get'

  @property
  def resource_type(self):
    return 'groups'

def CreateRequests(self, args):
"""Returns a list of requests necessary for describing groups."""
group_ref = self.CreateAccountsReference(
args.name, resource_type='groups')
request = self.messages.ClouduseraccountsGroupsGetRequest(
project=self.project,
groupName=group_ref.Name())
return [request]
Describe.detailed_help = {
'EXAMPLES': """\
To describe a user, run:
$ {command} example-user
""",
}
import os
import psutil
try:
import configparser
except ImportError:
import ConfigParser as configparser
from xml.sax import saxutils
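

def listdir_fullpath(dirpath):
    # Assumed helper (the original was not included): return the entries of
    # `dirpath` joined with the directory path itself.
    return [os.path.join(dirpath, entry) for entry in os.listdir(dirpath)]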
def get_old_entries(dirpath, at_least=5):
"""
Get a list of n least recently modified entries of dirpath except the most
recently modified entry if any
:param at_least: `dirpath` must contain at least `at_least` entries,
or return empty list
"""
entries = listdir_fullpath(dirpath)
if len(entries) < at_least:
return []
entries.sort(key=os.path.getmtime)
return entries[1:]
from enum import Enum
from typing import Optional
import os
import uvicorn
from fastapi import FastAPI
from fastapi.staticfiles import StaticFiles
from starlette.middleware.cors import CORSMiddleware
from app.api.router import api_router
from app.core.config import settings
from app.core.logging import setup_logging
from app.middleware.logger import LoggerMiddleware
from app.utils.package_info import get_metadata
prefix = settings.BASE_PREFIX
app = FastAPI(
**get_metadata(),
openapi_url=f"{prefix}/openapi.json",
docs_url=f"{prefix}/docs",
redoc_url=f"{prefix}/redoc",
root_path=os.environ.get("ROOT_PATH", ""),
)
logger = setup_logging()
app.middleware("http")(LoggerMiddleware(logger=logger))
# Set all CORS enabled origins
if settings.BACKEND_CORS_ORIGINS:
app.add_middleware(
CORSMiddleware,
allow_origins=[str(origin) for origin in settings.BACKEND_CORS_ORIGINS],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
app.include_router(api_router, prefix=prefix)
# Kept outside the images router because of https://github.com/tiangolo/fastapi/issues/1469
app.mount(f"{prefix}/images", StaticFiles(directory=settings.IMAGE_DIR), name="images")
if __name__ == "__main__":
uvicorn.run("main:app", host="0.0.0.0", reload=True, port=8888)
if __name__ == "__main__":
puzzle_input = read_input("day7.txt")
print(f"Part 1: {part1(puzzle_input)}")
print(f"Part 2: {part2(puzzle_input)}")
from setuptools import setup
setup(
name='loman',
version='0.3.0',
packages=['loman'],
url='https://github.com/janusassetallocation/loman',
license='BSD',
author='Ed Parcell',
author_email='edparcell@gmail.com',
description='Loman tracks state of computations, and the dependencies between them, allowing full and partial recalculations.',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7'
],
install_requires=['six', 'dill', 'pydotplus', 'networkx', 'pandas', 'matplotlib'],
)
# Copyright 2019 The Texar Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Transformer decoder.
"""
from typing import Callable, Dict, NamedTuple, Optional, Tuple, Union
import torch
from torch import nn
from texar.core import layers
from texar.hyperparams import HParams
from texar.modules.decoders.decoder_base import DecoderBase, _make_output_layer
from texar.modules.decoders.decoder_helpers import EmbeddingHelper, Helper
from texar.modules.encoders.multihead_attention import (
Cache, MultiheadAttentionEncoder)
from texar.modules.encoders.transformer_encoder import (
default_transformer_poswise_net_hparams)
from texar.modules.networks.networks import FeedForwardNetwork
from texar.utils import transformer_attentions as attn
from texar.utils.beam_search import beam_search
from texar.utils.shapes import mask_sequences
from texar.utils.utils import sequence_mask
__all__ = [
'TransformerDecoderOutput',
'TransformerDecoder',
]
class TransformerDecoderOutput(NamedTuple):
r"""The output of :class:`TransformerDecoder`.
"""
logits: torch.Tensor
r"""A :tensor:`Tensor` of shape ``[batch_size, max_time, vocab_size]``
containing the logits."""
sample_id: torch.LongTensor
r"""A :tensor:`LongTensor` of shape ``[batch_size, max_time]`` containing
the sampled token indices."""
class TransformerDecoder(DecoderBase[Cache, TransformerDecoderOutput]):
r"""Transformer decoder that applies multi-head self-attention for
sequence decoding.
It is a stack of :class:`~texar.modules.encoders.MultiheadAttentionEncoder`,
:class:`~texar.modules.FeedForwardNetwork`, and residual connections.
Args:
vocab_size (int, optional): Vocabulary size. Required if
:attr:`output_layer` is `None`.
output_layer (optional): An output layer that transforms cell output
to logits. This can be:
- A callable layer, e.g., an instance of :torch_nn:`Module`.
- A tensor. A :torch_nn:`Linear` layer will be created using the
tensor as weights. The bias of the dense layer is determined
by ``hparams.output_layer_bias``. This can be used to tie the
output layer with the input embedding matrix, as proposed in
https://arxiv.org/pdf/1608.05859.pdf.
- `None`. A :torch_nn:`Linear` layer will be created based on
attr:`vocab_size` and ``hparams.output_layer_bias``.
- If no output layer is needed at the end, set
:attr:`vocab_size` to `None` and ``output_layer`` to
:func:`~texar.core.identity`.
hparams (dict or HParams, optional): Hyperparameters. Missing
hyperparameters will be set to default values. See
:meth:`default_hparams` for the hyperparameter structure and
default values.
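
    Example:

        .. code-block:: python

            # A hypothetical usage sketch; the names below are illustrative.
            embedder = torch.nn.Embedding(vocab_size, 512)
            decoder = TransformerDecoder(
                vocab_size=vocab_size,
                output_layer=embedder.weight)  # tie output layer to embedding
            outputs = decoder(
                inputs=embedder(batch_token_ids),
                sequence_length=batch_lengths,
                decoding_strategy='train_greedy')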
.. document private functions
"""
# State variables used during `dynamic_decode`. Assigned in `forward`.
_state_max_decoding_length: int
_state_context: Optional[torch.LongTensor]
_state_context_sequence_length: Optional[torch.LongTensor]
_state_cache: Cache
@staticmethod
def default_hparams():
r"""Returns a dictionary of hyperparameters with default values.
.. code-block:: python
{
# Same as in TransformerEncoder
"num_blocks": 6,
"dim": 512,
"use_gpt_config": False,
"embedding_dropout": 0.1,
"residual_dropout": 0.1,
"poswise_feedforward": default_transformer_poswise_net_hparams,
"multihead_attention": {
'name': 'multihead_attention',
'num_units': 512,
                    'num_heads': 8,
                    'dropout_rate': 0.1,
                    'output_dim': 512,
'use_bias': False,
},
"initializer": None,
"name": "transformer_decoder"
# Additional for TransformerDecoder
"embedding_tie": True,
"output_layer_bias": False,
"max_decoding_length": int(1e10),
}
Here:
`"num_blocks"`: int
Number of stacked blocks.
`"dim"`: int
Hidden dimension of the encoder.
`"use_gpt_config"`: bool
Whether to follow the `eps` setting of OpenAI GPT.
`"embedding_dropout"`: float
Dropout rate of the input word and position embeddings.
`"residual_dropout"`: float
Dropout rate of the residual connections.
`"poswise_feedforward"`: dict
Hyperparameters for a feed-forward network used in residual
connections.
Make sure the dimension of the output tensor is equal to ``dim``.
See :func:`~texar.modules.default_transformer_poswise_net_hparams`
for details.
`"multihead_attention"`: dict
Hyperparameters for the multi-head attention strategy.
Make sure the ``output_dim`` in this module is equal to ``dim``.
See :func:`~texar.modules.MultiheadAttentionEncoder.default_hparams`
for details.
`"initializer"`: dict, optional
Hyperparameters of the default initializer that initializes
variables created in this module.
See :func:`~texar.core.get_initializer` for details.
`"embedding_tie"`: bool
Whether to use the word embedding matrix as the output layer
that computes logits. If `False`, a new dense layer is created.
`"output_layer_bias"`: bool
Whether to use bias to the output layer.
`"max_decoding_length"`: int
The maximum allowed number of decoding steps.
Set to a very large number of avoid the length constraint.
Ignored if provided in :meth:`forward` or ``"train_greedy"``
decoding is used.
`"name"`: str
Name of the module.
"""
dim = 512
return {
'num_blocks': 6,
'dim': dim,
'use_gpt_config': False,
'embedding_tie': True,
'output_layer_bias': False,
'max_decoding_length': int(1e10),
'embedding_dropout': 0.1,
'residual_dropout': 0.1,
'poswise_feedforward': default_transformer_poswise_net_hparams(dim),
'multihead_attention': {
'name': 'multihead_attention',
'num_units': 512,
'num_heads': 8,
'dropout_rate': 0.1,
'output_dim': 512,
'use_bias': False,
},
'initializer': None,
'name': "transformer_decoder",
}
def _inputs_to_outputs(self, inputs: torch.Tensor,
cache: Cache) -> Tuple[torch.Tensor, Cache]:
r"""Returns the outputs of one decoding step (for example,
the predicted logits of the next token).
:attr:`inputs` should be of shape ``[batch_size, dim]``.
Returns:
A tuple of logits and updated cache. Logits are of shape
``[batch_size, vocab_size]``.
"""
outputs = self._self_attention_stack(
inputs.unsqueeze(1), memory=cache['memory'], cache=cache)
outputs = self._output_layer(outputs)
outputs = outputs.squeeze(1)
return outputs, cache
def forward(self, # type: ignore
inputs: Optional[torch.Tensor] = None,
sequence_length: Optional[torch.LongTensor] = None,
memory: Optional[torch.Tensor] = None,
memory_sequence_length: Optional[torch.LongTensor] = None,
memory_attention_bias: Optional[torch.Tensor] = None,
context: Optional[torch.Tensor] = None,
context_sequence_length: Optional[torch.LongTensor] = None,
helper: Optional[Helper] = None,
decoding_strategy: str = 'train_greedy',
max_decoding_length: Optional[int] = None,
impute_finished: bool = False,
infer_mode: Optional[bool] = None,
beam_width: Optional[int] = None,
length_penalty: float = 0.,
**kwargs) \
-> Union[
TransformerDecoderOutput,
Tuple[TransformerDecoderOutput, torch.LongTensor],
Dict[str, torch.Tensor]]:
r"""Performs decoding.
The interface is very similar to that of RNN decoders
(:class:`texar.modules.RNNDecoderBase`). In particular,
the function provides **3 ways** to specify the decoding method, with
varying flexibility:
1. The :attr:`decoding_strategy` argument.
- **"train_greedy"**: decoding in teacher-forcing fashion (i.e.,
feeding ground truth to decode the next step), and for each step
sample is obtained by taking the `argmax` of logits.
Argument :attr:`inputs` is required for this strategy.
:attr:`sequence_length` is optional.
- **"infer_greedy"**: decoding in inference fashion (i.e., feeding
`generated` sample to decode the next step), and for each step
sample is obtained by taking the `argmax` of logits.
Arguments :attr:`(start_tokens, end_token)` are
required for this strategy, and argument
:attr:`max_decoding_length` is optional.
- **"infer_sample"**: decoding in inference fashion, and for each
step sample is obtained by `random sampling` from the logits.
Arguments :attr:`(start_tokens, end_token)` are required for this
strategy, and argument :attr:`max_decoding_length` is optional.
This argument is used only when arguments :attr:`helper` and
:attr:`beam_width` are both `None`.
2. The :attr:`helper` argument: An instance of subclass of
:class:`texar.modules.decoders.Helper`.
This provides a superset of decoding strategies than above.
The interface is the same as in RNN decoders.
Please refer to :meth:`texar.modules.RNNDecoderBase.forward` for
detailed usage and examples.
Note that, here, though using a
:class:`~texar.decoder.TrainingHelper` corresponding to the
``"train_greedy"`` strategy above, the implementation is *slower*
than directly setting ``decoding_strategy="train_greedy"`` (though
output results are the same).
Argument :attr:`max_decoding_length` is optional.
3. **Beam search**: set :attr:`beam_width` to use beam search decoding.
Arguments :attr:`(start_tokens, end_token)` are required,
and argument :attr:`max_decoding_length` is optional.
.. warning::
Beam search is not yet implemented. Setting :attr:`beam_width`
to any value greater than 1 would raise a
:exc:`NotImplementedError`
Args:
memory (optional): The memory to attend, e.g., the output of an RNN
encoder. A :tensor:`Tensor` of shape
``[batch_size, memory_max_time, dim]``.
memory_sequence_length (optional): A :tensor:`Tensor` of shape
``[batch_size]`` containing the sequence lengths for the batch
entries in memory. Used to create attention bias of
:attr:`memory_attention_bias` is not given. Ignored if
:attr:`memory_attention_bias` is provided.
memory_attention_bias (optional): A :tensor:`Tensor` of shape
``[batch_size, num_heads, memory_max_time, dim]``.
An attention bias typically sets the value of a padding
position to a large negative value for masking. If not given,
:attr:`memory_sequence_length` is used to automatically
create an attention bias.
inputs (optional): Input tensor for teacher forcing decoding, of
shape ``[batch_size, target_max_time, emb_dim]`` containing the
target sequence word embeddings. Used when
:attr:`decoding_strategy` is set to ``"train_greedy"``.
sequence_length (optional): A :tensor:`LongTensor` of shape
``[batch_size]``, containing the sequence length of
:attr:`inputs`. Tokens beyond the respective sequence length are
masked out.
Used when :attr:`decoding_strategy` is set to
``"train_greedy"``.
decoding_strategy (str): A string specifying the decoding
strategy, including ``"train_greedy"``, ``"infer_greedy"``,
``"infer_sample"``.
Different arguments are required based on the
strategy. See above for details. Ignored if
:attr:`beam_width` or :attr:`helper` is set.
beam_width (int): Set to use beam search. If given,
:attr:`decoding_strategy` is ignored.
length_penalty (float): Length penalty coefficient used in beam
search decoding. Refer to https://arxiv.org/abs/1609.08144
for more details.
It should be larger if longer sentences are desired.
context (optional): An :tensor:`LongTensor` of shape
``[batch_size, length]``, containing the starting tokens for
decoding. If context is set, ``start_tokens`` of the
:class:`~texar.modules.Helper` will be ignored.
context_sequence_length (optional): Specify the length of context.
max_decoding_length (int, optional): The maximum allowed number of
decoding steps.
If `None` (default), use ``"max_decoding_length"`` defined in
:attr:`hparams`. Ignored in ``"train_greedy"`` decoding.
impute_finished (bool): If `True`, then states for batch
entries which are marked as finished get copied through and
the corresponding outputs get zeroed out. This causes some
slowdown at each time step, but ensures that the final state
and outputs have the correct values and that backprop ignores
time steps that were marked as finished. Ignored in
``"train_greedy"`` decoding.
helper (optional): An instance of
:class:`texar.modules.decoders.Helper`
that defines the decoding strategy. If given,
``decoding_strategy`` and helper configurations in
:attr:`hparams` are ignored.
infer_mode (optional): If not `None`, overrides mode given by
:attr:`self.training`.
Returns:
- For **"train_greedy"** decoding, returns an instance of
:class:`~texar.modules.TransformerDecoderOutput` which contains
`sample_id` and `logits`.
- For **"infer_greedy"** and **"infer_sample"** decoding or
decoding with :attr:`helper`, returns
a tuple ``(outputs, sequence_lengths)``, where ``outputs`` is an
instance of :class:`~texar.modules.TransformerDecoderOutput` as
in `"train_greedy"`, and ``sequence_lengths`` is a
:tensor:`LongTensor` of shape ``[batch_size]`` containing the
length of each sample.
- For **beam search** decoding, returns a ``dict`` containing keys
``"sample_id"`` and ``"log_prob"``.
- ``"sample_id"`` is a :tensor:`LongTensor` of shape
``[batch_size, max_time, beam_width]`` containing generated
token indexes. ``sample_id[:,:,0]`` is the highest-probable
sample.
- ``"log_prob"`` is a :tensor:`Tensor` of shape
``[batch_size, beam_width]`` containing the log probability
of each sequence sample.
"""
if memory is not None:
if memory_attention_bias is None:
if memory_sequence_length is None:
raise ValueError(
"`memory_sequence_length` is required if "
"`memory_attention_bias` is not given.")
enc_padding = 1 - sequence_mask(
memory_sequence_length, memory.size(1),
dtype=torch.float32)
memory_attention_bias = attn.attention_bias_ignore_padding(
enc_padding)
# record the context, which will be used in step function
# for dynamic_decode
if context is not None:
if context_sequence_length is None:
raise ValueError("'context_sequence_length' must not be None"
"when 'context' is specified.")
self._state_context = context[:, 1:]
self._state_context_sequence_length = context_sequence_length - 1
else:
self._state_context = None
self._state_context_sequence_length = None
# Faster code path for teacher-forcing training
if (helper is None and beam_width is None and
decoding_strategy == 'train_greedy'):
if inputs is None:
raise ValueError("'input' must not be none "
"when using 'train_greedy' decoding strategy.")
if sequence_length is not None:
inputs = mask_sequences(inputs, sequence_length)
decoder_self_attention_bias = (
attn.attention_bias_lower_triangle(inputs.size(1)))
decoder_output = self._self_attention_stack(
inputs, memory, decoder_self_attention_bias,
memory_attention_bias, cache=None)
logits = self._output_layer(decoder_output)
sample_id = torch.argmax(logits, dim=-1)
return TransformerDecoderOutput(logits, sample_id)
# Inference code path.
if max_decoding_length is None:
max_decoding_length = self._hparams.max_decoding_length
self._state_max_decoding_length = max_decoding_length
if beam_width is None or beam_width == 1: # Inference-like decoding
# Prepare helper
if helper is None:
kwargs.update(decoding_strategy=decoding_strategy)
if context is not None:
kwargs.update(start_tokens=context[:, 0])
helper = self._create_or_get_helper(infer_mode, **kwargs)
assert isinstance(helper, EmbeddingHelper)
self._state_cache = self._init_cache(
memory, memory_attention_bias,
beam_search_decoding=False, batch_size=helper.batch_size)
if context is not None:
assert self._state_context is not None
pad_length = max_decoding_length - self._state_context.size(1)
if pad_length > 0:
self._state_context = torch.cat((
self._state_context,
self._state_context.new_zeros(
self._state_context.size(0), pad_length)
), dim=1)
outputs, cache, sequence_lengths = self.dynamic_decode(
helper, inputs=None, sequence_length=None,
initial_state=None, max_decoding_length=max_decoding_length,
impute_finished=impute_finished)
del cache # not used
if context is not None:
                # Here the length of sample_id will be larger than that
                # of logits by 1, because an additional start token is
                # prepended to the returned sample_id. The start token
                # should be the first token of the given context.
start_tokens = context[:, 0]
outputs = TransformerDecoderOutput(
logits=outputs.logits,
sample_id=torch.cat([
start_tokens.unsqueeze(1),
outputs.sample_id
], dim=1))
sequence_lengths = sequence_lengths + 1
return outputs, sequence_lengths
else: # Beam-search decoding
            # Ignore `decoding_strategy`; `helper` must not be set here.
if helper is not None:
raise ValueError("Must not set 'beam_width' and 'helper' "
"simultaneously.")
if context is not None:
start_tokens = context[:, 0]
else:
if 'start_tokens' not in kwargs:
raise ValueError(
"'start_tokens' must be specified when using"
"beam search decoding.")
start_tokens = kwargs['start_tokens']
_batch_size = start_tokens.size(0)
self._state_cache = self._init_cache(
memory, memory_attention_bias,
beam_search_decoding=True,
batch_size=_batch_size)
end_token: int = kwargs.get('end_token') # type: ignore
# The output format is different when running beam search.
sample_id, log_prob = self._beam_decode(
start_tokens,
end_token,
embedding_fn=kwargs['embedding'],
beam_width=beam_width,
length_penalty=length_penalty,
decode_length=max_decoding_length)
return {
'sample_id': sample_id,
'log_prob': log_prob
}
def _self_attention_stack(
self, inputs: torch.Tensor,
memory: Optional[torch.Tensor],
decoder_self_attention_bias: Optional[torch.Tensor] = None,
memory_attention_bias: Optional[torch.Tensor] = None,
cache: Optional[Cache] = None) -> torch.Tensor:
r"""Forward through the stacked multi-head attentions.
"""
inputs = self.embed_dropout(inputs)
if cache is not None:
if memory is not None:
memory_attention_bias = cache['memory_attention_bias']
else:
assert decoder_self_attention_bias is not None
x = inputs
for i in range(self._hparams.num_blocks):
layer_cache = cache['layers'][i] if cache is not None else None
selfatt_output = self.self_attns[i](
queries=self.self_attn_layer_norm[i](x),
memory=None,
memory_attention_bias=decoder_self_attention_bias,
cache=layer_cache)
x = x + self.residual_dropout(selfatt_output)
if memory is not None:
encdec_output = self.enc_dec_attns[i](
queries=self.end_dec_attn_layer_norm[i](x),
memory=memory,
memory_attention_bias=memory_attention_bias)
x = x + self.residual_dropout(encdec_output)
sub_output = self.poswise_networks[i](self.poswise_layer_norm[i](x))
x = x + self.residual_dropout(sub_output)
return self.final_layer_norm(x)
def _init_cache(self, memory: Optional[torch.Tensor],
memory_attention_bias: Optional[torch.Tensor],
beam_search_decoding: bool,
batch_size: int) -> Cache:
r"""Returns an initialized cache.
In order to support both inference-like decoding and beam-search
decoding, the elements of each layer must be initialized and extended
as different structure respectively. Specifically, for inference-like
decoding, a simple list is used; for beam-search decoding, a
:tensor:`Tensor` of shape ``[batch_size, current_steps, num_units]``
is maintained, where ``current_steps`` is the number of steps currently
decoded.
"""
device = next(self.parameters()).device
_create_fn = (_create_empty_tensor if beam_search_decoding
else _create_ta)
cache: Cache = {
'memory': memory,
'memory_attention_bias': memory_attention_bias,
'layers': [{
'keys': _create_fn(),
'values': _create_fn(),
} for _ in range(self._hparams.num_blocks)],
}
return cache
@property
def output_size(self) -> int:
r"""Output size of one step.
"""
return self._input_size
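
# Illustrative sketch (not part of the original module): the teacher-forcing
# path above relies on a causal ("lower triangle") attention bias so that
# position i can only attend to positions <= i. Assuming the texar convention
# of adding a large negative value to masked logits, a minimal stand-alone
# construction looks like this:
import torch

def _causal_attention_bias(length: int, neg_inf: float = -1e18) -> torch.Tensor:
    r"""Return a ``[1, 1, length, length]`` additive bias that blocks
    attention to future positions (illustrative helper only)."""
    future = torch.triu(torch.ones(length, length), diagonal=1)
    return (future * neg_inf).view(1, 1, length, length)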
| [
2,
15069,
13130,
383,
3567,
283,
46665,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
... | 2.165167 | 11,782 |
import os.path
from argparse import ArgumentParser, Namespace
from sys import stdout
from utils.config import COOKIE
| [
11748,
28686,
13,
6978,
198,
6738,
1822,
29572,
1330,
45751,
46677,
11,
28531,
10223,
198,
6738,
25064,
1330,
14367,
448,
198,
198,
6738,
3384,
4487,
13,
11250,
1330,
327,
15308,
10008,
628,
628
] | 3.666667 | 33 |
from django.conf.urls import url
from . import views
urlpatterns = [
    url(r'buy.', views.buy_product),
    url(r'', views.show_product),
]
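
# Note (illustration, not from the original file): r'buy.' is unanchored and
# matches "buy" plus any one character anywhere in the path, while r'' matches
# every URL. A more conventional, anchored layout would be:
#
#   urlpatterns = [
#       url(r'^buy/$', views.buy_product),
#       url(r'^$', views.show_product),
#   ]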
| [
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
19016,
198,
6738,
764,
1330,
5009,
198,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
19016,
7,
81,
6,
17846,
2637,
11,
33571,
13,
17846,
62,
11167,
828,
198,
220,
220,
220,
... | 2.448276 | 58 |
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from future import standard_library
standard_library.install_aliases()
from builtins import str, object
import datetime
import json
import logging
import math
import numbers
import sys
import uuid
from datetime import timedelta
from django.contrib.sessions.models import Session
from django.db.models import Count
from django.db.models.functions import Trunc
from django.urls import reverse
from django.utils.html import escape
from django.utils.translation import ugettext as _
from desktop.conf import has_connectors, TASK_SERVER
from desktop.lib.i18n import smart_unicode
from desktop.lib.paths import SAFE_CHARACTERS_URI
from desktop.models import Document2
from useradmin.models import User
from notebook.conf import EXAMPLES, get_ordered_interpreters
from notebook.connectors.base import Notebook, get_api as _get_api, get_interpreter
if sys.version_info[0] > 2:
from urllib.parse import quote as urllib_quote
else:
from urllib import quote as urllib_quote
LOG = logging.getLogger(__name__)
# Materialize and HTML escape results
def make_notebook(
name='Browse', description='', editor_type='hive', statement='', status='ready',
files=None, functions=None, settings=None, is_saved=False, database='default', snippet_properties=None, batch_submit=False,
on_success_url=None, skip_historify=False, is_task=False, last_executed=-1, is_notebook=False, pub_sub_url=None, result_properties={},
namespace=None, compute=None, is_presentation_mode=False):
'''
skip_historify: do not add the task to the query history. e.g. SQL Dashboard
  is_task / isManaged: true when the operation is managed by Hue (include_managed=True in document),
e.g. exporting query result, dropping some tables
'''
from notebook.connectors.hiveserver2 import HS2Api
if has_connectors():
interpreter = get_interpreter(connector_type=editor_type)
editor_connector = editor_type
editor_type = interpreter['dialect']
else:
editor_connector = editor_type
editor = Notebook()
if snippet_properties is None:
snippet_properties = {}
if editor_type == 'hive':
sessions_properties = HS2Api.get_properties(editor_type)
if files is not None:
_update_property_value(sessions_properties, 'files', files)
if functions is not None:
_update_property_value(sessions_properties, 'functions', functions)
if settings is not None:
_update_property_value(sessions_properties, 'settings', settings)
elif editor_type == 'impala':
sessions_properties = HS2Api.get_properties(editor_type)
if settings is not None:
      _update_property_value(sessions_properties, 'settings', settings)
elif editor_type == 'java':
sessions_properties = [] # Java options
else:
sessions_properties = []
data = {
'name': name,
'uuid': str(uuid.uuid4()),
'description': description,
'sessions': [
{
'type': editor_connector,
'properties': sessions_properties,
'id': None
}
],
'selectedSnippet': editor_connector, # TODO: might need update in notebook.ko.js
'type': 'notebook' if is_notebook else 'query-%s' % editor_type,
'showHistory': True,
'isSaved': is_saved,
'onSuccessUrl': urllib_quote(on_success_url.encode('utf-8'), safe=SAFE_CHARACTERS_URI) if on_success_url else None,
'pubSubUrl': pub_sub_url,
'skipHistorify': skip_historify,
'isPresentationModeDefault': is_presentation_mode,
'isManaged': is_task,
'snippets': [
{
'status': status,
'id': str(uuid.uuid4()),
'statement_raw': statement,
'statement': statement,
'type': editor_connector,
'wasBatchExecuted': batch_submit,
'lastExecuted': last_executed,
'properties': {
'files': [] if files is None else files,
'functions': [] if functions is None else functions,
'settings': [] if settings is None else settings
},
'name': name,
'database': database,
'namespace': namespace if namespace else {},
'compute': compute if compute else {},
'result': {'handle': {}},
'variables': []
}
] if not is_notebook else []
}
if has_connectors(): # To improve
data['dialect'] = interpreter['dialect']
data['type'] = 'phoenix-' + editor_connector # 'flink-' + editor_connector
if snippet_properties:
data['snippets'][0]['properties'].update(snippet_properties)
if result_properties:
data['snippets'][0]['result'].update(result_properties)
editor.data = json.dumps(data)
return editor
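
# Hypothetical usage sketch (illustrative values only): `make_notebook` just
# assembles the notebook payload; executing it happens elsewhere through the
# notebook API.
def _example_make_notebook():
  """Build a ready-to-run Hive query notebook (illustrative only)."""
  return make_notebook(
      name='Sample query',
      editor_type='hive',
      statement='SELECT * FROM web_logs LIMIT 10',
      is_task=True,
  )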
def _get_notebook_api(user, connector_id, interpreter=None):
'''
Helper utils until the API gets simplified.
'''
notebook_json = """
{
"selectedSnippet": "hive",
"showHistory": false,
"description": "Test Query",
"name": "Test Query",
"sessions": [
{
"type": "hive",
"properties": [],
"id": null
}
],
"type": "hive",
"id": null,
"snippets": [{"id":"2b7d1f46-17a0-30af-efeb-33d4c29b1055","type":"%(connector_id)s","status":"running",\
"statement":"select * from web_logs","properties":{"settings":[],"variables":[],"files":[],"functions":[]},\
"result":{"id":"b424befa-f4f5-8799-a0b4-79753f2552b1","type":"table",\
"handle":{"log_context":null,"statements_count":1,\
"end":{"column":21,"row":0},"statement_id":0,"has_more_statements":false,\
"start":{"column":0,"row":0},"secret":"rVRWw7YPRGqPT7LZ/TeFaA==an","has_result_set":true,\
"statement":"select * from web_logs","operation_type":0,"modified_row_count":null,"guid":"7xm6+epkRx6dyvYvGNYePA==an"}},\
"lastExecuted": 1462554843817,"database":"default"}],
"uuid": "d9efdee1-ef25-4d43-b8f9-1a170f69a05a"
}
""" % {
'connector_id': connector_id,
}
snippet = json.loads(notebook_json)['snippets'][0]
snippet['interpreter'] = interpreter
request = MockRequest(user)
return get_api(request, snippet)
def _update_property_value(properties, key, value):
"""
Update property dict in list of properties where prop has "key": key, set "value": value
"""
for prop in properties:
if prop['key'] == key:
prop.update({'value': value})
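
# For example (illustrative only), given
#   properties = [{'key': 'files', 'value': []}]
# calling _update_property_value(properties, 'files', ['demo.csv']) rewrites
# the matching entry in place to {'key': 'files', 'value': ['demo.csv']}.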
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
49962,
284,
1012,
280,
1082,
64,
11,
3457,
13,
739,
530,
198,
2,
393,
517,
18920,
5964,
11704,
13,
220,
4091,
262,
28536,
2393,
198,
2,
9387,
351,
428,
670,
329,
3224,
1321,
198... | 2.695274 | 2,645 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-04-28 18:42
from __future__ import unicode_literals
from django.db import migrations, models
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
1157,
13,
19,
319,
2864,
12,
3023,
12,
2078,
1248,
25,
3682,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
... | 2.736842 | 57 |
#
# MIT License
#
# (C) Copyright 2019, 2021-2022 Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
"""Tests of the Mock System used for stand-alone development and unit
testing of the Compute Rolling Upgrade Agent.
"""
from crus.controllers.upgrade_agent.node_table import NodeTable
from crus.app import APP, HEADERS
from crus.controllers.mocking.shared import requests
BASE_URI = APP.config['NODE_GROUP_URI']
NODE_GROUPS_URI = BASE_URI
NODE_GROUP_URI = "%s/%%s" % BASE_URI
NODE_GROUP_MEMBERS_URI = "%s/%%s/members" % BASE_URI
NODE_GROUP_MEMBER_URI = "%s/%%s/members/%%s" % BASE_URI
HTTPS_VERIFY = APP.config['HTTPS_VERIFY']
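
# Illustrative expansion (assuming BASE_URI were "http://api/groups"):
#   NODE_GROUP_URI % "g1"                      -> "http://api/groups/g1"
#   NODE_GROUP_MEMBER_URI % ("g1", "x0c0s0b0") -> "http://api/groups/g1/members/x0c0s0b0"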
def test_node_group():
"""Test creation of a node group with no members in it, then delete it.
"""
# Create the group
label = "test_group"
create_uri = NODE_GROUPS_URI
data = {
'label': label,
'description': "My test group",
'tags': ['a tag'],
'members': {'ids': []}
}
result = requests.post(create_uri, json=data,
headers=HEADERS, verify=HTTPS_VERIFY)
assert result.status_code == requests.codes['created']
# Verify the initial contents
get_uri = NODE_GROUP_URI % label
result = requests.get(get_uri, headers=HEADERS, verify=HTTPS_VERIFY)
assert result.status_code == requests.codes['ok']
result_data = result.json()
assert 'label' in result_data
assert result_data['label'] == data['label']
assert 'description' in result_data
assert result_data['description'] == data['description']
assert 'members' in result_data
assert result_data['members'] == data['members']
# Add some XNAMEs to the group
some_xnames = NodeTable.get_all_xnames()[:50]
for xname in some_xnames:
member_data = {
'id': xname
}
add_member_uri = NODE_GROUP_MEMBERS_URI % label
result = requests.post(add_member_uri, json=member_data,
headers=HEADERS, verify=HTTPS_VERIFY)
assert result.status_code == requests.codes['created']
# Verify that the members got added...
result = requests.get(get_uri, headers=HEADERS, verify=HTTPS_VERIFY)
assert result.status_code == requests.codes['ok']
result_data = result.json()
assert 'label' in result_data
assert result_data['label'] == data['label']
assert 'description' in result_data
assert result_data['description'] == data['description']
assert 'members' in result_data
assert 'ids' in result_data['members']
member_xnames = result_data['members']['ids']
for xname in member_xnames:
assert xname in some_xnames
for xname in some_xnames:
assert xname in member_xnames
# Delete all the members that we added...
for xname in some_xnames:
delete_member_uri = NODE_GROUP_MEMBER_URI % (label, xname)
result = requests.delete(delete_member_uri, headers=HEADERS,
verify=HTTPS_VERIFY)
assert result.status_code == requests.codes['ok']
# Verify that the members got deleted (we should be back where we were
# right after creation)
result = requests.get(get_uri, headers=HEADERS, verify=HTTPS_VERIFY)
assert result.status_code == requests.codes['ok']
result_data = result.json()
assert 'label' in result_data
assert result_data['label'] == data['label']
assert 'description' in result_data
assert result_data['description'] == data['description']
assert 'members' in result_data
assert result_data['members'] == data['members']
# Delete the group
delete_uri = NODE_GROUP_URI % label
result = requests.delete(delete_uri, headers=HEADERS, verify=HTTPS_VERIFY)
assert result.status_code == requests.codes['ok']
# Make sure it is gone
result = requests.get(get_uri, json=data,
headers=HEADERS, verify=HTTPS_VERIFY)
assert result.status_code == requests.codes['not_found']
result_data = result.json()
assert 'title' in result_data
assert result_data['title'] == "Not Found"
assert 'detail' in result_data
assert 'status' in result_data
assert result_data['status'] == result.status_code
# pylint: disable=invalid-name
def test_fail_create_node_group_no_input():
"""Test that creating a node group with no input data fails as
expected.
"""
# Create the group
result = requests.post(NODE_GROUPS_URI, headers=HEADERS,
verify=HTTPS_VERIFY)
assert result.status_code == requests.codes['server_error']
result_data = result.json()
assert 'title' in result_data
assert result_data['title'] == "Internal Server Error"
assert 'detail' in result_data
assert result_data['detail'] == \
"error decoding JSON unexpected end of JSON input"
assert 'status' in result_data
assert result_data['status'] == result.status_code
# pylint: disable=invalid-name
def test_fail_create_node_group_no_label():
"""Test that creating a node group with no label in the input data
fails as expected.
"""
# Create the group
data = {
'description': "My test group",
'members': {'ids': []}
}
result = requests.post(NODE_GROUPS_URI, json=data,
headers=HEADERS, verify=HTTPS_VERIFY)
assert result.status_code == requests.codes['bad']
result_data = result.json()
assert 'title' in result_data
assert result_data['title'] == "Bad Request"
assert 'detail' in result_data
assert 'status' in result_data
assert result_data['status'] == result.status_code
# pylint: disable=invalid-name
def test_fail_create_duplicate_group():
"""Test that trying to create the same group twice in a row fails.
"""
# Create the group
label = "test_group"
data = {
'label': label,
'description': "My test group",
'members': {'ids': []}
}
result = requests.post(NODE_GROUPS_URI, json=data,
headers=HEADERS, verify=HTTPS_VERIFY)
assert result.status_code == requests.codes['created']
# Verify the initial contents
get_uri = NODE_GROUP_URI % label
result = requests.get(get_uri, headers=HEADERS, verify=HTTPS_VERIFY)
assert result.status_code == requests.codes['ok']
result_data = result.json()
assert 'label' in result_data
assert result_data['label'] == data['label']
assert 'description' in result_data
assert result_data['description'] == data['description']
assert 'members' in result_data
assert result_data['members'] == data['members']
# Now try to create it again
result = requests.post(NODE_GROUPS_URI, json=data,
headers=HEADERS, verify=HTTPS_VERIFY)
assert result.status_code == requests.codes['conflict']
result_data = result.json()
assert 'title' in result_data
assert result_data['title'] == "Conflict"
assert 'detail' in result_data
assert result_data['detail'] == \
"operation would conflict with an existing group that "\
"has the same label."
assert 'status' in result_data
assert result_data['status'] == result.status_code
# Delete the group
delete_uri = NODE_GROUP_URI % label
result = requests.delete(delete_uri, headers=HEADERS, verify=HTTPS_VERIFY)
assert result.status_code == requests.codes['ok']
# Make sure it is gone
result = requests.get(get_uri, json=data,
headers=HEADERS, verify=HTTPS_VERIFY)
assert result.status_code == requests.codes['not_found']
# pylint: disable=invalid-name
def test_fail_delete_group_unknown():
"""Test that trying to delete an unknown group fails as expected.
"""
delete_uri = NODE_GROUP_URI % "not_there"
result = requests.delete(delete_uri, headers=HEADERS, verify=HTTPS_VERIFY)
assert result.status_code == requests.codes['not_found']
result_data = result.json()
assert 'title' in result_data
assert result_data['title'] == "Not Found"
assert 'detail' in result_data
assert 'status' in result_data
assert result_data['status'] == result.status_code
# pylint: disable=invalid-name
def test_fail_create_member_no_data():
"""Test that trying to create a new member in a node group without
supplying any data fails as expected.
"""
add_member_uri = NODE_GROUP_MEMBERS_URI % "fake"
result = requests.post(add_member_uri, headers=HEADERS,
verify=HTTPS_VERIFY)
assert result.status_code == requests.codes['server_error']
result_data = result.json()
assert 'title' in result_data
assert result_data['title'] == "Internal Server Error"
assert 'detail' in result_data
assert result_data['detail'] == \
"error decoding JSON unexpected end of JSON input"
assert 'status' in result_data
assert result_data['status'] == result.status_code
# pylint: disable=invalid-name
def test_fail_create_member_no_id():
"""Test that trying to create a new member in a node group without
supplying any data fails as expected.
"""
data = {
'filler': 1
}
add_member_uri = NODE_GROUP_MEMBERS_URI % "fake"
result = requests.post(add_member_uri, json=data,
headers=HEADERS, verify=HTTPS_VERIFY)
assert result.status_code == requests.codes['bad']
result_data = result.json()
assert 'title' in result_data
assert result_data['title'] == "Bad Request"
assert 'detail' in result_data
assert result_data['detail'] == \
"invalid xname ID"
assert 'status' in result_data
assert result_data['status'] == result.status_code
def test_fail_delete_unknown_member():
"""Test that trying to delete a node group member that does not exist
fails as expected.
"""
# Create the group
label = "test_group"
create_uri = NODE_GROUPS_URI
data = {
'label': label,
'description': "My test group",
'members': {'ids': []}
}
result = requests.post(create_uri, json=data,
headers=HEADERS, verify=HTTPS_VERIFY)
assert result.status_code == requests.codes['created']
# Verify the initial contents
get_uri = NODE_GROUP_URI % label
result = requests.get(get_uri, headers=HEADERS, verify=HTTPS_VERIFY)
assert result.status_code == requests.codes['ok']
result_data = result.json()
assert 'label' in result_data
assert result_data['label'] == data['label']
assert 'description' in result_data
assert result_data['description'] == data['description']
assert 'members' in result_data
assert result_data['members'] == data['members']
# Delete an invalid group member
delete_member_uri = NODE_GROUP_MEMBER_URI % (label, "not_there")
result = requests.delete(delete_member_uri, headers=HEADERS,
verify=HTTPS_VERIFY)
assert result.status_code == requests.codes['not_found']
result_data = result.json()
assert 'title' in result_data
assert result_data['title'] == "Not Found"
assert 'detail' in result_data
assert result_data['detail'] == "group has no such member."
assert 'status' in result_data
assert result_data['status'] == result.status_code
# Delete the group
delete_uri = NODE_GROUP_URI % label
result = requests.delete(delete_uri, headers=HEADERS, verify=HTTPS_VERIFY)
assert result.status_code == requests.codes['ok']
# Make sure it is gone
result = requests.get(get_uri, json=data,
headers=HEADERS, verify=HTTPS_VERIFY)
assert result.status_code == requests.codes['not_found']
result_data = result.json()
assert 'title' in result_data
assert result_data['title'] == "Not Found"
assert 'detail' in result_data
assert 'status' in result_data
assert result_data['status'] == result.status_code
| [
2,
198,
2,
17168,
13789,
198,
2,
198,
2,
357,
34,
8,
15069,
13130,
11,
33448,
12,
1238,
1828,
30446,
15503,
6400,
446,
14973,
7712,
18470,
198,
2,
198,
2,
2448,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
1672... | 2.740514 | 4,744 |
from django.urls import path
from . import views
# from rest_framework import routers
# router = routers.DefaultRouter()
# router.register('getinfo', views.getinfo)
urlpatterns = [
path('kvupdate1', views.kvupdate1, name='kvupdate1')
]
| [
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
198,
6738,
764,
1330,
5009,
198,
2,
422,
1334,
62,
30604,
1330,
41144,
198,
198,
2,
20264,
796,
41144,
13,
19463,
49,
39605,
3419,
198,
2,
20264,
13,
30238,
10786,
1136,
10951,
3256,
5009,... | 2.987654 | 81 |
#Imports
from n2v.models import Config, CARE
import numpy as np
from n2v.utils import plot_some, plot_history
from n2v.utils.n2v_utils import manipulate_val_data
import urllib
import os
import zipfile
import json
from os.path import join
from skimage import io
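
# `normalize`/`denormalize` are used below but were never imported in the
# original snippet; the standard per-dataset whitening helpers are assumed.
def normalize(img, mean, std):
    return (img - mean) / std

def denormalize(img, mean, std):
    return img * std + mean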
with open('experiment.json', 'r') as f:
exp_params = json.load(f)
#Read training images and GT from StarVoid/dataset/...
test_files = np.load(exp_params["test_path"])
X = test_files['X_test']
train_files = np.load(exp_params["train_path"])
X_trn = train_files['X_train']
mean, std = np.mean(X_trn), np.std(X_trn)
X = normalize(X, mean, std)
model = CARE(None, name=exp_params['model_name'], basedir=exp_params['base_dir'])
# X = X[...,np.newaxis]
#predictions = []
# Denoise all images
for i in range(X.shape[0]):
pred = denormalize(model.predict(X[i][..., np.newaxis], axes='YXC',normalizer=None ), mean, std)
# predictions.append(pred)
io.imsave(join(exp_params['base_dir'], 'mask'+str(i).zfill(3)+'.tif'), pred)
#predictions = np.array(predictions)
| [
2,
3546,
3742,
198,
198,
6738,
299,
17,
85,
13,
27530,
1330,
17056,
11,
47342,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
299,
17,
85,
13,
26791,
1330,
7110,
62,
11246,
11,
7110,
62,
23569,
198,
6738,
299,
17,
85,
13,
26791,
1... | 2.576733 | 404 |
# users/app/app.py
import os
from flask import Flask
from app.api.utils.extensions import (
db, bcrypt, jwt, cors
)
from app.api.utils.func import JSONEncoder
def create_app(app_info=None):
"""Create Flask application in factory pattern
"""
app = Flask(__name__)
app.config.from_object(os.environ.get('APP_SETTINGS'))
cors.init_app(app, origins=app.config.get('CORS_ORIGINS'), supports_credentials=True)
db.init_app(app)
bcrypt.init_app(app)
jwt.init_app(app)
from .api.views import user_api as user_bp
app.register_blueprint(user_bp, url_prefix='/v1/')
app.json_encoder = JSONEncoder
    @app.route('/')
    def index():
        # The original snippet left this view out (a bare decorator followed
        # by `return app` is a SyntaxError), so a minimal stub is assumed.
        return 'OK'

    return app
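
if __name__ == '__main__':
    # Illustrative only: run the development server when executed directly.
    # In deployment the factory is usually invoked by a WSGI entry point or
    # the flask CLI (e.g. FLASK_APP="app.app:create_app()").
    create_app().run(host='127.0.0.1', port=5000)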
| [
2,
2985,
14,
1324,
14,
1324,
13,
9078,
198,
198,
11748,
28686,
198,
198,
6738,
42903,
1330,
46947,
198,
198,
6738,
598,
13,
15042,
13,
26791,
13,
2302,
5736,
1330,
357,
198,
220,
220,
220,
20613,
11,
275,
29609,
11,
474,
46569,
11,
... | 2.474453 | 274 |
from discord.ext import commands
import requests
import json
| [
6738,
36446,
13,
2302,
1330,
9729,
198,
11748,
7007,
198,
11748,
33918,
628
] | 4.769231 | 13 |
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from pavlov import runs, stats
from rebar import dotdict
from . import json
import activelo
| [
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
45549,
27086,
1330,
4539,
11,
9756,
198,
6738,
3405,
283,
1330,
16605,
11600,
198,
6738,
764,
... | 3.395833 | 48 |
'''
submodules to handle Catalogs used in the project
'''
import os
import numpy as np
import h5py
from astropy.io import fits
from astropy.table import Table as aTable
from astropy.cosmology import FlatLambdaCDM
# -- local --
from . import util as UT
class Catalog(object):
''' parent object for the objects in this module. Currently
has no functionality
'''
def _h5py_create_dataset(self, grp, key, data):
''' the arrays from the fits files do not play well with the new h5py
and python3
'''
if isinstance(data, np.chararray) or isinstance(data[0], np.str_):
_chararray = np.array(data, dtype=h5py.special_dtype(vlen=str))
grp.create_dataset(key.lower(), data=_chararray)
elif isinstance(data[0], np.bool_):
_bool = np.zeros(len(data)).astype(bool)
_bool[data] = True
grp.create_dataset(key.lower(), data=_bool)
else:
grp.create_dataset(key.lower(), data=data)
return None
class GAMA(Catalog):
''' class to build/read in photometric and spectroscopic overlap
of the GAMA DR2/DR3 data.
The GAMA DR2 data contains photometry and
spectroscopy from GAMA I, which covers three regions of 48 deg^2
area for a total of 144 deg^2.
The GAMA DR3 data contains photometry and spectroscopy from GAMA II,
which covers the 14x6.5 GAMA regions in NGP (G02 region is EXCLUDED).
'''
def Read(self, field, data_release=3, silent=True):
''' Read in spherematched photometric and spectroscopic
        data from GAMA DR2/DR3 (constructed using _Build).
'''
_file = self._File(field, data_release=data_release)
if not os.path.isfile(_file): # if file is not constructed
if not silent: print('Building %s' % _file)
if field == 'all': self._Build(data_release=data_release, silent=silent)
else: self._fieldSplit(data_release=data_release, silent=silent)
# read in data and compile onto a dictionary
f = h5py.File(_file, 'r')
grp_p = f['photo'] # photo data
grp_s = f['spec'] # spec data
grp_k0 = f['kcorr_z0.0']
grp_k1 = f['kcorr_z0.1']
if not silent:
            print('columns in GAMA photometry')
            print(sorted(grp_p.keys()))
            print('========================')
            print('columns in GAMA spectroscopy')
            print(sorted(grp_s.keys()))
            print('========================')
            print('columns in GAMA kcorrects')
print(sorted(grp_k0.keys()))
print('========================')
print('%i objects' % len(grp_p['ra'][...]))
print('========================')
data = {}
for dkey, grp in zip(['photo', 'spec', 'kcorr_z0.0', 'kcorr_z0.1'], [grp_p, grp_s, grp_k0, grp_k1]):
data[dkey] = {}
for key in grp.keys():
data[dkey][key] = grp[key][...]
return data
def _File(self, field, data_release=3):
''' hdf5 file name of spherematched photometric and spectroscopic
data from GAMA DR3.
notes
-----
* v2 flag was added when photometry catalog was changed from InputCatA.fits
to TilingCat.fits
'''
if field == 'all':
return ''.join([UT.dat_dir(), 'GAMA_photo_spec.DR', str(data_release), '.v2.hdf5']) # output file
elif field == 'g09':
return ''.join([UT.dat_dir(), 'GAMA_photo_spec.DR', str(data_release), '.G09.v2.hdf5']) # output file
elif field == 'g12':
return ''.join([UT.dat_dir(), 'GAMA_photo_spec.DR', str(data_release), '.G12.v2.hdf5']) # output file
elif field == 'g15':
return ''.join([UT.dat_dir(), 'GAMA_photo_spec.DR', str(data_release), '.G15.v2.hdf5']) # output file
def _Build(self, data_release=3, silent=True):
''' Read in the photometric data and the spectroscopic data,
spherematch them and write the intersecting data to hdf5 file.
'''
if data_release == 3:
            # this includes *three* of the four GAMA fields; the G02 field has its own data
# read in photometry (GAMA`s tiling catalog; http://www.gama-survey.org/dr3/schema/table.php?id=3)
gama_p = fits.open(UT.dat_dir()+'gama/dr3/TilingCat.fits')[1].data
# read in emission line measurements (http://www.gama-survey.org/dr3/schema/table.php?id=40)
gama_s = fits.open(UT.dat_dir()+'gama/dr3/GaussFitSimple.fits')[1].data
# read in kcorrect z = 0.0 (http://www.gama-survey.org/dr2/schema/table.php?id=177)
gama_k0 = self._readKcorrect(UT.dat_dir()+'gama/dr3/kcorr_model_z00.fits')
# read in kcorrect z = 0.1 (http://www.gama-survey.org/dr2/schema/table.php?id=178)
gama_k1 = self._readKcorrect(UT.dat_dir()+'gama/dr3/kcorr_model_z01.fits')
elif data_release == 2: # Data Release 2 (what I had before)
# read in photometry (GAMA`s master input catalogue; http://www.gama-survey.org/dr2/schema/table.php?id=156)
gama_p = fits.open(UT.dat_dir()+'gama/InputCatA.fits')[1].data
# read in spectroscopy (http://www.gama-survey.org/dr2/schema/table.php?id=197)
gama_s = fits.open(UT.dat_dir()+'gama/SpecLines.fits')[1].data
# read in kcorrect z = 0.0 (http://www.gama-survey.org/dr2/schema/table.php?id=177)
gama_k0 = self._readKcorrect(UT.dat_dir()+'gama/kcorr_z00.fits')
# read in kcorrect z = 0.1 (http://www.gama-survey.org/dr2/schema/table.php?id=178)
gama_k1 = self._readKcorrect(UT.dat_dir()+'gama/kcorr_z01.fits')
if not silent:
            #print('columns in GAMA photometry')
            #print(sorted(gama_p.__dict__.keys()))
            print('%i GAMA photometry objects' % len(gama_p['ra']))
            print('========================')
            #print('columns in GAMA spectroscopy')
            #print(sorted(gama_s.__dict__.keys()))
            print('%i GAMA spectroscopy (emission line) objects' % len(gama_s['ra']))
            print('========================')
            #print('columns in GAMA k-correct')
#print(sorted(gama_k0.__dict__.keys()))
print('%i GAMA k-correct objects' % len(gama_k0['mass']))
print('========================')
# impose some common sense cuts to make sure there's SDSS photometry
# these magnitudes are extinction corrected!
has_sdss_photo = (
(gama_p['u_model'] > -9999.) &
(gama_p['g_model'] > -9999.) &
(gama_p['r_model'] > -9999.) &
(gama_p['i_model'] > -9999.) &
(gama_p['z_model'] > -9999.))
# impose science catalog cuts
# sc >= 4: r < 19.8, GAMA II main survey
# sc >= 5: r < 19.8 and satisfies r-band star-galaxy separation
# sc = 6: r < 19.4 and satisfies r-band star-galaxy separation
# r = r_petro
sciencecut = (gama_p['survey_class'] > 3)
# match cataid with spectroscopic data
has_spec = np.in1d(gama_p['cataid'], gama_s['cataid'])
# match cataid with k-correct data
assert np.array_equal(gama_k0['cataid'], gama_k1['cataid'])
has_kcorr = np.in1d(gama_p['cataid'], gama_k0['cataid'])
# combined sample cut
sample_cut = (has_spec & sciencecut & has_kcorr & has_sdss_photo)
if not silent:
print('of %i GAMA photometry objects' % len(gama_p['cataid']))
print('========================')
print('%i have SDSS photometry data' % np.sum(has_sdss_photo))
print('========================')
print('%i have spectroscopic data' % np.sum(has_spec))
print('========================')
print('%i have k-correct data' % np.sum(has_kcorr))
print('========================')
print('%i have all of the above' % np.sum(sample_cut))
print('========================')
# match up with spectroscopic data
s_match = np.searchsorted(gama_s['cataid'], gama_p['cataid'][sample_cut])
assert np.array_equal(gama_s['cataid'][s_match], gama_p['cataid'][sample_cut])
# match up with k-correct data
k_match = np.searchsorted(gama_k0['cataid'], gama_p['cataid'][sample_cut])
assert np.array_equal(gama_k0['cataid'][k_match], gama_p['cataid'][sample_cut])
# write everything into a hdf5 file
f = h5py.File(self._File('all', data_release=data_release), 'w')
# store photometry data in photometry group
grp_p = f.create_group('photo')
for key in gama_p.names:
self._h5py_create_dataset(grp_p, key, gama_p[key][sample_cut])
# store spectroscopic data in spectroscopic group
grp_s = f.create_group('spec')
for key in gama_s.names:
self._h5py_create_dataset(grp_s, key, gama_s[key][s_match])
# store kcorrect data in kcorrect groups
grp_k0 = f.create_group('kcorr_z0.0')
for key in gama_k0.names:
self._h5py_create_dataset(grp_k0, key, gama_k0[key][k_match])
grp_k1 = f.create_group('kcorr_z0.1')
for key in gama_k1.names:
self._h5py_create_dataset(grp_k1, key, gama_k1[key][k_match])
f.close()
return None
def _fieldSplit(self, data_release=3, silent=True):
        ''' Split the GAMA photo-spectroscopic data into the different
        GAMA regions. Different regions have different r-mag limits,
        among other differences, so treating them separately is the most
        sensible approach.
        '''
all_gama = self.Read('all', data_release=data_release, silent=True)
fields = ['g09', 'g12', 'g15']
ra_min = [129.0, 174.0, 211.5]
ra_max = [141.0, 186.0, 223.5]
for i_f, field in enumerate(fields):
in_ra = ((all_gama['photo']['ra'] >= ra_min[i_f]) & (all_gama['photo']['ra'] <= ra_max[i_f]))
if not silent: print('%i objects in %s field' % (np.sum(in_ra), field.upper()))
# write each field into hdf5 files
f = h5py.File(self._File(field, data_release=data_release), 'w')
for k_grp in all_gama.keys(): # photo, spec, kcorr_z0.0, kcorr_z0.1
grp = f.create_group(k_grp)
for key in all_gama[k_grp].keys():
grp.create_dataset(key, data=all_gama[k_grp][key][in_ra])
f.close()
return None
def _readKcorrect(self, fitsfile):
''' GAMA Kcorrect raises VerifyError if read in the usual fashion.
'''
f = fits.open(fitsfile)
f.verify('fix')
return f[1].data
class GamaLegacy(Catalog):
    ''' class to append imaging data from the Legacy Survey DR7 for the objects
    in the GAMA DR3 photo+spec data (.GAMA object). The objects in the final
    catalog have GAMA photometry, GAMA spectroscopy, and Legacy Survey photometry.
    '''
def AbsMag(self, data, kcorr=0.1, H0=70, Om0=0.3, galext=False):
''' Calculate absolute magnitude in SDSS u, g, r, i, z bands with kcorrect
at z=`kcorr` given the data dictionary from the `GamaLegacy.Read` method.
H0 and Om0 specifies the cosmology for the distance modulus.
'''
# check data's structure
for k in ['gama-photo', 'gama-spec','gama-kcorr-z0.0', 'gama-kcorr-z0.1']:
if k not in data.keys():
                raise ValueError('input data does not have the appropriate keys')
# check kcorr
if kcorr not in [0.0, 0.1]: raise ValueError('kcorr = 0.0, 0.1 only')
bands_sdss = ['u','g','r','i','z']
# apparent magnitude from GAMA photometry
if not galext:
mag_ugriz = np.array([data['gama-photo'][b+'_model'] for b in bands_sdss])
else:
            mag_ugriz = np.array([data['gama-kcorr-z0.1'][b+'_model'] for b in bands_sdss])
redshift = data['gama-spec']['z'] # redshift
# distance modulus
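        # DM = 5 log10(D_L / 10 pc); with D_L in Mpc this is
        # 5 log10(1e5 * D_L), matching the expression below.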
cosmo = FlatLambdaCDM(H0=H0, Om0=Om0)
D_L = cosmo.luminosity_distance(redshift).value # Mpc
DM = 5. * np.log10(1e5*D_L)
# k-correct
if kcorr == 0.0:
kcorr = np.array([data['gama-kcorr-z0.0']['kcorr_'+b] for b in bands_sdss])
elif kcorr == 0.1:
kcorr = np.array([data['gama-kcorr-z0.1']['kcorr_'+b] for b in bands_sdss])
absmag_ugriz = mag_ugriz - DM - kcorr
return absmag_ugriz
def Read(self, field, dr_gama=3, dr_legacy=7, silent=True):
        ''' Read in objects from Legacy Survey DR `dr_legacy` that overlap
        with the GAMA photo+spec objects.
        '''
fgleg = self._File(field, dr_gama=dr_gama, dr_legacy=dr_legacy)
if not os.path.isfile(fgleg): # if file is not constructed
if not silent: print('Building %s' % fgleg)
self._Build(field, dr_gama=dr_gama, dr_legacy=dr_legacy, silent=silent)
# read in data and compile onto a dictionary
f = h5py.File(fgleg, 'r')
grp_gp = f['gama-photo']
grp_gs = f['gama-spec']
grp_k0 = f['gama-kcorr-z0.0']
grp_k1 = f['gama-kcorr-z0.1']
grp_lp = f['legacy-photo']
if not silent:
            print('columns in GAMA Photo Data:')
            print(sorted(grp_gp.keys()))
            print('columns in GAMA Spec Data:')
            print(sorted(grp_gs.keys()))
            print('columns in Legacy Data:')
print(sorted(grp_lp.keys()))
print('========================')
print('%i objects' % len(grp_gp['ra'][...]))
data = {}
for dk, grp in zip(['gama-photo', 'gama-spec', 'gama-kcorr-z0.0', 'gama-kcorr-z0.1', 'legacy-photo'],
[grp_gp, grp_gs, grp_k0, grp_k1, grp_lp]):
data[dk] = {}
for key in grp.keys():
data[dk][key] = grp[key][...]
self.catalog = data.copy()
return data
def select(self, index=None):
''' select objects in the catalog by their index
'''
if index is not None:
if isinstance(index, list):
index = np.array(index)
elif isinstance(index, np.ndarray):
pass
else:
raise ValueError("index can only be a list of array")
select_data = {}
for grp in self.catalog.keys():
select_data[grp] = {}
for key in self.catalog[grp].keys():
select_data[grp][key] = self.catalog[grp][key][index]
return select_data
def write(self, catalog, fname):
''' Given dictionary with same structure as self.catalog
write to hdf5 file
'''
f = h5py.File(fname, 'w')
for g in catalog.keys():
grp = f.create_group(g)
for k in catalog[g].keys():
grp.create_dataset(k, data=catalog[g][k])
f.close()
return None
def _Build(self, field, dr_gama=3, dr_legacy=7, silent=True):
        ''' Get Legacy Survey photometry from the sweep files for objects in
        the GAMA DR`dr_gama` photo+spec catalog. This is meant to run on
        NERSC, but you can also manually download the sweep files and specify
        the directory where they are located.
        '''
from pydl.pydlutils.spheregroup import spherematch
if dr_legacy == 5:
sweep_n_dir = '/global/project/projectdirs/cosmo/data/legacysurvey/dr5/sweep/5.0/'
sweep_s_dir = '/global/project/projectdirs/cosmo/data/legacysurvey/dr5/sweep/5.0/'
tractor_n_dir='/global/project/projectdirs/cosmo/data/legacysurvey/dr5/tractor/'
elif dr_legacy == 7:
sweep_n_dir = '/global/project/projectdirs/cosmo/data/legacysurvey/dr7/sweep/7.1/'
sweep_s_dir = '/global/project/projectdirs/cosmo/data/legacysurvey/dr7/sweep/7.1/'
tractor_n_dir='/global/project/projectdirs/cosmo/data/legacysurvey/dr7/tractor/'
tractor_s_dir='/global/project/projectdirs/cosmo/data/legacysurvey/dr7/tractor/'
elif dr_legacy == 8:
sweep_n_dir = \
'/global/project/projectdirs/cosmo/data/legacysurvey/dr8/north/sweep/8.0/'
sweep_s_dir = \
'/global/project/projectdirs/cosmo/data/legacysurvey/dr8/south/sweep/8.0/'
tractor_n_dir = \
'/global/project/projectdirs/cosmo/data/legacysurvey/dr8/north/tractor/'
tractor_s_dir = \
'/global/project/projectdirs/cosmo/data/legacysurvey/dr7/south/tractor/'
# read in the names of the sweep files
fsweep = ''.join([UT.dat_dir(), 'legacy/', field, '.sweep_list.dat'])
if not os.path.isfile(fsweep): _ = self._getSweeps(field, silent=silent)
sweep_files = np.loadtxt(fsweep, unpack=True, usecols=[0], dtype='S')
if not silent: print("there are %i sweep files in the %s GAMA region" % (len(sweep_files), field))
# read in GAMA objects
gama = GAMA()
gama_data = gama.Read(field, data_release=dr_gama, silent=silent)
sweep_dict = {}
gama_photo_dict, gama_spec_dict, gama_kcorr0_dict, gama_kcorr1_dict = {}, {}, {}, {}
# loop through the files and only keep ones that spherematch with GAMA objects
for i_f, f in enumerate(sweep_files):
# read in sweep object
for sweep_dir in [sweep_n_dir, sweep_s_dir]:
fsweep = os.path.join(sweep_dir, f.decode('unicode_escape'))
if os.path.isfile(fsweep): break
sweep = fits.open(fsweep)[1].data
if not silent: print('matching %s' % fsweep)
# spherematch the sweep objects with GAMA objects
if len(sweep['ra']) > len(gama_data['photo']['ra']):
match = spherematch(sweep['ra'], sweep['dec'],
gama_data['photo']['ra'], gama_data['photo']['dec'], 0.000277778)
else:
match_inv = spherematch(gama_data['photo']['ra'], gama_data['photo']['dec'],
sweep['ra'], sweep['dec'], 0.000277778)
match = [match_inv[1], match_inv[0], match_inv[2]]
if not silent:
print('%i matches from the %s sweep file' % (len(match[0]), f))
# save sweep photometry to `sweep_dict`
for key in sweep.names:
if i_f == 0:
sweep_dict[key.lower()] = sweep[key][match[0]]
else:
sweep_dict[key.lower()] = np.concatenate([sweep_dict[key.lower()], sweep[key][match[0]]])
# save matching GAMA data ('photo', 'spec', and kcorrects)
for gkey, gdict in zip(['photo', 'spec', 'kcorr_z0.0', 'kcorr_z0.1'],
[gama_photo_dict, gama_spec_dict, gama_kcorr0_dict, gama_kcorr1_dict]):
for key in gama_data[gkey].keys():
if i_f == 0:
gdict[key] = gama_data[gkey][key][match[1]]
else:
gdict[key] = np.concatenate([gdict[key], gama_data[gkey][key][match[1]]])
del sweep # free memory? (apparently not really)
if not silent:
print('========================')
print('%i objects out of %i GAMA objects mached' % (len(sweep_dict['ra']), len(gama_data['photo']['dec'])) )
assert len(sweep_dict['ra']) == len(gama_photo_dict['ra'])
assert len(sweep_dict['ra']) == len(gama_spec_dict['ra'])
assert len(sweep_dict['ra']) == len(gama_kcorr0_dict['mass'])
assert len(sweep_dict['ra']) == len(gama_kcorr1_dict['mass'])
# writeout all the GAMA objects without sweep objects
if not silent:
nosweep = ~np.in1d(gama_data['photo']['objid'], gama_photo_dict['objid'])
f_nosweep = ''.join([UT.dat_dir(),
'GAMAdr', str(dr_gama), '.', field, '.LEGACYdr', str(dr_legacy), '.nosweep_match.fits'])
print('========================')
print('Writing out RA, Dec of %i GAMA objects without Legacy sweep objects to %s' %
(np.sum(nosweep), f_nosweep))
tb = aTable([gama_data['photo']['ra'][nosweep], gama_data['photo']['dec'][nosweep]],
names=('ra', 'dec'))
tb.meta['COMMENTS'] = 'RA, Dec of GAMA objects without matches in Legacy DR5 sweep'
tb.write(f_nosweep, format='fits', overwrite=True)
#np.savetxt(f_nosweep, np.array([gama_data['photo']['ra'], gama_data['photo']['dec']]).T, header='RA, Dec')
# read apfluxes from tractor catalogs
try:
apflux_dict = self._getTractorApflux(sweep_dict['brickname'],
sweep_dict['objid'], tractor_dir=tractor_n_dir)
except ValueError:
apflux_dict = self._getTractorApflux(sweep_dict['brickname'],
sweep_dict['objid'], tractor_dir=tractor_s_dir)
assert apflux_dict['apflux_g'].shape[0] == len(sweep_dict['brickname'])
# save data to hdf5 file
if not silent: print('writing to %s' % self._File(field, dr_gama=dr_gama, dr_legacy=dr_legacy))
f = h5py.File(self._File(field, dr_gama=dr_gama, dr_legacy=dr_legacy), 'w')
grp_gp = f.create_group('gama-photo')
grp_gs = f.create_group('gama-spec')
grp_k0 = f.create_group('gama-kcorr-z0.0')
grp_k1 = f.create_group('gama-kcorr-z0.1')
grp_lp = f.create_group('legacy-photo')
for key in sweep_dict.keys():
self._h5py_create_dataset(grp_lp, key, sweep_dict[key])
for key in apflux_dict.keys(): # additional apflux data.
self._h5py_create_dataset(grp_lp, key, apflux_dict[key])
for key in gama_photo_dict.keys():
grp_gp.create_dataset(key, data=gama_photo_dict[key])
for key in gama_spec_dict.keys():
grp_gs.create_dataset(key, data=gama_spec_dict[key])
for key in gama_kcorr0_dict.keys():
grp_k0.create_dataset(key, data=gama_kcorr0_dict[key])
for key in gama_kcorr1_dict.keys():
grp_k1.create_dataset(key, data=gama_kcorr1_dict[key])
f.close()
return None
def _getSweeps(self, field, silent=True):
''' Construct list of sweep files given GAMA object.
'''
# read in GAMA objects in field
gama = GAMA()
if field == 'all': raise ValueError("only select specific GAMA fields; not the entire data release")
gama_data = gama.Read(field, silent=silent)
# get brickmin and brickmax of sweep files
ra_mins = 10.*np.arange(gama_data['photo']['ra'].min() // 10., (gama_data['photo']['ra'].max() // 10.) + 1)
ra_maxs = ra_mins + 10.
dec_mins = 5.*np.arange(gama_data['photo']['dec'].min() // 5., (gama_data['photo']['dec'].max() // 5.) + 1)
dec_maxs = dec_mins + 5.
legacy_gama_sweep = []
for i in range(len(ra_mins)):
for j in range(len(dec_mins)):
if dec_mins[j] < 0: pm_sign = 'm'
else: pm_sign = 'p'
brickmin = ''.join([str(int(ra_mins[i])).zfill(3), pm_sign,
str(int(np.abs(dec_mins[j]))).zfill(3)])
if dec_maxs[j] < 0: pm_sign = 'm'
else: pm_sign = 'p'
brickmax = ''.join([str(int(ra_maxs[i])).zfill(3), pm_sign,
str(int(np.abs(dec_maxs[j]))).zfill(3)])
f_sweep = ''.join(['sweep-', brickmin, '-', brickmax, '.fits'])
legacy_gama_sweep.append(f_sweep)
if not silent: print('... %s' % f_sweep)
np.savetxt(''.join([UT.dat_dir(), 'legacy/', field, '.sweep_list.dat']),
legacy_gama_sweep, fmt='%s')
return ra_mins, dec_mins
def _getTractorApflux(self, brickname, objids,
tractor_dir='/global/project/projectdirs/cosmo/data/legacysurvey/dr7/tractor/', silent=True):
        ''' The catalog is constructed from the sweep catalog and the
        GAMA DR3 photo+spec data. The sweep catalog does not include
        all the photometric data from the Legacy Survey. This method
        appends 'apflux_g', 'apflux_r', 'apflux_z' and relevant columns
        from the tractor files.
        This can (and probably should) be extended to other columns.
        '''
bricks_uniq = np.unique(brickname) # unique bricks
AAAs = np.array([brick[:3] for brick in bricks_uniq])
# apfluxes in 'g', 'r', and 'z' bands
bands = ['g', 'r', 'z']
apfluxes = np.zeros((3, len(brickname), 8))
apflux_ivars = np.zeros((3, len(brickname), 8))
apflux_resids = np.zeros((3, len(brickname), 8))
n_brick = 0
for ii, AAA, brick in zip(range(len(AAAs)), AAAs, bricks_uniq):
name = ''.join([tractor_dir, AAA, '/tractor-', brick, '.fits'])
if not silent: print('%i of %i unique bricks -- %s' % (ii, len(AAAs), brick))
if not os.path.isfile(name): raise ValueError('%s tractor file not available' % name)
f_tractor = fits.open(name)
tractor = f_tractor[1].data
inbrick = (brickname == brick)
for i_k, key in enumerate(bands):
apfluxes[i_k, inbrick, :] = tractor.field('apflux_'+key)[objids[inbrick]]
apflux_ivars[i_k, inbrick, :] = tractor.field('apflux_ivar_'+key)[objids[inbrick]]
apflux_resids[i_k, inbrick, :] = tractor.field('apflux_resid_'+key)[objids[inbrick]]
n_brick += np.sum(inbrick)
assert n_brick == len(brickname)
# return dictionary with appropriate keys
apflux_dict = {}
for i_k, key in enumerate(bands):
apflux_dict['apflux_'+key] = apfluxes[i_k,:,:]
apflux_dict['apflux_ivar_'+key] = apflux_ivars[i_k,:,:]
apflux_dict['apflux_resid_'+key] = apflux_resids[i_k,:,:]
return apflux_dict
class Legacy(Catalog):
'''
'''
def _1400deg2_test(self, dr=8, rlimit=None):
        ''' Construct the BGS target sample in the 1400 deg^2 test region:
        apply spatial masking, star-galaxy separation, and quality cuts to
        the Legacy sweep objects and write the selection to file.
        '''
# hardcoded patch of sky
ra_min, ra_max = 160., 230.
dec_min, dec_max = -2., 18.
area = self._1400deg2_area()
# read legacy sweeps data in 1400 deg^2 region
if rlimit is None:
_fsweep = os.path.join(UT.dat_dir(), 'survey_validation', 'legacy_sweeps.1400deg2.hdf5')
else:
_fsweep = os.path.join(UT.dat_dir(), 'survey_validation', 'legacy_sweeps.1400deg2.rlim%.1f.hdf5' % rlimit)
fsweep = h5py.File(_fsweep, 'r')
sweep = {}
for k in fsweep.keys(): sweep[k] = fsweep[k][...]
print('%i sweep objects' % len(sweep['flux_r']))
# spatial masking
_spatial_mask = self.spatial_mask(sweep['maskbits'], [sweep['nobs_g'], sweep['nobs_r'], sweep['nobs_z']])
print('%i spatial mask' % np.sum(_spatial_mask))
# star-galaxy separation
_star_galaxy = self.star_galaxy(sweep['gaia_phot_g_mean_mag'], sweep['flux_r'])
print('%i star-galaxy separation' % np.sum(_star_galaxy))
# quality cut
gmag = self.flux_to_mag(sweep['flux_g']/sweep['mw_transmission_g'])
rmag = self.flux_to_mag(sweep['flux_r']/sweep['mw_transmission_r'])
zmag = self.flux_to_mag(sweep['flux_z']/sweep['mw_transmission_z'])
_quality_cut = self.quality_cut(
np.array([sweep['fracflux_g'], sweep['fracflux_r'], sweep['fracflux_z']]),
np.array([sweep['fracmasked_g'], sweep['fracmasked_r'], sweep['fracmasked_z']]),
np.array([sweep['fracin_g'], sweep['fracin_r'], sweep['fracin_z']]),
gmag - rmag,
rmag - zmag)
print('%i quality cut' % np.sum(_quality_cut))
sample_select = (_spatial_mask & _star_galaxy & _quality_cut)
print('%i (spatial mask) & (star-galaxy sep.) & (quality cut)' % (np.sum(sample_select)))
fout = os.path.join(UT.dat_dir(), 'survey_validation', 'bgs.1400deg2.rlim%.1f.hdf5' % rlimit)
f = h5py.File(fout, 'w')
for k in sweep.keys():
self._h5py_create_dataset(f, k, sweep[k][sample_select])
f.close()
        return None
def _1400deg2_area(self):
''' area of 1400 deg^2 test region
'''
# hardcoded patch of sky
ra_min, ra_max = 160., 230.
dec_min, dec_max = -2., 18.
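        # solid angle of an RA-Dec box: (ra_max - ra_min) in radians times
        # (sin(dec_max) - sin(dec_min)), converted to deg^2 via (180/pi)^2.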
area = (np.radians(ra_max) - np.radians(ra_min))*(np.sin(np.radians(dec_max)) - np.sin(np.radians(dec_min)))
area *= (180/np.pi)**2
print('%.f deg^2 test region' % area)
return area
def quality_cut(self, frac_flux, fracmasked, fracin, g_r, r_z):
''' apply baseline quality cut
* frac_flux_[g,r,z]<5 Not overwhelmed by neighbouring source (any band)
* fracmasked_[g,r,z]<0.4 Model not dominated by masked pixels in any band
* fracin_[g,r,z]>0.3 Most of the model flux not outside the region of the data used to fit the model
* -1< g-r < 4 Not an absolutely bizarre colour
* -1< r-z < 4 Not an absolutely bizarre colour
'''
assert frac_flux.shape[0] == 3
assert fracmasked.shape[0] == 3
assert fracin.shape[0] == 3
# Not overwhelmed by neighbouring source (any band)
_frac_flux = ((frac_flux[0] < 5.) & (frac_flux[1] < 5.) & (frac_flux[2] < 5.))
# Model not dominated by masked pixels in any band
_fracmasked = ((fracmasked[0] < 0.4) & (fracmasked[1] < 0.4) & (fracmasked[2] < 0.4))
# Most of the model flux not outside the region of the data used to fit the model
_fracin = ((fracin[0] > 0.3) & (fracin[1] > 0.3) & (fracin[2] > 0.3))
# color cut
_colorcut = ((g_r > -1.) & (g_r < 4.) & (r_z > -1.) & (r_z < 4.))
cut = (_frac_flux & _fracmasked & _fracin & _colorcut)
return cut
def star_galaxy(self, gaia_G, r_flux):
''' star-galaxy separation using GAIA and tractor photometry
(gaia G mag) - (raw r mag) > 0.6 or (gaia G mag) == 0
'''
G_rr = gaia_G - self.flux_to_mag(r_flux)
isgalaxy = (G_rr > 0.6) | (gaia_G == 0)
return isgalaxy
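
    def flux_to_mag(self, flux):
        ''' convert Legacy Survey fluxes (nanomaggies) to magnitudes.
        This helper is referenced above but was missing from the snippet;
        the standard convention m = 22.5 - 2.5 log10(flux) is assumed here,
        consistent with the 10**((22.5 - rlimit)/2.5) flux cut used below.
        '''
        return 22.5 - 2.5 * np.log10(flux)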
def spatial_mask(self, maskbits, nobs):
''' spatial masking around
* bright stars
* medium bright stars
* clusters
* large galaxies
'''
nobs_g, nobs_r, nobs_z = nobs
BS = (np.uint64(maskbits) & np.uint64(2**1))!=0 # bright stars
MS = (np.uint64(maskbits) & np.uint64(2**11))!=0 # medium bright stars
GC = (np.uint64(maskbits) & np.uint64(2**13))!=0 # clusters
LG = (np.uint64(maskbits) & np.uint64(2**12))!=0 # large galaxies
allmask = ((maskbits & 2**6) != 0) | ((maskbits & 2**5) != 0) | ((maskbits & 2**7) != 0)
nobs = ((nobs_g < 1) | (nobs_r < 1) | (nobs_z < 1))
mask = ~(BS | MS | GC | LG | allmask | nobs)
return mask
def _collect_1400deg2_test(self, dr=8, rlimit=None):
''' collect sweeps data within the same 1400 deg2 test region that Omar used for dr7
and save to file.
'''
import glob
if dr != 8: raise NotImplementedError
if os.environ['NERSC_HOST'] != 'cori': raise ValueError('this script is meant to run on cori only')
# hardcoded patch of sky
ra_min, ra_max = 160., 230.
dec_min, dec_max = -2., 18.
dir_legacy = '/project/projectdirs/cosmo/data/legacysurvey/'
dir_north = os.path.join(dir_legacy, 'dr8/north/sweep/8.0')
dir_south = os.path.join(dir_legacy, 'dr8/south/sweep/8.0')
fsweeps_N = glob.glob('%s/*.fits' % dir_north)
print('%i North sweep files' % len(fsweeps_N))
fsweeps_S = glob.glob('%s/*.fits' % dir_south)
print('%i South sweep files' % len(fsweeps_S))
fsweeps = sorted([os.path.join(dir_north, _fs) for _fs in fsweeps_N] + [os.path.join(dir_south, _fs) for _fs in fsweeps_S])
sweeps = {}
for _fsweep in fsweeps:
# get sweep RA and Dec range
sweep_ra_min, sweep_ra_max, sweep_dec_min, sweep_dec_max = self._parse_brickname(_fsweep)
# check whether it's in the region or not
not_in_region = (
(sweep_ra_max < ra_min) |
(sweep_ra_min > ra_max) |
(sweep_dec_max < dec_min) |
(sweep_dec_min > dec_max)
)
if not_in_region: continue
# read sweep file
sweep = fits.open(_fsweep)[1].data
# area that's within the test region
mask_region = (
(sweep['RA'] >= ra_min) &
(sweep['RA'] <= ra_max) &
(sweep['DEC'] >= dec_min) &
(sweep['DEC'] <= dec_max))
if np.sum(mask_region) == 0: continue
if rlimit is None:
                rcut = np.ones(len(sweep['RA']), dtype=bool)
else:
rflux = sweep['FLUX_R'] / sweep['MW_TRANSMISSION_R']
rcut = (rflux > 10**((22.5-rlimit)/2.5))
print('%i obj in %s' % (np.sum(mask_region), os.path.basename(_fsweep)))
if len(sweeps.keys()) == 0:
for k in sweep.names:
sweeps[k] = sweep[k][mask_region & rcut]
else:
for k in sweep.names:
sweeps[k] = np.concatenate([sweeps[k], sweep[k][mask_region & rcut]], axis=0)
if rlimit is None:
fout = os.path.join(UT.dat_dir(), 'survey_validation', 'legacy_sweeps.1400deg2.hdf5')
else:
fout = os.path.join(UT.dat_dir(), 'survey_validation', 'legacy_sweeps.1400deg2.rlim%.1f.hdf5' % rlimit)
f = h5py.File(fout, 'w')
for k in sweeps.keys():
self._h5py_create_dataset(f, k, sweeps[k])
f.close()
return None
def _parse_brickname(self, brickname):
''' parse ra and dec range from brick name
'''
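        # e.g. 'sweep-160p005-170p010.fits' -> ra in [160, 170], dec in
        # [5, 10]; 'p'/'m' encode the sign of the declination.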
name = os.path.basename(brickname).replace('.fits', '') # get rid of directory and ext
radec1 = name.split('-')[1]
radec2 = name.split('-')[2]
if 'p' in radec1: _c = 'p'
elif 'm' in radec1: _c = 'm'
ra_min = float(radec1.split(_c)[0])
dec_min = float(radec1.split(_c)[1])
if 'p' in radec2: _c = 'p'
elif 'm' in radec2: _c = 'm'
ra_max = float(radec2.split(_c)[0])
dec_max = float(radec2.split(_c)[1])
return ra_min, ra_max, dec_min, dec_max
def _Tycho(self, ra_lim=None, dec_lim=None):
''' read in tycho2 catalog within RA and Dec range
'''
_tycho = fits.open(os.path.join(UT.dat_dir(), 'survey_validation', 'tycho2.fits'))[1].data
mask_region = np.ones(len(_tycho['RA'])).astype(bool)
if ra_lim is not None:
mask_region = mask_region & (_tycho['RA'] >= ra_lim[0]) & (_tycho['RA'] <= ra_lim[1])
if dec_lim is not None:
mask_region = mask_region & (_tycho['DEC'] >= dec_lim[0]) & (_tycho['DEC'] <= dec_lim[1])
tycho = {}
for k in _tycho.names:
tycho[k] = _tycho[k][mask_region]
return tycho
def _LSLGA(self, ra_lim=None, dec_lim=None):
''' read in Legacy Survey Large Galaxy Atlas
'''
_lslga = fits.open(os.path.join(UT.dat_dir(), 'survey_validation', 'LSLGA-v2.0.fits'))[1].data
mask_region = np.ones(len(_lslga['RA'])).astype(bool)
if ra_lim is not None:
mask_region = mask_region & (_lslga['RA'] >= ra_lim[0]) & (_lslga['RA'] <= ra_lim[1])
if dec_lim is not None:
mask_region = mask_region & (_lslga['DEC'] >= dec_lim[0]) & (_lslga['DEC'] <= dec_lim[1])
lslga = {}
for k in _lslga.names:
lslga[k] = _lslga[k][mask_region]
return lslga
def _GamaLegacy_TractorAPFLUX():
''' Retroactively add apflux columns from the tractor catalogs
to the GamaLegacy catalog constructed and saved to file. This is a
hack.
'''
gleg = GamaLegacy()
# open saved gama-legacy catalog for appending
f_gleg = h5py.File(gleg._File(), 'r+')
# legacy photometry group
grp_lp = f_gleg['legacy-photo']
if 'apflux_g' in grp_lp.keys():
# check that the columns don't already exist
f_gleg.close()
raise ValueError('apfluxes already in the catalog')
# read apfluxes from tractor catalogs
apflux_dict = gleg._getTractorApflux(grp_lp['brickname'].value, grp_lp['objid'].value,
dir='/global/project/projectdirs/cosmo/data/legacysurvey/dr5/tractor/')
assert apflux_dict['apflux_g'].shape[0] == len(grp_lp['brickname'].value)
# save fluxes to the dataset
for key in apflux_dict.keys():
grp_lp.create_dataset(key, data=apflux_dict[key])
f_gleg.close()
return None
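# A minimal read-back sketch for the saved file (the path assumes the
# rlimit=None case; the keys mirror the sweep column names):
#
#   with h5py.File(os.path.join(UT.dat_dir(), 'survey_validation',
#                               'legacy_sweeps.1400deg2.hdf5'), 'r') as f:
#       ra, dec = f['RA'][...], f['DEC'][...]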
| [
7061,
6,
198,
198,
7266,
18170,
284,
5412,
44515,
82,
973,
287,
262,
1628,
628,
198,
7061,
6,
198,
11748,
28686,
220,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
289,
20,
9078,
198,
6738,
6468,
28338,
13,
952,
1330,
11414,
220,
... | 1.978609 | 19,073 |
import rx
import rx.operators as ops
from rx.subject import Subject
obs1 = Subject()
obs2 = Subject()
obs3 = Subject()
higher_order = Subject()
higher_order.pipe(ops.switch_latest()).subscribe(
on_next=lambda i: print("on_next {}".format(i)),
on_error=lambda e: print("on_error: {}".format(e)),
on_completed=lambda: print("on_completed")
)
higher_order.on_next(obs1)
obs1.on_next("1: 1")
obs1.on_next("1: 2")
higher_order.on_next(obs2)
obs1.on_next("1: 3")
obs2.on_next("2: 1")
obs2.on_next("2: 2")
higher_order.on_next(obs3)
obs2.on_next("2: 3")
obs3.on_next("3: 1")
obs3.on_next("3: 2")
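# With switch_latest, only items from the most recently emitted inner
# observable pass through, so the expected console output is:
#
#   on_next 1: 1
#   on_next 1: 2
#   on_next 2: 1
#   on_next 2: 2
#   on_next 3: 1
#   on_next 3: 2
#
# obs1's "1: 3" and obs2's "2: 3" arrive after a switch and are dropped;
# on_completed never fires because higher_order is never completed.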
| [
11748,
374,
87,
198,
11748,
374,
87,
13,
3575,
2024,
355,
39628,
198,
6738,
374,
87,
13,
32796,
1330,
15540,
198,
198,
8158,
16,
796,
15540,
3419,
198,
8158,
17,
796,
15540,
3419,
198,
8158,
18,
796,
15540,
3419,
198,
46503,
62,
287... | 2.296578 | 263 |
from util import isint
data_base = Relocatable(Relocatable.data, 0)
main_base = Relocatable(Relocatable.main, 0)
wk_base = Relocatable(Relocatable.wk, 0)
| [
6738,
7736,
1330,
318,
600,
628,
198,
7890,
62,
8692,
796,
4718,
420,
21156,
7,
6892,
420,
21156,
13,
7890,
11,
657,
8,
198,
12417,
62,
8692,
796,
4718,
420,
21156,
7,
6892,
420,
21156,
13,
12417,
11,
657,
8,
198,
43021,
62,
8692,... | 2.689655 | 58 |
"""
Module defining a Falcon resource to provide login session info
Copyright (C) 2016 ERT Inc.
"""
import falcon
import api.json as json
from api.auth import auth
route = "user"
class User():
"""
Falcon resource object providing API login session info
"""
def on_get(self, request, resp):
"""
return JSON object, representing the current session's user info
"""
user_id = auth.get_user_id(request)
# return JSON user representation
user = get_user(user_id)
json_user = json.dumps(user)
resp.body = json_user
def get_user(user_id=None):
"""
Return object representing the logged in user
Keyword Parameters:
user_id -- String, identifier representing the logged in user
(Default: None, representing an public/anonymous user session)
>>> # Check public/Anonymous user
>>> from pprint import pprint
>>> anonymous_user = get_user()
>>> pprint(anonymous_user)
{'user': {'description': 'Anonymous user.', 'id': None}}
>>> anonymous_user = get_user(None) #public/Anonymous user
>>> pprint(anonymous_user)
{'user': {'description': 'Anonymous user.', 'id': None}}
>>> # Check logged in user
>>> user = get_user('uid=bob.newhart,ou=People,o=bobnewhart.com')
>>> pprint(user)
{'user': {'description': 'Authenticated user.',
'id': 'uid=bob.newhart,ou=People,o=bobnewhart.com'}}
"""
description = "Authenticated user."
if user_id is None:
description = "Anonymous user."
attributes = {'id': user_id, 'description': description}
user_object = {'user': attributes}
return user_object
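# Hypothetical wiring sketch -- how this resource would typically be
# mounted on a Falcon application (module layout and app object name are
# assumptions, not part of this file):
#
#   import falcon
#   from user import User, route
#
#   application = falcon.API() # falcon.App() in falcon >= 3
#   application.add_route('/' + route, User())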
| [
37811,
198,
26796,
16215,
257,
17621,
8271,
284,
2148,
17594,
6246,
7508,
198,
198,
15269,
357,
34,
8,
1584,
13793,
51,
3457,
13,
198,
37811,
198,
11748,
24215,
1102,
198,
198,
11748,
40391,
13,
17752,
355,
33918,
198,
6738,
40391,
13,
... | 2.748768 | 609 |
"""Talk model."""
from typing import Any
from django.db import models
from django.utils.translation import gettext_lazy as _
from domain import utils
class Talk(models.Model):
"""Talk submission."""
name = models.CharField(_("name"), max_length=255, unique=True)
slug = models.SlugField(unique=True, blank=True)
abstract = models.TextField(blank=True)
description = models.TextField(blank=True)
notes = models.TextField(blank=True)
def __str__(self) -> str:
"""Return name as a string."""
return self.name
def save(self, *args: Any, **kwargs: Any) -> None:
"""Save a slug on save."""
if not self.slug:
self.slug = utils.generate_unique_slug(Talk, self.name)
super().save(*args, **kwargs)
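# A minimal usage sketch (the slug value assumes domain.utils.generate_unique_slug
# slugifies the name and de-duplicates on collision -- an assumption, not this
# project's documented behavior):
#
#   talk = Talk(name="Intro to Django")
#   talk.save()
#   talk.slug # e.g. "intro-to-django"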
| [
37811,
25685,
2746,
526,
15931,
198,
198,
6738,
19720,
1330,
4377,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
26791,
13,
41519,
1330,
651,
5239,
62,
75,
12582,
355,
4808,
198,
198,
6738,
7386,
1330,... | 2.608696 | 299 |
# -*- coding: utf-8 -*-#
'''
# Name: HowNNWorks
# Description:
# Author: super
# Date: 2020/5/24
'''
import numpy as np
import matplotlib.pyplot as plt
from HelperClass2.NeuralNet_2_0 import *
train_data_name = "../data/ch08.train.npz"
test_data_name = "../data/ch08.test.npz"
if __name__ == '__main__':
dataReader = DataReader_2_0(train_data_name, test_data_name)
dataReader.ReadData()
dataReader.GenerateValidationSet()
n_input, n_hidden, n_output = 1, 2, 1
eta, batch_size, max_epoch = 0.05, 10, 5000
eps = 0.001
hp = HyperParameters_2_0(n_input, n_hidden, n_output, eta, max_epoch, batch_size, eps, NetType.Fitting,
InitialMethod.Xavier)
net = NeuralNet_2_0(hp, "sin_121")
net.LoadResult()
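    # wb1/wb2 hold the hidden- and output-layer parameters; printing their
    # W (weights) and B (biases) inspects the trained values (naming assumed
    # from the HelperClass2 NeuralNet_2_0 convention)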
print(net.wb1.W)
print(net.wb1.B)
print(net.wb2.W)
print(net.wb2.B)
# net.train(dataReader, 50, True)
# net.ShowTrainingHistory_2_0()
# ShowResult(net, dataReader, hp.toString())
ShowResult2D(net, hp.toString()) | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
2,
198,
7061,
6,
198,
2,
6530,
25,
220,
220,
220,
220,
220,
220,
220,
220,
1374,
6144,
23044,
198,
2,
12489,
25,
220,
220,
198,
2,
6434,
25,
220,
220,
220,
220,
220,
... | 2.131687 | 486 |
import os
import numpy as np
import torch
import pickle
| [
11748,
28686,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
198,
11748,
2298,
293,
198
] | 3.5 | 16 |
from dataTable import * | [
6738,
1366,
10962,
1330,
1635
] | 4.6 | 5 |
# Copyright 2017 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from keras.applications.imagenet_utils import decode_predictions
import numpy as np
from pyspark.ml import Transformer
from pyspark.ml.param import Param, Params, TypeConverters
from pyspark.sql.functions import udf
from pyspark.sql.types import (ArrayType, FloatType, StringType, StructField, StructType)
import sparkdl.graph.utils as tfx
from sparkdl.image.imageIO import createResizeImageUDF
import sparkdl.transformers.keras_applications as keras_apps
from sparkdl.param import (
keyword_only, HasInputCol, HasOutputCol, SparkDLTypeConverters)
from sparkdl.transformers.tf_image import TFImageTransformer
SUPPORTED_MODELS = ["InceptionV3", "Xception", "ResNet50", "VGG16", "VGG19"]
class DeepImagePredictor(Transformer, HasInputCol, HasOutputCol):
"""
Applies the model specified by its popular name to the image column in DataFrame.
The input image column should be 3-channel SpImage.
The output is a MLlib Vector.
"""
modelName = Param(Params._dummy(), "modelName", "A deep learning model name",
typeConverter=SparkDLTypeConverters.buildSupportedItemConverter(SUPPORTED_MODELS))
decodePredictions = Param(Params._dummy(), "decodePredictions",
"If true, output predictions in the (class, description, probability) format",
typeConverter=TypeConverters.toBoolean)
topK = Param(Params._dummy(), "topK", "How many classes to return if decodePredictions is True",
typeConverter=TypeConverters.toInt)
@keyword_only
def __init__(self, inputCol=None, outputCol=None, modelName=None, decodePredictions=False,
topK=5):
"""
__init__(self, inputCol=None, outputCol=None, modelName=None, decodePredictions=False,
topK=5)
"""
super(DeepImagePredictor, self).__init__()
self._setDefault(decodePredictions=False)
self._setDefault(topK=5)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
def setParams(self, inputCol=None, outputCol=None, modelName=None, decodePredictions=False,
topK=5):
"""
setParams(self, inputCol=None, outputCol=None, modelName=None, decodePredictions=False,
topK=5)
"""
kwargs = self._input_kwargs
self._set(**kwargs)
return self
# TODO: give an option to take off multiple layers so it can be used in tuning
# (could be the name of the layer or int for how many to take off).
class DeepImageFeaturizer(Transformer, HasInputCol, HasOutputCol):
"""
Applies the model specified by its popular name, with its prediction layer(s) chopped off,
to the image column in DataFrame. The output is a MLlib Vector so that DeepImageFeaturizer
can be used in a MLlib Pipeline.
The input image column should be 3-channel SpImage.
"""
modelName = Param(Params._dummy(), "modelName", "A deep learning model name",
typeConverter=SparkDLTypeConverters.buildSupportedItemConverter(SUPPORTED_MODELS))
@keyword_only
def __init__(self, inputCol=None, outputCol=None, modelName=None):
"""
__init__(self, inputCol=None, outputCol=None, modelName=None)
"""
super(DeepImageFeaturizer, self).__init__()
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
def setParams(self, inputCol=None, outputCol=None, modelName=None):
"""
setParams(self, inputCol=None, outputCol=None, modelName=None)
"""
kwargs = self._input_kwargs
self._set(**kwargs)
return self
class _NamedImageTransformer(Transformer, HasInputCol, HasOutputCol):
"""
For internal use only. NamedImagePredictor and NamedImageFeaturizer are the recommended classes
to use.
Applies the model specified by its popular name to the image column in DataFrame. There are
two output modes: predictions or the featurization from the model. In either case the output
is a MLlib Vector.
"""
modelName = Param(Params._dummy(), "modelName", "A deep learning model name",
typeConverter=SparkDLTypeConverters.buildSupportedItemConverter(SUPPORTED_MODELS))
featurize = Param(Params._dummy(), "featurize",
"If true, output features. If false, output predictions. Either way the output is a vector.",
typeConverter=TypeConverters.toBoolean)
@keyword_only
def __init__(self, inputCol=None, outputCol=None, modelName=None, featurize=False):
"""
__init__(self, inputCol=None, outputCol=None, modelName=None, featurize=False)
"""
super(_NamedImageTransformer, self).__init__()
kwargs = self._input_kwargs
self.setParams(**kwargs)
self._inputTensorName = None
self._outputTensorName = None
self._outputMode = None
@keyword_only
def setParams(self, inputCol=None, outputCol=None, modelName=None, featurize=False):
"""
setParams(self, inputCol=None, outputCol=None, modelName=None, featurize=False)
"""
kwargs = self._input_kwargs
self._set(**kwargs)
return self
def _buildTFGraphForName(name, featurize):
"""
Currently only supports pre-trained models from the Keras applications module.
"""
modelData = keras_apps.getKerasApplicationModel(name).getModelData(featurize)
sess = modelData["session"]
outputTensorName = modelData["outputTensorName"]
graph = tfx.strip_and_freeze_until([outputTensorName], sess.graph, sess, return_graph=True)
modelData["graph"] = graph
return modelData
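# A usage sketch under assumed data paths and Spark session setup, kept as
# comments since this module is library code:
#
#   from pyspark.ml.image import ImageSchema # Spark 2.3/2.4 image reader
#   from sparkdl import DeepImagePredictor
#
#   image_df = ImageSchema.readImages("/data/images") # assumed directory
#   predictor = DeepImagePredictor(inputCol="image", outputCol="predicted_labels",
#                                  modelName="InceptionV3", decodePredictions=True, topK=5)
#   predictions = predictor.transform(image_df)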
| [
2,
15069,
2177,
16092,
397,
23706,
11,
3457,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
19... | 2.627965 | 2,403 |
#!/usr/bin/python3 -tt
from xclib.configuration import get_args
from xclib.authops import perform
DEFAULT_LOG_DIR = '/var/log/xcauth'
DESC = '''XMPP server authentication against JSXC>=3.2.0 on Nextcloud.
See https://jsxc.org or https://github.com/jsxc/xmpp-cloud-auth.'''
EPILOG = '''-I, -R, and -A take precedence over -t. One of them is required.
-I, -R, and -A imply -i and -d.'''
if __name__ == '__main__':
args = get_args(DEFAULT_LOG_DIR, DESC, EPILOG, 'xcauth')
perform(args)
# vim: tabstop=8 softtabstop=0 expandtab shiftwidth=4
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
532,
926,
198,
198,
6738,
2124,
565,
571,
13,
11250,
3924,
1330,
651,
62,
22046,
198,
6738,
2124,
565,
571,
13,
18439,
2840,
1330,
1620,
198,
198,
7206,
38865,
62,
25294,
62,
34720,
796,
310... | 2.464602 | 226 |
from flask import Flask
import os
#from model import LSCCNN
basedir = os.path.abspath(os.path.dirname(__file__))
app = Flask(__name__)
app.config.from_object(__name__)
# Load model
#checkpoint_path = path_to_gcp_bucket
#model = LSCCNN(checkpoint_path=checkpoint_path)
#model.eval()
#model.cuda() ??
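# imported at the bottom, after `app` exists, to avoid a circular import
# (app/views.py is assumed to import `app` back -- the usual Flask pattern)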
from app import views
| [
6738,
42903,
1330,
46947,
198,
11748,
28686,
198,
198,
2,
6738,
2746,
1330,
30948,
4093,
6144,
198,
198,
3106,
343,
796,
28686,
13,
6978,
13,
397,
2777,
776,
7,
418,
13,
6978,
13,
15908,
3672,
7,
834,
7753,
834,
4008,
198,
198,
1324... | 2.650407 | 123 |
# Please note that if you uncomment and press multiple times, the program will keep appending to the file.

# grab 'hello' from a flat dictionary
d = {'simple_key':'hello'}
answer1 = d['simple_key']
print(answer1)

# grab 'hello' from a nested dictionary
d = {'k1':{'k2':'hello'}}
answer2 = d['k1']['k2']
print(answer2)

# grab 'hello' from a dictionary nested inside a list
d = {'k1':[{'nest_key':['this is deep',['hello']]}]}
answer3 = d['k1'][0]['nest_key'][1][0]
print(answer3)

# alternate list and dict indexing to reach 'hello'
d = {'k1':[1,2,{'k2':['this is tricky',{'tough':[1,2,['hello']]}]}]}
answer4 = d['k1'][2]['k2'][1]['tough'][2][0]
print(answer4)
2,
4222,
3465,
326,
611,
345,
8820,
434,
290,
1803,
3294,
1661,
11,
262,
1430,
481,
1394,
598,
1571,
284,
262,
2393,
13,
198,
198,
67,
796,
1391,
6,
36439,
62,
2539,
10354,
6,
31373,
6,
92,
198,
198,
41484,
16,
796,
288,
17816,
... | 2.706081 | 296 |
import numpy as np
import numpy.random as npr
import numpy.testing as npt
import pytest
from impute import FpcImpute
@pytest.mark.usefixtures('rae_case')
| [
11748,
299,
32152,
355,
45941,
198,
11748,
299,
32152,
13,
25120,
355,
299,
1050,
198,
11748,
299,
32152,
13,
33407,
355,
299,
457,
198,
198,
11748,
12972,
9288,
198,
198,
6738,
848,
1133,
1330,
376,
14751,
26950,
1133,
628,
198,
31,
... | 2.821429 | 56 |
import sys
sys.path.append('./test/tests')
from driver import driver, Keys
driver.get("http://www.python.org")
assert "Python" in driver.title
elem = driver.find_element_by_name("q")
elem.clear()
elem.send_keys("pycon")
elem.send_keys(Keys.RETURN)
assert "No results found." not in driver.page_source
#import test_example
#import test_printtaskbook
#import test_class
#import test_duedates
import test_deletion
driver.close()
| [
11748,
25064,
198,
17597,
13,
6978,
13,
33295,
7,
4458,
14,
9288,
14,
41989,
11537,
198,
198,
6738,
4639,
1330,
4639,
11,
26363,
198,
26230,
13,
1136,
7203,
4023,
1378,
2503,
13,
29412,
13,
2398,
4943,
198,
30493,
366,
37906,
1,
287,
... | 2.911565 | 147 |
# -*- coding: utf-8 -*-
from model.contact import Contact | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
2746,
13,
32057,
1330,
14039
] | 2.85 | 20 |
# This is a Python GUI program used to convert the currency value for different countries.
# importing the modules
from tkinter import *
# creating a class called CurrencyConverter
# defining an init function for the class, plus functions to convert and
# clear values -- a minimal sketch follows
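# A minimal runnable sketch of the class outlined above; the fixed example
# exchange rate and the widget layout are illustrative assumptions.
class CurrencyConverter:
    def __init__(self):
        self.root = Tk()
        self.root.title("Currency Converter")
        self.amount = Entry(self.root) # amount to convert
        self.amount.pack()
        self.result = Label(self.root, text="")
        self.result.pack()
        Button(self.root, text="Convert", command=self.convert).pack()
        Button(self.root, text="Clear", command=self.clear).pack()
        self.root.mainloop()

    def convert(self):
        # assumed fixed USD -> EUR rate, for illustration only
        self.result.config(text="%.2f EUR" % (float(self.amount.get()) * 0.85))

    def clear(self):
        self.amount.delete(0, END)
        self.result.config(text="")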
CurrencyConverter()
| [
2,
770,
318,
281,
21015,
25757,
1430,
543,
318,
973,
284,
6718,
262,
1459,
410,
75,
518,
329,
1180,
369,
315,
1678,
13,
201,
198,
201,
198,
2,
33332,
262,
13103,
201,
198,
6738,
256,
74,
3849,
1330,
1635,
201,
198,
201,
198,
2,
... | 2.896825 | 126 |