content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 22 14:07:39 2016
@author: pablo
"""
""" CREATING PYTHON plasma_plume PACKAGE """
""" IMPORTING GENERAL USAGE MODULES """
from scipy.interpolate import interp1d,interp2d,griddata #1D, and 2D interpolation libraries
from scipy.integrate import odeint # Ordinary Diffferential Equation (ODE) Solver
import numpy as np #Scientific and numerical general module
import math #Simbolic math library
import matplotlib.pyplot as plt #General Plotter library
import unittest #Testing library
""" IMPORT PARENT CLASS Hyperplume """
from .HYPERPLUME.hyperplume import Hyperplume
""" IMPORT SUBCLASSES (FOR PACKAGE TESTS ONLY) """
from .SSM.SSM_plume import SSM,type_parks,type_korsun,type_ashkenazy
from .AEM.AEM_plume import AEM
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
19480,
2758,
2534,
1478,
25,
2998,
25,
2670,
1584,
198,
198,
31,
9800,
25,
279,
18817,
198,
37811,
198,
198,
37811,
29244,
33881,
350,
56,
4221,
... | 2.857143 | 273 |
import os
import sys
from subprocess import call
from inkpy_jinja.backends.base import PDFBackend
class OdtToPdfScriptPathNotConfigured(Exception):
"""OdtToPdf script path not found in settings"""
| [
11748,
28686,
198,
11748,
25064,
198,
6738,
850,
14681,
1330,
869,
198,
198,
6738,
16882,
9078,
62,
18594,
6592,
13,
1891,
2412,
13,
8692,
1330,
12960,
7282,
437,
628,
198,
4871,
10529,
83,
2514,
47,
7568,
7391,
15235,
3673,
16934,
1522... | 3.306452 | 62 |
# packages
import os, sys, datetime
sys.path.append("C:/BERTVision/code/torch")
from utils.collate import collate_H5_squad
from common.evaluators.H5_squad_evaluator import H5_SQUAD_Evaluator
from torch.cuda.amp import autocast
import torch
import numpy as np
from torch.utils.data import DataLoader
from tqdm.auto import tqdm
from tqdm.notebook import trange
class H5_SQUAD_Trainer(object):
'''
This class handles the training of 1-epoch tuned QA embeddings from BERT
Parameters
----------
model : object
A compression model; see compress_utils.py
criterion : loss function
A loss function
optimizer: object
A compatible Torch optimizer
processor: object
A Torch Dataset processor that emits data
scheduler: object
The learning rate decreases linearly from the initial lr set
args: object
A argument parser object; see args.py
scaler: object
A gradient scaler object to use FP16
Operations
-------
This trainer:
(1) Trains the weights
(2) Generates dev set loss
(3) Creates start and end logits and collects their original index for scoring
(4) Writes their results and saves the file as a checkpoint
'''
def train(self):
'''
This function handles the entirety of the training, dev, and scoring.
'''
# tell the user general metrics
self.logger.info(f"Number of examples: {len(self.train_examples)}")
self.logger.info(f"Batch size: {self.args.batch_size}")
self.logger.info(f"Number of optimization steps: {self.num_train_optimization_steps}")
# instantiate dataloader
train_dataloader = DataLoader(self.train_examples,
batch_size=self.args.batch_size,
shuffle=True,
num_workers=self.args.num_workers,
drop_last=False,
collate_fn=collate_H5_squad)
# for each epoch
for epoch in trange(int(self.args.epochs), desc="Epoch"):
# train
self.train_epoch(self.criterion, train_dataloader)
# get dev loss
dev_loss, logits, indices = H5_SQUAD_Evaluator(self.model, self.criterion, self.processor, self.args).get_loss_and_scores()
# compute scores
metrics = H5_SQUAD_Evaluator(self.model, self.criterion, self.processor, self.args).score_squad_val(shuffled_idx=indices, logits=logits, n_best_size=20, max_answer=30)
# print validation results
self.logger.info("Epoch {0: d}, Dev/Exact {1: 0.3f}, Dev/F1. {2: 0.3f}",
epoch+1, metrics['exact'], metrics['f1'])
# update validation results
if metrics['f1'] > self.best_dev_f1:
self.unimproved_iters = 0
self.best_dev_f1 = metrics['f1']
torch.save(self.model, self.snapshot_path)
else:
# stop training with early stopping
self.unimproved_iters += 1
if self.unimproved_iters >= self.args.patience:
self.early_stop = True
self.logger.info(f"Early Stopping. Epoch: {epoch}, Best Dev F1: {self.best_dev_f1}")
break
#
| [
2,
10392,
198,
11748,
28686,
11,
25064,
11,
4818,
8079,
198,
17597,
13,
6978,
13,
33295,
7203,
34,
14079,
13246,
6849,
1166,
14,
8189,
14,
13165,
354,
4943,
198,
6738,
3384,
4487,
13,
26000,
378,
1330,
2927,
378,
62,
39,
20,
62,
164... | 2.139812 | 1,595 |
#!/usr/bin/env python
# -*- Mode: Python; c-basic-offset: 4; indent-tabs-mode: nil; tab-width: 4 -*-
# vi: set ts=4 sw=4 expandtab:
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# This is a wrapper script around the exactgc script. Usage:
# Env:
# The AVM env var must point to an avmshell
# The ASC env var must point to asc.jar
# Invocation:
# The script exports one function "GenerateTracers" with these arguments:
# prefix = module specific prefix string used in generated file names, ie "avmplus", "avmglue"
# inputfiles = string of list of files, can contain wildcards
# outputdir = where output files go
import os
import shutil
import sys
import filecmp
import glob
import tempfile
import platform
import subprocess
import string
utilsdir = platform_filename(os.path.dirname(__file__))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
10363,
25,
11361,
26,
269,
12,
35487,
12,
28968,
25,
604,
26,
33793,
12,
8658,
82,
12,
14171,
25,
18038,
26,
7400,
12,
10394,
25,
604,
532,
9,
12,
198,
2,
25357,
... | 3.251701 | 294 |
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from io import StringIO
import io
import itertools
import os
import unittest
import warnings
import types
from tempfile import mkstemp
from skbio.io import (FormatIdentificationWarning, UnrecognizedFormatError,
ArgumentOverrideWarning, io_registry, sniff,
create_format)
from skbio.io.registry import (IORegistry, FileSentinel, Format,
DuplicateRegistrationError,
InvalidRegistrationError)
from skbio.util import get_data_path
from skbio.util._exception import TestingUtilError
from skbio import DNA, read, write
if __name__ == '__main__':
unittest.main()
| [
2,
16529,
10541,
198,
2,
15069,
357,
66,
8,
2211,
438,
11,
629,
1134,
270,
12,
65,
952,
2478,
1074,
13,
198,
2,
198,
2,
4307,
6169,
739,
262,
2846,
286,
262,
40499,
347,
10305,
13789,
13,
198,
2,
198,
2,
383,
1336,
5964,
318,
... | 3.035088 | 342 |
from urllib import request
from googleapiclient.discovery import build
import gwm.auth.authenticator as auth
| [
6738,
2956,
297,
571,
1330,
2581,
198,
6738,
23645,
499,
291,
75,
1153,
13,
67,
40821,
1330,
1382,
198,
11748,
308,
26377,
13,
18439,
13,
41299,
26407,
355,
6284,
198,
220,
220,
220,
220,
198,
220,
220,
220,
220,
198
] | 2.975 | 40 |
#!/usr/bin/env python
#
# Author: Veronica G. Vergara L.
#
#
from .base_jobLauncher import BaseJobLauncher
if __name__ == '__main__':
print('This is the Aprun job launcher class')
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
198,
2,
6434,
25,
44498,
402,
13,
4643,
70,
3301,
406,
13,
198,
2,
198,
2,
198,
198,
6738,
764,
8692,
62,
21858,
46182,
2044,
1330,
7308,
33308,
46182,
2044,
198,
198,
361,
1159... | 2.735294 | 68 |
pip install scikit-learn
import os
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
student_files = [doc for doc in os.listdir() if doc.endswith('.txt')]
student_notes = [open(_file, encoding='utf-8').read()
for _file in student_files]
vectors = vectorize(student_notes)
s_vectors = list(zip(student_files, vectors))
plagiarism_results = set()
for data in check_plagiarism():
print(data)
| [
79,
541,
2721,
629,
1134,
270,
12,
35720,
198,
11748,
28686,
198,
6738,
1341,
35720,
13,
30053,
62,
2302,
7861,
13,
5239,
1330,
309,
69,
312,
69,
38469,
7509,
198,
6738,
1341,
35720,
13,
4164,
10466,
13,
24874,
3083,
1330,
8615,
500,
... | 2.757225 | 173 |
# Copyright (c) 2022 Jakub Vesely
# This software is published under MIT license. Full text of the license is available at https://opensource.org/licenses/MIT
import os
import hashlib
from ___basal.___logging import Logging
from ___blocks.___main_block import MainBlock
from ___basal.___planner import Planner
#pyright: reportMissingImports=false
#pylint: disable=no-name-in-module ;implemented in micropython
from micropython import const
_cmd_version = const(0x80)
_cmd_stop_program = const(0x81)
_cmd_start_program = const(0x82)
_cmd_get_next_file_info = const(0x83)
_cmd_remove_file = const(0x84)
_cmd_handle_file = const(0x85)
_cmd_get_file_checksum = const(0x86)
_cmd_append = const(0x87)
_cmd_mk_dir = const(0x88)
| [
2,
220,
15069,
357,
66,
8,
33160,
25845,
549,
569,
2771,
306,
198,
2,
220,
770,
3788,
318,
3199,
739,
17168,
5964,
13,
6462,
2420,
286,
262,
5964,
318,
1695,
379,
3740,
1378,
44813,
1668,
13,
2398,
14,
677,
4541,
14,
36393,
198,
1... | 2.46395 | 319 |
#!/usr/bin/env python
# Todo: Record and print total stats at end of run, write out to file, maybe csv.
# Todo: Option to record video and xyz
# Todo: Turn display on and off.
# Todo: conf binary options
# Todo: conf write in number option
# Todo: Think of ways to make needs_ vars more elegant, maybe a service.
# Todo: consider renaming vision and camera dirs with _drivers
# Todo: resolution selection in GUI
# Todo: camera device selection in GUI
# Todo: callbacks on resolution and camera device selection to restart camera
# Todo: save profiles
# Todo: auto-config
# Todo: when on os-x attempt to open scratch file before main config file
# Todo: graceful handling of Arduino wait
# Todo: don't throw away good dots in hybrid mode if face-find fails
from __future__ import print_function
import sys
import os
import gui_menu
from multiprocessing import Process, Pipe
import util
import conf
import filters
import threading
import time
# OSX has an error when launching GUI subprocesses
# If use_config_gui is false, the program will just watch ~/.headmouse
use_config_gui = sys.platform != 'darwin'
config = conf.render()
output_driver = None
vision_driver = None
camera_driver = None
smoother = None
needs_camera_reinit = False
needs_vision_reinit = False
needs_shutdown = False
needs_restart = False
if __name__ == '__main__':
if use_config_gui:
# GUI process setup
parent_conn, child_conn = Pipe()
gui_child_process = Process(target=gui_menu.initialize, args=(child_conn,))
gui_child_process.start()
handle_gui_process_messages(parent_conn, gui_child_process, polling_wait=1)
gui_watcher_thread = threading.Thread(target=watch_gui_process, args=(parent_conn, gui_child_process))
gui_watcher_thread.start()
else:
print("Gui menu can't be launched directly on OS X, you can launch gui_menu.py in a separete process.")
config_file_watcher = threading.Thread(target=watch_config)
config_file_watcher.start()
# Application restart involves multiple processes and can be triggered from multiple places.
xy_delta_gen = filters.relative_movement()
fps = util.simple_fps()
freq = 60
if use_config_gui:
send_fps = util.Every_n(freq, lambda: parent_conn.send(str( float("{0:.2f}".format(fps.next() * freq)))))
else:
send_fps = util.Every_n(freq, lambda: print(str( float("{0:.2f}".format(fps.next() * freq)))))
config.register_callback('output', update_component)
config.register_callback('algorithm', update_component)
config.register_callback('camera', update_component)
config.register_callback('smoothing', update_component)
config.register_callback('camera_dimensions', update_component)
config.register_callback('camera_device_id', update_component)
config.execute_all_callbacks()
# Todo: See if there's a cleaner way to structure the nested whiles, approval of 3136 would have been nice.
while not (needs_shutdown or needs_restart):
with camera_driver.Camera(config) as cam:
needs_camera_reinit = False
while not (needs_camera_reinit or needs_shutdown or needs_restart):
with vision_driver.Vision(cam, config) as viz:
needs_vision_reinit = False
display_frame = util.Every_n(4, viz.display_image)
while not (needs_vision_reinit or needs_camera_reinit or needs_shutdown or needs_restart):
try:
# Frame processing
viz.get_image()
coords = viz.process()
if coords is not None and None not in coords:
coords = filters.mirror(coords)
abs_pos_x, abs_pos_y, abs_pos_z = coords
xy = xy_delta_gen.send((abs_pos_x, abs_pos_y))
if not filters.detect_outliers(xy, config['max_input_distance']):
xy = smoother.send(xy)
xy = filters.accelerate(xy, config)
output_driver.send_xy(xy)
if config['display']:
display_frame.next()
send_fps.next()
except KeyboardInterrupt:
needs_restart = False
needs_shutdown = True
if needs_restart:
restart()
if use_config_gui:
gui_child_process.terminate()
sys.exit()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
2,
309,
24313,
25,
13266,
290,
3601,
2472,
9756,
379,
886,
286,
1057,
11,
3551,
503,
284,
2393,
11,
3863,
269,
21370,
13,
198,
2,
309,
24313,
25,
16018,
284,
1700,
2008,
290,
... | 2.357107 | 1,977 |
# -*- coding: utf-8 -*- #
"""*********************************************************************************************"""
# FileName [ expert.py ]
# Synopsis [ the speech separation downstream wrapper ]
# Source [ Reference some code from https://github.com/funcwj/uPIT-for-speech-separation and https://github.com/asteroid-team/asteroid ]
# Author [ Zili Huang ]
# Copyright [ Copyright(c), Johns Hopkins University ]
"""*********************************************************************************************"""
###############
# IMPORTATION #
###############
import os
import math
import random
import h5py
import numpy as np
from pathlib import Path
from collections import defaultdict
import librosa
# -------------#
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.nn.utils.rnn import pack_sequence, pad_sequence
import torch.nn.functional as F
# -------------#
from .model import SepRNN
from .dataset import SeparationDataset
from asteroid.metrics import get_metrics
from .loss import MSELoss, SISDRLoss
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
#COMPUTE_METRICS = ["si_sdr", "sdr", "sir", "sar", "stoi"]
class DownstreamExpert(nn.Module):
"""
Used to handle downstream-specific operations
eg. downstream forward, metric computation, contents to log
"""
def get_dataloader(self, mode):
"""
Args:
mode: string
'train', 'dev' or 'test'
Return:
a torch.utils.data.DataLoader returning each batch in the format of:
[wav1, wav2, ...], your_other_contents1, your_other_contents2, ...
where wav1, wav2 ... are in variable length
each wav is torch.FloatTensor in cpu with:
1. dim() == 1
2. sample_rate == 16000
3. directly loaded by torchaudio
"""
if mode == "train":
return self._get_train_dataloader(self.train_dataset)
elif mode == "dev":
return self._get_eval_dataloader(self.dev_dataset)
elif mode == "test":
return self._get_eval_dataloader(self.test_dataset)
def forward(self, mode, features, uttname_list, source_attr, source_wav, target_attr, target_wav_list, feat_length, wav_length, records, **kwargs):
"""
Args:
mode: string
'train', 'dev' or 'test' for this forward step
features:
list of unpadded features [feat1, feat2, ...]
each feat is in torch.FloatTensor and already
put in the device assigned by command-line args
uttname_list:
list of utterance names
source_attr:
source_attr is a dict containing the STFT information
for the mixture. source_attr['magnitude'] stores the STFT
magnitude, source_attr['phase'] stores the STFT phase and
source_attr['stft'] stores the raw STFT feature. The shape
is [bs, max_length, feat_dim]
source_wav:
source_wav contains the raw waveform for the mixture,
and it has the shape of [bs, max_wav_length]
target_attr:
similar to source_attr, it contains the STFT information
for individual sources. It only has two keys ('magnitude' and 'phase')
target_attr['magnitude'] is a list of length n_srcs, and
target_attr['magnitude'][i] has the shape [bs, max_length, feat_dim]
target_wav_list:
target_wav_list contains the raw waveform for the individual
sources, and it is a list of length n_srcs. target_wav_list[0]
has the shape [bs, max_wav_length]
feat_length:
length of STFT features
wav_length:
length of raw waveform
records:
defaultdict(list), by appending contents into records,
these contents can be averaged and logged on Tensorboard
later by self.log_records every log_step
Return:
loss:
the loss to be optimized, should not be detached
"""
# match the feature length to STFT feature length
features = match_length(features, feat_length)
features = pack_sequence(features)
mask = self.model(features)
# evaluate the separation quality of predict sources
if mode == 'dev' or mode == 'test':
if mode == 'dev':
COMPUTE_METRICS = ["si_sdr"]
elif mode == 'test':
COMPUTE_METRICS = ["si_sdr", "stoi", "pesq"]
predict_stfts = [torch.squeeze(m * source_attr['stft'].to(device)) for m in mask]
predict_stfts_np = [np.transpose(s.data.cpu().numpy()) for s in predict_stfts]
assert len(wav_length) == 1
# reconstruct the signal using iSTFT
predict_srcs_np = [librosa.istft(stft_mat,
hop_length=self.upstream_rate,
win_length=self.datarc['win_length'],
window=self.datarc['window'],
center=self.datarc['center'],
length=wav_length[0]) for stft_mat in predict_stfts_np]
predict_srcs_np = np.stack(predict_srcs_np, 0)
gt_srcs_np = torch.cat(target_wav_list, 0).data.cpu().numpy()
mix_np = source_wav.data.cpu().numpy()
utt_metrics = get_metrics(
mix_np,
gt_srcs_np,
predict_srcs_np,
sample_rate = self.datarc['rate'],
metrics_list = COMPUTE_METRICS,
compute_permutation=True,
)
for metric in COMPUTE_METRICS:
input_metric = "input_" + metric
assert metric in utt_metrics and input_metric in utt_metrics
imp = utt_metrics[metric] - utt_metrics[input_metric]
if metric not in records:
records[metric] = []
if metric == "si_sdr":
records[metric].append(imp)
elif metric == "stoi" or metric == "pesq":
records[metric].append(utt_metrics[metric])
else:
raise ValueError("Metric type not defined.")
assert 'batch_id' in kwargs
if kwargs['batch_id'] % 1000 == 0: # Save the prediction every 1000 examples
records['mix'].append(mix_np)
records['hypo'].append(predict_srcs_np)
records['ref'].append(gt_srcs_np)
records['uttname'].append(uttname_list[0])
if self.loss_type == "MSE": # mean square loss
loss = self.objective.compute_loss(mask, feat_length, source_attr, target_attr)
elif self.loss_type == "SISDR": # end-to-end SI-SNR loss
loss = self.objective.compute_loss(mask, feat_length, source_attr, wav_length, target_wav_list)
else:
raise ValueError("Loss type not defined.")
records["loss"].append(loss.item())
return loss
# interface
def log_records(
self, mode, records, logger, global_step, batch_ids, total_batch_num, **kwargs
):
"""
Args:
mode: string
'train':
records and batchids contain contents for `log_step` batches
`log_step` is defined in your downstream config
eg. downstream/example/config.yaml
'dev' or 'test' :
records and batchids contain contents for the entire evaluation dataset
records:
defaultdict(list), contents already appended
logger:
Tensorboard SummaryWriter
please use f'{prefix}your_content_name' as key name
to log your customized contents
global_step:
The global_step when training, which is helpful for Tensorboard logging
batch_ids:
The batches contained in records when enumerating over the dataloader
total_batch_num:
The total amount of batches in the dataloader
Return:
a list of string
Each string is a filename we wish to use to save the current model
according to the evaluation result, like the best.ckpt on the dev set
You can return nothing or an empty list when no need to save the checkpoint
"""
if mode == 'train':
avg_loss = np.mean(records["loss"])
logger.add_scalar(
f"separation_stft/{mode}-loss", avg_loss, global_step=global_step
)
return []
else:
COMPUTE_METRICS = ["si_sdr", "stoi", "pesq"]
avg_loss = np.mean(records["loss"])
logger.add_scalar(
f"separation_stft/{mode}-loss", avg_loss, global_step=global_step
)
with (Path(self.expdir) / f"{mode}_metrics.txt").open("w") as output:
for metric in COMPUTE_METRICS:
avg_metric = np.mean(records[metric])
if mode == "test" or mode == "dev":
print("Average {} of {} utts: {:.4f}".format(metric, len(records[metric]), avg_metric))
print(metric, avg_metric, file=output)
logger.add_scalar(
f'separation_stft/{mode}-'+metric,
avg_metric,
global_step=global_step
)
save_ckpt = []
assert 'si_sdr' in records
if mode == "dev" and np.mean(records['si_sdr']) > self.best_score:
self.best_score = torch.ones(1) * np.mean(records['si_sdr'])
save_ckpt.append(f"best-states-{mode}.ckpt")
for s in ['mix', 'ref', 'hypo', 'uttname']:
assert s in records
for i in range(len(records['uttname'])):
utt = records['uttname'][i]
mix_wav = records['mix'][i][0, :]
mix_wav = librosa.util.normalize(mix_wav, norm=np.inf, axis=None)
logger.add_audio('step{:06d}_{}_mix.wav'.format(global_step, utt), mix_wav, global_step=global_step, sample_rate=self.datarc['rate'])
for j in range(records['ref'][i].shape[0]):
ref_wav = records['ref'][i][j, :]
hypo_wav = records['hypo'][i][j, :]
ref_wav = librosa.util.normalize(ref_wav, norm=np.inf, axis=None)
hypo_wav = librosa.util.normalize(hypo_wav, norm=np.inf, axis=None)
logger.add_audio('step{:06d}_{}_ref_s{}.wav'.format(global_step, utt, j+1), ref_wav, global_step=global_step, sample_rate=self.datarc['rate'])
logger.add_audio('step{:06d}_{}_hypo_s{}.wav'.format(global_step, utt, j+1), hypo_wav, global_step=global_step, sample_rate=self.datarc['rate'])
return save_ckpt
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
1303,
198,
37811,
17174,
17174,
8412,
4557,
35625,
37811,
198,
2,
220,
220,
9220,
5376,
220,
220,
220,
220,
685,
5887,
13,
9078,
2361,
198,
2,
220,
220,
16065,
24608,
220,
... | 2.077405 | 5,426 |
# ============================================================================
# Adafruit PCA9685 16-Channel PWM Servo Driver
# ============================================================================
import logging
import math
import time
import pigpio
logger = logging.getLogger(__name__)
| [
2,
38093,
2559,
18604,
198,
2,
1215,
1878,
4872,
4217,
32,
24,
35978,
1467,
12,
29239,
350,
22117,
3116,
78,
12434,
198,
2,
38093,
2559,
18604,
198,
11748,
18931,
198,
11748,
10688,
198,
11748,
640,
198,
11748,
12967,
79,
952,
628,
19... | 5.321429 | 56 |
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 2 22:16:47 2016
@author: jim
"""
# py_train.tsv: training data set
import sys
import pickle
import zlib
import pandas as pd
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn import ensemble
# model data stricture
# parameters gleaned from R script submission: ExtraTreesClassifier (score 0.45911)
mdl_fit = ExtraTreesClassifier(n_estimators=700,max_features= 50,
criterion = 'entropy',min_samples_split= 5,
max_depth= 50, min_samples_leaf= 5,n_jobs=-1)
if __name__ == "__main__":
print "Starting training"
# retrieve work directory
work_dir = sys.argv[1]
# work_dir = "../../src/L0_xtc1"
# generate training data set file name
training_file = work_dir + "/py_train.tsv"
print training_file
# read training data
train = pd.read_csv(training_file,sep="\t")
# isoloate response variable
response = [1 if x == 'Class_1' else 0 for x in train["response"]]
# isolate predictors
predictors = train.columns[1:len(train.columns)].values
X_train = train[predictors]
# fit model
mdl_fit.fit(X_train,response)
# save fitted model structure
model_dict = {'model':mdl_fit}
model_file = work_dir + "/possible_model"
with open(model_file,"wb") as f:
pickle.dump(model_dict,f)
print "Saved " + model_file
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
3300,
1526,
220,
362,
2534,
25,
1433,
25,
2857,
1584,
198,
198,
31,
9800,
25,
474,
320,
198,
37811,
198,
198,
2,
12972,
62,
27432,
13,
912,
8... | 2.282769 | 679 |
import enum
# https://stackoverflow.com/a/51976841
class VerificationStatus(str, enum.Enum):
"""Class representing verification status enum in database."""
CREATED = "CREATED"
VERIFIED = "VERIFIED"
REJECTED = "REJECTED"
class WorkflowStatus(str, enum.Enum):
"""Class representing workflow status enum in database."""
AVAILABLE = "AVAILABLE"
NEEDS_VERIFICATION = "NEEDS_VERIFICATION"
WITHDRAWN = "WITHDRAWN"
DONE = "DONE"
# https://stackoverflow.com/a/51976841
class GuestPriorityStatus(str, enum.Enum):
"""Class representing status enum in database."""
DOES_NOT_RESPOND = "DOES_NOT_RESPOND"
ACCOMMODATION_NOT_NEEDED = "ACCOMMODATION_NOT_NEEDED"
EN_ROUTE_UA = "EN_ROUTE_UA"
EN_ROUTE_PL = "EN_ROUTE_PL"
IN_KRK = "IN_KRK"
AT_R3 = "AT_R3"
ACCOMMODATION_FOUND = "ACCOMMODATION_FOUND"
UPDATED = "UPDATED"
# https://stackoverflow.com/a/51976841
class Voivodeship(str, enum.Enum):
"""Class representing voivodeship enum in database."""
DOLNOSLASKIE = "DOLNOŚLĄSKIE"
KUJAWSKOPOMORSKIE = "KUJAWSKO-POMORSKIE"
LUBELSKIE = "LUBELSKIE"
LUBUSKIE = "LUBUSKIE"
LODZKIE = "ŁÓDZKIE"
MALOPOLSKIE = "MAŁOPOLSKIE"
MAZOWIECKIE = "MAZOWIECKIE"
OPOLSKIE = "OPOLSKIE"
PODKARPACKIE = "PODKARPACKIE"
PODLASKIE = "PODLASKIE"
POMORSKIE = "POMORSKIE"
SLASKIE = "ŚLĄSKIE"
SWIETOKRZYSKIE = "ŚWIĘTOKRZYSKIE"
WARMINSKOMAZURSKIE = "WARMIŃSKO-MAZURSKIE"
WIELKOPOLSKIE = "WIELKOPOLSKIE"
ZACHODNIOPOMORSKIE = "ZACHODNIOPOMORSKIE"
class LanguageEnum(enum.Enum):
"""Class representing language enum in database."""
ENGLISH = "En"
POLISH = "Pl"
UKRAINIAN = "Uk"
RUSSIAN = "Ru"
| [
11748,
33829,
628,
198,
2,
3740,
1378,
25558,
2502,
11125,
13,
785,
14,
64,
14,
20,
24991,
3104,
3901,
198,
4871,
4643,
2649,
19580,
7,
2536,
11,
33829,
13,
4834,
388,
2599,
198,
220,
220,
220,
37227,
9487,
10200,
19637,
3722,
33829,
... | 2.081509 | 822 |
from advent10 import (
read_input,
calc_corruption_score,
calc_autocompletion_score,
calc_middle_autocompletion_score,
)
| [
6738,
19980,
940,
1330,
357,
198,
220,
220,
220,
1100,
62,
15414,
11,
198,
220,
220,
220,
42302,
62,
46260,
62,
26675,
11,
198,
220,
220,
220,
42302,
62,
2306,
42829,
24547,
62,
26675,
11,
198,
220,
220,
220,
42302,
62,
27171,
62,
... | 2.611111 | 54 |
from piece import Gobang
import time
| [
6738,
3704,
1330,
16909,
648,
201,
198,
11748,
640,
201,
198,
201,
198,
201,
198,
201,
198,
201,
198
] | 2.473684 | 19 |
import json, urllib.request, random
spreadsheetID = "1gZbPx9GpNMObvXzlGRGmah1sW7nCgLIHR-QOocybwXw"
tab = 1
finalUrl = "https://spreadsheets.google.com/feeds/list/" + str(spreadsheetID) + "/1/public/values?alt=json"
with urllib.request.urlopen(finalUrl) as url:
data = json.loads(url.read().decode())
qs = "const myQuiz = ["
for p in data['feed']['entry']:
i = random.randint(1, 100)
if(i < 1):
qs += "{'q':'"+p['title']['$t']+"', 'options': ['"+ p['gsx$correcta']['$t'] + "', '"+ p['gsx$incorrecta']['$t'] + "' ], 'correctIndex': 0, 'correctResponse':'"+ p['gsx$respuesta']['$t'] + "' },"
else:
qs += "{'q':'"+p['title']['$t']+"', 'options': ['"+ p['gsx$incorrecta']['$t'] + "', '"+ p['gsx$correcta']['$t'] + "' ], 'correctIndex': 1, 'correctResponse':'"+ p['gsx$respuesta']['$t'] + "' },"
qs = qs[:-1]
qs += "];"
qs += " $('#quiz').quiz({ counterFormat: 'Pregunta %current de %total', nextButtonText:'Siguiente', finishButtonText:'Terminar', restartButtonText:'Reiniciar', questions: myQuiz});"
with open("qs.js", "w", encoding="utf-8") as f:
f.write(qs)
| [
11748,
33918,
11,
2956,
297,
571,
13,
25927,
11,
4738,
198,
198,
43639,
21760,
2389,
796,
366,
16,
70,
57,
65,
47,
87,
24,
38,
79,
32755,
5944,
85,
55,
48274,
10761,
38,
76,
993,
16,
82,
54,
22,
77,
34,
70,
31271,
17184,
12,
4... | 2.255187 | 482 |
from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
11787,
198,
6738,
42625,
14208,
13,
9945,
13,
27530,
13,
12683,
874,
1330,
1281,
62,
21928,
198,
6738,
42625,
14208,
13,
6381,... | 2.513158 | 76 |
import pytest
import responses
from responses import matchers
from stoplight.repo import AssignmentRepo
REPO = None
@pytest.fixture(autouse=True)
@responses.activate
@responses.activate
@responses.activate
@responses.activate
@responses.activate
@responses.activate
@responses.activate
@responses.activate
@responses.activate
@responses.activate
@responses.activate
@responses.activate
@responses.activate
| [
11748,
12972,
9288,
198,
11748,
9109,
198,
6738,
9109,
1330,
2603,
3533,
198,
198,
6738,
2245,
2971,
13,
260,
7501,
1330,
50144,
6207,
78,
198,
198,
2200,
16402,
796,
6045,
628,
198,
31,
9078,
9288,
13,
69,
9602,
7,
2306,
1076,
28,
... | 3.181159 | 138 |
import math as m
'''
the function joint_entropy() accepts an 2-gram data structures as an input
that is a dictionary with word pairs as keys and the probabilities
of these word pairs as values
always test your code
'''
if __name__ == "__main__":
test_joint_entropy()
| [
11748,
10688,
355,
285,
198,
198,
7061,
6,
198,
1169,
2163,
6466,
62,
298,
28338,
3419,
18178,
281,
362,
12,
4546,
1366,
8573,
355,
281,
5128,
198,
5562,
318,
257,
22155,
351,
1573,
14729,
355,
8251,
290,
262,
39522,
198,
1659,
777,
... | 3.425 | 80 |
import itertools
import numpy as np
from scipy.stats import median_absolute_deviation
def segment_times(timeseries, max_gap):
"""
Returns an N-D array where each row represents a separate segmentation of continuous data with no gaps
greater than the max gap.
"""
time_segments = []
is_contiguous = False
arr_n = -1
for i, t in enumerate(timeseries):
if not is_contiguous:
time_segments.append([t])
arr_n += 1
else:
time_segments[arr_n].append(t)
if i + 1 < len(timeseries):
is_contiguous = (timeseries[i + 1] - t) < max_gap
return time_segments
def match_dimensions(ndarray, onedarray):
"""
Return an N-D array of shape ndarray.shape with the values of onedarray
"""
ndreturn = []
idx = 0
for sublist in ndarray:
num = len(sublist)
subreturn = onedarray[idx : idx + num]
idx += num
ndreturn.append(subreturn)
return ndreturn
def regularize(timeseries, dt, bin_about=None):
"""
:param timeseries: defines max range of output
:param dt: gap between outputted time points
:param bin_about: define centre of bins. If outside of range of timeseries will extrapolate
:return: regular timeseries with spacing dt defined within the range of input
"""
if bin_about:
return np.concatenate(
[
np.r_[bin_about : timeseries.min() : -dt][::-1],
np.r_[bin_about + dt : timeseries.max() : dt],
]
)
else:
return np.r_[timeseries.min() : timeseries.max() : dt]
def medsig(
a: np.ndarray, include_zeros: bool = True, axis: int = None
):
"""
Compute median and MAD-estimated scatter of array a
:param a: numpy NDArray
:param include_zeros: bool, default True. If False ignore zero values from median calculation
:param axis: (int), default None. axis over which to calculate the median.
"""
a = a if include_zeros else a[np.nonzero(a)]
med = np.nanmedian(a, axis=axis)
sig = median_absolute_deviation(a, axis=axis, nan_policy="omit")
return med, sig
def rebin_err(t, f, dt=0.02, get_err_on_mean=False, bin_about=None):
"""
Rebin a time-series with errors on the data (y-points).
Apply unweighted average: ignore errors on the data to be binned and
perform a simple MAD estimation of the error
from the scatter of points
"""
treg = regularize(t, dt=dt, bin_about=bin_about)
nreg = len(treg)
freg = np.zeros(nreg) + np.nan
freg_err = np.zeros(nreg) + np.nan
for i in np.arange(nreg):
l = (t >= treg[i]) * (t < treg[i] + dt)
if l.any():
treg[i] = np.nanmean(t[l])
freg[i], freg_err[i] = medsig(f[l])
if get_err_on_mean:
freg_err[i] /= np.sqrt(float(len(f[l])))
l = np.isfinite(freg)
return treg[l], freg[l], freg_err[l]
def rebin_err_chunks(t, f, dt, max_gap=0.02, get_err_on_mean=False, bin_about=0):
    """Re-bin a time series by first splitting it into contiguous chunks.

    The series is segmented wherever consecutive times are further apart
    than ``max_gap``, each chunk is binned independently with
    ``rebin_err`` (avoiding empty data inside a chunk), and the
    per-chunk results are concatenated.
    """
    time_chunks = segment_times(t, max_gap)
    flux_chunks = match_dimensions(time_chunks, f)
    binned_times, binned_fluxes, binned_errs = [], [], []
    for chunk_t, chunk_f in zip(time_chunks, flux_chunks):
        tbin, fbin, ferr = rebin_err(
            np.array(chunk_t),
            np.array(chunk_f),
            dt=dt,
            get_err_on_mean=get_err_on_mean,
            bin_about=bin_about,
        )
        binned_times.append(tbin)
        binned_fluxes.append(fbin)
        binned_errs.append(ferr)
    treg = list(itertools.chain.from_iterable(binned_times))
    freg = list(itertools.chain.from_iterable(binned_fluxes))
    freg_err = list(itertools.chain.from_iterable(binned_errs))
    return treg, freg, freg_err
11748,
340,
861,
10141,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
629,
541,
88,
13,
34242,
1330,
14288,
62,
48546,
62,
7959,
3920,
198,
198,
4299,
10618,
62,
22355,
7,
22355,
10640,
11,
3509,
62,
43554,
2599,
198,
220,
220,
220,
... | 2.187394 | 1,777 |
from . import datasets
from . import samplers
from . import filereaders
from . import imglists
try:
from .dataloader import *
except:
from torch.utils.data import DataLoader
| [
6738,
764,
1330,
40522,
198,
6738,
764,
1330,
6072,
489,
364,
198,
6738,
764,
1330,
2393,
961,
364,
198,
6738,
764,
1330,
545,
4743,
1023,
198,
28311,
25,
198,
220,
220,
220,
422,
764,
67,
10254,
1170,
263,
1330,
1635,
198,
16341,
2... | 3.192982 | 57 |
# Generated by Django 2.1.2 on 2018-12-20 03:00
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
362,
13,
16,
13,
17,
319,
2864,
12,
1065,
12,
1238,
7643,
25,
405,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
# Generated by Django 2.2.1 on 2019-07-08 09:17
import datetime
from django.db import migrations, models
import django.db.models.deletion
| [
2,
2980,
515,
416,
37770,
362,
13,
17,
13,
16,
319,
13130,
12,
2998,
12,
2919,
7769,
25,
1558,
198,
198,
11748,
4818,
8079,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14208,
13,
9945,
13,
27530... | 2.916667 | 48 |
#!/usr/bin/env python3
from unittest.mock import call, patch
from schmetterling.core.nio import rm_paths
@patch('schmetterling.core.nio.rmdir')
@patch('schmetterling.core.nio.rmtree')
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
6738,
555,
715,
395,
13,
76,
735,
1330,
869,
11,
8529,
198,
198,
6738,
5513,
4164,
353,
1359,
13,
7295,
13,
77,
952,
1330,
42721,
62,
6978,
82,
628,
198,
31,
17147,
10786,
2060... | 2.460526 | 76 |
from tests.factories import MineFactory, ProjectSummaryFactory
from app.api.mines.project_summary.models.project_summary import ProjectSummary
| [
6738,
5254,
13,
22584,
1749,
1330,
11517,
22810,
11,
4935,
22093,
22810,
198,
198,
6738,
598,
13,
15042,
13,
1084,
274,
13,
16302,
62,
49736,
13,
27530,
13,
16302,
62,
49736,
1330,
4935,
22093,
628,
198
] | 4.055556 | 36 |
'''
This module allows to load texture and to sample from it
'''
import numpy as np
from vulkbare import load_image, resize_image
from vulk import vulkanobject as vo
from vulk import vulkanconstant as vc
from vulk.util import mipmap_size, mipmap_levels
class RawTexture():
    """A Raw texture is not initialized with an image file but can be filled
    manually (by the caller or a subclass) before `upload` is called."""

    def __init__(self, context, width, height, texture_format, mip_levels=1):
        """
        Args:
            context (VulkContext)
            width (int): Width of the texture
            height (int): Height of the texture
            texture_format (Format): Format of vulkan texture
            mip_levels (int): Number of mipmaps (falsy = compute every
                level down to 1x1 via `mipmap_levels`)
        """
        self.width = width
        self.height = height
        self.format = texture_format
        self.mip_levels = mip_levels or mipmap_levels(width, height)
        # NOTE(review): `init_texture` is not defined anywhere visible in
        # this module -- presumably provided elsewhere; TODO confirm.
        self.texture = self.init_texture(context, self.mip_levels)

        # Init bitmap
        self.bitmap = self.init_bitmap()

        # Init view and sampler.  Fixed: the previous code called
        # `init_view`/`init_sampler`, which do not exist on this class
        # (the methods are named `set_view`/`set_sampler`) and therefore
        # raised AttributeError on construction.
        self.view = None
        self.set_view(context)
        self.sampler = None
        self.set_sampler(context)

    def init_bitmap(self):
        # pylint: disable=unused-argument
        '''Return a zero-filled numpy byte array sized for the texture'''
        _, _, pixel_size = vc.format_info(self.format)
        return np.zeros(self.width*self.height*pixel_size, dtype=np.uint8)

    def set_view(self, context):
        """Set texture view

        Args:
            context (VulkContext)
        """
        # Single-layer, single-level color view over the final image.
        texture_range = vo.ImageSubresourceRange(
            vc.ImageAspect.COLOR, 0, 1, 0, 1)
        self.view = vo.ImageView(
            context, self.texture.final_image,
            vc.ImageViewType.TYPE_2D, self.format, texture_range)

    def set_sampler(self, context, mag_filter=vc.Filter.NEAREST,
                    min_filter=vc.Filter.NEAREST,
                    mipmap_mode=vc.SamplerMipmapMode.NEAREST,
                    address_mode_u=vc.SamplerAddressMode.REPEAT,
                    address_mode_v=vc.SamplerAddressMode.REPEAT,
                    address_mode_w=vc.SamplerAddressMode.REPEAT,
                    anisotropy_enable=False, max_anisotropy=16):
        """Set the texture sampler

        By default, sampler is configured for the best performance.
        If you want better quality, you must enable manually bilinear,
        trilinear or anisotropic filtering.

        Args:
            context (VulkContext): Context
            mag_filter (Filter): Magnification filter to apply to lookups
            min_filter (Filter): Minification filter to apply to lookups
            mipmap_mode (SamplerMipmapMode): Mipmap filter to apply to lookups
            address_mode_u (SamplerAddressMode):
            address_mode_v (SamplerAddressMode):
            address_mode_w (SamplerAddressMode):
            anisotropy_enable (bool): Whether to enable anisotropy
            max_anisotropy (int): Anisotropy value clamp
        """
        # Destroy any previously created sampler before replacing it.
        if self.sampler:
            self.sampler.destroy(context)

        self.sampler = vo.Sampler(
            context, mag_filter, min_filter, mipmap_mode,
            address_mode_u, address_mode_v, address_mode_w, 0,
            anisotropy_enable, max_anisotropy, False, vc.CompareOp.ALWAYS,
            0, 0, vc.BorderColor.INT_OPAQUE_BLACK, False)

    def upload(self, context):
        """Make texture accessible for shader

        If this function is not called, the texture can't be used.
        When all your buffers are uploaded, call this function
        """
        self.texture.finalize(context)
class BinaryTexture(RawTexture):
    """RawTexture with provided bitmap buffer.

    **Warning: You are responsible of the bitmap buffer**
    """
    def __init__(self, context, width, height, texture_format, raw_bitmap,
                 mip_levels=1):
        """
        Args:
            context (VulkContext)
            width (int): Texture width
            height (int): Texture height
            texture_format (Format): Texture format
            raw_bitmap (buffer): Bitmap buffer (can be None)
            mip_levels (int): Number of mipmaps to generate (0 = until 1x1)
        """
        # Must be set before the parent __init__, which calls our
        # overridden init_bitmap().
        self.raw_bitmap = raw_bitmap
        # Create all the components by calling parent init
        super().__init__(context, width, height, texture_format,
                         mip_levels=mip_levels)
        # Upload data only when a buffer was actually supplied; with no
        # buffer the caller is expected to fill the texture later.
        if self.raw_bitmap:
            self.generate_mipmaps(context)
            self.upload(context)

    def init_bitmap(self):
        '''Initialize bitmap array with `raw_bitmap`.

        Returns None when no raw bitmap was supplied (so `self.bitmap`
        is None in that case, unlike the zero-filled parent behaviour).
        '''
        if not self.raw_bitmap:
            return
        # copy=False: wrap the caller's buffer without duplicating it.
        return np.array(self.raw_bitmap, dtype=np.uint8, copy=False)

    def upload_buffer(self, context, mip_level):
        """Upload bitmap into Vulkan memory

        For mip level 0 the original bitmap is used directly; smaller
        levels are resized on the CPU from `raw_bitmap` first.

        Args:
            context (VulkContext)
            mip_level (int): Level of mip
        """
        base_width = self.width
        base_height = self.height
        components = vc.format_info(self.format)[1]
        width, height = mipmap_size(base_width, base_height, mip_level)
        if width == base_width and height == base_height:
            upload_bitmap = self.bitmap
        else:
            upload_raw_bitmap = resize_image(
                self.raw_bitmap, base_width, base_height, components,
                width, height
            )
            upload_bitmap = np.array(upload_raw_bitmap, dtype=np.uint8,
                                     copy=False)
        # casting='no' guarantees the dtypes match exactly; a mismatch
        # raises instead of silently converting.
        with self.texture.bind_buffer(context, mip_level) as buf:
            np.copyto(np.array(buf, copy=False),
                      upload_bitmap,
                      casting='no')

    def generate_mipmaps(self, context):
        """Generate mipmap automatically

        This method generates mipmap on processor and then upload it on GPU.
        This method is heavy, use it with care. You shouldn't need to call it
        several times unless raw_bitmap is modified.

        You must call `upload` to update the texture in Graphic Card.

        Args:
            context (VulkContext)
        """
        for i in range(self.mip_levels):
            self.upload_buffer(context, i)
class Texture(BinaryTexture):
    """BinaryTexture whose bitmap is loaded from an image file on disk."""

    def __init__(self, context, path_file, mip_levels=1):
        """
        Args:
            context (VulkContext)
            path_file (str): Path to the image to load
            mip_levels (int): Number of mip level (0=max)
        """
        # Load and decode the image file into a raw byte buffer plus its
        # dimensions and channel count.
        with open(path_file, 'rb') as f:
            raw_bitmap, width, height, components = load_image(f.read())

        texture_format = Texture.components_to_format(components)

        # Create all the components by calling parent init
        super().__init__(context, width, height, texture_format, raw_bitmap,
                         mip_levels=mip_levels)

    @staticmethod
    def components_to_format(components):
        '''Convert number of channel components in image to Vulkan format

        *Parameters:*

        - `components`: Number of components (1 to 4 inclusive)

        Raises ValueError for a channel count outside the supported range.
        '''
        if components < 1 or components > 4:
            # Fixed message: the valid range is 1-4, but the old text
            # wrongly claimed "between 0 and 4".
            raise ValueError("components must be between 1 and 4")
        # Index 0 (NONE) is unreachable; indices 1-4 map channel count
        # to the matching unsigned-normalized format.
        return [vc.Format.NONE, vc.Format.R8_UNORM, vc.Format.R8G8_UNORM,
                vc.Format.R8G8B8_UNORM, vc.Format.R8G8B8A8_UNORM][components]
class HighQualityTexture(Texture):
    """Texture with best quality

    Intended to generate all mipmaps automatically and enable trilinear
    or anisotropic filtering; really just a helper class.

    NOTE(review): the class body is empty, so it currently behaves
    exactly like `Texture` -- the quality settings described above are
    not applied anywhere visible.  Confirm whether an `__init__`
    override was lost.
    """
class TextureRegion():
    '''
    Defines a rectangular area of a texture. The coordinate system used has
    its origin in the upper left corner with the x-axis pointing to the
    right and the y axis pointing downwards.
    '''

    def __init__(self, texture, u=0, v=0, u2=1, v2=1):
        '''Initialize texture region

        *Parameters:*

        - `texture`: `RawTexture`
        - `u`, `u2`: X coordinates relative to texture size
        - `v`, `v2`: Y coordinates relative to texture size
        '''
        self.texture = texture
        self.u = u
        self.u2 = u2
        self.v = v
        self.v2 = v2

    @staticmethod
    def from_pixels(texture, x, y, width, height):
        """Create a TextureRegion with pixel coordinates

        Args:
            texture (Texture): Base texture of region
            x (int): X offset (left to right)
            y (int): Y offset (top to bottom)
            width (int): Region width
            height (int): Region height

        Returns:
            The new TextureRegion
        """
        u = x / texture.width
        u2 = u + width / texture.width
        v = y / texture.height
        v2 = v + height / texture.height
        return TextureRegion(texture, u, v, u2, v2)

    def set_texture(self, texture):
        '''Set texture of `TextureRegion`

        *Parameters:*

        - `texture`: `RawTexture`
        '''
        self.texture = texture

    def set_region(self, u, u2, v, v2):
        '''Set coordinates relative to texture size

        *Parameters:*

        - `u`, `u2`: X coordinates relative to texture size
        - `v`, `v2`: Y coordinates relative to texture size
        '''
        self.u = u
        self.u2 = u2
        self.v = v
        self.v2 = v2

    def set_region_pixel(self, x, y, width, height):
        '''Set coordinates from pixel values

        *Parameters:*

        - `x`: X coordinate of the region inside the texture
        - `y`: Y coordinate of the region inside the texture
        - `width`: Width of the region
        - `height`: Height of the region
        '''
        inv_width = 1. / self.texture.width
        inv_height = 1. / self.texture.height
        # Fixed: `set_region` takes (u, u2, v, v2) but the old call
        # passed (u, v, u2, v2), so u2 received the top edge and v the
        # right edge, corrupting the region.
        self.set_region(
            x * inv_width, (x + width) * inv_width,
            y * inv_height, (y + height) * inv_height
        )
| [
7061,
6,
198,
1212,
8265,
3578,
284,
3440,
11743,
290,
284,
6291,
422,
340,
198,
7061,
6,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
410,
12171,
49382,
1330,
3440,
62,
9060,
11,
47558,
62,
9060,
198,
198,
6738,
410,
12171,
1330,
... | 2.229978 | 4,470 |
# Copyright 2021 Andreas Steck (steck.andi@gmail.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from carebt import RateControlNode
from carebt.examples.longrun_actions import AddTwoNumbersMultiTickAction
class SimpleRateControl(RateControlNode):
"""The `SimpleRateControl` example node.
The `SimpleRateControl` has one child. In this example this is the
`AddTwoNumbersMultiTickAction`. This node has no throttling. Due to
the `RateControlNode` such a throttling can be implemented without
changing the original source code of the node. Here, this throttling
rate is set to 1000 ms.
Input Parameters
----------------
?ticks : int
The number of ticks the calculation takes
?a : int
The first value
?b : int
The second value
"""
| [
2,
15069,
33448,
33728,
2441,
694,
357,
4169,
694,
13,
26800,
31,
14816,
13,
785,
8,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,... | 3.442105 | 380 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
from app.curses_util import *
import app.fake_curses_testing
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,
11593,
37443,
834,
1330,
7297,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
198,
11748,
555,
715,
395,
198,
198,
6738,
598,
13,
66,
46998,
62,
22602,
1330,
1635,
1... | 3.615385 | 52 |
"""
* *******************************************************
* Copyright VMware, Inc. 2016. All Rights Reserved.
* SPDX-License-Identifier: MIT
* *******************************************************
*
* DISCLAIMER. THIS PROGRAM IS PROVIDED TO YOU "AS IS" WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, WHETHER ORAL OR WRITTEN,
* EXPRESS OR IMPLIED. THE AUTHOR SPECIFICALLY DISCLAIMS ANY IMPLIED
* WARRANTIES OR CONDITIONS OF MERCHANTABILITY, SATISFACTORY QUALITY,
* NON-INFRINGEMENT AND FITNESS FOR A PARTICULAR PURPOSE.
"""
__author__ = 'VMware, Inc.'
__copyright__ = 'Copyright 2016 VMware, Inc. All rights reserved.'
__vcenter_version__ = '6.0+'
import time
class ClsSyncHelper:
    """
    Helper class to wait for the subscribed libraries and items to be
    synchronized completely with the publisher.

    NOTE(review): `self.client` is never assigned in this class; callers
    must inject a client exposing `library_item_service` and
    `subscribed_library_service` before use -- confirm against callers.
    """
    # Seconds to sleep between successive polls of the server.
    wait_interval_sec = 1
    # Timestamp of the start of the current wait; set by the verify_*
    # entry points.
    start_time = None
    # Overall timeout in seconds; must be set by the caller.
    sync_timeout_sec = None

    def verify_library_sync(self, pub_lib_id, sub_lib):
        """
        Wait until the subscribed library and its items are synchronized with
        the published library.
        """
        self.start_time = time.time()
        if not self.verify_same_items(pub_lib_id, sub_lib.id):
            return False

        sub_item_ids = self.client.library_item_service.list(sub_lib.id)
        for sub_item_id in sub_item_ids:
            if not self.verify_item_sync(sub_item_id):
                return False

        if not self.verify_library_last_sync_time(sub_lib):
            return False
        return True

    def verify_item_sync(self, sub_item_id):
        """
        Wait until the subscribed item is synchronized with the published item.
        """
        self.start_time = time.time()
        is_synced = False
        pub_item_id = self.client.library_item_service.get(
            sub_item_id).source_id
        pub_item = self.client.library_item_service.get(pub_item_id)
        while self.not_timed_out():
            sub_item = self.client.library_item_service.get(sub_item_id)
            # The subscribed item is up to date when both its metadata
            # and content versions match the published item's.
            if (sub_item.metadata_version == pub_item.metadata_version and
                    sub_item.content_version == pub_item.content_version):
                is_synced = True
                break
            time.sleep(self.wait_interval_sec)
        return is_synced

    def verify_same_items(self, pub_lib_id, sub_lib_id):
        """
        Wait until the subscribed library has the same source item IDs as the
        published library.
        """
        is_synced = False
        pub_item_ids = self.client.library_item_service.list(pub_lib_id)
        while self.not_timed_out():
            sub_item_ids = self.client.library_item_service.list(sub_lib_id)
            if self.has_same_items(pub_item_ids, sub_item_ids):
                is_synced = True
                break
            time.sleep(self.wait_interval_sec)
        return is_synced

    def verify_library_last_sync_time(self, sub_lib):
        """
        Wait until the subscribed library's last sync time is populated.
        """
        is_synced = False
        while self.not_timed_out():
            # Get the subscribed library's updated information from server.
            refreshed_sub_lib = self.client.subscribed_library_service.get(
                sub_lib.id)
            if refreshed_sub_lib.last_sync_time is not None:
                if (sub_lib.last_sync_time is None or
                        refreshed_sub_lib.last_sync_time > sub_lib.last_sync_time):
                    is_synced = True
                    break
            time.sleep(self.wait_interval_sec)
        return is_synced

    def has_same_items(self, pub_item_ids, sub_item_ids):
        """
        Check if the subscribed library contains the same items as the
        published library. The item versions are not checked.
        """
        if len(pub_item_ids) != len(sub_item_ids):
            return False
        matched_source_ids = []
        for sub_item_id in sub_item_ids:
            source_id = self.client.library_item_service.get(
                sub_item_id).source_id
            # Fixed: track the *source* ids already matched.  The old code
            # appended subscriber item ids while testing source ids against
            # the list, so the de-duplication never fired and two
            # subscribed items sharing one source were both counted.
            if source_id not in matched_source_ids and source_id in pub_item_ids:
                matched_source_ids.append(source_id)
        return len(pub_item_ids) == len(matched_source_ids)

    def not_timed_out(self):
        """
        Check if sync is not timed out yet.
        """
        elapsed_time = time.time() - self.start_time  # typo fix: was "elasped"
        return elapsed_time < self.sync_timeout_sec
| [
37811,
198,
9,
41906,
8412,
2466,
8162,
198,
9,
15069,
37754,
11,
3457,
13,
1584,
13,
1439,
6923,
33876,
13,
198,
9,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
17168,
198,
9,
41906,
8412,
2466,
8162,
198,
9,
198,
9,
13954,
48778,
... | 2.282981 | 1,986 |
import string
import globals
valid_input=[1,0,True,False] # Valid Input For Boolean Functions
def input_num(input_str):
    '''Count the distinct ASCII-letter variables in an expression string.

    :param input_str: boolean-expression string, e.g. "a+b.c'"
    :return: number of distinct letters, or None when `input_str` is not
        iterable (a message is printed instead of raising).
    '''
    try:
        # A set keeps each variable once regardless of repetitions.
        variables = {char for char in input_str
                     if char in string.ascii_letters}
        return len(variables)
    except TypeError:
        # Narrowed from a bare `except`: a non-iterable argument is the
        # expected failure mode here.
        print("Please Pass VLSI Object To Function")
        return None
# Validate that an expression contains only legal characters.
def check_valid(input_string):
    '''Check every character of `input_string` against the legal set.

    Legal characters are ASCII letters, digits and the punctuation set
    `'+^.() #!` (space included).

    :param input_string: expression to validate
    :return: True/False, or None when the input is not iterable
        (a message is printed instead of raising).
    '''
    # Build the set once: O(1) membership per character instead of
    # repeated substring scans.
    allowed = set(string.ascii_letters + string.digits + "'+^.() #!")
    try:
        return all(char in allowed for char in input_string)
    except TypeError:
        # Narrowed from a bare `except`: non-iterable input only.
        print("Problem In Input")
        return None
| [
11748,
4731,
198,
11748,
15095,
874,
198,
12102,
62,
15414,
41888,
16,
11,
15,
11,
17821,
11,
25101,
60,
1303,
48951,
23412,
1114,
41146,
40480,
198,
4299,
5128,
62,
22510,
7,
15414,
62,
2536,
2599,
198,
220,
220,
220,
705,
7061,
198,... | 2.146096 | 397 |
from mk2.plugins import Plugin
from mk2.events import UserInput
| [
6738,
33480,
17,
13,
37390,
1330,
42636,
198,
6738,
33480,
17,
13,
31534,
1330,
11787,
20560,
628
] | 3.823529 | 17 |
# Common functions will go here.
| [
2,
17227,
5499,
481,
467,
287,
994,
198
] | 4.25 | 8 |
# -*- coding: utf-8 -*-
# A Python calculator for operator
from sys import argv
from decimal import *
def delBlank(strs):
    """Return `strs` with every space character removed.

    Uses str.replace, a single C-level pass, instead of the original
    quadratic character-by-character `ans += e` loop.
    """
    return strs.replace(" ", "")
def precede(a, b):
    """
    Compare the precedence of operators `a` (on the stack) and `b`
    (incoming): '>' means reduce, '<' means shift, '=' marks a matching
    pair ('(' with ')' or '#' with '#'), ' ' marks an illegal pairing.
    """
    prior = (
        # '+' '-' '*' '/' '(' ')' '^' '#'
        ('>', '>', '<', '<', '<', '>', '<', '>'),  # '+'
        ('>', '>', '<', '<', '<', '>', '<', '>'),  # '-'
        ('>', '>', '>', '>', '<', '>', '<', '>'),  # '*'
        ('>', '>', '>', '>', '<', '>', '<', '>'),  # '/'
        ('<', '<', '<', '<', '<', '=', '<', ' '),  # '('
        ('>', '>', '>', '>', ' ', '>', '>', '>'),  # ')'
        ('>', '>', '>', '>', '<', '>', '>', '>'),  # '^'
        ('<', '<', '<', '<', '<', ' ', '<', '=')   # '#'
    )
    # Row/column index for each operator, in table order.
    index_of = {op: i for i, op in enumerate("+-*/()^#")}
    return prior[index_of[a]][index_of[b]]
def operate(a, b, operator):
    """
    Apply `operator` to the operands and return `a <operator> b`.

    Division by zero and undefined powers (0 raised to a non-positive
    exponent) return the sentinel string "VALUE ERROR" instead of
    raising.
    """
    if operator == '+':
        ans = a + b
    elif operator == '-':
        ans = a - b
    elif operator == '*':
        ans = a * b
    elif operator == '/':
        if b == 0:
            ans = "VALUE ERROR"
        else:
            ans = a / b
    elif operator == '^':
        # 0**0 is undefined and 0**negative raised ZeroDivisionError in
        # the old code; both are now reported as value errors.
        if a == 0 and b <= 0:
            ans = "VALUE ERROR"
        else:
            ans = a ** b
    return ans
def calc(exp):
    """
    Evaluate the arithmetic expression string `exp`.

    Classic two-stack (operator / operand) precedence parser driven by
    the `precede` table.  Returns the numeric result, or one of the
    sentinel strings "FORMAT ERROR", "VALUE ERROR" or "INPUT ERROR".
    """
    # '#' is the sentinel operator delimiting both ends of the input;
    # the operator stack starts with the matching left sentinel.
    exp += '#'
    operSet= "+-*/^()#"
    stackOfOperator, stackOfNum = ['#'], []
    pos, ans, index, length = 0, 0, 0, len(exp)
    while index < length:
        e = exp[index]
        if e in operSet:
            topOperator = stackOfOperator.pop()
            compare = precede(topOperator, e)
            if compare == '>':
                # Stack operator binds tighter: reduce.  `index` is
                # deliberately NOT advanced, so the current token is
                # re-examined after the reduction.
                try:
                    b = stackOfNum.pop()
                    a = stackOfNum.pop()
                except:
                    # Operand-stack underflow => malformed expression.
                    return "FORMAT ERROR"
                ans = operate(a, b, topOperator)
                if ans == 'VALUE ERROR':
                    return ans
                else:
                    stackOfNum.append(ans)
            elif compare == '<':
                # Incoming operator binds tighter: push both back on
                # and consume the token.
                stackOfOperator.append(topOperator)
                stackOfOperator.append(e)
                index += 1
            elif compare == '=':
                # Matching pair ('(' meets ')' or '#' meets '#'):
                # discard both sides.
                index += 1
            elif compare == ' ':
                # Illegal pairing, e.g. ')' with no matching '('.
                return "FORMAT ERROR"
        else:
            # Scan a full numeric literal up to the next operator.
            pos = index
            while not exp[index] in operSet:
                index += 1
            temp = exp[pos:index]
            # Trim trailing zeros from float literals ("1.50" -> "1.5",
            # "1.0" -> "1.", which Decimal still parses).
            last = index -1
            if '.' in temp:
                while exp[last] == '0':
                    last -= 1
                temp = exp[pos:last + 1]
            try:
                temp = Decimal(temp)
            except:
                # Not parseable as a Decimal (e.g. letters, "1.2.3").
                return "INPUT ERROR"
            stackOfNum.append(temp)
    # A well-formed expression leaves exactly one operand and an empty
    # operator stack (both '#' sentinels consumed by the '=' case).
    if len(stackOfNum) == 1 and stackOfOperator == []:
        return stackOfNum.pop()
    else:
        return "INPUT ERROR"
if __name__ == '__main__':
    # The expression is the first command-line argument,
    # e.g.:  python calc.py "1+2*3"
    exp = argv[1]
    # Work with 10 significant digits for all Decimal arithmetic.
    getcontext().prec = 10
    exp = delBlank(exp)
    ans = calc(exp)
    print(ans)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
317,
11361,
28260,
329,
10088,
198,
198,
6738,
25064,
1330,
1822,
85,
198,
6738,
32465,
1330,
1635,
628,
198,
4299,
1619,
3629,
962,
7,
2536,
82,
2599,
198,
197,
37... | 1.982301 | 1,356 |
from django.apps import AppConfig
| [
6738,
42625,
14208,
13,
18211,
1330,
2034,
16934,
628
] | 3.888889 | 9 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 16 17:55:16 2020
Copyright 2020 by Hadrien Montanelli.
"""
# Standard library imports:
import matplotlib.pyplot as plt
import numpy as np
import scipy.integrate as intg
class RandVar:
    """
    Class for representing continuous random variables in 1D.

    A variable is defined by a pdf callable and a (lower, upper) support
    domain; all moments are computed numerically with
    `scipy.integrate.quad`, so the pdf never needs a closed form.
    """
    def __init__(self, pdf, domain):
        """Construct a RandVar from a pdf and a domain.

        :param pdf: callable pdf f(x)
        :param domain: (lower, upper) integration bounds
        """
        self.pdf = pdf
        self.domain = domain

    def norm(self):
        """Return the norm (total integral) of the pdf of self."""
        left_bound = self.domain[0]
        right_bound = self.domain[1]
        integrand = lambda x: self.pdf(x)
        output = intg.quad(integrand, left_bound, right_bound)
        return output[0]

    def mean(self):
        """Return the mean of self."""
        left_bound = self.domain[0]
        right_bound = self.domain[1]
        integrand = lambda x: x*self.pdf(x)
        output = intg.quad(integrand, left_bound, right_bound)
        return output[0]

    def var(self):
        """Return the variance of self (E[x^2] - E[x]^2)."""
        left_bound = self.domain[0]
        right_bound = self.domain[1]
        integrand_1 = lambda x: x*self.pdf(x)
        output_1 = intg.quad(integrand_1, left_bound, right_bound)
        integrand_2 = lambda x: x**2*self.pdf(x)
        output_2 = intg.quad(integrand_2, left_bound, right_bound)
        return output_2[0] - (output_1[0])**2

    def display(self):
        """Print norm, mean and variance of self (rounded)."""
        tol = 6
        print('------------------')
        print('norm:', round(self.norm(), tol))
        print('mean:', round(self.mean(), tol))
        print('var: ', round(self.var(), tol), '\n')

    def scale(self, scaling, shift):
        """Return the variable scaling*self + shift (pdf change of variable)."""
        self_scaled = RandVar([], self.domain)
        self_scaled.pdf = lambda x: 1/scaling*self.pdf((x-shift)/scaling)
        return self_scaled

    def plus(self, randvar):
        """Return self + randvar (pdf convolution over self's domain)."""
        self_plus_randvar = RandVar([], self.domain)
        left_bound = self.domain[0]
        right_bound = self.domain[1]
        integrand = lambda x,z: self.pdf(x)*randvar.pdf(z-x)
        self_plus_randvar.pdf = lambda z: intg.quad(lambda x: integrand(x, z),
                                                    left_bound,
                                                    right_bound)[0]
        return self_plus_randvar

    def minus(self, randvar):
        """Return self - randvar."""
        return RandVar.plus(self, RandVar.scale(randvar, -1, 0))

    def plot(self, param=None):
        """Plot the pdf of self.

        :param param: optional matplotlib format string (e.g. '--r').
            The old default was a mutable empty list, which is both a
            shared mutable default and not a valid format argument; a
            None sentinel is used instead.
        """
        left_bound = self.domain[0]
        right_bound = self.domain[1]
        number_points = int(100*(right_bound - left_bound))
        x = np.linspace(left_bound, right_bound, number_points)
        y = np.vectorize(self.pdf)(x)
        if param is None:
            plt.plot(x, y)
        else:
            plt.plot(x, y, param)
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
19480,
2556,
1467,
1596,
25,
2816,
25,
1433,
12131,
198,
198,
15269,
12131,
416,
11161,
153... | 2.123555 | 1,384 |
import os
import sys
sys.path.append("/usr/local/apps/grib_api/1.15.0/GNU/4.8.1/lib/python2.7/site-packages/gribapi")
sys.path.append("/usr/local/apps/python/2.7.8-01/lib/python2.7/site-packages")
try:
import ecmwfapi
except:
pass
import numpy as np
import tempfile
try:
from grib import GribFile
except:
pass
# Root / cache / file locations, overridable through the environment.
# Defaults point at the developer's Dropbox checkout and /tmp.
ANALOGUES_HOME = os.environ.get("ANALOGUES_HOME", os.path.expanduser("~/Dropbox/phd"))
ANALOGUES_CACHE = os.environ.get("ANALOGUES_CACHE", os.path.expanduser("/tmp"))
ANALOGUES_FILES = os.environ.get("ANALOGUES_FILES", os.path.expanduser("/tmp"))
# MODE = {('2t', 3): 'meanz_19.03',
# ('msl', 3): 'meanz_32.18',
# ('sd', 3): 'meanz_0.13',
# ('sp', 3): 'meanz_19.8',
# ('tp', 3): 'meanz_0.25',
# ('ws', 3): 'meanz_0.63',
# ('z500', 3): 'meanz_13.05',
# ('z850', 3): 'meanz_4.81'}
"""
MODE = {('2t', 3): 'meanw_1.07',
(167, 0, 3): 'meanw_1.07',
('msl', 3): 'meanw_0',
(151, 0, 3): 'meanw_0',
('sd', 3): 'meanw_0.000125',
(141, 0, 3): 'meanw_0.000125',
('sp', 3): 'meanw_0.00354004',
(134, 0, 3): 'meanw_0.00354004',
('tp', 3): 'meanw_0.000125',
(228, 0, 3): 'meanw_0.000125',
('ws', 3): 'meanw_1.34687',
(10, 0, 3): 'meanw_1.34687',
(10, 10, 3): 'meanw_1.34687',
(129, 500, 3): 'meanw_0.004',
(129, 850, 3): 'meanw_0.00549316',
('z500', 3): 'meanw_0.004',
('z850', 3): 'meanw_0.00549316',
(228029, 0, 3): 'meanw_1',
('i10fg', 3): 'meanw_1',
(164, 0, 3): 'meanw_1',
('tcc', 3): 'meanw_1',
(255, 0, 3): 'meanw_1',
('logtp', 3): 'meanw_1',
('logsf', 3): 'meanw_1',
(144, 0, 3): 'meanw_1',
('expsf', 3): 'meanw_1',
('exptp', 3): 'meanw_1',
('expsd', 3): 'meanw_1',
('g1sf', 3): 'meanw_1',
('g1tp', 3): 'meanw_1',
('g1sd', 3): 'meanw_1',
('g2sf', 3): 'meanw_1',
('g2tp', 3): 'meanw_1',
('g2sd', 3): 'meanw_1',
(229, 0, 3): 'meanw_1',
('sf', 3): 'meanw_1',
('swh', 3): 'meanw_1',
('10u', 3): 'meanw_1',
('10v', 3): 'meanw_1',
(165, 0, 3): 'meanw_1',
(166, 0, 3): 'meanw_1',
(140229, 0, 3): 'meanw_1',
(140229, 3): 'meanw_1',
}
NAME = {'tp': "Total precipitations",
'ws': "10m wind speed",
'2t': "Surface air temperature",
'sp': "Surface pressure",
'msl': "Mean sea level pressure",
'sd': "Snow depth",
'sd': "Snow depth",
'z850': "Geopotential at 850 hPa",
'z500': "Geopotential at 500 hPa"}
SCALE = {'ws': (1, 0, "m/s"),
'2t': (1, -273.15, "°C"),
'tp': (1000, 0, "mm"),
'g1tp': (1000, 0, "mm"),
'g2tp': (1000, 0, "mm"),
'sd': (100 / 0.2, 0, "cm"), # Assumes %20 density
'g1sd': (100 / 0.2, 0, "cm"), # Assumes %20 density
'g2sd': (100 / 0.2, 0, "cm"), # Assumes %20 density
'sd (exp)': (100 / 0.2, 0, "cm"), # Assumes %20 density
'z500': (0.0101971621297793, 0, "dam"),
'z850': (0.0101971621297793, 0, "dam"),
'msl': (0.01, 0, "hPa"),
'sp': (0.01, 0, "hPa"),
'tcc': (1, 0, '%'),
'i10fg': (1, 0, "m/s"),
'logtp': (1000, 0, "log(1+m)"),
'exptp': (1000, 0, "exp(m)"),
'sf': (1000, 0, 'mm'),
'g1sf': (1000, 0, 'mm'),
'g2sf': (1000, 0, 'mm'),
'logsf': (1000, 0, "log(1+m)"),
'expsf': (1000, 0, "exp(m)"),
'10u': (1, 0, "m/s"),
'10v': (1, 0, "m/s"),
'swh': (1, 0, "m"),
}
PARAMS = {
'tp': 228,
}
def param(param):
return PARAMS[param]
class DefaultRetriever(object):
def execute(self, data):
c = ecmwfapi.ECMWFService("mars")
f, target = tempfile.mkstemp(".grib")
os.close(f)
c.execute(retrieve(data), target)
field = GribFile(target).next()
os.unlink(target)
return field
class WindRetriever(object):
def execute(self, data):
c = ecmwfapi.ECMWFService("mars")
f, target = tempfile.mkstemp(".grib")
os.close(f)
c.execute(retrieve(data), target)
w = GribFile(target)
u = w.next()
v = w.next()
q = np.sqrt(u.array * u.array + v.array * v.array)
u.set_array(q)
os.unlink(target)
return u
class PrecipRetriever(object):
def execute(self, data):
c = ecmwfapi.ECMWFService("mars")
f, target = tempfile.mkstemp(".grib")
os.close(f)
c.execute(retrieve(data), target)
w = list(GribFile(target))
if len(w) == 2:
cp = w[0]
lsp = w[1]
q = cp.array + lsp.array
cp.set_array(q)
field = cp
else:
field = w[0]
os.unlink(target)
return field
RETRIEVER = {'ws': WindRetriever, 'tp': PrecipRetriever}
def mars(data):
Retriever = RETRIEVER.get(data['param'], DefaultRetriever)
r = Retriever()
return r.execute(data)
def units(param):
u = SCALE[param]
return {"scale": u[0], "offset": u[1], "unit": u[2]}
"""
####################################################################
# Backend selection: use the local SQLite engine when running on the
# developer machine (detected via the Dropbox checkout path),
# otherwise the shared Postgres engine with server-side cache paths.
if os.path.exists('/Users/baudouin/Dropbox'):
    from analogues.backend.sqlite import SqliteEngine
    cdsdb = SqliteEngine()
    ROOT = '/Users/baudouin/Dropbox/phd/data'
    CACHE = ROOT
else:
    from analogues.backend.postgres import PostgresEngine
    cdsdb = PostgresEngine()
    ROOT = '/cache/analogues'
    CACHE = '/cache/data'
| [
11748,
28686,
198,
11748,
25064,
198,
17597,
13,
6978,
13,
33295,
7203,
14,
14629,
14,
12001,
14,
18211,
14,
70,
822,
62,
15042,
14,
16,
13,
1314,
13,
15,
14,
16630,
52,
14,
19,
13,
23,
13,
16,
14,
8019,
14,
29412,
17,
13,
22,
... | 1.815781 | 3,143 |
PI = 3.1415926535897932384626433832795028841971693993751058209
| [
11901,
796,
513,
13,
1415,
19707,
22980,
2327,
4531,
44750,
23721,
3510,
18897,
2091,
5999,
26050,
1120,
2078,
5705,
24991,
1433,
6052,
2079,
22318,
940,
3365,
22567,
198
] | 2.25 | 28 |
import ipywidgets as widgets
from traitlets import List, Unicode
@widgets.register("bonobo-widget.bonobo")
| [
11748,
20966,
88,
28029,
11407,
355,
40803,
198,
6738,
1291,
2578,
912,
1330,
7343,
11,
34371,
628,
198,
31,
28029,
11407,
13,
30238,
7203,
4189,
20391,
12,
42655,
13,
4189,
20391,
4943,
198
] | 3.30303 | 33 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
import re
from itertools import takewhile
from pyparsing import LineStart, Optional, Suppress
from regparser.citations import internal_citations
from regparser.grammar import appendix as grammar
from regparser.grammar.utils import Marker, QuickSearchable
from regparser.layer.formatting import table_xml_to_plaintext
from regparser.layer.key_terms import KeyTerms
from regparser.tree.depth import markers
from regparser.tree.depth.derive import derive_depths
from regparser.tree.paragraph import p_levels
from regparser.tree.struct import Node
from regparser.tree.xml_parser import matchers, tree_utils
from regparser.web.settings.parser import APPENDIX_IGNORE_SUBHEADER_LABEL
logger = logging.getLogger(__name__)
def remove_toc(appendix, letter):
    """The TOC at the top of certain appendices gives us trouble since it
    looks a *lot* like a sequence of headers. Remove it if present.

    Walks the siblings following the HED header, collecting candidate
    TOC entries until a header *repeats* (the repeat is the real
    content); only then are the collected entries deleted from the XML
    tree.  NOTE(review): `parsed_title` is not defined in the visible
    portion of this module -- presumably defined later; confirm."""
    fingerprints = set()
    potential_toc = set()
    for node in appendix.xpath("./HD[@SOURCE='HED']/following-sibling::*"):
        parsed = parsed_title(tree_utils.get_node_text(node), letter)
        if parsed:
            # The headers may not match character-per-character. Only
            # compare the parsed results.
            fingerprint = tuple(parsed)
            # A repeated header fingerprint means we've hit the real
            # content: everything collected so far was the TOC.
            if fingerprint in fingerprints and node.tag == 'HD':
                for el in potential_toc:
                    el.getparent().remove(el)
                return
            else:
                fingerprints.add(fingerprint)
                potential_toc.add(node)
        elif node.tag != 'GPH':  # Not a title and not an img => no TOC
            return
def appendix_headers(node):
    """Retrieve Appendix/Supplement section HD, WHED, and RESERVED tags."""
    header_xpath = './RESERVED|./HD[@SOURCE="HED"]|./WHED'
    return node.xpath(header_xpath)
def get_appendix_title(node):
    """Return the text of the first Appendix/Supplement header of `node`."""
    first_header = appendix_headers(node)[0]
    return tree_utils.get_node_text(first_header)
# One compiled regex per paragraph level, matching a level marker such
# as ". (a)" or "; (1)" that follows other text on the same line.
_first_markers = [re.compile(r'[\)\.|,|;|-|—]\s*\(' + lvl[0] + r'\)')
                  for lvl in p_levels]
class AppendixProcessor(object):
"""Processing the appendix requires a lot of state to be carried in
between xml nodes. Use a class to wrap that state so we can
compartmentalize processing the various tags"""
# Placeholder text/headers have the label p1 or h1; use that as an
# identifier when determining which depth elements should be placed
filler_regex = re.compile(r"[ph]\d+")
    def set_letter(self, appendix):
        """Find (and set) the appendix letter.

        Parses every visible appendix header; if more than one is found,
        a warning is logged and the *last* parsed letter wins."""
        for hd in appendix_headers(appendix):
            text = tree_utils.get_node_text(hd)

            if self.appendix_letter:
                logger.warning("Found two appendix headers: %s and %s",
                               self.appendix_letter, text)
            self.appendix_letter = grammar.headers.parseString(text).appendix
        return self.appendix_letter
    def hed(self, part, text):
        """HD with an HED source indicates the root of the appendix.

        Creates the appendix root node (labelled [part, letter]) at
        depth 0 and resets the paragraph counter and depth tracking."""
        n = Node(node_type=Node.APPENDIX, label=[part, self.appendix_letter],
                 title=text)
        self.m_stack.push_last((0, n))
        self.paragraph_counter = 0
        self.depth = 0
    def depth_from_ancestry(self, source_attr):
        """Derive a depth for a subheader without an explicit depth marker.

        Subheaders without explicit depth markers (e.g. Part I) are
        tricky. We look through their parents, trying to find a previous
        header that shared its SOURCE level (the next node would also share
        node level). If that doesn't work, find the last header and set
        depth one higher (as the next node is an unseen level)."""
        def not_known_depth_header(pair):
            """Hitting a known-depth header (see above) means we've gone too
            far"""
            _, parent = pair
            return (not parent.title or
                    not title_label_pair(
                        parent.title, self.appendix_letter, self.part))

        # First pass: reuse the depth of a previous header that carried
        # the same SOURCE attribute, stopping at any known-depth header.
        for lvl, parent in takewhile(not_known_depth_header,
                                     self.m_stack.lineage_with_level()):
            if (parent.source_xml is not None and
                    parent.source_xml.attrib.get('SOURCE') == source_attr):
                return lvl

        # Second pass, search for any header; place self one lower
        for lvl, parent in self.m_stack.lineage_with_level():
            if parent.title:
                pair = title_label_pair(
                    parent.title, self.appendix_letter, self.part)
                if pair:
                    return pair[1]
                else:
                    return lvl + 1
            # Non-placeholder ancestors also push us one level deeper.
            if not AppendixProcessor.filler_regex.match(parent.label[-1]):
                return lvl + 1
def subheader(self, xml_node, text):
"""Each appendix may contain multiple subheaders. Some of these are
obviously labeled (e.g. A-3 or Part III) and others are headers
without a specific label (we give them the h + # id)"""
source = xml_node.attrib.get('SOURCE')
pair = title_label_pair(text, self.appendix_letter, self.part)
# Use the depth indicated in the title
if pair:
label, title_depth = pair
self.depth = title_depth - 1
n = Node(node_type=Node.APPENDIX, label=[label],
title=text)
# Look through parents to determine which level this should be
else:
self.header_count += 1
n = Node(node_type=Node.APPENDIX, title=text,
label=['h' + str(self.header_count)],
source_xml=xml_node)
self.depth = self.depth_from_ancestry(source)
self.m_stack.add(self.depth, n)
@staticmethod
def insert_dashes(xml_node, text):
""" If paragraph has a SOURCE attribute with a value of
FP-DASH it fills out with dashes, like Foo_____. """
mtext = text
if xml_node.get('SOURCE') == 'FP-DASH':
mtext = mtext + '_____'
return mtext
def paragraph_with_marker(self, text, tagged_text):
"""The paragraph has a marker, like (a) or a. etc."""
# To aid in determining collapsed paragraphs, replace any
# keyterms present
node_for_keyterms = Node(
text, node_type=Node.APPENDIX, tagged_text=tagged_text,
label=[initial_marker(text)[0]]
)
keyterm = KeyTerms.keyterm_in_node(node_for_keyterms)
if keyterm:
mtext = text.replace(keyterm, '.' * len(keyterm))
else:
mtext = text
for mtext in split_paragraph_text(mtext):
if keyterm: # still need the original text
mtext = mtext.replace('.' * len(keyterm), keyterm)
node = Node(mtext, node_type=Node.APPENDIX,
label=[initial_marker(mtext)[0]])
self.nodes.append(node)
def paragraph_no_marker(self, text):
"""The paragraph has no (a) or a. etc."""
self.paragraph_counter += 1
n = Node(text, node_type=Node.APPENDIX,
label=['p' + str(self.paragraph_counter)])
self.nodes.append(n)
def fence(self, xml_node, fence_type):
"""Use github-like fencing to indicate this is a note or code"""
self.paragraph_counter += 1
texts = ["```" + fence_type]
for child in xml_node:
texts.append(tree_utils.get_node_text(child).strip())
texts.append("```")
n = Node("\n".join(texts), node_type=Node.APPENDIX,
label=['p' + str(self.paragraph_counter)],
source_xml=xml_node)
self.nodes.append(n)
def depth_zero_finder(self, node):
"""Look back through all known nodes to see if this is a
continuation of a previous set of paragraph markers"""
for depth, prev_node in self.m_stack.lineage_with_level():
for typ in (markers.lower, markers.upper, markers.ints,
markers.roman):
if prev_node.label[-1] in typ and node.label[-1] in typ:
typ = list(typ)
prev_idx = typ.index(prev_node.label[-1])
current_idx = typ.index(node.label[-1])
if current_idx == prev_idx + 1:
return depth
return self.depth + 1
def end_group(self):
"""We've hit a header (or the end of the appendix), so take the
collected paragraphs and determine their depths and insert into the
heap accordingly"""
if self.nodes:
nodes = list(reversed(self.nodes))
marker_list = [n.label[-1] for n in self.nodes if not
AppendixProcessor.filler_regex.match(n.label[-1])]
if marker_list:
results = derive_depths(marker_list)
# currently no heuristics applied
depths = list(reversed(
[a.depth for a in results[0].assignment]))
else:
depths = []
depth_zero = None # relative for beginning of marker depth
self.depth += 1
while nodes:
node = nodes.pop()
if AppendixProcessor.filler_regex.match(node.label[-1]):
# Not a marker paragraph
self.m_stack.add(self.depth, node)
else:
depth = depths.pop()
# Match old behavior, placing marker paragraphs as
# children within non-marker paragraphs above
if depth_zero is None:
depth_zero = self.depth_zero_finder(node)
self.depth = depth_zero + depth
self.m_stack.add(self.depth, node)
self.nodes = []
def split_paragraph_text(text):
"""Split text into a root node and its children (if the text contains
collapsed markers"""
marker_positions = []
for marker in _first_markers:
# text.index('(') to skip over the periods, spaces, etc.
marker_positions.extend(text.index('(', m.start())
for m in marker.finditer(text))
# Remove any citations
citations = internal_citations(text, require_marker=True)
marker_positions = [pos for pos in marker_positions
if not any(cit.start <= pos and cit.end >= pos
for cit in citations)]
texts = []
# Drop Zeros, add the end
break_points = [p for p in marker_positions if p] + [len(text)]
last_pos = 0
for pos in break_points:
texts.append(text[last_pos:pos])
last_pos = pos
return texts
@matchers.match_tag('APPENDIX')
def title_label_pair(text, appendix_letter, reg_part):
"""Return the label + depth as indicated by a title"""
pair = None
match = parsed_title(text, appendix_letter)
if match:
# May need to include the parenthesized letter(s)
has_parens = (match.paren_upper or match.paren_lower or
match.paren_digit or match.markerless_upper)
if has_parens:
pair = (''.join(match), 2)
elif match.a1:
pair = (match.a1, 2)
elif match.aI:
pair = (match.aI, 2)
if (pair is not None
and reg_part in APPENDIX_IGNORE_SUBHEADER_LABEL
and pair[0] in APPENDIX_IGNORE_SUBHEADER_LABEL[reg_part][
appendix_letter]):
logger.warning("Ignoring subheader label %s of appendix %s",
pair[0], appendix_letter)
pair = None
return pair
_parser = QuickSearchable(
grammar.paren_upper | grammar.paren_lower | grammar.paren_digit |
grammar.period_upper | grammar.period_digit | grammar.period_lower)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
11748,
18931,
198,
11748,
302,
198,
6738,
340,
861,
10141,
1330,
1011,
4514,
198,
198,
6738,
279,
4... | 2.218458 | 5,461 |
from django.apps import apps
| [
6738,
42625,
14208,
13,
18211,
1330,
6725,
628,
198
] | 3.444444 | 9 |
from flask import current_app as app
from core.utils import Utils
from main.db import MongoDB
from main.services.blacklist_helpers import BlacklistHelper
class UserService:
""" doc string for UserService """
def add_user(self, user_obj):
""" user_obj - user object """
user = self.mongo.find(self.collection, {"email": user_obj["email"]})
if not user:
return self.mongo.save(self.collection, user_obj)
else:
return f'User with {user_obj["email"]} already existed.'
def get_user(self, user_id):
""" Get User profile by id. ex _id: """
res = self.mongo.find_by_id(self.collection, user_id)
if res:
del res["password"]
return ("success", res, "ok", 200)
else:
return ("error", [], "Something went wrong.", 400)
def login(self, email):
""" email as input """
user = self.mongo.find(self.collection, {"email": email})
if user:
user = user[0]
return user
else:
return None
| [
6738,
42903,
1330,
1459,
62,
1324,
355,
598,
198,
198,
6738,
4755,
13,
26791,
1330,
7273,
4487,
198,
6738,
1388,
13,
9945,
1330,
42591,
11012,
198,
6738,
1388,
13,
30416,
13,
13424,
4868,
62,
16794,
364,
1330,
2619,
4868,
47429,
628,
... | 2.321888 | 466 |
# -*- coding: utf-8 -*-
import unittest
from aiostripe.error import StripeError
from aiostripe.test.helper import StripeUnitTestCase
if __name__ == '__main__':
unittest.main()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
555,
715,
395,
198,
198,
6738,
257,
72,
455,
380,
431,
13,
18224,
1330,
26137,
431,
12331,
198,
6738,
257,
72,
455,
380,
431,
13,
9288,
13,
2978,
525,
13... | 2.466667 | 75 |
import pytest
| [
11748,
12972,
9288,
628,
628,
198
] | 3 | 6 |
import sys
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from model.network import GNet, DNet
from loss import LeastSquare
| [
11748,
25064,
201,
198,
11748,
28686,
201,
198,
11748,
28034,
201,
198,
11748,
28034,
13,
20471,
355,
299,
77,
201,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
201,
198,
6738,
2746,
13,
27349,
1330,
402,
7934,
11,
360,
7934,
20... | 3.075472 | 53 |
# proxy module
from traitsui.editors.instance_editor import *
| [
2,
15741,
8265,
198,
6738,
12796,
9019,
13,
276,
6742,
13,
39098,
62,
35352,
1330,
1635,
198
] | 3.647059 | 17 |
import subprocess
CLIENT_IDENTIFIER = "T" + "ib" + "ia - "
| [
11748,
850,
14681,
198,
198,
5097,
28495,
62,
25256,
5064,
38311,
796,
366,
51,
1,
1343,
366,
571,
1,
1343,
366,
544,
532,
366,
628,
628,
198
] | 2.37037 | 27 |
from detectron2.evaluation import COCOEvaluator, inference_on_dataset,print_csv_format
import os.path
import json
import cv2
import numpy as np
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog, DatasetCatalog, DatasetMapper
from detectron2.data import transforms as T
from detectron2.structures import BoxMode
from detectron2.data import get_latim_dicts
import detectron2.utils.comm as comm
import logging
logger = logging.getLogger("detectron2")
def get_evaluator(cfg, dataset_name, output_folder=None):
"""
Create evaluator(s) for a given dataset.
This uses the special metadata "evaluator_type" associated with each builtin dataset.
For your own dataset, you can simply create an evaluator manually in your
script and do not have to worry about the hacky if-else logic here.
"""
if output_folder is None:
output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
evaluator_list = []
evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
if evaluator_type in ["coco", "coco_panoptic_seg"]:
evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder))
if len(evaluator_list) == 1:
return evaluator_list[0]
return DatasetEvaluators(evaluator_list)
import argparse
from detectron2.config import get_cfg
from detectron2.data import (
build_detection_test_loader,
build_detection_train_loader,
)
from detectron2.data import MetadataCatalog, DatasetCatalog, DatasetMapper
from detectron2.modeling import build_model
from collections import OrderedDict
import random
from detectron2.evaluation import COCOEvaluator, inference_on_dataset
from detectron2.data import build_detection_test_loader
from detectron2.utils.visualizer import ColorMode
from detectron2.data.datasets.coco import convert_to_coco_json
from detectron2.data.datasets import register_coco_instances
from shutil import rmtree
from tqdm import tqdm
import sys
sys.path.append("../")
from detectron2.data import gen_latim_dataset
from detectron2.checkpoint import DetectionCheckpointer
if __name__ == "__main__":
args = get_parser().parse_args()
cfg = setup_cfg(args)
gen_latim_dataset()
dir_name= cfg.MODEL.WEIGHTS.split(".")[0]
if os.path.exists(dir_name):
rmtree(dir_name)
os.mkdir(dir_name)
gt_dir=dir_name+"/vis_gt/"
os.mkdir(gt_dir)
pred_dir=dir_name+"/vis_pred/"
os.mkdir(pred_dir)
femur_tool_metadata = MetadataCatalog.get("for_detectron/train")
dataset_dicts = get_latim_dicts("dataset/train")
predictor = DefaultPredictor(cfg)
for d in tqdm(dataset_dicts):
im = cv2.imread(d["file_name"])
name = d["file_name"].split("/")[-1]
outputs = predictor(im)
v = Visualizer(im[:, :, ::-1], metadata=femur_tool_metadata)
out_pred = v.draw_instance_predictions(outputs["instances"].to("cpu"))
cv2.imwrite(pred_dir+name,out_pred.get_image()[:, :, ::-1])
v = Visualizer(im[:, :, ::-1], metadata=femur_tool_metadata)
out_gt = v.draw_dataset_dict(d)
cv2.imwrite(gt_dir+name,out_gt.get_image()[:, :, ::-1])
#
model = build_model(cfg)
DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS)
do_test(cfg, model)
| [
6738,
4886,
1313,
17,
13,
18206,
2288,
1330,
327,
4503,
27799,
2100,
84,
1352,
11,
32278,
62,
261,
62,
19608,
292,
316,
11,
4798,
62,
40664,
62,
18982,
198,
11748,
28686,
13,
6978,
198,
11748,
33918,
198,
11748,
269,
85,
17,
198,
11... | 2.567243 | 1,331 |
# Search key
if __name__ == '__main__':
main()
| [
2,
11140,
1994,
201,
198,
201,
198,
201,
198,
201,
198,
201,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
201,
198,
220,
220,
220,
1388,
3419,
201,
198,
201,
198,
201,
198,
201,
198,
201,
198,
201,
198,
201,
198,... | 1.535714 | 56 |
#!/usr/bin/env python3
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests for sqlite_cherry_picker.py.
These tests should be getting picked up by the PRESUBMIT.py in the parent
directory.
"""
from pathlib import Path
import io
import os
import shutil
import tempfile
import unittest
import sqlite_cherry_picker
# pylint: disable=W0212,C0103,C0115,C0116
if __name__ == '__main__':
unittest.main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
2,
15069,
12131,
383,
18255,
1505,
46665,
13,
1439,
2489,
10395,
13,
198,
2,
5765,
286,
428,
2723,
2438,
318,
21825,
416,
257,
347,
10305,
12,
7635,
5964,
326,
460,
307,
198... | 3.011429 | 175 |
#!/usr/bin/env python
"""
___ __ __
/ _ \___ / /__ / / __ _ ___ ____
/ // / _ \/ / _ \/ _ \/ ' \/ _ `/ _ \
/____/\___/_/ .__/_//_/_/_/_/\_,_/_//_/
/_/
The MIT License (MIT)
Copyright (c) 2015 Dolphman
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from setuptools import setup
setup(name='pyns',
install_requires=["nationstates>=1.1.33.63"],
version="0.0.0.1",
description='Nationstates for Python',
author='Joshua Walters',
author_email='therealdolphman@gmail.com',
url='https://github.com/Dolphman/wethepeople',
packages=['pyns', 'pyns.core'],
keywords=['Nationstates for Python', 'Python API', 'api'],
classifiers=["License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Topic :: Utilities",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.0",
"Programming Language :: Python :: 3.1",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5"],
license="MIT"
)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
37811,
628,
220,
220,
46444,
220,
220,
220,
220,
220,
220,
11593,
220,
220,
220,
220,
11593,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
19... | 2.578548 | 923 |
import os
import io
import tensorflow as tf
import standard_fields as fields
import cv2
import scipy
import dataset_util
flags = tf.app.flags
flags.DEFINE_string('output_path', '/share/zhui/mnt/train.tfrecord', 'tfrecord filename')
flags.DEFINE_string('tags_file_path', '/share/zhui/mnt/ramdisk/max/imlist_filted.txt', 'tags file file')
FLAGS = flags.FLAGS
if __name__ == '__main__':
tf.app.run()
| [
11748,
28686,
220,
201,
198,
11748,
33245,
201,
198,
11748,
11192,
273,
11125,
355,
48700,
220,
201,
198,
11748,
3210,
62,
25747,
355,
7032,
201,
198,
11748,
269,
85,
17,
220,
201,
198,
11748,
629,
541,
88,
201,
198,
11748,
27039,
62,... | 2.38172 | 186 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import datetime
import tensorflow as tf
from .utils import positional_encoding
def scaled_dot_product_attention(q, k, v, mask):
"""Calculate the attention weights.
q, k, v must have matching leading dimensions.
k, v must have matching penultimate dimension, i.e.: seq_len_k = seq_len_v.
The mask has different shapes depending on its type(padding or look ahead)
but it must be broadcastable for addition.
Args:
q: query shape == (..., seq_len_q, depth)
k: key shape == (..., seq_len_k, depth)
v: value shape == (..., seq_len_v, depth_v)
mask: Float tensor with shape broadcastable
to (..., seq_len_q, seq_len_k). Defaults to None.
Returns:
output, attention_weights
"""
matmul_qk = tf.matmul(q, k, transpose_b=True) # (..., seq_len_q, seq_len_k)
# scale matmul_qk
dk = tf.cast(tf.shape(k)[-1], tf.float32)
scaled_attention_logits = matmul_qk / tf.math.sqrt(dk)
# add the mask to the scaled tensor.
if mask is not None:
scaled_attention_logits += (mask * -1e9)
# softmax is normalized on the last axis (seq_len_k) so that the scores
# add up to 1.
attention_weights = tf.nn.softmax(scaled_attention_logits, axis=-1) # (..., seq_len_q, seq_len_k)
output = tf.matmul(attention_weights, v) # (..., seq_len_q, depth_v)
return output, attention_weights
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,
11593,
37443,
834,
1330,
7297,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
11748,
4818,
8079,
... | 2.582638 | 599 |
#!python
import os, sys, string, time, BaseHTTPServer, getopt, re, subprocess, webbrowser
from operator import itemgetter
from utils import *
from classify import Classify
sys.path.append(INITIAL_UTILS)
from ruffus import *
from create_summary import *
_readlibs = []
_skipsteps = []
_cls = None
_settings = Settings()
openbrowser = False
if os.environ.get('DISPLAY') != None:
openbrowser = True
@follows(Classify)
@posttask(touch_file("%s/Logs/postprocess.ok"%(_settings.rundir)))
@files("%s/Assemble/out/%s.asm.contig"%(_settings.rundir,_settings.PREFIX),"%s/Postprocess/%s.scf.fa"%(_settings.rundir,_settings.PREFIX))
| [
2,
0,
29412,
198,
198,
11748,
28686,
11,
25064,
11,
4731,
11,
640,
11,
7308,
6535,
28820,
18497,
11,
651,
8738,
11,
302,
11,
850,
14681,
11,
3992,
40259,
198,
6738,
10088,
1330,
2378,
1136,
353,
198,
198,
6738,
3384,
4487,
1330,
163... | 2.692308 | 234 |
import os
ANSWERS_MODEL_FOLDER = os.path.sep.join(['Projects', 'Project 3'])
ANSWERS_OUTPUT_PATH = os.path.sep.join(['gradetools', 'project_3', 'answers.json'])
NUM_ITERATIONS = 10000
TOLERANCES = dict(
mv_debt=800000000,
mv_equity=1,
wacc_mean=0.005,
wacc_std=0.002,
other=0.00001
)
DATA_DIR = os.path.sep.join(['Projects', 'Project 3'])
DATA_FILE_NAMES = [
'SP500 Prices.xlsx',
'WMT Balance Sheet.xlsx',
'WMT Debt Details.xls',
'WMT Income Statement.xlsx',
'WMT Prices.xlsx'
]
DATA_FILES = [os.path.join(DATA_DIR, file) for file in DATA_FILE_NAMES]
INPUT_DICTS = [
dict(
model=dict(
bond_years=15,
bond_coupon=0.0525,
bond_price=130.58,
bond_par=100,
risk_free=0.005,
price=119.51,
shares_outstanding=2850000000,
libor_rate=0.0196,
),
sim=dict(
beta_std=0.2,
mkt_ret_std=0.03,
bond_price_std=30,
tax_rate_std=0.05,
)
),
dict(
model=dict(
bond_years=10,
bond_coupon=0.0325,
bond_price=120.58,
bond_par=150,
risk_free=0.01,
price=87.51,
shares_outstanding=1250000000,
libor_rate=0.025,
),
sim=dict(
beta_std=0.1,
mkt_ret_std=0.02,
bond_price_std=20,
tax_rate_std=0.025,
)
),
]
for inp_dict in INPUT_DICTS:
inp_dict['sim']['num_iter'] = NUM_ITERATIONS
# TODO [#6]: set locations of excel inputs and outputs
EXCEL_INPUT_LOCATIONS = dict(
bond_years='B3',
bond_coupon='B4',
bond_price='B5',
bond_par='B6',
risk_free='B7',
price='B8',
shares_outstanding='B9',
libor_rate='B10',
num_iter='B13',
beta_std='B15',
mkt_ret_std='B16',
bond_price_std='B17',
tax_rate_std='B18',
)
EXCEL_OUTPUT_LOCATIONS = {
'wacc': 'B24',
'coe': 'B27',
'mv_equity': 'B28',
'pretax_cost_of_debt': 'B29',
'mv_debt': 'B31',
'aftertax_cost_of_debt': 'B30',
'mc_table': 'E26',
}
| [
11748,
28686,
198,
198,
15037,
54,
4877,
62,
33365,
3698,
62,
37,
3535,
14418,
796,
28686,
13,
6978,
13,
325,
79,
13,
22179,
7,
17816,
16775,
82,
3256,
705,
16775,
513,
6,
12962,
198,
15037,
54,
4877,
62,
2606,
7250,
3843,
62,
34219... | 1.744715 | 1,230 |
"""
Base view
"""
import uuid
import base64
from ..views import DEFAULT_LIBRARY_NAME_PREFIX, DEFAULT_LIBRARY_DESCRIPTION, \
USER_ID_KEYWORD
from flask import request, current_app
from flask_restful import Resource
from ..models import db, User, Library, Permissions
from ..client import client
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm.exc import NoResultFound
from ..biblib_exceptions import BackendIntegrityError, PermissionDeniedError
class BaseView(Resource):
"""
A base view class to keep a single version of common functions used between
all of the views.
"""
@staticmethod
def helper_uuid_to_slug(library_uuid):
"""
Convert a UUID to a slug
See a discussion about the details here:
http://stackoverflow.com/questions/12270852/
convert-uuid-32-character-hex-string-into-a-
youtube-style-short-id-and-back
:param library_uuid: unique identifier for the library
:return: library_slug: base64 URL safe slug
"""
library_slug = base64.urlsafe_b64encode(library_uuid.bytes)
library_slug = library_slug.rstrip('=\n').replace('/', '_')
current_app.logger.info('Converted uuid: {0} to slug: {1}'
.format(library_uuid, library_slug))
return library_slug
@staticmethod
def helper_slug_to_uuid(library_slug):
"""
Convert a slug to a UUID
See a discussion about the details here:
http://stackoverflow.com/questions/12270852/
convert-uuid-32-character-hex-string-into-a-
youtube-style-short-id-and-back
Keep in mind that base64 only works on bytes, and so they have to be
encoded in ASCII. Flask uses unicode, and so you must modify the
encoding before passing it to base64. This is fine, given we output
all our encoded URLs for libraries as strings encoded in ASCII and do
not accept any unicode characters.
:param library_slug: base64 URL safe slug
:return: library_uuid: unique identifier for the library
"""
library_uuid = (library_slug + '==').replace('_', '/')
library_uuid = library_uuid.encode('ascii')
library_uuid = uuid.UUID(bytes=base64.urlsafe_b64decode(library_uuid))
current_app.logger.info('Converted slug: {0} to uuid: {1}'
.format(library_slug, library_uuid))
return str(library_uuid)
@staticmethod
def helper_get_user_id():
"""
Helper function: get the user id from the header, otherwise raise
a key error exception
:return: unique API user ID
"""
try:
user = request.headers[USER_ID_KEYWORD]
if user.isdigit():
user = int(user)
return user
except KeyError:
current_app.logger.error('No username passed')
raise
@staticmethod
def helper_create_user(absolute_uid):
"""
Creates a user in the database with a UID from the API
:param absolute_uid: UID from the API
:return: SQLAlchemy User instance
"""
try:
user = User(absolute_uid=absolute_uid)
db.session.add(user)
db.session.commit()
current_app.logger.info('Successfully created user: {0} [API] as '
'{1} [Microservice]'
.format(absolute_uid, user.id))
return user
except IntegrityError as error:
current_app.logger.error('IntegrityError. User: {0:d} was not'
'added. Full traceback: {1}'
.format(absolute_uid, error))
raise
@staticmethod
def helper_user_exists(absolute_uid):
"""
Checks if a use exists before it would attempt to create one
:param absolute_uid: UID from the API
:return: boolean for if the user exists
"""
user_count = User.query.filter(User.absolute_uid == absolute_uid).all()
user_count = len(user_count)
if user_count == 1:
current_app.logger.info('User exists in database: {0} [API]'
.format(absolute_uid))
return True
elif user_count == 0:
current_app.logger.warning('User does not exist in database: {0} '
'[API]'.format(absolute_uid))
return False
@staticmethod
def helper_absolute_uid_to_service_uid(absolute_uid):
"""
Convert the API UID to the BibLib service ID.
If the user does not exist in the database, first create a user.
:param absolute_uid: API UID
:return: BibLib service ID
"""
if not BaseView.helper_user_exists(absolute_uid=absolute_uid):
user = BaseView.helper_create_user(absolute_uid=absolute_uid)
else:
user = User.query.filter(User.absolute_uid == absolute_uid).one()
current_app.logger.info('User found: {0} -> {1}'
.format(absolute_uid, user.id))
return user.id
@staticmethod
def helper_email_to_api_uid(permission_data):
"""
A proxy to the user/e-mail resolver service. Passes on any errors from
the API.
:param permission_data: dictionary that should contain an e-mail key
:return: int of the user id
"""
try:
service = '{api}/{email}'.format(
api=current_app.config['BIBLIB_USER_EMAIL_ADSWS_API_URL'],
email=permission_data['email']
)
current_app.logger.info('Obtaining UID of user: {0}'
.format(permission_data['email']))
response = client().get(
service
)
except KeyError as error:
current_app.logger.error('No user email provided. [{0}]'
.format(error))
raise
if response.status_code == 200:
return int(response.json()['id'])
elif response.status_code == 404:
raise NoResultFound('API does not have this user')
else:
raise Exception('Unknown internal error')
@staticmethod
def helper_access_allowed(service_uid, library_id, access_type):
"""
Determines if the given user has permissions to look at the content
of a library.
:param service_uid: the user ID within this microservice
:param library_id: the unique ID of the library
:param access_type: list of access types to check
:return: boolean, access (True), no access (False)
"""
try:
permissions = Permissions.query.filter(
Permissions.library_id == library_id,
Permissions.user_id == service_uid
).one()
return getattr(permissions, access_type)
except NoResultFound as error:
current_app.logger.error('No permissions for '
'user: {0}, library: {1}, permission: {2}'
' [{3}]'.format(service_uid, library_id,
access_type, error))
return False
@staticmethod
def helper_library_exists(library_id):
"""
Helper function that checks if a library exists in the database or not
by catching the raise and returning a True/False statement.
:param library_id: the unique ID of the library
:return: bool for exists (True) or does not (False)
"""
try:
Library.query.filter(Library.id == library_id).one()
return True
except NoResultFound:
return False
@staticmethod
def helper_validate_library_data(service_uid, library_data):
"""
Validates the library data to ensure the user does not give empty
content for the title and description.
:param service_uid: the user ID within this microservice
:param library_data: content needed to create a library
:return: validated name and description
"""
_name = library_data.get('name') or DEFAULT_LIBRARY_NAME_PREFIX
_description = library_data.get('description') or \
DEFAULT_LIBRARY_DESCRIPTION
current_app.logger.info('Creating library for user_service: {0:d}, '
'with properties: {1}'
.format(service_uid, library_data))
# We want to ensure that the users have unique library names. However,
# it should be possible that they have access to other libraries from
# other people, that have the same name
library_names = \
[i.library.name for i in
Permissions.query.filter(Permissions.user_id == service_uid,
Permissions.owner == True).all()]
matches = [name for name in library_names if name == _name]
if matches:
current_app.logger.error('Name supplied for the library already '
'exists: "{0}" ["{1}"]'.format(_name,
matches))
raise BackendIntegrityError('Library name already exists.')
if _name == DEFAULT_LIBRARY_NAME_PREFIX:
default_names = [lib_name for lib_name
in library_names
if DEFAULT_LIBRARY_NAME_PREFIX
in lib_name]
_extension = len(default_names) + 1
_name = '{0} {1}'.format(_name,
_extension)
library_out = {}
for key in library_data:
library_out[key] = library_data[key]
library_out['name'] = _name
library_out['description'] = _description
return library_out
| [
37811,
198,
14881,
1570,
198,
37811,
198,
11748,
334,
27112,
198,
11748,
2779,
2414,
198,
198,
6738,
11485,
33571,
1330,
5550,
38865,
62,
40347,
49,
13153,
62,
20608,
62,
47,
31688,
10426,
11,
5550,
38865,
62,
40347,
49,
13153,
62,
3091... | 2.161489 | 4,700 |
#!/usr/bin/env python
# coding=utf-8
import os
import hashlib
import logging
from queue import Queue
import requests
from gevent import monkey, sleep
from gevent.pool import Pool
# Do all of the default monkey patching (calls every other applicable
# function in this module).
monkey.patch_all()
CONFIG = Config()
if __name__ == "__main__":
try:
downloader = Downloader()
downloader.run()
except KeyboardInterrupt:
print("You have canceled all jobs.")
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
19617,
28,
40477,
12,
23,
198,
198,
11748,
28686,
198,
11748,
12234,
8019,
198,
11748,
18931,
198,
6738,
16834,
1330,
4670,
518,
198,
198,
11748,
7007,
198,
6738,
4903,
1151,
1330,
... | 3.030675 | 163 |
from flask import Flask
from .config import config
def create_app(env: str=None) -> Flask:
"""创建flask应用对象
Args:
env: 运行环境:development/testing/production
"""
if not env:
env = 'default'
app = Flask(__name__)
app.config.from_object(config[env])
config[env].init_app(app)
return app
| [
6738,
42903,
1330,
46947,
198,
198,
6738,
764,
11250,
1330,
4566,
628,
198,
4299,
2251,
62,
1324,
7,
24330,
25,
965,
28,
14202,
8,
4613,
46947,
25,
198,
220,
220,
220,
37227,
26344,
249,
161,
119,
118,
2704,
2093,
41753,
242,
18796,
... | 2.08805 | 159 |
import sys
import os
import time
import vk_api
import pandas as pd
from tqdm import tnrange, tqdm_notebook, tqdm
from time import sleep
import django
# Point Python and Django at the project before importing its models.
VKData = '/home/rust/Documents/Azat/vk_crawler/code/VKData'
sys.path.insert(0, VKData)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "VKData.settings.py")
django.setup()
from getData.models import getData
from getData.models import MutualFriends
# VK credentials (redacted).
login, password = '$$$', '$$$'
vk_session = vk_api.VkApi(login, password)
try:
    vk_session.auth()
except vk_api.AuthError as error_msg:
    print(error_msg)
vk = vk_session.get_api()
# Load every known user once; a set of vk ids gives O(1) membership tests
# when intersecting with each user's friend list below.
vk_ids = getData.objects.values('vk_id', 'id_crm', 'timestamp_friends')
total = getData.objects.count()  # DB size
vk_set_all = set()
for i in tqdm(vk_ids, desc='Making set'):
    vk_set_all.add(i['vk_id'])
print(len(vk_set_all))
# Console logging switch: 0 = silent, 1 = verbose progress output.
log = 0
vk_objects = getData.objects.all()
if log == 1:
    print('Total records in DB ',total)
for i in tqdm(vk_objects, desc='Total progress'): # iterate over DB records with a tqdm status bar
    if i.timestamp_friends is None:
        if log == 1:
            print('Is data collected:',i.timestamp_friends)
            print('Cheking vk_id ---->',i.vk_id)
        try:
            res = vk.friends.get(user_id = i.vk_id) # query the vk api
            if log == 1:
                print('List of all friends -------------------------------------------------------------------->',res)
            vk_set_to = set(res['items']) # this user's friends from the vk api
            # Mutual friends = intersection of the friend list with the DB ids.
            res_set = vk_set_all.intersection(vk_set_to)
            if log == 1:
                print('------------------------------------------------------------------------------------------')
                print('resulted set of mutual friends --->', res_set)
            # intersection() always returns a set (never None), so iterate it
            # directly; an empty set simply skips the loop.
            for r in res_set:
                if log == 1:
                    print('vk_id added in DB ----->',r)
                cur = getData.objects.filter(vk_id = r) # find the friend's id_crm in the DB
                # Save the mutual-friendship record.
                gd, is_create = MutualFriends.objects.get_or_create(friend_vk_id=r, vk_id=i.vk_id,
                                       id_crm_id = i.id_crm,
                                       friend_id_crm_id = cur.values('id_crm')[0]['id_crm'])
                gd.save()
            # Mark this user as processed so reruns skip it.
            i.timestamp_friends = time.asctime( time.localtime(time.time()) )
            i.save()
            if log == 1:
                print('___________________________________________________________________________________________________')
        except Exception as msg:
            # vk api errors (private/deleted profiles etc.) must not stop the crawl.
            if log == 1:
                print(msg, i.id_crm)
    else:
        # Friends were already collected for this user; skip it.
        if log == 1:
            print('Is data collected:',i.timestamp_friends)
| [
11748,
25064,
198,
11748,
28686,
220,
198,
11748,
640,
198,
11748,
410,
74,
62,
15042,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
256,
80,
36020,
1330,
256,
77,
9521,
11,
256,
80,
36020,
62,
11295,
2070,
11,
256,
80,
36020,
19... | 1.732093 | 2,150 |
import os
import time
from pathlib import Path
from uuid import uuid4
from git import Repo
from github import Github
org = None
def _get_openpecha_data_org(org_name=None, token=None):
    """Return the OpenPecha github org handle (lazily created singleton).

    Args:
        org_name: Github organization name; defaults to the
            OPENPECHA_DATA_GITHUB_ORG environment variable.
        token: Github access token; defaults to the GITHUB_TOKEN
            environment variable.

    Raises:
        KeyError: if org_name is not given and OPENPECHA_DATA_GITHUB_ORG
            is unset.
    """
    global org
    if org is None:
        if not token:
            token = os.environ.get("GITHUB_TOKEN")
        if not org_name:
            org_name = os.environ["OPENPECHA_DATA_GITHUB_ORG"]
        # Bugfix: the debug print(token)/print(org_name) calls were removed —
        # echoing an access token to stdout leaks credentials into logs.
        g = Github(token)
        org = g.get_organization(org_name)
    return org
| [
11748,
28686,
198,
11748,
640,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
334,
27112,
1330,
334,
27112,
19,
198,
198,
6738,
17606,
1330,
1432,
78,
198,
6738,
33084,
1330,
38994,
198,
198,
2398,
796,
6045,
628,
198,
4299,
4808,
1136,... | 2.193916 | 263 |
# The MIT License (MIT)
#
# Copyright (C) 2015 - Antoine Busque <abusque@efficios.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from . import stats
from .analysis import Analysis, PeriodData
| [
2,
383,
17168,
13789,
357,
36393,
8,
198,
2,
198,
2,
15069,
357,
34,
8,
1853,
532,
3738,
42722,
5869,
4188,
1279,
46844,
4188,
31,
24531,
4267,
13,
785,
29,
198,
2,
198,
2,
2448,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
... | 3.798742 | 318 |
import io
from PIL import Image
def gen_image(name='test.png'):
    """Build and return an in-memory PNG image file.

    Creates a solid 100x100 RGBA image, writes it into a BytesIO buffer
    whose ``name`` attribute is set to *name*, and rewinds the buffer so
    it can be read immediately (e.g. as an upload fixture in tests).
    """
    buffer = io.BytesIO()
    Image.new('RGBA', size=(100, 100), color=(155, 0, 0)).save(buffer, 'png')
    buffer.name = name
    buffer.seek(0)
    return buffer
| [
11748,
33245,
198,
198,
6738,
350,
4146,
1330,
7412,
628,
198,
4299,
2429,
62,
9060,
7,
3672,
11639,
9288,
13,
11134,
6,
2599,
198,
220,
220,
220,
37227,
140,
240,
25443,
115,
38857,
21169,
16142,
141,
231,
16142,
16843,
20375,
220,
1... | 1.992857 | 140 |
import ctypes
from ads.nonzerobasedarray import NonzeroBasedArray
| [
201,
198,
11748,
269,
19199,
201,
198,
6738,
9011,
13,
13159,
9107,
672,
839,
18747,
1330,
8504,
22570,
15001,
19182,
201,
198,
201,
198
] | 3 | 24 |
from PyQt5.QtWidgets import *
from PyQt5.QtGui import QIcon
from PyQt5.QtCore import Qt, QSize, pyqtSignal, pyqtSlot
from PyQt5.QtCore import pyqtSignal
| [
6738,
9485,
48,
83,
20,
13,
48,
83,
54,
312,
11407,
1330,
1635,
198,
6738,
9485,
48,
83,
20,
13,
48,
83,
8205,
72,
1330,
1195,
19578,
198,
6738,
9485,
48,
83,
20,
13,
48,
83,
14055,
1330,
33734,
11,
1195,
10699,
11,
12972,
39568... | 2.279412 | 68 |
import pygame
# Shorthand alias for pygame's 2D vector class, used for positions/velocities.
vec = pygame.math.Vector2
| [
11748,
12972,
6057,
198,
35138,
796,
12972,
6057,
13,
11018,
13,
38469,
17,
628
] | 2.928571 | 14 |
from office365.runtime.client_value import ClientValue
class ChangeNotificationCollection(ClientValue):
    """Represents a collection of resource change notifications sent to the subscriber.

    NOTE(review): presumably mirrors the Microsoft Graph
    changeNotificationCollection resource — confirm against the API schema.
    """
| [
6738,
2607,
24760,
13,
43282,
13,
16366,
62,
8367,
1330,
20985,
11395,
628,
198,
4871,
9794,
3673,
2649,
36307,
7,
11792,
11395,
2599,
198,
220,
220,
220,
37227,
6207,
6629,
257,
4947,
286,
8271,
1487,
19605,
1908,
284,
262,
32944,
526,... | 4.581395 | 43 |
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 11 07:34:12 2020

@author: cesarzosa

Loads a pickled artwork DataFrame, counts the unique artists, and filters
the rows down to works by William Blake.
"""
import pandas as pd
# Load the pickled artwork DataFrame.
path_save = "./data/artwork.pickle"
df = pd.read_pickle(path_save)
serie_artistas_dup = df['artist']
artistas = pd.unique(serie_artistas_dup)
print(type(artistas)) # numpy array
print(artistas.size)
print(len(artistas))
# Filter the Series on one column and keep only the required values
blake = df['artist'] == 'Blake, William' # boolean Series
print(blake.value_counts()) # count matches vs non-matches
df_blake = df[blake] # DataFrame restricted to Blake's works
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
30030,
2447,
1367,
8753,
25,
2682,
25,
1065,
12131,
198,
198,
31,
9800,
25,
269,
18964,
89,
8546,
198,
37811,
198,
198,
11748,
19798,
292,
355,
... | 2.426606 | 218 |
"""This module defines shortcuts for generating WCS instances and working
with them. The bounding boxes and shapes used in this module all use
the same ordering as WCS, i.e. column major (so {ra,dec} rather than
{dec,ra}). Coordinates are assigned to pixel centers, as WCS does natively,
but bounding boxes include the whole pixels, not just their centers, which
is where the 0.5 stuff comes from."""
import numpy as np
from astropy.wcs import WCS
# The origin argument used in the wcs pix<->world routines seems to
# have to be 1 rather than the 0 one would expect. For example,
# if wcs is CAR(crval=(0,0),crpix=(0,0),cdelt=(1,1)), then
# pix2world(0,0,1) is (0,0) while pix2world(0,0,0) is (-1,-1).
#
# No! the problem is that everythin in the fits header counts from 1,
# so the default crpix should be (1,1), not (0,0). With
# CAR(crval(0,0),crpix(1,1),cdelt(1,1)) we get
# pix2world(1,1,1) = (0,0) and pix2world(0,0,0) = (0,0)
# Useful stuff to be able to do:
# * Create a wcs from (point,res)
# * Create a wcs from (box,res)
# * Create a wcs from (box,shape)
# * Create a wcs from (point,res,shape)
# Can support this by taking arguments:
# pos: point[2] or box[2,2], mandatory
# res: num or [2], optional
# shape: [2], optional
# In cases where shape is not specified, the implied
# shape can be recovered from the wcs and a box by computing
# the pixel coordinates of the corners. So we don't need to return
# it.
# 1. Construct wcs from box, res (and return shape?)
# 2. Construct wcs from box, shape
# 3. Construct wcs from point, res (this is the most primitive version)
# Degree <-> radian conversion factors used throughout this module.
deg2rad = np.pi/180
rad2deg = 1/deg2rad
def describe(wcs):
	"""Return a compact one-line summary of a WCS object.

	astropy.wcs.WCS lacks a useful str implementation; this builds a
	replacement of the form "sys:{cdelt:[..],crval:[..],crpix:[..],pv..}"."""
	sys = wcs.wcs.ctype[0][-3:].lower()
	n = wcs.naxis
	num_fmt = ",".join(["%.4g"]*n)
	fields = ("cdelt:[%s],crval:[%s],crpix:[%s]" % (num_fmt, num_fmt, num_fmt)) % (tuple(wcs.wcs.cdelt) + tuple(wcs.wcs.crval) + tuple(wcs.wcs.crpix))
	for p in wcs.wcs.get_pv():
		fields += ",pv[%d,%d]=%.3g" % p
	return "%s:{%s}" % (sys, fields)
# Add this to all WCSes in this class
# (monkey-patch: every astropy WCS instance now reprs via describe()).
WCS.__repr__ = describe
def is_compatible(wcs1, wcs2, tol=1e-3):
	"""Checks whether two world coordinate systems represent
	(shifted) versions of the same pixelizations, such that
	every pixel center in wcs1 correspond to a pixel center in
	wcs2. For now, they also have to have the pixels going
	in the same direction."""
	h1 = wcs1.to_header()
	h2 = wcs2.to_header()
	# Every header key except the reference point (CRVAL/CRPIX) and the pixel
	# size (CDELT) must match exactly — e.g. the projection type.
	for key in h1:
		if key.startswith("CRVAL") or key.startswith("CRPIX") or key.startswith("CDELT"): continue
		if key not in h2 or h2[key] != h1[key]: return False
	# Pixel sizes must agree to within the relative tolerance.
	if np.max(np.abs(wcs1.wcs.cdelt-wcs2.wcs.cdelt))/np.min(np.abs(wcs1.wcs.cdelt)) > tol: return False
	# Finally the grids must line up: the offset between the two reference
	# points, measured in pixels, must be (close to) a whole number.
	crdelt = wcs1.wcs.crval - wcs2.wcs.crval
	cpdelt = wcs1.wcs.crpix - wcs2.wcs.crpix
	subpix = (crdelt/wcs1.wcs.cdelt - cpdelt + 0.5)%1-0.5
	if np.max(np.abs(subpix)) > tol: return False
	return True
def is_plain(wcs):
	"""Return True if wcs represents plain, non-specific, non-wrapping
	coordinates rather than an angular coordinate system, as indicated
	by an empty first axis type."""
	return not wcs.wcs.ctype[0]
def scale(wcs, scale=1, rowmajor=False, corner=False):
	"""Scales the linear pixel density of a wcs by the given factor, which can be specified
	per axis. This is the same as dividing the pixel size by the same number.

	scale may be a scalar or a [2] array (wcs/column-major order unless
	rowmajor=True). If corner is True the scaling is performed about the
	pixel corner instead of the pixel center. Returns a new wcs; the input
	is not modified."""
	scale = np.zeros(2)+scale
	if rowmajor: scale = scale[::-1]
	wcs = wcs.deepcopy()
	if not corner:
		# Shift by half a pixel so the scaling is done about pixel centers,
		# matching the pixel-center convention used in this module.
		wcs.wcs.crpix -= 0.5
	wcs.wcs.crpix *= scale
	wcs.wcs.cdelt /= scale
	if not corner:
		wcs.wcs.crpix += 0.5
	return wcs
# I need to update this to work better with full-sky stuff.
# Should be easy to construct something that's part of a
# clenshaw-curtis or fejer sky.
def plain(pos, res=None, shape=None, rowmajor=False, ref=None):
	"""Set up a plain coordinate system (non-cyclical).

	pos is a [2] center or [{from,to},2] bounding box in degrees; at least
	one of res or shape must be given. See build() for details."""
	pos, res, shape, mid = validate(pos, res, shape, rowmajor)
	w = WCS(naxis=2)
	w.wcs.crval = mid
	# Bugfix: compare strings with ==, not "is". Identity comparison with a
	# literal only worked by accident of CPython string interning.
	if ref == "standard": ref = None
	return finalize(w, pos, res, shape, ref=ref)
def car(pos, res=None, shape=None, rowmajor=False, ref=None):
	"""Set up a plate carree (CAR) system. See the build function for details.

	The reference declination is placed on the equator (crval[1]=0), with
	ref="standard" snapping the pixel grid to (0,0)."""
	pos, res, shape, mid = validate(pos, res, shape, rowmajor)
	w = WCS(naxis=2)
	w.wcs.ctype = ["RA---CAR", "DEC--CAR"]
	w.wcs.crval = np.array([mid[0],0])
	# Bugfix: string comparison must use ==, not the identity operator "is".
	if ref == "standard": ref = (0,0)
	return finalize(w, pos, res, shape, ref=ref)
def cea(pos, res=None, shape=None, rowmajor=False, lam=None, ref=None):
	"""Set up a cylindrical equal area (CEA) system. See the build function for details.

	lam is the PV2_1 lambda parameter; when omitted it defaults to
	cos^2(dec_mid), intended to give roughly square pixels near the
	central declination."""
	pos, res, shape, mid = validate(pos, res, shape, rowmajor)
	if lam is None:
		lam = np.cos(mid[1]*deg2rad)**2
	w = WCS(naxis=2)
	w.wcs.ctype = ["RA---CEA", "DEC--CEA"]
	w.wcs.set_pv([(2,1,lam)])
	w.wcs.crval = np.array([mid[0],0])
	# Bugfix: string comparison must use ==, not the identity operator "is".
	if ref == "standard": ref = (0,0)
	return finalize(w, pos, res, shape, ref=ref)
def zea(pos, res=None, shape=None, rowmajor=False, ref=None):
	"""Setups up an oblate Lambert's azimuthal equal area (ZEA) system.
	See the build function for details. Don't use this if you want
	a polar projection."""
	pos, res, shape, mid = validate(pos, res, shape, rowmajor)
	w = WCS(naxis=2)
	w.wcs.ctype = ["RA---ZEA", "DEC--ZEA"]
	w.wcs.crval = mid
	# Bugfix: string comparison must use ==, not the identity operator "is".
	if ref == "standard": ref = None
	return finalize(w, pos, res, shape, ref=ref)
# The airy distribution is a bit different, since is needs to
# know the size of the patch.
def air(pos, res=None, shape=None, rowmajor=False, rad=None, ref=None):
	"""Setups up an Airy (AIR) system. See the build function for details.

	rad is the reference radius in degrees; when omitted it is estimated
	from the extent of the bounding box, which must then be given as
	pos[2,2]."""
	pos, res, shape, mid = validate(pos, res, shape, rowmajor)
	if rad is None:
		if pos.ndim != 2:
			raise ValueError("Airy requires either rad or pos[2,2]")
		# Use distinct names so the box width no longer shadows the WCS
		# object "w" constructed below.
		width  = angdist(mid[0]*deg2rad,pos[0,1]*deg2rad,mid[0]*deg2rad,pos[1,1]*deg2rad)*rad2deg
		height = angdist(pos[0,0]*deg2rad,mid[1]*deg2rad,pos[1,0]*deg2rad,mid[1]*deg2rad)*rad2deg
		rad = (width+height)/4
	w = WCS(naxis=2)
	w.wcs.ctype = ["RA---AIR","DEC--AIR"]
	w.wcs.set_pv([(2,1,90-rad)])
	# Bugfix: string comparison must use ==, not the identity operator "is".
	if ref == "standard": ref = None
	return finalize(w, pos, res, shape, ref=ref)
def tan(pos, res=None, shape=None, rowmajor=False, ref=None):
	"""Set up a gnomonic (tangent-plane, TAN) system. See the build function
	for details. (The old docstring said "plate carree" — a copy-paste
	error from car().)"""
	pos, res, shape, mid = validate(pos, res, shape, rowmajor)
	w = WCS(naxis=2)
	w.wcs.ctype = ["RA---TAN", "DEC--TAN"]
	w.wcs.crval = np.array([mid[0],0])
	# Bugfix: string comparison must use ==, not the identity operator "is".
	if ref == "standard": ref = None
	return finalize(w, pos, res, shape, ref=ref)
systems = {"car": car, "cea": cea, "air": air, "zea": zea, "tan": tan, "gnom": tan, "plain": plain }
def build(pos, res=None, shape=None, rowmajor=False, system="cea", ref=None, **kwargs):
	"""Set up the WCS system named by the "system" argument.
	pos can be either a [2] center position or a [{from,to},2]
	bounding box. At least one of res or shape must be specified.
	If res is specified, it must either be a number, in
	which the same resolution is used in each direction,
	or [2]. If shape is specified, it must be [2]. All angles
	are given in degrees."""
	# Dispatch to the matching builder in the module-level "systems"
	# registry; an unknown system name raises KeyError.
	return systems[system.lower()](pos, res, shape, rowmajor, ref=ref, **kwargs)
def finalize(w, pos, res, shape, ref=None):
	"""Common logic for the various wcs builders. Fills in the reference
	pixel and resolution.

	If ref is given, the wcs is additionally nudged (by up to about one
	pixel) so that crval lies a whole number of pixels away from ref,
	aligning the pixel grids of maps built with the same ref."""
	w.wcs.crpix = [1,1]
	if res is None:
		# Find the resolution that gives our box the required extent.
		w.wcs.cdelt = [1,1]
		corners = w.wcs_world2pix(pos,1)
		w.wcs.cdelt *= (corners[1]-corners[0])/shape
	else:
		w.wcs.cdelt = res
		# Flip axis signs so pixel coordinates increase from pos[0] to pos[1].
		if pos.ndim == 2: w.wcs.cdelt[pos[1]<pos[0]] *= -1
	if pos.ndim == 1:
		if shape is not None:
			# Place pixel origin at corner of shape centered on crval
			off = w.wcs_world2pix(pos[None],0)[0]
			w.wcs.crpix = np.array(shape)/2.0+0.5 - off
	else:
		# Make pos[0] the corner of the (0,0) pixel (counting from 0 for simplicity)
		off = w.wcs_world2pix(pos[0,None],0)[0]+0.5
		w.wcs.crpix -= off
	if ref is not None:
		# Tweak wcs so that crval is an integer number of pixels
		# away from ref. We do that by constructing a new wcs centered
		# on ref, measuring the pixel coordinates of crval in this system
		# and truncating it to a whole pixel number.
		wtmp = w.deepcopy()
		wtmp.wcs.crpix = (1,1)
		wtmp.wcs.crval = ref
		w.wcs.crval = wtmp.wcs_pix2world(np.round(wtmp.wcs_world2pix(w.wcs.crval[None],1)),1)[0]
		# We can then simply round the crpix to the closest integer. Together with the
		# previous operation, this will displace us by around 1 pixel, which is the
		# cost one has to pay for this realignment.
		w.wcs.crpix = np.round(w.wcs.crpix)
	return w
def fix_wcs(wcs, axis=0):
	"""Returns a new WCS object which has had the reference pixel moved to the
	middle of the possible pixel space.

	axis selects which world axis to recenter (default 0, typically RA).
	The input wcs is not modified."""
	res = wcs.deepcopy()
	# Find the center ra manually: mean([crval - crpix*cdelt, crval + (-crpix+shape)*cdelt])
	#  = crval + (-crpix+shape/2)*cdelt
	# What pixel does this correspond to?
	#  crpix2 = crpix + (crval2-crval)/cdelt
	# But that requires shape. Can we do without it? Yes, let's use the
	# biggest possible shape. n = 360/cdelt
	n = abs(360/wcs.wcs.cdelt[axis])
	delta_ra = wcs.wcs.cdelt[axis]*(n/2-wcs.wcs.crpix[axis])
	delta_pix = delta_ra/wcs.wcs.cdelt[axis]
	res.wcs.crval[axis] += delta_ra
	res.wcs.crpix[axis] += delta_pix
	repr(res.wcs) # wcs not properly updated if I don't do this
	return res
| [
37811,
1212,
8265,
15738,
32953,
329,
15453,
45410,
10245,
290,
1762,
198,
4480,
606,
13,
383,
5421,
278,
10559,
290,
15268,
973,
287,
428,
8265,
477,
779,
198,
1169,
976,
16216,
355,
45410,
11,
1312,
13,
68,
13,
5721,
1688,
357,
568,... | 2.592975 | 3,587 |
# test code
# Smoke test for merge_sort (defined elsewhere in this file);
# expected output: [1, 2, 3, 4, 5, 10, 25]
array = [10, 5, 4, 1, 2, 3, 25]
print(merge_sort(array))
| [
628,
198,
2,
1332,
2438,
198,
18747,
796,
685,
940,
11,
642,
11,
604,
11,
352,
11,
362,
11,
513,
11,
1679,
60,
198,
4798,
7,
647,
469,
62,
30619,
7,
18747,
4008,
198
] | 2.117647 | 34 |
from brteve.brt_eve_bt817_8 import BrtEve
from brteve.brt_eve_rp2040 import BrtEveRP2040
# Demo: bring up a BT817/8 EVE display through an RP2040 host board and
# draw "Hello world" centred on the screen.
host = BrtEveRP2040()
eve = BrtEve(host)
eve.init(resolution="1280x800", touch="goodix")
eve.ClearColorRGB(0x20, 0x40, 0x20)  # dark green background
eve.Clear()
# Font handle 31, centred at the middle of the LCD.
eve.cmd_text(eve.lcd_width // 2, eve.lcd_height // 2, 31, eve.OPT_CENTER, "Hello world")
eve.swap()  # presumably swaps display lists to present the frame — confirm in BrtEve docs
| [
6738,
865,
660,
303,
13,
1671,
83,
62,
44655,
62,
18347,
23,
1558,
62,
23,
1330,
1709,
83,
36,
303,
198,
6738,
865,
660,
303,
13,
1671,
83,
62,
44655,
62,
81,
79,
1238,
1821,
1330,
1709,
83,
36,
303,
20031,
1238,
1821,
198,
198,... | 2.143791 | 153 |
from src.preprocess import read_dataset
from src.utils import *
import json
import numpy as np
from tqdm import tqdm
# Root directory holding the dataset files.
DATA_DIR = 'data/dataset/'
# main is expected to come from the `from src.utils import *` wildcard import above.
if __name__ == "__main__":
    main()
| [
6738,
12351,
13,
3866,
14681,
1330,
1100,
62,
19608,
292,
316,
198,
6738,
12351,
13,
26791,
1330,
1635,
198,
198,
11748,
33918,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
256,
80,
36020,
1330,
256,
80,
36020,
628,
198,
26947,
62,
... | 2.546667 | 75 |
#!/usr/bin/env python
# Copyright 2020 The Tilt Brush Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import pprint
# NOTE: this is a Python 2 script (print statements, not print()).
try:
    from tiltbrush.tilt import Tilt
except ImportError:
    # Fail fast with install instructions when the toolkit isn't importable.
    print "You need the Tilt Brush Toolkit (https://github.com/googlevr/tilt-brush-toolkit)"
    print "and then put its Python directory in your PYTHONPATH."
    sys.exit(1)
if __name__ == '__main__':
    main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
2,
15069,
12131,
383,
309,
2326,
39846,
46665,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
... | 3.442748 | 262 |
from django.db import models
from django.template.defaultfilters import slugify
from django.shortcuts import reverse
from django.conf import settings
from ordered_model.models import OrderedModel
from . import snippets
# Create your models here. | [
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
28243,
13,
12286,
10379,
1010,
1330,
31065,
1958,
198,
6738,
42625,
14208,
13,
19509,
23779,
1330,
9575,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
614... | 4.083333 | 60 |
from app import db
from app.auth.forms import RegisterFlaskForm
from app.models import Instructors, Departments, Courses
from app.auth import bp
from app.auth.google_login import google_login_request_uri, process_google_login_callback
from app.utils.api import resource_url
from flask import render_template, redirect, flash, url_for, current_app
from flask_login import login_required, login_user, logout_user, current_user
import requests
import json
@bp.route('/login', methods=['GET', 'POST'])
@bp.route('/login/callback')
@login_required
@bp.route('/register', methods=['GET', 'POST'])
@login_required
@bp.route('/logout')
| [
6738,
598,
1330,
20613,
198,
6738,
598,
13,
18439,
13,
23914,
1330,
17296,
7414,
2093,
8479,
198,
6738,
598,
13,
27530,
1330,
20689,
669,
11,
2129,
32514,
11,
2734,
8448,
198,
6738,
598,
13,
18439,
1330,
275,
79,
198,
6738,
598,
13,
... | 3.271795 | 195 |
from __future__ import print_function
import time
import os
import copy
import numpy as np
from . import onmt_model
import onmt
import collections
import operator
import editdistance
import sys
import itertools
# True when running under Python 3; used to paper over py2/py3 stdlib renames.
PYTHON3 = sys.version_info > (3, 0)
if PYTHON3:
    from itertools import zip_longest as zip_longest
else:
    # Python 2 spells it izip_longest; alias it to the py3 name.
    from itertools import izip_longest as zip_longest
# DEFAULT_TO_PATHS = ['/home/marcotcr/OpenNMT-py/trained_models/english_french_model_acc_70.61_ppl_3.73_e13.pt', '/home/marcotcr/OpenNMT-py/trained_models/english_german_model_acc_58.34_ppl_7.82_e13.pt', '/home/marcotcr/OpenNMT-py/trained_models/english_portuguese_model_acc_70.90_ppl_4.28_e13.pt']
# DEFAULT_BACK_PATHS = ['/home/marcotcr/OpenNMT-py/trained_models/french_english_model_acc_68.83_ppl_4.43_e13.pt', '/home/marcotcr/OpenNMT-py/trained_models/german_english_model_acc_57.23_ppl_10.00_e13.pt', '/home/marcotcr/OpenNMT-py/trained_models/portuguese_english_model_acc_69.78_ppl_5.05_e13.pt']
DEFAULT_TO_PATHS = ['seada/sea/translation_models/english_french_model_acc_71.05_ppl_3.71_e13.pt',
'seada/sea/translation_models/english_portuguese_model_acc_70.75_ppl_4.32_e13.pt']
DEFAULT_BACK_PATHS = ['seada/sea/translation_models/french_english_model_acc_68.51_ppl_4.43_e13.pt',
'seada/sea/translation_models/portuguese_english_model_acc_69.93_ppl_5.04_e13.pt']
def largest_indices(ary, n):
    """Return the indices of the n largest elements of a numpy array.

    The result is a tuple of index arrays (as from np.unravel_index) ordered
    from largest to smallest value. If n <= 0 an empty selection is returned;
    if n exceeds the array size, all indices are returned in flat order
    (unsorted, preserving the original behavior for that case).
    """
    flat = ary.flatten()
    if n <= 0:
        # Bugfix: previously n == 0 made flat[-0:] select the WHOLE array.
        return np.unravel_index(np.array([], dtype=int), ary.shape)
    if n > flat.shape[0]:
        indices = np.array(range(flat.shape[0]), dtype='int')
        return np.unravel_index(indices, ary.shape)
    # argpartition puts the n largest values (unordered) at the end ...
    indices = np.argpartition(flat, -n)[-n:]
    # ... then sort just those n by descending value.
    indices = indices[np.argsort(-flat[indices])]
    return np.unravel_index(indices, ary.shape)
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
11748,
640,
198,
11748,
28686,
198,
11748,
4866,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
764,
1330,
319,
16762,
62,
19849,
198,
11748,
319,
16762,
198,
11748,
17268,
198,
11748,
... | 2.315584 | 770 |
# Generated by Django 3.1.6 on 2021-02-15 18:38
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
16,
13,
21,
319,
33448,
12,
2999,
12,
1314,
1248,
25,
2548,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
import math
import numpy as np
import tensorflow as tf
import os
import time
# Must be set before TensorFlow is imported: restricts TF to GPU #2.
os.environ['CUDA_VISIBLE_DEVICES'] = '2'
import tflearn
import global_constants as settings
# FEATURE_NUM = 64 #128
ACTION_EPS = 1e-6  # small epsilon keeping action probabilities away from exactly 0/1
GAMMA = 0.99  # presumably the RL discount factor — confirm in the training loop
# PPO2
EPS = 0.2  # presumably the PPO2 surrogate-objective clipping range — confirm where used
| [
11748,
10688,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
28686,
198,
11748,
640,
198,
418,
13,
268,
2268,
17816,
43633,
5631,
62,
29817,
34563,
62,
39345,
34444,
20520,
796,
705,
17,
6,
19... | 2.547368 | 95 |
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
# the License. A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
from unittest import TestCase
from unittest.mock import patch
from pyqldb.cursor.buffered_cursor import BufferedCursor
from .helper_functions import assert_query_stats, create_stream_cursor, generate_statement_result
MOCK_VALUES = [1, 2]
MOCK_TRANSACTION_ID = 'id'
@patch('pyqldb.cursor.stream_cursor.StreamCursor')
| [
2,
15069,
13130,
6186,
13,
785,
11,
3457,
13,
393,
663,
29116,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
11074,
921,
743,
407,
779,
428,
2393,
2845,
28... | 3.402299 | 261 |
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from charmhelpers.core.strutils import (
bytes_from_string
)
from charmhelpers.core.hookenv import (
is_relation_made,
relation_ids,
relation_get,
related_units,
service_name,
config,
log as juju_log,
ERROR
)
from charmhelpers.contrib.openstack.context import (
OSContextGenerator,
ApacheSSLContext as SSLContext,
BindHostContext
)
from charmhelpers.contrib.hahelpers.cluster import (
determine_apache_port,
determine_api_port,
)
from charmhelpers.contrib.openstack.utils import (
os_release,
CompareOpenStackReleases,
)
class GlancePolicyContext(OSContextGenerator):
    """This Context is only used from Ussuri onwards. At Ussuri, Glance
    implemented policy-in-code, and thus didn't ship with a policy.json.
    Therefore, the charm introduces a 'policy.yaml' file that is used to
    provide the override here.

    Note that this is separate from policy overrides as it's a charm config
    option that has existed prior to its introduction.

    Update *_image_location policy to restrict to admin role.

    We do this unconditionally and keep a record of the original as installed
    by the package.
    """
| [
2,
15069,
1584,
19507,
605,
12052,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
743... | 3.255046 | 545 |
import os
import io
from datetime import datetime, timedelta
import json
import logging
import requests
import boto3
import psycopg2
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# Required configuration; log loudly at import time if missing.
BUCKET = os.getenv("S3_BUCKET")
if not BUCKET:
    logger.error({"error": "no bucket env var found"})
DB_CREDENTIALS = os.getenv("DB_CREDENTIALS")
if not DB_CREDENTIALS:
    logger.error({"error": "no DB credentials env var found"})
# Upstream open-data GeoJSON feed (presumably the case data — see fetch code).
API_URL = (
    "https://opendata.arcgis.com/datasets/80193066bcb84a39893fbed995fc8ed0_0.geojson"
)
# following two keys aren't required to function, but if not included then the function
# won't invalidate the cache as soon as new data is available, but it still
# will every 15 minutes per the normal schedule
INVALIDATE_CACHE_KEY = os.getenv("INVALIDATE_CACHE_KEY")
API_GATEWAY_URL = os.getenv("API_URL")
EMAIL_TOPIC = os.getenv("EMAIL_TOPIC")
# Module-level AWS clients so warm invocations reuse the same connections.
s3_client = boto3.client("s3")
sns_client = boto3.client("sns")
def already_saved_todays_data():
    """Return True if a successful data save was recorded within the last 12 hours."""
    connection = psycopg2.connect(DB_CREDENTIALS)
    cursor = connection.cursor()
    cutoff = twelve_hours_ago()
    cursor.execute(
        "SELECT * FROM invokes WHERE function_name = %s and invoke_time > %s and new_data = %s",
        ("cases", cutoff, True),
    )
    row = cursor.fetchone()
    connection.close()
    return bool(row)
def log_update_time(new_data=False):
    """Insert a row into the invokes table recording this check and whether new data was found."""
    connection = psycopg2.connect(DB_CREDENTIALS)
    cursor = connection.cursor()
    cursor.execute(
        "INSERT INTO invokes (function_name, invoke_time, new_data) VALUES (%s, now(), %s)",
        ("cases", new_data),
    )
    connection.commit()
    connection.close()
| [
11748,
28686,
198,
11748,
33245,
198,
6738,
4818,
8079,
1330,
4818,
8079,
11,
28805,
12514,
198,
11748,
33918,
198,
11748,
18931,
198,
198,
11748,
7007,
198,
11748,
275,
2069,
18,
198,
11748,
17331,
22163,
70,
17,
198,
198,
6404,
1362,
... | 2.565957 | 705 |
# class User:
#     pass
#
# user_1 = User()
# user_1.id = "001"
# user_1.username = "angela"
# Demo of the User class (defined elsewhere in this file): create two users,
# link them, and print both sides of the follower relationship.
user_1 = User("001", "Douglas")
user_2 = User("002", "Angela")
user_1.follow(user_2)
print(user_1.followers)
print(user_1.following)
print(user_2.followers)
print(user_2.following)
| [
2,
1398,
11787,
25,
198,
2,
220,
220,
220,
220,
1208,
198,
2,
198,
2,
2836,
62,
16,
796,
11787,
3419,
198,
2,
2836,
62,
16,
13,
312,
796,
366,
8298,
1,
198,
2,
2836,
62,
16,
13,
29460,
796,
366,
8368,
64,
1,
628,
198,
7220,
... | 2.241935 | 124 |
""" Satellite Data Source """
import logging
from dataclasses import InitVar, dataclass
from functools import partial
from numbers import Number
from typing import Iterable, Optional
import dask
import numpy as np
import pandas as pd
import xarray as xr
import nowcasting_dataset.time as nd_time
from nowcasting_dataset.consts import SAT_VARIABLE_NAMES
from nowcasting_dataset.data_sources.data_source import ZarrDataSource
from nowcasting_dataset.data_sources.satellite.satellite_model import Satellite
from nowcasting_dataset.utils import drop_duplicate_times, drop_non_monotonic_increasing
_LOG = logging.getLogger(__name__)
_LOG_HRV = logging.getLogger(__name__.replace("satellite", "hrvsatellite"))
@dataclass
class SatelliteDataSource(ZarrDataSource):
"""Satellite Data Source."""
channels: Optional[Iterable[str]] = SAT_VARIABLE_NAMES[1:]
image_size_pixels: InitVar[int] = 128
meters_per_pixel: InitVar[int] = 2_000
logger = _LOG
time_resolution_minutes: int = 15
    def __post_init__(self, image_size_pixels: int, meters_per_pixel: int):
        """Validate configuration and precompute the expected example shape.

        Args:
            image_size_pixels: Width/height of each square example image.
            meters_per_pixel: Ground resolution of each pixel.
        """
        assert len(self.channels) > 0, "channels cannot be empty!"
        assert image_size_pixels > 0, "image_size_pixels cannot be <= 0!"
        assert meters_per_pixel > 0, "meters_per_pixel cannot be <= 0!"
        super().__post_init__(image_size_pixels, meters_per_pixel)
        n_channels = len(self.channels)
        # Used later by get_example() to sanity-check the selected data;
        # presumably (time, y, x, channel) order — confirm against Satellite model.
        self._shape_of_example = (
            self.total_seq_length,
            image_size_pixels,
            image_size_pixels,
            n_channels,
        )
    @property
    def sample_period_minutes(self) -> int:
        """Temporal resolution of this data source in minutes (overrides the default)."""
        return self.time_resolution_minutes
    def open(self) -> None:
        """
        Open Satellite data

        We don't want to open_sat_data in __init__.
        If we did that, then we couldn't copy SatelliteDataSource
        instances into separate processes.  Instead,
        call open() _after_ creating separate processes.

        Raises:
            RuntimeError: if any requested channel is missing from the data.
        """
        self._data = self._open_data()
        # Some datasets name the channel dimension "variable"; standardise on
        # "channels", and rename the spatial dims to their OSGB names.
        if "variable" in self._data.dims:
            self._data = self._data.rename({"variable": "channels"})
        self._data = self._data.rename({"x": "x_osgb", "y": "y_osgb"})
        if not set(self.channels).issubset(self._data.channels.values):
            raise RuntimeError(
                f"One or more requested channels are not available in {self.zarr_path}!"
                f" Requested channels={self.channels}."
                f" Available channels={self._data.channels.values}"
            )
        # Restrict to the configured channels, in the configured order.
        self._data = self._data.sel(channels=list(self.channels))
    @staticmethod
    def get_data_model_for_batch():
        """Return the data model class used for batches (Satellite)."""
        return Satellite
    def get_spatial_region_of_interest(
        self, data_array: xr.DataArray, x_center_osgb: Number, y_center_osgb: Number
    ) -> xr.DataArray:
        """
        Gets the satellite image as a square around the center

        Ignores x and y coordinates as for the original satellite projection each pixel varies in
        both its x and y distance from other pixels. See Issue 401 for more details.

        This results, in 'real' spatial terms, each image covering about 2x as much distance in the
        x direction as in the y direction.

        Args:
            data_array: DataArray to subselect from
            x_center_osgb: Center of the image x coordinate in OSGB coordinates
            y_center_osgb: Center of image y coordinate in OSGB coordinates

        Returns:
            The selected data around the center

        Raises:
            RuntimeError: if the requested square extends beyond the data's extent.
        """
        # Get the index into x and y nearest to x_center_osgb and y_center_osgb:
        # (-1 so we take the coordinate at or below the center; np.searchsorted
        # returns the insertion point, which assumes sorted coordinate arrays.)
        x_index_at_center = np.searchsorted(data_array.x_osgb.values, x_center_osgb) - 1
        y_index_at_center = np.searchsorted(data_array.y_osgb.values, y_center_osgb) - 1
        # Put x_index_at_center and y_index_at_center into a pd.Series so we can operate
        # on them both in a single line of code.
        x_and_y_index_at_center = pd.Series(
            {"x_osgb": x_index_at_center, "y_osgb": y_index_at_center}
        )
        half_image_size_pixels = self._square.size_pixels // 2
        min_x_and_y_index = x_and_y_index_at_center - half_image_size_pixels
        max_x_and_y_index = x_and_y_index_at_center + half_image_size_pixels
        # Check whether the requested region of interest steps outside of the available data:
        # (x2 because shrinking the image by one pixel only buys half a pixel per side.)
        suggested_reduction_of_image_size_pixels = (
            max(
                (-min_x_and_y_index.min() if (min_x_and_y_index < 0).any() else 0),
                (max_x_and_y_index.x_osgb - len(data_array.x_osgb)),
                (max_x_and_y_index.y_osgb - len(data_array.y_osgb)),
            )
            * 2
        )
        # If the requested region does step outside the available data then raise an exception
        # with a helpful message:
        if suggested_reduction_of_image_size_pixels > 0:
            new_suggested_image_size_pixels = (
                self._square.size_pixels - suggested_reduction_of_image_size_pixels
            )
            raise RuntimeError(
                "Requested region of interest of satellite data steps outside of the available"
                " geographical extent of the Zarr data. The requested region of interest extends"
                f" from pixel indicies"
                f" x={min_x_and_y_index.x_osgb} to x={max_x_and_y_index.x_osgb},"
                f" y={min_x_and_y_index.y_osgb} to y={max_x_and_y_index.y_osgb}. In the Zarr data,"
                f" len(x)={len(data_array.x_osgb)}, len(y)={len(data_array.y_osgb)}. Try reducing"
                f" image_size_pixels from {self._square.size_pixels} to"
                f" {new_suggested_image_size_pixels} pixels."
                f" {self.history_length=}"
                f" {self.forecast_length=}"
            )
        # Select the geographical region of interest.
        # Note that isel is *exclusive* of the end of the slice.
        # e.g. isel(x=slice(0, 3)) will return the first, second, and third values.
        data_array = data_array.isel(
            x_osgb=slice(min_x_and_y_index.x_osgb, max_x_and_y_index.x_osgb),
            y_osgb=slice(min_x_and_y_index.y_osgb, max_x_and_y_index.y_osgb),
        )
        return data_array
def get_example(
self, t0_datetime_utc: pd.Timestamp, x_center_osgb: Number, y_center_osgb: Number
) -> xr.Dataset:
"""
Get Example data
Args:
t0_datetime_utc: list of timestamps for the datetime of the batches.
The batch will also include data for historic and future depending
on `history_minutes` and `future_minutes`.
x_center_osgb: x center batch locations
y_center_osgb: y center batch locations
Returns: Example Data
"""
selected_data = self._get_time_slice(t0_datetime_utc)
selected_data = self.get_spatial_region_of_interest(
data_array=selected_data,
x_center_osgb=x_center_osgb,
y_center_osgb=y_center_osgb,
)
if "variable" in list(selected_data.dims):
selected_data = selected_data.rename({"variable": "channels"})
selected_data = self._post_process_example(selected_data, t0_datetime_utc)
if selected_data.shape != self._shape_of_example:
raise RuntimeError(
"Example is wrong shape! "
f"x_center_osgb={x_center_osgb}\n"
f"y_center_osgb={y_center_osgb}\n"
f"t0_dt={t0_datetime_utc}\n"
f"times are {selected_data.time}\n"
f"expected shape={self._shape_of_example}\n"
f"actual shape {selected_data.shape}"
f" {self.forecast_length=}"
f" {self.history_length=}"
)
return selected_data.load().to_dataset(name="data")
def datetime_index(self, remove_night: bool = True) -> pd.DatetimeIndex:
"""Returns a complete list of all available datetimes
Args:
remove_night: If True then remove datetimes at night.
We're interested in forecasting solar power generation, so we
don't care about nighttime data :)
In the UK in summer, the sun rises first in the north east, and
sets last in the north west [1]. In summer, the north gets more
hours of sunshine per day.
In the UK in winter, the sun rises first in the south east, and
sets last in the south west [2]. In winter, the south gets more
hours of sunshine per day.
| | Summer | Winter |
| ---: | :---: | :---: |
| Sun rises first in | N.E. | S.E. |
| Sun sets last in | N.W. | S.W. |
| Most hours of sunlight | North | South |
Before training, we select timesteps which have at least some
sunlight. We do this by computing the clearsky global horizontal
irradiance (GHI) for the four corners of the satellite imagery,
and for all the timesteps in the dataset. We only use timesteps
where the maximum global horizontal irradiance across all four
corners is above some threshold.
The 'clearsky solar irradiance' is the amount of sunlight we'd
expect on a clear day at a specific time and location. The SI unit
of irradiance is watt per square meter. The 'global horizontal
irradiance' (GHI) is the total sunlight that would hit a
horizontal surface on the surface of the Earth. The GHI is the
sum of the direct irradiance (sunlight which takes a direct path
from the Sun to the Earth's surface) and the diffuse horizontal
irradiance (the sunlight scattered from the atmosphere). For more
info, see: https://en.wikipedia.org/wiki/Solar_irradiance
References:
1. [Video of June 2019](https://www.youtube.com/watch?v=IOp-tj-IJpk)
2. [Video of Jan 2019](https://www.youtube.com/watch?v=CJ4prUVa2nQ)
"""
if self._data is None:
sat_data = self._open_data()
else:
sat_data = self._data
datetime_index = pd.DatetimeIndex(sat_data.time.values)
if remove_night:
border_locations = self.geospatial_border()
datetime_index = nd_time.select_daylight_datetimes(
datetimes=datetime_index, locations=border_locations
)
return datetime_index
class HRVSatelliteDataSource(SatelliteDataSource):
    """Satellite Data Source for HRV data."""

    # HRV has a single channel: only the first entry of SAT_VARIABLE_NAMES.
    channels: Optional[Iterable[str]] = SAT_VARIABLE_NAMES[:1]
    # Example edge length in pixels and ground resolution.
    # NOTE(review): 2_000 m/pixel presumably reflects HRV's native resolution
    # (finer than non-HRV channels) — confirm against the Zarr metadata.
    image_size_pixels: InitVar[int] = 128
    meters_per_pixel: InitVar[int] = 2_000
    logger = _LOG_HRV  # dedicated logger so HRV messages are distinguishable
def remove_acq_time_from_dataset_and_fix_time_coords(
    dataset: xr.Dataset, logger: logging.Logger
) -> xr.Dataset:
    """Drop the problematic `acq_time` variable and sanitise the time axis.

    Duplicate timestamps are removed first, then any timestamps that break
    monotonic ordering, so the resulting time coordinate is unique and
    strictly increasing.

    Args:
        dataset: The ``xr.Dataset`` to clean up.
        logger: Logger supplied by the caller (kept for interface
            compatibility; not used directly in this function).

    Returns:
        The cleaned dataset, still keyed by "stacked_eumetsat_data".
    """
    without_acq = dataset.drop_vars("acq_time", errors="ignore")
    # Remove duplicated timestamps first, then out-of-order ones.
    cleaned = drop_duplicate_times(
        data_array=without_acq["stacked_eumetsat_data"], class_name="Satellite", time_dim="time"
    )
    cleaned = drop_non_monotonic_increasing(
        data_array=cleaned, class_name="Satellite", time_dim="time"
    )
    result = cleaned.to_dataset(name="stacked_eumetsat_data")
    time_index = pd.DatetimeIndex(cleaned["time"])
    assert time_index.is_unique
    assert time_index.is_monotonic_increasing
    return result
def open_sat_data(
    zarr_path: str, consolidated: bool, logger: logging.Logger, sample_period_minutes: int = 15
) -> xr.DataArray:
    """Lazily open one or more satellite Zarr stores as a single DataArray.

    Args:
        zarr_path: Cloud URL or local path pattern. If GCP URL, must start
            with 'gs://'.  Wildcards are allowed so several monthly stores
            can be combined into one dataset.
        consolidated: Whether or not the Zarr metadata is consolidated.
        logger: Logger to write debug messages to.
        sample_period_minutes: Keep only timestamps whose minute-of-hour is a
            multiple of this period (default 15).

    Returns:
        The lazily-loaded "stacked_eumetsat_data" array renamed to "data",
        with the x axis flipped so coordinates start at the top-left, and the
        time axis reduced to the requested sample period.
    """
    logger.debug("Opening satellite data: %s", zarr_path)

    # Silence dask's large-chunk warning instead of splitting chunks:
    # splitting slows loading a satellite batch from ~8 s to ~50 s.
    dask.config.set(**{"array.slicing.split_large_chunks": False})

    # Bind the logger into the per-file preprocess hook.
    preprocess_fn = partial(remove_acq_time_from_dataset_and_fix_time_coords, logger=logger)

    # open_mfdataset handles both a single Zarr directory and a wildcard of
    # stores, concatenating them along the time dimension.
    dataset = xr.open_mfdataset(
        zarr_path,
        chunks="auto",  # See issue #456 for why we use "auto".
        mode="r",
        engine="zarr",
        concat_dim="time",
        preprocess=preprocess_fn,
        consolidated=consolidated,
        combine="nested",
    )

    sat = dataset["stacked_eumetsat_data"]
    if sat.name == "stacked_eumetsat_data":
        sat.name = "data"
    del dataset

    # Flip x so that coordinates run top-left first.
    sat = sat.reindex(x=sat.x[::-1])

    # Down-sample the time axis to the requested period.
    wanted_times = [
        t for t in sat.time.values if pd.Timestamp(t).minute % sample_period_minutes == 0
    ]
    sat = sat.sel(time=wanted_times)

    # Sanity check!
    times = pd.DatetimeIndex(sat["time"])
    assert times.is_unique
    assert times.is_monotonic_increasing

    return sat
| [
37811,
33530,
6060,
8090,
37227,
198,
11748,
18931,
198,
6738,
4818,
330,
28958,
1330,
44707,
19852,
11,
4818,
330,
31172,
198,
6738,
1257,
310,
10141,
1330,
13027,
198,
6738,
3146,
1330,
7913,
198,
6738,
19720,
1330,
40806,
540,
11,
3223... | 2.321326 | 6,274 |
from datetime import datetime
from django.core import exceptions
from django.db import models
from django.contrib.auth.models import AbstractUser, BaseUserManager
from django.core import validators
from .helpers import format_tempo, format_run_time
class UserManager(BaseUserManager):
    """Define a model manager for User model with no username field.

    Validation rules:
        * An email address is required (it acts as the username).
        * Passwords must be at least 8 characters long.

    Raises:
        ValueError: If the email is missing, or a superuser flag is wrong.
        django.core.exceptions.ValidationError: If the password is missing or
            too short, or the model's ``full_clean()`` fails.
    """

    use_in_migrations = True

    def _create_user(self, email, password, **extra_fields):
        """Create and save a User with the given email and password."""
        if not email:
            raise ValueError('The given email must be set')
        # Bug fix: a missing password previously crashed with TypeError on
        # len(None); treat it the same as a too-short password.
        if not password or len(password) < 8:
            raise exceptions.ValidationError("Password too short(min 8 characters).")
        email = self.normalize_email(email)
        user = self.model(email=email, **extra_fields)
        user.set_password(password)
        # Run full model validation before hitting the database; any
        # ValidationError propagates to the caller.
        user.full_clean()
        user.save(using=self._db)
        return user

    def create_user(self, email, password=None, **extra_fields):
        """Create and save a regular User with the given email and password."""
        extra_fields.setdefault('is_staff', False)
        extra_fields.setdefault('is_superuser', False)
        return self._create_user(email, password, **extra_fields)

    def create_superuser(self, email, password, **extra_fields):
        """Create and save a SuperUser with the given email and password."""
        extra_fields.setdefault('is_staff', True)
        extra_fields.setdefault('is_superuser', True)
        if extra_fields.get('is_staff') is not True:
            raise ValueError('Superuser must have is_staff=True.')
        if extra_fields.get('is_superuser') is not True:
            raise ValueError('Superuser must have is_superuser=True.')
        return self._create_user(email, password, **extra_fields)
| [
6738,
4818,
8079,
1330,
4818,
8079,
198,
198,
6738,
42625,
14208,
13,
7295,
1330,
13269,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
27741,
12982,
11,
7308,
12982,
1... | 2.495737 | 821 |
from django.contrib import admin
from .models import Libro, Pagina
# Expose both models in the Django admin with the default ModelAdmin.
admin.site.register(Libro)
admin.site.register(Pagina)
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
198,
6738,
764,
27530,
1330,
7980,
305,
11,
31525,
1437,
628,
198,
28482,
13,
15654,
13,
30238,
7,
25835,
305,
8,
198,
28482,
13,
15654,
13,
30238,
7,
47,
363,
1437,
8
] | 3.02439 | 41 |
# Variables holding ANSI colour escape codes!
# NOTE(review): these names are never used below (the prints embed the raw
# escape codes); kept for backward compatibility with the original script.
vermelho, amarelo, azul1, lilas, azul, fim = '\033[1:31m', '\033[1:33m', '\033[1:34m', '\033[1:35m', \
                                             '\033[1:36m', '\033[m'
# Report statistics (count, sum, mean, max, min) about a set of typed numbers.
# Header
print('\033[1:31m-=-\033[m' * 17)
print('\033[1:35mDIGITE QUANTOS NÚMEROS DESEJAR E OBTENHA DADOS DELES\033[m')
print('\033[1:31m-=-\033[m' * 17)
c = 0  # how many numbers were typed
s = 0  # running sum
maior = menor = 0
# Bug fix: the original used `while num == 0` as the loop sentinel, so typing
# the number 0 made the loop restart even after the statistics were printed,
# and an invalid menu option silently ended the program.  A boolean flag and
# an option re-prompt fix both problems.
continuar = True
while continuar:
    num = int(input('Digite o {}º número: '.format(c + 1)))
    c = c + 1
    s = s + num
    # The first number initialises both extremes; afterwards just compare.
    if c == 1:
        maior = menor = num
    else:
        if num > maior:
            maior = num
        if num < menor:
            menor = num
    # Keep asking until a valid option (1 = continue, 2 = stop) is chosen.
    prosseguir = int(input('\033[33mDeseja continuar?\033[m\n \033[36mSim [1]\n Não [2]\033[m\nSua escolha: '))
    while prosseguir not in (1, 2):
        print('\033[31mOpção inválida! Tente novamente!\033[m')
        prosseguir = int(input('\033[33mDeseja continuar?\033[m\n \033[36mSim [1]\n Não [2]\033[m\nSua escolha: '))
    if prosseguir == 2:
        continuar = False
        media = s / c
        print('\033[35mVocê digitou um total de\033[m \033[33m{}\033[m \033[35mnúmeros\033[m'.format(c))
        print('\033[35mA soma dos números digitado equivale a\033[m: \033[33m{}\033[m'.format(s))
        print('\033[35mA média dos números equivale a\033[m: \033[33m{}\033[m'.format(media))
        print('\033[35mO maior número digitado foi\033[m \033[33m{}\033[m \033[35me o menor foi '
              '\033[33m{}\033[m'.format(maior, menor))
2,
15965,
6557,
303,
271,
401,
269,
10205,
12894,
418,
390,
21758,
0,
198,
332,
17694,
8873,
11,
716,
533,
5439,
11,
35560,
377,
16,
11,
42280,
292,
11,
35560,
377,
11,
277,
320,
796,
705,
59,
44427,
58,
16,
25,
3132,
76,
3256,
... | 1.80597 | 804 |
"""
Functionality to scrape parliamentarian information from Wikipedia pages.
"""
import logging
import os
from datetime import datetime
import requests
import pandas as pd
import bs4 as bs
from .schema import schema, schema_map
from .urls import parliaments
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
WIKI_BASE_URL = "https://de.wikipedia.org"
columns_for_link_extraction = ['Name', 'Mitglied des Landtages', 'Bild', 'Foto']
class WikiFetcher:
    """
    Class to scrape parliamentarian information from Wikipedia pages.
    """
    # NOTE(review): the fetch methods (e.g. `fetch_all_parliaments`, invoked
    # in the __main__ block below) are not visible in this chunk — presumably
    # defined in the full source file; confirm before relying on this class.
if __name__ == "__main__":
    # Script entry point: scrape every configured parliament.
    fetcher = WikiFetcher()
    fetcher.fetch_all_parliaments()
| [
37811,
198,
22203,
1483,
284,
42778,
8540,
3699,
1321,
422,
15312,
5468,
13,
198,
37811,
198,
11748,
18931,
198,
11748,
28686,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
198,
11748,
7007,
198,
11748,
19798,
292,
355,
279,
67,
198,
11... | 2.972973 | 222 |
from flask import Flask, jsonify
from flask_jwt_extended import JWTManager
from flask_restful import Api

from blacklist import BLACKLIST
from resource.hotel import HotelResource, HotelResourceAll
from resource.site import SiteResource, SiteResourceAll
from resource.user import UserResource, UserResourceAll, UserLoginResource, UserLogoutResource, UserConfirm
from sql_alchemy import database

app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///banco.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
# NOTE(review): secrets should come from environment variables, not source.
app.config['JWT_SECRET_KEY'] = 'MyAlfredHotel'
app.config['JWT_BLACKLIST_ENABLED'] = True
app.config['PROPAGATE_EXCEPTIONS'] = True

database.init_app(app)
api = Api(app)
jwt = JWTManager(app)


# Bug fix: the original file had three bare decorators with no functions
# attached, which is a SyntaxError.  The standard handler bodies for this
# flask-jwt-extended setup are restored below.
@app.before_first_request
def create_database():
    """Create all database tables before the first request is served."""
    database.create_all()


@jwt.token_in_blacklist_loader
def check_if_token_in_blacklist(decrypted_token):
    """Reject any JWT whose unique id (jti) was blacklisted at logout."""
    return decrypted_token['jti'] in BLACKLIST


@jwt.revoked_token_loader
def revoked_token_response():
    """Response returned when a blacklisted/revoked token is presented."""
    return jsonify({'message': 'You have been logged out.'}), 401


api.add_resource(HotelResourceAll, '/hotels')
api.add_resource(HotelResource, '/hotels/<int:id>')
api.add_resource(SiteResourceAll, '/sites')
api.add_resource(SiteResource, '/sites/<int:id>')
api.add_resource(UserResourceAll, '/users')
api.add_resource(UserResource, '/users/<int:id>')
api.add_resource(UserConfirm, '/confirm/<int:id>')
api.add_resource(UserLoginResource, '/login')
api.add_resource(UserLogoutResource, '/logout')

if __name__ == '__main__':
    app.run(debug=True)
| [
6738,
42903,
1330,
46947,
11,
33918,
1958,
198,
6738,
42903,
62,
73,
46569,
62,
2302,
1631,
1330,
449,
39386,
13511,
198,
6738,
42903,
62,
2118,
913,
1330,
5949,
72,
198,
198,
6738,
38810,
1330,
31963,
45849,
198,
6738,
8271,
13,
8940,
... | 2.869469 | 452 |
import pygame, sys, random, json, time, os, button
from cryptography.fernet import Fernet
from threading import Thread
from time import sleep
# NOTE(review): a hard-coded Fernet key in source code offers no real
# protection — anyone with the code can decrypt the score file.
key = "FuGxRMgLoA_lW62jYKpWoW0ieYUBMaryvlAOqp-aQpY="
f = Fernet(key)
clock = pygame.time.Clock()
pygame.init()
screen = pygame.display.set_mode((400,650))
game_font = pygame.font.Font('04B_19.ttf',30)
pygame.display.set_caption('Flappy Bird Made By GodOfPro')
#Icon
game_icon = pygame.image.load('sprites/icon.png')
pygame.display.set_icon(pygame.image.load('sprites/icon.png'))
#Background Music
pygame.mixer.music.load("audio/music.mp3")
pygame.mixer.music.play(-1)
pygame.mixer.music.set_volume(0.1)
# Game Variables
gravity = 0.15
bird_movement = 0
game_active = True
game_state = "main_menu"
score = 0
high_score = 0
can_score = True
score_multi = 0
doublescore_sleep_duration = 0
#Draw Variables
#Background
bg_surface = pygame.image.load('sprites/background-day.png').convert()
bg_surface = pygame.transform.smoothscale(bg_surface,(400,650))
#Floor
floor_surface = pygame.image.load('sprites/base.png').convert()
floor_surface = pygame.transform.smoothscale(floor_surface,(400,130))
floor_x_pos = 0
#Bird
bird_downflap = pygame.transform.scale(pygame.image.load('sprites/bluebird-downflap.png'),(42,30)).convert_alpha()
bird_midflap = pygame.transform.scale(pygame.image.load('sprites/bluebird-midflap.png'),(42,30)).convert_alpha()
bird_upflap = pygame.transform.scale(pygame.image.load('sprites/bluebird-upflap.png'),(42,30)).convert_alpha()
bird_frames = [bird_downflap,bird_midflap,bird_upflap]
bird_index = 0
bird_surface = bird_frames[bird_index]
bird_rect = bird_surface.get_rect(center = (100,325))
#Buttons
start_img = pygame.image.load("sprites/start_btn.png").convert_alpha()
exit_img = pygame.image.load("sprites/exit_btn.png").convert_alpha()
#Game Events
# BIRDFLAP fires every 200 ms to advance the wing-flap animation frame.
BIRDFLAP = pygame.USEREVENT + 1
pygame.time.set_timer(BIRDFLAP,200)
pipe_surface = pygame.image.load('sprites/pipe-green.png').convert()
pipe_surface = pygame.transform.smoothscale(pipe_surface,(62,320))
pipe_list = []
# SPAWNPIPE fires every 1.2 s to add a new pipe to the list.
SPAWNPIPE = pygame.USEREVENT
pygame.time.set_timer(SPAWNPIPE,1200)
# Candidate y positions for newly spawned pipes.
pipe_height = [300,250,350,400,450]
#Gameover Image
game_over_surface = pygame.image.load('sprites/message.png').convert_alpha()
game_over_rect = game_over_surface.get_rect(center = (200,325))
# Persistent score data; overwritten from the decrypted save file if present.
data = {
    'score': 0,
    'high_score': 0
}
# Read Json file to load the score
# NOTE(review): decrypt_file(), main_menu() and the other helpers used below
# are not defined in this chunk — presumably defined elsewhere in the full
# source; confirm before running.  Also note the bare `except:` swallows
# every error (including typos), not just a missing file.
try:
    decrypt_file()
    with open('score.txt') as score_file:
        data = json.load(score_file)
    os.remove("score.txt")
except:
    print("No File Has Been Created Yet")
#Game logic
if game_state == "main_menu":
    print("main_menu")
    main_menu()
if game_state == "game":
    print("game")
    while True:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                quit()
            if event.type == pygame.KEYDOWN:
                # Space flaps while playing, restarts after a game over.
                if event.key == pygame.K_SPACE and game_active == True:
                    bird_movement = 0
                    bird_movement -= 6
                    pygame.mixer.Sound.play(pygame.mixer.Sound('audio/wing.wav')).set_volume(0.15)
                if event.key == pygame.K_SPACE and game_active == False:
                    game_active = True
                    pipe_list.clear()
                    bird_rect.center = (100,325)
                    bird_movement = 0
                    score = 0
                    data["score"] = 0
            if event.type == SPAWNPIPE:
                pipe_list.extend(create_pipe())
            if event.type == BIRDFLAP:
                # Cycle through the three wing-flap frames.
                if bird_index < 2:
                    bird_index += 1
                else:
                    bird_index = 0
                bird_surface,bird_rect = bird_animation()
        screen.blit(bg_surface,(0,0))
        if game_active:
            # Bird
            display_fps()
            bird_movement += gravity
            rotated_bird = rotate_bird(bird_surface)
            bird_rect.centery += bird_movement
            screen.blit(rotated_bird,bird_rect)
            game_active = check_collision(pipe_list)
            # Pipes
            pipe_list = move_pipes(pipe_list)
            draw_pipes(pipe_list)
            # Score
            pipe_score_check()
            high_score = update_score(score,high_score)
            score_display('main_game')
            draw_doublescore_powerup()
        else :
            screen.blit(game_over_surface,game_over_rect)
            score_display('game_over')
        # Floor
        floor_x_pos -= 0.7
        draw_floor()
        # Wrap the scrolling floor once it has moved one screen-width left.
        if floor_x_pos <= -400:
            floor_x_pos = 0
        pygame.display.update()
        clock.tick(120)
# NOTE(review): unreachable — the `while True` loop above only exits via quit().
pygame.quit()
11748,
12972,
6057,
11,
25064,
11,
4738,
11,
33918,
11,
640,
11,
28686,
11,
4936,
198,
6738,
45898,
13,
69,
1142,
316,
1330,
38982,
316,
198,
6738,
4704,
278,
1330,
14122,
198,
6738,
640,
1330,
3993,
198,
198,
2539,
796,
366,
41133,
... | 2.23871 | 2,015 |
#
# Copyright (c) 2021 The Markovflow Contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Module containing a state space model."""
from typing import Tuple
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from gpflow import default_float
from gpflow.base import Parameter, TensorType
from gpflow.utilities import triangular
from markovflow.base import SampleShape
from markovflow.block_tri_diag import LowerTriangularBlockTriDiagonal, SymmetricBlockTriDiagonal
from markovflow.gauss_markov import GaussMarkovDistribution, check_compatible
from markovflow.utils import tf_scope_class_decorator, tf_scope_fn_decorator
tfd = tfp.distributions
@tf_scope_class_decorator
class StateSpaceModel(GaussMarkovDistribution):
"""
Implements a state space model. This has the following form:
.. math:: xₖ₊₁ = Aₖ xₖ + bₖ + qₖ
...where:
* :math:`qₖ ~ 𝓝(0, Qₖ)`
* :math:`x₀ ~ 𝓝(μ₀, P₀)`
* :math:`xₖ ∈ ℝ^d`
* :math:`bₖ ∈ ℝ^d`
* :math:`Aₖ ∈ ℝ^{d × d}`
* :math:`Qₖ ∈ ℝ^{d × d}`
* :math:`μ₀ ∈ ℝ^{d × 1}`
* :math:`P₀ ∈ ℝ^{d × d}`
The key reference is::
@inproceedings{grigorievskiy2017parallelizable,
title={Parallelizable sparse inverse formulation Gaussian processes (SpInGP)},
author={Grigorievskiy, Alexander and Lawrence, Neil and S{\"a}rkk{\"a}, Simo},
booktitle={Int'l Workshop on Machine Learning for Signal Processing (MLSP)},
pages={1--6},
year={2017},
organization={IEEE}
}
The model samples :math:`x₀` with an initial Gaussian distribution in :math:`ℝ^d`
(in code :math:`d` is `state_dim`).
The model then proceeds for :math:`n` (`num_transitions`) to generate :math:`[x₁, ... xₙ]`,
according to the formula above. The marginal distribution of samples at a point :math:`k`
is a Gaussian with mean :math:`μₖ, Pₖ`.
This class allows the user to generate samples from this process as well as to calculate the
marginal distributions for each transition.
"""
    def __init__(
        self,
        initial_mean: TensorType,
        chol_initial_covariance: TensorType,
        state_transitions: TensorType,
        state_offsets: TensorType,
        chol_process_covariances: TensorType,
    ) -> None:
        """
        :param initial_mean: A :data:`~markovflow.base.TensorType` containing the initial mean,
            with shape ``batch_shape + [state_dim]``.
        :param chol_initial_covariance: A :data:`~markovflow.base.TensorType` containing the
            Cholesky of the initial covariance, with shape
            ``batch_shape + [state_dim, state_dim]``. That is, unless the
            initial covariance is zero, in which case it is zero.
        :param state_transitions: A :data:`~markovflow.base.TensorType` containing state transition
            matrices, with shape ``batch_shape + [num_transitions, state_dim, state_dim]``.
        :param state_offsets: A :data:`~markovflow.base.TensorType` containing the process means
            bₖ, with shape ``batch_shape + [num_transitions, state_dim]``.
        :param chol_process_covariances: A :data:`~markovflow.base.TensorType` containing the
            Cholesky of the noise covariance matrices, with shape
            ``batch_shape + [num_transitions, state_dim, state_dim]``. That is, unless the
            noise covariance is zero, in which case it is zero.
        """
        super().__init__(self.__class__.__name__)
        # Check that all the inputs agree on `num_transitions` and `state_dim`
        # (the shared symbolic names are unified across entries).
        tf.debugging.assert_shapes(
            [
                (initial_mean, [..., "state_dim"]),
                (chol_initial_covariance, [..., "state_dim", "state_dim"]),
                (state_transitions, [..., "num_transitions", "state_dim", "state_dim"]),
                (state_offsets, [..., "num_transitions", "state_dim"]),
                (chol_process_covariances, [..., "num_transitions", "state_dim", "state_dim"]),
            ]
        )
        # assert batch shapes are exactly matching
        shape = tf.shape(initial_mean)[:-1]
        tf.debugging.assert_equal(shape, tf.shape(chol_initial_covariance)[:-2])
        tf.debugging.assert_equal(shape, tf.shape(state_transitions)[:-3])
        tf.debugging.assert_equal(shape, tf.shape(state_offsets)[:-2])
        tf.debugging.assert_equal(shape, tf.shape(chol_process_covariances)[:-3])

        # store the tensors in self
        self._mu_0 = initial_mean                    # μ₀
        self._A_s = state_transitions                # [A₁ ... Aₙ]
        self._chol_P_0 = chol_initial_covariance     # chol(P₀)
        self._chol_Q_s = chol_process_covariances    # [chol(Q₁) ... chol(Qₙ)]
        self._b_s = state_offsets                    # [b₁ ... bₙ]
@property
def event_shape(self) -> tf.Tensor:
"""
Return the shape of the event.
:return: The shape is ``[num_transitions + 1, state_dim]``.
"""
return tf.shape(self.concatenated_state_offsets)[-2:]
@property
def batch_shape(self) -> tf.TensorShape:
"""
Return the shape of any leading dimensions that come before :attr:`event_shape`.
"""
return self._A_s.shape[:-3]
@property
def state_dim(self) -> int:
"""
Return the state dimension.
"""
return self._A_s.shape[-2]
@property
def num_transitions(self) -> tf.Tensor:
"""
Return the number of transitions.
"""
return tf.shape(self._A_s)[-3]
@property
def cholesky_process_covariances(self) -> TensorType:
"""
Return the Cholesky of :math:`[Q₁, Q₂, ....]`.
:return: A :data:`~markovflow.base.TensorType` with
shape ``[... num_transitions, state_dim, state_dim]``.
"""
return self._chol_Q_s
@property
def cholesky_initial_covariance(self) -> TensorType:
"""
Return the Cholesky of :math:`P₀`.
:return: A :data:`~markovflow.base.TensorType` with shape ``[..., state_dim, state_dim]``.
"""
return self._chol_P_0
@property
def initial_covariance(self) -> tf.Tensor:
"""
Return :math:`P₀`.
:return: A :data:`~markovflow.base.TensorType` with shape ``[..., state_dim, state_dim]``.
"""
return self._chol_P_0 @ tf.linalg.matrix_transpose(self._chol_P_0)
@property
def concatenated_cholesky_process_covariance(self) -> tf.Tensor:
"""
Return the Cholesky of :math:`[P₀, Q₁, Q₂, ....]`.
:return: A tensor with shape ``[... num_transitions + 1, state_dim, state_dim]``.
"""
return tf.concat([self._chol_P_0[..., None, :, :], self._chol_Q_s], axis=-3)
@property
def state_offsets(self) -> TensorType:
"""
Return the state offsets :math:`[b₁, b₂, ....]`.
:return: A :data:`~markovflow.base.TensorType` with
shape ``[..., num_transitions, state_dim]``.
"""
return self._b_s
@property
def initial_mean(self) -> TensorType:
"""
Return the initial mean :math:`μ₀`.
:return: A :data:`~markovflow.base.TensorType` with shape ``[..., state_dim]``.
"""
return self._mu_0
@property
def concatenated_state_offsets(self) -> tf.Tensor:
"""
Return the concatenated state offsets :math:`[μ₀, b₁, b₂, ....]`.
:return: A tensor with shape ``[... num_transitions + 1, state_dim]``.
"""
return tf.concat([self._mu_0[..., None, :], self._b_s], axis=-2)
@property
def state_transitions(self) -> TensorType:
"""
Return the concatenated state offsets :math:`[A₀, A₁, A₂, ....]`.
:return: A :data:`~markovflow.base.TensorType` with
shape ``[... num_transitions, state_dim, state_dim]``.
"""
return self._A_s
@property
def marginal_means(self) -> tf.Tensor:
"""
Return the mean of the marginal distributions at each time point. If:
.. math:: xₖ ~ 𝓝(μₖ, Kₖₖ)
...then return :math:`μₖ`.
If we let the concatenated state offsets be :math:`m = [μ₀, b₁, b₂, ....]` and :math:`A`
be defined as in equation (5) of the SpInGP paper (see class docstring), then:
.. math:: μ = A m = (A⁻¹)⁻¹ m
...which we can do quickly using :meth:`a_inv_block`.
:return: The marginal means of the joint Gaussian, with shape
``batch_shape + [num_transitions + 1, state_dim]``.
"""
# (A⁻¹)⁻¹ m: batch_shape + [num_transitions + 1, state_dim]
return self.a_inv_block.solve(self.concatenated_state_offsets)
@property
def marginal_covariances(self) -> tf.Tensor:
"""
Return the ordered covariances :math:`Σₖₖ` of the multivariate normal marginal
distributions over consecutive states :math:`xₖ`.
:return: The marginal covariances of the joint Gaussian, with shape
``batch_shape + [num_transitions + 1, state_dim, state_dim]``.
"""
return self.precision.cholesky.block_diagonal_of_inverse()
def covariance_blocks(self) -> Tuple[tf.Tensor, tf.Tensor]:
"""
Return the diagonal and lower off-diagonal blocks of the covariance.
:return: A tuple of tensors with respective shapes
``batch_shape + [num_transitions + 1, state_dim]``,
``batch_shape + [num_transitions, state_dim, state_dim]``.
"""
return (
self.marginal_covariances,
self.subsequent_covariances(self.marginal_covariances),
)
@property
def a_inv_block(self) -> LowerTriangularBlockTriDiagonal:
"""
Return :math:`A⁻¹`.
This has the form::
A⁻¹ = [ I ]
[-A₁, I ]
[ -A₂, I ]
[ ᨞ ᨞ ]
[ -Aₙ, I]
...where :math:`[A₁, ..., Aₙ]` are the state transition matrices.
"""
# create the diagonal of A⁻¹
batch_shape = tf.concat([self.batch_shape, self.event_shape[:-1]], axis=0)
identities = tf.eye(self.state_dim, dtype=default_float(), batch_shape=batch_shape)
# A⁻¹
return LowerTriangularBlockTriDiagonal(identities, -self._A_s)
def sample(self, sample_shape: SampleShape) -> tf.Tensor:
"""
Return sample trajectories.
:param sample_shape: The shape (and hence number of) trajectories to sample from
the state space model.
:return: A tensor containing state samples, with shape
``sample_shape + self.batch_shape + self.event_shape``.
"""
sample_shape = tf.TensorShape(sample_shape)
full_sample_shape = tf.concat(
[sample_shape, self.batch_shape, self.event_shape, tf.TensorShape([1])], axis=0
)
epsilons = tf.random.normal(full_sample_shape, dtype=default_float())
b = self.concatenated_state_offsets
z = tf.matmul(self.concatenated_cholesky_process_covariance, epsilons)[..., 0]
conditional_epsilons = b + z
# handle the case of zero sample size: this array has no elements!
if conditional_epsilons.shape.num_elements() == 0:
return conditional_epsilons
# (A⁻¹)⁻¹ m: sample_shape + self.batch_shape + self.event_shape
samples = self.a_inv_block.solve(conditional_epsilons)
return samples
def subsequent_covariances(self, marginal_covariances: tf.Tensor) -> tf.Tensor:
"""
For each pair of subsequent states :math:`xₖ, xₖ₊₁`, return the covariance of their joint
distribution. That is:
.. math:: Cov(xₖ₊₁, xₖ) = AₖPₖ
:param marginal_covariances: The marginal covariances of each state in the model,
with shape ``batch_shape + [num_transitions + 1, state_dim, state_dim]``.
:return: The covariance between subsequent state, with shape
``batch_shape + [num_transitions, state_dim, state_dim]``.
"""
subsequent_covs = self._A_s @ marginal_covariances[..., :-1, :, :]
tf.debugging.assert_equal(tf.shape(subsequent_covs), tf.shape(self.state_transitions))
return subsequent_covs
def log_det_precision(self) -> tf.Tensor:
r"""
Calculate the log determinant of the precision matrix. This uses the precision as
defined in the SpInGP paper (see class summary above).
Precision is defined as:
.. math:: K⁻¹ = (AQAᵀ)⁻¹
so:
.. math::
log |K⁻¹| &= log | Q⁻¹ | (since |A| = 1)\\
&= - log |P₀| - Σₜ log |Qₜ|\\
&= - 2 * (log |chol_P₀| + Σₜ log |chol_Qₜ|)
:return: A tensor with shape ``batch_shape``.
"""
# shape: [...]
log_det = -(
tf.reduce_sum(
input_tensor=tf.math.log(tf.square(tf.linalg.diag_part(self._chol_P_0))), axis=-1,
)
+ tf.reduce_sum(
input_tensor=tf.math.log(tf.square(tf.linalg.diag_part(self._chol_Q_s))),
axis=[-1, -2],
)
)
tf.debugging.assert_equal(tf.shape(log_det), self.batch_shape)
return log_det
def create_non_trainable_copy(self) -> "StateSpaceModel":
"""
Create a non-trainable version of :class:`~markovflow.gauss_markov.GaussMarkovDistribution`.
This is to convert a trainable version of this class back to being non-trainable.
:return: A Gauss-Markov distribution that is a copy of this one.
"""
initial_mean = tf.stop_gradient(self.initial_mean)
state_transitions = tf.stop_gradient(self.state_transitions)
chol_initial_covariance = tf.stop_gradient(self.cholesky_initial_covariance)
chol_process_covariances = tf.stop_gradient(self.cholesky_process_covariances)
state_offsets = tf.stop_gradient(self.state_offsets)
return StateSpaceModel(
initial_mean,
chol_initial_covariance,
state_transitions,
state_offsets,
chol_process_covariances,
)
    def create_trainable_copy(self) -> "StateSpaceModel":
        """
        Create a trainable version of this state space model.

        This is primarily for use with variational approaches where we want to optimise
        the parameters of a state space model that is initialised from a prior state space model.

        The initial mean and state transitions are the same.

        The Cholesky factors of the initial and process covariances are lower
        triangular, so only that part of each matrix is parametrised: the
        `triangular()` constraint (the `tfp.bijectors.FillTriangular` bijector)
        converts between a triangular matrix :math:`[dim, dim]` and a flattened
        vector of shape :math:`[dim (dim + 1) / 2]`.

        :return: A state space model that is a copy of this one whose
            parameters are gpflow `Parameter` objects and can be trained.
        """
        # Wrap each tensor in a gpflow Parameter so an optimiser can update it;
        # the Cholesky factors additionally get the triangular constraint.
        trainable_ssm = StateSpaceModel(
            initial_mean=Parameter(self._mu_0, name="initial_mean"),
            chol_initial_covariance=Parameter(
                self._chol_P_0, transform=triangular(), name="chol_initial_covariance"
            ),
            state_transitions=Parameter(self._A_s, name="state_transitions"),
            state_offsets=Parameter(self._b_s, name="state_offsets"),
            chol_process_covariances=Parameter(
                self._chol_Q_s, transform=triangular(), name="chol_process_covariances"
            ),
        )
        # check that the state space models are the same
        check_compatible(trainable_ssm, self)
        return trainable_ssm
    def _build_precision(self) -> SymmetricBlockTriDiagonal:
        """
        Compute the compact banded representation of the precision matrix
        from the state space model parameters.

        We construct:

        .. math:: K⁻¹ = A⁻ᵀ Q⁻¹ A⁻¹

        using :math:`Q⁻¹` and :math:`A⁻¹` as defined in equations (6) and (8)
        of the SpInGP paper (see class docstring). It can be shown that
        :math:`K⁻¹` is block tri-diagonal with diagonal blocks
        :math:`[P₀⁻¹ + A₁ᵀQ₁⁻¹A₁, Q₁⁻¹ + A₂ᵀQ₂⁻¹A₂, ..., Qₙ⁻¹]` and lower
        off-diagonal blocks :math:`[-Q₁⁻¹A₁, -Q₂⁻¹A₂, ...]`.

        :return: The precision as a `SymmetricBlockTriDiagonal` object.
        """
        # [Q₁⁻¹A₁, Q₂⁻¹A₂, ....Qₙ⁻¹Aₙ]
        # [... num_transitions, state_dim, state_dim]
        inv_q_a = tf.linalg.cholesky_solve(self._chol_Q_s, self._A_s)
        # [A₁ᵀQ₁⁻¹A₁, A₂ᵀQ₂⁻¹A₂, .... AₙQₙ⁻¹Aₙᵀ]
        # [... num_transitions, state_dim, state_dim]
        aqa = tf.matmul(self._A_s, inv_q_a, transpose_a=True)
        # need to pad aqa to make it the same length as the diagonal
        # (the last diagonal block is just Qₙ⁻¹, with no AᵀQ⁻¹A term)
        # [... 1, state_dim, state_dim]
        padding_zeros = tf.zeros_like(self.cholesky_initial_covariance, dtype=default_float())[
            ..., None, :, :
        ]
        # Calculate [P₀⁻¹, Q₁⁻¹, Q₂⁻¹, ....]
        # First create the identities
        # [... num_transitions, state_dim, state_dim]
        identities = tf.eye(
            self.state_dim,
            dtype=default_float(),
            batch_shape=tf.concat([self.batch_shape, self.event_shape[:-1]], axis=0),
        )
        # now use cholesky solve with the identities to create [P₀⁻¹, Q₁⁻¹, Q₂⁻¹, ....]
        # [... num_transitions + 1, state_dim, state_dim]
        concatted_inv_q_s = tf.linalg.cholesky_solve(
            self.concatenated_cholesky_process_covariance, identities
        )
        # [P₀⁻¹ + A₁ᵀQ₁⁻¹A₁, Q₁⁻¹ + A₂ᵀQ₂⁻¹A₂, .... Qₙ₋₁⁻¹ + AₙQₙ⁻¹Aₙᵀ, Qₙ⁻¹]
        # [... num_transitions + 1, state_dim, state_dim]
        diag = concatted_inv_q_s + tf.concat([aqa, padding_zeros], axis=-3)

        shape = tf.shape(self.concatenated_cholesky_process_covariance)
        tf.debugging.assert_equal(tf.shape(diag), shape)
        return SymmetricBlockTriDiagonal(diag, -inv_q_a)
    def _log_pdf_factors(self, states: tf.Tensor) -> tf.Tensor:
        """
        Return the value of the log of the factors of the probability density function (PDF)
        evaluated at a state trajectory::

            [log p(x₀), log p(x₁|x₀), ..., log p(xₖ₊₁|xₖ)]

        ...with x₀ ~ 𝓝(μ₀, P₀) and xₖ₊₁|xₖ ~ 𝓝(Aₖ xₖ + bₖ, Qₖ)

        :param states: The state trajectory, with shape
            ``sample_shape + self.batch_shape + self.event_shape``.
        :return: The log PDF of the factors with shape:
            ``sample_shape + self.batch_shape + [self.num_transitions + 1]``
        """
        tf.debugging.assert_equal(tf.shape(states)[-2:], self.event_shape)
        # log p(x₀)
        initial_pdf = tfd.MultivariateNormalTriL(
            loc=self.initial_mean, scale_tril=self.cholesky_initial_covariance,
        ).log_prob(states[..., 0, :])
        # [A₁ x₀ + b₁, A₂ x₁ + b₂, ..., Aₖ₊₁ μₖ + bₖ₊₁]
        conditional_means = tf.matmul(self._A_s, states[..., :-1, :, None])[..., 0] + self._b_s
        # [log p(x₁|x₀), ..., etc]
        remaining_pdfs = tfd.MultivariateNormalTriL(
            loc=conditional_means, scale_tril=self.cholesky_process_covariances
        ).log_prob(states[..., 1:, :])
        # one factor per time step: the initial marginal followed by the transitions
        return tf.concat([initial_pdf[..., None], remaining_pdfs], axis=-1)
def log_pdf(self, states) -> tf.Tensor:
"""
Return the value of the log of the probability density function (PDF)
evaluated at states. That is:
.. math:: log p(x) = log p(x₀) + Σₖ log p(xₖ₊₁|xₖ) (for 0 ⩽ k < n)
:param states: The state trajectory, with shape
``sample_shape + self.batch_shape + self.event_shape``.
:return: The log PDF, with shape ``sample_shape + self.batch_shape``.
"""
return tf.reduce_sum(self._log_pdf_factors(states), axis=-1)
    def kl_divergence(self, dist: GaussMarkovDistribution) -> tf.Tensor:
        r"""
        Return the KL divergence of the current Gauss-Markov distribution from the specified
        input `dist`. That is:

        .. math:: KL(dist₁ ∥ dist₂)

        To do so we first compute the marginal distributions from the Gauss-Markov form:

        .. math::
            dist₁ = 𝓝(μ₁, P⁻¹₁)\\
            dist₂ = 𝓝(μ₂, P⁻¹₂)

        ...where:

        * :math:`μᵢ` are the marginal means
        * :math:`Pᵢ` are the banded precisions

        The KL divergence is thus given by:

        .. math::
            KL(dist₁ ∥ dist₂) = ½(tr(P₂P₁⁻¹) + (μ₂ - μ₁)ᵀP₂(μ₂ - μ₁) - N - log(|P₂|) + log(|P₁|))

        ...where :math:`N = (\verb |num_transitions| + 1) * \verb |state_dim|` (that is,
        the dimensionality of the Gaussian).

        :param dist: Another similarly parameterised Gauss-Markov distribution.
        :return: A tensor of the KL divergences, with shape ``self.batch_shape``.
        """
        check_compatible(self, dist)
        batch_shape = self.batch_shape
        marginal_covs_1 = self.marginal_covariances
        precision_2 = dist.precision
        # trace term, we use that for any trace tr(AᵀB) = Σᵢⱼ Aᵢⱼ Bᵢⱼ
        # and since the P₂ is symmetric block tri diagonal, we only need the block diagonal and
        # block sub diagonals from P₁⁻¹
        # this is the sub diagonal of P₁⁻¹, [..., num_transitions, state_dim, state_dim]
        subsequent_covs_1 = self.subsequent_covariances(marginal_covs_1)
        # trace_sub_diag must be added twice as the matrix is symmetric, [...]
        trace = tf.reduce_sum(
            input_tensor=precision_2.block_diagonal * marginal_covs_1, axis=[-3, -2, -1]
        ) + 2.0 * tf.reduce_sum(
            input_tensor=precision_2.block_sub_diagonal * subsequent_covs_1, axis=[-3, -2, -1]
        )
        tf.debugging.assert_equal(tf.shape(trace), batch_shape)
        # (μ₂ - μ₁)ᵀP₂(μ₂ - μ₁)
        # [... num_transitions + 1, state_dim]
        mean_diff = dist.marginal_means - self.marginal_means
        # if P₂ = LLᵀ, calculate [Lᵀ(μ₂ - μ₁)] [... num_transitions + 1, state_dim]
        l_mean_diff = precision_2.cholesky.dense_mult(mean_diff, transpose_left=True)
        mahalanobis = tf.reduce_sum(input_tensor=l_mean_diff * l_mean_diff, axis=[-2, -1])  # [...]
        tf.debugging.assert_equal(tf.shape(mahalanobis), batch_shape)
        # N: total dimensionality of the joint Gaussian
        dim = (self.num_transitions + 1) * self.state_dim
        dim = tf.cast(dim, default_float())
        k_l = 0.5 * (
            trace + mahalanobis - dim - dist.log_det_precision() + self.log_det_precision()
        )
        tf.debugging.assert_equal(tf.shape(k_l), batch_shape)
        return k_l
    def normalizer(self):
        """
        Compute the log-normalizer of the distribution
        (page 36 of Thang Bui's thesis), i.e.

            ½ (N log(2π) + log|K| + μᵀ K⁻¹ μ)

        ...where ``N = (num_transitions + 1) * state_dim`` and ``K⁻¹`` is the
        banded precision.

        :return: A tensor with shape ``batch_shape``.
        """
        dim = (self.num_transitions + 1) * self.state_dim
        dim = tf.cast(dim, default_float())
        cst = dim * np.log(2.0 * np.pi)
        # log|K| = -log|K⁻¹|
        log_det = -self.log_det_precision()
        # if P = LLᵀ, calculate Lᵀμ so that μᵀPμ = (Lᵀμ)ᵀ(Lᵀμ)
        # NOTE(review): assumes self.marginals[0] holds the stacked marginal means
        # with shape [... num_transitions + 1, state_dim] — confirm against the
        # `marginals` property.
        l_mean = self.precision.cholesky.dense_mult(self.marginals[0], transpose_left=True)
        mahalanobis = tf.reduce_sum(input_tensor=l_mean * l_mean, axis=[-2, -1])  # [...]
        return 0.5 * (cst + log_det + mahalanobis)
@tf_scope_fn_decorator
def state_space_model_from_covariances(
    initial_mean: tf.Tensor,
    initial_covariance: tf.Tensor,
    state_transitions: tf.Tensor,
    state_offsets: tf.Tensor,
    process_covariances: tf.Tensor,
) -> StateSpaceModel:
    """
    Construct a state space model using the full covariance matrices for convenience.

    :param initial_mean: The initial mean, with shape ``batch_shape + [state_dim]``.
    :param initial_covariance: Initial covariance, with shape
        ``batch_shape + [state_dim, state_dim]``.
    :param state_transitions: State transition matrices, with shape
        ``batch_shape + [num_transitions, state_dim, state_dim]``.
    :param state_offsets: The process means :math:`bₖ`, with shape
        ``batch_shape + [num_transitions, state_dim]``.
    :param process_covariances: Noise covariance matrices, with shape
        ``batch_shape + [num_transitions, state_dim, state_dim]``.
    :return: A `StateSpaceModel` parameterised by the Cholesky factors of the given
        covariances; all-zero covariance matrices are passed through as zero matrices.
    """
    def cholesky_or_zero(covariance: tf.Tensor) -> tf.Tensor:
        """
        This function takes a number of covariance matrices which have been stacked in the batch
        dimensions and, for each matrix if it non-zero computes the Cholesky of the matrix,
        otherwise leaves as-is (i.e. a matrix of zeros).

        :param covariance: tiled covariance matrices, shape
            batch_shape + [dim, dim]
        :return: tiled matrices each of which is either a Cholesky, or Zero matrix, shape
            batch_shape + [dim, dim]
        """
        zeros = tf.zeros_like(covariance)
        tf.debugging.assert_greater_equal(tf.size(covariance), 1)
        # True for every matrix in the batch that is entirely zero.
        mask = tf.reduce_all(tf.math.equal(covariance, zeros), axis=(-2, -1))
        dim = covariance.shape[-1]
        # Broadcast the per-matrix mask over both matrix dimensions.
        mask_expanded = tf.stack([tf.stack([mask] * dim, axis=-1)] * dim, axis=-1)
        batch_identity = tf.broadcast_to(tf.eye(dim, dtype=default_float()), tf.shape(covariance))
        # As all arguments to tf.where are evaluated we need to make sure the Cholesky does not
        # fail, even if it is unused. This is all the following line does, is does not affect the
        # computation.
        fix = tf.where(mask_expanded, batch_identity, tf.zeros_like(batch_identity))
        return tf.where(mask_expanded, zeros, tf.linalg.cholesky(covariance + fix))
    return StateSpaceModel(
        initial_mean=initial_mean,
        chol_initial_covariance=cholesky_or_zero(initial_covariance),
        state_transitions=state_transitions,
        state_offsets=state_offsets,
        chol_process_covariances=cholesky_or_zero(process_covariances),
    )
| [
2,
198,
2,
15069,
357,
66,
8,
33448,
383,
2940,
709,
11125,
25767,
669,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
1... | 2.119453 | 12,440 |
import os
import sys

# Make the sibling library packages importable no matter which directory this
# script is launched from.
thisdir = os.path.dirname(__file__)
libdirs = [
    os.path.join(thisdir, relative)
    for relative in ('../', '../../crypt', '../../config', '../../persist')
]
for libdir in libdirs:
    # Prepend so these local packages take precedence over installed versions.
    if libdir not in sys.path:
        sys.path.insert(0, libdir)
11748,
28686,
11,
25064,
198,
5661,
15908,
796,
28686,
13,
6978,
13,
15908,
3672,
7,
834,
7753,
834,
8,
198,
8019,
15908,
82,
796,
17635,
198,
8019,
15908,
82,
13,
33295,
7,
418,
13,
6978,
13,
22179,
7,
5661,
15908,
11,
705,
40720,
... | 2.319018 | 163 |
from typing import List, Union
import rdflib
from prefixcommons.curie_util import contract_uri, expand_uri, default_curie_maps
from rdflib import Namespace, URIRef
from rdflib.namespace import RDF, RDFS, OWL
from kgx.utils.kgx_utils import get_toolkit
# Biolink model toolkit used to resolve element names and URI mappings.
toolkit = get_toolkit()
m = toolkit.generator.mappings
# `x` collects keys whose URI expansion was a no-op (could not be expanded);
# `mapping` maps each successfully expanded URI onto its biolink element.
x = set()
mapping = {}
for key, value in m.items():
    k = expand_uri(key)
    v = toolkit.get_by_mapping(key)
    if k == key:
        x.add(key)
    else:
        mapping[k] = v
# RDF namespaces used throughout this module.
OBAN = Namespace('http://purl.org/oban/')
BIOLINK = Namespace('http://w3id.org/biolink/vocab/')
# Maps predicate IRIs (lower-cased where built from rdflib terms) onto
# biolink edge labels.
predicate_mapping = {
    'http://purl.obolibrary.org/obo/RO_0002200' : 'has phenotype',
    'http://purl.obolibrary.org/obo/RO_0000091' : 'has disposition',
    'http://purl.obolibrary.org/obo/RO_0003303' : 'causes condition',
    'http://purl.obolibrary.org/obo/RO_0002525' : 'is subsequence of',
    OWL.sameAs.lower() : 'same_as',
    OWL.equivalentClass.lower() : 'same_as',
    RDFS.subClassOf.lower() : 'subclass_of',
    'http://www.w3.org/2000/01/rdf-schema#subPropertyOf' : 'subclass_of',
}
# Add every descendant of 'related to' from the biolink model, keyed by its
# biolink vocabulary IRI (spaces become underscores, commas are dropped).
predicate_mapping.update(
    {
        '{}{}'.format(BIOLINK, n) : n
        for n in
        [x.replace(',', '').replace(' ', '_') for x in toolkit.descendents('related to')]
    }
)
# Finally layer in the toolkit-derived URI mappings.
predicate_mapping.update(mapping)
# Maps class/category IRIs onto biolink category names.
category_mapping = {
    # subclasses mapped onto their superclasses:
    "http://purl.obolibrary.org/obo/SO_0000405" : "sequence feature",
    "http://purl.obolibrary.org/obo/SO_0000001" : "sequence feature",
    "http://purl.obolibrary.org/obo/SO_0000100" : "sequence feature",
    "http://purl.obolibrary.org/obo/SO_0000336" : "sequence feature",
    "http://purl.obolibrary.org/obo/SO_0000340" : "sequence feature",
    "http://purl.obolibrary.org/obo/SO_0000404" : "transcript",
    "http://purl.obolibrary.org/obo/SO_0000460" : "sequence feature",
    "http://purl.obolibrary.org/obo/SO_0000651" : "transcript",
    "http://purl.obolibrary.org/obo/SO_0000655" : "transcript",
    #?
    "http://purl.obolibrary.org/obo/SO_0001217" : "gene",
    "http://purl.obolibrary.org/obo/GENO_0000002" : "sequence variant",
    'http://purl.obolibrary.org/obo/UPHENO_0001002' : 'phenotypic feature',
    # Taken from the yaml
    "http://purl.obolibrary.org/obo/CL_0000000" : "cell",
    "http://purl.obolibrary.org/obo/UBERON_0001062" : "anatomical entity",
    "http://purl.obolibrary.org/obo/ZFA_0009000" : "cell",
    "http://purl.obolibrary.org/obo/UBERON_0004529" : "anatomical projection",
    "http://purl.obolibrary.org/obo/UBERON_0000468" : "multi-cellular organism",
    "http://purl.obolibrary.org/obo/UBERON_0000955" : "brain",
    "http://purl.obolibrary.org/obo/PATO_0000001" : "quality",
    "http://purl.obolibrary.org/obo/GO_0005623" : "cell",
    "http://purl.obolibrary.org/obo/WBbt_0007833" : "organism",
    "http://purl.obolibrary.org/obo/WBbt_0004017" : "cell",
    "http://purl.obolibrary.org/obo/MONDO_0000001" : "disease",
    "http://purl.obolibrary.org/obo/PATO_0000003" : "assay",
    "http://purl.obolibrary.org/obo/PATO_0000006" : "process",
    "http://purl.obolibrary.org/obo/PATO_0000011" : "age",
    "http://purl.obolibrary.org/obo/ZFA_0000008" : "brain",
    "http://purl.obolibrary.org/obo/ZFA_0001637" : "bony projection",
    "http://purl.obolibrary.org/obo/WBPhenotype_0000061" : "extended life span",
    "http://purl.obolibrary.org/obo/WBPhenotype_0000039" : "life span variant",
    "http://purl.obolibrary.org/obo/WBPhenotype_0001171" : "shortened life span",
    "http://purl.obolibrary.org/obo/CHEBI_23367" : "molecular entity",
    "http://purl.obolibrary.org/obo/CHEBI_23888" : "drug",
    "http://purl.obolibrary.org/obo/CHEBI_51086" : "chemical role",
    "http://purl.obolibrary.org/obo/UPHENO_0001001" : "phenotypic feature",
    "http://purl.obolibrary.org/obo/GO_0008150" : "biological_process",
    "http://purl.obolibrary.org/obo/GO_0005575" : "cellular component",
    "http://purl.obolibrary.org/obo/SO_0000704" : "gene",
    "http://purl.obolibrary.org/obo/SO_0000110" : "sequence feature",
    "http://purl.obolibrary.org/obo/GENO_0000536" : "genotype",
}
# Layer in the toolkit-derived URI mappings, then every descendant of
# 'named thing' keyed by its CamelCase biolink vocabulary IRI.
category_mapping.update(mapping)
category_mapping.update(
    {
        '{}{}'.format(BIOLINK, n.replace(',', '').title().replace(' ', '')) : n
        for n in
        toolkit.descendents('named thing')
    }
)
# Maps RDF property IRIs onto the node/edge property names used internally.
property_mapping = {
    OBAN.association_has_subject : 'subject',
    OBAN.association_has_object : 'object',
    OBAN.association_has_predicate : 'predicate',
    BIOLINK.name : 'name',
    RDFS.label : 'name',
    RDF.type : 'type',
    URIRef('http://www.w3.org/1999/02/22-rdf-syntax-ns#type') : 'type',
    # Definition being treated as a description
    BIOLINK.description : 'description',
    URIRef('http://purl.obolibrary.org/obo/IAO_0000115') : 'description',
    URIRef('http://purl.org/dc/elements/1.1/description') : 'description',
    BIOLINK.has_evidence : 'has_evidence',
    URIRef('http://purl.obolibrary.org/obo/RO_0002558') : 'has_evidence',
    BIOLINK.synonym : 'synonym',
    URIRef('http://www.geneontology.org/formats/oboInOwl#hasExactSynonym') : 'synonym',
    OWL.sameAs : 'same_as',
    OWL.equivalentClass : 'same_as',
    BIOLINK.in_taxon : 'in_taxon',
    URIRef('http://purl.obolibrary.org/obo/RO_0002162') : 'in_taxon',
}
# Whether each property may hold multiple values (a list) or a single scalar.
is_property_multivalued = {
    'subject' : False,
    'object' : False,
    'edge_label' : False,
    'description' : False,
    'synonym' : True,
    'in_taxon' : False,
    'same_as' : True,
    'name' : False,
    'has_evidence' : False,
    'provided_by' : True,
    'category' : True,
    'publications' : True,
    'type' : False,
}
def reverse_mapping(d:dict):
    """
    Invert a (possibly many-to-one) dictionary.

    Returns a dictionary where the keys are the values of the given dictionary,
    and the values are sets of all keys from the given dictionary that mapped
    to that value.
    """
    # Iterate d.values(), not d.items(): iterating items would key the result
    # on (key, value) tuples instead of on the values themselves.
    return {value: {k for k, v in d.items() if v == value} for value in d.values()}
# CURIE prefix maps consulted by `contract`, highest priority first; the
# defaults shipped with prefixcommons come last.
cmaps = [{
    'OMIM' : 'https://omim.org/entry/',
    'HGNC' : 'http://identifiers.org/hgnc/',
    'DRUGBANK' : 'http://identifiers.org/drugbank:',
    'biolink' : 'http://w3id.org/biolink/vocab/',
}, {'DRUGBANK' : 'http://w3id.org/data2services/data/drugbank/'}] + default_curie_maps
def contract(uri:URIRef) -> str:
    """
    Contract the given URI to a CURIE using `cmaps`.

    All candidate CURIEs are sorted so that the same one is picked on every
    call; returns None when no prefix map covers the URI.
    """
    candidates = contract_uri(str(uri), cmaps=cmaps)
    if not candidates:
        return None
    candidates.sort()
    return candidates[0]
def process_iri(iri:Union[str, URIRef]) -> str:
    """
    Look up the preferred internal name for ``iri``.

    The predicate, category and property mappings are consulted in that
    order, comparing case-insensitively; the first hit wins. When no
    mapping matches, the IRI is returned unchanged.
    """
    for table in (predicate_mapping, category_mapping, property_mapping):
        for candidate, preferred in table.items():
            if iri.lower() == candidate.lower():
                return preferred
    return iri
# Inverse of category_mapping: category name -> set of IRIs that map to it.
reverse_category_mapping = reverse_mapping(category_mapping)
def walk(node_iri:URIRef, next_node_generator):
    """
    Lazily walk the graph reachable from ``node_iri``, yielding
    ``(iri, cumulative_score)`` pairs in discovery order.

    next_node_generator is a function that takes an iri and returns a generator for iris.
    next_node_generator might return Tuple[iri, int], in which case int is taken to be
    the score of the edge. If no score is returned, then the score will be
    taken to be zero.

    Note: a node may be yielded more than once if it is reached via several
    paths before being popped from the frontier; the most recent score wins.
    """
    if not isinstance(node_iri, URIRef):
        node_iri = URIRef(node_iri)
    to_visit = {node_iri : 0} # Dict[URIRef, Integer]
    visited = {} # Dict[URIRef, Integer]
    while to_visit != {}:
        # popitem() removes an arbitrary frontier entry (LIFO in CPython 3.7+).
        iri, score = to_visit.popitem()
        visited[iri] = score
        for t in next_node_generator(iri):
            if isinstance(t, tuple) and len(t) > 1:
                n, s = t
            else:
                n, s = t, 0
            if n not in visited:
                to_visit[n] = score + s
                yield n, to_visit[n]
# Predicates treated as identity (zero-cost) edges when walking the graph.
equals_predicates = [k for k, v in property_mapping.items() if v == 'same_as']
# Predicates that move one step up the ontological hierarchy (cost one).
isa_predicates = [RDFS.subClassOf, RDF.type]
def find_category(iri:URIRef, rdfgraphs:List[rdflib.Graph]) -> str:
    """
    Finds a category for the given iri, by walking up edges with isa predicates
    and across edges with identity predicates.
    Tries to get a category in category_mapping. If none are found then takes
    the highest superclass it can find.

    :param iri: the IRI (or string) whose category is sought
    :param rdfgraphs: one rdflib.Graph or a collection of them to search
    :return: a biolink category name, the IRI string of the highest superclass
        found, or None when no superclass is reachable
    """
    # Accept a single graph or any collection of graphs.
    if not isinstance(rdfgraphs, (list, tuple, set)):
        rdfgraphs = [rdfgraphs]
    if not isinstance(iri, URIRef):
        iri = URIRef(iri)
    def super_class_generator(iri:URIRef) -> URIRef:
        """
        Generates nodes and scores for walking a path from the given iri to its
        superclasses. Equivalence edges are weighted zero, since they don't count
        as moving further up the ontological hierarchy.
        Note: Not every node generated is guaranteed to be a superclass
        """
        # Roots too generic to be useful as categories.
        ignore = [
            'http://www.w3.org/2002/07/owl#Class',
            'http://purl.obolibrary.org/obo/HP_0000001'
        ]
        for rdfgraph in rdfgraphs:
            # Identity edges (same_as/equivalentClass) in both directions, cost 0.
            for predicate in equals_predicates:
                if not isinstance(predicate, URIRef):
                    predicate = URIRef(predicate)
                for equivalent_iri in rdfgraph.subjects(predicate=predicate, object=iri):
                    if str(equivalent_iri) not in ignore:
                        yield equivalent_iri, 0
                for equivalent_iri in rdfgraph.objects(subject=iri, predicate=predicate):
                    if str(equivalent_iri) not in ignore:
                        yield equivalent_iri, 0
            # isa edges only from subject to object, cost 1.
            for predicate in isa_predicates:
                if not isinstance(predicate, URIRef):
                    predicate = URIRef(predicate)
                for superclass_iri in rdfgraph.objects(subject=iri, predicate=predicate):
                    if str(superclass_iri) not in ignore:
                        yield superclass_iri, 1
    # Prefer a known category at positive distance; otherwise remember the
    # furthest (highest) superclass seen.
    best_iri, best_score = None, 0
    for uri_ref, score in walk(iri, super_class_generator):
        if str(uri_ref) in category_mapping and score > 0:
            return category_mapping[str(uri_ref)]
        elif score > best_score:
            best_iri, best_score = str(uri_ref), score
    return best_iri
| [
6738,
19720,
1330,
7343,
11,
4479,
198,
198,
11748,
374,
67,
2704,
571,
198,
6738,
21231,
9503,
684,
13,
22019,
494,
62,
22602,
1330,
2775,
62,
9900,
11,
4292,
62,
9900,
11,
4277,
62,
22019,
494,
62,
31803,
198,
6738,
374,
67,
2704,... | 2.281772 | 4,493 |
#!/usr/bin/env python
import sys
if __name__ == '__main__':
    # Demo of the Book class.
    # NOTE(review): Book is not defined in this excerpt — presumably defined
    # or imported elsewhere in this file/package; confirm before running.
    # Show the defaults produced by the no-argument constructor.
    book1 = Book()
    print('showing defaults:')
    print(str(book1) + '\n')
    # Populate the attributes one at a time.
    book1.title = 'Animal farm'
    book1.author = 'George Orwell'
    book1.year = 1945
    print(str(book1) + '\n')
    # Construct a fully-initialised book in one call.
    book2 = Book('Alice in Wonderland', 'Lewis Carroll', 1865)
    print(str(book2) + '\n')
    try:
        # A non-string title is expected to be rejected with a TypeError.
        book3 = Book(1984, 'George Orwell', 1948)
    except TypeError as error:
        print(f'### error: {error}', file=sys.stderr)
        sys.exit(1)
    sys.exit(0)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
25064,
628,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1492,
16,
796,
4897,
3419,
198,
220,
220,
220,
3601,
10786,
1477,
7855,
... | 2.253165 | 237 |
from PythonLinearNonlinearControl.models.cartpole \
import CartPoleModel # NOQA
from PythonLinearNonlinearControl.models.first_order_lag \
import FirstOrderLagModel # NOQA
from PythonLinearNonlinearControl.models.two_wheeled \
import TwoWheeledModel # NOQA | [
6738,
11361,
14993,
451,
15419,
29127,
15988,
13,
27530,
13,
26674,
36869,
3467,
198,
220,
220,
220,
1330,
13690,
47,
2305,
17633,
220,
1303,
8005,
48,
32,
198,
6738,
11361,
14993,
451,
15419,
29127,
15988,
13,
27530,
13,
11085,
62,
287... | 3.044944 | 89 |
import numpy as np
from collections import defaultdict
from .utils import *
def semi_gradient_td0(env, pi, vfunc, vfunc_deriv, w,
                      gamma=1, alpha=0.1, N_episodes=1000,
                      ep_max_length=1000, alpha_decay=decay_none):
    """Estimate the state-value function of policy ``pi`` with semi-gradient TD(0).

    Implements the algorithm from Sutton & Barto, Reinforcement Learning,
    2nd ed., p. 203.

    Args:
        env: Environment exposing ``reset()`` and ``step(action)``.
        pi: Policy, evaluated through ``select_action_policy``.
        vfunc: Approximate value function ``v(state, w)``.
        vfunc_deriv: Gradient of ``vfunc`` with respect to ``w``.
        w: Weight vector; updated in place.
        gamma: Discount factor.
        alpha: Base step size.
        N_episodes: Number of episodes to run.
        ep_max_length: Hard cap on the number of steps per episode.
        alpha_decay: Schedule mapping ``(alpha, episode, N_episodes)`` to the
            step size used in that episode; defaults to no decay.

    Returns:
        w: Weights for the state-value function.
    """
    for episode in range(N_episodes):
        # Progress indicator, rewritten in place via the carriage return.
        print(f"\r> Semi-gradient TD(0): Episode {episode + 1}/{N_episodes}", end="")
        step_size = alpha_decay(alpha, episode, N_episodes)
        state = env.reset()
        terminal = False
        n_steps = 0
        while not terminal and n_steps < ep_max_length:
            action = select_action_policy(pi, state)
            next_state, reward, terminal, _ = env.step(action)
            value = vfunc(state, w)
            grad = vfunc_deriv(state, w)
            # The bootstrap target is zero beyond a terminal state.
            next_value = 0 if terminal else vfunc(next_state, w)
            # Semi-gradient TD(0) update: w += α · (TD error) · ∇v(s, w).
            w += step_size * (reward + gamma * next_value - value) * grad
            state = next_state
            n_steps += 1
    print()
    return w
| [
11748,
299,
32152,
355,
45941,
198,
6738,
17268,
1330,
4277,
11600,
198,
6738,
764,
26791,
1330,
1635,
628,
198,
4299,
10663,
62,
49607,
62,
8671,
15,
7,
24330,
11,
31028,
11,
410,
20786,
11,
410,
20786,
62,
1082,
452,
11,
266,
11,
... | 2.23607 | 682 |
# Return the remainder of dividing x by y.
# (The original file only had the lambda commented out, so calling
# `remainder` below raised NameError; define it properly here.)
def remainder(x, y):
    """Return x modulo y (Python semantics: the result takes the sign of y)."""
    return x % y


if __name__ == "__main__":
    print(remainder(1, 3))
| [
2,
8229,
262,
17675,
628,
198,
198,
2,
17675,
62,
50033,
796,
37456,
2124,
11,
331,
25,
2124,
4064,
331,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
3601,
7,
2787,
391,
1082,
7,
16,
11,
... | 2.574468 | 47 |
#!/usr/bin/env python
"""
This is a sentence chunker based on classifier , to assertian the
worth of this method we can always evaluate the performance of the classifier by passing a trained
tagged sentence like this
>> chunker = ConsecutiveNPChunker(train_sents)
>> print chunker.evaluate(test_sents)
For training the classifier, we use the coll2000 collections
which has over 750k words in it. We split this in to parts
train and test, and with that we train all the classifiers
"""
import nltk
import re
"""
The classifier based chunker
"""
# The classifier based tagger
# The classifier based chunker
"""
The unigram NP chunker
"""
# The bigram NP chunker
# The trigram NP chunker
TWO_RULE_GRAMMER = r"""
NP: {<DT|PP\$>?<JJ>*<NN>} # chunk determiner/posessive, adjective and nouns
{<NNP>+} # chunk sequences of proper nouns
{<NN>+}
"""
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
37811,
198,
1212,
318,
257,
6827,
16058,
263,
1912,
319,
1398,
7483,
837,
284,
6818,
666,
262,
198,
9268,
286,
428,
2446,
356,
460,
1464,
13446,
262,
2854,
286,
262,
1398,
7483,
416,
... | 2.795732 | 328 |
import numpy as np
import cv2 as cv
# https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_imgproc/py_contours/py_contours_hierarchy/py_contours_hierarchy.html
# Detect square-ish shapes in an image, fill them red, and save the result.
# Fixed: the original used Python 2 print statements ('print "square"'),
# which are a SyntaxError under Python 3; dead commented-out branches for
# other shapes were removed.
img = cv.imread('square.jpg')
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
# Threshold type 1 == cv.THRESH_BINARY_INV: pixels darker than 127 become white.
ret, thresh = cv.threshold(gray, 127, 255, 1)
# NOTE(review): unpacking two values assumes OpenCV 4.x (3.x returned three).
contours, h = cv.findContours(thresh, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE)
for cnt in contours:
    # Simplify the contour; 1% of the perimeter is the approximation tolerance.
    approx = cv.approxPolyDP(cnt, 0.01 * cv.arcLength(cnt, True), True)
    if len(approx) == 4:  # four vertices -> quadrilateral, treated as a square
        M = cv.moments(cnt)
        if M['m00'] > 10:  # skip tiny specks; m00 is the contour area
            cx = int(M['m10'] / M['m00'])
            cy = int(M['m01'] / M['m00'])
            print("square")
            cv.drawContours(img, [cnt], 0, (0, 0, 255), -1)
            print("Shape: Square, Area: %f, Centroid:(%f, %f)" % (M['m00'], cx, cy))
cv.imwrite('shapedetection_square.jpg', img)
| [
11748,
299,
32152,
355,
45941,
198,
11748,
269,
85,
17,
355,
269,
85,
198,
2,
3740,
1378,
9654,
33967,
12,
29412,
12,
83,
315,
305,
874,
13,
961,
83,
704,
420,
82,
13,
952,
14,
268,
14,
42861,
14,
9078,
62,
83,
44917,
82,
14,
... | 1.809353 | 834 |
from opengmcore import *
#import version
from __version__ import version
import inference
import hdf5
import sys
import types
# Module-level default configuration instance (OpengmConfiguration comes from
# the star import of opengmcore above).
configuration=OpengmConfiguration()
| [
6738,
1280,
39870,
7295,
1330,
1635,
198,
2,
11748,
2196,
220,
198,
6738,
11593,
9641,
834,
1330,
2196,
628,
198,
11748,
32278,
198,
11748,
289,
7568,
20,
198,
11748,
25064,
198,
11748,
3858,
198,
198,
11250,
3924,
28,
18257,
1516,
76,
... | 3.032258 | 62 |
import setka
import torch
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))
import tiny_model
import test_dataset
from test_metrics import tensor_loss as loss | [
11748,
900,
4914,
198,
11748,
28034,
198,
198,
11748,
28686,
198,
11748,
25064,
198,
17597,
13,
6978,
13,
33295,
7,
418,
13,
6978,
13,
22179,
7,
418,
13,
6978,
13,
15908,
3672,
7,
418,
13,
6978,
13,
5305,
6978,
7,
834,
7753,
834,
... | 2.84 | 75 |
import csv
from pathlib import Path
import configure
def csv_to_md_table(input, output, align='center', brow='true', bcol='false'):
    '''Converts a CSV file to a Markdown table.

    * `input` - Source CSV file path string.
    * `output` - Destination Markdown file path string.
    * `align` - Column alignment: `'left'`, `'right'`, `'center'` (default).
    * `brow` - Bold the first row: `'true'` (default), `'false'`.
    * `bcol` - Bold the first column: `'true'`, `'false'` (default).

    Errors (bad alignment value, missing or header-less input file) are
    reported to stdout rather than raised, preserving the original
    best-effort behaviour. A CSV with a header but no data rows now produces
    a table with just the header and divider (previously an error).
    '''
    pipe = '|'
    nl = '\n'
    bold = '**'
    space = ' '
    # Markdown alignment divider cells, keyed by the `align` argument.
    dividers = {
        'left': ':-----|',
        'right': '-----:|',
        'center': ':----:|',
    }
    try:
        if align not in dividers:
            raise Exception("The `align` argument must be 'left', 'right', or 'center'.")
        hdiv = dividers[align]
        print(f'File in: {input}')
        # Read all rows as dictionaries keyed by the header row.
        with Path(input).open('r', encoding='utf-8') as source:
            reader = csv.DictReader(source)
            headers = reader.fieldnames
            data = list(reader)
        if not headers:
            raise Exception('Input file has no header row.')
        # The first column's key, used for optional first-column bolding.
        first_key = headers[0]
        with Path(output).open('w', encoding='utf-8') as save:
            # Header row, optionally bolded.
            save.write(pipe)
            for h in headers:
                if brow == 'true':
                    save.write(space + bold + h + bold + space + pipe)
                else:
                    save.write(space + h + space + pipe)
            save.write(nl)
            # Alignment divider row: one divider cell per column.
            save.write(pipe)
            save.write(hdiv * len(headers))
            save.write(nl)
            # Data rows; optionally bold the first column.
            for d in data:
                save.write(pipe)
                for h in headers:
                    if bcol == 'true' and h == first_key:
                        save.write(space + bold + d[h] + bold + space + pipe)
                    else:
                        save.write(space + d[h] + space + pipe)
                save.write(nl)
        print(f'File out: {output}')
    except Exception as e:
        print(f'Error: {input}')
        print(e)
    return
# -----------------------------------
# Tasks: convert each configured CSV to a Markdown table
# (centered columns, bold header row, plain first column).
# -----------------------------------
csv_to_md_table(configure.CSV_THK, configure.MD_THK, 'center', 'true', 'false')
csv_to_md_table(configure.CSV_FAN_2, configure.MD_FAN_2, 'center', 'true', 'false')
csv_to_md_table(configure.CSV_FAN_3, configure.MD_FAN_3, 'center', 'true', 'false')
csv_to_md_table(configure.CSV_FAN_4, configure.MD_FAN_4, 'center', 'true', 'false')
csv_to_md_table(configure.CSV_FAN_5, configure.MD_FAN_5, 'center', 'true', 'false')
| [
11748,
269,
21370,
198,
6738,
3108,
8019,
1330,
10644,
198,
11748,
17425,
198,
198,
4299,
269,
21370,
62,
1462,
62,
9132,
62,
11487,
7,
15414,
11,
5072,
11,
10548,
11639,
16159,
3256,
4772,
11639,
7942,
3256,
275,
4033,
11639,
9562,
6,
... | 2.333633 | 1,112 |