content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
# Need to split up to avoid duplication
# The below function would be used to find occurrences where the source word is in the translation text.
'''
def search_single_word(self, language, word):
# Translate single word
found_words = ""
if language == '-en':
for dict_word in self.wordlist:
if word.lower() in dict_word[1].lower():
if found_words == "":
found_words = dict_word[0]
else:
found_words = found_words + ", " + dict_word[0]
elif language == '-ar':
for dict_word in self.wordlist:
if word.lower() in dict_word[0].lower():
if found_words == "":
found_words = dict_word[1]
else:
found_words = found_words + ", " + dict_word[1]
if found_words == "":
return "Unable to find word"
else:
return found_words
'''
| [
628,
198,
2,
10664,
284,
6626,
510,
284,
3368,
50124,
198,
2,
383,
2174,
2163,
561,
307,
973,
284,
1064,
40279,
810,
262,
2723,
1573,
318,
287,
262,
11059,
2420,
13,
198,
7061,
6,
198,
220,
220,
220,
825,
2989,
62,
29762,
62,
4775... | 1.95057 | 526 |
import pandas as pd
csv_file = pd.read_csv('http://archive.ics.uci.edu/ml/machine-learning-databases/communities/communities.data',header=None)
df = csv_file[[0,len(csv_file.columns)-1]]
df.columns = ['label', 'value']
grouped_by_state = df.groupby('label')
aa = grouped_by_state['value'].sum()/grouped_by_state['value'].count()
final_frame = pd.DataFrame([aa.index,aa.values],).transpose()
final_frame.columns=['label','svalue']
final_frame.to_excel("usa.xlsx",index=False)
| [
11748,
19798,
292,
355,
279,
67,
198,
198,
40664,
62,
7753,
796,
279,
67,
13,
961,
62,
40664,
10786,
4023,
1378,
17474,
13,
873,
13,
42008,
13,
15532,
14,
4029,
14,
30243,
12,
40684,
12,
19608,
18826,
14,
10709,
871,
14,
10709,
871,... | 2.589189 | 185 |
import pytest
from vetiver import mock, VetiverModel, VetiverAPI
from fastapi.testclient import TestClient
| [
11748,
12972,
9288,
198,
198,
6738,
20202,
1428,
1330,
15290,
11,
49744,
1428,
17633,
11,
49744,
1428,
17614,
198,
6738,
3049,
15042,
13,
9288,
16366,
1330,
6208,
11792,
628,
628
] | 3.7 | 30 |
import os
import struct
import sys
from __pypy__ import time
if sys.platform.startswith("linux"):
| [
11748,
28686,
198,
11748,
2878,
198,
11748,
25064,
198,
6738,
11593,
79,
4464,
88,
834,
1330,
640,
628,
198,
361,
25064,
13,
24254,
13,
9688,
2032,
342,
7203,
23289,
1,
2599,
198
] | 3.125 | 32 |
# asteroid game
from PIL import ImageFont, ImageDraw, Image
import cv2
import random
import numpy as np
import pathlib
from playsound import playsound
| [
2,
27460,
983,
220,
198,
198,
6738,
350,
4146,
1330,
7412,
23252,
11,
7412,
25302,
11,
7412,
198,
11748,
269,
85,
17,
198,
11748,
4738,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
3108,
8019,
198,
198,
6738,
5341,
633,
1330,
5341,... | 2.233333 | 90 |
"""Python module to define and hold global variables."""
import tcod
context: tcod.context.Context # Active context.
console: tcod.console.Console # Active console.
| [
37811,
37906,
8265,
284,
8160,
290,
1745,
3298,
9633,
526,
15931,
198,
11748,
256,
19815,
198,
198,
22866,
25,
256,
19815,
13,
22866,
13,
21947,
220,
1303,
14199,
4732,
13,
198,
41947,
25,
256,
19815,
13,
41947,
13,
47581,
220,
1303,
... | 3.733333 | 45 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-08-22 09:05
from __future__ import unicode_literals
from django.db import migrations
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
940,
13,
22,
319,
2177,
12,
2919,
12,
1828,
7769,
25,
2713,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
... | 2.690909 | 55 |
import pandas as pd
import numpy as np
from src.utils import (
load_config,
load_parameters,
)
from src.preprocessing_pipeline import impute_age
def test_impute_age():
"""Test the impute_age function"""
# Load in the test configuration & parameters
config = load_config(".env-test")
parameters = load_parameters(parameters_path=config["parameters_path"])
age_codes = (
parameters["pipeline_parameters"]["impute_age_kw_args"]
["age_codes"]
)
# Create the data
data = [
dict(id=1, col1="gen_male", col2=np.nan),
dict(id=2, col1="gen_female", col2=None),
dict(id=3, col1="young_female", col2=np.nan),
dict(id=4, col1="young_male", col2=None),
dict(id=5, col1="other_male", col2=np.nan),
dict(id=6, col1="other_female", col2=None),
dict(id=7, col1="gen_male", col2=12),
dict(id=8, col1="gen_female", col2=22),
dict(id=9, col1="young_female", col2=32),
dict(id=10, col1="young_male", col2=42),
dict(id=11, col1="other_male", col2=52),
dict(id=12, col1="other_female", col2=62),
]
df = pd.DataFrame(data).set_index("id", drop=True)
# Run the function
df_out = impute_age(
df=df,
source_column="col2",
title_cat_column="col1",
age_codes=age_codes
)
# Run the tests
assert df_out["col2"].loc[1] == 30
assert df_out["col2"].loc[2] == 35
assert df_out["col2"].loc[3] == 21
assert df_out["col2"].loc[4] == 5
assert df_out["col2"].loc[5] == 40
assert df_out["col2"].loc[6] == 50
assert df_out["col2"].loc[7] == 12
assert df_out["col2"].loc[8] == 22
assert df_out["col2"].loc[9] == 32
assert df_out["col2"].loc[10] == 42
assert df_out["col2"].loc[11] == 52
assert df_out["col2"].loc[12] == 62
| [
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
12351,
13,
26791,
1330,
357,
198,
220,
220,
220,
3440,
62,
11250,
11,
198,
220,
220,
220,
3440,
62,
17143,
7307,
11,
198,
8,
198,
6738,
12351,
13,
3866,... | 2.128176 | 866 |
# Import necessary packages
from __future__ import print_function
import os
import sys
import cv2
import numpy as np
# Create data matrix from a list of images
# Read images from the directory
# Add the weighted eigen faces to the mean face
if __name__ == '__main__':
# Number of EigenFaces
NUM_EIGEN_FACES = 10
# Maximum weight
MAX_SLIDER_VALUE = 255
# Directory containing images
dirName = "images"
# Read images
images = readImages(dirName)
# Size of images
sz = images[0].shape
# Create data matrix for PCA.
data = createDataMatrix(images)
# Compute the eigenvectors from the stack of images created
print("Calculating PCA ", end="...")
mean, eigenVectors = cv2.PCACompute(data, mean=None, maxComponents=NUM_EIGEN_FACES)
print ("DONE")
averageFace = mean.reshape(sz)
eigenFaces = [];
for eigenVector in eigenVectors:
eigenFace = eigenVector.reshape(sz)
eigenFaces.append(eigenFace)
# Create window for displaying Mean Face
cv2.namedWindow("Result", cv2.WINDOW_AUTOSIZE)
# Display result at 2x size
output = cv2.resize(averageFace, (0,0), fx=2, fy=2)
cv2.imshow("Result", output)
# Create Window for trackbars
cv2.namedWindow("Trackbars", cv2.WINDOW_AUTOSIZE)
sliderValues = []
# Create Trackbars
for i in xrange(0, NUM_EIGEN_FACES):
sliderValues.append(MAX_SLIDER_VALUE/2)
cv2.createTrackbar( "Weight" + str(i), "Trackbars", MAX_SLIDER_VALUE/2, MAX_SLIDER_VALUE, createNewFace)
# You can reset the sliders by clicking on the mean image.
cv2.setMouseCallback("Result", resetSliderValues);
print('''Usage:
Change the weights using the sliders
Click on the result window to reset sliders
Hit ESC to terminate program.''')
cv2.waitKey(0)
cv2.destroyAllWindows()
| [
2,
17267,
3306,
10392,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
269,
85,
17,
198,
11748,
299,
32152,
355,
45941,
198,
198,
2,
13610,
1366,
17593,
422,
257,
1351,
286,
4263,
... | 2.790997 | 622 |
from controllers._base_controller import app
from controllers.index_controller import *
from controllers.admin_controller import *
from controllers.user_controller import *
from controllers.api_controller import *
# FastAPIのルーティング用関数
app.add_api_route('/', index)
app.add_api_route('/admin', admin) # management view for administrator
app.add_api_route('/users', users) # management view for administrator
# app.add_api_route('/api/add-address', api_addaddress) # management view for administrator
# app.add_api_route('/api/download', api_download) # management view for administrator | [
6738,
20624,
13557,
8692,
62,
36500,
1330,
598,
198,
6738,
20624,
13,
9630,
62,
36500,
1330,
1635,
198,
6738,
20624,
13,
28482,
62,
36500,
1330,
1635,
198,
6738,
20624,
13,
7220,
62,
36500,
1330,
1635,
198,
6738,
20624,
13,
15042,
62,
... | 3.548193 | 166 |
# Copyright 2022 The T5X Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pjit partitioner with Mixture of Experts overrides."""
from typing import Any, Callable, Optional, Sequence, Union
from absl import logging
from flax import core as flax_core
import jax
import numpy as np
from t5x import adafactor
from t5x import partitioning as t5x_partitioning
from t5x import train_state as train_state_lib
from t5x.contrib.moe import training_utils
DataLayout = t5x_partitioning.DataLayout
FlaxOptimTrainState = train_state_lib.FlaxOptimTrainState
HardwareMesh = t5x_partitioning.HardwareMesh
InferenceState = train_state_lib.InferenceState
LogicalAxisRules = t5x_partitioning.LogicalAxisRules
PartitionSpec = t5x_partitioning.PartitionSpec
Pytree = Any
TrainState = train_state_lib.TrainState
class MoePjitPartitioner(t5x_partitioning.PjitPartitioner):
"""Pjit partitioner with overrides for Mixture of Experts support.
This MoE partitioner has two overrides relative to the default partitioner:
(1) It prepends an 'expert' axis to all MoE optimizer state terms, so that
they are sharded along the 'expert' axis; see get_logical_axes().
(2) In cases where model parallelism is used and the number of experts is less
than the number of devices, we treat the 'model' axis as a secondary data
axis. This allows us to decouple expert parallelism ('data' mesh axis)
from data parallelism ('data' and 'model' axes).
"""
def __init__(self,
num_experts: int,
num_partitions: Optional[int] = None,
model_parallel_submesh: Optional[HardwareMesh] = None,
params_on_devices: bool = True,
logical_axis_rules: Optional[LogicalAxisRules] = None,
state_filter_fn: Optional[Callable[[str], bool]] = None):
"""Configures the partitioner.
Args:
num_experts: Total number of experts across all devices.
num_partitions: Specifies the size of the model parallel submesh to be
automatically selected for the current topology. See
`model_parallel_submesh` for details on how this submesh is used.
Mutually exclusive with `model_parallel_submesh`.
model_parallel_submesh: 4-tuple that specifies the `(x, y, z, c)` submesh
model-parallel device tile -- an axis of accelerator parallelism
orthogonal to data parallelism. See t5x/partitioning.py for details.
This argument is mutually exclusive with `num_partitions`.
params_on_devices: Whether to keep the params on devices. If False, params
stay in the host memory.
logical_axis_rules: A priority-ordered sequence of KV tuples that maps
logical axis names to either `None` (not sharded), 'model' (to shard
across the model-parallel submesh), or 'data' (to shard across the
data-parallel submesh).
state_filter_fn: Function to identify which optimizer state axis rules
should be overridden to be sharded along the 'expert' axis. If None
(default), Adafactor expert sharding overrides are used.
"""
# If True, treat 'model' axis as secondary data axis.
self.two_data_axes = _override_model_axis(num_experts, num_partitions,
model_parallel_submesh)
if self.two_data_axes:
# Override num_partitions to repurpose the 'model' axis as a secondary
# data axis, along which only the batch is sharded. Experts will be
# replicated along this secondary data axis.
num_partitions = jax.device_count() // num_experts
# Override user specified model parallel submesh. Rely on T5X partitioning
# to determine new submesh from updated `num_partitions`.
logging.info(
'Overriding user specified `model_parallel_submesh`=%s to support '
'expert parallelism for updated `num_partitions`=%d',
model_parallel_submesh, num_partitions)
model_parallel_submesh = None
super().__init__(
num_partitions=num_partitions,
model_parallel_submesh=model_parallel_submesh,
params_on_devices=params_on_devices,
logical_axis_rules=logical_axis_rules)
self._state_filter_fn = state_filter_fn
def get_data_layout(self,
batch_size: Optional[int] = None,
host_index: Optional[int] = None) -> DataLayout:
"""Returns filled `DataLayout` based on the partitioned model layout.
Overrides default data layout in case were both mesh axes ('data' and
'model') are treated as data axes.
Args:
batch_size: If set, indicates the requested batch size. If not set, the
batch size is inferred from the layout.
host_index: Indicates the host index to use for the calculations, if not
set - use JAX-provided one. Should be in [0, num_hosts) interval and the
order should match the order of corresponding CPU devices in
`jax.devices()`.
Returns:
Filled `DataLayout` structure.
"""
if self.two_data_axes:
if host_index is not None:
raise NotImplementedError('Explicit host_index is not yet implemented.')
mesh_size = self._local_chunker.global_mesh.shape[
'data'] * self._local_chunker.global_mesh.shape['model']
batch_size = batch_size or mesh_size
if batch_size % mesh_size:
raise ValueError(
f'Batch size ({batch_size}) must be divisible by corresponding '
f'mesh size ({mesh_size}).')
num_shards = self._local_chunker.num_chunks['data']
if batch_size % num_shards:
raise ValueError(
f'Batch size ({batch_size}) must be divisible by number of '
f'replicas ({num_shards}).')
replica_id = self._local_chunker.get_local_chunk_info(
(batch_size,), ('data', 'model')).replica_id
return DataLayout(
batch_size=batch_size,
shard_id=self._local_chunker.chunk_ids['data'],
num_shards=num_shards,
is_first_host_in_replica_set=(replica_id == 0))
else:
return super().get_data_layout(batch_size, host_index)
def get_logical_axes(
self, train_state: Union[FlaxOptimTrainState, InferenceState]
) -> Union[FlaxOptimTrainState, InferenceState]:
"""Returns a copy of TrainState with Optional[AxisNames] as leaves.
Overrides the default logical axes by prepending the 'expert' axis to any
MoE optimizer state terms (identified by self._state_filter_fn) so they are
correctly sharded along the 'expert' axis.
Args:
train_state: Object holding all relevant training of inference state.
Returns:
State object matching structure of input train_state but with axis names
as leaves.
"""
logical_axes = train_state.as_logical_axes()
if isinstance(logical_axes, InferenceState):
# InferenceState does not contain any optimizer state, so we skip all
# expert partitioning overrides.
return logical_axes
else:
train_state: FlaxOptimTrainState
state_filter_fn = (
self._state_filter_fn or _infer_state_filter_fn(train_state))
if state_filter_fn is None:
# No state updates required.
return logical_axes
prepend_expert = lambda x: PartitionSpec( # pylint: disable=g-long-lambda
'expert',) + x if x else PartitionSpec('expert',)
optimizer_axes = logical_axes._optimizer # pylint: disable=protected-access
state_dict = flax_core.unfreeze(optimizer_axes.state_dict())
state_dict['state']['param_states'] = training_utils.tree_map_with_names(
prepend_expert, state_dict['state']['param_states'], state_filter_fn)
return train_state.restore_state(state_dict)
def partition(
self,
fn: Callable, # pylint: disable=g-bare-generic
in_axis_resources: Pytree,
out_axis_resources: Pytree,
static_argnums: Union[int, Sequence[int]] = (),
donate_argnums: Union[int, Sequence[int]] = ()
) -> t5x_partitioning.PjittedFnWithContext:
"""Partitions the computation using pjit.
Overrides the default pjit partitioning in cases where expert and data axes
are decoupled -- wherein we treat the 'model' axis as a secondary data axis.
Args:
fn: Function to partition.
in_axis_resources: Pytree of structure matching that of arguments to `fn`,
with all actual arguments replaced by resource assignment
specifications.
out_axis_resources: Like `in_axis_resources`, but specifies resource
assignment for function outputs.
static_argnums: Specifies which positional arguments to treat as static
(compile-time constant) in the partitioned function.
donate_argnums: Specifies which argument buffers are "donated" to the
computation.
Returns:
A partitioned version of the input function.
"""
if self.two_data_axes:
# Both axes are used for data parallelism in this case, so we override the
# partition specs.
in_axis_resources = _override_partition_specs(in_axis_resources)
out_axis_resources = _override_partition_specs(out_axis_resources)
pjitted = t5x_partitioning.pjit(
fn,
in_axis_resources=in_axis_resources,
out_axis_resources=out_axis_resources,
static_argnums=static_argnums,
donate_argnums=donate_argnums,
backend=self._backend)
return t5x_partitioning.PjittedFnWithContext(pjitted, self.mesh,
self._logical_axis_rules)
def standard_logical_axis_rules(
num_experts: int,
num_partitions: Optional[int] = None,
model_parallel_submesh: Optional[HardwareMesh] = None,
activation_partitioning_dims: int = 1,
parameter_partitioning_dims: int = 1,
additional_rules: Optional[LogicalAxisRules] = None):
"""Returns partitioning rules for MoE models.
The partitioning rules vary based on whether the expert and data axes need to
be decoupled; see also MoePjitPartitioner for details of when expert and data
axes need to be decouple.
Args:
num_experts: Total number of experts across all devices.
num_partitions: Size of the model parallel submesh. Model parallelism is
only used if num_model_partitions > 1. Ignored if model_parallel_submesh
is specified.
model_parallel_submesh: 4-tuple that specifies the `(x, y, z, c)` submesh
model-parallel device tile -- an axis of accelerator parallelism
orthogonal to data parallelism. Model parallelism is only used if
np.prod(model_parallel_submesh) > 1. Mutually exclusive with
`num_partitions`.
activation_partitioning_dims: Enables 2-D activation sharding when set to 2.
parameter_partitioning_dims: Enables 2-D parameter sharding when set to 2.
additional_rules: Additional rules (a sequence of tuples) that will be
appended to the standard rules.
Returns:
Sequence of logical axis rules.
"""
default_rules = t5x_partitioning.standard_logical_axis_rules(
activation_partitioning_dims, parameter_partitioning_dims)
moe_rules = [
('expert', 'data'), # Shard experts along the data axis
('expert_mlp', 'model'), # Expert MLPs partitioned along model axis
('expert_group', None), # Replicated axis for all-to-all constraints
('expert_replicas', None), # Experts replicated along this axis
('unmodeled', None), # Replicated weights
]
standard_rules = list(default_rules) + moe_rules
if additional_rules:
standard_rules.extend(additional_rules)
if _override_model_axis(num_experts, num_partitions, model_parallel_submesh):
overridden_rules = []
for logical_axis, mesh_axis in standard_rules:
if logical_axis == 'batch':
# Because we now treat the 'model' axis as a second data axis, we want
# to shard batches across both axes.
overridden_mesh_axis = ('data', 'model')
elif logical_axis == 'expert_replicas':
# "model" axis is repurposed as a second data axis, along which experts
# are replicated.
overridden_mesh_axis = 'model'
elif mesh_axis == 'model':
# Any weights ordinarily partitioned along the model axis, should be
# explicitly replicated.
overridden_mesh_axis = None
else:
overridden_mesh_axis = mesh_axis
overridden_rules.append((logical_axis, overridden_mesh_axis))
return overridden_rules
else:
return standard_rules
def data_partition_spec(two_data_axes: bool) -> PartitionSpec:
"""Returns data partitioning spec.
Args:
two_data_axes: If True, use 'model' axis as secondary data axis. Otherwise,
only use 'data' axis for data sharding.
Returns:
Mesh dependent partition spec.
"""
if two_data_axes:
# Use 'model' axis as secondary data axis. Shard batches across both axes.
return PartitionSpec(('data', 'model'),)
else:
return PartitionSpec('data',)
def _override_model_axis(
num_experts: int, num_partitions: Optional[int],
model_parallel_submesh: Optional[HardwareMesh]) -> bool:
"""Returns true iff there is no model parallelism & num experts < num devices.
Args:
num_experts: Total number of experts across all devices.
num_partitions: Size of the model parallel submesh. Model parallelism is
only used if num_model_partitions > 1. Mutually exclusive with
`model_parallel_submesh`.
model_parallel_submesh: 4-tuple that specifies the `(x, y, z, c)` submesh
model-parallel device tile -- an axis of accelerator parallelism
orthogonal to data parallelism. Model parallelism is only used if
np.prod(model_parallel_submesh) > 1. Mutually exclusive with
`num_partitions`.
Returns:
True if there is no model parallelism & num experts < num devices; False
otherwise.
"""
if (num_partitions is None) == (model_parallel_submesh is None):
raise ValueError(
'One, and only one, of {num_partitions, model_parallel_submesh} must '
'be specified. Received: %s and %s' %
(num_partitions, model_parallel_submesh))
if num_experts == 0 or jax.device_count() <= num_experts:
# No expert replication required. No need to override model mesh axis.
return False
return ((num_partitions is not None and num_partitions <= 1) or
(model_parallel_submesh is not None and
np.prod(model_parallel_submesh) <= 1))
def _override_partition_specs(resources: Pytree):
"""Override axis resources for two data axes setup.
In the two data axes setup, we treat the 'model' axis as a secondary data
axis. To this end, we override any hardcoded, raw partition specs:
- PartitionSpec('data',) -> PartitionSpec(('data', 'model'),)
- PartitionSpec('model',) -> None
There is no need to override any params or optimizer state as these will
inherit the correct specs from the logical axis rules; see
standard_logical_axis_rules().
Args:
resources: Axis resource assignment specifications.
Returns:
Axis resources with partition specs overridden to use 'model' as secondary
'data' axis.
"""
def _maybe_overridde_spec(axis_resource: Pytree):
"""Overrides "data" and "model" partition specs; leaves others unchanged."""
if axis_resource == PartitionSpec('data',):
# Shard all batches across both axes.
return PartitionSpec(('data', 'model'),)
elif axis_resource == PartitionSpec('model',):
# No model parallelism.
return None
else:
return axis_resource
if resources is None:
return resources
elif not isinstance(resources, Sequence):
return _maybe_overridde_spec(resources)
else:
overridden_resources = []
for resource in resources:
overridden_resources.append(_maybe_overridde_spec(resource))
return tuple(overridden_resources)
def _infer_state_filter_fn(
train_state: FlaxOptimTrainState) -> Optional[Callable[[str], bool]]:
"""Infers relevant regex matching sharded expert model state for optimizer.
Only the Adafactor optimizer is currently supported.
The model state generally inherits the correct partitioning specs from the
model parameters, except in cases where the kernel is factored (`v_col` and
`v_row` terms); see derive_logical_axes():
https://github.com/google-research/t5x/blob/main/t5x/adafactor.py#L591. For
those cases, we use the state_filter_fn to identify the factored kernel terms
that need to be partitioned along the expert axis.
Args:
train_state: Object holding optimizer and optimizer state (parameters).
Returns:
Function to identify which model state is sharded along 'expert' axis.
Raises:
ValueError if optimizer (on train state) is not an Adafactor optimizer.
"""
optimizer = train_state._optimizer # pylint: disable=protected-access
optimizer_def = optimizer.optimizer_def
# TODO(jamesleethorp): Revisit once other T5X optimizers are available.
if not isinstance(optimizer_def, adafactor.Adafactor):
raise ValueError('Inferred MoE overrides are currently only available for '
f'the Adafactor optimizer. Received: {optimizer_def}')
if optimizer_def.hyper_params.factored:
# Factored kernel terms (`v_col` and `v_row`) need to be identified for
# expert sharding.
return training_utils.match_fn(r'.*expert.*/kernel/v_.*')
else:
# Non-factored kernel terms (`v`) inherit the correct specs, so no state
# updates will be required.
return None
| [
2,
15069,
33160,
383,
309,
20,
55,
46665,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
... | 2.800776 | 6,440 |
# Web app UI
import sqlite3, config
from fastapi import FastAPI, Request
from fastapi.templating import Jinja2Templates
from datetime import date
app = FastAPI()
templates = Jinja2Templates(directory="templates") # configuring HTML templates dirctory - display logic
@app.get("/") # all get request will be routed to the index function - / route is the base route - / decorator
@app.get("/stock/{symbol}")
| [
2,
5313,
598,
12454,
198,
11748,
44161,
578,
18,
11,
4566,
198,
6738,
3049,
15042,
1330,
12549,
17614,
11,
19390,
198,
6738,
3049,
15042,
13,
11498,
489,
803,
1330,
17297,
6592,
17,
12966,
17041,
198,
6738,
4818,
8079,
1330,
3128,
198,
... | 3.436975 | 119 |
# -*- coding: utf-8 -*-
# Copyright 2018-2019 Jacob M. Graving <jgraving@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import h5py
import os
import pandas as pd
import ipdb
from deepposekit.io.DataGenerator import DataGenerator
__all__ = ["initialize_dataset", "initialize_skeleton", "merge_new_images", "update_skeleton"]
def initialize_skeleton(skeleton):
""" Initialize the skeleton from input data.
Takes in either a .csv or .xlsx file and makes a DataFrame.
Parameters
----------
skeleton: pandas.DataFrame
Filepath of the .csv or .xlsx file that has indexed information
on name of the keypoint (part, e.g. head), parent (the direct
connecting part, e.g. neck connects to head, parent is head),
and swap (swapping positions with a part when reflected over X).
"""
if isinstance(skeleton, str):
if skeleton.endswith(".csv"):
skeleton = pd.read_csv(skeleton)
elif skeleton.endswith(".xlsx"):
skeleton = pd.read_excel(skeleton)
else:
raise ValueError("skeleton must be .csv or .xlsx file")
elif isinstance(skeleton, pd.DataFrame):
skeleton = skeleton
else:
raise TypeError("skeleton must be type `str` or pandas.DataFrame")
if "name" not in skeleton.columns:
raise KeyError("skeleton file must contain a `name` column")
elif "parent" not in skeleton.columns:
raise KeyError("skeleton file must contain a `parent` column")
if "x" not in skeleton.columns:
skeleton["x"] = -1
if "y" not in skeleton.columns:
skeleton["y"] = -1
if "tree" not in skeleton.columns:
skeleton["tree"] = -1
for idx, name in enumerate(skeleton["parent"].values):
branch = np.where(skeleton["name"] == name)[0]
if branch.shape[0] > 0:
branch = branch[0]
skeleton.loc[idx, "tree"] = branch
if "swap_index" not in skeleton.columns:
skeleton["swap_index"] = -1
for idx, name in enumerate(skeleton["name"].values):
for jdx, swap_name in enumerate(skeleton["swap"].values):
if swap_name == name:
skeleton.loc[idx, "swap_index"] = jdx
return skeleton
def initialize_dataset(
datapath, images, skeleton, keypoints=None, dataset="images", overwrite=False
):
"""
Intialize an image dataset for annotation as an h5 file
Parameters
----------
datapath : str
The path to the annotations file. Must be .h5
e.g. '/path/to/file.h5'
images : ndarray, shape (n_images, height, width, channels)
A numpy array containing image data.
`images.dtype` should be np.uint8
skeleton: str or pandas.DataFrame
Filepath of the .csv or .xlsx file that has indexed information
on name of the keypoint (part, e.g. head), parent (the direct
connecting part, e.g. neck connects to head, parent is head),
and swap (swapping positions with a part when reflected).
See example files for more information.
keypoints : None or ndarray, shape (n_images, n_keypoints, 2)
Optionally pass keypoints for initializing annotations for the
new images.
dataset : str, default = "images"
The name of the dataset within the h5 file to save the images.
overwrite: bool, default = False
Whether to overwrite an existing .h5 file with the same name.
"""
if os.path.exists(datapath) and overwrite is False:
raise OSError(
"Annotation set {} already exists. Delete the file or set `overwrite=True`.".format(
datapath
)
)
if not isinstance(images, np.ndarray):
raise TypeError(
"images must be ndarray with shape (n_images, height, width, channels)"
)
elif images.ndim != 4:
raise TypeError(
"images must be ndarray with shape (n_images, height, width, channels)"
)
elif images.dtype != np.uint8:
raise TypeError("`images` must be ndarray with dtype np.uint8")
if keypoints is not None:
if not isinstance(keypoints, np.ndarray):
raise TypeError(
"keypoints must be None or ndarray with shape (n_images, n_keypoints, 2)"
)
elif keypoints.ndim != 3:
raise TypeError(
"images must be ndarray with shape (n_images, n_keypoints, 2)"
)
elif keypoints.shape[0] != images.shape[0]:
raise IndexError(
"shape for `images` and `keypoints` must match along axis 0."
)
n_images = images.shape[0]
height = images.shape[1]
width = images.shape[2]
n_channels = images.shape[3]
skeleton = initialize_skeleton(skeleton)
skeleton_names = skeleton["name"].values
skeleton = skeleton[["tree", "swap_index"]].values
n_keypoints = skeleton.shape[0]
with h5py.File(datapath, mode="w") as h5file:
h5file.create_dataset(
dataset,
shape=images.shape,
dtype=np.uint8,
data=images,
maxshape=(None,) + images.shape[1:],
)
data = keypoints if keypoints is not None else -np.ones((n_images, n_keypoints, 2))
h5file.create_dataset(
"annotations",
(n_images, n_keypoints, 2),
dtype=np.float64,
data=data,
maxshape=(None,) + data.shape[1:],
)
data = np.zeros((n_images, n_keypoints), dtype=bool)
h5file.create_dataset(
"annotated",
(n_images, n_keypoints),
dtype=bool,
data=data,
maxshape=(None,) + data.shape[1:],
)
h5file.create_dataset("skeleton", skeleton.shape, dtype=np.int32, data=skeleton)
h5file.create_dataset(
"skeleton_names",
(skeleton.shape[0],),
dtype="S32",
data=skeleton_names.astype("S32"),
)
def merge_new_images(
    datapath,
    merged_datapath,
    images,
    frame_numbers,
    video_names,
    keypoints=None,
    dataset="images",
    overwrite=False,
    mode="full",
):
    """
    Merge new images (and optionally their keypoints) into an existing
    annotation set, writing the combined data to a new .h5 file.

    Parameters
    ----------
    datapath : str
        The path to the existing annotations file. Must be .h5
        e.g. '/path/to/file.h5'
    merged_datapath : str
        The path to save the merged annotations file. Must be .h5
        e.g. '/path/to/merged_file.h5'
    images : ndarray, shape (n_images, height, width, channels)
        A numpy array containing image data.
        `images.dtype` should be np.uint8
    frame_numbers : array_like, shape (n_images,)
        Video frame number for each new image.
    video_names : array_like of str, shape (n_images,)
        Source video name for each new image (stored as fixed-width 'S64').
    keypoints : None or ndarray, shape (n_images, n_keypoints, 2)
        Optionally pass keypoints for initializing annotations for the
        new images. If None, new annotations are zero-initialized and the
        new frames are marked as not annotated.
    dataset : str, default = "images"
        The dataset within the h5 file to save the images.
    overwrite : bool, default = False
        Whether to overwrite an existing .h5 file with the same name.
    mode : str
        The mode for loading the existing data.
        Must be "annotated", or "full" (the full dataset).
        NOTE(review): currently unused -- the DataGenerator below is always
        created with mode="full"; confirm whether it should receive `mode`.

    Raises
    ------
    OSError
        If `merged_datapath` exists and `overwrite` is False.
    TypeError
        If `images` or `keypoints` have the wrong type, rank, or dtype.
    IndexError
        If array shapes are inconsistent with each other or with the
        existing dataset.
    """
    # --- input validation --------------------------------------------------
    if os.path.exists(merged_datapath) and overwrite is False:
        raise OSError(
            "Annotation set {} already exists. Delete the file or set `overwrite=True`.".format(
                merged_datapath
            )
        )
    if not isinstance(images, np.ndarray):
        raise TypeError(
            "images must be ndarray with shape (n_images, height, width, channels)"
        )
    elif images.ndim != 4:
        raise TypeError(
            "images must be ndarray with shape (n_images, height, width, channels)"
        )
    elif images.dtype != np.uint8:
        raise TypeError("`images` must be ndarray with dtype np.uint8")
    if keypoints is not None:
        if not isinstance(keypoints, np.ndarray):
            raise TypeError(
                "keypoints must be None or ndarray with shape (n_images, n_keypoints, 2)"
            )
        elif keypoints.ndim != 3:
            raise TypeError(
                "images must be ndarray with shape (n_images, n_keypoints, 2)"
            )
        elif keypoints.shape[0] != images.shape[0]:
            raise IndexError(
                "shape for `images` and `keypoints` must match along axis 0."
            )
    # --- consistency checks against the existing dataset -------------------
    data_generator = DataGenerator(datapath, dataset=dataset, mode="full")
    if images.shape[1:] != data_generator.image_shape:
        raise IndexError(
            "`images` shape {} does not match existing dataset {}".format(
                images.shape[1:], data_generator.image_shape
            )
        )
    if keypoints is not None:
        # drop a possible confidence column so only (x, y) is stored
        if keypoints.shape[-1] == 3:
            keypoints = keypoints[:, :, :2]
        if keypoints.shape[1:] != data_generator.keypoints_shape:
            raise IndexError(
                "`keypoints` shape {} does not match existing dataset {}".format(
                    keypoints.shape[1:], data_generator.keypoints_shape
                )
            )
    # --- create the merged file, copy the old rows, append the new ones ----
    with h5py.File(datapath, mode="r") as h5file, h5py.File(merged_datapath, "w") as merged_h5file:
        n_samples_merged = h5file[dataset].shape[0] + images.shape[0]
        merged_h5file.create_dataset(
            dataset,  # 'images'
            shape=(n_samples_merged,) + data_generator.image_shape,
            dtype=np.uint8,
            maxshape=(None,) + data_generator.image_shape,
        )
        merged_h5file.create_dataset(
            "frame_number",
            shape=(n_samples_merged,),
            # np.int64 replaces the deprecated `np.int` alias
            # (removed in NumPy >= 1.24)
            dtype=np.int64,
            maxshape=(None,)
        )
        merged_h5file.create_dataset(
            "video_name",
            shape=(n_samples_merged,),
            dtype="S64",
            maxshape=(None,)
        )
        merged_h5file.create_dataset(
            "annotations",
            shape=(n_samples_merged,) + data_generator.keypoints_shape,
            dtype=np.float64,
            maxshape=(None,) + data_generator.keypoints_shape,
        )
        merged_h5file.create_dataset(
            "annotated",
            (n_samples_merged, data_generator.keypoints_shape[0]),
            dtype=bool,
            maxshape=(None, data_generator.keypoints_shape[0]),
        )
        merged_h5file.create_dataset(
            "skeleton", h5file["skeleton"].shape, dtype=np.int32, data=h5file["skeleton"][:]
        )
        merged_h5file.create_dataset(
            "skeleton_names", h5file["skeleton_names"].shape, dtype="S32", data=h5file["skeleton_names"][:]
        )
        # copy old datasets row by row
        for idx in range(h5file[dataset].shape[0]):
            merged_h5file[dataset][idx] = h5file[dataset][idx]
            merged_h5file["video_name"][idx] = h5file["video_name"][idx]
            merged_h5file["frame_number"][idx] = h5file["frame_number"][idx]
            merged_h5file["annotations"][idx] = h5file["annotations"][idx]
            merged_h5file["annotated"][idx] = h5file["annotated"][idx]
        # append the new data after the old rows
        for idx in range(h5file[dataset].shape[0], n_samples_merged):
            new_idx = idx - h5file[dataset].shape[0]
            merged_h5file[dataset][idx] = images[new_idx]
            merged_h5file["video_name"][idx] = np.array(video_names[new_idx], dtype='S64')
            merged_h5file["frame_number"][idx] = frame_numbers[new_idx]
            if keypoints is not None:
                merged_h5file["annotations"][idx] = keypoints[new_idx]
            else:
                merged_h5file["annotations"][idx] = np.zeros(data_generator.keypoints_shape)
            # new frames start unannotated regardless of supplied keypoints
            merged_h5file["annotated"][idx] = np.zeros(
                data_generator.keypoints_shape[0], dtype=bool
            )
def update_skeleton(old_dataset, new_dataset, new_skeleton):
    """
    Replace the skeleton of an existing annotation set.

    Copies `old_dataset` to `new_dataset`, swapping in `new_skeleton`.
    Annotations for keypoint names present in both skeletons are carried
    over; keypoints that only exist in the new skeleton start out
    unannotated (positions set to -1). Use this to add or remove features
    from a dataset that has already been labeled.

    Parameters
    ----------
    old_dataset : str
        Path to the existing .h5 annotation set.
    new_dataset : str
        Path where the updated .h5 annotation set will be written.
    new_skeleton
        Skeleton specification accepted by `initialize_skeleton`.
    """
    # load the old annotation set fully into memory
    with h5py.File(old_dataset, 'r') as file:
        old_data = {key: value[...] for key, value in file.items()}
    n_images = old_data['images'].shape[0]
    # load new skeleton
    skeleton = initialize_skeleton(new_skeleton)
    skeleton_names = skeleton["name"].values
    skeleton = skeleton[["tree", "swap_index"]].values
    n_keypoints = skeleton.shape[0]
    # create annotated and annotations tensors
    # (copy old annotations and set new feature positions to -1)
    annotations = -np.ones((n_images, n_keypoints, 2))
    annotated = np.zeros((n_images, n_keypoints), dtype=bool)
    for idx, skeleton_name in enumerate(skeleton_names):
        old_idx = np.where(old_data['skeleton_names'] == np.array(skeleton_name, dtype='S32'))[0]
        if len(old_idx) > 0:
            # carry over the first matching column; the previous try/except
            # around this block only called ipdb.set_trace() (leftover
            # debugging) and has been removed so real errors propagate.
            # int(old_idx) also raised TypeError on multiple matches --
            # old_idx[0] takes the first match explicitly.
            old_col = int(old_idx[0])
            annotated[:, idx] = old_data['annotated'][:, old_col]
            annotations[:, idx, :] = old_data['annotations'][:, old_col, :]
    with h5py.File(new_dataset, mode="w") as h5file:
        # images
        h5file.create_dataset(
            "images",
            shape=old_data['images'].shape,
            dtype=np.uint8,
            data=old_data['images'],
            maxshape=(None,) + old_data['images'].shape[1:],
        )
        # annotations
        h5file.create_dataset(
            "annotations",
            (n_images, n_keypoints, 2),
            dtype=np.float64,
            data=annotations,
            maxshape=(None,) + annotations.shape[1:],
        )
        # annotated
        h5file.create_dataset(
            "annotated",
            (n_images, n_keypoints),
            dtype=bool,
            data=annotated,
            maxshape=(None,) + annotated.shape[1:],
        )
        # skeleton
        h5file.create_dataset("skeleton", skeleton.shape, dtype=np.int32, data=skeleton)
        # skeleton_names
        h5file.create_dataset(
            "skeleton_names",
            (skeleton.shape[0],),
            dtype="S32",
            data=skeleton_names.astype("S32"),
        )
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
15069,
2864,
12,
23344,
12806,
337,
13,
7037,
1075,
1279,
73,
46784,
1075,
31,
14816,
13,
785,
29,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
... | 2.18154 | 6,869 |
from shutil import copyfile
import pdb
from copy import deepcopy
import os
import arrow
from io import StringIO
import re
import semver
import rdflib
from rdflib import RDFS, RDF, OWL, Namespace
from rdflib.namespace import FOAF
from SPARQLWrapper import SPARQLWrapper
from SPARQLWrapper import JSON, SELECT, INSERT, DIGEST, GET, POST, DELETE
LOAD = 'LOAD'
from rdflib import URIRef, Literal
import validators
from .common import VIRTUOSO, semver_compare
if __name__ == '__main__':
    # Smoke test: load the Brick schema and run a sample subclass query
    # against the local SPARQL endpoint.
    endpoint = BrickEndpoint('http://localhost:8890/sparql', '1.0.3')
    endpoint._load_schema()
    sample_query = """
    select ?s where {
    ?s rdfs:subClassOf+ brick:Temperature_Sensor .
    }
    """
    print(endpoint.query(sample_query))
| [
6738,
4423,
346,
1330,
4866,
7753,
198,
11748,
279,
9945,
198,
6738,
4866,
1330,
2769,
30073,
198,
11748,
28686,
198,
11748,
15452,
198,
6738,
33245,
1330,
10903,
9399,
198,
11748,
302,
198,
198,
11748,
5026,
332,
198,
11748,
374,
67,
2... | 2.585034 | 294 |
#! /usr/bin/env python
import pandas as pd
from .clean import clean_text
""" NOTES
- Data columns:
['abstract_text',
'abstract_title',
'bibliography',
'cancelled',
'code',
'figure_legend_1',
'figure_legend_2',
'figure_title_1',
'figure_title_2',
'final_status',
'id',
'is_complete',
'keyword_1',
'keyword_2',
'keyword_3',
'keyword_4',
'keywords',
'legend_1',
'legend_2',
'not_to_remind',
'program_day',
'program_session',
'publish_onsite',
'relance_register',
'topic_1',
'topic_2',
'topic_3',
'user_id',
'validate',
'year']
"""
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
764,
27773,
1330,
3424,
62,
5239,
198,
198,
37811,
5626,
1546,
198,
198,
12,
6060,
15180,
25,
198,
220,
220,
220,
37250,
397,
8709,... | 1.929178 | 353 |
from cleo import Command
from src.masonite.orm.migrations.Migration import Migration
class MigrateRollbackCommand(Command):
    # cleo parses the docstring below for the command name, description and
    # option definitions -- editing it changes the CLI itself, not just docs.
    """
    Run migrations.
    migrate:rollback
        {--c|connection=default : The connection you want to run migrations on}
    """
| [
6738,
1190,
78,
1330,
9455,
198,
198,
6738,
12351,
13,
76,
888,
578,
13,
579,
13,
76,
3692,
602,
13,
44,
4254,
1330,
36991,
628,
198,
4871,
337,
42175,
26869,
1891,
21575,
7,
21575,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
... | 3.011364 | 88 |
import importlib
import logging
from collections import OrderedDict, defaultdict
from portfoliotool.utils.xmlutils import (
get_attribute, get_attributes, get_subnode, get_subnode_subnodes,
get_textnode
)
log = logging.getLogger(__name__)
| [
11748,
1330,
8019,
198,
11748,
18931,
198,
198,
6738,
17268,
1330,
14230,
1068,
35,
713,
11,
4277,
11600,
198,
198,
6738,
2493,
9062,
5151,
970,
13,
26791,
13,
19875,
26791,
1330,
357,
198,
220,
220,
220,
651,
62,
42348,
11,
651,
62,
... | 2.942529 | 87 |
import pickle as pk
import sys
from ast import literal_eval
from datetime import datetime, timedelta
from threading import Thread
from urllib.error import URLError
from urllib.request import urlopen
import configuration as config
import logger
import requests
import tests
import utils
from flask import Flask, request
from selenium import webdriver
from timeout_decorator import timeout, TimeoutError
app = Flask(__name__)
# Run Chrome headless at a fixed viewport size.
# NOTE(review): 'window-size=1980,1080' looks like a typo for 1920x1080 -- confirm.
driver_options = webdriver.ChromeOptions()
driver_options.add_argument('headless')
driver_options.add_argument('window-size=1980,1080')
# driver_options = Options()
# driver_options.headless = True
# Per-group runtime state; entries hold a 'driver' (see the cleanup in __main__).
group_status = {}
# Pre-pickled mapping loaded from disk.
# NOTE(review): unpickling a local file -- trusted input assumed.
with open('team_id_name_data.PyData', mode='rb') as read_file:
    team_names = pk.load(read_file)
@app.route('/', methods=['GET', 'POST'])
@timeout(seconds=config.ACCESS_TIMEOUT_S, use_signals=False)
@timeout(config.TEST_TIMEOUT_S, use_signals=False)
if __name__ == '__main__':
    try:
        # Load admin ids, then start the server; an optional custom port may
        # be passed as the first CLI argument.
        utils.load_admins("admins.json")
        if len(sys.argv) > 1:
            server_port = int(sys.argv[1])
            logger.log_info('starting server on custom port', server_port)
            runserver(server_port)
        else:
            logger.log_info('starting server on default port')
            runserver()
    except Exception:
        # Best-effort cleanup of the per-group Selenium drivers.
        # NOTE(review): the exception is swallowed after cleanup (not logged,
        # not re-raised) -- consider logging it. Assumes every group_status
        # value has a 'driver' entry.
        for gid in group_status:
            group_status[gid]['driver'].close()
| [
11748,
2298,
293,
355,
279,
74,
198,
11748,
25064,
198,
6738,
6468,
1330,
18875,
62,
18206,
198,
6738,
4818,
8079,
1330,
4818,
8079,
11,
28805,
12514,
198,
6738,
4704,
278,
1330,
14122,
198,
6738,
2956,
297,
571,
13,
18224,
1330,
37902,... | 2.662162 | 518 |
from flask_sqlalchemy import SQLAlchemy
from flask_marshmallow import Marshmallow
# Shared extension instances; presumably bound to the app via init_app()
# elsewhere -- TODO confirm.
db = SQLAlchemy()
ma = Marshmallow()
# These imports are intentionally placed *after* db/ma creation: the model
# modules import db/ma back from this package, so importing them earlier
# would cause a circular import.
from .user import User, UserSchema
from .sistema import Sistema, SistemaSchema
from .comando import Comando, ComandoSchema
from .blacklist_token import BlacklistToken, BlacklistTokenSchema
6738,
42903,
62,
25410,
282,
26599,
1330,
16363,
2348,
26599,
198,
6738,
42903,
62,
76,
5406,
42725,
1330,
9786,
42725,
198,
198,
9945,
796,
16363,
2348,
26599,
3419,
198,
2611,
796,
9786,
42725,
3419,
628,
198,
6738,
764,
7220,
1330,
1... | 3.406593 | 91 |
'''
Query Kowalski with cone searches centred
on CLU galaxies, searching for transients
given a set of constraints.
'''
def print_query_params(args):
    '''Print a summary of the query parameters'''
    summary = [
        "#-----",
        "Cone search parameters:",
        f"Search radius {args.radius} arcmin",
        f"Minimum time between the first and last alert {args.min_days} days",
        f"Maximum time between the first and last alert {args.max_days} days",
        f"CLU galaxies selected with distance between {args.min_dist}Mpc and {args.max_dist}Mpc, with Dec > {args.min_dec}",
        f"Query divided in {args.slices} slices",
        "#-----",
        " ",
    ]
    for line in summary:
        print(line)
def get_programidx(program_name, username, password):
    '''Given a marshal science program name, it returns its programidx'''
    response = requests.post(
        'http://skipper.caltech.edu:8080/cgi-bin/growth/list_programs.cgi',
        auth=(username, password)
    )
    # Map program names to their indices (the enumerate in the original
    # version was unused).
    name_to_idx = {}
    for program in json.loads(response.text):
        name_to_idx[program['name']] = program['programidx']
    if program_name in name_to_idx:
        return name_to_idx[program_name]
    print(f'The user {username} does not have access to the program {program_name}')
    return None
def get_candidates_growth_marshal(program_name, username, password):
    '''Query the GROWTH marshal db for the sources saved in a science program.

    Args:
        program_name (str): name of the marshal science program.
        username, password: GROWTH marshal credentials.

    Returns:
        list of dict with name/ra/dec/classification/redshift/creation_date
        for each saved source, or None if the program is not accessible.
    '''
    programidx = get_programidx(program_name, username, password)
    # identity check with 'is None' rather than '== None' (PEP 8)
    if programidx is None:
        return None
    r = requests.post(
        'http://skipper.caltech.edu:8080/cgi-bin/growth/list_program_sources.cgi',
        auth=(username, password), data={'programidx': str(programidx)}
    )
    sources = json.loads(r.text)
    sources_out = []
    for s in sources:
        coords = SkyCoord(ra=s['ra']*u.deg, dec=s['dec']*u.deg, frame='icrs')
        sources_out.append({"name": s['name'], "ra": coords.ra, "dec": coords.dec,
                            "classification": s['classification'],
                            "redshift": s['redshift'],
                            "creation_date": s['creationdate']})
    return sources_out
def query_kowalski_clu(username, password, clu):
    '''Query kowalski to get a table of CLU galaxies.'''
    connection = Kowalski(username=username, password=password, verbose=False)
    clu_query = {
        "query_type": "general_search",
        "query": "db['CLU_20180513'].find({},{'distmpc': 1})"
    }
    return connection.query(query=clu_query)
def check_clu_transients(sources_kowalski, clu_sources):
    '''Check if the selected sources are present in the
    CLU science program. If so, print out the relevant information.

    Args:
        sources_kowalski: iterable of transient names found on Kowalski.
        clu_sources: list of dicts for sources saved in the CLU science
            program; each dict must have a 'name' key (as returned by
            get_candidates_growth_marshal).
    '''
    sources_in_clu = []
    sources_not_in_clu = []
    list_clu_sources = list(s['name'] for s in clu_sources)
    for source in sources_kowalski:
        print("-------")
        if source in list_clu_sources:
            # plain list.index replaces the previous numpy lookup and the
            # leftover bare `except: pdb.set_trace()` debugging hook --
            # the membership test above guarantees a match exists
            clu_source = clu_sources[list_clu_sources.index(source)]
            for k in clu_source.keys():
                print(f"{k}: {clu_source[k]}")
            sources_in_clu.append(source)
        else:
            print(f"{source} was not saved in CLU")
            sources_not_in_clu.append(source)
    print("-------")
    print("Summary:")
    print(f"Sources saved in CLU: {sources_in_clu}")
    print(f"Sources not saved in CLU: {sources_not_in_clu}")
def query_kowalski(username, password, clu, args):
    '''Query kowalski and apply the selection criteria

    Runs cone searches centred on the CLU galaxies (in `args.slices`
    batches), then filters the returned ZTF alerts on duration between
    first and last detection, real/bogus score, solar-system distance,
    negative subtractions, and nearby bright/point-like PS1 matches.

    Args:
        username, password: Kowalski credentials.
        clu: table of CLU galaxies with "name", "ra", "dec" columns.
        args: parsed command-line options (radius, min_days, max_days,
            slices, ...).

    Returns:
        set: objectId strings of the candidates surviving all cuts.
    '''
    k = Kowalski(username=username, password=password, verbose=False)
    #Initialize a set for the results
    set_objectId_all = set([])
    # iterate over slices of the galaxy table to keep each cone search small
    for slice_lim,i in zip(np.linspace(0,len(clu),args.slices)[:-1], np.arange(len(np.linspace(0,len(clu),args.slices)[:-1]))):
        try:
            t = clu[int(slice_lim):int(np.linspace(0,len(clu),args.slices)[:-1][i+1])]
        except IndexError:
            # last slice: take everything to the end of the table
            t = clu[int(slice_lim):]
        coords_arr = []
        galaxy_names_arr = []
        for galaxy,ra, dec in zip(t["name"],t["ra"], t["dec"]):
            try:
                coords = SkyCoord(ra=ra*u.deg, dec=dec*u.deg)
                coords_arr.append((coords.ra.deg,coords.dec.deg))
            except ValueError:
                print("Problems with the galaxy coordinates?")
                # NOTE(review): leftover interactive debugging -- remove
                pdb.set_trace()
                continue
            galaxy_names_arr.append(galaxy)
        try:
            print(f"slice: {int(slice_lim)}:{int(np.linspace(0,len(clu),args.slices)[:-1][i+1])}" )
        except:
            print(f"slice: {int(slice_lim)}:{int(len(clu))}" )
        # cone search around every galaxy in this slice; pre-filter on
        # at least 2 detections and real/bogus score > 0.2
        q = {"query_type": "cone_search",
             "object_coordinates": {
                 "radec": f"{coords_arr}",
                 "cone_search_radius": f"{args.radius}",
                 "cone_search_unit": "arcmin"
             },
             "catalogs": {
                 "ZTF_alerts": {
                     "filter": {
                         "candidate.ndethist": {'$gt': 1},
                         "candidate.rb": {'$gt': 0.2}
                     },
                     "projection": {
                         "objectId": 1,
                         "candidate.rcid": 1,
                         "candidate.ra": 1,
                         "candidate.dec": 1,
                         "candidate.jd": 1,
                         "candidate.ndethist": 1,
                         "candidate.jdstarthist": 1,
                         "candidate.jdendhist": 1,
                         "candidate.jdendhist": 1,
                         "candidate.magpsf": 1,
                         "candidate.sigmapsf": 1,
                         "candidate.fid": 1,
                         "candidate.programid": 1,
                         "candidate.isdiffpos": 1,
                         "candidate.ndethist": 1,
                         "candidate.ssdistnr": 1,
                         "candidate.rb": 1,
                         "candidate.drb": 1,
                         "candidate.distpsnr1": 1,
                         "candidate.sgscore1": 1,
                         "candidate.srmag1": 1,
                         "candidate.distpsnr2": 1,
                         "candidate.sgscore2": 1,
                         "candidate.srmag2": 1,
                         "candidate.distpsnr3": 1,
                         "candidate.sgscore3": 1,
                         "candidate.srmag3": 1
                     }
                 }
             }
             }
        #Perform the query
        r = k.query(query=q)
        print('Search completed for this slice.')
        # #Dump the results in a json file
        # with open(f'results_clu25Mpc_1week_{i+1}.json', 'w') as j:
        #     json.dump(r, j)
        #Identify 'candid' for all relevant candidates
        objectId_list = []
        with_neg_sub = []
        old = []
        stellar_list = []
        try:
            keys_list = list(r['result_data']['ZTF_alerts'].keys())
        except:
            print("Error in the keys list?? Check 'r' ")
            # NOTE(review): leftover interactive debugging; if the query
            # failed, keys_list stays undefined and the loop below raises
            pdb.set_trace()
        for key in keys_list:
            all_info = r['result_data']['ZTF_alerts'][key]
            for info in all_info:
                # #Stop at a certain candidId for debugging
                # if info['objectId'] == 'ZTF19aanfkyc':
                #     pdb.set_trace()
                if info['objectId'] in old:
                    continue
                if info['objectId'] in stellar_list:
                    continue
                # the bare excepts below deliberately skip a cut when the
                # relevant field is missing from the alert packet
                try:
                    if info['candidate']['drb'] < 0.5:
                        continue
                except:
                    do = 'do nothing.'
                # reject likely solar-system objects (< 10 arcsec match)
                if np.abs(info['candidate']['ssdistnr']) < 10:
                    continue
                if info['candidate']['isdiffpos'] in ['f',0]:
                    with_neg_sub.append(info['objectId'])
                if (info['candidate']['jdendhist'] - info['candidate']['jdstarthist']) < args.min_days:
                    continue
                if (info['candidate']['jdendhist'] - info['candidate']['jdstarthist']) > args.max_days:
                    old.append(info['objectId'])
                # star/galaxy cuts against the nearest three PS1 matches
                try:
                    if (np.abs(info['candidate']['distpsnr1']) < 1. and info['candidate']['sgscore1'] > 0.0):
                        stellar_list.append(info['objectId'])
                except:
                    do = 'do nothing.'
                try:
                    if (np.abs(info['candidate']['distpsnr1']) < 15. and info['candidate']['srmag1'] < 15. and info['candidate']['srmag1'] > 0. and info['candidate']['sgscore1'] >= 0.5):
                        continue
                except:
                    do = 'do nothing.'
                try:
                    if (np.abs(info['candidate']['distpsnr2']) < 15. and info['candidate']['srmag2'] < 15. and info['candidate']['srmag2'] > 0. and info['candidate']['sgscore2'] >= 0.5):
                        continue
                except:
                    do = 'do nothing.'
                try:
                    if (np.abs(info['candidate']['distpsnr3']) < 15. and info['candidate']['srmag3'] < 15. and info['candidate']['srmag3'] > 0. and info['candidate']['sgscore3'] >= 0.5):
                        continue
                except:
                    do = 'do nothing.'
                objectId_list.append(info['objectId'])
        set_objectId = set(objectId_list)
        #Remove those objects with negative subtraction
        for n in set(with_neg_sub):
            try:
                set_objectId.remove(n)
            except:
                do = 'do nothing'
        #Remove stellar objects
        for n in set(stellar_list):
            try:
                set_objectId.remove(n)
            except:
                do = 'do nothing'
        #Remove those objects considered old
        for n in set(old):
            try:
                set_objectId.remove(n)
            except:
                do = 'do nothing'
        print(set_objectId)
        set_objectId_all = set_objectId_all | set_objectId
        print("Cumulative:", set_objectId_all)
        '''
        print('----stats-----')
        print('Number of sources with negative sub: ', len(set(with_neg_sub)))
        print('Number of sources with only pos subtraction: ', len(set_objectId))
        print(f"Number of sources older than {args.max_days} days: {len(set(old))}, specifically {set(old)}")
        '''
    return set_objectId_all
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='Query kowalski.')
parser.add_argument('--radius', dest='radius', type=float, required=False, \
help='Search radius (arcmin)', default = 1.)
parser.add_argument('--min-days', dest='min_days', type=float, required=False, \
help='Minimum time (days) between the first and last alert', default = 3.)
parser.add_argument('--max-days', dest='max_days', type=float, required=False, \
help='Maximum time (days) between the first and last alert', default = 14.)
parser.add_argument('--min-dist', dest='min_dist', type=float, required=False, \
help='Minimum distance(Mpc) of the CLU galaxies to explore', default = 0.)
parser.add_argument('--max-dist', dest='max_dist', type=float, required=False, \
help='Maximum distance(Mpc) of the CLU galaxies to explore', default = 200.)
parser.add_argument('--min-dec', dest='min_dec', type=float, required=False, \
help='Minimum declination (celestial, deg) of the CLU galaxies to explore', default = -30.)
parser.add_argument('--slices', dest='slices', type=int, required=False, \
help='Number (integer) of slices in which the query will be devided', default = 40)
args = parser.parse_args()
import requests
import json
import pdb
import numpy as np
from astropy.time import Time
from astropy.io import ascii
from astropy.io import fits
from astropy.table import Table
from astropy import units as u
from astropy.coordinates import Angle
from astropy.coordinates import SkyCoord
from penquins import Kowalski
#Print a summary of the query input
print_query_params(args)
#Read the CLU catalog
clu = Table.read('CLU_20181213V2.fits')
clu = clu[clu['dec'] > args.min_dec]
clu = clu[clu['distmpc'] >= args.min_dist]
clu = clu[clu['distmpc'] <= args.max_dist]
print(f"There are {len(clu)} CLU galaxies in this sample.")
#Read the secrets
secrets = ascii.read('secrets.csv', format = 'csv')
username = secrets['kowalski_user'][0]
password = secrets['kowalski_pwd'][0]
#Query kowalski
sources_kowalski = query_kowalski(username, password, clu, args)
#Check the CLU science program on the Marshal
username_marshal = secrets['marshal_user'][0]
password_marshal= secrets['marshal_pwd'][0]
program_name='Census of the Local Universe'
clu_sources = get_candidates_growth_marshal(program_name, username_marshal, password_marshal)
#For each transient check if it is present in the CLU science program
check_clu_transients(sources_kowalski, clu_sources)
print("Done.")
'''
#Plot the data
for galaxy_name, idcoords in zip(galaxy_names_arr, r['result_data']['ZTF_alerts'].keys()):
all_info=r['result_data']['ZTF_alerts'][idcoords]
jd_arr=[]
mag_arr=[]
magerr_arr=[]
filter_arr=[]
for info in all_info:
if info['candidate']['isdiffpos'] != 't':
continue
magpsf=info['candidate']['magpsf']
sigmapsf=info['candidate']['sigmapsf']
jd=info['candidate']['jd']
fil=info['candidate']['fid']
filter_arr.append(fil)
jd_arr.append(jd)
mag_arr.append(magpsf)
magerr_arr.append(sigmapsf)
if mag_arr!=[]:
print(info['candidate']['programid'])
jd0=min(jd_arr)
jd0_time=Time(jd0, format='jd')
for i in np.arange(len(jd_arr)):
jd_arr[i]=jd_arr[i]-jd0
plt.figure()
print(galaxy_name, info['objectId'], info['candidate']['ra'], info['candidate']['dec'])
plt.title(galaxy_name + ' '+info['objectId'])
#plt.errorbar(jd_arr, mag_arr, yerr=magerr_arr, color='blue', linestyle=' ', marker='o')
for jj,mm,me,ff in zip(jd_arr, mag_arr, magerr_arr, filter_arr):
if ff==1:
fcolor='b'
if ff==2:
fcolor='r'
if ff==3:
fcolor='y'
plt.errorbar(jj, mm, yerr=me, color=fcolor, linestyle=' ', marker='o')
plt.xlabel(f'Days since {jd0_time.iso}')
plt.gca().invert_yaxis()
plt.show()
'''
| [
7061,
6,
198,
20746,
509,
322,
874,
4106,
351,
27763,
15455,
1247,
445,
198,
261,
7852,
52,
27982,
11,
10342,
329,
1007,
2334,
198,
35569,
257,
900,
286,
17778,
13,
198,
7061,
6,
628,
198,
4299,
3601,
62,
22766,
62,
37266,
7,
22046,... | 2.017198 | 7,152 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021 Milan Ondrašovič <milan.ondrasovic@gmail.com>
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import abc
import dataclasses
import itertools
from pathlib import Path
from xml.etree import ElementTree
import numpy as np
import torch
from PIL import Image
from torch.utils.data import DataLoader
from torchvision import transforms as T
class UADetracContextDetectionDataset(torch.utils.data.Dataset):
    """UA-DETRAC detection dataset with the additional capability of adding
    contextual images. If no context is specified, this class produces a dataset
    representation that is tantamount to that which torchvision object detection
    modules expect.
    """
    @dataclasses.dataclass(frozen=True)
    class _SeqBoxesIndex:
        """An auxiliary class to store sequence index and image index within
        that sequence.
        """
        seq_idx: int
        image_idx: int
    def __init__(
        self,
        root_path,
        subset='train',
        *,
        past_context=0,
        future_context=0,
        context_stride=1,
        transforms=None
    ):
        """Constructor.
        Args:
            root_path (str): Path to the UA-DETRAC dataset.
            subset (str, optional): Data subset ('train' or 'test').
                Defaults to 'train'.
            past_context (int, optional): A non-negative integer specifying the
                number of frames in the past. Defaults to 0.
            future_context (int, optional): A non-negative integer specifying
                the number of frames in the future. Defaults to 0.
            context_stride (int, optional): A positive integer representing the
                stride when traversing the past as well as future contextual
                frames. Defaults to 1.
            transforms (Callable, optional): Transformation to apply to
                individual frames. Beware that if context is required, some
                transformations may be nonsensical. Defaults to None.
        """
        # relative frame offsets of the context window around the center frame
        self._context_rel_idxs = _calc_context_rel_idxs(
            past_context, future_context, context_stride
        )
        self._global_to_local_seq_image_idxs = []
        self._seq_image_paths = []
        self._seq_boxes = []
        self.transforms = transforms
        self._init_data_indices(root_path, subset)
    def __getitem__(self, idx):
        """Retrieves a random sample from the dataset.
        Args:
            idx (int): Data sample index.
        Returns:
            Tuple[torch.Tensor, Dict[str, List[torch.Tensor]]]: Returns a data
            sample consisting of a center image in a tensor format, and target
            specification as a dictionary with the following content:
                'boxes': A Nx4 tensor of boxes in xyxy format.
                'labels': A N, tensor of labels (0 indicates background).
                'context_images': A list of tensors of contextual images
                (including the center image).
        """
        seq_box_idx = self._global_to_local_seq_image_idxs[idx]
        seq_idx, center_image_idx = seq_box_idx.seq_idx, seq_box_idx.image_idx
        image_file_paths = self._seq_image_paths[seq_idx]
        # clamp context indices so the window never leaves the sequence;
        # frames at the boundary are simply repeated by the clamping
        abs_context_idxs = np.clip(
            self._context_rel_idxs + center_image_idx, 0,
            len(image_file_paths) - 1
        )
        center_image = None
        context_images = []
        prev_idx = -1
        # consecutive duplicate indices (produced by clamping) are skipped
        # so the same frame is not loaded twice in a row
        for context_idx in abs_context_idxs:
            if context_idx != prev_idx:
                image_file_path = image_file_paths[context_idx]
                image = Image.open(image_file_path)
                if context_idx == center_image_idx:
                    center_image = image
                context_images.append(image)
            prev_idx = context_idx
        assert center_image is not None
        image_id = torch.as_tensor([idx], dtype=torch.int64)
        boxes = self._seq_boxes[seq_idx][center_image_idx]
        boxes = torch.as_tensor(boxes, dtype=torch.float32)
        # xyxy boxes: area = (y2 - y1) * (x2 - x1)
        # NOTE(review): assumes at least one box per indexed frame (ensured
        # by _init_data_indices, which only keeps annotated frames)
        areas = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0])
        # all boxes share label 1; 0 indicates background (see docstring)
        labels = torch.ones((len(boxes),), dtype=torch.int64)
        is_crowd = torch.zeros_like(labels)
        target = {
            'image_id': image_id,
            'boxes': boxes,
            'area': areas,
            'labels': labels,
            'iscrowd': is_crowd,
            'context_images': context_images,
        }
        if self.transforms is not None:
            center_image, target = self.transforms(center_image, target)
        return center_image, target
    def __len__(self):
        """Returns the length of the dataset. It represents the number of
        images (frames) of the entire dataset.
        Returns:
            int: Dataset length.
        """
        return len(self._global_to_local_seq_image_idxs)
    def _init_data_indices(self, root_path, subset):
        """Initializes data indices to faster access. It reads image (frame)
        file names as well as their corresponding bounding boxes for faster
        access later on.
        Args:
            root_path (str): UA-DETRAC dataset root path.
            subset (str): Whether to read 'train' or 'test' data subset.
        """
        images_dir, annos_dir = self._deduce_images_and_annos_paths(
            root_path, subset
        )
        for seq_idx, seq_dir in enumerate(images_dir.iterdir()):
            xml_file_name = seq_dir.stem + '_v3.xml'
            xml_file_path = str(annos_dir / xml_file_name)
            image_boxes_map = dict(self._iter_seq_boxes(xml_file_path))
            seq_image_file_paths = []
            seq_image_boxes = []
            image_idx_gen = itertools.count()
            # only frames with annotated boxes are indexed; frames absent
            # from the XML annotation file are silently dropped
            for image_num, image_file_path in self._iter_seq_image_file_paths(
                seq_dir
            ):
                boxes = image_boxes_map.get(image_num)
                if boxes is not None:
                    seq_image_file_paths.append(image_file_path)
                    seq_image_boxes.append(boxes)
                    image_idx = next(image_idx_gen)
                    seq_boxes_idx = self._SeqBoxesIndex(seq_idx, image_idx)
                    self._global_to_local_seq_image_idxs.append(seq_boxes_idx)
            self._seq_image_paths.append(seq_image_file_paths)
            self._seq_boxes.append(seq_image_boxes)
    @staticmethod
    def _deduce_images_and_annos_paths(root_path, subset):
        """Deduces paths for images and annotations. It returns the root path
        that contains all the sequences belonging to the specific subset.
        Args:
            root_path (str): Root directory path to the UA-DETRAC dataset.
            subset (str): Data subset type ('train' or 'test').
        Returns:
            Tuple[pathlib.Path, pathlib.Path]: Directory paths for images and
            annotations.
        """
        assert subset in ('train', 'test')
        subset = subset.capitalize()
        root_dir = Path(root_path)
        images_idr = root_dir / ('Insight-MVT_Annotation_' + subset)
        annos_dir = root_dir / 'DETRAC_public' / ('540p-' + subset)
        return images_idr, annos_dir
    @staticmethod
    def _iter_seq_image_file_paths(seq_dir):
        """Iterates over image file names for a specific sequence from the
        UA-DETRAC dataset.
        Args:
            seq_dir (pathlib.Path): Sequence directory path.
        Yields:
            Tuple[int, str]: Tuple containing image (frame) number and
            the corresponding file path.
        """
        # frame number = last five characters of the file stem parsed as int;
        # sorting the (number, path) pairs yields frames in temporal order
        image_num_path_pairs = [
            (int(p.stem[-5:]), str(p)) for p in seq_dir.iterdir()
        ]
        yield from iter(sorted(image_num_path_pairs))
    @staticmethod
    def _iter_seq_boxes(xml_file_path):
        """Iterates over a sequence of bounding boxes contained within a
        specific XML file corresponding to some sequence from the UA-DETRAC
        dataset.
        Args:
            xml_file_path (str): Sequence specification XML file path.
        Yields:
            Tuple[int, List[Tuple[float, float, float, float]]]: A tuple
            containing the frame number and the list of bounding boxes in a
            xyxy format.
        """
        tree = ElementTree.parse(xml_file_path)
        root = tree.getroot()
        for frame in root.findall('./frame'):
            frame_num = int(frame.attrib['num'])
            boxes = []
            for target in frame.findall('.//target'):
                box_attr = target.find('box').attrib
                # XML stores left/top/width/height; convert to xyxy
                x = float(box_attr['left'])
                y = float(box_attr['top'])
                w = float(box_attr['width'])
                h = float(box_attr['height'])
                box = (x, y, x + w, y + h)
                boxes.append(box)
            yield frame_num, boxes
if __name__ == '__main__':
    import functools
    import cv2 as cv
    from config import cfg
    # NOTE(review): make_dataset, make_data_loader and ImageBatchVisualizer
    # are not defined in this chunk -- presumably provided elsewhere in the
    # module or stripped during extraction; confirm before running.
    dataset = make_dataset(cfg)
    data_loader = make_data_loader(cfg, dataset)
    n_batches_shown = 4
    # preview a few batches; stop early when preview_batch_images returns falsy
    with ImageBatchVisualizer(cfg, max_size=400) as visualizer:
        for images, targets in itertools.islice(data_loader, n_batches_shown):
            if not visualizer.preview_batch_images(images, targets):
                break
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
201,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
2,
201,
198,
2,
15069,
357,
66,
8,
33448,
21574,
440,
24631,
32790,
47297,
46195,
1279,
25433,
272,
13,
623... | 2.164215 | 4,963 |
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from datetime import datetime
from gae_libs.testcase import TestCase
from libs import analysis_status
from model import result_status
from model import triage_status
from model.flake.flake_culprit import FlakeCulprit
from model.flake.master_flake_analysis import DataPoint
from model.flake.master_flake_analysis import MasterFlakeAnalysis
| [
2,
15069,
1584,
383,
18255,
1505,
46665,
13,
1439,
2489,
10395,
13,
198,
2,
5765,
286,
428,
2723,
2438,
318,
21825,
416,
257,
347,
10305,
12,
7635,
5964,
326,
460,
307,
198,
2,
1043,
287,
262,
38559,
24290,
2393,
13,
198,
198,
6738,... | 3.796992 | 133 |
from image_builder.api.service.image_builder_service import build_image
| [
6738,
2939,
62,
38272,
13,
15042,
13,
15271,
13,
9060,
62,
38272,
62,
15271,
1330,
1382,
62,
9060,
628,
628
] | 3.75 | 20 |
from pdb import set_trace as T
import numpy as np
from forge.blade.io import Action as Static
from forge.blade.io.action.node import NodeType
from forge.ethyr.io import utils
class ActionArgs:
'''An action argument pair'''
class Action:
'''IO class used for interacting with game actions
Used via .next to turn the complex action tree into
a flat list of action and argument selections.
'''
@property
def atnArgs(self):
'''Final chosed action argument pair'''
return self.ret
@property
def outs(self):
'''The logit packets from intermediate selection'''
return self.out
def next(self, env, ent, atn, outs=None):
'''Compute the available choices for the next action
Args:
env: the environment observation
ent: the entity selecting actions
atn: the previously selected action
outs: the logit packets from selecting the previous action
Returns:
args: the next arguments for selection
done: whether we have reached the end of the selection tree
'''
done = False
#Record action
if outs is not None:
self.out[self.prev] = outs
self.prev = atn
#Return argument
if self.nxt is not None:
args = []
if len(self.nxt) == 0:
done = True
return args, done
args = self.nxt[0]
self.ret.args = args #Only one arg support for now
self.nxt = self.nxt[1:]
return [args], done
args = atn.args(env, ent, self.config)
if atn.nodeType is NodeType.ACTION:
self.ret.action = atn
self.nxt = args
done = len(args) == 0
return args, done
@staticmethod
def flat(root=Static):
'''Returns a flat action tree'''
rets = [root]
if root.nodeType is NodeType.SELECTION:
for edge in root.edges:
rets += Action.flat(edge)
return rets
@staticmethod
def leaves(root=Static):
'''Returns only the action leaves'''
rets = []
for e in Action.flat():
if e.leaf:
rets.append(e)
return rets
@staticmethod
def actions(root=Static):
'''Returns only selection nodes'''
rets = []
for e in Action.flat():
if e.nodeType is action.NodeType.SELECTION:
rets.append(e)
return rets
def serialize(outs, iden):
'''Internal action serializer for communication across machines'''
from forge.ethyr.io.serial import Serial
ret = []
for key, out in outs.items():
key = Serial.key(key, iden)
arguments, idx = out
args, idx = [], int(out[1])
for e in arguments:
#May need e.serial[-1]
#to form a unique key
k = Serial.key(e, iden)
args.append(k)
ret.append([key, args, idx])
return ret
#Dimension packing: batch, atnList, atn, serial key
def batch(actionLists):
'''Internal batcher for lists of actions'''
atnTensor, idxTensor = [], []
keyTensor, lenTensor = [], []
#Pack inner set
for actionList in actionLists:
keys, atns, idxs = [], [], []
for key, atn, idx in actionList:
atns.append(np.array(atn))
idxs.append(idx)
keys.append(key)
idxs = np.array(idxs)
keys = np.array(keys)
atns, lens = utils.pack(atns)
atnTensor.append(atns)
idxTensor.append(idxs)
keyTensor.append(keys)
lenTensor.append(lens)
#Pack outer set
idxTensor, _ = utils.pack(idxTensor)
atnTensor, _ = utils.pack(atnTensor)
keyTensor, _ = utils.pack(keyTensor)
lenTensor = utils.pack(lenTensor)
return atnTensor, idxTensor, keyTensor, lenTensor
def unbatch(atnTensor, idxTensor, keyTensor, lenTensor):
'''Internal inverse batcher'''
lenTensor, lenLens = lenTensor
actions = []
#Unpack outer set (careful with unpack dim)
atnTensor = utils.unpack(atnTensor, lenLens, dim=1)
idxTensor = utils.unpack(idxTensor, lenLens, dim=1)
keyTensor = utils.unpack(keyTensor, lenLens, dim=1)
lenTensor = utils.unpack(lenTensor, lenLens, dim=1)
#Unpack inner set
for atns, idxs, keys, lens in zip(
atnTensor, idxTensor, keyTensor, lenTensor):
atns = utils.unpack(atns, lens, dim=-2)
actions.append(list(zip(keys, atns, idxs)))
return actions
| [
6738,
279,
9945,
1330,
900,
62,
40546,
355,
309,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
28325,
13,
22500,
13,
952,
1330,
7561,
355,
36125,
198,
6738,
28325,
13,
22500,
13,
952,
13,
2673,
13,
17440,
1330,
19081,
6030,
198,... | 2.21773 | 2,053 |
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
22961,
3611,
5094,
13789,
410,
18,
13,
15,
10,
357,
3826,
27975,
45761,
393,
3740,
1378,
2503,
13,
41791,
13,
2398,
14,
677,
4541,
14,
70,
489,
12,
18,
13,
... | 2.333333 | 51 |
import cmath
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import scipy.signal as signal
import csv
from .Periodogram import Periodogram
sns.set() | [
11748,
269,
11018,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
384,
397,
1211,
355,
3013,
82,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
629,
541,
88,
13,
12683,
282,
355,
6737,
198,
11748,
269,
2137... | 3 | 58 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
11,
15720,
602,
198,
6738,
42625,
14208,
13,
10414,
1330,
... | 3.111111 | 45 |
print('hello\tworld')
print('hello\nworld')
print( len('hello world' ) )
print('hello world' [2])
my_letter_list = ['a', 'a', 'b','b', 'c']
print(my_letter_list)
print( set(my_letter_list) )
my_unique_letters = set(my_letter_list)
print(my_unique_letters)
print( len(my_unique_letters ) )
print('d' in my_unique_letters)
print( list (my_unique_letters)[0]) | [
4798,
10786,
31373,
59,
4246,
1764,
11537,
198,
198,
4798,
10786,
31373,
59,
77,
6894,
11537,
198,
198,
4798,
7,
220,
18896,
10786,
31373,
995,
6,
220,
1267,
1267,
198,
198,
4798,
10786,
31373,
995,
6,
685,
17,
12962,
198,
198,
1820,
... | 2.469799 | 149 |
"""The builtin list implementation
Lists optimize their storage by holding certain primitive datatypes in
unwrapped form. For more information:
http://morepypy.blogspot.com/2011/10/more-compact-lists-with-list-strategies.html
"""
import operator
import sys
from rpython.rlib import debug, jit, rerased
from rpython.rlib.listsort import make_timsort_class
from rpython.rlib.objectmodel import (
import_from_mixin, instantiate, newlist_hint, resizelist_hint, specialize)
from rpython.rlib import longlong2float
from rpython.tool.sourcetools import func_with_new_name
from pypy.interpreter.baseobjspace import W_Root
from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.gateway import (
WrappedDefault, applevel, interp2app, unwrap_spec)
from pypy.interpreter.generator import GeneratorIterator
from pypy.interpreter.signature import Signature
from pypy.interpreter.typedef import TypeDef
from pypy.objspace.std.bytesobject import W_BytesObject
from pypy.objspace.std.floatobject import W_FloatObject
from pypy.objspace.std.intobject import W_IntObject
from pypy.objspace.std.iterobject import (
W_FastListIterObject, W_ReverseSeqIterObject)
from pypy.objspace.std.sliceobject import W_SliceObject, unwrap_start_stop
from pypy.objspace.std.tupleobject import W_AbstractTupleObject
from pypy.objspace.std.unicodeobject import W_UnicodeObject
from pypy.objspace.std.util import get_positive_index, negate
__all__ = ['W_ListObject', 'make_range_list', 'make_empty_list_with_size']
UNROLL_CUTOFF = 5
@jit.look_inside_iff(lambda space, list_w, sizehint:
jit.loop_unrolling_heuristic(list_w, len(list_w), UNROLL_CUTOFF))
_do_extend_jitdriver = jit.JitDriver(
name='list__do_extend_from_iterable',
greens=['w_type'],
reds=['i', 'w_iterator', 'w_list'],
get_printable_location=_get_printable_location)
find_jmp = jit.JitDriver(greens = ['tp'], reds = 'auto', name = 'list.find')
class EmptyListStrategy(ListStrategy):
"""EmptyListStrategy is used when a W_List withouth elements is created.
The storage is None. When items are added to the W_List a new RPython list
is created and the strategy and storage of the W_List are changed depending
to the added item.
W_Lists do not switch back to EmptyListStrategy when becoming empty again.
"""
erase, unerase = rerased.new_erasing_pair("empty")
erase = staticmethod(erase)
unerase = staticmethod(unerase)
getitems_fixedsize = func_with_new_name(getitems_copy,
"getitems_fixedsize")
getitems_unroll = getitems_fixedsize
class SizeListStrategy(EmptyListStrategy):
"""Like empty, but when modified it'll preallocate the size to sizehint."""
class SimpleRangeListStrategy(BaseRangeListStrategy):
"""SimpleRangeListStrategy is used when a list is created using the range
method providing only positive length. The storage is a one element tuple
with positive integer storing length."""
erase, unerase = rerased.new_erasing_pair("simple_range")
erase = staticmethod(erase)
unerase = staticmethod(unerase)
@specialize.arg(2)
_getitems_range_unroll = jit.unroll_safe(
func_with_new_name(_getitems_range, "_getitems_range_unroll"))
class RangeListStrategy(BaseRangeListStrategy):
"""RangeListStrategy is used when a list is created using the range method.
The storage is a tuple containing only three integers start, step and
length and elements are calculated based on these values. On any operation
destroying the range (inserting, appending non-ints) the strategy is
switched to IntegerListStrategy."""
erase, unerase = rerased.new_erasing_pair("range")
erase = staticmethod(erase)
unerase = staticmethod(unerase)
@specialize.arg(2)
_getitems_range_unroll = jit.unroll_safe(
func_with_new_name(_getitems_range, "_getitems_range_unroll"))
# _______________________________________________________
init_signature = Signature(['sequence'], None, None)
init_defaults = [None]
app = applevel("""
def listrepr(currently_in_repr, l):
'The app-level part of repr().'
list_id = id(l)
if list_id in currently_in_repr:
return '[...]'
currently_in_repr[list_id] = 1
try:
return "[" + ", ".join([repr(x) for x in l]) + ']'
finally:
try:
del currently_in_repr[list_id]
except:
pass
""", filename=__file__)
listrepr = app.interphook("listrepr")
# ____________________________________________________________
# Sorting
# Reverse a slice of a list in place, from lo up to (exclusive) hi.
# (used in sort)
TimSort = make_timsort_class()
IntBaseTimSort = make_timsort_class()
FloatBaseTimSort = make_timsort_class()
IntOrFloatBaseTimSort = make_timsort_class()
StringBaseTimSort = make_timsort_class()
UnicodeBaseTimSort = make_timsort_class()
# NOTE: all the subclasses of TimSort should inherit from a common subclass,
# so make sure that only SimpleSort inherits directly from TimSort.
# This is necessary to hide the parent method TimSort.lt() from the
# annotator.
W_ListObject.typedef = TypeDef("list",
__doc__ = """list() -> new empty list
list(iterable) -> new list initialized from iterable's items""",
__new__ = interp2app(W_ListObject.descr_new),
__init__ = interp2app(W_ListObject.descr_init),
__repr__ = interp2app(W_ListObject.descr_repr),
__hash__ = None,
__eq__ = interp2app(W_ListObject.descr_eq),
__ne__ = interp2app(W_ListObject.descr_ne),
__lt__ = interp2app(W_ListObject.descr_lt),
__le__ = interp2app(W_ListObject.descr_le),
__gt__ = interp2app(W_ListObject.descr_gt),
__ge__ = interp2app(W_ListObject.descr_ge),
__len__ = interp2app(W_ListObject.descr_len),
__iter__ = interp2app(W_ListObject.descr_iter),
__contains__ = interp2app(W_ListObject.descr_contains),
__add__ = interp2app(W_ListObject.descr_add),
__iadd__ = interp2app(W_ListObject.descr_inplace_add),
__mul__ = interp2app(W_ListObject.descr_mul),
__rmul__ = interp2app(W_ListObject.descr_mul),
__imul__ = interp2app(W_ListObject.descr_inplace_mul),
__getitem__ = interp2app(W_ListObject.descr_getitem),
__setitem__ = interp2app(W_ListObject.descr_setitem),
__delitem__ = interp2app(W_ListObject.descr_delitem),
sort = interp2app(W_ListObject.descr_sort),
index = interp2app(W_ListObject.descr_index),
append = interp2app(W_ListObject.append),
reverse = interp2app(W_ListObject.descr_reverse),
__reversed__ = interp2app(W_ListObject.descr_reversed),
count = interp2app(W_ListObject.descr_count),
pop = interp2app(W_ListObject.descr_pop),
extend = interp2app(W_ListObject.extend),
insert = interp2app(W_ListObject.descr_insert),
remove = interp2app(W_ListObject.descr_remove),
)
W_ListObject.typedef.flag_sequence_bug_compat = True
| [
37811,
464,
3170,
259,
1351,
7822,
198,
198,
43,
1023,
27183,
511,
6143,
416,
4769,
1728,
20049,
4818,
265,
9497,
287,
198,
403,
29988,
1496,
1296,
13,
1114,
517,
1321,
25,
198,
198,
4023,
1378,
3549,
79,
4464,
88,
13,
35217,
13,
78... | 2.622015 | 2,680 |
from requests import post
from requests.auth import HTTPBasicAuth
| [
6738,
7007,
1330,
1281,
198,
6738,
7007,
13,
18439,
1330,
14626,
26416,
30515,
628
] | 4.785714 | 14 |
import datetime
import pygame
from mithril.controls.label import Label
from mithril.graphics.base_shapes import VerticalLine
from mithril.graphics.util_shapes import RoundedRectangle
DEFAULT_COLOR = (255, 255, 255)
DEFAULT_BORDER_COLOR = (181, 181, 181)
DEFAULT_HOVER_BORDER_COLOR = (3, 158, 211)
| [
11748,
4818,
8079,
198,
198,
11748,
12972,
6057,
198,
198,
6738,
285,
342,
22379,
13,
13716,
82,
13,
18242,
1330,
36052,
198,
6738,
285,
342,
22379,
13,
70,
11549,
13,
8692,
62,
1477,
7916,
1330,
38937,
13949,
198,
6738,
285,
342,
223... | 2.813084 | 107 |
'''
Created on Nov. 23, 2017
@author Andrew Habib
'''
'''
Currently, the spotbugs output files may contain
analysis results of more than one .java file.
This happens in cases where analyzed bug involves
more than one .java file.
'''
import json
import os
import sys
from xml.etree import cElementTree as ET
from Util import XmlReader, SpotbugsMsg, CustomEncoder, NO_WARNING
'''
Takes only one argument: path to spotbugs raw data
'''
if __name__ == '__main__':
location_to_data = os.path.join(os.getcwd(), sys.argv[1])
list_of_data = sorted(os.listdir(location_to_data))
data_paths = list(map(lambda f: os.path.join(location_to_data, f), list_of_data))
parsed_reports_per_project = []
for proj, tree in XmlReader(data_paths):
parsed_reports_per_project.extend(parse_spotbugs_xml_output(proj, tree))
# time_stamp = time.strftime("%Y%m%d-%H%M%S")
time_stamp = ''
parsed_output_file_name = "sb_parsed" + time_stamp + ".json"
with open(parsed_output_file_name, "w") as file:
json.dump(parsed_reports_per_project, file, cls=CustomEncoder, indent=4)
| [
7061,
6,
198,
198,
41972,
319,
5267,
13,
2242,
11,
2177,
198,
198,
31,
9800,
6858,
19654,
571,
198,
198,
7061,
6,
198,
198,
7061,
6,
198,
21327,
11,
262,
4136,
32965,
5072,
3696,
743,
3994,
220,
198,
20930,
2482,
286,
517,
621,
53... | 2.537079 | 445 |
import boto3
import json
import re
import requests
from yig.bot import listener, RE_MATCH_FLAG
import yig.config
@listener(r"history.<(https.*)>", RE_MATCH_FLAG)
def show_history(bot):
""":bookmark_tabs: *history chara session*
`/cc history YOUR_CHARACTER_SHEET_URL`
"""
matcher = re.match(r".*<(https.*)>", bot.message)
url_plane = matcher.group(1)
lst_key = search_all_session(bot.team_id, url_plane)
return "見た目を無視した版です。\n\n" + "\n".join(lst_key), None
| [
11748,
275,
2069,
18,
198,
11748,
33918,
198,
11748,
302,
198,
11748,
7007,
198,
6738,
331,
328,
13,
13645,
1330,
24783,
11,
4526,
62,
44,
11417,
62,
38948,
198,
198,
11748,
331,
328,
13,
11250,
198,
198,
31,
4868,
877,
7,
81,
1,
... | 2.293839 | 211 |
import os
nome_arq = input()
arq = open(nome_arq, 'r')
for i in arq.readlines():
file_name = i.strip('\n').split(' ')
for j in file_name:
if os.path.exists(j):
print(j)
arq.close()
# print(file_name)
# file = open(file, 'r')
# if
# print(arq.readlines()) | [
11748,
28686,
198,
198,
77,
462,
62,
283,
80,
796,
5128,
3419,
198,
198,
283,
80,
796,
1280,
7,
77,
462,
62,
283,
80,
11,
705,
81,
11537,
198,
198,
1640,
1312,
287,
610,
80,
13,
961,
6615,
33529,
198,
220,
220,
220,
2393,
62,
... | 1.91875 | 160 |
import tweepy
from autenticadores import google_api_auth
from random import choice
import gspread
# TODO remover (?)
def google_sshet():
"""
Função simples para retornar um objeto capaz de manipular as planilhas do Google Sheets.
"""
session = google_api_auth()
ggle_cred = gspread.Client(None, session)
return ggle_cred
# TODO remover
def checar_timelines(twitter_hander, mastodon_handler, url, orgao):
"""
Recupera os 10 últimos toots/tweets da conta do Mastodon/Twitter.
Caso a URL não esteja entre as últimas notificadas, é feita a postagem.
Feature necessária para não floodar a timeline alheia caso um site fique offline por longos períodos de tempo.
"""
mastodon_bot = mastodon_handler
twitter_bot = twitter_hander
timeline = mastodon_bot.timeline_home(limit=10)
urls_postadas = [toot["content"] for toot in timeline]
contem = any(url in toot for toot in urls_postadas)
if not contem:
mastodon_bot.toot(lista_frases(url=url, orgao=orgao))
try:
twitter_bot.update_status(status=lista_frases(url=url, orgao=orgao))
except tweepy.TweepError as error:
if error.api_code == 187:
print('duplicate message')
else:
raise error
| [
11748,
4184,
538,
88,
198,
6738,
1960,
298,
291,
324,
2850,
1330,
23645,
62,
15042,
62,
18439,
198,
6738,
4738,
1330,
3572,
198,
11748,
308,
43639,
198,
198,
2,
16926,
46,
816,
2502,
357,
10091,
198,
4299,
23645,
62,
824,
3202,
33529,... | 2.398148 | 540 |
from sql_alchemy_db_instance import db
import pandas as pd
import sqlite3
from sqlalchemy import Column, Integer, Float
import pandas as pd
import numpy as np
from pandas import DataFrame
from flask_sqlalchemy import SQLAlchemy
| [
6738,
44161,
62,
282,
26599,
62,
9945,
62,
39098,
1330,
20613,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
44161,
578,
18,
198,
6738,
44161,
282,
26599,
1330,
29201,
11,
34142,
11,
48436,
198,
11748,
19798,
292,
355,
279,
67,
19... | 3.171053 | 76 |
#!/usr/bin/env python3
'''Sphinx documentation builder configuration file.
For a full list of options see the documentation:
https://www.sphinx-doc.org/en/master/usage/configuration.html
'''
import eminus
project = 'eminus'
author = 'Wanja Timm Schulze'
copyright = '2021-2022, Wanja Timm Schulze'
version = eminus.__version__
release = eminus.__version__.rpartition('.')[0]
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode'
]
templates_path = ['_templates']
pygments_style = 'friendly'
pygments_dark_style = 'native'
language = 'en'
html_theme = 'furo'
html_favicon = 'logo/eminus_favicon.png'
html_theme_options = {
'light_logo': 'logo/eminus_logo.png',
'light_css_variables': {
'color-brand-primary': '#006700',
'color-brand-content': '#1a962b'
},
'dark_logo': 'logo/eminus_logo_dark.png',
'dark_css_variables': {
'color-brand-primary': '#70a973',
'color-brand-content': '#a0dba2'
},
'footer_icons': [
{
'name': 'GitLab',
'url': 'https://gitlab.com/nextdft/eminus',
'html': '<svg stroke="currentColor" fill="currentColor" viewBox="0 0 16 16"><g transform="matrix(.083313 0 0 .083313 -7.8292 -8.1245)"><path d="m282.83 170.73-0.27-0.69-26.14-68.22a6.81 6.81 0 0 0-2.69-3.24 7 7 0 0 0-8 0.43 7 7 0 0 0-2.32 3.52l-17.65 54h-71.47l-17.65-54a6.86 6.86 0 0 0-2.32-3.53 7 7 0 0 0-8-0.43 6.87 6.87 0 0 0-2.69 3.24l-26.19 68.19-0.26 0.69a48.54 48.54 0 0 0 16.1 56.1l0.09 0.07 0.24 0.17 39.82 29.82 19.7 14.91 12 9.06a8.07 8.07 0 0 0 9.76 0l12-9.06 19.7-14.91 40.06-30 0.1-0.08a48.56 48.56 0 0 0 16.08-56.04z"></path></g></svg>' # noqa: E501
}
]
}
html_static_path = ['']
html_show_sphinx = False
autodoc_preserve_defaults = True
napoleon_use_rtype = False
def dunder_skip(app, what, name, obj, would_skip, options):
'''Exclude all dunder methods.'''
if name.startswith('_'):
return True
return would_skip
def setup(app):
'''Customized build process.'''
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
import examples_builder
app.connect('builder-inited', examples_builder.generate)
app.connect('autodoc-skip-member', dunder_skip)
app.connect('build-finished', examples_builder.clean)
return
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
7061,
6,
50,
746,
28413,
10314,
27098,
8398,
2393,
13,
198,
198,
1890,
257,
1336,
1351,
286,
3689,
766,
262,
10314,
25,
198,
5450,
1378,
2503,
13,
82,
746,
28413,
12,
15390,
13,
... | 2.158038 | 1,101 |
n = int(input())
nh = int(input())
vh = float(input())
sal = nh * vh
print(f'NUMBER = {n}')
print(f'SALARY = U$ {sal:.2f}')
| [
77,
796,
493,
7,
15414,
28955,
201,
198,
77,
71,
796,
493,
7,
15414,
28955,
201,
198,
85,
71,
796,
12178,
7,
15414,
28955,
201,
198,
21680,
796,
299,
71,
1635,
410,
71,
201,
198,
4798,
7,
69,
6,
41359,
13246,
796,
1391,
77,
92,
... | 1.911765 | 68 |
import jwt
from ...ports.providers import JwtProvider
| [
11748,
474,
46569,
198,
6738,
2644,
3742,
13,
15234,
4157,
1330,
449,
46569,
29495,
628
] | 3.666667 | 15 |
from invoke import task
from subprocess import run
from tasks.util.env import (
KUBECTL_BIN,
PROJ_ROOT,
)
@task
def uninstall(ctx):
"""
Uninstall uk8s
"""
rm_cmd = "sudo snap remove microk8s"
print(rm_cmd)
run(rm_cmd, shell=True, check=True)
@task
def reset(ctx):
"""
Reset the uk8s cluster from scratch
"""
# Uninstall the existing
uninstall(ctx)
# Install
install_cmd = "./bin/install_microk8s.sh"
print(install_cmd)
run(install_cmd, cwd=PROJ_ROOT, shell=True, check=True)
# Update credentials
credentials(ctx)
@task
def credentials(ctx):
"""
Set credentials for the uk8s cluster
"""
# Delete existing .kube config directory
del_cmd = "sudo rm -rf ~/.kube"
print(del_cmd)
run(del_cmd, shell=True, check=True)
# Create new .kube config directory
mkdir_cmd = "mkdir -p ~/.kube"
print(mkdir_cmd)
run(mkdir_cmd, shell=True, check=True)
# Load the local config
config_cmd = "sudo microk8s config > ~/.kube/config"
print(config_cmd)
run(config_cmd, shell=True, check=True)
# Check we can access the cluster
cmd = "{} get nodes".format(KUBECTL_BIN)
print(cmd)
run(cmd, shell=True, check=True)
| [
6738,
26342,
1330,
4876,
198,
6738,
850,
14681,
1330,
1057,
198,
198,
6738,
8861,
13,
22602,
13,
24330,
1330,
357,
198,
220,
220,
220,
509,
10526,
9782,
43,
62,
33,
1268,
11,
198,
220,
220,
220,
21965,
41,
62,
13252,
2394,
11,
198,
... | 2.435798 | 514 |
import numpy as np
from _main_decision._helper_functions.log_sum_exp import log_sum_exp
| [
11748,
299,
32152,
355,
45941,
198,
198,
6738,
4808,
12417,
62,
12501,
1166,
13557,
2978,
525,
62,
12543,
2733,
13,
6404,
62,
16345,
62,
11201,
1330,
2604,
62,
16345,
62,
11201,
628,
628
] | 2.787879 | 33 |
from .. import base
class VolumeType(base.Resource):
"""A Volume Type is the type of volume to be created."""
class VolumeTypeManager(base.ManagerWithFind):
"""Manage :class:`VolumeType` resources."""
resource_class = VolumeType
def list(self, search_opts=None, is_public=True):
"""Lists all volume types.
:param search_opts
:param is_public
:rtype: list of :class:`VolumeType`.
"""
query_string = ''
if not is_public:
query_string = '?is_public=%s' % is_public
return self._list("/volume_types%s" % (query_string), "volume_types")
def get(self, volume_type):
"""Get a specific volume type.
:param volume_type: The ID of the :class:`VolumeType` to get.
:rtype: :class:`VolumeType`
"""
return self._get("/volume_types/%s" % base.getid(volume_type), "volume_type")
| [
6738,
11485,
1330,
2779,
628,
198,
4871,
14701,
6030,
7,
8692,
13,
26198,
2599,
198,
220,
220,
220,
37227,
32,
14701,
5994,
318,
262,
2099,
286,
6115,
284,
307,
2727,
526,
15931,
628,
198,
4871,
14701,
6030,
13511,
7,
8692,
13,
13511,... | 2.427807 | 374 |
import httplib2
import json
import sys
if __name__== '__main__':
print("Running Endpoint Tester....\n")
address = input("Please enter the address of the server ou want to access!")
if address == '':
address = 'http://localhost:5000'
print("Making the GET Request for /puppies...")
try:
url = address + '/puppies'
h = httplib2.Http()
print(url)
resp, result = h.request(url, 'GET')
if resp['result'] != '200':
raise Exception("Recieved an unsuccessful status code of {}".format(resp['status']))
except Exception as err:
print("Test 1 FAILED: Could not make GET Request to web server")
print(err.args)
else:
print("Test 1 PASS: Successfully made a GET Request to puppies")
print("Making a POST Request to /puppies...")
try:
url = address + '/puppies'
h = httplib2.Http()
resp, result = h.request(url, 'POST')
if resp['status'] != '200':
raise Exception("Recieved an unsuccessful status code {}".format(resp['status']))
except Exception as err:
print("Test 2 FAILED: Could not make POST Request to web server")
print(err.args)
sys.exit()
else:
print("Test 2 PASS: Successfully made POST Request to /puppies")
print("Making GET Request to /puppies/id")
try:
id = 1
while id <=10:
url = address + '/puppies/{}'.format(id)
h = httplib2.Http()
resp,result = h.request(url, 'GET')
if resp['status'] != '200':
raise Exception("Recieved an unsuccessful status code {}".format(resp['status']))
id = id +1
except Exception as err:
print("Test 3 FAILED: Could not make GET Request to /puppies/id")
print(err.args)
sys.exit()
else:
print("Test 3 PASS: Successfully made GET Request to /puppies/id")
print("Making PUT Request to /puppies/id")
try:
id = 1
while id <=10:
url = address + '/puppies/{}'.format(id)
h = httplib2.Http()
resp,result = h.request(url, 'PUT')
if resp['status'] != '200':
raise Exception("Recieved an unsuccessful status code {}".format(resp['status']))
id = id +1
except Exception as err:
print("Test 4 FAILED: Could not make PUT Request to /puppies/id")
print(err.args)
else:
print("Test 4 PASSED: Successfully made a PUT Request to /puppies/id")
print("Making DELETE Request to puppies/id")
try:
id = 1
while id<=10:
url = address + '/puppies/{}'.format(id)
h = httplib2.Http()
resp,result = h.request(url, 'DELETE')
if resp['status'] != '200':
raise Exception("Recieved an unsuccessful status code {}".format(resp['stauts']))
id = id +1
except Exception as err:
print("Test 5 FAILED: Could not make a DELETE Request to /puppies/id")
print(err.args)
else:
print("Test 5 PASS: Successfully made a DELETE Request to /puppies/id")
print("HURRAY, ALL TEST PASSED!!")
| [
11748,
1841,
489,
571,
17,
201,
198,
11748,
33918,
201,
198,
11748,
25064,
201,
198,
201,
198,
201,
198,
361,
11593,
3672,
834,
855,
705,
834,
12417,
834,
10354,
201,
198,
220,
220,
220,
3601,
7203,
28768,
5268,
4122,
309,
7834,
1106,... | 2.196929 | 1,498 |
import pynvim
import todoist
from dateutil.parser import parse
from datetime import datetime, time, timezone
from dateutil.tz import tzlocal
@pynvim.plugin
| [
11748,
279,
2047,
31124,
198,
11748,
284,
4598,
396,
198,
6738,
3128,
22602,
13,
48610,
1330,
21136,
198,
6738,
4818,
8079,
1330,
4818,
8079,
11,
640,
11,
640,
11340,
198,
6738,
3128,
22602,
13,
22877,
1330,
256,
89,
12001,
628,
198,
... | 3.291667 | 48 |
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""representer point"""
import logging
import functools
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from ...base_interpret import Interpreter
from .example_base_interpreter import ExampleBaseInterpreter
from ..common.utils import get_sublayer, get_struct_res, get_top_and_bottom_n_examples
class SoftmaxClassifier(nn.Layer):
"""
Softmax classifier with cross-entropy loss.
"""
def __init__(self, in_feature, out_feature, params):
"""
Initialization.
"""
super().__init__()
self.linear = paddle.nn.Linear(in_feature, out_feature, bias_attr=False)
self.linear.weight.set_value(params)
def forward(self, features, probas):
"""
Calculate loss for the loss function and L2 regularizer.
"""
logits = self.linear(features)
logits_max = paddle.max(logits, axis=1, keepdim=True)
logits = logits - logits_max
A = paddle.log(paddle.sum(paddle.exp(logits), axis=1))
B = paddle.sum(logits * probas, axis=1)
loss = paddle.sum(A - B)
l2 = paddle.sum(paddle.square(self.linear.weight))
return (loss, l2)
class RepresenterPointBase(nn.Layer):
"""
Class for learning a representer point model
"""
def __init__(
self,
paddle_model,
optimizer_name="SGD",
classifier_layer_name="classifier",
learning_rate=5e-2,
lmbd=0.03,
epochs=40000,
correlation=True,
):
"""
Initialization
"""
super().__init__()
weight, params = self._get_params(paddle_model, classifier_layer_name)
self.softmax_classifier = SoftmaxClassifier(weight.shape[0] + 1, weight.shape[1], params)
self.learning_rate = learning_rate
self.lmbd = lmbd
self.epochs = epochs
self.optimizer = getattr(paddle.optimizer, optimizer_name)
self.correlation = correlation
def _get_params(self, paddle_model, classifier_layer_name):
"""
Get the parameters of classifier_layer in model.
"""
classifier = get_sublayer(paddle_model, classifier_layer_name)
weight, bias = classifier.weight, classifier.bias
params = paddle.concat([weight, paddle.unsqueeze(bias, axis=0)], axis=0)
return weight, params
def train(self, input_feature, input_probas):
"""
Train a representer point model.
"""
# input_feature is the feature of a given model, input_probas is the probabilities of input_feature
input_feature = paddle.concat(
[
input_feature,
paddle.ones((input_feature.shape[0], 1), dtype=input_feature.dtype),
],
axis=1,
)
input_num = len(input_probas)
min_loss = float("inf")
optimizer = self.optimizer(
learning_rate=self.learning_rate,
parameters=self.softmax_classifier.linear.parameters(),
)
print("Training representer point model, it will take several minutes...")
for epoch in range(self.epochs):
classifier_loss, L2 = self.softmax_classifier(input_feature, input_probas)
loss = L2 * self.lmbd + classifier_loss / input_num
classifier_mean_loss = classifier_loss / input_num
loss.backward()
grad_loss = paddle.mean(paddle.abs(self.softmax_classifier.linear.weight.grad)).numpy()
# save the W with the lowest grad_loss
if grad_loss < min_loss:
if epoch == 0:
init_grad = grad_loss
min_loss = grad_loss
best_W = self.softmax_classifier.linear.weight
if min_loss < init_grad / 200:
logging.info(f"stopping criteria reached in epoch:{epoch}")
optimizer.clear_grad()
break
optimizer.step()
optimizer.clear_grad()
if epoch % 1000 == 0:
logging.info(
f"Eopch:{epoch:4d}\tloss:{loss.numpy()}\tphi_loss:{classifier_mean_loss.numpy()}\tgrad:{grad_loss}")
# caluculate w based on the representer theorem's decomposition
logits = paddle.matmul(input_feature, best_W)
logits_max = paddle.max(logits, axis=1, keepdim=True)
logits = logits - logits_max # avoids numerical overflow
softmax_value = F.softmax(logits)
# derivative of softmax cross entropy
weight_matrix = softmax_value - input_probas
weight_matrix = weight_matrix / (-2.0 * self.lmbd * input_num) # alpha
if self.correlation:
try:
from scipy.stats.stats import pearsonr
except ImportError as e:
import sys
sys.stderr.write(
'''Info about import scipy: please install scipy firstly. cmd: pip install scipy. We need to calculate the pearsonr correlation between the representre point model and the gived model'''
)
return weight_matrix
best_w = paddle.matmul(paddle.t(input_feature), weight_matrix) # alpha * f_i^T
# calculate y_p, which is the prediction based on decomposition of w by representer theorem
logits = paddle.matmul(input_feature, best_w) # alpha * f_i^T * f_t
logits_max = paddle.max(logits, axis=1, keepdim=True)
logits = logits - logits_max
y_p = F.softmax(logits)
print("L1 difference between ground truth prediction and prediction by representer theorem decomposition")
print(F.l1_loss(input_probas, y_p).numpy())
print("pearson correlation between ground truth prediction and prediciton by representer theorem")
corr, _ = pearsonr(input_probas.flatten().numpy(), (y_p).flatten().numpy())
print(corr)
return weight_matrix
class RepresenterPointModel(ExampleBaseInterpreter):
"""
Representer Point Model for NLP tasks.
More details regarding the representer point method can be found in the original paper:
https://proceedings.neurips.cc/paper/2018/file/8a7129b8f3edd95b7d969dfc2c8e9d9d-Paper.pdf
"""
def __init__(
self,
paddle_model,
train_dataloader,
device="gpu",
classifier_layer_name="classifier",
predict_fn=None,
learning_rate=5e-2,
lmbd=0.03,
epochs=40000,
):
"""
Initialization.
Args:
paddle_model(callable): A model with ``forward``.
train_dataloader(iterable): Dataloader of model's training data.
device(str: default=gpu): Device type, and it should be ``gpu``, ``cpu`` etc.
classifier_layer_name(str: default=classifier): Name of the classifier layer in paddle_model.
predict_fn(callabel: default=None): If the paddle_model prediction has special process, user can customize the prediction function.
learning_rate(float: default=5e-2): Learning rate.
lmbd(float: default=0.03): The coefficient of l2 regularization.
epochs(int: default=4000): The total epochs to trianing representer point model.
"""
ExampleBaseInterpreter.__init__(self, paddle_model, device, predict_fn, classifier_layer_name)
self.paddle_model = paddle_model
self.classifier_layer_name = classifier_layer_name
self.represerter_point = RepresenterPointBase(
paddle_model,
classifier_layer_name=classifier_layer_name,
learning_rate=learning_rate,
lmbd=lmbd,
epochs=epochs,
)
self.train_feature, self.train_probas, _ = self.extract_feature(paddle_model, train_dataloader)
self.weight_matrix = self.represerter_point.train(self.train_feature, self.train_probas)
def interpret(self, data, sample_num=3):
"""
Select postive and negtive examples for a given data.
Args:
data(iterable): Dataloader to interpret.
sample_num(int: default=3): the number of positive examples and negtive examples selected for each instance. Return all the training examples ordered by `influence score` if this parameter is -1.
"""
if sample_num == -1:
sample_num = len(self.train_feature)
pos_examples = []
neg_examples = []
val_feature, _, preds = self.extract_feature(self.paddle_model, data)
for index, target_class in enumerate(preds):
tmp = self.weight_matrix[:, target_class] * paddle.sum(
self.train_feature * paddle.to_tensor(val_feature[index]), axis=1)
pos_idx, neg_idx = get_top_and_bottom_n_examples(tmp, sample_num=sample_num)
pos_examples.append(pos_idx)
neg_examples.append(neg_idx)
preds = preds.tolist()
res = get_struct_res(preds, pos_examples, neg_examples)
return res
@paddle.no_grad()
| [
2,
220,
220,
15069,
357,
66,
8,
33160,
350,
37382,
47,
37382,
46665,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
... | 2.308315 | 4,197 |
"""Hackerrank Problem: https://www.hackerrank.com/challenges/np-min-and-max/problem
Task
You are given a 2-D array with dimensions N x M.
Your task is to perform the min function over axis 1 and then find the max of that.
"""
import numpy
n, m = map(int, input().split(' '))
array = []
for _ in range(n):
a = list(map(int, input().split(' ')))
array.append(a)
min_array = numpy.min(array, axis=1)
print(max(min_array))
| [
37811,
32833,
8056,
962,
20647,
25,
3740,
1378,
2503,
13,
31153,
8056,
962,
13,
785,
14,
36747,
34120,
14,
37659,
12,
1084,
12,
392,
12,
9806,
14,
45573,
198,
198,
25714,
198,
198,
1639,
389,
1813,
257,
362,
12,
35,
7177,
351,
15225... | 2.751592 | 157 |
# Generated by Django 2.1.1 on 2018-10-03 03:16
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
362,
13,
16,
13,
16,
319,
2864,
12,
940,
12,
3070,
7643,
25,
1433,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
from django.apps import AppConfig
| [
6738,
42625,
14208,
13,
18211,
1330,
2034,
16934,
628
] | 3.888889 | 9 |
# Copyright (c) 2019 Lukas Koschmieder
import asyncio
from bqplot import Axis, Figure, LinearScale, Lines, Toolbar
from ipywidgets import Label, HBox, VBox
import logging
module_logger = logging.getLogger(__name__)
module_logger.setLevel(logging.INFO)
module_logger.addHandler(logging.StreamHandler())
| [
2,
15069,
357,
66,
8,
13130,
28102,
292,
18884,
354,
76,
798,
263,
198,
198,
11748,
30351,
952,
198,
6738,
275,
80,
29487,
1330,
38349,
11,
11291,
11,
44800,
29990,
11,
26299,
11,
16984,
5657,
198,
6738,
20966,
88,
28029,
11407,
1330,... | 3.070707 | 99 |
"""The code template to supply to the front end. This is what the user will
be asked to complete and submit for grading.
Do not include any imports.
This is not a REPL environment so include explicit 'print' statements
for any outputs you want to be displayed back to the user.
Use triple single quotes to enclose the formatted code block.
"""
challenge_code = '''dev = qml.device("default.qubit", wires=1)
@qml.qnode(dev)
def mag_z_0_v2(B, time):
"""Simulates an electron (initial state |0>) in a magnetic field, using a
Z rotation.
Args:
B (float): The strength of the field, assumed to point in the z direction.
time (float): The time we evolve the electron state for.
Returns:
array[complex]: The state of the system after evolution.
"""
e = 1.6e-19
m_e = 9.1e-31
alpha = B*e/(2*m_e)
##################
# YOUR CODE HERE #
##################
return qml.state()
B, t = 0.1, 0.6
if np.allclose(mag_z_0_v1(B, t), mag_z_0_v2(B, t)):
print("The two circuits give the same answer!")
'''
| [
37811,
464,
2438,
11055,
284,
5127,
284,
262,
2166,
886,
13,
770,
318,
644,
262,
2836,
481,
198,
220,
220,
220,
307,
1965,
284,
1844,
290,
9199,
329,
43165,
13,
628,
220,
220,
220,
2141,
407,
2291,
597,
17944,
13,
628,
220,
220,
2... | 2.705446 | 404 |
try:
# Use internal aplpy
from .my_aplpy import *
except ImportError:
# Try and load the aplpy from the environment
from aplpy import *
| [
28311,
25,
198,
220,
220,
220,
220,
198,
220,
220,
220,
1303,
5765,
5387,
257,
489,
9078,
198,
220,
220,
220,
220,
198,
220,
220,
220,
422,
764,
1820,
62,
64,
489,
9078,
1330,
1635,
198,
220,
220,
220,
220,
198,
16341,
17267,
1233... | 2.263158 | 76 |
# Number Of Islands
if __name__ == "__main__":
sol = Solution()
grid = [
["1","1","0","0","0"],
["1","1","0","0","0"],
["0","0","1","0","0"],
["0","0","0","1","1"]
]
print(sol.numIslands(grid))
| [
2,
7913,
3226,
12010,
628,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
1540,
796,
28186,
3419,
198,
220,
220,
220,
10706,
796,
685,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
... | 1.774834 | 151 |
#-*- coding: utf-8 -*-
#!/usr/bin/python3
#!/Author : Gibartes
from moduleInterface.defines import *
from moduleInterface.interface import ModuleComponentInterface
#from defines import *
#from interface import ModuleComponentInterface
from structureReader import structureReader as sr
#from structureReader import StructureReader as parser
#from structureReader import _WindowsEventLogStructure as structure
import binascii
import os,sys,platform
# carving module for evt/evtx
if __name__ == '__main__':
idx = ModuleEvt()
try:
idx.set_attrib(ModuleConstant.FILE_ATTRIBUTE,sys.argv[1]) # Insert .idx File
except:
print("This moudule needs exactly one parameter.")
sys.exit(1)
idx.set_attrib(ModuleConstant.IMAGE_BASE,0) # Set offset of the file base
#idx.set_attrib(ModuleConstant.IMAGE_LAST,65535)
idx.set_attrib(ModuleConstant.CLUSTER_SIZE,1024)
cret = idx.execute()
print(cret)
idx.set_attrib(ModuleConstant.IMAGE_BASE,65536) # Set offset of the file base
idx.set_attrib(ModuleConstant.IMAGE_LAST,262140)
idx.set_attrib(ModuleConstant.CLUSTER_SIZE,1024)
cret = idx.execute()
print(cret)
sys.exit(0)
| [
2,
12,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
198,
2,
48443,
13838,
1058,
12488,
433,
274,
198,
198,
6738,
8265,
39317,
13,
4299,
1127,
220,
220,
1330,
1635,
198,... | 2.46169 | 509 |
#!/usr/bin/env python
# Filename: rm_unCompleted_task
"""
introduction:
authors: Huang Lingcao
email:huanglingcao@gmail.com
add time: 16 October, 2019
"""
import os
dir = 'multi_inf_results'
for i in range(0,78):
if os.path.isfile(os.path.join(dir,str(i)+'.txt')):
if os.path.isfile(os.path.join(dir,str(i)+'.txt_done')):
print('task %d is completed'%i)
else:
print('task %d is not completed, remove the folder' % i)
folder = os.path.join(dir, 'I%d'%i)
os.system('rm -rf '+ folder)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
7066,
12453,
25,
42721,
62,
403,
43768,
62,
35943,
220,
198,
37811,
198,
27427,
596,
25,
198,
198,
41617,
25,
31663,
25116,
66,
5488,
198,
12888,
25,
13415,
27499,
66,
5488,
31,
1... | 2.164063 | 256 |
from allauth.account.adapter import DefaultAccountAdapter
from allauth.exceptions import ImmediateHttpResponse
from allauth.socialaccount import signals
from allauth.socialaccount.adapter import DefaultSocialAccountAdapter
from allauth.socialaccount.models import SocialLogin
from django.conf import settings
from django.contrib.auth import login
from django.http import HttpResponseRedirect
from django.utils.translation import ugettext_lazy as _
from rest_auth.utils import jwt_encode
from seedorf.users.models import User
from seedorf.utils.email import send_mail
from seedorf.utils.firebase import get_firebase_link
| [
6738,
477,
18439,
13,
23317,
13,
324,
3429,
1330,
15161,
30116,
47307,
198,
6738,
477,
18439,
13,
1069,
11755,
1330,
1846,
13857,
43481,
31077,
198,
6738,
477,
18439,
13,
14557,
23317,
1330,
10425,
198,
6738,
477,
18439,
13,
14557,
23317,... | 3.845679 | 162 |
"""
"""
from datetime import datetime
from typing import Optional
from bs4 import BeautifulSoup
import dateutil.parser
from pastebin_crawler.helpers import remove_beginning_slash_from_str
from pastebin_crawler.helpers.constants import Urls, PostDivName
from pastebin_crawler.services import BaseCrawlerService
| [
37811,
198,
198,
37811,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
19720,
1330,
32233,
198,
198,
6738,
275,
82,
19,
1330,
23762,
50,
10486,
198,
11748,
3128,
22602,
13,
48610,
198,
198,
6738,
1613,
23497,
62,
66,
39464,
13,
1... | 3.488889 | 90 |
#!/usr/bin/python
import sys
import glob
################################################################################
# Fake Comport + Arduino simulation
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
198,
11748,
25064,
198,
11748,
15095,
628,
198,
29113,
29113,
14468,
198,
2,
33482,
955,
634,
1343,
27634,
18640,
198
] | 5.75 | 28 |
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
| [
2220,
7203,
31,
65,
41319,
62,
31391,
1003,
31391,
14,
11249,
62,
4299,
82,
14,
260,
7501,
25,
4023,
13,
65,
48274,
1600,
366,
4023,
62,
17474,
4943,
198
] | 2.37931 | 29 |
# Boletim com listas compostas
'''Crie um programa que leia NOME e DUAS
NOTAS de vários alunos e guarde tudo em
uma LISTA COMPOSTA. No final, mostre um
BOLETIM contendo a MÉDIA de cada um e
permita que o usuário possa mostrar as
NOTAS de cada aluno individualmente'''
lista = []
while True:
alunos = []
notas = []
alunos.clear()
notas.clear()
alunos.append(str(input('Nome: ')))
notas.append(float(input('Nota 1: ')))
notas.append(float(input('Nota 2: ')))
alunos.append(notas)
lista.append(alunos)
resp = str(input('Quer continuar? [S/N] ')).upper().strip()[0]
if resp in 'Nn':
break
print('\033[1:37m''-=''\033[m' * 30)
print(f'{"No.":<5}{"NOME":<10}{"MÉDIA":>9}') # :<4, :<10 e :>8 são Códigos de alinhamento
print('\033[1:37m''-''\033[m' * 32)
for i, aluno in enumerate(lista):
nota1 = lista[i][1][0]
nota2 = lista[i][1][1]
media = (nota1 + nota2) / 2
print(f'{i:<3} {lista[i][0]:<8} {media:>5.1f}')
while True:
print('\033[1:37m''-''\033[m' * 35)
n = int(input('Mostrar notas de qual aluno? (999 interrompe): '))
if n == 999:
print('FINALIZANDO...')
break
if n <= len(lista) - 1:
print(f'Notas de {lista[n][0]} são {lista[n][1]}')
print('<<< VOLTE SEMPRE >>>')
| [
2,
3248,
1616,
320,
401,
1351,
292,
36541,
292,
198,
7061,
6,
34,
5034,
23781,
1430,
64,
8358,
443,
544,
399,
13649,
304,
35480,
1921,
198,
11929,
1921,
390,
410,
6557,
380,
418,
435,
403,
418,
304,
4860,
68,
256,
12003,
795,
198,
... | 2.028481 | 632 |
# -*- coding: utf-8 -*-
'''
Functions to work with JSON
'''
from __future__ import absolute_import, unicode_literals
# Import Python libs
import json # future lint: blacklisted-module
import logging
# Import Salt libs
import salt.utils.data
import salt.utils.stringutils
# Import 3rd-party libs
from salt.ext import six
log = logging.getLogger(__name__)
def find_json(raw):
'''
Pass in a raw string and load the json when it starts. This allows for a
string to start with garbage and end with json but be cleanly loaded
'''
ret = {}
for ind, _ in enumerate(raw):
working = '\n'.join(raw.splitlines()[ind:])
try:
ret = json.loads(working, object_hook=salt.utils.data.decode_dict) # future lint: blacklisted-function
except ValueError:
continue
if ret:
return ret
if not ret:
# Not json, raise an error
raise ValueError
def import_json():
'''
Import a json module, starting with the quick ones and going down the list)
'''
for fast_json in ('ujson', 'yajl', 'json'):
try:
mod = __import__(fast_json)
log.trace('loaded %s json lib', fast_json)
return mod
except ImportError:
continue
def load(fp, **kwargs):
'''
.. versionadded:: Oxygen
Wraps json.load
You can pass an alternate json module (loaded via import_json() above)
using the _json_module argument)
'''
return kwargs.pop('_json_module', json).load(fp, **kwargs)
def loads(s, **kwargs):
'''
.. versionadded:: Oxygen
Wraps json.loads and prevents a traceback in the event that a bytestring is
passed to the function. (Python < 3.6 cannot load bytestrings)
You can pass an alternate json module (loaded via import_json() above)
using the _json_module argument)
'''
json_module = kwargs.pop('_json_module', json)
try:
return json_module.loads(s, **kwargs)
except TypeError as exc:
# json.loads cannot load bytestrings in Python < 3.6
if six.PY3 and isinstance(s, bytes):
return json_module.loads(s.decode(__salt_system_encoding__), **kwargs)
else:
raise exc
def dump(obj, fp, **kwargs):
'''
.. versionadded:: Oxygen
Wraps json.dump, and assumes that ensure_ascii is False (unless explicitly
passed as True) for unicode compatibility. Note that setting it to True
will mess up any unicode characters, as they will be dumped as the string
literal version of the unicode code point.
On Python 2, encodes the result to a str since json.dump does not want
unicode types.
You can pass an alternate json module (loaded via import_json() above)
using the _json_module argument)
'''
json_module = kwargs.pop('_json_module', json)
if 'ensure_ascii' not in kwargs:
kwargs['ensure_ascii'] = False
if six.PY2:
obj = salt.utils.data.encode(obj)
return json_module.dump(obj, fp, **kwargs) # future lint: blacklisted-function
def dumps(obj, **kwargs):
'''
.. versionadded:: Oxygen
Wraps json.dumps, and assumes that ensure_ascii is False (unless explicitly
passed as True) for unicode compatibility. Note that setting it to True
will mess up any unicode characters, as they will be dumped as the string
literal version of the unicode code point.
On Python 2, encodes the result to a str since json.dumps does not want
unicode types.
You can pass an alternate json module (loaded via import_json() above)
using the _json_module argument)
'''
import sys
json_module = kwargs.pop('_json_module', json)
if 'ensure_ascii' not in kwargs:
kwargs['ensure_ascii'] = False
if six.PY2:
obj = salt.utils.data.encode(obj)
return json_module.dumps(obj, **kwargs) # future lint: blacklisted-function
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
7061,
6,
198,
24629,
2733,
284,
670,
351,
19449,
198,
7061,
6,
198,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
11,
28000,
1098,
62,
17201,
874,
198,
198,
2,
... | 2.629357 | 1,492 |
from functools import wraps
from flask import session, url_for, redirect, request
__author__ = 'ibininja'
| [
6738,
1257,
310,
10141,
1330,
27521,
198,
198,
6738,
42903,
1330,
6246,
11,
19016,
62,
1640,
11,
18941,
11,
2581,
198,
198,
834,
9800,
834,
796,
705,
571,
259,
259,
6592,
6,
198
] | 3.272727 | 33 |
import copy
from hex_walker_constants import TIP_SERVO, MID_SERVO, ROT_SERVO, WAIST_SERVO
# NOTE: these values are ANGLES not raw pwms
# thing.rot_servo and thing.list[ROT_SERVO] are synonymous/aliases that point to the same underlying data, for both read/write
# TODO: remove normal/crouch version and make them derived from tall version
# NOTE: table naming convention is: (standing height)_(gait)_(what type of movement)_TABLE
# table to be used when the robot is trying to rotate in place
LEG_NORMAL_ROTATION_TABLE = {
"NEUTRAL": Leg_Position(90, 90, 0),
"UP_NEUTRAL": Leg_Position(90, 135, 0),
"RIGHT": Leg_Position(120, 90, 5),
"UP_RIGHT": Leg_Position(120, 135, 5),
"LEFT": Leg_Position(60, 90, 5),
"UP_LEFT": Leg_Position(60, 135, 5)
}
# table to be used when the robot is trying to move in a "normal" way (moving with two legs forward)
# tip motor, mid motor, rot motor
LEG_NORMAL_MOVEMENT_TABLE = {
#all the positions for the front and back legs
"NEUTRAL": Leg_Position(90, 90, 30),
"UP_NEUTRAL": Leg_Position(90, 135, 30),
"CORN_OUT": Leg_Position(75, 105, 45),
"CORN_IN": Leg_Position(125, 80, 0),
"CORN_UP_OUT": Leg_Position(75, 150, 45),
"CORN_UP_IN": Leg_Position(125, 125, 0),
#now all of the positions for the side legs
"SIDE_RIGHT": Leg_Position(115, 90, 0),
"SIDE_LEFT": Leg_Position(65, 90, 0),
"SIDE_UP_RIGHT": Leg_Position(115, 135, 0),
"SIDE_UP_LEFT": Leg_Position(65, 135, 0)
}
# # table to be used when the robot is trying to move in a "sideways" way (moving with a single leg forward)
# NORMAL_TRI_SIDE_MOVEMENT_TABLE = {
# "NEUTRAL": Leg_Position(90, 90, 0),
# "UP_NEUTRAL": Leg_Position(90, 135, 0),
# "CORN_LEFT": Leg_Position(0, 0, 0),
# "CORN_RIGHT": Leg_Position(0, 0, 0),
# "CENT_OUT": Leg_Position(0, 0, 0),
# "CENT_IN": Leg_Position(0, 0, 0)
# }
# this is extra. don't do this until the above is working
# table to be used when the robot is trying to rotate in place
LEG_CROUCH_ROTATION_TABLE = {
"NEUTRAL": Leg_Position(90, 135, 0),
"UP_NEUTRAL": Leg_Position(90, 180, 0),
"RIGHT": Leg_Position(120, 130, 0),
"UP_RIGHT": Leg_Position(120, 180, 0),
"LEFT": Leg_Position(60, 130, 0),
"UP_LEFT": Leg_Position(60, 180, 0)
}
LEG_CROUCH_MOVEMENT_TABLE = {
"NEUTRAL": Leg_Position(90, 135, 0),
"UP_NEUTRAL": Leg_Position(90, 180, 0),
"CORN_LEFT": Leg_Position(85, 125, 0),
"CORN_RIGHT": Leg_Position(95, 125, 0),
"CORN_UP_LEFT": Leg_Position(85, 175, 0),
"CORN_UP_RIGHT": Leg_Position(95, 175, 0),
"SIDE_RIGHT": Leg_Position(105, 125, 0),
"SIDE_LEFT": Leg_Position(75, 125, 0),
"SIDE_UP_RIGHT": Leg_Position(95, 170, 0),
"SIDE_UP_LEFT": Leg_Position(85, 170, 0)
}
# CROUCH_TRI_SIDE_MOVEMENT_TABLE = {
# "OUT_RIGHT": Leg_Position(0, 0, 0),
# "OUT": Leg_Position(0, 0, 0),
# "OUT_LEFT": Leg_Position(0, 0, 0),
# "RIGHT": Leg_Position(0, 0, 0),
# "NEUTRAL": Leg_Position(0, 0, 0),
# "LEFT": Leg_Position(0, 0, 0),
# "TUCK_RIGHT": Leg_Position(0, 0, 0),
# "TUCK": Leg_Position(0, 0, 0),
# "TUCK_LEFT": Leg_Position(0, 0, 0)
# }
# misc table
# TODO: make neutral/up_neutral in the other movement tables all reference it in this table... only change it in one place!
LEG_MISC_TABLE = {
"NEUTRAL": Leg_Position(90, 45, 30),
"UP_NEUTRAL": Leg_Position(90, 90, 0),
"BOUNCE": Leg_Position(90, 75, 0),
"PULL_UP": Leg_Position(90, 75, 0),
"TWIST_LEFT": Leg_Position(70, 45, 50),
"TWIST_RIGHT": Leg_Position(110, 45, 50),
"STRAIGHT_OUT": Leg_Position(90, 90, 90),
"INIT": Leg_Position(90, 90, 0)
}
LEG_TALL_ROTATION_TABLE = {
"NEUTRAL": LEG_MISC_TABLE["NEUTRAL"],
"UP_NEUTRAL": LEG_MISC_TABLE["UP_NEUTRAL"],
"RIGHT": Leg_Position(120, 45, 30),
"UP_RIGHT": Leg_Position(120, 90, 0),
"LEFT": Leg_Position(60, 45, 30),
"UP_LEFT": Leg_Position(60, 90, 0)
}
# TODO: duplicate neutral/up_neutral with corn_ and side_ names, for readability
LEG_TALL_MOVEMENT_TABLE = {
"NEUTRAL": LEG_MISC_TABLE["NEUTRAL"],
"UP_NEUTRAL": LEG_MISC_TABLE["UP_NEUTRAL"],
"CORN_LEFT": Leg_Position(80, 52, 35),
"CORN_RIGHT": Leg_Position(100, 52, 35),
"CORN_UP_LEFT": Leg_Position(80, 90, 0),
"CORN_UP_RIGHT": Leg_Position(100, 90, 0),
"SIDE_RIGHT": Leg_Position(130, 60, 25),
"SIDE_LEFT": Leg_Position(50, 60, 25),
"SIDE_UP_RIGHT": Leg_Position(130, 80, 0),
"SIDE_UP_LEFT": Leg_Position(50, 80, 0)
}
# step 1: build a better logical motion sequence, done
# step 2: define the poses to make that sequence work, WIP
# this is assuming the default position is 90/60/30, which would require most of the other existing motions to be changed...
LEG_NEW_MOVEMENT_TABLE = {
"SIDE_NEUTRAL": Leg_Position(90, 60, 30),
"SIDE_NEUTRAL_UP": Leg_Position(90, 90, 0),
"SIDE_RIGHT": Leg_Position(116, 53, 48),
"SIDE_RIGHT_UP": Leg_Position(116, 83, 18),
"SIDE_LEFT": Leg_Position(64, 53, 48),
"SIDE_LEFT_UP": Leg_Position(64, 83, 18),
"CORN_NEUTRAL": Leg_Position(90, 60, 30),
"CORN_NEUTRAL_UP": Leg_Position(90, 90, 0),
"CORN_LEFTIN": Leg_Position(55, 55, 10),
"CORN_LEFTIN_UP": Leg_Position(55, 90, 10),
"CORN_RIGHTIN": Leg_Position(125, 55, 10),
"CORN_RIGHTIN_UP": Leg_Position(125, 90, 10),
"CORN_LEFTOUT": Leg_Position(75, 45, 70),
"CORN_LEFTOUT_UP": Leg_Position(75, 75, 40),
"CORN_RIGHTOUT": Leg_Position(105, 45, 70),
"CORN_RIGHTOUT_UP": Leg_Position(105, 75, 40)
}
# There's no center in because the mid motor is limited to 45 degrees
LEG_TALL_SIDE_MOVEMENT_TABLE = {
"NEUTRAL": LEG_MISC_TABLE["NEUTRAL"],
"UP_NEUTRAL": LEG_MISC_TABLE["UP_NEUTRAL"],
"SIDE_OUT_LEFT": Leg_Position(65, 50, 30),
"SIDE_OUT_RIGHT": Leg_Position(115, 50, 30),
"SIDE_UP_OUT_LEFT": Leg_Position(65, 70, 0),
"SIDE_UP_OUT_RIGHT": Leg_Position(115, 70, 0),
"CENTER_OUT": Leg_Position(90, 45, 30),
"CENTER_UP_OUT": Leg_Position(90, 70, 10),
}
| [
11748,
4866,
198,
6738,
17910,
62,
20783,
62,
9979,
1187,
1330,
309,
4061,
62,
35009,
29516,
11,
25269,
62,
35009,
29516,
11,
371,
2394,
62,
35009,
29516,
11,
16400,
8808,
62,
35009,
29516,
198,
198,
2,
24550,
25,
777,
3815,
389,
3537... | 2.150561 | 2,763 |
""" script.py
"""
import http.server
import socketserver
from pyfiguration import conf
# Create a request handler
Handler = http.server.SimpleHTTPRequestHandler
@conf.addIntField(
field="server.port",
description="The port on which the server will start",
default=8000,
minValue=80,
maxValue=9999,
)
if __name__ == "__main__":
startServer()
| [
37811,
4226,
13,
9078,
198,
37811,
198,
11748,
2638,
13,
15388,
198,
11748,
37037,
18497,
198,
198,
6738,
12972,
5647,
3924,
1330,
1013,
628,
198,
2,
13610,
257,
2581,
21360,
198,
25060,
796,
2638,
13,
15388,
13,
26437,
40717,
18453,
25... | 3.02439 | 123 |
from ..utils.database import db
from marshmallow_sqlalchemy import ModelSchema
from marshmallow import fields
| [
6738,
11485,
26791,
13,
48806,
1330,
20613,
198,
6738,
22397,
42725,
62,
25410,
282,
26599,
1330,
9104,
27054,
2611,
198,
6738,
22397,
42725,
1330,
7032,
628,
198
] | 4.148148 | 27 |
from django.test import TestCase
from ..templatetags.freight_filters import formatnumber, power10
| [
6738,
42625,
14208,
13,
9288,
1330,
6208,
20448,
198,
198,
6738,
11485,
11498,
489,
265,
316,
3775,
13,
19503,
432,
62,
10379,
1010,
1330,
5794,
17618,
11,
1176,
940,
628
] | 3.333333 | 30 |
import urllib.request
import json
from .models import Source,Article
#getting api key
api_key = None
#getting news base url
base_url = None
articles_url = None
def get_source(category):
'''
Function that gets json response to our url request
'''
get_source_url = base_url.format(category,api_key)
print('***get_source_url***')
print(get_source_url)
with urllib.request.urlopen(get_source_url) as url:
get_source_data = url.read()
get_source_response = json.loads(get_source_data)
source_results = None
if get_source_response['sources']:
source_results_list = get_source_response['sources']
source_results = process_source(source_results_list)
return source_results
def process_source(source_list):
'''
function that processes the news results and transform them to a list of objects
Args:
sources_list: A list of dictionaries that contain news details
Returns:
sources_results: Alist of news source objects
'''
source_results = []
for source_item in source_list:
id = source_item.get('id')
name = source_item.get('name')
description = source_item.get('description')
url = source_item.get('url')
category = source_item.get('category')
language = source_item.get('language')
country = source_item.get('country')
urlToImage = source_item.get('urlToImage')
source_object = Source(id,name,description,url,category,language,country,urlToImage)
source_results.append(source_object)
return source_results
def get_articles(id):
'''
Function that gets the json response to url request
'''
get_articles_url = articles_url.format(id,api_key)
print(f'***{get_articles_url}***')
print(get_articles_url)
with urllib.request.urlopen(get_articles_url) as url:
articles_results = json.loads(url.read())
articles_object = None
if articles_results['articles']:
articles_object = process_articles(articles_results['articles'])
return articles_object | [
11748,
2956,
297,
571,
13,
25927,
198,
11748,
33918,
198,
6738,
764,
27530,
1330,
8090,
11,
14906,
628,
628,
198,
198,
2,
37210,
40391,
1994,
198,
15042,
62,
2539,
796,
6045,
198,
198,
2,
37210,
1705,
2779,
19016,
198,
8692,
62,
6371,... | 2.613139 | 822 |
#!/usr/bin/env python3
# Write a program that simulates random read coverage over a chromosome
# Report min, max, and average coverage
# Make variables for genome size, read number, read length
# Input values from the command line
# Note that you will not sample the ends of a chromosome very well
# So don't count the first and last parts of a chromsome
import sys
import random
genomelen = int(sys.argv[1])
read = int(sys.argv[2])
readlen = int(sys.argv[3])
#make genome
genome = [0]* genomelen
for i in range(read):
rand = random.randint(0,genomelen - read)
for j in range(read):
genome[j+rand]+=1
min = genome[read]
max = genome[read]
Cov = 0 #coverage
for a in genome[read:-read]:
if a < min: min = a
if a > max: max = a
Cov += a
#print(min, max, f'{Cov/(genomelen - 2*read):.5f}')
print(min, max, '{:.2f}'.format(Cov/(genomelen-2*read)))
#collab with Jojo and class in general
"""
python3 32xcoverage.py 1000 100 100
5 20 10.82375
"""
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
2,
19430,
257,
1430,
326,
985,
15968,
4738,
1100,
5197,
625,
257,
34348,
198,
2,
6358,
949,
11,
3509,
11,
290,
2811,
5197,
198,
2,
6889,
9633,
329,
19270,
2546,
11,
1100,
... | 2.71875 | 352 |
from twilio.rest import Client
from src.variables.config import *
| [
198,
6738,
665,
346,
952,
13,
2118,
1330,
20985,
198,
6738,
12351,
13,
25641,
2977,
13,
11250,
1330,
1635,
198
] | 3.35 | 20 |
#link https://practice.geeksforgeeks.org/problems/job-sequencing-problem-1587115620/1#
def JobScheduling(Jobs,n):
'''
:param Jobs: list of "Job" class defined in driver code, with "profit" and "deadline".
:param n: total number of jobs
:return: A list of size 2 having list[0] = count of jobs and list[1] = max profit
'''
'''
{
class Job:.
def __init__(self,profit=0,deadline=0):
self.profit = profit
self.deadline = deadline
self.id = 0
}
'''
# code here
maxx, c = 0, 0
Jobs.sort(key=lambda x : x.profit,reverse=True)
sdl =set()
for i in Jobs:
temp=i.deadline-1
while temp>=0 :
if temp not in sdl:
c+=1
sdl.add(temp)
maxx+=i.profit
break
temp-=1
return [c,maxx]
#{
# Driver Code Starts
#Initial Template for Python 3
import atexit
import io
import sys
#Contributed by : Nagendra Jha
class Job:
'''
Job class which stores profit and deadline.
'''
if __name__ == '__main__':
test_cases = int(input())
for cases in range(test_cases) :
n = int(input())
info = list(map(int,input().strip().split()))
Jobs = [Job() for i in range(n)]
for i in range(n):
Jobs[i].id = info[3*i]
Jobs[i].deadline = info[3 * i + 1]
Jobs[i].profit=info[3*i+2]
res = JobScheduling(Jobs,n)
print (res[0], end=" ")
print (res[1])
# } Driver Code Ends | [
2,
8726,
3740,
1378,
39541,
13,
469,
2573,
30293,
2573,
13,
2398,
14,
1676,
22143,
14,
21858,
12,
3107,
9532,
12,
45573,
12,
1314,
5774,
1157,
3980,
1238,
14,
16,
2,
198,
198,
4299,
15768,
50,
1740,
16619,
7,
41,
8158,
11,
77,
259... | 2.033898 | 767 |
import inception_v3
if __name__ == '__main__':
model = inception_v3.InceptionV3('imagenet_test_lmdb', 'imagenet_train_lmdb', 1000)
train_proto = model.inception_v3_proto(64)
test_proto = model.inception_v3_proto(64, phase='TEST')
save_proto(train_proto, 'imagenet_train.prototxt')
save_proto(test_proto, 'imagenet_test.prototxt')
| [
11748,
30839,
62,
85,
18,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
2746,
796,
30839,
62,
85,
18,
13,
818,
4516,
53,
18,
10786,
320,
11286,
316,
62,
9288,
62,
75,
9132,
65,
3256,
... | 2.261146 | 157 |
# based on the keras documentation
#
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Conv1D,Activation
from keras import losses, optimizers
import keras.utils as keras_utils
import json
f = open('data_for_everything')
j = json.load(f)
# our parameters
nn_input_size = len(j[0]['in'])
nn_hidden_layer_size = 20
nn_hidden_layers_n = 1
nn_output_layer_size = len(j[0]['out'])
epochs_count = 2000
batch_size = 100
# define the model.
model = Sequential()
model.add(Dense(nn_hidden_layer_size, activation='relu',
input_shape=(nn_input_size,)))
# hidden layers. we went for three. seems decent.
for i in range(0, nn_hidden_layers_n):
model.add(Dense(nn_hidden_layer_size, activation='relu',
input_shape=(nn_hidden_layer_size,)))
# output layer.
model.add(Dense(nn_output_layer_size, activation='relu',
input_shape=(nn_hidden_layer_size,)))
# compile it.
model.compile(optimizer=optimizers.Adam(),
loss=losses.binary_crossentropy,
metrics=['accuracy'])
######
###### LOAD DATA HERE
######
#data = np.empty(shape=(len(j),196))
#labels = np.empty(shape=(len(j),664))
t_data = []
t_label = []
for i in j:
t_data += [i['in']]
t_label += [i['out']]
amount_for_training = 1000
data = np.array(t_data[:amount_for_training])
labels = np.array(t_label[:amount_for_training])
test_data = np.array(t_data[amount_for_training:])
test_labels = np.array(t_label[amount_for_training:])
print data
model.fit(data, labels, epochs=epochs_count, batch_size=batch_size)
score = model.evaluate(test_data, test_labels, batch_size=batch_size)
print score
#print j[1001]['bill']
#print t_label[100]
#print model.predict(np.matrix(t_data[1001],))
model.save('trained.dat')
| [
198,
2,
1912,
319,
262,
41927,
292,
10314,
198,
2,
220,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
41927,
292,
13,
27530,
1330,
24604,
1843,
198,
6738,
41927,
292,
13,
75,
6962,
1330,
360,
1072,
11,
34872,
16,
35,
11,
25526,
341... | 2.539913 | 689 |
#
# Copyright (C) 2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
#
import argparse
from unet3 import cross_entropy_balanced
import tensorflow.keras.backend as K
from keras.models import load_model
from keras.layers import *
from tensorflow.python.framework import graph_io
from tensorflow.python.framework import graph_util
import tensorflow as tf
import numpy as np
import warnings
from tensorflow.python.util import deprecation
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
deprecation._PRINT_DEPRECATION_WARNINGS = False
warnings.filterwarnings("ignore")
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
os.environ["CUDA_VISIBLE_DEVICES"] = ""
def args():
"""
Argument Parsing Handler:
-m <path_to_keras> :
Path to keras model
-o <model_output> :
Path to directory that will store pb model
"""
parser = argparse.ArgumentParser()
parser.add_argument("-m", "--path_to_keras", type=str,
help="Path to keras model.", default='')
parser.add_argument("-o", "--model_output", type=str,
help="Path to directory that will store pb model.", default='')
return parser.parse_args()
if __name__ == '__main__':
with warnings.catch_warnings():
warnings.simplefilter("ignore")
arg_obj = args()
assert arg_obj.path_to_keras != '', '[ERROR] No keras path given.'
assert arg_obj.model_output != '', '[ERROR] No output path given.'
main(arg_obj)
| [
2,
198,
2,
15069,
357,
34,
8,
33448,
8180,
10501,
198,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
24843,
12,
17,
13,
15,
198,
2,
198,
2,
198,
11748,
1822,
29572,
198,
6738,
555,
316,
18,
1330,
3272,
62,
298,
28338,
62,
27753,... | 2.597246 | 581 |
import ast
import collections
import contextlib
import enum
from functools import partial
from functools import wraps
import inspect
from inspect import BoundArguments
import re
import sys
import textwrap
import traceback
import types
from dataclasses import dataclass
from dataclasses import replace
from typing import *
try:
import icontract
except ModuleNotFoundError:
icontract = None # type: ignore
try:
import hypothesis
from hypothesis import strategies as st
from hypothesis.internal.conjecture.data import ConjectureData
except ModuleNotFoundError:
hypothesis = None # type: ignore
from crosshair.fnutil import fn_globals
from crosshair.fnutil import set_first_arg_type
from crosshair.fnutil import FunctionInfo
from crosshair.options import AnalysisKind
from crosshair.util import CrosshairInternal, IgnoreAttempt, UnexploredPath
from crosshair.util import debug
from crosshair.util import eval_friendly_repr
from crosshair.util import frame_summary_for_fn
from crosshair.util import is_pure_python
from crosshair.util import sourcelines
from crosshair.util import DynamicScopeVar
# For convience
INVARIANT = ConditionExprType.INVARIANT
PRECONDIITON = ConditionExprType.PRECONDIITON
POSTCONDIITON = ConditionExprType.POSTCONDIITON
class NoEnforce:
"""
Signal to suppress contract enforcement.
This function wrapper does nothing on its own. But the enforcement tracer
looks for it and will skip conditions on `fn` when this wrapper is detected.
"""
class ImpliesTransformer(ast.NodeTransformer):
"""
Transform AST to rewrite implies operation.
Pre- and post-conditions commonly want an implies(X, Y) operation.
But it's important to only evaluate Y when X is true; so we rewrite
this function into "Y if X else True"
"""
_NO_RETURN = object()
UNABLE_TO_REPR = "<unable to repr>"
@dataclass()
@dataclass
@dataclass(frozen=True)
class Conditions:
"""Describe the contract of a function."""
fn: Callable
"""
The body of the function to analyze.
Ideally, this is just the body of the function and does not include checking
pre- or post-conditions. (though this is not always possible)
"""
src_fn: Callable
"""
The body of the function to use for error reporting. Usually the same as
`fn`, but sometimes the original is wrapped in shell for exception handling
or other reasons.
"""
pre: List[ConditionExpr]
""" The preconditions of the function. """
post: List[ConditionExpr]
""" The postconditions of the function. """
raises: FrozenSet[Type[BaseException]]
"""
A set of expection types that are expected.
Subtypes of expected exceptions are also considered to be expected.
CrossHair will attempt to report when this function raises an
unexpected exception.
"""
sig: inspect.Signature
"""
The signature of the funtion. Argument and return type
annotations should be resolved to real python types when possible.
"""
# TODO: can mutation checking be implemented as just another kind of postcondition?
mutable_args: Optional[FrozenSet[str]]
"""
A set of arguments that are deeply immutable.
When None, no assertion about mutability is provided.
OTOH, an empty set asserts that the function does not mutate any argument.
"""
fn_syntax_messages: List[ConditionSyntaxMessage]
"""
A list of errors resulting from the parsing of the contract.
In general, conditions should not be checked when such messages exist.
"""
counterexample_description_maker: Optional[
Callable[[BoundArguments, object], str]
] = None
"""
An optional callback that formats a counterexample invocation as text.
It takes the example arguments and the returned value
(or the senitnel value _MISSING if function did not complete).
"""
@dataclass(frozen=True)
def add_completion_conditions(conditions: Conditions):
    """Attach an always-true postcondition when only preconditions exist.

    A function with preconditions but no explicit postconditions still
    needs at least one condition to check, so a trivial ``True``
    postcondition is appended in place.
    """
    if conditions.pre and not conditions.post:
        fname, lineno, _ = sourcelines(conditions.src_fn)
        trivial = ConditionExpr(POSTCONDIITON, lambda vars: True, fname, lineno, "")
        conditions.post.append(trivial)
_HEADER_LINE = re.compile(
r"""^(\s*)\:? # whitespace with optional leading colon
((?:post)|(?:pre)|(?:raises)|(?:inv)) # noncapturing keywords
(?:\[([\w\s\,\.]*)\])? # optional params in square brackets
\:\:?\s* # single or double colons
(.*?) # The (non-greedy) content
\s*$""",
re.VERBOSE,
)
_SECTION_LINE = re.compile(r"^(\s*)(.*?)\s*$")
@dataclass(init=False)
_RAISE_SPHINX_RE = re.compile(r"\:raises\s+(\w+)\:", re.MULTILINE)
_PARSER_MAP = {
AnalysisKind.PEP316: Pep316Parser,
AnalysisKind.icontract: IcontractParser,
AnalysisKind.asserts: AssertsParser,
AnalysisKind.hypothesis: HypothesisParser,
}
# Condition parsers may be needed at various places in the stack.
# We configure them through the use of a magic threadlocal value:
_CALLTREE_PARSER = DynamicScopeVar(ConditionParser, "calltree parser")
| [
11748,
6468,
198,
11748,
17268,
198,
11748,
4732,
8019,
198,
11748,
33829,
198,
6738,
1257,
310,
10141,
1330,
13027,
198,
6738,
1257,
310,
10141,
1330,
27521,
198,
11748,
10104,
198,
6738,
10104,
1330,
30149,
28100,
2886,
198,
11748,
302,
... | 3.100299 | 1,675 |
import botutils
import intutils
import sbotutils
if __name__=="__main__":
pass | [
11748,
10214,
26791,
198,
11748,
493,
26791,
198,
11748,
264,
13645,
26791,
198,
198,
361,
11593,
3672,
834,
855,
1,
834,
12417,
834,
1298,
198,
220,
220,
220,
1208
] | 2.862069 | 29 |
MEAN = {"imagenet": [0.485, 0.456, 0.406], "cifar": [0.4914, 0.4822, 0.4465]}
STD = {"imagenet": [0.229, 0.224, 0.225], "cifar": [0.2023, 0.1994, 0.2010]}
WEIGHT_DECAY = 2e-4
LABEL_SMOOTH = 2e-1
| [
11682,
1565,
796,
19779,
320,
11286,
316,
1298,
685,
15,
13,
32642,
11,
657,
13,
29228,
11,
657,
13,
29703,
4357,
366,
66,
361,
283,
1298,
685,
15,
13,
2920,
1415,
11,
657,
13,
2780,
1828,
11,
657,
13,
2598,
2996,
48999,
198,
3214... | 1.824074 | 108 |
from rest_framework.authentication import SessionAuthentication, BasicAuthentication
from rest_framework.exceptions import PermissionDenied
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated
from rest_framework.views import APIView
from rest_framework import status
import logging
from apprest.serializers.quota import CalipsoUserQuotaSerializer
from apprest.services.image import CalipsoAvailableImagesServices
from apprest.services.quota import CalipsoUserQuotaServices
class GetUsedQuotaFromUser(APIView):
    """
    get:
    Return the used quota for given user
    """
    # Accept both browser-session and HTTP Basic credentials.
    authentication_classes = (SessionAuthentication, BasicAuthentication)
    # Reject anonymous requests outright.
    permission_classes = (IsAuthenticated,)
    # Responses are not paginated (single quota object expected).
    # NOTE(review): the `get` handler is not visible in this excerpt —
    # confirm it exists in the full class definition.
    pagination_class = None
| [
6738,
1334,
62,
30604,
13,
41299,
3299,
1330,
23575,
47649,
3299,
11,
14392,
47649,
3299,
198,
6738,
1334,
62,
30604,
13,
1069,
11755,
1330,
2448,
3411,
21306,
798,
198,
6738,
1334,
62,
30604,
13,
26209,
1330,
18261,
198,
6738,
1334,
62... | 3.780488 | 205 |
"""
the Runner class has two main functions
1 - keeping track of episode rewards
2 - logging reward info to tensorboard
3 - saving reward history to csv
"""
import csv
import logging
import numpy as np
import tensorflow as tf
logger = logging.getLogger(__name__)
class Runner(object):
"""
Giving the runner total steps allows a percent of expt stat - very useful
Also can control how often it logs
"""
| [
37811,
198,
1169,
21529,
1398,
468,
734,
1388,
5499,
198,
16,
532,
5291,
2610,
286,
4471,
11530,
198,
17,
532,
18931,
6721,
7508,
284,
11192,
273,
3526,
198,
18,
532,
8914,
6721,
2106,
284,
269,
21370,
198,
37811,
198,
198,
11748,
269... | 3.447154 | 123 |
from locators import Alert as AL
| [
6738,
1179,
2024,
1330,
23276,
355,
8355,
198
] | 4.125 | 8 |
import requests
import re
import pymysql
from pyecharts import Bar, Pie, Bar3D
from lotterymodel import PeriodModel,WinningNumberModel,SalesInfoModel
# 解析开奖信息Html
# parsePage()
# 以Pie的方式展示每个数字的频率
analysisFrequency_Pie3D() | [
171,
119,
123,
11748,
7007,
198,
11748,
302,
198,
11748,
279,
4948,
893,
13976,
198,
6738,
12972,
3055,
5889,
1330,
2409,
11,
21690,
11,
2409,
18,
35,
198,
6738,
22098,
19849,
1330,
18581,
17633,
11,
16643,
768,
15057,
17633,
11,
44490,... | 2.036364 | 110 |
import torch
from torch import nn
from backbone.drn.drn import drn_a_50
if __name__ == "__main__":
conv_list = Backbone_DRNA50_in3(in_C=3)
x = torch.randn((1, 3, 320, 320))
# torch.Size([1, 3, 320, 320])
# torch.Size([1, 64, 160, 160])
# torch.Size([1, 256, 80, 80])
# torch.Size([1, 512, 40, 40])
# torch.Size([1, 1024, 40, 40])
for conv in conv_list:
print(x.size())
x = conv(x)
print(x.size())
| [
11748,
28034,
198,
6738,
28034,
1330,
299,
77,
198,
198,
6738,
32774,
13,
7109,
77,
13,
7109,
77,
1330,
1553,
77,
62,
64,
62,
1120,
628,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
3063,
... | 2.097222 | 216 |
import unittest
from unittest.mock import patch
from common.codebuild import CodeBuild
from common.exceptions import CodeBuildProjectNotFound
| [
11748,
555,
715,
395,
198,
6738,
555,
715,
395,
13,
76,
735,
1330,
8529,
198,
198,
6738,
2219,
13,
8189,
11249,
1330,
6127,
15580,
198,
6738,
2219,
13,
1069,
11755,
1330,
6127,
15580,
16775,
3673,
21077,
628
] | 3.891892 | 37 |
from db.models.build_jobs import BuildJob
builds = BuildJob.objects.select_related(
'status',
)
builds = builds.prefetch_related(
'user',
'project',
'project__user',
)
builds_details = builds.select_related('code_reference')
| [
6738,
20613,
13,
27530,
13,
11249,
62,
43863,
1330,
10934,
33308,
198,
198,
11249,
82,
796,
10934,
33308,
13,
48205,
13,
19738,
62,
5363,
7,
198,
220,
220,
220,
705,
13376,
3256,
198,
8,
198,
11249,
82,
796,
12188,
13,
3866,
69,
756... | 2.730337 | 89 |
# Import Python Libs
from __future__ import absolute_import
import logging
# Local imports
from . import utils
from . import util_which
log = logging.getLogger(__name__)
init_types_available = set([ "systemd" , "sysV"])
class init_exception(Exception):
"""Base class for exceptions in this module."""
pass
class init_exception_init_type(init_exception):
"""Exception raised for errors in the init_type
Attributes:
msg -- explanation of the error
"""
class init_exception_service(init_exception):
"""Exception raised for errors in the init implementation
Attributes:
msg -- explanation of the error
"""
# TODO: this is largely untested
| [
2,
17267,
11361,
7980,
82,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
11748,
18931,
198,
198,
2,
10714,
17944,
198,
6738,
764,
1330,
3384,
4487,
198,
6738,
764,
1330,
7736,
62,
4758,
628,
198,
6404,
796,
18931,
13,
1136... | 3.109649 | 228 |
"""Config flow to configure Google Hangouts."""
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_EMAIL, CONF_PASSWORD
from homeassistant.core import callback
from .const import CONF_2FA, CONF_REFRESH_TOKEN, DOMAIN as HANGOUTS_DOMAIN
@callback
def configured_hangouts(hass):
    """Return the existing Google Hangouts config entry, or None."""
    entries = hass.config_entries.async_entries(HANGOUTS_DOMAIN)
    # Only a single Hangouts account is supported, so the first entry
    # (if any) is the configured one.
    return entries[0] if entries else None
@config_entries.HANDLERS.register(HANGOUTS_DOMAIN)
class HangoutsFlowHandler(config_entries.ConfigFlow):
    """Config flow Google Hangouts.

    Drives the UI flow: user step (email/password), an optional 2FA
    step, and a final step that persists the refresh token.
    """
    VERSION = 1
    CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_PUSH
    def __init__(self):
        """Initialize Google Hangouts config flow."""
        # Filled in during async_step_user; consumed by the 2FA and
        # final steps.
        self._credentials = None
        self._refresh_token = None
    async def async_step_user(self, user_input=None):
        """Handle a flow start."""
        errors = {}
        # Only one Hangouts account may be configured at a time.
        if configured_hangouts(self.hass) is not None:
            return self.async_abort(reason="already_configured")
        if user_input is not None:
            # Imported lazily so the (heavy) hangups dependency is only
            # loaded once the user actually submits the form.
            from hangups import get_auth
            from .hangups_utils import (HangoutsCredentials,
                                        HangoutsRefreshToken,
                                        GoogleAuthError, Google2FAError)
            self._credentials = HangoutsCredentials(user_input[CONF_EMAIL],
                                                    user_input[CONF_PASSWORD])
            self._refresh_token = HangoutsRefreshToken(None)
            try:
                # get_auth is blocking, so run it in the executor pool.
                await self.hass.async_add_executor_job(get_auth,
                                                       self._credentials,
                                                       self._refresh_token)
                return await self.async_step_final()
            except GoogleAuthError as err:
                # 2FA challenge is signalled via a specific subclass.
                if isinstance(err, Google2FAError):
                    return await self.async_step_2fa()
                msg = str(err)
                # Distinguish "unsupported 2FA method" from plain bad
                # credentials for the error shown in the UI.
                if msg == 'Unknown verification code input':
                    errors['base'] = 'invalid_2fa_method'
                else:
                    errors['base'] = 'invalid_login'
        # First visit, or a failed attempt: (re-)show the login form.
        return self.async_show_form(
            step_id='user',
            data_schema=vol.Schema({
                vol.Required(CONF_EMAIL): str,
                vol.Required(CONF_PASSWORD): str
            }),
            errors=errors
        )
    async def async_step_2fa(self, user_input=None):
        """Handle the 2fa step, if needed."""
        errors = {}
        if user_input is not None:
            from hangups import get_auth
            from .hangups_utils import GoogleAuthError
            # Retry authentication with the verification code attached
            # to the credentials captured in the user step.
            self._credentials.set_verification_code(user_input[CONF_2FA])
            try:
                await self.hass.async_add_executor_job(get_auth,
                                                       self._credentials,
                                                       self._refresh_token)
                return await self.async_step_final()
            except GoogleAuthError:
                errors['base'] = 'invalid_2fa'
        return self.async_show_form(
            step_id=CONF_2FA,
            data_schema=vol.Schema({
                vol.Required(CONF_2FA): str,
            }),
            errors=errors
        )
    async def async_step_final(self):
        """Handle the final step, create the config entry."""
        # Only the email and refresh token are persisted; the password
        # is never stored.
        return self.async_create_entry(
            title=self._credentials.get_email(),
            data={
                CONF_EMAIL: self._credentials.get_email(),
                CONF_REFRESH_TOKEN: self._refresh_token.get()
            })
    async def async_step_import(self, _):
        """Handle a flow import."""
        # YAML import re-uses the interactive user step.
        return await self.async_step_user()
| [
37811,
16934,
5202,
284,
17425,
3012,
24300,
5269,
526,
15931,
198,
11748,
2322,
37623,
5623,
355,
2322,
198,
198,
6738,
1363,
562,
10167,
1330,
4566,
62,
298,
1678,
198,
6738,
1363,
562,
10167,
13,
9979,
1330,
7102,
37,
62,
27630,
4146... | 1.943387 | 1,996 |
from google.appengine.api import xmpp
from google.appengine.api import users
PONDER_MSG = "Hmm. Let me think on that a bit."
TELLME_MSG = "While I'm thinking, perhaps you can answer me this: %s"
SOMEONE_ANSWERED_MSG = ("We seek those who are wise and fast. One out of two "
"is not enough. Another has answered my question.")
ANSWER_INTRO_MSG = "You asked me: %s"
ANSWER_MSG = "I have thought long and hard, and concluded: %s"
WAIT_MSG = ("Please! One question at a time! You can ask me another once you "
"have an answer to your current question.")
THANKS_MSG = "Thank you for your wisdom."
TELLME_THANKS_MSG = ("Thank you for your wisdom."
" I'm still thinking about your question.")
EMPTYQ_MSG = "Sorry, I don't have anything to ask you at the moment."
HELP_MSG = ("I am the amazing Crowd Guru. Ask me a question by typing '/tellme "
"the meaning of life', and I will answer you forthwith! To learn "
"more, go to %s/")
MAX_ANSWER_TIME = 120
| [
6738,
23645,
13,
1324,
18392,
13,
15042,
1330,
2124,
76,
381,
198,
6738,
23645,
13,
1324,
18392,
13,
15042,
1330,
2985,
198,
198,
47,
1340,
14418,
62,
5653,
38,
796,
366,
44217,
13,
3914,
502,
892,
319,
326,
257,
1643,
526,
198,
932... | 2.667532 | 385 |
import argparse
import os
import numpy as np
import tensorflow as tf
from data import get_split
from model import TinySleepNet
from minibatching import batch_generator
from config import config
def compute_performance(cm):
"""Computer performance metrics from confusion matrix.
It computers performance metrics from confusion matrix.
It returns:
- Total number of samples
- Number of samples in each class
- Accuracy
- Macro-F1 score
- Per-class precision
- Per-class recall
- Per-class f1-score
"""
tp = np.diagonal(cm).astype(np.float)
tpfp = np.sum(cm, axis=0).astype(np.float) # sum of each col
tpfn = np.sum(cm, axis=1).astype(np.float) # sum of each row
acc = np.sum(tp) / np.sum(cm)
precision = tp / tpfp
recall = tp / tpfn
f1 = (2 * precision * recall) / (precision + recall)
mf1 = np.mean(f1)
total = np.sum(cm)
n_each_class = tpfn
return total, n_each_class, acc, mf1, precision, recall, f1
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--dataset", type=str, default="isruc")
parser.add_argument("--dir", type=str, default="./pretrained_model")
args = parser.parse_args()
model_dir = os.path.join(args.dir,args.dataset)
predict(
dataset = args.dataset,
model_dir=model_dir,
)
| [
11748,
1822,
29572,
198,
11748,
28686,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
6738,
1366,
1330,
651,
62,
35312,
198,
6738,
2746,
1330,
20443,
40555,
7934,
198,
6738,
949,
571,
19775,
1330,
1545... | 2.516304 | 552 |
import numpy as np
from mantraml.data import TabularDataset
| [
11748,
299,
32152,
355,
45941,
198,
198,
6738,
24818,
859,
75,
13,
7890,
1330,
16904,
934,
27354,
292,
316,
198
] | 3.05 | 20 |
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.7
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
if version_info >= (2, 6, 0):
_SimFlowEnergyTransfer_ConvectiveHeater_Radiant_Water = swig_import_helper()
del swig_import_helper
else:
import _SimFlowEnergyTransfer_ConvectiveHeater_Radiant_Water
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
try:
_object = object
_newclass = 1
except AttributeError:
_newclass = 0
try:
import weakref
weakref_proxy = weakref.proxy
except:
weakref_proxy = lambda x: x
import base
import SimFlowEnergyTransfer_ConvectiveHeater_Water
SimFlowEnergyTransfer_ConvectiveHeater_Radiant_Water_swigregister = _SimFlowEnergyTransfer_ConvectiveHeater_Radiant_Water.SimFlowEnergyTransfer_ConvectiveHeater_Radiant_Water_swigregister
SimFlowEnergyTransfer_ConvectiveHeater_Radiant_Water_swigregister(SimFlowEnergyTransfer_ConvectiveHeater_Radiant_Water)
SimFlowEnergyTransfer_ConvectiveHeater_Radiant_Water_sequence_swigregister = _SimFlowEnergyTransfer_ConvectiveHeater_Radiant_Water.SimFlowEnergyTransfer_ConvectiveHeater_Radiant_Water_sequence_swigregister
SimFlowEnergyTransfer_ConvectiveHeater_Radiant_Water_sequence_swigregister(SimFlowEnergyTransfer_ConvectiveHeater_Radiant_Water_sequence)
# This file is compatible with both classic and new-style classes.
| [
2,
770,
2393,
373,
6338,
7560,
416,
12672,
3528,
357,
4023,
1378,
2503,
13,
2032,
328,
13,
2398,
737,
198,
2,
10628,
513,
13,
15,
13,
22,
198,
2,
198,
2,
2141,
407,
787,
2458,
284,
428,
2393,
4556,
345,
760,
644,
345,
389,
1804,... | 3.104628 | 497 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Data module for importing and cleaning data sets.
Import / download / cache datasets in `download` module.
Get and filter sample in `sample` module.
"""
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
201,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
37811,
201,
198,
6601,
8265,
329,
33332,
290,
12724,
1366,
5621,
13,
201,
198,
201,
198,
20939,
1220,
4321,
... | 3.028571 | 70 |
"""
Downloads the latest version of the vendor dependences, and replaces all copies of them in the repository
"""
import os
import shutil
import tempfile
from urllib.request import urlopen, Request
from libraries.scripts.updater.utils import walk_with_blacklist
from libraries.scripts.git.git_python_wrappers import commit_all_changes
CACHE_DIRECTORY = os.path.join(tempfile.gettempdir(), "gos_vendor_cache")
if __name__ == "__main__":
main()
| [
37811,
198,
10002,
82,
262,
3452,
2196,
286,
262,
18371,
4745,
3007,
11,
290,
24020,
477,
9088,
286,
606,
287,
262,
16099,
198,
37811,
198,
11748,
28686,
198,
11748,
4423,
346,
198,
11748,
20218,
7753,
198,
6738,
2956,
297,
571,
13,
2... | 3.282609 | 138 |
import asyncio
from unsync import unsync
from codetiming import Timer
import time
# unsync
# Unsynchronize asyncio by using an ambient event loop, or executing in separate threads or processes.
#
# Quick Overview
# Functions marked with the @unsync decorator will behave in one of the following ways:
#
# async functions will run in the unsync.loop event loop executed from unsync.thread
# Regular functions will execute in unsync.thread_executor, a ThreadPoolExecutor
# Useful for IO bounded work that does not support asyncio
# Regular functions marked with @unsync(cpu_bound=True) will execute in unsync.process_executor, a ProcessPoolExecutor
# Useful for CPU bounded work
# All @unsync functions will return an Unfuture object. This new future type combines the behavior of asyncio.Future and concurrent.Future with the following changes:
#
# Unfuture.set_result is threadsafe unlike asyncio.Future
# Unfuture instances can be awaited, even if made from concurrent.Future
# Unfuture.result() is a blocking operation except in unsync.loop/unsync.thread where it behaves like asyncio.Future.result and will throw an exception if the future is not done
# Executing an async function outside of an existing event loop is troublesome
# * asyncio.Future is not thread safe
# * concurrent.Future's cannot be directly awaited
# * Future.result() is a blocking operation even within an event loop
# * asyncio.Future.result() will throw an exception if the future is not done
# * async Functions always execute in the asyncio loop (not thread or process backed)
# * Cancellation and timeouts are tricky in threads and processes
# * Executing an async function outside of an existing event loop is troublesome
# * Testing is hard
# 1) Example with asyncio (loop):
with Timer(text=f"asyncio (loop): {{:.4f}}"):
asyncio.run(main())
# Takes 1 second to run
# 2) Example with unsync (loop):
@unsync
with Timer(text=f"unsync (loop): {{:.4f}}"):
unfuture1 = unsync_async()
unfuture2 = unsync_async()
print(unfuture1.result() + unfuture2.result())
# Takes 1 second to run
# 3) Example with unsync (thread_executor):
@unsync
with Timer(text=f"unsync (thread_executor): {{:.4f}}"):
tasks = [non_async_function(0.1) for _ in range(10)]
print([task.result() for task in tasks])
# 4) Example with unsync (process_executor):
@unsync
with Timer(text=f"unsync (thread_executor): {{:.4f}}"):
tasks = [non_async_function(0.1) for _ in range(10)]
print([task.result() for task in tasks])
# 5) Unfuture.then
@unsync
@unsync
with Timer(text=f"Unfuture.then: {{:.4f}}"):
res = initiate(3).then(process)
print(res.result())
# 6) Mixin
import unsync
import uvloop
@unsync
uvloop.install() # Equivalent to asyncio.set_event_loop_policy(EventLoopPolicy())
main() | [
11748,
30351,
952,
198,
198,
6738,
555,
27261,
1330,
555,
27261,
198,
6738,
14873,
316,
320,
278,
1330,
5045,
263,
198,
11748,
640,
198,
198,
2,
555,
27261,
198,
2,
791,
28869,
11413,
1096,
30351,
952,
416,
1262,
281,
25237,
1785,
905... | 3.295991 | 848 |
"""The Player class stores all the functions and data related to the Player."""
from pygame import Rect
| [
37811,
464,
7853,
1398,
7000,
477,
262,
5499,
290,
1366,
3519,
284,
262,
7853,
526,
15931,
198,
198,
6738,
12972,
6057,
1330,
48599,
628,
198
] | 4.28 | 25 |
"""
"""
# Imports from libraries
from datetime import datetime
import os
import pickle
from pytups import SuperDict, TupList
from typing import Dict, Tuple
# Imports from cornflow libraries
from cornflow_client import InstanceCore
from cornflow_client.core.tools import load_json
# Imports from internal modules
from .tools import (
get_date_from_string,
get_date_string_from_ts,
get_hour_from_date_time,
get_one_date,
get_one_date_time,
get_time_slot_string,
get_week_from_ts,
)
| [
37811,
198,
198,
37811,
198,
2,
1846,
3742,
422,
12782,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
11748,
28686,
198,
11748,
2298,
293,
198,
6738,
12972,
83,
4739,
1330,
3115,
35,
713,
11,
49595,
8053,
198,
6738,
19720,
1330,
360,
... | 2.828729 | 181 |
# -*- coding: utf-8 -*-
a = 'Formulário de Login, "LogForm v0.3"'
print(f'{a:^59}')
print('#'*59)
usuarios = []
senhas = []
while True:
if not usuarios:
print('Não há usuários registrados anteriormente.\nRegistre um novo usuário!\n')
print('Registrar novo usuário?\n\t1 - SIM\n\t2 - NÃO\n\t')
se = str(input('>>> '))
if se == '1':
new_usr()
print('Usuário registrado com sucesso!')
print()
print(usuarios)
print(senhas)
break
elif se == '2':
print('O programa encerrará!')
break
else:
print('Por favor, digite uma opção válida!\n\n\n')
continue
login()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
64,
796,
705,
8479,
377,
6557,
27250,
390,
23093,
11,
366,
11187,
8479,
410,
15,
13,
18,
30543,
198,
4798,
7,
69,
6,
90,
64,
25,
61,
3270,
92,
11537,
198,
4798,
10... | 1.983498 | 303 |
#
# gemini_python
#
# primitives_ghost_spect.py
# ------------------------------------------------------------------------------
import os
import numpy as np
import math
from copy import deepcopy
import scipy
import scipy.signal as signal
from scipy.optimize import leastsq
import functools
from datetime import datetime, date, time, timedelta
import re
import astropy.coordinates as astrocoord
import astropy.io.fits as astropyio
from astropy.time import Time
from astropy import units as u
from astropy import constants as const
from astropy.stats import sigma_clip
from scipy import interpolate
import scipy.ndimage as nd
from pysynphot import observation, spectrum
import astrodata
from geminidr.gemini.lookups import DQ_definitions as DQ
from gempy.gemini import gemini_tools as gt
# from gempy.mosaic.mosaicAD import MosaicAD
from .polyfit import GhostArm, Extractor, SlitView
from .polyfit.ghost import GhostArm
from .primitives_ghost import GHOST, filename_updater
from . import parameters_ghost_spect
from .lookups import polyfit_dict, line_list, keyword_comments, targetn_dict
from recipe_system.utils.decorators import parameter_override
# ------------------------------------------------------------------------------
GEMINI_SOUTH_LOC = astrocoord.EarthLocation.from_geodetic((-70, 44, 12.096),
(-30, 14, 26.700),
height=2722.,
ellipsoid='WGS84')
BAD_FLAT_FLAG = 16
# FIXME: This should go somewhere else, but where?
from scipy.ndimage import median_filter
def convolve_with_mask(data, mask, rectangle_width=(100, 20)):
    """Convolve a masked array with a uniform rectangle.

    The data are median-filtered first (to suppress cosmic rays), then
    FFT-convolved with a rectangular window centred on the origin.  The
    result is normalised by the convolved mask so that masked-out pixels
    do not bias the output; a small epsilon keeps the division finite
    outside the mask.
    """
    height, width = rectangle_width
    # Build the rectangular window and recentre it on the origin.
    window = np.zeros_like(data)
    window[:height, :width] = 1.0
    window = np.roll(window, int(-height / 2), axis=0)
    window = np.roll(window, int(-width / 2), axis=1)
    window_fft = np.fft.rfft2(window)
    # Median filter knocks out single-pixel cosmic-ray spikes.
    cleaned = median_filter(data, 3)
    # Convolve the epsilon-padded masked data, then normalise by the
    # identically convolved mask.
    numerator = np.fft.irfft2(np.fft.rfft2(cleaned * (mask + 1e-4)) * window_fft)
    denominator = np.fft.irfft2(np.fft.rfft2(mask + 1e-4) * window_fft)
    return numerator / denominator
@parameter_override
class GHOSTSpect(GHOST):
"""
Primitive class for processing GHOST science data.
This class contains the primitives necessary for processing GHOST science
data, as well as all related calibration files from the main spectrograph
cameras. Slit viewer images are processed with another primitive class
(:class:`ghostdr.ghost.primitives_ghost_slit.GHOSTSlit`).
"""
"""Applicable tagset"""
tagset = set(["GEMINI", "GHOST"]) # NOT SPECT because of bias/dark
def addWavelengthSolution(self, adinputs=None, **params):
"""
Compute and append a wavelength solution for the data.
The GHOST instrument is designed to be very stable over a long period
of time, so it is not strictly necessary to take arcs for every
observation. The alternative is use the arcs taken most recently
before and after the observation of interest, and compute an
average of their wavelength solutions.
The average is weighted by
the inverse of the time between each arc observation and science
observation. E.g., if the 'before' arc is taken 12 days before the
science observation, and the 'after' arc is taken 3 days after the
science observation, then the 'after' arc will have a weight of 80%
in the final wavelength solution (12/15), and the 'before' arc 20%
(3/15).
In the event that either a 'before' arc can't be found but an 'after'
arc can, or vice versa, the wavelength solution from the arc that was
found will be applied as-is. If neither a 'before' nor 'after' arc can
be found, an IOError will be raised.
It is possible to explicitly pass which arc files to use as
the ``arc`` parameter. This should be a list of two-tuples, with each
tuple being of the form
``('before_arc_filepath', 'after_arc_filepath')``. This list must be
the same length as the list of ``adinputs``, with a one-to-one
correspondence between the two lists.
Parameters
----------
suffix: str
suffix to be added to output files
arc: list of two-tuples
A list of two-tuples, with each tuple corresponding to an element of
the ``adinputs`` list. Within each tuple, the two elements are the
designated 'before' and 'after' arc for that observation.
"""
log = self.log
log.debug(gt.log_message("primitive", self.myself(), "starting"))
timestamp_key = self.timestamp_keys[self.myself()]
# No attempt to check if this primitive has already been run -
# new arcs may be available which we wish to apply. Any old WAVL
# extensions will simply be removed.
# CJS: Heavily edited because of the new AD way
# Get processed slits, slitFlats, and flats (for xmod)
# slits and slitFlats may be provided as parameters
arc_list = params["arcs"]
# if arc_list is None:
# # CJS: This populates the calibrations cache (dictionary) with
# # "processed_slit" filenames for each input AD
# self.getProcessedArc(adinputs)
# # This then gets those filenames
# arc_list = [self._get_cal(ad, 'processed_arc')
# for ad in adinputs]
# log.stdinfo(arc_list)
# for ad, arcs in zip(
# *gt.make_lists(adinputs, arc_list, force_ad=True)):
for i, ad in enumerate(adinputs):
found_arcs = False
if arc_list:
try:
arc_before, arc_after = arc_list[i]
found_arcs = True
except (TypeError, ValueError):
pass
# self.getProcessedArc(ad, howmany=2)
# if not found_arcs:
# try:
# arcs_calib = self._get_cal(ad, 'processed_arc', )
# log.stdinfo('Found following arcs: {}'.format(
# ', '.join([_ for _ in arcs_calib])
# ))
# arc_before, arc_after = self._get_cal(ad, 'processed_arc',)
# except (TypeError, ValueError):
# # Triggers if only one arc, or more than two
# arc_before = self._get_cal(ad, 'processed_arc',)[0]
# arc_after = None
if not found_arcs:
# Fetch the arc_before and arc_after in sequence
arc_before = self._request_bracket_arc(ad, before=True)
arc_after = self._request_bracket_arc(ad, before=False)
if arc_before is None and arc_after is None:
raise IOError('No valid arcs found for {}'.format(ad.filename))
log.stdinfo('Arcs for {}: \n'
' before: {}\n'
' after: {}'.format(ad.filename,
arc_before, arc_after))
# Stand up a GhostArm instance for this ad
gs = GhostArm(arm=ad.arm(), mode=ad.res_mode(),
detector_x_bin=ad.detector_x_bin(),
detector_y_bin=ad.detector_y_bin())
if arc_before is None:
# arc = arc_after
arc_after = astrodata.open(arc_after)
wfit = gs.evaluate_poly(arc_after[0].WFIT)
ad.phu.set('ARCIM_A', os.path.abspath(arc_after.path),
"'After' arc image")
elif arc_after is None:
# arc = arc_before
arc_before = astrodata.open(arc_before)
wfit = gs.evaluate_poly(arc_before[0].WFIT)
ad.phu.set('ARCIM_B', os.path.abspath(arc_before.path),
"'Before' arc image")
else:
# Need to weighted-average the wavelength fits from the arcs
# Determine the weights (basically, the inverse time between
# the observation and the arc)
arc_after = astrodata.open(arc_after)
arc_before = astrodata.open(arc_before)
wfit_b = gs.evaluate_poly(arc_before[0].WFIT)
wfit_a = gs.evaluate_poly(arc_after[0].WFIT)
weight_b = np.abs((arc_before.ut_datetime() -
ad.ut_datetime()).total_seconds())
weight_a = np.abs((arc_after.ut_datetime() -
ad.ut_datetime()).total_seconds())
weight_a, weight_b = 1. / weight_a, 1 / weight_b
log.stdinfo('Cominbing wavelength solutions with weights '
'%.3f, %.3f' %
(weight_a / (weight_a + weight_b),
weight_b / (weight_a + weight_b),
))
# Compute weighted mean fit
wfit = wfit_a * weight_a + wfit_b * weight_b
wfit /= (weight_a + weight_b)
ad.phu.set('ARCIM_A', os.path.abspath(arc_after.path),
self.keyword_comments['ARCIM_A'])
ad.phu.set('ARCIM_B', os.path.abspath(arc_before.path),
self.keyword_comments['ARCIM_B'])
ad.phu.set('ARCWT_A', weight_a,
self.keyword_comments['ARCWT_A'])
ad.phu.set('ARCWT_B', weight_b,
self.keyword_comments['ARCWT_B'])
# rebin the wavelength fit to match the rest of the extensions
for _ in range(int(math.log(ad.detector_x_bin(), 2))):
wfit = wfit[:, ::2] + wfit[:, 1::2]
wfit /= 2.0
for ext in ad:
ext.WAVL = wfit
# FIXME Wavelength unit needs to be in output ad
# Timestamp and update filename
gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)
ad.update_filename(suffix=params["suffix"], strip=True)
return adinputs
    def applyFlatBPM(self, adinputs=None, **params):
        """
        Find the flat relevant to the file(s) being processed, and merge the
        flat's BPM into the target file's.

        GHOST does not use flat subtraction in the traditional sense; instead,
        the extracted flat profile is subtracted from the extracted object
        profile. This means that the BPM from the flat needs to be applied to
        the object file before profile extraction, and hence well before actual
        flat correction is performed.

        The BPM flat is applied by ``bitwise_or`` combining it into the main
        adinput(s) BPM.

        Parameters
        ----------
        suffix: str
            suffix to be added to output files
        flat: str/None
            Name (full path) of the flatfield to use. If None, try:
        flatstream: str/None
            Name of the stream containing the flatfield as the first
            item in the stream. If None, the calibration service is used
        write_result: bool
            Denotes whether or not to write out the result of profile
            extraction to disk. This is useful for both debugging, and data
            quality assurance.
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys[self.myself()]
        # No attempt to check if this primitive has already been run -
        # re-applying a flat BPM should have no adverse effects, and the
        # primitive simply skips if no flat is found.
        # CJS: extractProfile() contains comments explaining what's going on here
        # Flat sourcing priority: explicit 'flat' parameter, then the first
        # item of the named 'flat_stream' stream, then the calibration service.
        flat_list = params["flat"]
        flat_stream = params["flat_stream"]
        if flat_list is None:
            if flat_stream is not None:
                flat_list = self.streams[flat_stream][0]
            else:
                self.getProcessedFlat(adinputs, refresh=False)
                flat_list = [self._get_cal(ad, 'processed_flat')
                             for ad in adinputs]
        for ad, flat in zip(*gt.make_lists(adinputs, flat_list, force_ad=True)):
            if flat is None:
                log.warning("No flat identified/provided for {} - "
                            "skipping".format(ad.filename))
                continue
            # Re-bin the flat if necessary
            # We only need the mask, but it's best to use the full rebin
            # helper function in case the mask rebin code needs to change
            if flat.detector_x_bin() != ad.detector_x_bin(
            ) or flat.detector_y_bin() != ad.detector_y_bin():
                xb = ad.detector_x_bin()
                yb = ad.detector_y_bin()
                flat = self._rebin_ghost_ad(flat, xb, yb)
                # Re-name the flat so we don't blow away the old one on save
                # NOTE(review): flat_filename_orig is assigned but never used
                # in this method.
                flat_filename_orig = flat.filename
                flat.filename = filename_updater(flat,
                                                 suffix='_rebin%dx%d' %
                                                        (xb, yb,),
                                                 strip=True)
                flat.write(overwrite=True)
            # CJS: Edited here to require that the science and flat frames'
            # extensions are the same shape. The original code would no-op
            # with a warning for each pair that didn't, but I don't see how
            # this would happen in normal operations. The clip_auxiliary_data()
            # function in gemini_tools may be an option here.
            try:
                gt.check_inputs_match(adinput1=ad, adinput2=flat,
                                      check_filter=False)
            except ValueError:
                log.warning("Input mismatch between flat and {} - "
                            "skipping".format(ad.filename))
                continue
            # Merge the flat BPM into each science extension's mask,
            # adopting it outright where the science mask is absent.
            for ext, flat_ext in zip(ad, flat):
                if ext.mask is None:
                    ext.mask = flat_ext.mask
                else:
                    ext.mask |= flat_ext.mask
            # Record the flat used in the PHU for provenance.
            ad.phu.set('FLATBPM', os.path.abspath(flat.path),
                       self.keyword_comments['FLATBPM'])
            # Timestamp and update filename
            gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)
            ad.update_filename(suffix=params["suffix"], strip=True)
            if params["write_result"]:
                ad.phu.set('PROCIMG', os.path.abspath(ad.path),
                           keyword_comments.keyword_comments['PROCIMG'])
                ad.write(overwrite=True)
        return adinputs
    def barycentricCorrect(self, adinputs=None, **params):
        """
        Perform barycentric correction of the wavelength extension in the input
        files.

        Barycentric correction is performed by multiplying the wavelength
        (``.WAVL``) data extension by a correction factor. This factor can be
        supplied manually, or can be left to be calculated based on the
        headers in the AstroData input.

        Parameters
        ----------
        suffix: str
            suffix to be added to output files
        correction_factor: float
            Barycentric correction factor to be applied. Defaults to None, at
            which point a computed value will be applied. The computed value
            is based on the recorded position of the Gemini South observatory.
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys[self.myself()]
        for ad in adinputs:
            # Skip already-processed inputs: re-applying the multiplicative
            # factor would compound the correction.
            if ad.phu.get(timestamp_key):
                log.warning("No changes will be made to {}, since it has "
                            "already been processed by barycentricCorrect".
                            format(ad.filename))
                continue
            # FIXME: It is more pythonic to ask forgiveness than permission,
            # so a try
            # statement is preferred.
            if not hasattr(ad[0], 'WAVL'):
                log.warning("No changes will be made to {}, since it contains "
                            "no wavelength information".
                            format(ad.filename))
                continue
            # Get or compute the correction factor
            # A manually-supplied factor is replicated once per extension;
            # NOTE(review): the computed path assumes
            # _compute_barycentric_correction returns one factor per
            # extension when return_wavl=True - confirm against its docs.
            if params['correction_factor'] is None:
                cf = self._compute_barycentric_correction(ad, return_wavl=True)
            else:
                cf = [params['correction_factor'], ] * len(ad)
            # Multiply the wavelength scale by the correction factor
            for i, ext in enumerate(ad):
                log.stdinfo('Applying barycentric correction factor of '
                            '{} to ext {} of {}'.format(cf[i], i, ad.filename))
                ext.WAVL *= float(cf[i])
            # Timestamp and update filename
            gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)
            ad.update_filename(suffix=params["suffix"], strip=True)
        return adinputs
def clipSigmaBPM(self, adinputs=None, **params):
"""
Perform a sigma-clipping on the input data frame.
This is a primitive wrapper for the :func:`astropy.stats.sigma_clip`
method. The ``sigma`` and ``iters`` parameters are passed through to the
corresponding keyword arguments.
Parameters
----------
sigma: float/None
The sigma value to be used for clipping.
bpm_value: int/None
The integer value to be applied to the data BPM where the sigma
threshold is exceeded. Defaults to 1 (which is the generic bad
pixel flag). Note that the final output BPM is made using a
bitwise_or operation.
iters : int/None
Number of sigma clipping iterations to perform. Default is None,
which will continue sigma clipping until no further points are
masked.
"""
log = self.log
log.debug(gt.log_message("primitive", self.myself(), "starting"))
timestamp_key = self.timestamp_keys[self.myself()]
sigma = params["sigma"]
bpm_value = params["bpm_value"]
iters = params["iters"]
for ad in adinputs:
if ad.phu.get(timestamp_key):
log.warning("No changes will be made to {}, since it has "
"already been processed by clipSigmaBPM".
format(ad.filename))
continue
for ext in ad:
extver = ext.hdr['EXTVER']
if ext.mask is not None:
# MCW 190218: Form a masked array to operate on
masked_data = np.ma.masked_where(ext.mask != 0,
ext.data, copy=True)
# Perform the sigma clip
clipd = sigma_clip(
# ext.data,
masked_data,
sigma=sigma, maxiters=iters, copy=True)
# Convert the mask from the return into 0s and 1s and
# bitwise OR into the ext BPM
clipd_mask = clipd.mask.astype(ext.mask.dtype)
ext.mask |= clipd_mask * bpm_value
log.stdinfo(' {}:{}: nPixMasked: {:9d} / {:9d}'.format(
ad.filename, extver, np.sum(clipd_mask), ext.data.size))
# Original implementaion
# mean_data = np.mean(ext.data)
# sigma_data = np.std(ext.data)
# mask_map = (np.abs(ext.data-mean_data) > sigma*sigma_data)
# if bpm_value: # might call with None for diagnosis
# ext.mask[mask_map] |= bpm_value
#
# log.stdinfo(' {}:{}: nPixMasked: {:9d} / {:9d}'.format(
# ad.filename, extver, np.sum(mask_map), ext.data.size))
else:
log.warning('No DQ plane in {}:{}'.format(ad.filename,
extver))
# Timestamp; DO NOT update filename
gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)
return adinputs
    def darkCorrect(self, adinputs=None, **params):
        """
        Dark-correct GHOST observations.

        This primitive, at its core, simply copies the standard
        DRAGONS darkCorrect (part of :any:`Preprocess`). However, it has
        the ability to examine the binning mode of the requested dark,
        compare it to the adinput(s), and re-bin the dark to the
        correct format.

        To do this, this version of darkCorrect takes over the actual fetching
        of calibrations from :meth:`subtractDark`,
        manipulates the dark(s) as necessary,
        saves the updated dark to the present working directory, and then
        passes the updated list of dark frame(s) on to :meth:`subtractDark`.

        As a result, :any:`IOError` will be raised if the adinputs do not
        all share the same binning mode.

        Parameters
        ----------
        suffix: str
            suffix to be added to output files
        dark: str/list
            name(s) of the dark file(s) to be subtracted
        do_cal: str
            controls the behaviour of this primitive
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys[self.myself()]
        if params['do_cal'] == 'skip':
            log.warning("Dark correction has been turned off.")
            return adinputs
        sfx = params["suffix"]
        # Check if all the inputs have matching detector_x_bin and
        # detector_y_bin descriptors
        if not(all(
                [_.detector_x_bin() == adinputs[0].detector_x_bin() for
                 _ in adinputs])) or not(all(
            [_.detector_y_bin() == adinputs[0].detector_y_bin() for
             _ in adinputs]
        )):
            log.stdinfo('Detector x bins: %s' %
                        str([_.detector_x_bin() for _ in adinputs]))
            log.stdinfo('Detector y bins: %s' %
                        str([_.detector_y_bin() for _ in adinputs]))
            raise IOError('Your input list of files contains a mix of '
                          'different binning modes')
        # Keep the full input list for the return value; filter the working
        # list (and any user-supplied dark list, in lock-step) down to the
        # frames not yet processed by this primitive.
        adinputs_orig = list(adinputs)
        if isinstance(params['dark'], list):
            params['dark'] = [params['dark'][i] for i in range(len(adinputs))
                              if not adinputs[i].phu.get(timestamp_key)]
        adinputs = [_ for _ in adinputs if not _.phu.get(timestamp_key)]
        if len(adinputs) != len(adinputs_orig):
            log.stdinfo('The following files have already been processed by '
                        'darkCorrect and will not be further modified: '
                        '{}'.format(', '.join([_.filename for _ in adinputs_orig
                                               if _ not in adinputs])))
        if params['dark']:
            pass
        else:
            # All this line seems to do is check the valid darks can be found
            # for the adinputs
            self.getProcessedDark(adinputs, refresh=False)
        # Here we need to ape the part of subtractDark which creates the
        # dark_list, then re-bin as required, and send the updated dark_list
        # through to subtractDark
        # This is preferable to writing our own subtractDark, as it should
        # be stable against algorithm changes to dark subtraction
        # NOTE(review): despite the comment above and the docstring, the loop
        # below subtracts the dark directly (ad.subtract(dark)) rather than
        # delegating to subtractDark; dark_list_out is built but not passed on.
        dark_list = params["dark"] if params["dark"] else [
            self._get_cal(ad, 'processed_dark') for ad in adinputs]
        # We need to make sure we:
        # - Provide a dark AD object for each science frame;
        # - Do not unnecessarily re-bin the same dark to the same binning
        #   multiple times
        # NOTE(review): dark_processing_done is populated but never consulted
        # before re-binning, so repeated darks are still re-binned each time.
        dark_list_out = []
        dark_processing_done = {}
        for ad, dark in zip(*gt.make_lists(adinputs, dark_list,
                                           force_ad=True)):
            if dark is None:
                if 'qa' in self.mode:
                    log.warning("No changes will be made to {}, since no "
                                "dark was specified".format(ad.filename))
                    dark_list_out.append(None)
                    continue
                else:
                    raise IOError("No processed dark listed for {}".
                                  format(ad.filename))
            if dark.detector_x_bin() == ad.detector_x_bin() and \
                    dark.detector_y_bin() == ad.detector_y_bin():
                log.stdinfo('Binning for %s already matches input file' %
                            dark.filename)
                dark_list_out.append(dark.filename)
            else:
                # Re-bin the dark to the science binning and save the result
                # to the working directory under a new name.
                xb = ad.detector_x_bin()
                yb = ad.detector_y_bin()
                dark = self._rebin_ghost_ad(dark, xb, yb)
                # Re-name the dark so we don't blow away the old one on save
                dark_filename_orig = dark.filename
                dark.filename = filename_updater(dark,
                                                 suffix='_rebin%dx%d' %
                                                        (xb, yb, ),
                                                 strip=True)
                dark.write(overwrite=True)
                dark_processing_done[
                    (dark_filename_orig, xb, yb)] = dark.filename
                dark_list_out.append(dark.filename)
                log.stdinfo('Wrote out re-binned dark %s' % dark.filename)
            # Check the inputs have matching binning, and shapes
            # Copied from standard darkCorrect (primitives_preprocess)
            # TODO: Check exposure time?
            try:
                gt.check_inputs_match(ad, dark, check_filter=False)
            except ValueError:
                # Else try to extract a matching region from the dark
                log.warning('AD inputs did not match - attempting to clip dark')
                dark = gt.clip_auxiliary_data(ad, aux=dark, aux_type="cal")
                # Check again, but allow it to fail if they still don't match
                gt.check_inputs_match(ad, dark, check_filter=False)
            log.stdinfo("Subtracting the dark ({}) from the input "
                        "AstroData object {}".
                        format(dark.filename, ad.filename))
            ad.subtract(dark)
            # Record dark used, timestamp, and update filename
            ad.phu.set('DARKIM',
                       # os.path.abspath(dark.path),
                       dark.filename,
                       self.keyword_comments["DARKIM"])
            gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)
            ad.update_filename(suffix=sfx, strip=True)
        # Return the original (unfiltered) input list so already-processed
        # frames are not dropped from the stream.
        return adinputs_orig
    def extractProfile(self, adinputs=None, **params):
        """
        Extract the object profile from a slit or flat image.

        This is a primtive wrapper for a collection of :any:`polyfit <polyfit>`
        calls. For each AstroData input, this primitive:

        - Instantiates a :class:`polyfit.GhostArm` class for the input, and
          executes :meth:`polyfit.GhostArm.spectral_format_with_matrix`;
        - Instantiate :class:`polyfit.SlitView` and :class:`polyfit.Extractor`
          objects for the input
        - Extract the profile from the input AstroData, using calls to
          :meth:`polyfit.Extractor.one_d_extract` and
          :meth:`polyfit.Extractor.two_d_extract`.

        Parameters
        ----------
        suffix: str
            suffix to be added to output files
        slit: str/None
            Name of the (processed & stacked) slit image to use for extraction
            of the profile. If not provided/set to None, the primitive will
            attempt to pull a processed slit image from the calibrations
            database (or, if specified, the --user_cal processed_slit
            command-line option)
        slitflat: str/None
            Name of the (processed) slit flat image to use for extraction
            of the profile. If not provided, set to None, the RecipeSystem
            will attempt to pull a slit flat from the calibrations system (or,
            if specified, the --user_cal processed_slitflat command-line
            option)
        flat: str/None
            Name of the (processed) flat image to use for extraction
            of the profile. If not provided, set to None, the RecipeSystem
            will attempt to pull a slit flat from the calibrations system (or,
            if specified, the --user_cal processed_flat command-line
            option)
        sky_correct: bool
            Denotes whether or not to correct for the sky profile during the
            object extraction. Defaults to True, although it should be altered
            to False when processing flats or arcs.
        writeResult: bool
            Denotes whether or not to write out the result of profile
            extraction to disk. This is useful for both debugging, and data
            quality assurance.
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys[self.myself()]
        # This primitive modifies the input AD structure, so it must now
        # check if the primitive has already been applied. If so, it must be
        # skipped.
        adinputs_orig = list(adinputs)
        adinputs = [_ for _ in adinputs if not _.phu.get(timestamp_key)]
        if len(adinputs) != len(adinputs_orig):
            log.stdinfo('extractProfile is skipping the following files, which '
                        'already have extracted profiles: '
                        '{}'.format(','.join([_.filename for _ in adinputs_orig
                                              if _ not in adinputs])))
        # CJS: Heavily edited because of the new AD way
        # Get processed slits, slitFlats, and flats (for xmod)
        # slits and slitFlats may be provided as parameters
        # Any user-supplied per-file lists must be filtered in lock-step with
        # the filtered adinputs list above.
        slit_list = params["slit"]
        # log.stdinfo('slit_list before processing:')
        # log.stdinfo('   {}'.format(slit_list))
        if slit_list is not None and isinstance(slit_list, list):
            slit_list = [slit_list[i] for i in range(len(slit_list))
                         if adinputs_orig[i] in adinputs]
        if slit_list is None:
            # CJS: This populates the calibrations cache (dictionary) with
            # "processed_slit" filenames for each input AD
            self.getProcessedSlit(adinputs, refresh=False)
            # This then gets those filenames
            slit_list = [self._get_cal(ad, 'processed_slit')
                         for ad in adinputs]
        # log.stdinfo('slit_list after processing:')
        # log.stdinfo('   {}'.format(slit_list))
        slitflat_list = params["slitflat"]
        if slitflat_list is not None and isinstance(slitflat_list, list):
            slitflat_list = [slitflat_list[i] for i in range(len(slitflat_list))
                             if adinputs_orig[i] in adinputs]
        if slitflat_list is None:
            self.getProcessedSlitFlat(adinputs, refresh=False)
            slitflat_list = [self._get_cal(ad, 'processed_slitflat')
                             for ad in adinputs]
        # NOTE(review): if params['flat'] is not None, flat_list is never
        # assigned and the gt.make_lists call below will raise NameError.
        # Presumably this branch should set flat_list = params['flat'].
        flat = params['flat']
        if flat is None:
            self.getProcessedFlat(adinputs, refresh=False)
            flat_list = [self._get_cal(ad, 'processed_flat')
                         for ad in adinputs]
        # TODO: Have gt.make_lists handle multiple auxiliary lists?
        # CJS: Here we call gt.make_lists. This has only been designed to work
        # with one auxiliary list at present, hence the three calls. This
        # produces two lists of AD objects the same length, one of the input
        # ADs and one of the auxiliary files, from the list
        # of filenames (or single passed parameter). Importantly, if multiple
        # auxiliary frames are the same, then the file is opened only once and
        # the reference to this AD is re-used, saving speed and memory.
        _, slit_list = gt.make_lists(adinputs, slit_list, force_ad=True)
        _, slitflat_list = gt.make_lists(adinputs, slitflat_list, force_ad=True)
        _, flat_list = gt.make_lists(adinputs, flat_list, force_ad=True)
        for ad, slit, slitflat, flat in zip(adinputs, slit_list,
                                            slitflat_list, flat_list):
            # CJS: failure to find a suitable auxiliary file (either because
            # there's no calibration, or it's missing) places a None in the
            # list, allowing a graceful continuation.
            if slit is None or slitflat is None or flat is None:
                log.warning("Unable to find calibrations for {}; "
                            "skipping".format(ad.filename))
                continue
            # CJS: Changed to log.debug() and changed the output
            log.stdinfo("Slit parameters: ")
            log.stdinfo("   processed_slit: {}".format(slit.filename))
            log.stdinfo("   processed_slitflat: {}".format(slitflat.filename))
            log.stdinfo("   processed_flat: {}".format(flat.filename))
            res_mode = ad.res_mode()
            arm = GhostArm(arm=ad.arm(), mode=res_mode,
                           detector_x_bin=ad.detector_x_bin(),
                           detector_y_bin=ad.detector_y_bin())
            # CJS: Heavy refactor. Return the filename for each calibration
            # type. Eliminates requirement that everything be updated
            # simultaneously.
            # key = self._get_polyfit_key(ad)
            # log.stdinfo("Polyfit key selected: {}".format(key))
            try:
                poly_wave = self._get_polyfit_filename(ad, 'wavemod')
                poly_spat = self._get_polyfit_filename(ad, 'spatmod')
                poly_spec = self._get_polyfit_filename(ad, 'specmod')
                poly_rot = self._get_polyfit_filename(ad, 'rotmod')
                slitv_fn = self._get_slitv_polyfit_filename(ad)
                wpars = astrodata.open(poly_wave)
                spatpars = astrodata.open(poly_spat)
                specpars = astrodata.open(poly_spec)
                rotpars = astrodata.open(poly_rot)
                slitvpars = astrodata.open(slitv_fn)
            except IOError:
                log.warning("Cannot open required initial model files for {};"
                            " skipping".format(ad.filename))
                continue
            # Configure the spectrograph model and the slit viewer/extractor
            # objects for this arm, resolution mode and binning.
            arm.spectral_format_with_matrix(flat[0].XMOD, wpars[0].data,
                        spatpars[0].data, specpars[0].data, rotpars[0].data)
            sview = SlitView(slit[0].data, slitflat[0].data,
                             slitvpars.TABLE[0], mode=res_mode,
                             microns_pix = 4.54 * 180 / 50,
                             binning = slit.detector_x_bin())
            extractor = Extractor(arm, sview, badpixmask=ad[0].mask,
                                  vararray=ad[0].variance)
            # FIXED - MCW 190906
            # Added a kwarg to one_d_extract (the only Extractor method which
            # uses Extractor.vararray), allowing an update to the instance's
            # .vararray attribute
            # Work on copies: ad[0] gets overwritten inside the extraction
            # loop below, but each pass must start from the same input.
            corrected_data = deepcopy(ad[0].data)
            corrected_var = deepcopy(ad[0].variance)
            # Compute the flat correction, and add to bad pixels based on this.
            # FIXME: This really could be done as part of flat processing!
            if params['flat_precorrect']:
                try:
                    pix_to_correct = flat[0].PIXELMODEL > 0
                    # Lets find the flat normalisation constant.
                    # FIXME Should this normalisation be done elsewhere?
                    mean_flat_flux = np.mean(flat[0].data[pix_to_correct])
                    mean_pixelmod = np.mean(flat[0].PIXELMODEL[pix_to_correct])
                    # Now find the correction.
                    correction = flat[0].PIXELMODEL[pix_to_correct] / \
                                 flat[0].data[pix_to_correct] * \
                                 mean_flat_flux/mean_pixelmod
                    # Find additional bad pixels where the flat doesn't match PIXELMODEL
                    # This is important to have somewhere, because otherwise any
                    # newly dead pixels will result in divide by 0.
                    smoothed_flat = convolve_with_mask(flat[0].data,
                                                       pix_to_correct)
                    normalised_flat = flat[0].data / smoothed_flat
                    # Extra bad pixels are where the normalied flat differs from the
                    # PIXELMODEL, where PIXELMODEL is non-zero and there is a
                    # non-negligible amount of smoothed flat flux.
                    # FIXME: the 0.7 on the next line should be significantly lower, but
                    # requires a model that fits the data well. Re-examine with real
                    # data.
                    extra_bad = (
                        np.abs(
                            normalised_flat - flat[0].PIXELMODEL/mean_pixelmod
                        ) > 0.7
                    ) & pix_to_correct * (
                        smoothed_flat > 0.1 * mean_flat_flux
                    )
                    # import pdb; pdb.set_trace()
                    # MCW 190912 - converted to option, default is 'False'
                    # TODO: MJI to add description of what this (should) do
                    if params['smooth_flat_spatially']:
                        correction_2d = np.zeros_like(flat[0].data)
                        correction_2d[pix_to_correct] = correction
                        smoothed_correction_2d = convolve_with_mask(
                            correction_2d, pix_to_correct)
                        smoothed_correction_2d[
                            pix_to_correct
                        ] = correction_2d[pix_to_correct]
                        smoothed_correction_2d = nd.median_filter(
                            smoothed_correction_2d, size=(7, 1)
                        )
                        correction = smoothed_correction_2d[pix_to_correct]
                    # This is where we add the new bad pixels in. It is needed for
                    # computing correct weights.
                    #TODO: These 4 lines (and possibly correction= BLAH) can stay.
                    #the rest to go to findApertures
                    extractor.vararray[extra_bad] = np.inf
                    extractor.badpixmask[extra_bad] |= BAD_FLAT_FLAG
                    # MJI: Pre-correct the data here.
                    corrected_data[pix_to_correct] *= correction
                    corrected_var[pix_to_correct] *= correction**2
                    # Uncomment to bugshoot finding bad pixels for the flat. Should be
                    # repeated once models are reasonable for real data as a sanity
                    # check
                    #import matplotlib.pyplot as plt
                    #plt.ion()
                    #plt.clf()
                    #plt.imshow(corrected_data, vmin=0, vmax=4*np.percentile(corrected_data,75))
                    #plt.imshow(plotit)
                    #import pdb; pdb.set_trace()
                except AttributeError as e:  # Catch if no PIXELMODEL
                    # NOTE(review): BaseException.message does not exist in
                    # Python 3 - both the 'in' test and the assignment below
                    # will themselves raise AttributeError. Use str(e)/e.args
                    # when porting.
                    if 'PIXELMODEL' in e.message:
                        e.message = 'The flat {} has no PIXELMODEL extension ' \
                                    '- either run extractProfile without the ' \
                                    'flat_precorrect option, or re-generate ' \
                                    'your flat field without the ' \
                                    'skip_pixel_model option.\n' \
                                    '(Original error message: {})'.format(
                                        flat.filename,
                                        e.message,
                                    )
                        raise e
                    else:
                        raise
            # MCW 190830
            # MI wants iteration over all possible combinations of sky and
            # object(s)
            # This should only happen for object files, because:
            # - arcs require either "sky only" or "skyless" extraction;
            # - standards should only extract the actual profile in single
            #   object mode.
            # Each entry pairs the object indices to extract (objs_to_use)
            # with whether sky is modelled in that pass (use_sky).
            if 'ARC' in ad.tags:
                objs_to_use = [[], [0, 1], ]
                use_sky = [True, False, ]
            elif 'PARTNER_CAL' in ad.tags:
                objs_to_use = [[0, ],[1, ], ]
                use_sky = [True, True, ]
            else:
                objs_to_use = [
                    [0, ], [0, ], [1, ], [1, ], [0, 1], [0, 1], [],
                ]
                use_sky = [
                    False, True, False, True, False, True, True,
                ]
            # MJI - Uncomment the lines below for testing in the simplest possible case.
            #objs_to_use = [[0], ]
            #use_sky = [False, ]
            for i, (o, s) in enumerate(zip(objs_to_use, use_sky)):
                print("OBJECTS:" + str(o))
                print("SKY:" + str(s))
                # CJS: Makes it clearer that you're throwing the first two
                # returned objects away (get replaced in the two_d_extract call)
                # Need to use corrected_data here; the data in ad[0] is
                # overwritten with the first extraction pass of this loop
                # (see the try-except statement at line 925)
                DUMMY, _, extracted_weights = extractor.one_d_extract(
                    data=corrected_data, vararray=corrected_var,
                    correct_for_sky=params['sky_correct'],
                    use_sky=s, used_objects=o,
                )
                # DEBUG - see Mike's notes.txt, where we want to look at DUMMY
                #import matplotlib.pyplot as plt
                #import pickle
                #pickle.dump( (DUMMY), open( "dummy.p", "wb" ) )
                #plt.ion()
                #plt.figure(1)
                ##plt.plot(DUMMY[1,3510:3720,0])
                ##plt.plot(np.sum(corrected_data[340:410,3510:3720], axis=0))
                #plt.plot(np.sum(corrected_data[540:645,2380:3280], axis=0))
                #plt.plot(DUMMY[2,2380:3280], label='Extracted')
                #plt.ylim([0,6e4])
                #plt.legend()
                #import pdb; pdb.set_trace()
                extracted_flux, extracted_var = extractor.two_d_extract(
                    corrected_data,
                    extraction_weights=extracted_weights,
                )
                # CJS: Since you don't use the input AD any more, I'm going to
                # modify it in place, in line with your comment that you're
                # considering this.
                # MCW now going to add extra EXTVARs to account for different
                # extractions, where necessary
                # import pdb; pdb.set_trace()
                # Re-use extension i if it exists; otherwise clone the
                # previous extension to grow the AD before resetting it.
                try:
                    ad[i].reset(extracted_flux, mask=None,
                                variance=extracted_var)
                except IndexError:
                    new_adi = deepcopy(ad[i - 1])
                    ad.append(new_adi[0])
                    ad[i].reset(extracted_flux, mask=None,
                                variance=extracted_var)
                ad[i].WGT = extracted_weights
                ad[i].hdr['DATADESC'] = (
                    'Order-by-order processed science data - '
                    'objects {}, sky correction = {}'.format(
                        str(o), str(params['sky_correct'])),
                    self.keyword_comments['DATADESC'])
            # Timestamp and update filename
            gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)
            ad.update_filename(suffix=params["suffix"], strip=True)
            ad.phu.set("FLATIM", flat.filename, self.keyword_comments["FLATIM"])
            # ad[0].hdr['DATADESC'] = ('Order-by-order processed science data',
            #                          self.keyword_comments['DATADESC'])
            if params["write_result"]:
                ad.write(overwrite=True)
        return adinputs_orig
    def interpolateAndCombine(self, adinputs=None, **params):
        """
        Combine the independent orders from the input ADs into a single,
        over-sampled spectrum.

        The wavelength scale of the output is determined by finding the
        wavelength range of the input, and generating a new
        wavelength sampling in accordance with the ``scale`` and
        ``oversample`` parameters.

        The output spectrum is constructed as follows:

        - A blank spectrum, corresponding to the new wavelength scale, is
          initialised;
        - For each order of the input AstroData object:

            - The spectrum order is re-gridded onto the output wavelength scale;
            - The re-gridded order is averaged with the final output spectrum
              to form a new output spectrum.

        This process continues until all orders have been averaged into the
        final output spectrum.

        Note that the un-interpolated data is kept - the interpolated data
        is appended to the end of the file as a new extension.

        Parameters
        ----------
        scale : str
            Denotes what scale to generate for the final spectrum. Currently
            available are:
            ``'loglinear'``
            Default is ``'loglinear'``.
        skip : bool
            Set to ``True`` to skip this primitive. Defaults to ``False``.
        oversample : int or float
            The factor by which to (approximately) oversample the final output
            spectrum, as compared to the input spectral orders. Defaults to 2.
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys[self.myself()]
        for ad in adinputs:
            if ad.phu.get(timestamp_key):
                log.warning("No changes will be made to {}, since it has "
                            "already been processed by interpolateAndCombine".
                            format(ad.filename))
                continue
            if params['skip']:
                log.warning('Skipping interpolateAndCombine for {}'.format(
                    ad.filename
                ))
                continue
            # MCW, 180501 - Keep initial data, append interp'd data
            ad_interp = deepcopy(ad)
            for i, ext in enumerate(ad):
                # Determine the wavelength bounds of the file
                min_wavl, max_wavl = np.min(ext.WAVL), np.max(ext.WAVL)
                # Median log-spacing of the input wavelength scale; used to
                # size the output grid so it oversamples the input.
                logspacing = np.median(
                    np.log(ext.WAVL[:, 1:]) - np.log(ext.WAVL[:, :-1])
                )
                # Form a new wavelength scale based on these extremes
                if params['scale'] == 'loglinear':
                    wavl_grid = np.exp(
                        np.linspace(np.log(min_wavl), np.log(max_wavl),
                                    num=int(
                                        (np.log(max_wavl) - np.log(min_wavl)) /
                                        (logspacing / float(params['oversample']))
                                    ))
                    )
                else:
                    raise ValueError('interpolateAndCombine does not understand '
                                     'the scale {}'.format(params['scale']))
                # Create a final spectrum and (inverse) variance to match
                # (One plane per object)
                # var_final starts at +inf so the first weighted average is
                # dominated entirely by the first re-gridded order.
                no_obj = ext.data.shape[-1]
                spec_final = np.zeros(wavl_grid.shape + (no_obj, ))
                var_final = np.inf * np.ones(wavl_grid.shape + (no_obj, ))
                # Loop over each input order, making the output spectrum the
                # result of the weighted average of itself and the order
                # spectrum
                for order in range(ext.data.shape[0]):
                    for ob in range(ext.data.shape[-1]):
                        log.stdinfo('Re-gridding order {:2d}, obj {:1d}'.format(
                            order, ob,
                        ))
                        flux_for_adding = np.interp(wavl_grid,
                                                    ext.WAVL[order],
                                                    ext.data[order, :, ob],
                                                    left=0, right=0)
                        ivar_for_adding = np.interp(wavl_grid,
                                                    ext.WAVL[order],
                                                    1.0 /
                                                    ext.variance[order, :, ob],
                                                    left=0, right=0)
                        spec_comp, ivar_comp = np.ma.average(
                            np.asarray([spec_final[:, ob], flux_for_adding]),
                            weights=np.asarray([1.0 / var_final[:, ob],
                                                ivar_for_adding]),
                            returned=True, axis=0,
                        )
                        spec_final[:, ob] = deepcopy(spec_comp)
                        var_final[:, ob] = deepcopy(1.0 / ivar_comp)
                # import pdb;
                # pdb.set_trace()
                # Can't use .reset without looping through extensions
                # NOTE(review): only ad_interp[0] is written here, yet
                # ad_interp[i] is appended below - for i > 0 this appends an
                # extension that was never updated. Confirm multi-extension
                # inputs are handled as intended.
                ad_interp[0].data = spec_final
                ad_interp[0].variance = var_final
                ad_interp[0].WAVL = wavl_grid
                try:
                    del ad_interp[0].WGT
                except AttributeError:
                    pass
                ad_interp[0].hdr['DATADESC'] = (
                    'Interpolated data',
                    self.keyword_comments['DATADESC'], )
                ad.append(ad_interp[i])
            # Timestamp & update filename
            gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)
            ad.update_filename(suffix=params["suffix"], strip=True)
        return adinputs
def findApertures(self, adinputs=None, **params):
"""
Locate the slit aperture, parametrized by an :any:`polyfit` model.
The primitive locates the slit apertures within a GHOST frame,
and inserts a :any:`polyfit` model into a new extension on each data
frame. This model is placed into a new ``.XMOD`` attribute on the
extension.
Parameters
----------
slitflat: str or :class:`astrodata.AstroData` or None
slit flat to use; if None, the calibration system is invoked
"""
log = self.log
log.debug(gt.log_message("primitive", self.myself(), "starting"))
timestamp_key = self.timestamp_keys[self.myself()]
# Make no attempt to check if primitive has already been run - may
# have new calibrators we wish to apply.
# CJS: See comment in extractProfile() for handling of calibrations
flat_list = params["slitflat"]
if flat_list is None:
self.getProcessedSlitFlat(adinputs, refresh=False)
flat_list = [self._get_cal(ad, 'processed_slitflat')
for ad in adinputs]
if params['skip_pixel_model']:
log.stdinfo('Skipping adding the pixel model to the flat'
'step')
for ad, slit_flat in zip(*gt.make_lists(adinputs, flat_list,
force_ad=True)):
if not {'PREPARED', 'GHOST', 'FLAT'}.issubset(ad.tags):
log.warning("findApertures is only run on prepared flats: "
"{} will not be processed".format(ad.filename))
continue
try:
poly_xmod = self._get_polyfit_filename(ad, 'xmod')
log.stdinfo('Found xmod: {}'.format(poly_xmod))
poly_spat = self._get_polyfit_filename(ad, 'spatmod')
log.stdinfo('Found spatmod: {}'.format(poly_spat))
slitv_fn = self._get_slitv_polyfit_filename(ad)
log.stdinfo('Found slitvmod: {}'.format(slitv_fn))
xpars = astrodata.open(poly_xmod)
spatpars = astrodata.open(poly_spat)
slitvpars = astrodata.open(slitv_fn)
except IOError:
log.warning("Cannot open required initial model files for {};"
" skipping".format(ad.filename))
continue
arm = ad.arm()
res_mode = ad.res_mode()
ghost_arm = GhostArm(arm=arm, mode=res_mode)
# Create an initial model of the spectrograph
xx, wave, blaze = ghost_arm.spectral_format(xparams=xpars[0].data)
slitview = SlitView(slit_flat[0].data, slit_flat[0].data,
slitvpars.TABLE[0], mode=res_mode,
microns_pix=4.54*180/50,
binning=slit_flat.detector_x_bin())
# This is an attempt to remove the worse cosmic rays
# in the hope that the convolution is not affected by them.
# Start by performing a median filter
medfilt = signal.medfilt2d(ad[0].data, (5,5))
# Now find which pixels have a percentage difference larger than
# a defined value between the data and median filter, and replace
# those in the data with the median filter values. Also, only
# replace values above the data average, so as not to replace low
# S/N values at the edges.
data = ad[0].data.copy()
condit = np.where(np.abs(
(medfilt - data)/(medfilt+1)) > 200
) and np.where(data > np.average(data))
data[condit] = medfilt[condit]
# Convolve the flat field with the slit profile
flat_conv = ghost_arm.slit_flat_convolve(
data,
slit_profile=slitview.slit_profile(arm=arm),
spatpars=spatpars[0].data, microns_pix=slitview.microns_pix,
xpars=xpars[0].data
)
flat_conv = signal.medfilt2d(flat_conv, (5, 5))
# Fit the initial model to the data being considered
fitted_params = ghost_arm.fit_x_to_image(flat_conv,
xparams=xpars[0].data,
decrease_dim=8,
inspect=False)
# CJS: Append the XMOD as an extension. It will inherit the
# header from the science plane (including irrelevant/wrong
# keywords like DATASEC) but that's not really a big deal.
# (The header can be modified/started afresh if needed.)
ad[0].XMOD = fitted_params
#MJI: Compute a pixel-by-pixel model of the flat field from the new XMOD and
#the slit image.
if not params['skip_pixel_model']:
# FIXME: MJI Copied directly from extractProfile. Is this compliant?
try:
poly_wave = self._get_polyfit_filename(ad, 'wavemod')
poly_spat = self._get_polyfit_filename(ad, 'spatmod')
poly_spec = self._get_polyfit_filename(ad, 'specmod')
poly_rot = self._get_polyfit_filename(ad, 'rotmod')
wpars = astrodata.open(poly_wave)
spatpars = astrodata.open(poly_spat)
specpars = astrodata.open(poly_spec)
rotpars = astrodata.open(poly_rot)
except IOError:
log.warning("Cannot open required initial model files "
"for {}; skipping".format(ad.filename))
continue
#Create an extractor instance, so that we can add the pixel model to the
#data.
ghost_arm.spectral_format_with_matrix(ad[0].XMOD, wpars[0].data,
spatpars[0].data, specpars[0].data, rotpars[0].data)
extractor = Extractor(ghost_arm, slitview, badpixmask=ad[0].mask,
vararray=ad[0].variance)
pixel_model = extractor.make_pixel_model()
ad[0].PIXELMODEL = pixel_model
gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)
return adinputs
def fitWavelength(self, adinputs=None, **params):
"""
Fit wavelength solution to a GHOST ARC frame.
This primitive should only be applied to a reduce GHOST ARC frame. Any
other files passed through this primitive will be skipped.
This primitive works as follows:
- :class:`polyfit.ghost.GhostArm` and `polyfit.extract.Extractor`
classes are instantiated and configured for the data;
- The ``Extractor`` class is used to find the line locations;
- The ``GhostArm`` class is used to fit this line solution to the data.
The primitive will use the arc line files stored in the same location
as the initial :module:`polyfit` models kept in the ``lookups`` system.
This primitive uses no special parameters.
"""
log = self.log
log.debug(gt.log_message("primitive", self.myself(), "starting"))
timestamp_key = self.timestamp_keys[self.myself()]
# import pdb; pdb.set_trace()
# Make no attempt to check if primitive has already been run - may
# have new calibrators we wish to apply.
flat = params['flat']
if not flat:
self.getProcessedFlat(adinputs, refresh=False)
flat_list = [self._get_cal(ad, 'processed_flat') for ad in adinputs]
for ad, flat in zip(*gt.make_lists(adinputs, flat_list, force_ad=True)):
# CJS: Since we're not saving the processed_arc before this, we
# can't check for the tags. Instead, let's look for the WGT extn
if not hasattr(ad[0], 'WGT'):
log.warning("fitWavelength is only run on prepared GHOST arc"
" files - skipping {}".format(ad.filename))
continue
if self.timestamp_keys["extractProfile"] not in ad.phu:
log.warning("extractProfile has not been run on {} - "
"skipping".format(ad.filename))
continue
if flat is None:
log.warning("Could not find processed_flat calibration for "
"{} - skipping".format(ad.filename))
continue
try:
poly_wave = self._get_polyfit_filename(ad, 'wavemod')
poly_spat = self._get_polyfit_filename(ad, 'spatmod')
poly_spec = self._get_polyfit_filename(ad, 'specmod')
poly_rot = self._get_polyfit_filename(ad, 'rotmod')
wpars = astrodata.open(poly_wave)
spatpars = astrodata.open(poly_spat)
specpars = astrodata.open(poly_spec)
rotpars = astrodata.open(poly_rot)
except IOError:
log.warning("Cannot open required initial model files for {};"
" skipping".format(ad.filename))
continue
# CJS: line_list location is now in lookups/__init__.py
arclinefile = os.path.join(os.path.dirname(polyfit_dict.__file__),
line_list)
arcwaves, arcfluxes = np.loadtxt(arclinefile, usecols=[1, 2]).T
arm = GhostArm(arm=ad.arm(), mode=ad.res_mode())
arm.spectral_format_with_matrix(flat[0].XMOD,
wpars[0].data,
spatpars[0].data,
specpars[0].data,
rotpars[0].data)
extractor = Extractor(arm, None) # slitview=None for this usage
# Find lines based on the extracted flux and the arc wavelengths.
# Note that "inspect=True" also requires and input arc file, which has
# the non-extracted data. There is also a keyword "plots".
lines_out = extractor.find_lines(ad[0].data, arcwaves,
arcfile=ad[0].data,
plots=params['plot_fit'])
#lines_out is now a long vector of many parameters, including the
#x and y position on the chip of each line, the order, the expected
#wavelength, the measured line strength and the measured line width.
fitted_params, wave_and_resid = arm.read_lines_and_fit(
wpars[0].data, lines_out)
# CJS: Append the WFIT as an extension. It will inherit the
# header from the science plane (including irrelevant/wrong
# keywords like DATASEC) but that's not really a big deal.
ad[0].WFIT = fitted_params
gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)
return adinputs
    def flatCorrect(self, adinputs=None, **params):
        """
        Flat-correct an extracted GHOST profile using a flat profile.

        This primitive works by extracting the
        profile from the relevant flat field using the object's extracted
        weights, and then performs simple division.

        .. warning::
            While the primitive is working, it has been found that the
            underlying algorithm is flawed. A new algorithm is being developed.

        Parameters
        ----------
        suffix: str
            suffix to be added to output files
        flat: str/None
            Name of the (processed) standard flat to use for flat profile
            extraction. If None, the primitive will attempt to pull a flat
            from the calibrations database (or, if specified, the
            --user_cal processed_flat command-line option)
        slit: str/None
            Name of the (processed & stacked) slit image to use for extraction
            of the profile. If not provided/set to None, the primitive will
            attempt to pull a processed slit image from the calibrations
            database (or, if specified, the --user_cal processed_slit
            command-line option)
        slitflat: str/None
            Name of the (processed) slit flat image to use for extraction
            of the profile. If not provided, set to None, the RecipeSystem
            will attempt to pull a slit flat from the calibrations system (or,
            if specified, the --user_cal processed_slitflat command-line
            option)
        writeResult: bool
            Denotes whether or not to write out the result of profile
            extraction to disk. This is useful for both debugging, and data
            quality assurance.
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys[self.myself()]
        sfx = params["suffix"]

        if params['skip']:
            log.stdinfo('Skipping the flat field correction '
                        'step')
            return adinputs

        # Drop any inputs already flat-corrected; keep the original list so
        # it can be returned (and so per-file calibration lists can be
        # re-aligned below).
        adinputs_orig = list(adinputs)
        adinputs = [_ for _ in adinputs if not _.phu.get(timestamp_key)]
        if len(adinputs) != len(adinputs_orig):
            log.stdinfo('flatCorrect is skipping the following files, '
                        'which are already flat corrected: '
                        '{}'.format(','.join([_ for _ in adinputs_orig
                                              if _ not in adinputs])))

        # CJS: See extractProfile() refactoring for explanation of changes
        # If the user passed a per-file list, filter it down so it stays
        # aligned with the (possibly reduced) adinputs list.
        slit_list = params["slit"]
        if slit_list is not None and isinstance(slit_list, list):
            slit_list = [slit_list[i] for i in range(len(slit_list))
                         if adinputs_orig[i] in adinputs]
        if slit_list is None:
            self.getProcessedSlit(adinputs, refresh=False)
            slit_list = [self._get_cal(ad, 'processed_slit')
                         for ad in adinputs]

        # CJS: I've renamed flat -> slitflat and obj_flat -> flat because
        # that's what the things are called! Sorry if I've overstepped.
        slitflat_list = params["slitflat"]
        if slitflat_list is not None and isinstance(slitflat_list, list):
            slitflat_list = [slitflat_list[i] for i in range(len(slitflat_list))
                             if adinputs_orig[i] in adinputs]
        if slitflat_list is None:
            self.getProcessedSlitFlat(adinputs, refresh=False)
            slitflat_list = [self._get_cal(ad, 'processed_slitflat')
                             for ad in adinputs]

        flat_list = params["flat"]
        if flat_list is not None and isinstance(flat_list, list):
            flat_list = [flat_list[i] for i in range(len(flat_list))
                         if adinputs_orig[i] in adinputs]
        if flat_list is None:
            self.getProcessedFlat(adinputs, refresh=False)
            flat_list = [self._get_cal(ad, 'processed_flat')
                         for ad in adinputs]

        # TODO: Have gt.make_lists handle multiple auxiliary lists?
        _, slit_list = gt.make_lists(adinputs, slit_list, force_ad=True)
        _, slitflat_list = gt.make_lists(adinputs, slitflat_list, force_ad=True)
        _, flat_list = gt.make_lists(adinputs, flat_list, force_ad=True)

        for ad, slit, slitflat, flat, in zip(adinputs, slit_list,
                                             slitflat_list, flat_list):
            if ad.phu.get(timestamp_key):
                log.warning("No changes will be made to {}, since it has "
                            "already been processed by flatCorrect".
                            format(ad.filename))
                continue

            # CJS: failure to find a suitable auxiliary file (either because
            # there's no calibration, or it's missing) places a None in the
            # list, allowing a graceful continuation.
            if slit is None or slitflat is None or flat is None:
                log.warning("Unable to find calibrations for {}; "
                            "skipping".format(ad.filename))
                continue

            # Load the initial polyfit models (plus the slit-viewer model)
            # required to reconstruct the spectral format for this frame.
            try:
                poly_wave = self._get_polyfit_filename(ad, 'wavemod')
                poly_spat = self._get_polyfit_filename(ad, 'spatmod')
                poly_spec = self._get_polyfit_filename(ad, 'specmod')
                poly_rot = self._get_polyfit_filename(ad, 'rotmod')
                slitv_fn = self._get_slitv_polyfit_filename(ad)
                wpars = astrodata.open(poly_wave)
                spatpars = astrodata.open(poly_spat)
                specpars = astrodata.open(poly_spec)
                rotpars = astrodata.open(poly_rot)
                slitvpars = astrodata.open(slitv_fn)
            except IOError:
                log.warning("Cannot open required initial model files for {};"
                            " skipping".format(ad.filename))
                continue

            res_mode = ad.res_mode()
            arm = GhostArm(arm=ad.arm(), mode=res_mode,
                           detector_x_bin= ad.detector_x_bin(),
                           detector_y_bin= ad.detector_y_bin()
                           )
            arm.spectral_format_with_matrix(flat[0].XMOD,
                                            wpars[0].data,
                                            spatpars[0].data,
                                            specpars[0].data,
                                            rotpars[0].data,
                                            )
            sview = SlitView(slit[0].data, slitflat[0].data,
                             slitvpars.TABLE[0], mode=res_mode,
                             microns_pix=4.54*180/50,
                             binning = slit.detector_x_bin())

            extractor = Extractor(arm, sview)
            #FIXME - Marc and were *going* to try:
            #adjusted_data = arm.bin_data(extractor.adjust_data(flat[0].data))
            # Extract the flat profile using the science frame's own
            # extraction weights (WGT), so both share the same apertures.
            extracted_flux, extracted_var = extractor.two_d_extract(
                arm.bin_data(flat[0].data), extraction_weights=ad[0].WGT)

            # Normalised extracted flat profile
            # (variance scales with the square of the normalisation factor)
            med = np.median(extracted_flux)
            extracted_flux /= med
            extracted_var /= med**2

            flatprof_ad = deepcopy(ad)
            flatprof_ad.update_filename(suffix='_extractedFlatProfile',
                                        strip=True)
            flatprof_ad[0].reset(extracted_flux, mask=None,
                                 variance=extracted_var)
            if params["write_result"]:
                flatprof_ad.write(overwrite=True)
                # Record this as the flat profile used
                ad.phu.set('FLATPROF', os.path.abspath(flatprof_ad.path),
                           self.keyword_comments['FLATPROF'])
            # NOTE(review): the three keywords below pull their comments from
            # the keyword_comments *module*, whereas FLATPROF above uses
            # self.keyword_comments - confirm both mappings carry these keys.
            ad.phu.set('FLATIMG', os.path.abspath(flat.path),
                       keyword_comments.keyword_comments['FLATIMG'])
            ad.phu.set('SLITIMG', os.path.abspath(slit.path),
                       keyword_comments.keyword_comments['SLITIMG'])
            ad.phu.set('SLITFLAT', os.path.abspath(slitflat.path),
                       keyword_comments.keyword_comments['SLITFLAT'])

            # Divide the flat field through the science data
            # Arithmetic propagates VAR correctly
            ad /= flatprof_ad

            gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)
            ad.update_filename(suffix=sfx, strip=True)

        # This nomenclature is misleading - this is the list of
        # intitially-passed AstroData objects, some of which may have been
        # skipped, and others which should have been modified by this
        # primitive
        return adinputs_orig
    def formatOutput(self, adinputs=None, **params):
        """
        Generate an output FITS file containing the data requested by the user.

        This primitive should not be called until *all* required
        processing steps have been performed on the data. THe resulting FITS
        file cannot be safely passed through to other primitives.

        .. note::
            All of the extra data packaged up by this primitive can also be
            obtained by using the ``write_result=True`` flag on selected
            other primitives. ``formatOutput`` goes and finds those output
            files, and then packages them into the main output file for
            convenience.

        Parameters
        ----------
        detail: str
            The level of detail the user would like in their final output file.

            Note that, in order to preserve the ordering of FITS file
            extensions, the options are sequential; each option will
            provide all the data of less-verbose options.

            Valid options are:

            ``default``
                Only returns the extracted, fully-processed object(s) and sky
                spectra. In effect, this causes ``formatOutput`` to do nothing.
                This includes computed variance data for each plane.

            ``processed_image``
                The option returns the data that have been bias and dark
                corrected, and has the flat BPM applied (i.e. the state the
                data are in immediately prior to profile extraction).

            ``flat_profile``
                This options includes the extracted flat profile used for
                flat-fielding the data.

            ``sensitivity_curve``
                This option includes the sensitivity calculated at the
                :meth:`responseCorrect <responseCorrect>` step of reduction.
        """
        # This should be the list of allowed detail descriptors in order of
        # increasing verbosity
        ALLOWED_DETAILS = ['default', 'processed_image', 'flat_profile',
                           'sensitivity_curve', ]

        log = self.log

        timestamp_key = self.timestamp_keys[self.myself()]
        sfx = params['suffix']

        if params['detail'] not in ALLOWED_DETAILS:
            raise ValueError('formatOutput: detail option {} not known. '
                             'Please use one of: {}'.format(
                params['detail'],
                ', '.join(ALLOWED_DETAILS),
            ))

        # Index into ALLOWED_DETAILS; every stage at or below this verbosity
        # level is appended to the output file.
        detail_index = ALLOWED_DETAILS.index(params['detail'])

        for ad in adinputs:
            # Move sequentially through the various levels of detail, adding
            # them as we go along
            # ad[0].hdr['DATADESC'] = ('Fully-reduced data',
            #                          self.keyword_comments['DATADESC'], )

            # Stage 1: re-attach the processed (pre-extraction) image, whose
            # on-disk location was recorded in the PROCIMG header keyword.
            if ALLOWED_DETAILS.index('processed_image') <= detail_index:
                # Locate the processed image data
                fn = ad.phu.get('PROCIMG', None)
                if fn is None:
                    raise RuntimeError('The processed image file name for {} '
                                       'has not been '
                                       'recorded'.format(ad.filename))
                try:
                    proc_image = astrodata.open(fn)
                except astrodata.AstroDataError:
                    raise RuntimeError('You appear not to have written out '
                                       'the result of image processing to '
                                       'disk.')

                log.stdinfo('Opened processed image file {}'.format(fn))
                ad.append(proc_image[0])
                ad[-1].hdr['DATADESC'] = ('Processed image',
                                          self.keyword_comments['DATADESC'])

            # Stage 2: re-attach the extracted flat profile (FLATPROF
            # keyword), stripping its WGT extension to keep the output lean.
            if ALLOWED_DETAILS.index('flat_profile') <= detail_index:
                # Locate the flat profile data
                fn = ad.phu.get('FLATPROF', None)
                if fn is None:
                    raise RuntimeError('The flat profile file name for {} '
                                       'has not been '
                                       'recorded'.format(ad.filename))
                try:
                    proc_image = astrodata.open(fn)
                except astrodata.AstroDataError:
                    raise RuntimeError('You appear not to have written out '
                                       'the result of flat profiling to '
                                       'disk.')

                log.stdinfo('Opened flat profile file {}'.format(fn))
                # proc_image[0].WGT = None
                try:
                    del proc_image[0].WGT
                except AttributeError:
                    pass
                ad.append(proc_image[0])
                ad[-1].hdr['DATADESC'] = ('Flat profile',
                                          self.keyword_comments['DATADESC'])

            # Stage 3: re-attach the sensitivity curve (SENSFUNC keyword),
            # stripping both the WGT and WAVL extensions.
            if ALLOWED_DETAILS.index('sensitivity_curve') <= detail_index:
                fn = ad.phu.get('SENSFUNC', None)
                if fn is None:
                    raise RuntimeError('The sensitivity curve file name for {} '
                                       'has not been '
                                       'recorded'.format(ad.filename))
                try:
                    proc_image = astrodata.open(fn)
                except astrodata.AstroDataError:
                    raise RuntimeError('You appear not to have written out '
                                       'the result of sensitivity calcs to '
                                       'disk.')

                log.stdinfo('Opened sensitivity curve file {}'.format(fn))
                # proc_image[0].WGT = None
                try:
                    del proc_image[0].WGT
                except AttributeError:
                    pass
                try:
                    del proc_image[0].WAVL
                except AttributeError:
                    pass
                ad.append(proc_image[0])
                ad[-1].hdr['DATADESC'] = ('Sensitivity curve (blaze func.)',
                                          self.keyword_comments['DATADESC'])

            # import pdb; pdb.set_trace();

            # Note: the assembled file is written to disk unconditionally.
            gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)
            ad.update_filename(suffix=sfx, strip=True)
            ad.write(overwrite=True)

        return adinputs
    def rejectCosmicRays(self, adinputs=None, **params):
        """
        Reject cosmic rays from GHOST data.

        .. warning::
            This primitive is now deprecated - cosmic ray rejection is now
            handled as part of the profile extraction process.

        Parameters
        ----------
        n_steps: int
            The number of iterations that the LACosmic algorithm will make.
        subsampling: int
            The image subsampling factor LACosmic will use to generate the
            input images for the algorithm. There is really no reason to
            change this value from the default.
        sigma_lim: float
            The sigma-clipping limit to be applied to the noise map.
        f_lim: float
            The clipping limit for the fine-structure image.
        """
        raise DeprecationWarning('Cosmic ray rejections is now handled '
                                 'as part of the profile extraction process. '
                                 'rejectCosmicRays is *not* being maintained.')

        # NOTE: everything below this point is unreachable while the
        # DeprecationWarning above is raised; it is retained for reference
        # only (an implementation of van Dokkum 2001's L.A.Cosmic).
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys[self.myself()]

        n_steps = params["n_steps"]
        subsampling = params["subsampling"]
        sigma_lim = params["sigma_lim"]
        f_lim = params["f_lim"]

        # Define the Laplacian and growth kernels for L.A.Cosmic
        laplace_kernel = np.array([
            [0.0, -1.0, 0.0],
            [-1.0, 4.0, -1.0],
            [0.0, -1.0, 0.0],
        ])
        growth_kernel = np.ones((3, 3), dtype=np.float64)

        for ad in adinputs:
            if ad.phu.get(timestamp_key):
                log.warning("No changes will be made to {}, since it has "
                            "already been processed by rejectCosmicRays".
                            format(ad.filename))
                continue

            # Define the function for performing the median-replace of cosmic
            # ray pixels
            # Note that this is different from a straight median filter, as we
            # *don't* want to include the central pixel
            fp = [[1, 1, 1],
                  [1, 0, 1],
                  [1, 1, 1]]
            median_replace = functools.partial(scipy.ndimage.generic_filter,
                                               function=np.median, footprint=fp,
                                               mode='constant',
                                               cval=np.nan)

            log.stdinfo("Doing CR removal for {}".format(ad.filename))

            for ext in ad:
                # CJS: Added forced creation of DQ plane
                if ext.mask is None:
                    ext.mask = np.zeros_like(ext.data, dtype=np.uint16)
                log.stdinfo('-----')
                log.stdinfo("EXTVER {}".format(ext.hdr['EXTVER']))
                log.stdinfo('-----')

                # Define an array that will hold the cosmic ray flagging
                # Note that we're deliberately not using the BPM at this stage,
                # otherwise the algorithm will start searching for cosmic rays
                # around pixels that have been flagged bad for another reason.
                cosmic_bpm = np.zeros_like(ext.data, dtype=np.int16)

                # Start with a fresh copy of the data
                # Use numpy NaN to cover up any data detected bad so far
                # (i.e. 0 < BPM < 8)
                clean_data = np.copy(ext.data)
                clean_data[ext.mask > 0] = np.nan

                # Iterate until no new cosmic rays are found, or the pass
                # budget (n_steps) is exhausted.
                no_passes = 0
                new_crs = 1
                new_cr_pix = None

                while new_crs > 0 and no_passes < n_steps:
                    no_passes += 1
                    curr_crs = np.count_nonzero(cosmic_bpm)
                    if curr_crs > 0 and new_cr_pix is not None:
                        # Median out the pixels already defined as cosmic rays
                        log.stdinfo('Pass {}: Median over previously '
                                    'found CR pix'.format(no_passes))

                        # One pass option - slow
                        # clean_data[new_cr_pix > 0] = median_replace(
                        #     clean_data)[new_cr_pix > 0]

                        # Loop option - faster for the number of CR (~ few k
                        # we expect for realistic data
                        inds = np.argwhere(new_cr_pix)
                        pad_data = np.pad(clean_data, 1, 'constant',
                                          constant_values=(np.nan, ))
                        # log.stdinfo('Padded array size: %s' %
                        #             str(pad_data.shape))
                        # log.stdinfo(
                        #     'Data array size: %s' % str(clean_data.shape))
                        # log.stdinfo(
                        #     'CR array size: %s' % str(new_cr_pix.shape))
                        for ind in inds:
                            # log.stdinfo(str(ind))
                            # Using nanmedian stops nan values being considered
                            # in the ordering of median values
                            # NOTE(review): indexing with zip(ind) fails on
                            # Python 3 (zip returns an iterator, not a tuple);
                            # dead code, but would need tuple(ind) if revived.
                            clean_data[zip(ind)] = np.nanmedian(
                                fp * pad_data[
                                     ind[0]:ind[0] + 3,
                                     ind[1]:ind[1] + 3
                                     ]
                            )

                    # Actually do the cosmic ray subtraction here
                    # ------
                    # STEP 1
                    # Construct a model for sky lines to subtract
                    # TODO: Add option for 'wave' keyword, which parametrizes
                    # an input wavelength solution function
                    # ------
                    log.stdinfo('Pass {}: Building sky model'.format(no_passes))
                    sky_model = scipy.ndimage.median_filter(clean_data,
                                                            size=[7, 1],
                                                            mode='constant',
                                                            cval=np.nan)
                    m5_model = scipy.ndimage.median_filter(clean_data,
                                                           size=[5, 5],
                                                           mode='constant',
                                                           cval=np.nan)
                    subbed_data = clean_data - sky_model

                    # ------
                    # STEP 2
                    # Remove object spectra
                    # FIXME: Waiting on working find apertures routine
                    # ------

                    # ------
                    # STEP 3
                    # Compute 2nd-order Laplacian of input frame
                    # This is 'curly L' in van Dokkum 2001
                    # ------
                    # Subsample the data
                    log.stdinfo('Pass {}: Computing Laplacian'.format(
                        no_passes)
                    )
                    data_shape = ext.data.shape
                    # log.stdinfo(
                    #     'data array size: %s' % str(data_shape))
                    subsampl_data = np.repeat(np.repeat(
                        ext.data, subsampling, axis=1),
                        subsampling, axis=0
                    )
                    # Convolve the subsampled data with the Laplacian kernel,
                    # trimming off the edges this introduces
                    # Bring any negative values up to 0
                    init_conv_data = scipy.signal.convolve2d(
                        subsampl_data, laplace_kernel)[1:-1, 1:-1]
                    init_conv_data[np.nonzero(init_conv_data <= 0.)] = 0.
                    # Reverse the subsampling, returning the
                    # correctly-convolved image
                    conv_data = np.reshape(init_conv_data,
                                           (
                                               data_shape[0],
                                               init_conv_data.shape[0] //
                                               data_shape[0],
                                               data_shape[1],
                                               init_conv_data.shape[1] //
                                               data_shape[1],
                                           )).mean(axis=3).mean(axis=1)

                    # ------
                    # STEP 4
                    # Construct noise model, and use it to generate the
                    # 'sigma_map' S
                    # This is the equivalent of equation (11) of van Dokkum 2001
                    # ------
                    log.stdinfo('Pass {}: Constructing sigma map'.format(
                        no_passes
                    ))
                    gain = ext.gain()
                    read_noise = ext.read_noise()
                    noise = (1.0 / gain) * ((gain * m5_model +
                                             read_noise**2)**0.5)
                    noise_min = 0.00001
                    noise[np.nonzero(noise <= noise_min)] = noise_min
                    # div by 2 to correct convolution counting
                    sigmap = conv_data / (subsampling * noise)
                    # Remove large structure with a 5x5 median filter
                    # Equation (13) of van Dokkum 2001, generates S'
                    sig_smooth = scipy.ndimage.median_filter(sigmap,
                                                             size=[5, 5],
                                                             mode='constant',
                                                             cval=np.nan)
                    sig_detrend = sigmap - sig_smooth

                    # ------
                    # STEP 5
                    # Identify the potential cosmic rays
                    # ------
                    log.stdinfo('Pass {}: Flagging cosmic rays'.format(
                        no_passes
                    ))
                    # Construct the fine-structure image
                    # (F, eqn 14 of van Dokkum)
                    m3 = scipy.ndimage.median_filter(subbed_data, size=[3, 3],
                                                     mode='constant', cval=np.nan)
                    fine_struct = m3 - scipy.ndimage.median_filter(m3,
                                                                   size=[7, 7], mode='constant', cval=np.nan)
                    # Pixels are flagged as being cosmic rays if:
                    # - The sig_detrend image (S') is > sigma_lim
                    # - The contrast between the Laplacian image (L+) and the
                    #   fine-structure image (F) is greater than f_lim
                    new_cr_pix = np.logical_and(sig_detrend > sigma_lim,
                                                (conv_data/fine_struct) > f_lim)
                    cosmic_bpm[new_cr_pix] = np.uint16(DQ.cosmic_ray)
                    new_crs = np.count_nonzero(cosmic_bpm) - curr_crs
                    log.stdinfo('Pass {}: Found {} CR pixels'.format(no_passes,
                                                                     new_crs))

                # For the moment, go with Mike Ireland's suggestion to require
                # a BPM update
                ext.mask |= cosmic_bpm

                log.debug('Flagged pix in BPM: {}'.format(
                    np.count_nonzero(ext.mask)))

            # CJS: Added this because you check for the keyword in
            # this primitive!
            # Timestamp and update filename
            gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)
            ad.update_filename(suffix=params["suffix"], strip=True)

        return adinputs
    def responseCorrect(self, adinputs=None, **params):
        """
        Use a standard star observation and reference spectrum to provide
        absolute flux calibration.

        This primitive follows the basic pattern for determining absolute flux
        from an observed standard with a relative flux scale (e.g. counts) and
        an absolute flux-calibrated reference spectrum:

        - Dividing the standard star observation (in counts or electrons per
          pixel) by
          the exposure time (in s), and then by the standard star reference
          spectrum (in some unit of flux, e.g. erg/cm:math:`^2`/s/A) gives a
          sensitivity curve in units of, in this example, counts / erg.
        - Dividing the object spectrum by the exposure time (i.e. converting
          to counts per pixel per second) by the sensitivity curve
          (counts / flux unit) yields the object spectrum in the original flux
          units of the standard star reference spectrum.

        Parameters
        ----------
        skip: bool
            If True, this primitive will just return the adinputs immediately
        std : str, giving a relative or absolute file path
            The name of the reduced standard star observation. Defaults to
            None, at which point a ValueError is thrown.
        std_spec: str, giving a relative or absolute file path
            The name of the file where the standard star spectrum (the
            reference, not the observed one) is stored. Defaults to None,
            at which point a fatal error will be thrown.

            Spectral standard references should be in the format provided
            by Space Telescope Science Institute, e.g., from
            ftp://ftp.stsci.edu/cdbs/current_calspec/. If the standard reference
            is taken from elsewhere, it needs to obey the following
            format rules:

            - The reference data is in the first science extension of the FITS
              file;
            - The data must be in FITS table format, with columns named
              ``'FLUX'`` and ``'WAVELENGTH'``;
            - The first science extension must have a header card named
              ``'TUNIT2'``, which should contain the FITS-compatible
              flux unit name corresponding to the data in the ``'FLUX'`` column.
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys[self.myself()]

        if params['skip']:
            log.stdinfo('Skipping the response (standard star) correction '
                        'step')
            return adinputs

        if params['std'] is None:
            raise ValueError('No standard star provided')

        # Let the astrodata routine handle any issues with actually opening
        # the FITS file
        std = astrodata.open(params['std'])

        # Need to find the reference standard star spectrum
        # Use the one passed by the user in the first instance, otherwise
        # attempt to locate a remote one
        # Throw an error if none found
        if params['std_spec']:
            # TODO Will need to be modified to use Gemini service
            std_spec = astropyio.open(params['std_spec'])
            # TUNIT2 carries the flux unit of the reference's FLUX column;
            # it is propagated to the outputs as BUNIT below.
            bunit = std_spec[1].header['TUNIT2']
        else:
            raise ValueError('No standard reference spectrum found/supplied')

        # Re-grid the standard reference spectrum onto the wavelength grid of
        # the observed standard (one pass per spectral order)
        regrid_std_ref = np.zeros(std[0].data.shape[:-1])
        for od in range(std[0].data.shape[0]):
            regrid_std_ref[od] = self._regrid_spect(
                std_spec[1].data['FLUX'],
                std_spec[1].data['WAVELENGTH'],
                std[0].WAVL[od, :],
                waveunits='angstrom'
            )

        # Figure out which object is actually the standard observation
        # (i.e. of the dimensions [order, wavl, object], figure which of the
        # three objects is actually the spectrum (another will be sky, and
        # the third probably empty)
        objn = targetn_dict.targetn_dict['object']
        target = -1
        if std.phu['TARGET1'] == objn: target = 0
        if std.phu['TARGET2'] == objn: target = 1
        if target < 0:
            raise ValueError(
                'Cannot determine which IFU contains standard star spectrum.'
            )

        # Compute the sensitivity function
        # (counts per second divided by reference flux; variance follows by
        # squaring the same scalings)
        sens_func = (std[0].data[:, :, target] /
                     std[0].hdr['EXPTIME']) / regrid_std_ref
        sens_func_var = (std[0].variance[:, :, target] /
                         std[0].hdr['EXPTIME']**2) / regrid_std_ref**2

        # MCW 180501
        # The sensitivity function requires significant smoothing in order to
        # prevent noise from the standard being transmitted into the data
        # The easiest option is to perform a parabolic curve fit to each order
        # QUADRATIC
        # fitfunc = lambda p, x: p[0] + p[2] * ((x - p[1])**2)
        # LINEAR
        fitfunc = lambda p, x: p[0] + (p[1] * x)
        # Residuals weighted by 1/sigma (yerr receives the variance array)
        errfunc = lambda p, x, y, yerr: np.abs(fitfunc(p, x) - y) / np.sqrt(yerr)
        # import pdb; pdb.set_trace();
        # One linear fit per order; leastsq returns (params, status), and the
        # status flag is currently ignored (the "if success" filter is
        # commented out).
        sens_func_fits = [
            p for p, success in [leastsq(errfunc,
                                         # QUADRATIC
                                         # [np.average(sens_func[od],
                                         #             weights=1./np.sqrt(
                                         #                 sens_func_var[od])),
                                         #  np.median(std[0].WAVL[od, :]),
                                         #  1.0],
                                         # LINEAR
                                         [np.average(sens_func[od, :],
                                                     weights=1. / np.sqrt(
                                                         sens_func_var[od])),
                                          0.],
                                         args=(std[0].WAVL[od, :],
                                               sens_func[od, :],
                                               sens_func_var[od, :])
                                         )
                                 for od in range(sens_func.shape[0])
                                 ]
            # if success
        ]
        # import pdb; pdb.set_trace();

        for ad in adinputs:

            if ad.phu.get(timestamp_key):
                log.warning("No changes will be made to {}, since it has "
                            "already been processed by responseCorrect".
                            format(ad.filename))
                continue

            # Check that the ad matches the standard
            if ad.res_mode() != std.res_mode():
                raise ValueError('Resolution modes do not match for '
                                 '{} and {}'.format(ad.filename, std.filename))
            if ad.arm() != std.arm():
                raise ValueError('Spectrograph arms do not match for '
                                 '{} and {}'.format(ad.filename, std.filename))
            if ad.detector_y_bin() != std.detector_y_bin() or \
                    ad.detector_x_bin() != std.detector_x_bin():
                raise ValueError('Binning does not match for '
                                 '{} and {}'.format(ad.filename, std.filename))

            # Easiest way to response correct is to stand up a new AstroData
            # instance containing the sensitivity function - this will
            # automatically handle, e.g., the VAR re-calculation
            sens_func_ad = deepcopy(ad)
            sens_func_ad.update_filename(suffix='_sensFunc', strip=True)

            for i, ext in enumerate(ad):
                # Interpolate the sensitivity function onto the wavelength
                # grid of this ad
                # Note that we can get away with this instead of a more
                # in-depth, flux-conserving regrid because:
                # (a) The sensitivity curve units do not depend on wavelength;
                # (b) The wavelength shifts involved are very small
                sens_func_regrid = np.zeros(ext.data.shape)
                #sens_func_regrid_var = np.inf * np.ones(ext.data.shape)
                for ob in range(ext.data.shape[-1]):
                    for od in range(ext.data.shape[0]):
                        # import pdb; pdb.set_trace();
                        # Evaluate the per-order fitted sensitivity model on
                        # this extension's wavelength scale
                        sens_func_regrid[od, :, ob] = fitfunc(
                            sens_func_fits[od], ext.WAVL[od, :]
                        )
                        # if od == 29:
                        #     import pdb; pdb.set_trace();
                        # sens_func_regrid[od, :, ob] = np.interp(
                        #     ad[0].WAVL[od, :],
                        #     std[0].WAVL[od, :],
                        #     sens_func[od, :],
                        #     left=0, right=0,
                        # )
                        # sens_func_regrid_var[od, :, ob] = np.interp(
                        #     ad[0].WAVL[od, :],
                        #     std[0].WAVL[od, :],
                        #     sens_func_var[od, :],
                        #     left=0, right=0,
                        # )

                sens_func_ad[i].data = sens_func_regrid
                sens_func_ad[i].variance = None

            # Do the response correction
            ad /= ad[0].hdr['EXPTIME']  # Should be the same for all exts
            ad /= sens_func_ad

            # Make the relevant header update
            ad.hdr['BUNIT'] = bunit

            # Now that we've made the correction, remove the superfluous
            # extra dimension from sens_func_ad and write out, if requested
            if params['write_result']:
                for ext in sens_func_ad:
                    ext.data = ext.data[:, :, 0]
                    try:
                        del ext.WGT
                    except AttributeError:
                        pass
                sens_func_ad.write(overwrite=True)
                ad.phu.set("SENSFUNC", os.path.abspath(sens_func_ad.path),
                           self.keyword_comments['SENSFUNC'])

            # sens_func_ad.reset(sens_func_regrid,
            #                    variance=sens_func_regrid_var)

            # Timestamp & suffix updates
            gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)
            ad.update_filename(suffix=params["suffix"], strip=True)

        return adinputs
def standardizeStructure(self, adinputs=None, **params):
"""
The Gemini-level version of this primitive
will try to attach an MDF because a GHOST image is
tagged as SPECT. Rather than set parameters for that primitive to
stop it from doing so, just override with a no-op primitive.
.. note::
This could go in primitives_ghost.py if the SLITV version
also no-ops.
"""
return adinputs
# CJS: Primitive has been renamed for consistency with other instruments
# The geometry_conf.py file is not needed; all you're doing is tiling
# extensions according to their DETSEC keywords, without gaps or rotations
# so this shouldn't need any extra information.
# def tileArrays(self, adinputs=None, **params):
# """
# Tile GHOST data into a single frame.
#
# This primitive will tile the SCI frames of the input images, along
# with the VAR and DQ frames if they exist.
#
# The tiling for GHOST is much simpler than for most Gemini
# instruments, as there are no tile gaps to account for. Data from the
# four camera amplifiers are simply stiched together, using the
# :class:`gempy.mosaic.mosaicData.MosaicData` and
# :class:`gempy.mosaic.mosaicGeometry.MosaicGeometry` classes.
#
# This primitive takes no additional parameters.
# """
#
# def simple_mosaic_function(ad):
# """
# This will go into MosaicAD as the default function.
# Being discussed within the SUSD team.
# """
# from gempy.mosaic.mosaicData import MosaicData
# from gempy.mosaic.mosaicGeometry import MosaicGeometry
#
# # Calling trim_to_data_section() corrects the WCS if the overscan
# # regions haven't been trimmed yet
# ad = gt.trim_to_data_section(ad, keyword_comments=self.keyword_comments)
#
# md = MosaicData() # Creates an empty object
# md.data_list = [] # Not needed
#
# x_bin = ad.detector_x_bin()
# y_bin = ad.detector_y_bin()
# detsecs = [(k[0]//x_bin, k[1]//x_bin, k[2]//y_bin, k[3]//y_bin)
# for k in ad.detector_section()]
# # One output block
# md.coords = {'amp_mosaic_coord': detsecs,
# 'amp_block_coord': detsecs}
# nxout = max(k[1] for k in detsecs)
# nyout = max(k[3] for k in detsecs)
# mg = MosaicGeometry({'blocksize': (nxout, nyout),
# 'mosaic_grid': (1,1)})
# return md, mg
#
# log = self.log
# log.debug(gt.log_message("primitive", self.myself(), "starting"))
# timestamp_key = self.timestamp_keys[self.myself()]
#
# adoutputs = []
# for ad in adinputs:
# if ad.phu.get(timestamp_key):
# log.warning("No changes will be made to {}, since it has "
# "already been processed by tileArrays".
# format(ad.filename))
# adoutputs.append(ad)
# continue
#
# mo = MosaicAD(ad, mosaic_ad_function=simple_mosaic_function)
# ad_mos = mo.as_astrodata(tile=True)
#
# gt.mark_history(ad_mos, primname=self.myself(),
# keyword=timestamp_key)
# ad_mos.update_filename(suffix=params["suffix"],
# strip=True)
# adoutputs.append(ad_mos)
#
# ad_mos.write(overwrite=True)
# # ad_mos.write(overwrite=True)
#
# return adoutputs
# validateData() removed since inherited Standardize method will handle it
##############################################################################
# Below are the helper functions for the user level functions in this module #
##############################################################################
def _get_polyfit_filename(self, ad, caltype):
"""
Gets the filename of the relevant initial polyfit file for this
input GHOST science image
This primitive uses the arm, resolution mode and observing epoch
of the input AstroData object to determine the correct initial
polyfit model to provide. The model provided matches the arm and
resolution mode of the data, and is the most recent model generated
before the observing epoch.
Parameters
----------
ad : :class:`astrodata.AstroData`
AstroData object to return the relevant initial model filename for
caltype : str
The initial model type (e.g. ``'rotmod'``, ``'spatmod'``, etc.)
requested. An :any:`AttributeError` will be raised if the requested
model type does not exist.
Returns
-------
str/None:
Filename (including path) of the required polyfit file
"""
return polyfit_dict.get_polyfit_filename(self.log, ad.arm(),
ad.res_mode(), ad.ut_date(),
ad.filename, caltype)
def _compute_barycentric_correction(self, ad, return_wavl=True,
loc=GEMINI_SOUTH_LOC):
"""
Compute the baycentric correction factor for a given observation and
location on the Earth.
The barycentric correction compensates for (a) the motion of the Earth
around the Sun, and (b) the motion of the Earth's surface due to
planetary rotation. It can be returned as a line velocity correction,
or a multiplicative factor with which to correct the wavelength scale;
the default is the latter.
The correction will be computed for all extensions of the input
AstroData object.
This method is built using :py:mod:`astropy <astropy>` v2, and is
developed from:
https://github.com/janerigby/jrr/blob/master/barycen.py
Parameters
----------
ad : astrodata.AstroData
The astrodata object from which to extract time and
location information. If the ad is multi-extension, a correction
factor will be returned for each extension.
return_wavl : bool
Denotes whether to return the correction as a wavelength
correction factor (True) or a velocity (False). Defaults to True.
Returns
-------
corr_facts: list of float
The barycentric correction values, one per extension of the input
ad.
"""
# Set up a SkyCoord for this ad
sc = astrocoord.SkyCoord(ad.phu.get('RA'), ad.phu.get('DEC'),
unit=(u.deg, u.deg, ))
# Compute central time of observation
dt_start = datetime.combine(
datetime.strptime(ad.phu.get('DATE-OBS'), '%Y-%m-%d').date(),
datetime.strptime(ad.phu.get('UTSTART'), '%H:%M:%S.%f').time(),
)
corr_facts = []
for ext in ad:
dt_midp = dt_start + timedelta(
seconds=ext.hdr.get('EXPTIME')/2.0
)
dt_midp = Time(dt_midp)
# Jane Rigby implementation
# # ICRS position & vel of Earth geocenter
# ep, ev = astrocoord.solar_system.get_body_barycentric_posvel(
# 'earth', dt_midp
# )
# # GCRS position & vel of observatory (loc)
# op, ov = loc.get_gcrs_posvel(dt_midp)
# # Velocities can be simply added (are axes-aligned)
# vel = ev + ov
#
# # Get unit ICRS vector in direction of observation
# sc_cart = sc.icrs.represent_as(
# astrocoord.UnitSphericalRepresentation
# ).represent_as(
# astrocoord.CartesianRepresentation
# )
#
# corr_fact = sc_cart.dot(vel).to(u.km/u.s)
# Vanilla AstroPy Implementation
corr_fact = sc.radial_velocity_correction('barycentric',
obstime=dt_midp,
location=GEMINI_SOUTH_LOC)
if return_wavl:
corr_fact = 1.0 + (corr_fact / const.c)
else:
corr_fact = corr_fact.to(u.m / u.s)
corr_facts.append(corr_fact)
return corr_facts
def _request_bracket_arc(self, ad, before=None):
"""
Request the 'before' or 'after' arc for the passed ad object.
For maximum accuracy in wavelength calibration, GHOST data is calibrated
the two arcs taken immediately before and after the exposure. However,
the Gemini calibration system is not rigged to perform such logic (it
can return multipled arcs, but cannot guarantee that they straddle
the observation in time).
This helper function works by doing the following:
- Append a special header keyword, 'ARCBEFOR', to the PHU. This keyword
will be True if a before arc is requested, or False if an after arc
is wanted.
- getProcessedArc is the invoked, followed by the _get_cal call. The
arc calibration association rules will see the special descriptor
related to the 'ARCBEFOR' header keyword, and fetch an arc
accordingly.
- The header keyword is then deleted from the ad object, returning it
to its original state.
Parameters
----------
before : bool
Denotes whether to ask for the most recent arc before (True) or
after (False) the input AD was taken. Defaults to None, at which
point :any:`ValueError` will be thrown.
Returns
-------
arc_ad : astrodata.AstroData instance (or None)
The requested arc. Will return None if no suitable arc is found.
"""
if before is None:
raise ValueError('_request_bracket_arc requires that the before '
'kwarg is either True or False. If you wish to '
'do a "standard" arc calibration fetch, simply '
'use getProcessedArc directly.')
ad.phu['ARCBEFOR'] = before
self.getProcessedArc([ad,],
howmany=None,
refresh=True)
arc_ad = self._get_cal(ad, 'processed_arc', )
del ad.phu['ARCBEFOR']
return arc_ad
@staticmethod
def _interp_spect(orig_data, orig_wavl, new_wavl,
interp='linear'):
"""
'Re-grid' a one-dimensional input spectrum by performing simple
interpolation on the data.
This function performs simple linear interpolation between points
on the old wavelength grid, moving the data onto the new
wavelength grid. It makes no attempt to be, e.g., flux-conserving.
The interpolation is performed by
:any:`scipy.interpolate.interp1d <scipy.interpolate.interp1d>`.
Parameters
----------
orig_data : 1D numpy array or list
The original spectrum data
orig_wavl : 1D numpy array or list
The corresponding wavelength values for the original spectrum data
new_wavl : 1D numpy array or list
The new wavelength values to re-grid the spectrum data to
interp : str
The interpolation method to be used. Defaults to 'linear'. Will
accept any valid value of the ``kind`` argument to
:any:`scipy.interpolate.interp1d`.
Returns
-------
regrid_data : 1D numpy array
The spectrum re-gridded onto the new_wavl wavelength points.
Will have the same shape as new_wavl.
"""
# Input checking
orig_data = np.asarray(orig_data, dtype=orig_data.dtype)
orig_wavl = np.asarray(orig_wavl, dtype=orig_wavl.dtype)
new_wavl = np.asarray(new_wavl, dtype=new_wavl.dtype)
if orig_data.shape != orig_wavl.shape:
raise ValueError('_interp_spect received data and wavelength '
'arrays of different shapes')
interp_func = interpolate.interp1d(
orig_wavl,
orig_data,
kind=interp,
fill_value=np.nan,
bounds_error=False,
)
regrid_data = interp_func(new_wavl)
# regrid_data = np.interp(new_wavl, orig_wavl, orig_data, )
return regrid_data
@staticmethod
def _regrid_spect(orig_data, orig_wavl, new_wavl,
waveunits='angstrom'):
"""
Re-grid a one-dimensional input spectrum so as to conserve total flux.
This is a more robust procedure than :meth:`_interp_spect`, and is
designed for data with a wavelength dependence in the data units
(e.g. erg/cm^2/s/A or similar).
This function utilises the :any:`pysynphot` package.
This function has been adapted from:
http://www.astrobetter.com/blog/2013/08/12/python-tip-re-sampling-spectra-with-pysynphot/
Parameters
----------
orig_data : 1D numpy array or list
The original spectrum data
orig_wavl : 1D numpy array or list
The corresponding wavelength values for the original spectrum data
new_wavl : 1D numpy array or list
The new wavelength values to re-grid the spectrum data to
waveunits : str
The units of the wavelength scale. Defaults to 'angstrom'.
Returns
-------
egrid_data : 1D numpy array
The spectrum re-gridded onto the new_wavl wavelength points.
Will have the same shape as new_wavl.
"""
spec = spectrum.ArraySourceSpectrum(wave=orig_wavl, flux=orig_data)
f = np.ones(orig_wavl.shape)
filt = spectrum.ArraySpectralElement(orig_wavl, f, waveunits=waveunits)
obs = observation.Observation(spec, filt, binset=new_wavl,
force='taper')
return obs.binflux
| [
2,
198,
2,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
... | 1.970621 | 58,580 |
#!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin developers
# Copyright (c) 2019 Bitcoin Association
# Distributed under the Open BSV software license, see the accompanying file LICENSE.
"""
This test checks the behaviour of P2SH before and after genesis.
"""
from test_framework.test_framework import ComparisonTestFramework
from test_framework.comptool import TestManager, TestInstance, RejectResult
from test_framework.blocktools import *
from test_framework.key import CECKey
from test_framework.script import *
# a little handier version of create_transaction
from test_framework.util import assert_raises_message
# sign a transaction, using the key we know about
# this signs input 0 in tx, which is assumed to be spending output n in
# spend_tx
# In this test we are checking behavior of the Wallet when trying to spend pre and post genesis P2SH script
# 1. Importing different P2SH(P2PKH) scripts in wallets on nodes 1 and 2
# 2. Putting tx that funds P2SH on node1 to the block and mine it
# 3. Moving after genesis
# 4. Putting tx that funds P2SH on node2 to the block and mine it
# 5. Check balance on node1 and node2
# 6. Try to spend funds from node1 and node2
# Script entry point: run the P2SH pre/post-genesis behaviour test.
# (The P2SH test class is defined elsewhere in this file.)
if __name__ == '__main__':
    P2SH().main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
15069,
357,
66,
8,
2177,
383,
6185,
6505,
198,
2,
15069,
357,
66,
8,
13130,
6185,
5396,
198,
2,
4307,
6169,
739,
262,
4946,
24218,
53,
3788,
5964,
11,
766,
262,
19249,
2393,... | 3.630499 | 341 |
import os
from os import listdir
from os.path import isfile, join
import re
from path import Path
import numpy as np
import pandas as pd
from poor_trader import utils
from poor_trader.utils import quotes_range
from poor_trader.config import INDICATORS_OUTPUT_PATH
def SMMA(series, window=14):
    """ get smoothed moving average.

    DOC FIX: the previous docstring documented non-existent parameters
    ``df`` and ``windows``.

    :param series: input data as a pandas Series
    :param window: smoothing window length; the EWM alpha is ``1 / window``
    :return: smoothed series (same index as the input)
    """
    smma = series.ewm(
        ignore_na=False, alpha=1.0 / window,
        min_periods=0, adjust=True).mean()
    return smma
def RSI(df_quotes, period=20, symbol=None, field='Close'):
    """
    Relative Strength Index

    Computes RS (ratio of smoothed gains to smoothed losses) and the RSI
    over the given quote field. When ``symbol`` is provided, the result is
    cached as a pickle under ``INDICATORS_OUTPUT_PATH`` and reused on
    subsequent calls for the same quote range.

    :param df_quotes: DataFrame of quotes containing ``field``
    :param period: smoothing period for the SMMA
    :param symbol: optional symbol name used for on-disk caching
    :param field: quote column to compute the RSI on
    :return: DataFrame with 'RS' and 'RSI' columns
    """
    cache_path = None
    if symbol:
        cache_path = INDICATORS_OUTPUT_PATH / '{}/{}_{}_RSI_{}.pkl'.format(
            symbol, quotes_range(df_quotes), field, period)
        if os.path.exists(cache_path):
            return pd.read_pickle(cache_path)

    delta = df_quotes[field].diff()
    # Smoothed averages of the positive and negative price moves.
    avg_gain = SMMA((delta + delta.abs()) / 2, window=period)
    avg_loss = SMMA((-delta + delta.abs()) / 2, window=period)

    result = pd.DataFrame()
    result['RS'] = relative_strength = avg_gain / avg_loss
    result['RSI'] = 100 - 100 / (1.0 + relative_strength)

    if symbol:
        if not os.path.exists(cache_path.parent):
            os.makedirs(cache_path.parent)
        result.to_pickle(cache_path)
    return result
| [
11748,
28686,
198,
6738,
28686,
1330,
1351,
15908,
198,
6738,
28686,
13,
6978,
1330,
318,
7753,
11,
4654,
198,
11748,
302,
198,
6738,
3108,
1330,
10644,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,... | 2.30192 | 573 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Programme de vérification des référentiels AMELi
"""
__author__ = 'Frederic Laurent'
__version__ = "1.1"
__copyright__ = 'Copyright 2017, Frederic Laurent'
__license__ = "MIT"
import json
import logging
import os.path
from easy_atom import helpers
class VersionDetector:
    """
    Modification detector. Abstract class.

    This class provides:
    - reading the data from the previous version
    - saving the data
    - an abstract method to implement in order to determine whether the
      current version is a new one

    NOTE(review): methods rely on ``self.logger``, ``self.version_fn`` and
    ``self.version`` being set elsewhere (presumably by an ``__init__`` or a
    subclass not shown here) -- confirm against the full class definition.
    """
    # Directory holding the version data files.
    VERSION_DATA_DIR = 'data'
    # File extension used for the version data files.
    VERSION_FN_EXT = '.json'
    def load_previous(self):
        """
        Read the data from the previous run and compute the version number.

        Sets ``self.version`` (parsed JSON) and ``self.last`` (most recent
        processed version number, as a float).

        :return: -
        """
        self.logger.debug("Loads previous version from {}".format(self.version_fn))
        self.version = helpers.load_json(self.version_fn)
        # Most recent processed version: max of the recorded version numbers.
        # NOTE(review): the sort happens on the raw "version" values BEFORE
        # the float() conversion; if versions are stored as strings, "9"
        # sorts after "10" lexicographically -- confirm they are numbers.
        self.last = float(sorted(set(map(lambda x: x["version"], self.version["versions"])))[-1])
        self.logger.info("Derniere version traitee : {}".format(self.last))
    def save_current_versions(self):
        """
        Save the version data to ``self.version_fn`` as sorted, indented JSON.

        :return:
        """
        with open(self.version_fn, 'w') as fout:
            fout.write(json.dumps(self.version, sort_keys=True, indent=4))
    def new_version(self, infos):
        """
        Record the release of a new version.

        :param infos: data associated with the version (expected to carry at
            least a "version" key)
        :return: -
        """
        self.logger.info(f"Nouvelle version : {infos}")
        # add entry in version list (newest entries are kept at the head)
        self.version["versions"].insert(0, infos)
    def in_progress_status_changed(self, infos):
        """
        Compare the file-availability status from the latest analysis
        (available or not) with the current version.

        :param infos: information about the version
        :return: False if the current data is not available; otherwise,
            whether the availability differs from the last recorded status
            for the same version number.
        """
        # NOTE(review): this debug line reads infos['available'] BEFORE the
        # "available" in infos guard below -- a missing key would raise
        # KeyError here; confirm callers always supply it.
        self.logger.debug(f"Version {infos['version']}, statut={infos['available']}")
        if "available" in infos and infos["available"]==False:
            return False
        # Current version is fully available.
        _v = float(infos["version"])
        # Fetch the history entries sharing the current version number.
        # NOTE(review): _same_version[0] below raises IndexError if no
        # history entry matches -- confirm the version is always recorded
        # before this check runs.
        _same_version = list(filter(
            lambda x: "version" in x and float(x["version"])==_v, self.version['versions']))
        self.logger.debug(f"Version actuelle {_v}, nb d'enregistrement dans l'historique {len(_same_version)}")
        self.logger.debug(f"Dernier statut de {_v} = {_same_version[0]['available']}")
        status_changed = infos['available']!=_same_version[0]['available']
        self.logger.debug(f"Changement status ? {status_changed}")
        return status_changed
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
220,
220,
220,
35232,
390,
410,
2634,
38763,
748,
40560,
69,
2634,
1156,
72,
1424,
3001,
3698,
72,
19... | 2.374611 | 1,284 |
from hypothesis import HealthCheck
from hypothesis import given, settings
from hypothesis.extra import numpy as hnp
from pytiff import *
import hypothesis.strategies as st
import numpy as np
import pytest
import subprocess
import tifffile
from skimage.data import coffee
# scanline integer tests
@settings(buffer_size=11000000)
@given(data=hnp.arrays(dtype=st.one_of(hnp.integer_dtypes(endianness="="), hnp.unsigned_integer_dtypes(endianness="=")),
shape=hnp.array_shapes(min_dims=2, max_dims=2, min_side=10, max_side=50)))
@settings(buffer_size=11000000)
@given(data=hnp.arrays(dtype=st.one_of(hnp.integer_dtypes(endianness="="), hnp.unsigned_integer_dtypes(endianness="=")),
shape=hnp.array_shapes(min_dims=2, max_dims=2, min_side=10, max_side=50)))
@settings(buffer_size=11000000)
@given(data=hnp.arrays(dtype=st.one_of(hnp.integer_dtypes(endianness="="), hnp.unsigned_integer_dtypes(endianness="=")),
shape=hnp.array_shapes(min_dims=2, max_dims=2, min_side=20, max_side=20)))
# tile integer tests
@settings(buffer_size=11000000)
@given(data=hnp.arrays(dtype=st.one_of(hnp.integer_dtypes(endianness="="), hnp.unsigned_integer_dtypes(endianness="=")),
shape=hnp.array_shapes(min_dims=2, max_dims=2, min_side=10, max_side=50)))
@settings(buffer_size=11000000)
@given(data=hnp.arrays(dtype=hnp.floating_dtypes(endianness="="),
shape=hnp.array_shapes(min_dims=2, max_dims=2, min_side=10, max_side=50), elements=st.floats(0, 1)))
@settings(buffer_size=11000000)
@given(data=hnp.arrays(dtype=hnp.floating_dtypes(endianness="="),
shape=hnp.array_shapes(min_dims=2, max_dims=2, min_side=10, max_side=50), elements=st.floats(0, 1)))
@settings(buffer_size=11000000)
@given(data=hnp.arrays(dtype=st.one_of(hnp.integer_dtypes(endianness="="), hnp.unsigned_integer_dtypes(endianness="=")),
shape=hnp.array_shapes(min_dims=2, max_dims=2, min_side=10, max_side=50)))
| [
6738,
14078,
1330,
3893,
9787,
198,
6738,
14078,
1330,
1813,
11,
6460,
198,
6738,
14078,
13,
26086,
1330,
299,
32152,
355,
289,
37659,
198,
6738,
12972,
83,
733,
1330,
1635,
198,
11748,
14078,
13,
2536,
2397,
444,
355,
336,
198,
11748,
... | 2.420716 | 782 |
"""Motif object definition.
A single Motif object stores important values, used during the following steps of
GRAFIMO's analysis, such as motif PSSM, p-value matrix, scaling factor, offset,
motif information, etc.
"""
from grafimo.GRAFIMOException import NotValidMotifMatrixException, \
NoDataFrameException, WrongMotifIDException, WrongMotifWidthException, \
WrongMotifNameException, NotValidAlphabetException, ValueException
from grafimo.utils import isListEqual, DNA_ALPHABET
from typing import List, Optional, Dict
import pandas as pd
import numpy as np
class Motif(object):
"""
This class defines a DNA motif object.
In a single object we carry:
* the original count matrix or probability matrix
* the motif scaled scoring matrix
* the P-value matrix used to assign a P-value to each motif
occurrence candidate score
* the parameters used to scale the matrix (to revert the scaled
score to the log-odds score)
* the background probability distribution used, while processing the
PWM values
* the motif width
* the minimum value in the scoring matrix
* the maximum value in the scoring matrix
* the motif name (both ID and extended name)
* the motif alphabet
...
Attributes
----------
_count_matrix : numpy.ndarray
motif probability matrix
_score_matrix : numpy.ndarray
scaled motif scoring matrix
_min_val : int
minimum value of the scaled scoring matrix
_max_value : int
maximum value of the scaled scoring matrix
_scale : int
scaling value
_offset : numpy.double
offset used during motif matrix scaling
_bg : dict
background probability distribution
_width : int
motif width
_motif_id : str
motif ID
_motif_name : str
motif extended name
_alphabet : list()
DNA motif alphabet
_isScaled : bool
flag value to state if the scoring matrix has been scaled
Methods
-------
setMotif_matrix(motif_matrix : pandas.DataFrame)
set the count matrix
setMotif_scoreMatrix(score_matrix : numpy.ndarray)
set the scoring matrix
setMotif_pval_matrix(pval_mat : numpy.array)
set the P-value matrix
setMin_val(min_val : int)
set the scoring matrix minimum value
setMax_val(max_val : int)
set the scoring matrix maximum value
setScale(scale : int)
set the scoring matrix scaling factor
setOffset(offset : numpy.double)
set the scaling offset
setBg(bgs : dict)
set the background probability distribution
setWidth(width : int)
set motif width
setMotifID(motif_id : str)
set motif ID
setMotifName(motif_name : str)
set motif extended name
setAlphabet(alphabet : list)
set DNA motif alphabet
setIsScaled(isScaled : bool)
set the isScaled flag value
getMotif_matrix()
return the motif count matrix
getMotif_scoreMatrix()
return the motif scaled scoring matrix
getMotif_pval_mat()
return the P-value matrix
getMin_val()
return the scoring matrix minimum value
getMax_val()
return the scoring matrix maximum value
getScale()
return the matrix scaling factor
getOffset()
return the offset used while scaling the motif scoring matrix
getBg()
return the background probability distribution
getWidth():
return motif width
getMotifID()
return the motif ID
getMotifName()
return the motif extended name
getAlphabet()
return the DNA motif alphabet
getIsScaled()
return the isScaled flag value
compute_minValue()
compute the minimum value of the scaled scoring motif matrix
print()
print one matrix among the counts one, the scoring one or the
P-value one
"""
#-------------------------------------------------------------------
# Motif attributes
#
_count_matrix: np.ndarray
_score_matrix: np.ndarray
_pval_matrix: np.array
_min_val: np.double
_max_val: np.double
_scale: int
_offset: np.double
_bg: dict
_nucsmap: dict
_width: int
_motif_id: str
_motif_name: str
_alphabet: list
_isScaled: bool
# class attributes value initialization
_min_val = -np.inf
_max_val = np.inf
_scale = -1
_offset = 0
_width = -1
_isScaled = False
#-------------------------------------------------------------------
# Motif methods
#
# these errors should never appear --> no need for error formatting
# can assume that debug mode == True
@property
@property
@property
@property
@property
@property
@property
@property
@property
@property
@property
@property
@property
@property
# end of Motif
| [
37811,
47733,
361,
2134,
6770,
13,
198,
198,
32,
2060,
6543,
361,
2134,
7000,
1593,
3815,
11,
973,
1141,
262,
1708,
4831,
286,
220,
198,
38,
3861,
37,
3955,
46,
338,
3781,
11,
884,
355,
32702,
350,
5432,
44,
11,
279,
12,
8367,
175... | 2.695442 | 1,865 |