content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.23 on 2019-08-07 07:54
from __future__ import unicode_literals
import django.contrib.postgres.fields.jsonb
from django.db import migrations
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
1157,
13,
1954,
319,
13130,
12,
2919,
12,
2998,
8753,
25,
4051,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198... | 2.757143 | 70 |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
"""Defines a class for the UCI datasets."""
import numpy as np
from .base_wrapper import BasePerformanceDatasetWrapper
from .uci_dataset_cleaner import bank_data_parser, bank_data_additional_parser, car_eval_parser, \
adult_data_parser
from tempeh.constants import FeatureType, Tasks, DataTypes, UCIDatasets, ClassVars # noqa
class UCIPerformanceDatasetWrapper(BasePerformanceDatasetWrapper):
"""UCI Datasets"""
dataset_map = {
UCIDatasets.BANK: (bank_data_parser, "y",
[FeatureType.CONTINUOUS] * 10 + [FeatureType.NOMINAL] * 39),
UCIDatasets.BANK_ADD: (bank_data_additional_parser, "y",
[FeatureType.CONTINUOUS] * 10 + [FeatureType.NOMINAL] * 54),
UCIDatasets.CAR: (car_eval_parser, "CAR", [FeatureType.NOMINAL] * 22),
UCIDatasets.ADULT: (adult_data_parser, "y", [FeatureType.CONTINUOUS] +
[FeatureType.NOMINAL] * 7 + [FeatureType.CONTINUOUS] * 3 +
[FeatureType.NOMINAL]),
}
metadata_map = {
UCIDatasets.BANK: (Tasks.BINARY, DataTypes.TABULAR, (45211, 48)),
UCIDatasets.BANK_ADD: (Tasks.BINARY, DataTypes.TABULAR, (41188, 63)),
UCIDatasets.CAR: (Tasks.MULTICLASS, DataTypes.TABULAR, (1728, 21)),
UCIDatasets.ADULT: (Tasks.BINARY, DataTypes.TABULAR, (32561, 13)),
}
load_function = None
feature_type = None
target_col = None
def __init__(self):
"""Initializes the UCI dataset """
bunch = type(self).load_function()
target = bunch[self._target_col].astype(int)
bunch.drop(self._target_col, axis=1, inplace=True)
bunch = bunch.astype(float)
super().__init__(bunch, target, nrows=self._size[0], data_t=self._feature_type)
self._features = list(bunch)
self._target_names = np.unique(target)
@classmethod
def generate_dataset_class(cls, name, nrows=None):
"""Generate a dataset class.
:param name: the name of the dataset
:type name: str
:param nrows: number of rows to resize the dataset to
:type nrows: int
:rtype: cls
"""
load_function, target_col, feature_type = cls.dataset_map[name]
task, data_type, size = cls.metadata_map[name]
if nrows is not None:
size = (nrows, size[1])
class_name = "".join((x.title() for x in name.split("-"))) + "PerformanceDatasetWrapper"
return type(class_name, (cls, ), {ClassVars.LOAD_FUNCTION: load_function,
ClassVars.FEATURE_TYPE: feature_type,
ClassVars.TASK: task,
ClassVars.DATA_TYPE: data_type,
ClassVars.SIZE: size, ClassVars.TARGET_COL: target_col})
| [
2,
15069,
357,
66,
8,
5413,
10501,
13,
1439,
2489,
10395,
13,
198,
2,
49962,
739,
262,
17168,
13789,
13,
198,
198,
37811,
7469,
1127,
257,
1398,
329,
262,
14417,
40,
40522,
526,
15931,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6... | 2.108108 | 1,406 |
from .async_database import AsyncDatabase
__all__ = ["AsyncDatabase"]
| [
6738,
764,
292,
13361,
62,
48806,
1330,
1081,
13361,
38105,
198,
198,
834,
439,
834,
796,
14631,
42367,
38105,
8973,
198
] | 3.380952 | 21 |
from __future__ import division
import affine
import pyproj
import numpy as np
def get_tiled_transform_shape(src_transform, src_shape, dst_res):
"""Get transform and shape of tile grid with resolution dst_res
Paramters
---------
src_transform : affine.Affine
source transform
src_shape : int, int
source shape
dst_res : float or tuple (float, float)
destination resolution
Returns
-------
affine.Affine
target transform
tuple (int, int)
target shape
"""
src_res = np.array((src_transform.a, src_transform.e))
scale = np.abs(dst_res / src_res)
dst_transform = src_transform * affine.Affine(scale[0], 0, 0, 0, scale[1], 0)
dst_shape = tuple(np.ceil(np.array(src_shape) / scale).astype(int))
return dst_transform, dst_shape
def _get_corner_coordinates(transform, height, width):
"""Get coordinates of all four pixel corners of an image of given transform and shape
Parameters
----------
transform : affine.Affine
image transform
height, width : int
image shape
Returns
-------
ndarray of shape (2, 4, height, width)
x, y corner coordinates
ul, ur, lr, ll
"""
# j index top-first to get bottom-up image with negative transform.e
i = np.arange(width + 1)
j = np.arange(height + 1)[::-1]
jj, ii = np.meshgrid(j, i, indexing='ij')
xx, yy = transform * (ii, jj)
ul = np.stack((xx[:-1, :-1], yy[:-1, :-1]), axis=0)
ur = np.stack((xx[:-1, 1:], yy[:-1, 1:]), axis=0)
lr = np.stack((xx[1:, 1:], yy[1:, 1:]), axis=0)
ll = np.stack((xx[1:, :-1], yy[1:, :-1]), axis=0)
corners = np.zeros((2, 4, height, width))
corners[:, 0, ...] = ul
corners[:, 1, ...] = ur
corners[:, 2, ...] = lr
corners[:, 3, ...] = ll
return corners
def _transform_corners(corners, src_crs, dst_crs):
"""Transform corners from array indices to dst_crs coordinates
Parameters
----------
corners : ndarray shape(2, N, ...) dtype(int)
x,y pairs for N corners
src_crs : dict or rasterio.crs.CRS
source coordinate reference system
dst_crs : dict or rasterio.crs.CRS
destination coordinate reference system
Returns
-------
ndarray, ndarray
projected coordinates
"""
transformer = pyproj.Transformer.from_crs(src_crs, dst_crs, always_xy=True)
xs, ys = corners
xout, yout = transformer.transform(xs, ys)
return xout, yout
def _corners_to_extents(xs, ys):
"""Convert arrays of corner coordinates to an extent record array
Parameters
----------
xs, ys : ndarray shape(N, ...)
x and y coordinates of N corners
Returns
-------
np.recarray shape(...)
xmin, xmax, ymin, ymax
"""
extent_rec = np.core.records.fromarrays(
[
np.min(xs, axis=0),
np.max(xs, axis=0),
np.min(ys, axis=0),
np.max(ys, axis=0)
],
names=['xmin', 'xmax', 'ymin', 'ymax']
)
return extent_rec
def get_projected_extents(transform, height, width, src_crs, dst_crs='epsg:4326'):
"""Get extents of pixels in WGS84 or other projection
Parameters
----------
transform : affine.Affine
image transform
height, width : int
image shape
src_crs : dict or rasterio.crs.CRS
source coordinate reference system
dst_crs : dict or rasterio.crs.CRS
destination coordinate reference system
default: WGS84 (lon, lat)
Returns
-------
np.recarray shape(...)
xmin, xmax, ymin, ymax
"""
corners = _get_corner_coordinates(transform, height, width)
xproj, yproj = _transform_corners(corners, src_crs, dst_crs=dst_crs)
return _corners_to_extents(xproj, yproj)
def bounds_to_projected_extents(left, bottom, right, top, src_crs, dst_crs='epsg:4326'):
"""Get extents record array from bounds
Parameters
----------
left, bottom, right, top : float
extents
src_crs, dst_crs : dict
source and destination coordinate reference systems
Returns
-------
np.recarray shape (1, 1)
with names xmin, xmax, ymin, ymax
"""
transformer = pyproj.Transformer.from_crs(src_crs, dst_crs, always_xy=True)
xs = np.array([left, left, right, right])
ys = np.array([bottom, top, top, bottom])
xproj, yproj = transformer.transform(xs, ys)
return _corners_to_extents(xproj, yproj)[np.newaxis, np.newaxis]
def get_projected_image_extent(transform, height, width, src_crs, dst_crs='epsg:4326'):
"""Get extents of a whole image in WGS84 or other projection
Parameters
----------
transform : affine.Affine
image transform
height, width : int
image shape
src_crs : dict or rasterio.crs.CRS
source coordinate reference system
dst_crs : dict or rasterio.crs.CRS
destination coordinate reference system
default: WGS84 (lon, lat)
Returns
-------
np.recarray shape (1, 1)
with names xmin, xmax, ymin, ymax
"""
left, top = transform * (0, 0)
right, bottom = transform * (height, width)
return bounds_to_projected_extents(
left, bottom, right, top, src_crs, dst_crs=dst_crs)
| [
6738,
11593,
37443,
834,
1330,
7297,
198,
198,
11748,
1527,
500,
198,
11748,
12972,
1676,
73,
198,
11748,
299,
32152,
355,
45941,
628,
198,
4299,
651,
62,
83,
3902,
62,
35636,
62,
43358,
7,
10677,
62,
35636,
11,
12351,
62,
43358,
11,
... | 2.374104 | 2,232 |
import numpy as np
import matlab
| [
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
23912,
198
] | 3.3 | 10 |
from tests.test_base import TestBase
from pathlib import Path
from capanno_utils.helpers.get_paths import get_tool_metadata
from capanno_utils.validate import metadata_validator_factory
from capanno_utils.classes.metadata.tool_metadata import ParentToolMetadata, SubtoolMetadata
from capanno_utils.classes.metadata.workflow_metadata import WorkflowMetadata
# def test_validate_workflow_metadata(self):
# validate_workflow_metadata = metadata_validator_factory(WorkflowMetadata)
# metadata_path = Path('/vagrant/capanno/workflows/ENCODE-DCC/chip-seq-pipeline2/v1.6.0/chip-seq-pipeline2-metadata.yaml')
# validate_workflow_metadata(metadata_path)
| [
198,
6738,
5254,
13,
9288,
62,
8692,
1330,
6208,
14881,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
1451,
1236,
78,
62,
26791,
13,
16794,
364,
13,
1136,
62,
6978,
82,
1330,
651,
62,
25981,
62,
38993,
198,
6738,
1451,
1236,
78,
62... | 2.918103 | 232 |
import komand
from .schema import SearchCellsInput, SearchCellsOutput
# Custom imports below
from komand_wigle.util.utils import clear_empty_values
| [
11748,
479,
296,
392,
198,
6738,
764,
15952,
2611,
1330,
11140,
34,
19187,
20560,
11,
11140,
34,
19187,
26410,
198,
198,
2,
8562,
17944,
2174,
198,
6738,
479,
296,
392,
62,
28033,
293,
13,
22602,
13,
26791,
1330,
1598,
62,
28920,
62,
... | 3.409091 | 44 |
import pickle
import tempfile
from collections import defaultdict, Counter
import networkx as nx
from networkx.drawing.nx_agraph import graphviz_layout, pygraphviz_layout
import os, sys
from synonymes.GeneOntology import GeneOntology
from utils.tmutils import normalize_gene_names
sys.path.insert(0, str(os.path.dirname("/mnt/d/dev/git/poreSTAT/")))
from porestat.utils.DataFrame import DataFrame, DataRow, ExportTYPE
from synonymes.mirnaID import miRNA, miRNAPART, miRNACOMPARISONLEVEL
from textdb.makeNetworkView import DataBasePlotter
from utils.cytoscape_grapher import CytoscapeGrapher
import matplotlib.pyplot as plt
from natsort import natsorted
if __name__ == '__main__':
cellObo = GeneOntology("/mnt/d/owncloud/data/miRExplore/obodir/meta_cells.obo")
cellTypeName2Terms = {
"EC": ["META:52"],
"MC": ["META:148", "META:99"],
"FC": ["CL:0000891"],
"SMC": ["META:83"],
}
cellType2AccTerms = {}
for cellT in cellTypeName2Terms:
cellType2AccTerms[cellT] = set()
for et in cellTypeName2Terms[cellT]:
oboT = cellObo.getID(et)
if oboT != None:
cellType2AccTerms[cellT].add(et)
for x in oboT.getAllChildren():
cellType2AccTerms[cellT].add(x.termid)
print(cellT, x.term.name)
else:
print("No such obo term:", et)
for ct in cellType2AccTerms:
print(ct, len(cellType2AccTerms[ct]))
networks = {}
# endothelial cell activation
targetMirsECA = [
'miR-21',
'miR-92a',
'miR-217',
'miR-663',
'miR-712',
'miR-7g',
'let-7g',
'miR-10a',
'miR-17-3p',
'miR-31',
'miR-124a',
'miR-125',
'miR-126',
'miR-126-5p',
'miR-143',
'miR-145',
'miR-146',
'miR-155',
'miR-181b',
'miR-221',
'miR-222']
networks['targetMirsECA'] = targetMirsECA
# monocyte
targetMirsMonocyte = [
'miR-222',
'miR-323',
'miR-503',
'miR-125b',
'miR-155',
'miR-342-5p',
'miR-17',
'miR-20a',
'miR-106a',
'miR-9',
'miR-21',
'miR-124',
'miR-125a-5p',
'miR-146a',
'miR-146b',
'miR-147',
'miR-223']
networks['targetMirsMonocyte'] = targetMirsMonocyte
# foam cell formation
targetMirsFCF = [
'miR-9',
'miR-125a-5p',
'miR-146a-5p',
'miR-155'
]
networks['targetMirsFCF'] = targetMirsFCF
# Angiogenesis
targetMirsAngio = [
'let-7f',
'miR-7f',
'miR-23',
'miR-24',
'miR-27',
'miR-126',
'miR-130a',
'miR-132',
'miR-150',
'miR-210',
'miR-218',
'miR-378',
'miR-15b',
'miR-16',
'miR-20a',
'miR-21',
'miR-26a',
'miR-17',
'miR-92',
'miR-100',
'miR-200',
'miR-221',
'miR-222',
'miR-223']
networks['targetMirsAngio'] = targetMirsAngio
# Vascular remodeling
targetMirsVasRemod = [
'miR-21',
'miR-155',
'miR-222',
'miR-126',
'miR-143',
'miR-145']
networks['targetMirsVasRemod'] = targetMirsVasRemod
# T - cell differentiation and activation
targetMirsTCell = [
'miR-17',
'miR-92',
'miR-146a',
'miR-155',
'miR-182',
'miR-326',
'miR-125b',
'miR-181a']
networks['targetMirsTCell'] = targetMirsTCell
# Cholestrol efflux
targetMirsCholEfflux = [
'miR-10b',
'miR-26',
'miR-27',
'miR-33a',
'miR-106b',
'miR-144',
'miR-145',
'miR-155',
'miR-302a',
'miR-758',
'miR-223',
'miR-378']
networks['targetMirsCholEfflux'] = targetMirsCholEfflux
# SMC proliferation / migration
targetMirsSMCProlif = [
'miR-24',
'miR-26a',
'miR-31',
'miR-146a',
'miR-155',
'miR-208',
'miR-221',
'miR-222',
'miR-7d',
'let-7d',
'miR-1',
'miR-10a',
'miR-21',
'miR-29',
'miR-100',
'miR-132',
'miR-133',
'miR-143',
'miR-145',
'miR-195',
'miR-204',
'miR-424',
'miR-638',
'miR-663']
networks['targetMirsSMCProlif'] = targetMirsSMCProlif
summaryDF = DataFrame()
summaryDF.addColumns(["Network", "Accepted miRNAs", 'Additional miRNAs', "Missing miRNAs"])
networkGraphs = {}
makeStory = [
]
allNetworks = [x for x in networks]
print(allNetworks)
#exit()
ignoreNetworks = []
networkRestrictions = {
'targetMirsECA': {
"cells": [
{"group": "cells", "name": "endothelial cell", "termid": "META:52"}
]
#, "go": [{"group": "go", "name": "", "termid": "GO:0006915"},{"group": "go", "name": "", "termid": "GO:0001775"},{"group": "go", "name": "", "termid": "GO:0006954"}]
},
'targetMirsMonocyte': {
"cells": [
{"group": "cells", "name": "monocyte", "termid": "META:148"},
{"group": "cells", "name": "macrophage", "termid": "META:99"}
]
#, "go": [{"group": "go", "name": "", "termid": "GO:0030224"}, {"group": "go", "name": "", "termid": "GO:0042116"}]
},
'targetMirsFCF': {
"cells": [{"group": "cells", "name": "foam cell", "termid": "CL:0000891"}]
#, "go": [{"group": "go", "name": "", "termid": "GO:0090077"}]
},
'targetMirsAngio': {
#"cells": [{"group": "cells", "name": "blood vessel", "termid": "UBERON:0001981"}, {"group": "cells", "name": "blood vessel elastic tissue", "termid": "UBERON:0003614"} , {"group": "cells", "name": "arterial blood vessel", "termid": "UBERON:0003509"}],
"go": [{"group": "go", "name": "angiogenesis", "termid": "GO:0001525"}]
},
'targetMirsVasRemod': {
#"disease": [], #{"group": "disease", "name": "vascular disease", "termid": "DOID:178"}
"go": [
{"group": "go", "name": "tissue remodeling", "termid": "GO:0048771"},
{"group": "go", "name": "regulation of tissue remodeling", "termid": "GO:0034103"},
{"group": "go", "name": "regulation of blood vessel remodeling", "termid": "GO:0060312"}
]
}
,'targetMirsTCell': {
"cells": [{"group": "cells", "name": "T cell", "termid": "META:44"}],
#,"go": [{"group": "go", "name": "", "termid": "GO:0030217"}]
},
'targetMirsCholEfflux': {
"cells": [{"group": "cells", "name": "foam cell", "termid": "CL:0000891"}]
#,"go": [{"group": "go", "name": "", "termid": "GO:0033344"}]
},
'targetMirsSMCProlif': {
"cells": [{"group": "cells", "name": "smooth muscle cell", "termid": "META:83"}]
#,"go": [{"group": "go", "name": "", "termid": "GO:0048659"},{"group": "go", "name": "", "termid": "GO:0014909"}]
}
}
networkToTitle = {
"targetMirsECA": "Endothelial cell activation\\\\and inflammation",
"targetMirsMonocyte": "Monocyte differentiation\\\\Macrophage activation",
"targetMirsFCF": "Foam cell formation",
"targetMirsAngio": "Angiogenesis",
"targetMirsVasRemod": "Vascular remodeling",
"targetMirsTCell": "T-cell differentiation\\\\and activation",
"targetMirsCholEfflux": "Cholesterol efflux",
"targetMirsSMCProlif": "SMC proliferation\\\\SMC migration"
}
restrictDF = DataFrame()
restrictDF.addColumns(["Network", "Cells", "Disease", "Other"], "")
for x in networkRestrictions:
nrestricts = defaultdict(list)
for rt in networkRestrictions[x]:
nrestricts[rt] = networkRestrictions[x][rt]
nrestricts['disease'] += [{'group':'disease', 'termid': 'DOID:1936', 'name': 'atherosclerosis'}]
restricts = nrestricts
networkDRdict = defaultdict(str)
networkDRdict["Network"] = networkToTitle[x]
diseaseElems = []
cellElems = []
otherElems = []
for restrictType in restricts:
if restrictType == "sentences":
continue
if restrictType in ["disease"]:
for elem in restricts[restrictType]:
diseaseElems.append( elem['name'] + " ("+elem['termid']+")")
elif restrictType in ["cells"]:
for elem in restricts[restrictType]:
cellElems.append( elem['name'] + " ("+elem['termid']+")")
else:
for elem in restricts[restrictType]:
otherElems.append( elem['name'] + " ("+elem['termid']+")")
networkDRdict['Cells'] = "\makecell[l]{" +"\\\\".join(sorted(cellElems)) + "}"
networkDRdict['Disease'] = "\makecell[l]{" + "\\\\".join(sorted(diseaseElems)) + "}"
networkDRdict['Other'] = "\makecell[l]{" + "\\\\".join(sorted(otherElems)) + "}"
dr = DataRow.fromDict(networkDRdict)
restrictDF.addRow(dr)
print(restrictDF._makeLatex())
#exit()
allMissing = {}
figidx = 0
mirna2cellOut = open("/mnt/d/yanc_network/important_process.txt", 'w')
for network in networks:
figidx+= 1
networkGraph = nx.Graph()
if network in ignoreNetworks:
continue
interactions = defaultdict(set)
acceptedInteractions = defaultdict(set)
typeByGene = defaultdict(lambda: Counter())
elemsByGene = defaultdict(lambda: defaultdict(set))
allMirna = set(networks[network])
miStr2mirna = {}
allTargetMirna = []
mirnaObj2str = {}
mirna2evs = defaultdict(set)
newAllMirna = set()
for x in allMirna:
try:
oMirna = miRNA(x)
allTargetMirna.append( oMirna )
miStr = oMirna.getStringFromParts([miRNAPART.MATURE, miRNAPART.ID, miRNAPART.PRECURSOR])
miStr2mirna[miStr] = oMirna
mirnaObj2str[oMirna] = miStr
newAllMirna.add( miStr )
except:
pass
allMirna = newAllMirna
#allMirna = set([str(x) for x in allTargetMirna])
requestData = None
if network in networkRestrictions:
requestData = networkRestrictions[network]
else:
requestData = {
'sentences': "false",
}
requestData['sentences'] = "false"
requestData["mirna"]= list(allMirna)
print(allMirna)
if not 'disease' in requestData and not network in ['targetMirsVasRemod']:
requestData['disease'] = [{'group': 'disease', 'termid': 'DOID:1936', 'name': 'atherosclerosis'}]#[{'group': 'disease', 'termid': 'DOID:1287', 'name': 'cardiovascular system disease'},{'group': 'disease', 'termid': 'DOID:2349', 'name': 'arteriosclerosis'}]
#requestData['disease'] += [
# {'group': 'disease', 'termid': 'DOID:1287', 'name': 'cardiovascular system disease'},
# {'group': 'disease', 'termid': 'DOID:2349', 'name': 'arteriosclerosis'}
# ]
print(requestData)
graph, nodeCounter, edge2datasourceCount, jsonRes = DataBasePlotter.fetchGenes(requestData)
print(len(jsonRes['rels']))
htmlDF = DataFrame()
htmlDF.addColumns(['gene rel', 'gene', 'miRNA Group', 'miRNA', 'Original Network', 'PubMed', 'MIRECORD', 'MIRTARBASE', 'DIANA', 'Disease', 'Cells', 'GO'])
for rel in jsonRes['rels']:
orderedEdge = [None, None]
if rel['ltype'] == "gene":
orderedEdge[0] = rel['lid']
elif rel['ltype'] == "mirna":
orderedEdge[1] = rel['lid']
if rel['rtype'] == "gene":
orderedEdge[0] = rel['rid']
elif rel['rtype'] == "mirna":
orderedEdge[1] = rel['rid']
orderedEdges = set()
if orderedEdge[1].startswith("microRNAS"):
continue
wasAccepted = False
"""
for tMirna in allTargetMirna:
if tMirna.accept(orderedEdge[1]):
wasAccepted = True
orderedEdges.add(
(orderedEdge[0], str(tMirna))
)
"""
if not wasAccepted:
orderedEdges.add(tuple(orderedEdge))
for oEdge in orderedEdges:
origEdge = tuple(oEdge)
edgeStatus = None
oEdge = list(oEdge)
wasFound = False
for miObj in mirnaObj2str:
if miObj.accept(oEdge[1], compLevel=miRNACOMPARISONLEVEL.PRECURSOR):
oEdge[1] = mirnaObj2str[miObj]
wasFound = True
break
if not wasFound:
try:
miObj = miRNA(oEdge[1])
miStr = miObj.getStringFromParts([miRNAPART.MATURE, miRNAPART.ID, miRNAPART.PRECURSOR])
miRNA(miStr)
oEdge[1] = miStr
except:
print("Could not read/load", oEdge, miStr)
continue
allGeneMirna = interactions[oEdge[0]]
miAccepted = False
allAcceptedStr = set()
for strMirna in allGeneMirna:
miObj = miStr2mirna.get(strMirna, None)
if miObj == None:
continue
miObjAccepts = miObj.accept(oEdge[1], compLevel=miRNACOMPARISONLEVEL.PRECURSOR)
miAccepted = miAccepted or miObjAccepts
if miObjAccepts:
acceptedInteractions[oEdge[0]].add(strMirna)
edgeStatus = "accepted"
allAcceptedStr.add(strMirna)
#print(oEdge[0], oEdge[1], strMirna)
if not miAccepted:
edgeStatus = "additional"
networkGraph.add_edge(oEdge[0], oEdge[1], color= 'g' if edgeStatus == "accepted" else "b")
typeByGene[oEdge[0]][edgeStatus] += 1
elemsByGene[oEdge[0]][edgeStatus].add(oEdge[1])
objMirna = miRNA(oEdge[1])
pmidEvs = set()
mirtarbaseEvs = set()
mirecordsEvs = set()
dianaEvs = set()
docDiseases = set()
docCells = set()
docGOs = set()
for ev in rel['evidences']:
docid = ev['docid']
mirna2evs[oEdge[1]].add(docid)
disEvs = jsonRes['pmidinfo'].get('disease', {}).get(docid, {})
for disEv in disEvs:
did = disEv['termid']
dname = disEv['termname']
docDiseases.add((dname, did, docid))
cellEvs = jsonRes['pmidinfo'].get('cells', {}).get(docid, {})
for cellEv in cellEvs:
did = cellEv['termid']
dname = cellEv['termname']
for ct in cellType2AccTerms:
ctTerms = cellType2AccTerms[ct]
if did in ctTerms:
print(network, oEdge[0], oEdge[1], ct, docid, sep="\t", file=mirna2cellOut)
docCells.add((dname, did, docid))
goEvs = jsonRes['pmidinfo'].get('go', {}).get(docid, {})
for goEv in goEvs:
did = goEv['termid']
dname = goEv['termname']
docGOs.add((dname, did, docid))
if ev['data_source'] == "DIANA":
dianaEvs.add( (ev['method'], ev['direction']) )
elif ev['data_source'] == "miRTarBase":
mirtarbaseEvs.add(
(ev['data_id'], ",".join(ev['exp_support']), ev['functional_type'], ev['docid'])
)
elif ev['data_source'] == "pmid":
pmidEvs.add((ev['docid'],))
elif ev['data_source'] == "mirecords":
mirecordsEvs.add((ev['docid']))
else:
print("Unhandled data source", ev['data_source'])
dianaLink = "http://carolina.imis.athena-innovation.gr/diana_tools/web/index.php?r=tarbasev8%2Findex&miRNAs%5B%5D=&genes%5B%5D={geneCap}&genes%5B%5D={geneLow}&sources%5B%5D=1&sources%5B%5D=7&sources%5B%5D=9&publication_year=&prediction_score=&sort_field=&sort_type=&query=1".format(
geneCap=oEdge[0].upper(), geneLow=oEdge[1].upper())
pmidStr = "<br/>".join(
[
"<a href=\"https://www.ncbi.nlm.nih.gov/pubmed/{pmid}\" target=\"_blank\">{pmid}</a>".format(pmid=elem[0]) for elem in pmidEvs
]
)
mirtarbaseStr = "<br/>".join(
[
"<a href=\"http://mirtarbase.mbc.nctu.edu.tw/php/detail.php?mirtid={mtbid}\">{mtbid}</a>".format(mtbid=elem[0]) for elem in mirtarbaseEvs
]
)
mirecordStr = "<br/>".join(
[
"<a href=\"https://www.ncbi.nlm.nih.gov/pubmed/{pmid}\" target=\"_blank\">{pmid}</a>".format(pmid=elem[0]) for elem in mirecordsEvs
]
)
dianaStr = "<br/>".join(
[
"{method} {direction}".format(method=elem[0], direction=elem[1]) for elem in dianaEvs
]
)
goStr = "<br/>".join(
[
"{method} ({direction}, {docid})".format(method=elem[0], direction=elem[1], docid=elem[2]) for
elem
in docGOs
]
)
cellStr = "<br/>".join(
[
"{method} ({direction}, {docid})".format(method=elem[0], direction=elem[1], docid=elem[2]) for
elem
in docCells
]
)
diseaseStr = "<br/>".join(
[
"{method} ({direction}, {docid})".format(method=elem[0], direction=elem[1], docid=elem[2]) for
elem
in docDiseases
]
)
addRow = {
'gene rel': oEdge[0] + "<br/>" + oEdge[1],
'gene': oEdge[0],
'miRNA Group': objMirna.getStringFromParts([miRNAPART.MATURE, miRNAPART.ID, miRNAPART.PRECURSOR]),
'miRNA': "<br/>".join(allAcceptedStr),
'Original Network': "{edgestate}</br>".format(edgestate=edgeStatus) +
"<a href=\"https://www.ncbi.nlm.nih.gov/pubmed/?term={miRes}+{miShort}\">Search PUBMED</a>".format(miRes=oEdge[1], miShort=objMirna.getStringFromParts([miRNAPART.MATURE, miRNAPART.ID]))+
"</br><a href=\"{dianaLink}\">Search DIANA</a>".format(dianaLink=dianaLink)
,
'PubMed': pmidStr,
'MIRECORD': mirecordStr,
'MIRTARBASE': mirtarbaseStr,
'DIANA': dianaStr,
'Disease': diseaseStr,
'Cells': cellStr,
'GO': goStr
}
row = DataRow.fromDict(addRow)
htmlDF.addRow(row)
for gene in interactions:
for mirna in interactions[gene]:
edgeWasFound = mirna in acceptedInteractions[gene]
if edgeWasFound:
continue
edgeStatus = "missing"
networkGraph.add_edge(gene, mirna, color='r')
typeByGene[gene][edgeStatus] += 1
elemsByGene[gene][edgeStatus].add(mirna)
objMirna = miRNA(mirna)
dianaLink = "http://carolina.imis.athena-innovation.gr/diana_tools/web/index.php?r=tarbasev8%2Findex&miRNAs%5B%5D=&genes%5B%5D={geneCap}&genes%5B%5D={geneLow}&sources%5B%5D=1&sources%5B%5D=7&sources%5B%5D=9&publication_year=&prediction_score=&sort_field=&sort_type=&query=1".format(
geneCap=gene.upper(), geneLow=gene.upper())
addRow = {
'gene rel': gene,
'gene': gene,
'miRNA Group': objMirna.getStringFromParts([miRNAPART.MATURE, miRNAPART.ID, miRNAPART.PRECURSOR]),
'miRNA': mirna,
'Original Network': "{edgestate}</br>".format(edgestate=edgeStatus) +
"<a href=\"https://www.ncbi.nlm.nih.gov/pubmed/?term={gene} {miRes}+{miShort}\">Search PUBMED</a>".format(gene=gene,miRes=mirna, miShort=objMirna.getStringFromParts([miRNAPART.MATURE, miRNAPART.ID]))+
"</br><a href=\"{dianaLink}\">Search DIANA</a>".format(dianaLink=dianaLink)
,
'PubMed': "",
'MIRECORD': "",
'MIRTARBASE': "",
'DIANA': ""
}
row = DataRow.fromDict(addRow)
htmlDF.addRow(row)
elemsByMirna = defaultdict(set)
for gene in elemsByGene:
for expMirna in allMirna:
for category in elemsByGene[gene]:
if category == 'missing':
continue
for foundMirna in elemsByGene[gene][category]:
elemsByMirna[foundMirna].add(gene)
foundMirnas = set([x for x in elemsByMirna])
minEvs = 1
while True:
addMirnas = [x for x in foundMirnas.difference(allMirna) if len(mirna2evs[x]) >= minEvs]
if len(addMirnas) > 50:
minEvs += 1
else:
break
print(network)
print("Found Mirnas", len(foundMirnas), list(foundMirnas))
print("Expected Mirnas", len(allMirna), list(allMirna))
print("Intersected Mirnas", len(foundMirnas.intersection(allMirna)), list(foundMirnas.intersection(allMirna)))
print("Missing Mirnas", len(allMirna.difference(foundMirnas)), allMirna.difference(foundMirnas))
print("Additional Mirnas", len(foundMirnas.difference(allMirna)), foundMirnas.difference(allMirna))
print("Additional Mirnas filtered", len(addMirnas))
print("Filter level", minEvs)
allMissing[network] = allMirna.difference(foundMirnas)
rowDict = {}
rowDict['Network'] = "\makecell[l]{"+networkToTitle[network] + "\\\\(min evidences: "+str(minEvs) + ", additionals: "+str(len(foundMirnas.difference(allMirna)))+")" + "}"
rowDict['Accepted miRNAs'] = "\makecell[l]{" +"\\\\".join(natsorted(foundMirnas.intersection(allMirna), key=lambda x: x.split("-")[1])) + "}"
rowDict['Additional miRNAs'] = "\makecell[l]{" + "\\\\".join(natsorted(addMirnas, key=lambda x: x.split("-")[1])) + "}"
rowDict['Missing miRNAs'] = "\makecell[l]{" + "\\\\".join(natsorted(allMirna.difference(foundMirnas), key=lambda x: x.split("-")[1])) + "}"
newRow = DataRow.fromDict(rowDict)
#["Network", "Accepted miRNAs", "Missing miRNAs"]
summaryDF.addRow( newRow )
if False:
print(network)
for gene in sorted([x for x in typeByGene]):
print(gene, typeByGene[gene], elemsByGene[gene]['missing'])
print()
print()
print(network)
for gene in sorted([x for x in typeByGene]):
print("Gene:", gene, "Status: ", ", ".join([": ".join([x, str(typeByGene[gene][x])]) for x in typeByGene[gene]]), "Missing miRNAs: "+",".join(elemsByGene[gene]['missing']))
print()
print()
print()
print()
networkGraphs[network] = networkGraph
htmlDF.export("/mnt/d/yanc_network/" + network.replace(" ", "_") + ".html", ExportTYPE.HTML)
htmlDF.export("/mnt/d/yanc_network/" + network.replace(" ", "_") + ".tsv", ExportTYPE.TSV)
figidx = 0
for stages in makeStory:
mergedGraph = networkGraphs[stages[0]]
for i in range(1, len(stages)):
mergedGraph = nx.compose(mergedGraph, networkGraphs[stages[i]])
hasLargeStage = any(['large' in stage for stage in stages])
pos = nx.spring_layout(mergedGraph)
for stage in stages:
networkGraph = networkGraphs[stage]
edges = networkGraph.edges()
colors = [networkGraph[u][v]['color'] for u, v in edges]
d = nx.degree(networkGraph)
nodes = networkGraph.nodes()
nodeColors = []
nodeSizes = []
allSizes = [x[1] for x in d]
minSize = min(allSizes)
maxSize = max(allSizes)
diffSize = maxSize-minSize
fontSize = 16
minNodeSize = 1200
figSize = (20, 14)
edgeWidth = 3
if hasLargeStage:
fontSize = 8
minNodeSize = 100
figSize = (20,30)
edgeWidth = 0.75
plt.figure(figidx, figsize=figSize)
figidx += 1
maxNodeSize = 3000
diffNodeSize = maxNodeSize-minNodeSize
nodeList = []
for x in nodes:
if any([x.lower().startswith(y) for y in ['mir', 'let']]):
nodeColors.append('blue')
else:
nodeColors.append('green')
nodeDegree = d[x]
nodeDegree -= minSize
nodeDegree = nodeDegree / diffSize
nodeSize = minNodeSize + diffNodeSize * nodeDegree
nodeSizes.append( nodeSize )
nodeList.append(x)
nx.draw(networkGraph, pos, font_size=fontSize, with_labels=False, node_color=nodeColors, edges=edges, edge_color=colors, nodelist=nodeList, node_size=nodeSizes, width=edgeWidth, font_weight='bold', dpi=1000)
for p in pos: # raise text positions
clist = list(pos[p])
if p in nodeList:
if nodeSizes[nodeList.index(p)] < 1000:
clist[1] = clist[1] + 0.005
else:
clist[1] = clist[1] + 0.02
pos[p] = tuple(clist)
nx.draw_networkx_labels(networkGraph, pos, font_weight='bold', font_size=fontSize)
plt.suptitle(stage)
plt.savefig("/mnt/d/yanc_network/" + stage.replace(" ", "_") + ".png")
plt.savefig("/mnt/d/yanc_network/" + stage.replace(" ", "_") + ".pdf")
#plt.show()
print(summaryDF._makeLatex())
for x in allMissing:
for mirna in allMissing[x]:
print(x, mirna)
print()
print()
| [
11748,
2298,
293,
201,
198,
11748,
20218,
7753,
201,
198,
201,
198,
6738,
17268,
1330,
4277,
11600,
11,
15034,
201,
198,
11748,
3127,
87,
355,
299,
87,
201,
198,
201,
198,
6738,
3127,
87,
13,
19334,
278,
13,
77,
87,
62,
6111,
1330,
... | 1.743185 | 16,545 |
# MenuTitle: Centers all components in the middle of the layer.
# -*- coding: utf-8 -*-
from __future__ import division, print_function, unicode_literals
# Ricard Garcia (@Typerepublic) - 28.12.2020
# ------------------------------------------
__doc__="""
From all selected layers, centers all components in the middle of the layer.
"""
# Clearing Macro Panel
Glyphs.clearLog()
# ---------------------
# Modules
# ---------------------
from Foundation import NSMidX
# ---------------------
# Variables
# ---------------------
f = Glyphs.font
# ---------------------
# Engine
# ---------------------
for l in f.selectedLayers:
for c in l.components:
compPosition = c.position
compPosition.x += l.width/2.0 - NSMidX(c.bounds)
c.position = compPosition
# ---------------------
# Test
# ---------------------
print("Done!")
| [
2,
21860,
19160,
25,
22223,
477,
6805,
287,
262,
3504,
286,
262,
7679,
13,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
7297,
11,
3601,
62,
8818,
11,
28000,
1098,
62,
17201,
87... | 3.434959 | 246 |
from httpx import AsyncClient
import pytest
from pymusas_web_api.server import SpacyToken, SupportedLanguages, app
@pytest.fixture
@pytest.mark.anyio
@pytest.mark.anyio
| [
6738,
2638,
87,
1330,
1081,
13361,
11792,
198,
11748,
12972,
9288,
198,
198,
6738,
279,
4948,
385,
292,
62,
12384,
62,
15042,
13,
15388,
1330,
1338,
1590,
30642,
11,
36848,
43,
33213,
11,
598,
628,
198,
31,
9078,
9288,
13,
69,
9602,
... | 2.793651 | 63 |
from heapq import heappop, heappush
sol = Solution0378()
matrix = [[1,5,9],[10,11,13],[12,13,15]]
k = 8
res = sol.kthSmallest(matrix, k)
print(res)
| [
6738,
24575,
80,
1330,
339,
1324,
404,
11,
339,
1324,
1530,
628,
198,
34453,
796,
28186,
15,
30695,
3419,
198,
6759,
8609,
796,
16410,
16,
11,
20,
11,
24,
38430,
940,
11,
1157,
11,
1485,
38430,
1065,
11,
1485,
11,
1314,
11907,
198,
... | 2.106667 | 75 |
"""Test WebSocket Connection class."""
from homeassistant.components import websocket_api
from homeassistant.components.websocket_api import const
async def test_send_big_result(hass, websocket_client):
"""Test sending big results over the WS."""
@websocket_api.websocket_command({"type": "big_result"})
@websocket_api.async_response
hass.components.websocket_api.async_register_command(send_big_result)
await websocket_client.send_json({"id": 5, "type": "big_result"})
msg = await websocket_client.receive_json()
assert msg["id"] == 5
assert msg["type"] == const.TYPE_RESULT
assert msg["success"]
assert msg["result"] == {"big": "result"}
| [
37811,
14402,
5313,
39105,
26923,
1398,
526,
15931,
198,
6738,
1363,
562,
10167,
13,
5589,
3906,
1330,
2639,
5459,
62,
15042,
198,
6738,
1363,
562,
10167,
13,
5589,
3906,
13,
732,
1443,
5459,
62,
15042,
1330,
1500,
628,
198,
292,
13361,... | 2.858333 | 240 |
#!/usr/bin/env python
"""
Contour panel of ncvue.
The panel allows plotting contour or mesh plots of 2D-variables.
This module was written by Matthias Cuntz while at Institut National de
Recherche pour l'Agriculture, l'Alimentation et l'Environnement (INRAE), Nancy,
France.
Copyright (c) 2020-2021 Matthias Cuntz - mc (at) macu (dot) de
Released under the MIT License; see LICENSE file for details.
History:
* Written Nov-Dec 2020 by Matthias Cuntz (mc (at) macu (dot) de)
* Open new netcdf file, communicate via top widget, Jan 2021, Matthias Cuntz
* Write coordinates and value on bottom of plotting canvas,
May 2021, Matthias Cuntz
.. moduleauthor:: Matthias Cuntz
The following classes are provided:
.. autosummary::
ncvContour
"""
from __future__ import absolute_import, division, print_function
import sys
import tkinter as tk
try:
import tkinter.ttk as ttk
except Exception:
print('Using the themed widget set introduced in Tk 8.5.')
sys.exit()
from tkinter import filedialog
import os
import numpy as np
import netCDF4 as nc
from .ncvutils import clone_ncvmain, format_coord_contour
from .ncvutils import set_axis_label, vardim2var
from .ncvmethods import analyse_netcdf, get_slice_miss
from .ncvmethods import set_dim_x, set_dim_y, set_dim_z
from .ncvwidgets import add_checkbutton, add_combobox, add_entry, add_imagemenu
from .ncvwidgets import add_spinbox, add_tooltip
import matplotlib
# matplotlib.use('TkAgg')
from matplotlib import pyplot as plt
# plt.style.use('seaborn-darkgrid')
plt.style.use('seaborn-dark')
# plt.style.use('fast')
__all__ = ['ncvContour']
class ncvContour(ttk.Frame):
    """
    Panel for contour plots.
    Sets up the layout with the figure canvas, variable selectors, dimension
    spinboxes, and options in __init__.
    Contains various commands that manage what will be drawn or redrawn if
    something is selected, changed, checked, etc.
    """
    #
    # Panel setup
    #
    # NOTE(review): the __init__/widget-construction code that the class
    # docstring refers to is not present in this copy of the file; the
    # attributes used below (self.z, self.x, self.y, self.zmin, self.zmax,
    # self.top, self.figure, self.axes, self.canvas, self.toolbar, ...) are
    # presumably created there — confirm against the upstream ncvue source.
    #
    # Bindings
    #
    def checked(self):
        """
        Command called if any checkbutton was checked or unchecked.
        Redraws plot.
        """
        self.redraw()
    def entered_z(self, event):
        """
        Command called if values for `zmin`/`zmax` were entered.
        Triggering `event` was bound to entry.
        Redraws plot.
        """
        self.redraw()
    def next_z(self):
        """
        Command called if next button for the plotting variable was pressed.
        Resets `zmin`/`zmax` and z-dimensions, resets `x` and `y` variables
        as well as their options and dimensions. Redraws plot.
        """
        z = self.z.get()
        cols = self.z["values"]
        idx = cols.index(z)
        idx += 1
        if idx < len(cols):
            self.z.set(cols[idx])
            self.zmin.set('None')
            self.zmax.set('None')
            set_dim_z(self)
            # a new plotting variable invalidates the x/y selections
            self.x.set('')
            self.y.set('')
            self.inv_x.set(0)
            self.inv_y.set(0)
            set_dim_x(self)
            set_dim_y(self)
            self.redraw()
    def prev_z(self):
        """
        Command called if previous button for the plotting variable was
        pressed.
        Resets `zmin`/`zmax` and z-dimensions, resets `x` and `y` variables
        as well as their options and dimensions. Redraws plot.
        """
        z = self.z.get()
        cols = self.z["values"]
        idx = cols.index(z)
        idx -= 1
        # idx > 0 keeps the leading '' entry of the combobox (see reinit)
        # from being selected
        if idx > 0:
            self.z.set(cols[idx])
            self.zmin.set('None')
            self.zmax.set('None')
            set_dim_z(self)
            self.x.set('')
            self.y.set('')
            self.inv_x.set(0)
            self.inv_y.set(0)
            set_dim_x(self)
            set_dim_y(self)
            self.redraw()
    def newnetcdf(self):
        """
        Open a new netcdf file and connect it to top.
        """
        # get new netcdf file name
        ncfile = filedialog.askopenfilename(
            parent=self, title='Choose netcdf file', multiple=False)
        if ncfile:
            # close old netcdf file
            if self.top.fi:
                self.top.fi.close()
            # reset empty defaults of top
            self.top.dunlim = ''    # name of unlimited dimension
            self.top.time = None    # datetime variable
            self.top.tname = ''     # datetime variable name
            self.top.tvar = ''      # datetime variable name in netcdf
            self.top.dtime = None   # decimal year
            self.top.latvar = ''    # name of latitude variable
            self.top.lonvar = ''    # name of longitude variable
            self.top.latdim = ''    # name of latitude dimension
            self.top.londim = ''    # name of longitude dimension
            self.top.maxdim = 0     # maximum num of dims of all variables
            self.top.cols = []      # variable list
            # open new netcdf file
            self.top.fi = nc.Dataset(ncfile, 'r')
            analyse_netcdf(self.top)
            # reset panel
            self.reinit()
            self.redraw()
    def selected_cmap(self, value):
        """
        Command called if cmap was chosen from menu.
        `value` is the chosen colormap.
        Sets text and image on the menubutton.
        """
        self.cmap['text'] = value
        self.cmap['image'] = self.imaps[self.cmaps.index(value)]
        self.redraw()
    def selected_x(self, event):
        """
        Command called if x-variable was selected with combobox.
        Triggering `event` was bound to the combobox.
        Resets `x` options and dimensions. Redraws plot.
        """
        self.inv_x.set(0)
        set_dim_x(self)
        self.redraw()
    def selected_y(self, event):
        """
        Command called if y-variable was selected with combobox.
        Triggering `event` was bound to the combobox.
        Resets `y` options and dimensions. Redraws plot.
        """
        self.inv_y.set(0)
        set_dim_y(self)
        self.redraw()
    def selected_z(self, event):
        """
        Command called if plotting variable was selected with combobox.
        Triggering `event` was bound to the combobox.
        Resets `zmin`/`zmax` and z-dimensions, resets `x` and `y` variables
        as well as their options and dimensions. Redraws plot.
        """
        self.x.set('')
        self.y.set('')
        self.inv_x.set(0)
        self.inv_y.set(0)
        self.zmin.set('None')
        self.zmax.set('None')
        set_dim_x(self)
        set_dim_y(self)
        set_dim_z(self)
        self.redraw()
    def spinned_x(self, event=None):
        """
        Command called if spinbox of x-dimensions was changed.
        Triggering `event` was bound to the spinbox.
        Redraws plot.
        """
        self.redraw()
    def spinned_y(self, event=None):
        """
        Command called if spinbox of y-dimensions was changed.
        Triggering `event` was bound to the spinbox.
        Redraws plot.
        """
        self.redraw()
    def spinned_z(self, event=None):
        """
        Command called if spinbox of z-dimensions was changed.
        Triggering `event` was bound to the spinbox.
        Redraws plot.
        """
        self.redraw()
    #
    # Methods
    #
    def reinit(self):
        """
        Reinitialise the panel from top.
        """
        # reinit from top
        self.fi = self.top.fi
        self.miss = self.top.miss
        self.dunlim = self.top.dunlim
        self.time = self.top.time
        self.tname = self.top.tname
        self.tvar = self.top.tvar
        self.dtime = self.top.dtime
        self.latvar = self.top.latvar
        self.lonvar = self.top.lonvar
        self.latdim = self.top.latdim
        self.londim = self.top.londim
        self.maxdim = self.top.maxdim
        self.cols = self.top.cols
        # reset dimensions
        # destroy the old spinbox widgets and rebuild one spinbox per
        # possible dimension (maxdim) for each of z, x and y
        for ll in self.zdlbl:
            ll.destroy()
        for ll in self.zd:
            ll.destroy()
        self.zdlblval = []
        self.zdlbl = []
        self.zdval = []
        self.zd = []
        self.zdtip = []
        for i in range(self.maxdim):
            zdlblval, zdlbl, zdval, zd, zdtip = add_spinbox(
                self.rowzd, label=str(i), values=(0,), wrap=True,
                command=self.spinned_z, state=tk.DISABLED, tooltip="None")
            self.zdlblval.append(zdlblval)
            self.zdlbl.append(zdlbl)
            self.zdval.append(zdval)
            self.zd.append(zd)
            self.zdtip.append(zdtip)
        for ll in self.xdlbl:
            ll.destroy()
        for ll in self.xd:
            ll.destroy()
        self.xdlblval = []
        self.xdlbl = []
        self.xdval = []
        self.xd = []
        self.xdtip = []
        for i in range(self.maxdim):
            xdlblval, xdlbl, xdval, xd, xdtip = add_spinbox(
                self.rowxd, label=str(i), values=(0,), wrap=True,
                command=self.spinned_x, state=tk.DISABLED, tooltip="None")
            self.xdlblval.append(xdlblval)
            self.xdlbl.append(xdlbl)
            self.xdval.append(xdval)
            self.xd.append(xd)
            self.xdtip.append(xdtip)
        for ll in self.ydlbl:
            ll.destroy()
        for ll in self.yd:
            ll.destroy()
        self.ydlblval = []
        self.ydlbl = []
        self.ydval = []
        self.yd = []
        self.ydtip = []
        for i in range(self.maxdim):
            ydlblval, ydlbl, ydval, yd, ydtip = add_spinbox(
                self.rowyd, label=str(i), values=(0,), wrap=True,
                command=self.spinned_y, state=tk.DISABLED, tooltip="None")
            self.ydlblval.append(ydlblval)
            self.ydlbl.append(ydlbl)
            self.ydval.append(ydval)
            self.yd.append(yd)
            self.ydtip.append(ydtip)
        # set variables
        # first combobox entry is empty = "nothing selected"
        columns = [''] + self.cols
        self.z['values'] = columns
        self.z.set(columns[0])
        self.zmin.set('None')
        self.zmax.set('None')
        self.x['values'] = columns
        self.x.set(columns[0])
        self.y['values'] = columns
        self.y.set(columns[0])
    #
    # Plotting
    #
    def redraw(self):
        """
        Redraws the plot.
        Reads `x`, `y`, `z` variable names, the current settings of
        their dimension spinboxes, as well as all other plotting options.
        Then redraws the plot.
        """
        # get all states
        # rowz
        z = self.z.get()
        trans_z = self.trans_z.get()
        zmin = self.zmin.get()
        if zmin == 'None':
            zmin = None
        else:
            zmin = float(zmin)
        zmax = self.zmax.get()
        if zmax == 'None':
            zmax = None
        else:
            zmax = float(zmax)
        # rowxy
        x = self.x.get()
        y = self.y.get()
        inv_x = self.inv_x.get()
        inv_y = self.inv_y.get()
        # rowcmap
        cmap = self.cmap['text']
        rev_cmap = self.rev_cmap.get()
        mesh = self.mesh.get()
        grid = self.grid.get()
        # Clear figure instead of axes because colorbar is on figure
        # Have to add axes again.
        self.figure.clear()
        self.axes = self.figure.add_subplot(111)
        xlim = [None, None]
        ylim = [None, None]
        # set x, y, axes labels
        vx = 'None'
        vy = 'None'
        vz = 'None'
        if (z != ''):
            # z axis
            vz = vardim2var(z)
            if vz == self.tname:
                # should throw an error later
                if mesh:
                    zz = self.dtime
                    zlab = 'Year'
                else:
                    zz = self.time
                    zlab = 'Date'
            else:
                zz = self.fi.variables[vz]
                zlab = set_axis_label(zz)
            zz = get_slice_miss(self, self.zd, zz)
            # both contourf and pcolormesh assume (row,col),
            # so transpose by default
            if not trans_z:
                zz = zz.T
        if (y != ''):
            # y axis
            vy = vardim2var(y)
            if vy == self.tname:
                if mesh:
                    yy = self.dtime
                    ylab = 'Year'
                else:
                    yy = self.time
                    ylab = 'Date'
            else:
                yy = self.fi.variables[vy]
                ylab = set_axis_label(yy)
            yy = get_slice_miss(self, self.yd, yy)
        if (x != ''):
            # x axis
            vx = vardim2var(x)
            if vx == self.tname:
                if mesh:
                    xx = self.dtime
                    xlab = 'Year'
                else:
                    xx = self.time
                    xlab = 'Date'
            else:
                xx = self.fi.variables[vx]
                xlab = set_axis_label(xx)
            xx = get_slice_miss(self, self.xd, xx)
        # set z to nan if not selected
        if (z == ''):
            if (x != ''):
                nx = xx.shape[0]
            else:
                nx = 1
            if (y != ''):
                ny = yy.shape[0]
            else:
                ny = 1
            zz = np.ones((ny, nx)) * np.nan
            zlab = ''
        if zz.ndim < 2:
            estr = 'Contour: z (' + vz + ') is not 2-dimensional:'
            print(estr, zz.shape)
            return
        # set x and y to index if not selected
        if (x == ''):
            nx = zz.shape[1]
            xx = np.arange(nx)
            xlab = ''
        if (y == ''):
            ny = zz.shape[0]
            yy = np.arange(ny)
            ylab = ''
        # plot options
        if rev_cmap:
            cmap = cmap + '_r'
        # plot
        # cc = self.axes.imshow(zz[:, ::-1], aspect='auto', cmap=cmap,
        #                       interpolation='none')
        # cc = self.axes.matshow(zz[:, ::-1], aspect='auto', cmap=cmap,
        #                        interpolation='none')
        # clip data to [zmin, zmax] and extend the colorbar accordingly
        extend = 'neither'
        if zmin is not None:
            zz = np.maximum(zz, zmin)
            if zmax is None:
                extend = 'min'
            else:
                extend = 'both'
        if zmax is not None:
            zz = np.minimum(zz, zmax)
            if zmin is None:
                extend = 'max'
            else:
                extend = 'both'
        if mesh:
            try:
                # zz is matrix notation: (row, col)
                cc = self.axes.pcolormesh(xx, yy, zz, vmin=zmin, vmax=zmax,
                                          cmap=cmap, shading='nearest')
                cb = self.figure.colorbar(cc, fraction=0.05, shrink=0.75,
                                          extend=extend)
            except Exception:
                estr = 'Contour: x (' + vx + '), y (' + vy + '),'
                estr += ' z (' + vz + ') shapes do not match for'
                estr += ' pcolormesh:'
                print(estr, xx.shape, yy.shape, zz.shape)
                return
        else:
            try:
                # if 1-D then len(x)==m (columns) and len(y)==n (rows): z(n,m)
                cc = self.axes.contourf(xx, yy, zz, vmin=zmin, vmax=zmax,
                                        cmap=cmap, extend=extend)
                cb = self.figure.colorbar(cc, fraction=0.05, shrink=0.75)
            except Exception:
                estr = 'Contour: x (' + vx + '), y (' + vy + '),'
                estr += ' z (' + vz + ') shapes do not match for'
                estr += ' contourf:'
                print(estr, xx.shape, yy.shape, zz.shape)
                return
        # help(self.figure)
        cb.set_label(zlab)
        self.axes.xaxis.set_label_text(xlab)
        self.axes.yaxis.set_label_text(ylab)
        # show coordinates and value under the cursor in the toolbar
        self.axes.format_coord = lambda x, y: format_coord_contour(
            x, y, self.axes, xx, yy, zz)
        # # Does not work
        # # might do it by hand, i.e. get ticks and use axhline and axvline
        # self.axes.grid(True, lw=5, color='k', zorder=100)
        # self.axes.set_zorder(100)
        # self.axes.xaxis.grid(True, zorder=999)
        # self.axes.yaxis.grid(True, zorder=999)
        xlim = self.axes.get_xlim()
        ylim = self.axes.get_ylim()
        # invert axes
        if inv_x:
            if (xlim[0] is not None):
                xlim = xlim[::-1]
                self.axes.set_xlim(xlim)
        if inv_y:
            if (ylim[0] is not None):
                ylim = ylim[::-1]
                self.axes.set_ylim(ylim)
        # draw grid lines
        # drawn manually with vlines/hlines because axes.grid does not show
        # on top of the filled contour (see commented attempts above)
        self.axes.grid(False)
        xticks = np.array(self.axes.get_xticks())
        yticks = np.array(self.axes.get_yticks())
        if grid:
            ii = np.where((xticks > min(xlim)) & (xticks < max(xlim)))[0]
            if ii.size > 0:
                ggx = self.axes.vlines(xticks[ii], ylim[0], ylim[1],
                                       colors='w', linestyles='solid',
                                       linewidth=0.5)
            ii = np.where((yticks > min(ylim)) & (yticks < max(ylim)))[0]
            if ii.size > 0:
                ggy = self.axes.hlines(yticks[ii], xlim[0], xlim[1],
                                       colors='w', linestyles='solid',
                                       linewidth=0.5)
        # redraw
        self.canvas.draw()
        self.toolbar.update()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
37811,
198,
4264,
454,
6103,
286,
299,
33967,
518,
13,
198,
198,
464,
6103,
3578,
29353,
542,
454,
393,
19609,
21528,
286,
362,
35,
12,
25641,
2977,
13,
198,
198,
1212,
8265,
373,
31... | 1.832898 | 9,563 |
from .fake_quantize import * # noqa: F403
from .fuse_modules import * # noqa: F403
from .quant_type import * # noqa: F403
from .quantize import * # noqa: F403
| [
6738,
764,
30706,
62,
40972,
1096,
1330,
1635,
220,
1303,
645,
20402,
25,
376,
31552,
201,
198,
6738,
764,
69,
1904,
62,
18170,
1330,
1635,
220,
1303,
645,
20402,
25,
376,
31552,
201,
198,
6738,
764,
40972,
62,
4906,
1330,
1635,
220,
... | 2.569231 | 65 |
"""
Copyright (c) 2021, NVIDIA CORPORATION.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from tensorflow.python.framework import load_library
import os
lib_name = r"libsok_unit_test.so"
paths = [r"/usr/local/lib"]
lib_file = None
for path in paths:
try:
file = open(os.path.join(path, lib_name))
file.close()
lib_file = os.path.join(path, lib_name)
break
except FileNotFoundError:
continue
if lib_file is None:
raise FileNotFoundError("Could not find %s" %lib_name)
plugin_unit_test_ops = load_library.load_op_library(lib_file)
# for op in dir(plugin_unit_test_ops):
# print(op)
all_gather_dispatcher = plugin_unit_test_ops.all_gather_dispatcher
csr_conversion_distributed = plugin_unit_test_ops.csr_conversion_distributed
reduce_scatter_dispatcher = plugin_unit_test_ops.reduce_scatter_dispatcher | [
37811,
198,
15069,
357,
66,
8,
33448,
11,
15127,
23929,
44680,
6234,
13,
198,
220,
198,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
... | 2.941432 | 461 |
#!/usr/bin/env python
# Use setuptools' setup(): distutils is deprecated (removed in Python 3.12),
# and the original mixed distutils.core.setup with setuptools.find_packages.
from setuptools import find_packages, setup

setup(name='srvrlss-commons',
      version='0.0.2',
      description='Common functionality for serverless arch demo',
      author='Mateusz Korzeniowski',
      author_email='mkorzeniowski93@gmail.com',
      url='https://github.com/emkor/serverless-pwr-inz',
      packages=find_packages()
      )
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
6738,
1233,
26791,
13,
7295,
1330,
9058,
198,
6738,
900,
37623,
10141,
1330,
1064,
62,
43789,
198,
198,
40406,
7,
3672,
11639,
27891,
37020,
75,
824,
12,
9503,
684,
3256,
198,
220,
220... | 2.558442 | 154 |
__author__ = 'Bohdan Mushkevych'
from synergy.db.dao.base_dao import BaseDao
from synergy.db.model.managed_process_entry import ManagedProcessEntry
from synergy.system.decorator import thread_safe
from synergy.scheduler.scheduler_constants import COLLECTION_MANAGED_PROCESS
class ManagedProcessDao(BaseDao):
    """Thread-safe Data Access Object over the managed_process table/collection."""
    @thread_safe
    def clear(self):
        """Delete every document in the managed_process collection.

        Returns the result object of the underlying ``delete_many`` call.
        """
        return self.ds.connection(COLLECTION_MANAGED_PROCESS).delete_many(filter={})
| [
834,
9800,
834,
796,
705,
33,
1219,
25604,
24257,
365,
7670,
354,
6,
198,
198,
6738,
42193,
13,
9945,
13,
67,
5488,
13,
8692,
62,
67,
5488,
1330,
7308,
35,
5488,
198,
6738,
42193,
13,
9945,
13,
19849,
13,
39935,
62,
14681,
62,
130... | 3.02 | 200 |
# coding: utf-8
# pylint: disable = C0103
"""Compatibility"""
from __future__ import absolute_import
import inspect
import sys
import numpy as np
# One-time Python 2/3 detection; the aliases below are bound at import time.
is_py3 = (sys.version_info[0] == 3)
"""compatibility between python2 and python3"""
if is_py3:
    string_type = str
    numeric_types = (int, float, bool)
    integer_types = (int, )
    range_ = range
    def argc_(func):
        """return number of arguments of a function"""
        return len(inspect.signature(func).parameters)
else:
    # Python 2 only: basestring/long/xrange do not exist on Python 3,
    # so this branch would raise NameError if ever reached there.
    string_type = basestring
    numeric_types = (int, long, float, bool)
    integer_types = (int, long)
    range_ = xrange
    def argc_(func):
        """return number of arguments of a function"""
        # getargspec is the Python-2 counterpart of inspect.signature
        return len(inspect.getargspec(func).args)
"""json"""
try:
import simplejson as json
except (ImportError, SyntaxError):
# simplejson does not support Python 3.2, it throws a SyntaxError
# because of u'...' Unicode literals.
import json
"""pandas"""
try:
from pandas import Series, DataFrame
except ImportError:
"""sklearn"""
try:
from sklearn.base import BaseEstimator
from sklearn.base import RegressorMixin, ClassifierMixin
from sklearn.preprocessing import LabelEncoder
from sklearn.utils import deprecated
try:
from sklearn.model_selection import StratifiedKFold, GroupKFold
except ImportError:
from sklearn.cross_validation import StratifiedKFold, GroupKFold
SKLEARN_INSTALLED = True
LGBMModelBase = BaseEstimator
LGBMRegressorBase = RegressorMixin
LGBMClassifierBase = ClassifierMixin
LGBMLabelEncoder = LabelEncoder
LGBMDeprecated = deprecated
LGBMStratifiedKFold = StratifiedKFold
LGBMGroupKFold = GroupKFold
except ImportError:
SKLEARN_INSTALLED = False
LGBMModelBase = object
LGBMClassifierBase = object
LGBMRegressorBase = object
LGBMLabelEncoder = None
LGBMStratifiedKFold = None
LGBMGroupKFold = None
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
2,
279,
2645,
600,
25,
15560,
796,
327,
486,
3070,
198,
37811,
7293,
25901,
37811,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
198,
11748,
10104,
198,
11748,
25064,
198,
198,
11748,
... | 2.683264 | 723 |
import requests
# Interactively collect connection parameters from stdin.
token = input("token:")
guild = input("guild:")
# NOTE(review): this prompt lacks the trailing ":" the other prompts use.
channel = input("channel")
message = input("message:")
# NOTE(review): `auth` is called below but never defined or imported in this
# file — as written this raises NameError at runtime; confirm where `auth`
# is supposed to come from.
auth(token)
| [
11748,
7007,
628,
628,
198,
30001,
796,
5128,
7203,
30001,
25,
4943,
198,
70,
3547,
796,
5128,
7203,
70,
3547,
25,
4943,
198,
17620,
796,
5128,
7203,
17620,
4943,
198,
20500,
796,
5128,
7203,
20500,
25,
4943,
198,
18439,
7,
30001,
8,
... | 3.162791 | 43 |
from datetime import datetime
from nose.tools import eq_
import factory
from kitsune.questions.models import Question, QuestionVote, Answer, AnswerVote, QuestionLocale
from kitsune.sumo.tests import LocalizingClient, TestCase, FuzzyUnicode
from kitsune.users.tests import UserFactory
class TestCaseBase(TestCase):
    """Base TestCase for the Questions app test cases."""
    # Use the localizing client so test requests go through locale handling.
    client_class = LocalizingClient
def tags_eq(tagged_object, tag_names):
    """Assert that the names of the tags on tagged_object are tag_names."""
    actual_names = sorted(tag.name for tag in tagged_object.tags.all())
    eq_(actual_names, sorted(tag_names))
| [
6738,
4818,
8079,
1330,
4818,
8079,
198,
198,
6738,
9686,
13,
31391,
1330,
37430,
62,
198,
11748,
8860,
198,
198,
6738,
19183,
1726,
13,
6138,
507,
13,
27530,
1330,
18233,
11,
18233,
37394,
11,
23998,
11,
23998,
37394,
11,
18233,
33711,... | 3.206186 | 194 |
import argparse
import sys
from os import makedirs
from os.path import isfile, join
import numpy as np
from tqdm import tqdm
# Fixed seed so the random sphere sampling below is reproducible.
rng_seed = 399
np.random.seed(rng_seed)
# Make the parent directory importable for the project-local packages below.
sys.path.append("..")
from topological_data_analysis.tda_utils import generate_points_in_spheres # noqa: E402
from topological_data_analysis.topological_polysemy import ( # noqa: E402
tps_multiple_point_cloud,
)
def parse_args() -> argparse.Namespace:
    """
    Parses arguments sent to the python script.

    Returns
    -------
    parsed_args : argparse.Namespace
        Parsed arguments
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--tps_neighbourhood_size",
        type=int,
        # default=None, not "": argparse does not run `type` on default
        # values, so an empty-string default would silently bypass the int
        # conversion and break downstream arithmetic when the flag is omitted.
        default=None,
        help="TPS neighbourhood size",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default="",
        help="Output directory where processed files will be saved to",
    )
    return parser.parse_args()
def prepare_spheres_data(noisy_spheres: bool, output_dir: str) -> list:
    """
    Prepares spheres data.
    Parameters
    ----------
    noisy_spheres : bool
        Whether or not to create noisy sphere data
    output_dir : str
        Output directory where processed files will be saved to.
    Returns
    -------
    sphere_data_filepaths : list
        List of sphere data filepaths.
    """
    # Generate sphere data
    sphere_point_shift = 2
    space_dimensionality = 300
    sphere_dimensionalities = [2, 3, 4, 5, 10, 20, 50, 300]
    point_in_each_sphere_gen = 1000000
    sphere_sample_num_intervals = 20
    sphere_sample_size = 1000
    sphere_points_data_filepaths = []
    sphere_noisy_str = "_noisy" if noisy_spheres else ""
    for sphere_dimensionality in sphere_dimensionalities:
        print(f"Sphere dimensionality: {sphere_dimensionality}")
        sphere_points_data_filepath = join(
            output_dir,
            f"sphere_points_data_{sphere_dimensionality}{sphere_noisy_str}.npy",
        )
        sampled_sphere_points_data_filepath = join(
            output_dir,
            f"sampled_sphere_points_data_{sphere_dimensionality}{sphere_noisy_str}.npy",
        )
        sphere_points_data_filepaths.append(
            (
                sphere_dimensionality,
                sphere_points_data_filepath,
                sampled_sphere_points_data_filepath,
            )
        )
        # Skip dimensionalities whose output files already exist, so the
        # preprocessing is resumable.
        if isfile(sphere_points_data_filepath) and isfile(
            sampled_sphere_points_data_filepath
        ):
            continue
        print("Generating points...")
        sphere_points, sphere_point_labels = generate_points_in_spheres(
            num_points=point_in_each_sphere_gen,
            sphere_dimensionality=sphere_dimensionality,
            space_dimensionality=space_dimensionality,
            create_intersection_point=True,
            noisy_spheres=noisy_spheres,
            random_state=rng_seed,
        )
        # Shift all points so the intersection point sits at a constant,
        # non-zero coordinate (the shift vector itself).
        sphere_point_shift_arr = np.repeat(sphere_point_shift, space_dimensionality)
        sphere_points += sphere_point_shift_arr
        # NOTE(review): "shpere" is a pre-existing typo, kept (local name only).
        shpere_points_intersection = sphere_point_shift_arr
        distances_to_intersection_point = np.zeros(sphere_points.shape[0])
        print("Computing distances...")
        for i, sphere_point in enumerate(tqdm(sphere_points)):
            distances_to_intersection_point[i] = np.linalg.norm(
                sphere_point - shpere_points_intersection
            )
        distances_to_intersection_point_sorted_indices = np.argsort(
            distances_to_intersection_point
        )
        # Sample sphere points from intervals, sorted by distance to intersection point
        sampled_sphere_point_indices = [
            distances_to_intersection_point_sorted_indices[0]  # <-- Intersection point
        ]
        interval_width = (sphere_points.shape[0] - 1) // sphere_sample_num_intervals
        for i in range(sphere_sample_num_intervals):
            # max(..., 1) skips index 0, which is the intersection point
            min_interval_idx = max(i * interval_width, 1)
            max_interval_idx = (i + 1) * interval_width
            interval_indices = distances_to_intersection_point_sorted_indices[
                np.arange(min_interval_idx, max_interval_idx)
            ]
            sampled_indices = np.random.choice(
                interval_indices, size=sphere_sample_size, replace=False
            )
            sampled_sphere_point_indices.extend(sampled_indices)
        sampled_sphere_point_indices = np.array(sampled_sphere_point_indices)
        # Columns: point coordinates, sphere label, distance to intersection.
        sphere_points_data = np.column_stack(
            (
                sphere_points,
                sphere_point_labels,
                distances_to_intersection_point,
            )
        )
        # Same layout plus the sampled index as a final column.
        sampled_sphere_points_data = np.column_stack(
            (
                sphere_points[sampled_sphere_point_indices],
                sphere_point_labels[sampled_sphere_point_indices],
                distances_to_intersection_point[sampled_sphere_point_indices],
                sampled_sphere_point_indices,
            )
        )
        # Save data
        print("Saving data...")
        np.save(sphere_points_data_filepath, sphere_points_data)
        np.save(sampled_sphere_points_data_filepath, sampled_sphere_points_data)
        # Free resources
        del sphere_points_data
        del sphere_points
        del sphere_point_labels
        del distances_to_intersection_point
        del sampled_sphere_point_indices
        del sampled_sphere_points_data
    return sphere_points_data_filepaths
def compute_tps_scores(
    sphere_data_filepaths: list, tps_neighbourhood_size: int, output_dir: str
) -> None:
    """
    Computes TPS scores of sphere data.
    Parameters
    ----------
    sphere_data_filepaths : list
        List of sphere dimensionalities and data filepaths.
    tps_neighbourhood_size : int
        TPS neighbourhood size.
    output_dir : str
        Output directory where processed files will be saved to.
    """
    for (
        sphere_dimensionality,
        sphere_points_filepath,
        sphere_point_indices_filepath,
    ) in sphere_data_filepaths:
        # Check if TPS scores are computed already
        tps_scores_filepath = join(
            output_dir,
            f"sphere_points_data_{sphere_dimensionality}_tps_{tps_neighbourhood_size}_scores.npy",
        )
        if isfile(tps_scores_filepath):
            continue
        print(f"Sphere dimensionality: {sphere_dimensionality}")
        print("Loading data...")
        sphere_points_data = np.load(sphere_points_filepath)
        # last two columns are the sphere label and the distance to the
        # intersection point (see prepare_spheres_data); drop them here
        sphere_points = sphere_points_data[:, :-2]
        # normalize each point to unit length before computing TPS
        sphere_points_normalized = sphere_points / np.linalg.norm(
            sphere_points, axis=1
        ).reshape(-1, 1)
        sampled_sphere_points_data = np.load(sphere_point_indices_filepath)
        # final column holds the sampled indices into the full point cloud
        sampled_sphere_point_indices = sampled_sphere_points_data[:, -1].astype(int)
        print("Done!")
        # Compute TPS scores
        print("Computing TPS...")
        tps_scores_point_in_spheres = tps_multiple_point_cloud(
            point_indices=sampled_sphere_point_indices,
            neighbourhood_size=tps_neighbourhood_size,
            point_cloud_normalized=sphere_points_normalized,
            return_persistence_diagram=False,
            n_jobs=-1,
            progressbar_enabled=True,
        )
        np.save(tps_scores_filepath, tps_scores_point_in_spheres)
        # Free resources
        del sphere_points_data
        del sampled_sphere_points_data
        del sampled_sphere_point_indices
        del sphere_points
        del sphere_points_normalized
        del tps_scores_point_in_spheres
def tps_spheres_experiment_data_preprocessing(
    tps_neighbourhood_size: int, output_dir: str
) -> None:
    """
    Run the full preprocessing pipeline for the TPS spheres experiment.

    Both the clean and the noisy sphere variants are processed: for each,
    the sphere point clouds are prepared and their TPS scores computed,
    with all artefacts written below `output_dir`.

    Parameters
    ----------
    tps_neighbourhood_size : int
        TPS neighbourhood size.
    output_dir : str
        Output directory where processed files will be saved to.
    """
    for is_noisy in (False, True):
        print(f"Noisy: {is_noisy}")
        noisy_str = "_noisy" if is_noisy else ""
        run_dir = join(output_dir, f"tps_spheres_experiment{noisy_str}")
        makedirs(run_dir, exist_ok=True)
        print("Preparing spheres data...")
        data_paths = prepare_spheres_data(
            noisy_spheres=is_noisy, output_dir=run_dir
        )
        print("Computing TPS scores...")
        compute_tps_scores(
            tps_neighbourhood_size=tps_neighbourhood_size,
            sphere_data_filepaths=data_paths,
            output_dir=run_dir,
        )
if __name__ == "__main__":
args = parse_args()
tps_spheres_experiment_data_preprocessing(
tps_neighbourhood_size=args.tps_neighbourhood_size, output_dir=args.output_dir
)
| [
11748,
1822,
29572,
198,
11748,
25064,
198,
6738,
28686,
1330,
285,
4335,
17062,
198,
6738,
28686,
13,
6978,
1330,
318,
7753,
11,
4654,
198,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
256,
80,
36020,
1330,
256,
80,
36020,
198,
198,
... | 2.232676 | 3,954 |
import pycuda.driver as drv
from pycuda.compiler import SourceModule
import time
import cv2
import numpy as np
import sys
import math
sys.path.append(".")
from subroutine import Subroutine
from data_class import Data
from data_source import Source
from lane_cam import LaneCam
from parabola import Parabola
# Tunable planner constants (comments translated from the original Korean).
ACTUAL_RADIUS = 300  # actual radius of the "fan rays" scan
CLEAR_RADIUS = 500  # radius always scanned ahead (to switch between fan and lane modes)
ARC_ANGLE = 110  # angle over which the fan scan is applied
OBSTACLE_OFFSET = 70  # obstacle offset when the fan scan is applied (in cm)
U_TURN_ANGLE = 10  # forward scan angle during a U-turn; symmetric about 90 degrees
U_TURN_LIDAR_CIRCLE_SIZE = 6
U_TURN_LIDAR_LINE_SIZE = 6
# colour tuples — presumably OpenCV BGR order given the cv2 import; confirm
RED = (0, 0, 255)
BLUE = (255, 0, 0)
# i.e. with U_TURN_ANGLE = 30, degrees 75 through 105 are read
if __name__ == "__main__":
import threading
from control import Control
from car_platform import CarPlatform
from monitoring import Monitoring
testDT = Data()
"""
test code
특정 미션 번호에서 시작하도록 함
"""
testDT.current_mode = 1
testDS = Source(testDT)
car = CarPlatform('COM5', testDT)
testMP = MotionPlanner(testDS, testDT)
test_control = Control(testDT)
monitor = Monitoring(testDS, testDT)
lidar_source_thread = threading.Thread(target=testDS.lidar_stream_main)
left_cam_source_thread = threading.Thread(target=testDS.left_cam_stream_main)
right_cam_source_thread = threading.Thread(target=testDS.right_cam_stream_main)
mid_cam_source_thread = threading.Thread(target=testDS.mid_cam_stream_main)
planner_thread = threading.Thread(target=testMP.main)
control_thread = threading.Thread(target=test_control.main)
car_thread = threading.Thread(target=car.main)
monitoring_thread = threading.Thread(target=monitor.main)
lidar_source_thread.start()
planner_thread.start()
time.sleep(3)
car_thread.start()
control_thread.start()
left_cam_source_thread.start()
right_cam_source_thread.start()
mid_cam_source_thread.start()
monitoring_thread.start()
| [
11748,
12972,
66,
15339,
13,
26230,
355,
1553,
85,
198,
6738,
12972,
66,
15339,
13,
5589,
5329,
1330,
8090,
26796,
198,
11748,
640,
198,
11748,
269,
85,
17,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
25064,
198,
11748,
10688,
198,
... | 2.014493 | 966 |
import time
# Rate-limit bookkeeping dictionary; the exact semantics of the keys are not
# verifiable from this file alone.
rate_limit={
    "second_use":20,
    "second":1,
    "minute_use":200,
    "minute":0,
    '200':True
}
# NOTE(review): `request` is never defined or imported in this file, so the
# call below raises NameError at runtime — confirm the intended call
# (e.g. requests.get via the requests package).
data=request()
print(data)
11748,
640,
628,
198,
4873,
62,
32374,
34758,
198,
220,
220,
220,
366,
12227,
62,
1904,
1298,
1238,
11,
198,
220,
220,
220,
366,
12227,
1298,
16,
11,
198,
220,
220,
220,
366,
11374,
62,
1904,
1298,
2167,
11,
198,
220,
220,
220,
36... | 2.128571 | 70 |
from simple_package import module1
| [
6738,
2829,
62,
26495,
1330,
8265,
16,
198
] | 4.375 | 8 |
"""
Copyright 2017-2019 Government of Canada - Public Services and Procurement Canada - buyandsell.gc.ca
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import pytest
from tempfile import NamedTemporaryFile
from time import sleep, time
from von_anchor.error import AbsentGenesis, AbsentPool, ExtantPool, JSONValidation
from von_anchor.frill import Ink
from von_anchor.nodepool import NodePool, NodePoolManager, Protocol
@pytest.mark.skipif(False, reason='short-circuiting')
@pytest.mark.asyncio
@pytest.mark.skipif(False, reason='short-circuiting')
@pytest.mark.asyncio
@pytest.mark.skipif(False, reason='short-circuiting')
@pytest.mark.asyncio
| [
37811,
198,
15269,
2177,
12,
23344,
5070,
286,
3340,
532,
5094,
6168,
290,
31345,
495,
434,
3340,
532,
2822,
392,
7255,
13,
36484,
13,
6888,
198,
198,
26656,
15385,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156... | 3.43769 | 329 |
import time
import threading
# Global generator, shared with long_io (translated from the original comment).
gen = None  # global generator, shared with long_io
# NOTE(review): the function that @gen_coroutine should decorate (and the
# definitions of `gen_coroutine` and `main`) are missing from this copy —
# a decorator directly above an `if` statement is a SyntaxError; confirm
# against the original source.
@gen_coroutine
if __name__ == "__main__":
    main()
| [
11748,
640,
198,
11748,
4704,
278,
198,
198,
5235,
796,
6045,
220,
1303,
10263,
227,
101,
161,
109,
222,
37955,
22755,
238,
161,
247,
101,
171,
120,
234,
17739,
109,
6511,
62,
952,
45635,
18796,
101,
628,
628,
198,
31,
5235,
62,
102... | 1.90625 | 64 |
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 11 12:18:58 2019
@author: f.divruno
"""
import numpy as np
import matplotlib.pyplot as plt
#%%
# A class that will downsample the data and recompute when zoomed in a figure.
# The function plot_max_peak() should be used to plot very big amounts of data. | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
26223,
2758,
1367,
1105,
25,
1507,
25,
3365,
13130,
198,
198,
31,
9800,
25,
277,
13,
7146,
5143,
78,
198,
37811,
198,
11748,
299,
32152,
355,
4... | 2.990099 | 101 |
"""
Module with constants for Cassandra type codes.
These constants are useful for
a) mapping messages to cqltypes (cassandra/cqltypes.py)
b) optimized dispatching for (de)serialization (cassandra/encoding.py)
Type codes are repeated here from the Cassandra binary protocol specification:
0x0000 Custom: the value is a [string], see above.
0x0001 Ascii
0x0002 Bigint
0x0003 Blob
0x0004 Boolean
0x0005 Counter
0x0006 Decimal
0x0007 Double
0x0008 Float
0x0009 Int
0x000A Text
0x000B Timestamp
0x000C Uuid
0x000D Varchar
0x000E Varint
0x000F Timeuuid
0x0010 Inet
0x0011 SimpleDateType
0x0012 TimeType
0x0013 ShortType
0x0014 ByteType
0x0015 DurationType
0x0020 List: the value is an [option], representing the type
of the elements of the list.
0x0021 Map: the value is two [option], representing the types of the
keys and values of the map
0x0022 Set: the value is an [option], representing the type
of the elements of the set
"""
CUSTOM_TYPE = 0x0000
AsciiType = 0x0001
LongType = 0x0002
BytesType = 0x0003
BooleanType = 0x0004
CounterColumnType = 0x0005
DecimalType = 0x0006
DoubleType = 0x0007
FloatType = 0x0008
Int32Type = 0x0009
UTF8Type = 0x000A
DateType = 0x000B
UUIDType = 0x000C
VarcharType = 0x000D
IntegerType = 0x000E
TimeUUIDType = 0x000F
InetAddressType = 0x0010
SimpleDateType = 0x0011
TimeType = 0x0012
ShortType = 0x0013
ByteType = 0x0014
DurationType = 0x0015
ListType = 0x0020
MapType = 0x0021
SetType = 0x0022
UserType = 0x0030
TupleType = 0x0031
| [
37811,
198,
26796,
351,
38491,
329,
46750,
2099,
12416,
13,
198,
198,
4711,
38491,
389,
4465,
329,
628,
220,
220,
220,
257,
8,
16855,
6218,
284,
269,
13976,
19199,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220... | 1.966734 | 992 |
import urllib2
baseUrl="http://maps.googleapis.com/maps/api/geocode/json?latlng=%s,%s&sensor=false"
geoInfo=open("geoInfoFile","w")
lat=28.412593
while(lat<=28.881338):
lng=76.83806899999999
while(lng<=77.3484579):
url=baseUrl%(lat,lng)
print lat,lng
geoInfo.write(urllib2.urlopen(url).read()+"\n")
lng+=0.001
lat+=0.001
| [
11748,
2956,
297,
571,
17,
198,
8692,
28165,
2625,
4023,
1378,
31803,
13,
13297,
499,
271,
13,
785,
14,
31803,
14,
15042,
14,
469,
420,
1098,
14,
17752,
30,
15460,
75,
782,
28,
4,
82,
11,
4,
82,
5,
82,
22854,
28,
9562,
1,
198,
... | 1.964912 | 171 |
from sys import exit,argv
from memory_manager import MemoryManager
def print_help():
""" Print help for user """
response="""RESERVAR <nombre> <cantidad>
Representa una reserva de espacio de <cantidad> bloques,
asociados al identificador <nombre>.
LIBERAR <nombre>
Representa una liberación del espacio que contiene el
identificador <nombre>.
MOSTRAR
Debe mostrar una representación gráfica (en texto) de las
listas de bloques libres, así como la información de nombres
y la memoria que tienen asociada a los mismos.
SALIR
Debe salir del simulador.\n"""
print(response)
if __name__ == "__main__":
try:
my_space=int(argv[1])
except:
my_space=input("¿Cuánta memoria desea?: ")
my_space=int(my_space)
main(my_space) | [
6738,
25064,
1330,
8420,
11,
853,
85,
198,
6738,
4088,
62,
37153,
1330,
14059,
13511,
198,
198,
4299,
3601,
62,
16794,
33529,
198,
220,
220,
220,
37227,
12578,
1037,
329,
2836,
37227,
628,
220,
220,
220,
2882,
2625,
15931,
19535,
1137,
... | 2.170792 | 404 |
def epoch_time(start_time, end_time):
"""Compute epoch time"""
elapsed_time = end_time - start_time
elapsed_mins = int(elapsed_time / 60)
elapsed_secs = int(elapsed_time - (elapsed_mins * 60))
return elapsed_mins, elapsed_secs
| [
4299,
36835,
62,
2435,
7,
9688,
62,
2435,
11,
886,
62,
2435,
2599,
198,
220,
220,
220,
37227,
7293,
1133,
36835,
640,
37811,
198,
220,
220,
220,
42118,
62,
2435,
796,
886,
62,
2435,
532,
923,
62,
2435,
198,
220,
220,
220,
42118,
6... | 2.655914 | 93 |
# Copyright 2020 Jitsuin, inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This is API SAMPLE CODE, not for production use.
# pylint: disable=missing-docstring
import logging
from sys import exit as sys_exit
from sys import stdout as sys_stdout
from archivist.parser import common_parser
from ..testing.parser import common_endpoint
from .run import run
LOGGER = logging.getLogger(__name__)
| [
2,
220,
220,
15069,
12131,
449,
19831,
259,
11,
753,
198,
2,
198,
2,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
220,
220,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,... | 3.413284 | 271 |
#import the module that will help create a connection to the API URL and send a request
#import json modules that will format the JSON response to a dict
import urllib.request, json
from .models import Source,Top_Headlines,Everything
#get the api key
api_key = None
#get the source base url
base_url = None
#get the top headlines url
headlines_url = None
#get everything url
everything_url = None
def get_sources() :
'''
get the json response to our url request
'''
get_sources_url = base_url.format(api_key)
with urllib.request.urlopen(get_sources_url) as url :
get_sources_data = url.read()
get_sources_response = json.loads(get_sources_data)
source_results = None
if get_sources_response['sources'] :
source_results_list = get_sources_response['sources']
source_results = process_source_results(source_results_list)
return source_results
def process_source_results(source_list) :
'''
process source result and transform them to a list of objects
'''
source_results = []
for source_item in source_list :
id = source_item.get('id')
name = source_item.get('name')
description = source_item.get('description')
url = source_item.get('url')
category = source_item.get('category')
language = source_item.get('language')
country = source_item.get('country')
source_object = Source(id, name, description, url, category, language, country)
source_results.append(source_object)
return source_results
def process_top_headlines_results(top_headlines_results_list) :
'''
process Top_headlines results and transform them to a list of objects
'''
top_headlines_results = []
for top_headlines_item in top_headlines_results_list :
author = top_headlines_item.get('author')
title = top_headlines_item.get('title')
description = top_headlines_item.get('description')
url = top_headlines_item.get('url')
urlToImage = top_headlines_item.get('urlToImage')
publishedAt = top_headlines_item.get('publishedAt')
content = top_headlines_item.get('content')
top_headlines_object = Top_Headlines(author, title, description, url, urlToImage, publishedAt, content)
top_headlines_results.append(top_headlines_object)
return top_headlines_results
def get_everything() :
'''
get the json response to our url request
'''
get_everything_url = everything_url.format(api_key)
with urllib.request.urlopen(get_everything_url) as url :
get_everything_data = url.read()
get_everything_response = json.loads(get_everything_data)
everything_results = None
if get_everything_response['articles'] :
everything_results_list = get_everything_response['articles']
everything_results = process_everything_results(everything_results_list)
return everything_results
def process_everything_results(everything_results_list) :
'''
process everything result and transform them to a list of objects
'''
everything_results = []
for everything_item in everything_results_list :
author = everything_item.get('author')
title = everything_item.get('title')
description = everything_item.get('description')
url = everything_item.get('url')
urlToImage = everything_item.get('urlToImage')
publishedAt = everything_item.get('publishedAt')
content = everything_item.get('content')
everything_object = Everything(author, title, description, url, urlToImage, publishedAt, content)
everything_results.append(everything_object)
return everything_results
| [
2,
11748,
262,
8265,
326,
481,
1037,
2251,
257,
4637,
284,
262,
7824,
10289,
290,
3758,
257,
2581,
198,
2,
11748,
33918,
13103,
326,
481,
5794,
262,
19449,
2882,
284,
257,
8633,
198,
11748,
2956,
297,
571,
13,
25927,
11,
33918,
198,
... | 3.12511 | 1,135 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import zipfile,os
import pdf_tools
dev = 0
def force_to_unicode(text):
"If text is unicode, it is returned as is. If it's str, convert it to Unicode using UTF-8 encoding"
return text if isinstance(text, unicode) else text.decode('utf8')
if dev ==1:
zips=[u'../tmp/sel_2015_fisica.zip', u'../tmp/sel_2016_fisica.zip', u'../tmp/sel_2017_fisica.zip']
merge(zips,0)
#pdf_tools.pdf_cat(pdf_tools.find_files("../tmp/","pdf"),"test.pdf")
#pdf_tools.merge(["A.pdf","B.pdf","C.pdf"],"1.pdf")
"""
path="/home/roberto/Programación/Python/PROJECTS/BUFFERPDF/tmp/test/"
for myzip in zip_files:
myfile=zipfile.ZipFile(path+myzip)
file_names = myfile.namelist()
nfname=[]
for x in file_names:
nfname.append(path+x)
myfile.extractall(path)
print pdf_tools.merge(nfname,path+"new.pdf")
""" | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
19974,
7753,
11,
418,
198,
11748,
37124,
62,
31391,
220,
198,
198,
7959,
796,
657,
198,
198,
4299,
2700,
... | 2.231343 | 402 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pretend
from warehouse import db
from warehouse.cli import sponsors
from warehouse.sponsors.models import Sponsor
def raise_(ex):
"""
Used by lambda functions to raise exception
"""
raise ex
| [
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
743,
7330,
257,
4866,
286,
262,
13789,
379,
198,
2,... | 3.843434 | 198 |
import scrapy
from scrapy_splash import SplashRequest
| [
11748,
15881,
88,
198,
6738,
15881,
88,
62,
22018,
1077,
1330,
45275,
18453,
628
] | 3.928571 | 14 |
from distutils.core import setup
setup(
name='coleto',
version='0.1.0dev',
date="2021-02-07",
author="Christof Schöch"
author_email="c.schoech@gmail.com"
description="Tool for text comparison."
programming_language="Python3"
license='MIT Licence',
long_description=open('README.md').read(),
)
| [
6738,
1233,
26791,
13,
7295,
1330,
9058,
198,
198,
40406,
7,
198,
220,
220,
220,
1438,
11639,
1073,
293,
1462,
3256,
198,
220,
220,
220,
2196,
11639,
15,
13,
16,
13,
15,
7959,
3256,
198,
220,
220,
220,
3128,
2625,
1238,
2481,
12,
... | 2.55814 | 129 |
# -*- coding: utf-8 -*-
from django.template import loader
from django.utils import formats
from django.utils.text import Truncator
import django_tables2 as tables
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from ..html import AttributeDict, Icon
def merge_attrs(base_attrs, attrs):
"""
Merge attrs based in attribute dict.
"""
td = AttributeDict(base_attrs.get('td', {}))
th = AttributeDict(base_attrs.get('th', {}))
# merge td
for key, value in attrs.get('td', {}).items():
td.attr(key, value)
# merge th
for key, value in attrs.get('th', {}).items():
th.attr(key, value)
return {'td': td, 'th': th}
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
42625,
14208,
13,
28243,
1330,
40213,
198,
6738,
42625,
14208,
13,
26791,
1330,
17519,
198,
6738,
42625,
14208,
13,
26791,
13,
5239,
1330,
833,
19524,
1352,
198... | 2.58156 | 282 |
from __future__ import print_function
import sys
sys.path.insert(1,"../../../")
from tests import pyunit_utils
import h2o
from h2o.utils.typechecks import assert_is_type
from h2o.backend.connection import H2OResponse
def h2oparse_setup():
"""
Python API test: h2o.parse_setup(raw_frames, destination_frame=None, header=0, separator=None, column_names=None,
column_types=None, na_strings=None)
"""
col_types=['enum','numeric','enum','enum','enum','numeric','numeric','numeric']
col_headers = ["CAPSULE","AGE","RACE","DPROS","DCAPS","PSA","VOL","GLEASON"]
hex_key = "training_data.hex"
fraw = h2o.import_file(pyunit_utils.locate("smalldata/prostate/prostate_cat.csv"), parse=False)
setup = h2o.parse_setup(fraw, destination_frame=hex_key, header=1, separator=',', column_names=col_headers,
column_types=col_types, na_strings=["NA"])
assert_is_type(setup, H2OResponse)
assert setup["number_columns"]==len(col_headers), "h2o.parse_setup() command is not working."
if __name__ == "__main__":
pyunit_utils.standalone_test(h2oparse_setup)
else:
h2oparse_setup()
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
11748,
25064,
198,
17597,
13,
6978,
13,
28463,
7,
16,
553,
40720,
40720,
40720,
4943,
198,
6738,
5254,
1330,
12972,
20850,
62,
26791,
198,
11748,
289,
17,
78,
198,
6738,
289,
17,
78,... | 2.519912 | 452 |
import sys
from src.krpsim.utils import split_need_result_delay, build_process_dic
class Parser:
"""
Parsing Class, heart of the parsing is here.
-> stocks is a list of Stock class instances
-> content is a list of Process class instances
-> optimize is a list of Optimize class instances
-> delay corresponds to the maximal delay given as a parameter
"""
def main_parsing(self):
"""
Main parsing loop, the goal here is to iterate over
the fd content, and to parse every line we encounter to
determine its type
"""
curr_line = None
for line in self.fd:
if line[0] == '#':
print("Found a comment") if self.verbose == 1 or self.verbose == 3 else 0
continue
elif len(line) == 1 and line[0] == '\n':
print("Skipping empty line") if self.verbose == 1 or self.verbose == 3 else 0
continue
else:
curr_line = self.parse_line(line)
self.fill_parser_lists(curr_line)
print(curr_line) if self.verbose == 1 or self.verbose == 3 else 0
self.fd = self.fd.close()
def fill_parser_lists(self, line):
"""
Comparing the line type after parse_line,
we compare class instances with the base classes
"""
if type(line) is Process:
self.content[line.name] = line
elif type(line) is Optimize:
self.optimize.append(line)
elif type(line) is Stock:
self.stocks[line.name] = line
def verify_parsing_content(self):
"""
Afterward check method for the parsing content
"""
if not self.optimize:
sys.exit("Missing optimize content.")
elif not self.stocks:
sys.exit("Missing initial stocks.")
elif not self.content:
sys.exit("No process detected inside {}, please provide at least one".format(self.path))
#Check if what need to be optimized is indeed inside at least one process and is accesible
#like if the process never gets called because of stocks that can never be filled, then
#the optimize values are not valid.
def parse_line(self, line):
"""
Method used to parse a line and extract the corresponding elem
tmp -> Used for splitting the line and removing some junk from the list
res -> Class instance, either Stock, Process or Optimize
every instance is filled with the corresponding params
"""
tmp = None
res = None
line = line.replace('\n', '')
tmp = [i for i in line.split(':')]
tmp.pop(tmp.index('')) if '' in tmp else tmp
# Parsing for stock elem
if '(' not in line:
if tmp[0].isalpha() and tmp[1].isdecimal() or\
tmp[0].replace('_', '').isalpha() and tmp[1].isdecimal():
res = Stock(tmp[0], int(tmp[1]))
else:
res = 'Error'
# Parsing for optimize elem
elif 'optimize:' in line:
if tmp[-1].isdigit():
sys.exit("You can't specify a delay for an optimize element, error with \033[4m{}\033[0m"
.format(line))
tmp = str(tmp[1]).replace('(', '').replace(')', '')
res = Optimize(tmp.split(';'))
# Parsing for process elem
elif tmp[-1].isdigit():
tmp = [i.replace(')', '') for i in line.split('(')]
name, need, result, delay = split_need_result_delay(tmp, line)
res = Process(name, build_process_dic(need), build_process_dic(result), delay)
# Invalid elem
elif not tmp[-1].isdigit():
sys.exit("Error with \033[4m{}\033[0m, invalid element.".format(line))
return res
class Stock:
"""
Stock elem associated Class
-> name is obviously the stock name
-> qty is the quantity available for this stock
"""
class Process:
"""
Process elem associated Class
-> name is obviously the process name
-> need is a list of stocks (name & qty) needed to run this process
-> result is a list of resulting stocks after running the process
-> delay is the delay needed to run the process
"""
class Optimize:
"""
Optimize elem associated Class
-> opti_elems is a list of name associated with what is
to optimize, like client and time
"""
| [
11748,
25064,
198,
198,
6738,
12351,
13,
38584,
862,
320,
13,
26791,
1330,
6626,
62,
31227,
62,
20274,
62,
40850,
11,
1382,
62,
14681,
62,
67,
291,
198,
198,
4871,
23042,
263,
25,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
23042... | 2.358005 | 1,905 |
""" Wrapper functions for TensorFlow layers.
Author: Charles R. Qi
Date: November 2016
Updated by Yue Wang and Yongbin Sun
Further improved by Liang PAN
"""
import numpy as np
import tensorflow as tf
import sys
import os
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(BASE_DIR, '../tf_ops/grouping'))
sys.path.append(os.path.join(BASE_DIR, '../tf_ops/sampling'))
from tf_grouping import select_top_k
from tf_sampling import principal_feature_sample
def _variable_on_cpu(name, shape, initializer, use_fp16=False, trainable=True):
  """Create a variable that lives on CPU memory.

  Args:
    name: name of the variable
    shape: list of ints
    initializer: initializer for the variable
    use_fp16: bool, store the variable as float16 instead of float32
    trainable: bool, whether the variable should be trainable

  Returns:
    Variable Tensor
  """
  if use_fp16:
    dtype = tf.float16
  else:
    dtype = tf.float32
  # Pin the variable to the CPU so it can be shared across GPU towers.
  with tf.device('/cpu:0'):
    return tf.get_variable(name, shape, initializer=initializer,
                           dtype=dtype, trainable=trainable)
def _variable_with_weight_decay(name, shape, stddev, wd, use_xavier=True):
  """Create an initialized variable, optionally registering L2 weight decay.

  The variable is initialized with a Xavier initializer or, when
  ``use_xavier`` is False, with a truncated normal distribution. If ``wd``
  is not None, ``wd * l2_loss(var)`` is added to the 'losses' collection.

  Args:
    name: name of the variable
    shape: list of ints
    stddev: standard deviation of the truncated Gaussian (unused for Xavier)
    wd: weight-decay factor; None disables weight decay
    use_xavier: bool, whether to use the Xavier initializer

  Returns:
    Variable Tensor
  """
  initializer = (tf.contrib.layers.xavier_initializer() if use_xavier
                 else tf.truncated_normal_initializer(stddev=stddev))
  var = _variable_on_cpu(name, shape, initializer)
  if wd is not None:
    # Register the decay term so the training loss can sum the collection.
    decay_loss = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
    tf.add_to_collection('losses', decay_loss)
  return var
def conv1d(inputs,
           num_output_channels,
           kernel_size,
           scope,
           stride=1,
           padding='SAME',
           use_xavier=True,
           stddev=1e-3,
           weight_decay=0.0,
           activation_fn=tf.nn.relu,
           bn=False,
           bn_decay=None,
           is_training=None,
           is_dist=False):
  """1D convolution with optional batch norm and non-linearity.

  Args:
    inputs: 3-D tensor variable BxLxC
    num_output_channels: int
    kernel_size: int
    scope: string
    stride: int
    padding: 'SAME' or 'VALID'
    use_xavier: bool, use xavier_initializer if true
    stddev: float, stddev for truncated_normal init
    weight_decay: float
    activation_fn: function
    bn: bool, whether to use batch norm
    bn_decay: float or float tensor variable in [0,1]
    is_training: bool Tensor variable
    is_dist: bool, use the distributed batch-norm variant

  Returns:
    Variable tensor
  """
  with tf.variable_scope(scope):
    in_channels = inputs.get_shape()[-1].value
    kernel = _variable_with_weight_decay(
        'weights',
        shape=[kernel_size, in_channels, num_output_channels],
        use_xavier=use_xavier,
        stddev=stddev,
        wd=weight_decay)
    outputs = tf.nn.conv1d(inputs, kernel, stride=stride, padding=padding)
    outputs = tf.nn.bias_add(
        outputs,
        _variable_on_cpu('biases', [num_output_channels],
                         tf.constant_initializer(0.0)))
    if bn:
      outputs = batch_norm_for_conv1d(outputs, is_training,
                                      bn_decay=bn_decay, scope='bn',
                                      is_dist=is_dist)
    if activation_fn is not None:
      outputs = activation_fn(outputs)
    return outputs
def conv2d(inputs,
           num_output_channels,
           kernel_size,
           scope,
           stride=[1, 1],
           padding='SAME',
           use_xavier=True,
           stddev=1e-3,
           weight_decay=0.0,
           activation_fn=tf.nn.relu,
           bn=False,
           bn_decay=None,
           is_training=None,
           is_dist=False):
  """2D convolution with optional batch norm and non-linearity.

  Args:
    inputs: 4-D tensor variable BxHxWxC
    num_output_channels: int
    kernel_size: a list of 2 ints
    scope: string
    stride: a list of 2 ints
    padding: 'SAME' or 'VALID'
    use_xavier: bool, use xavier_initializer if true
    stddev: float, stddev for truncated_normal init
    weight_decay: float
    activation_fn: function
    bn: bool, whether to use batch norm
    bn_decay: float or float tensor variable in [0,1]
    is_training: bool Tensor variable
    is_dist: bool, use the distributed batch-norm variant

  Returns:
    Variable tensor
  """
  with tf.variable_scope(scope):
    k_h, k_w = kernel_size
    s_h, s_w = stride
    in_channels = inputs.get_shape()[-1].value
    kernel = _variable_with_weight_decay(
        'weights',
        shape=[k_h, k_w, in_channels, num_output_channels],
        use_xavier=use_xavier,
        stddev=stddev,
        wd=weight_decay)
    outputs = tf.nn.conv2d(inputs, kernel, [1, s_h, s_w, 1], padding=padding)
    outputs = tf.nn.bias_add(
        outputs,
        _variable_on_cpu('biases', [num_output_channels],
                         tf.constant_initializer(0.0)))
    if bn:
      outputs = batch_norm_for_conv2d(outputs, is_training,
                                      bn_decay=bn_decay, scope='bn',
                                      is_dist=is_dist)
    if activation_fn is not None:
      outputs = activation_fn(outputs)
    return outputs
def conv2d_transpose(inputs,
                     num_output_channels,
                     kernel_size,
                     scope,
                     stride=[1, 1],
                     padding='SAME',
                     use_xavier=True,
                     stddev=1e-3,
                     weight_decay=0.0,
                     activation_fn=tf.nn.relu,
                     bn=False,
                     bn_decay=None,
                     is_training=None,
                     is_dist=False):
  """ 2D convolution transpose with non-linear operation.

  Args:
    inputs: 4-D tensor variable BxHxWxC
    num_output_channels: int
    kernel_size: a list of 2 ints
    scope: string
    stride: a list of 2 ints
    padding: 'SAME' or 'VALID'
    use_xavier: bool, use xavier_initializer if true
    stddev: float, stddev for truncated_normal init
    weight_decay: float
    activation_fn: function
    bn: bool, whether to use batch norm
    bn_decay: float or float tensor variable in [0,1]
    is_training: bool Tensor variable
    is_dist: bool, use the distributed batch-norm variant

  Returns:
    Variable tensor

  Note: conv2d(conv2d_transpose(a, num_out, ksize, stride), a.shape[-1], ksize, stride) == a
  """

  def get_deconv_dim(dim_size, stride_size, kernel_size, padding):
    # BUGFIX: this helper was referenced below but never defined, causing a
    # NameError when the graph is built. It inverts the convolution
    # output-size formula (same as slim.convolution2d_transpose).
    dim_size *= stride_size
    if padding == 'VALID' and dim_size is not None:
      dim_size += max(kernel_size - stride_size, 0)
    return dim_size

  with tf.variable_scope(scope) as sc:
    kernel_h, kernel_w = kernel_size
    num_in_channels = inputs.get_shape()[-1].value
    # Kernel layout is [h, w, out_channels, in_channels] -- reversed w.r.t.
    # tf.nn.conv2d, as required by tf.nn.conv2d_transpose.
    kernel_shape = [kernel_h, kernel_w,
                    num_output_channels, num_in_channels]
    kernel = _variable_with_weight_decay('weights',
                                         shape=kernel_shape,
                                         use_xavier=use_xavier,
                                         stddev=stddev,
                                         wd=weight_decay)
    stride_h, stride_w = stride

    # Compute the static output shape (from slim.convolution2d_transpose).
    batch_size = inputs.get_shape()[0].value
    height = inputs.get_shape()[1].value
    width = inputs.get_shape()[2].value
    out_height = get_deconv_dim(height, stride_h, kernel_h, padding)
    out_width = get_deconv_dim(width, stride_w, kernel_w, padding)
    output_shape = [batch_size, out_height, out_width, num_output_channels]

    outputs = tf.nn.conv2d_transpose(inputs, kernel, output_shape,
                                     [1, stride_h, stride_w, 1],
                                     padding=padding)
    biases = _variable_on_cpu('biases', [num_output_channels],
                              tf.constant_initializer(0.0))
    outputs = tf.nn.bias_add(outputs, biases)
    if bn:
      outputs = batch_norm_for_conv2d(outputs, is_training,
                                      bn_decay=bn_decay, scope='bn',
                                      is_dist=is_dist)
    if activation_fn is not None:
      outputs = activation_fn(outputs)
    return outputs
def conv3d(inputs,
           num_output_channels,
           kernel_size,
           scope,
           stride=[1, 1, 1],
           padding='SAME',
           use_xavier=True,
           stddev=1e-3,
           weight_decay=0.0,
           activation_fn=tf.nn.relu,
           bn=False,
           bn_decay=None,
           is_training=None,
           is_dist=False):
  """3D convolution with optional batch norm and non-linearity.

  Args:
    inputs: 5-D tensor variable BxDxHxWxC
    num_output_channels: int
    kernel_size: a list of 3 ints
    scope: string
    stride: a list of 3 ints
    padding: 'SAME' or 'VALID'
    use_xavier: bool, use xavier_initializer if true
    stddev: float, stddev for truncated_normal init
    weight_decay: float
    activation_fn: function
    bn: bool, whether to use batch norm
    bn_decay: float or float tensor variable in [0,1]
    is_training: bool Tensor variable
    is_dist: bool, use the distributed batch-norm variant

  Returns:
    Variable tensor
  """
  with tf.variable_scope(scope):
    k_d, k_h, k_w = kernel_size
    s_d, s_h, s_w = stride
    in_channels = inputs.get_shape()[-1].value
    kernel = _variable_with_weight_decay(
        'weights',
        shape=[k_d, k_h, k_w, in_channels, num_output_channels],
        use_xavier=use_xavier,
        stddev=stddev,
        wd=weight_decay)
    outputs = tf.nn.conv3d(inputs, kernel, [1, s_d, s_h, s_w, 1],
                           padding=padding)
    outputs = tf.nn.bias_add(
        outputs,
        _variable_on_cpu('biases', [num_output_channels],
                         tf.constant_initializer(0.0)))
    if bn:
      outputs = batch_norm_for_conv3d(outputs, is_training,
                                      bn_decay=bn_decay, scope='bn',
                                      is_dist=is_dist)
    if activation_fn is not None:
      outputs = activation_fn(outputs)
    return outputs
def fully_connected(inputs,
                    num_outputs,
                    scope,
                    use_xavier=True,
                    stddev=1e-3,
                    weight_decay=0.0,
                    activation_fn=tf.nn.relu,
                    bn=False,
                    bn_decay=None,
                    is_training=None,
                    is_dist=False):
  """Fully connected layer with optional batch norm and non-linearity.

  Args:
    inputs: 2-D tensor BxN
    num_outputs: int
    scope: string, variable scope
    use_xavier: bool, use xavier_initializer if true
    stddev: float, stddev for truncated_normal init
    weight_decay: float
    activation_fn: function
    bn: bool, whether to use batch norm
    bn_decay: float or float tensor variable in [0,1]
    is_training: bool Tensor variable
    is_dist: bool, use the distributed batch-norm variant

  Returns:
    Variable tensor of size B x num_outputs.
  """
  with tf.variable_scope(scope):
    num_inputs = inputs.get_shape()[-1].value
    weights = _variable_with_weight_decay('weights',
                                          shape=[num_inputs, num_outputs],
                                          use_xavier=use_xavier,
                                          stddev=stddev,
                                          wd=weight_decay)
    biases = _variable_on_cpu('biases', [num_outputs],
                              tf.constant_initializer(0.0))
    outputs = tf.nn.bias_add(tf.matmul(inputs, weights), biases)
    if bn:
      outputs = batch_norm_for_fc(outputs, is_training, bn_decay, 'bn',
                                  is_dist=is_dist)
    if activation_fn is not None:
      outputs = activation_fn(outputs)
    return outputs
def max_pool2d(inputs,
               kernel_size,
               scope,
               stride=[2, 2],
               padding='VALID'):
  """2D max pooling.

  Args:
    inputs: 4-D tensor BxHxWxC
    kernel_size: a list of 2 ints
    scope: string, variable scope
    stride: a list of 2 ints
    padding: 'SAME' or 'VALID'

  Returns:
    Variable tensor
  """
  k_h, k_w = kernel_size
  s_h, s_w = stride
  with tf.variable_scope(scope) as sc:
    return tf.nn.max_pool(inputs,
                          ksize=[1, k_h, k_w, 1],
                          strides=[1, s_h, s_w, 1],
                          padding=padding,
                          name=sc.name)
def avg_pool2d(inputs,
               kernel_size,
               scope,
               stride=[2, 2],
               padding='VALID'):
  """2D average pooling.

  Args:
    inputs: 4-D tensor BxHxWxC
    kernel_size: a list of 2 ints
    scope: string, variable scope
    stride: a list of 2 ints
    padding: 'SAME' or 'VALID'

  Returns:
    Variable tensor
  """
  k_h, k_w = kernel_size
  s_h, s_w = stride
  with tf.variable_scope(scope) as sc:
    return tf.nn.avg_pool(inputs,
                          ksize=[1, k_h, k_w, 1],
                          strides=[1, s_h, s_w, 1],
                          padding=padding,
                          name=sc.name)
def max_pool3d(inputs,
               kernel_size,
               scope,
               stride=[2, 2, 2],
               padding='VALID'):
  """3D max pooling.

  Args:
    inputs: 5-D tensor BxDxHxWxC
    kernel_size: a list of 3 ints
    scope: string, variable scope
    stride: a list of 3 ints
    padding: 'SAME' or 'VALID'

  Returns:
    Variable tensor
  """
  k_d, k_h, k_w = kernel_size
  s_d, s_h, s_w = stride
  with tf.variable_scope(scope) as sc:
    return tf.nn.max_pool3d(inputs,
                            ksize=[1, k_d, k_h, k_w, 1],
                            strides=[1, s_d, s_h, s_w, 1],
                            padding=padding,
                            name=sc.name)
def avg_pool3d(inputs,
               kernel_size,
               scope,
               stride=[2, 2, 2],
               padding='VALID'):
  """3D average pooling.

  Args:
    inputs: 5-D tensor BxDxHxWxC
    kernel_size: a list of 3 ints
    scope: string, variable scope
    stride: a list of 3 ints
    padding: 'SAME' or 'VALID'

  Returns:
    Variable tensor
  """
  k_d, k_h, k_w = kernel_size
  s_d, s_h, s_w = stride
  with tf.variable_scope(scope) as sc:
    return tf.nn.avg_pool3d(inputs,
                            ksize=[1, k_d, k_h, k_w, 1],
                            strides=[1, s_d, s_h, s_w, 1],
                            padding=padding,
                            name=sc.name)
def batch_norm_template(inputs, is_training, scope, moments_dims, bn_decay):
  """ Batch normalization on convolutional maps and beyond...
  Ref.: http://stackoverflow.com/questions/33949786/how-could-i-use-batch-normalization-in-tensorflow

  Args:
      inputs: Tensor, k-D input ... x C could be BC or BHWC or BDHWC
      is_training: boolean tf.Variable, true indicates training phase
      scope: string, variable scope
      moments_dims: a list of ints, indicating dimensions for moments calculation
      bn_decay: float or float tensor variable, controlling moving average weight
  Return:
      normed: batch-normalized maps
  """
  with tf.variable_scope(scope) as sc:
    num_channels = inputs.get_shape()[-1].value
    beta = tf.Variable(tf.constant(0.0, shape=[num_channels]),
                       name='beta', trainable=True)
    gamma = tf.Variable(tf.constant(1.0, shape=[num_channels]),
                        name='gamma', trainable=True)
    batch_mean, batch_var = tf.nn.moments(inputs, moments_dims, name='moments')
    decay = bn_decay if bn_decay is not None else 0.9
    ema = tf.train.ExponentialMovingAverage(decay=decay)
    # Operator that maintains moving averages of variables.
    # Only apply the EMA update while training.
    ema_apply_op = tf.cond(is_training,
                           lambda: ema.apply([batch_mean, batch_var]),
                           lambda: tf.no_op())

    # BUGFIX: this nested closure was missing, leaving `mean_var_with_update`
    # undefined below (NameError at graph-build time). It forces the EMA
    # update to run, then returns the current batch statistics.
    def mean_var_with_update():
      with tf.control_dependencies([ema_apply_op]):
        return tf.identity(batch_mean), tf.identity(batch_var)

    # Training: update moving averages and use batch statistics.
    # Inference: use the EMA-tracked statistics.
    mean, var = tf.cond(is_training,
                        mean_var_with_update,
                        lambda: (ema.average(batch_mean), ema.average(batch_var)))
    normed = tf.nn.batch_normalization(inputs, mean, var, beta, gamma, 1e-3)
  return normed
def batch_norm_dist_template(inputs, is_training, scope, moments_dims, bn_decay):
  """ The batch normalization for distributed training.

  Keeps explicit, non-trainable population statistics (`pop_mean`/`pop_var`)
  on the CPU so multiple workers can share them.

  Args:
      inputs: Tensor, k-D input ... x C could be BC or BHWC or BDHWC
      is_training: boolean tf.Variable, true indicates training phase
      scope: string, variable scope
      moments_dims: a list of ints, indicating dimensions for moments calculation
      bn_decay: float or float tensor variable, controlling moving average weight
  Return:
      normed: batch-normalized maps
  """
  with tf.variable_scope(scope) as sc:
    num_channels = inputs.get_shape()[-1].value
    beta = _variable_on_cpu('beta', [num_channels], initializer=tf.zeros_initializer())
    gamma = _variable_on_cpu('gamma', [num_channels], initializer=tf.ones_initializer())

    pop_mean = _variable_on_cpu('pop_mean', [num_channels], initializer=tf.zeros_initializer(), trainable=False)
    pop_var = _variable_on_cpu('pop_var', [num_channels], initializer=tf.ones_initializer(), trainable=False)

    # BUGFIX: the two branch functions below were missing, leaving
    # `train_bn_op` / `test_bn_op` undefined (NameError at graph-build time).

    def train_bn_op():
      # Normalize with the current batch statistics and fold them into the
      # population statistics via an exponential moving average.
      batch_mean, batch_var = tf.nn.moments(inputs, moments_dims, name='moments')
      decay = bn_decay if bn_decay is not None else 0.9
      train_mean = tf.assign(pop_mean, pop_mean * decay + batch_mean * (1 - decay))
      train_var = tf.assign(pop_var, pop_var * decay + batch_var * (1 - decay))
      with tf.control_dependencies([train_mean, train_var]):
        return tf.nn.batch_normalization(inputs, batch_mean, batch_var, beta, gamma, 1e-3)

    def test_bn_op():
      # At inference time, normalize with the tracked population statistics.
      return tf.nn.batch_normalization(inputs, pop_mean, pop_var, beta, gamma, 1e-3)

    normed = tf.cond(is_training,
                     train_bn_op,
                     test_bn_op)
    return normed
def batch_norm_for_fc(inputs, is_training, bn_decay, scope, is_dist=False):
  """Batch normalization on FC data.

  Args:
      inputs: Tensor, 2D BxC input
      is_training: boolean tf.Variable, true indicates training phase
      bn_decay: float or float tensor variable, controlling moving average weight
      scope: string, variable scope
      is_dist: true indicating distributed training scheme
  Return:
      normed: batch-normalized maps
  """
  # Moments are taken over the batch dimension only.
  bn_fn = batch_norm_dist_template if is_dist else batch_norm_template
  return bn_fn(inputs, is_training, scope, [0,], bn_decay)
def batch_norm_for_conv1d(inputs, is_training, bn_decay, scope, is_dist=False):
    """ Batch normalization on 1D convolutional maps.

    Args:
      inputs:      Tensor, 3D BLC input maps
      is_training: boolean tf.Varialbe, true indicates training phase
      bn_decay:    float or float tensor variable, controling moving average weight
      scope:       string, variable scope
      is_dist:     true indicating distributed training scheme
    Return:
      normed:      batch-normalized maps
    """
    # Moments are computed over batch and length axes (per channel).
    bn_template = batch_norm_dist_template if is_dist else batch_norm_template
    return bn_template(inputs, is_training, scope, [0, 1], bn_decay)
def batch_norm_for_conv2d(inputs, is_training, bn_decay, scope, is_dist=False):
    """ Batch normalization on 2D convolutional maps.

    Args:
      inputs:      Tensor, 4D BHWC input maps
      is_training: boolean tf.Varialbe, true indicates training phase
      bn_decay:    float or float tensor variable, controling moving average weight
      scope:       string, variable scope
      is_dist:     true indicating distributed training scheme
    Return:
      normed:      batch-normalized maps
    """
    # Moments are computed over batch, height and width axes (per channel).
    bn_template = batch_norm_dist_template if is_dist else batch_norm_template
    return bn_template(inputs, is_training, scope, [0, 1, 2], bn_decay)
def batch_norm_for_conv3d(inputs, is_training, bn_decay, scope, is_dist=False):
    """ Batch normalization on 3D convolutional maps.

    Args:
      inputs:      Tensor, 5D BDHWC input maps
      is_training: boolean tf.Varialbe, true indicates training phase
      bn_decay:    float or float tensor variable, controling moving average weight
      scope:       string, variable scope
      is_dist:     true indicating distributed training scheme
    Return:
      normed:      batch-normalized maps
    """
    # Moments are computed over batch, depth, height and width (per channel).
    bn_template = batch_norm_dist_template if is_dist else batch_norm_template
    return bn_template(inputs, is_training, scope, [0, 1, 2, 3], bn_decay)
def dropout(inputs,
            is_training,
            scope,
            keep_prob=0.5,
            noise_shape=None):
    """ Dropout layer that becomes a no-op at inference time.

    Args:
      inputs:      tensor
      is_training: boolean tf.Variable
      scope:       string
      keep_prob:   float in [0,1]
      noise_shape: list of ints
    Returns:
      tensor variable
    """
    with tf.variable_scope(scope) as sc:
        # Named branch functions instead of inline lambdas; tf.cond picks
        # the dropout branch only while training.
        def _apply_dropout():
            return tf.nn.dropout(inputs, keep_prob, noise_shape)

        def _passthrough():
            return inputs

        return tf.cond(is_training, _apply_dropout, _passthrough)
def pairwise_distance(point_cloud):
    """Compute pairwise squared Euclidean distances of a point cloud.

    Args:
      point_cloud: tensor (batch_size, num_points, num_dims)
    Returns:
      pairwise distance: (batch_size, num_points, num_points)
    """
    static_batch = point_cloud.get_shape().as_list()[0]
    static_points = point_cloud.get_shape().as_list()[1]
    # squeeze drops every singleton axis; re-expand the ones we need so the
    # tensor is always rank 3 afterwards.
    point_cloud = tf.squeeze(point_cloud)
    if static_batch == 1:
        point_cloud = tf.expand_dims(point_cloud, 0)
    if static_points == 1:
        point_cloud = tf.expand_dims(point_cloud, 1)
    # ||a - b||^2 = ||a||^2 - 2<a,b> + ||b||^2, evaluated batch-wise.
    transposed = tf.transpose(point_cloud, perm=[0, 2, 1])
    cross_term = -2 * tf.matmul(point_cloud, transposed)
    sq_norms = tf.reduce_sum(tf.square(point_cloud), axis=-1, keepdims=True)
    sq_norms_t = tf.transpose(sq_norms, perm=[0, 2, 1])
    return sq_norms + cross_term + sq_norms_t
def knn(adj_matrix, k=20):
    """Get KNN based on the pairwise distance.

    Args:
      adj_matrix: pairwise distance, (batch_size, num_points, num_points)
      k: int
    Returns:
      nearest neighbors: (batch_size, num_points, k)
    """
    # top_k picks the LARGEST entries, so negate the distances to obtain
    # the k smallest (i.e. nearest) ones.
    _, neighbor_idx = tf.nn.top_k(-adj_matrix, k=k)
    return neighbor_idx
def get_edge_feature(point_cloud, nn_idx, k=20):
    """Construct edge feature for each point.

    The feature of an edge (p, q) is the concatenation of the center point p
    and the relative offset (q - p).

    Args:
      point_cloud: (batch_size, num_points, 1, num_dims)
      nn_idx: (batch_size, num_points, k)
      k: int
    Returns:
      edge features: (batch_size, num_points, k, num_dims)
    """
    static_batch = point_cloud.get_shape().as_list()[0]
    point_cloud = tf.squeeze(point_cloud)
    if static_batch == 1:
        point_cloud = tf.expand_dims(point_cloud, 0)

    centers = point_cloud
    cloud_shape = point_cloud.get_shape()
    batch_size = cloud_shape[0].value
    num_points = cloud_shape[1].value
    num_dims = cloud_shape[2].value

    # Offset each batch's neighbor indices so they address rows of the
    # flattened (batch*num_points, num_dims) cloud.
    batch_offset = tf.reshape(tf.range(batch_size) * num_points, [batch_size, 1, 1])
    flat_cloud = tf.reshape(point_cloud, [-1, num_dims])
    neighbors = tf.gather(flat_cloud, nn_idx + batch_offset)

    # Broadcast each center across its k neighbors, then concatenate
    # (center, neighbor - center) along the feature axis.
    centers = tf.expand_dims(centers, axis=-2)
    centers = tf.tile(centers, [1, 1, k, 1])
    return tf.concat([centers, neighbors - centers], axis=-1)
def get_atrous_knn(adj_matrix, k, dilation, dist_matrix=None, min_radius=0, max_radius=0):
    """ Select samples based on the feature distance, dilation, metric distance and search radius.

    Picks k neighbors per point from the feature-distance matrix, taking every
    `dilation`-th candidate (atrous/dilated KNN). When a metric distance matrix
    is supplied, candidates outside [min_radius, max_radius] are excluded; any
    excluded candidate that still slips through is replaced by the point's own
    index (self-loop) as a safe fallback.

    Args:
      adj_matrix:  feature distance, (batch_size, num_points, num_points)
      k:           int, number of neighbors to return
      dilation:    int, stride between ranked candidates
      dist_matrix: metric distance, (batch_size, num_points, num_points), optional
      min_radius:  float, lower bound of the valid metric-distance band
      max_radius:  float, upper bound of the valid metric-distance band
    Returns:
      selected samples: (batch_size, num_points, k)
    """
    point_cloud_shape = adj_matrix.get_shape()
    batch_size = point_cloud_shape[0].value
    num_points = point_cloud_shape[1].value
    # Bug Notice: if the maximum is selected, then chaos.
    # Hence, need double check
    if (dist_matrix is not None):
        # Mark candidates whose metric distance falls outside the radius band.
        invalid_mask1 = tf.greater(dist_matrix, max_radius)
        invalid_mask2 = tf.less(dist_matrix, min_radius)
        invalid_mask = tf.logical_or(invalid_mask1, invalid_mask2)
        valid_mask = tf.logical_not(invalid_mask)
        # adj_maximum = tf.reduce_max(adj_matrix, axis=2, keepdims=True)
        # maximum = tf.reduce_max(tf.reduce_max(adj_maximum, axis=1, keepdims=True), axis=0, keepdims=True) + 0.1
        # # adj_matrix[invalid_mask] = -1
        # # False => 0; True => 1
        # invalid_maskf = tf.to_float(invalid_mask)
        # valid_maskf = tf.to_float(valid_mask)
        # # adj_matrix = adj_matrix * valid_mask - invalid_mask
        # # adj_matrix[invalid_mask] = maximum
        # # adj_matrix = adj_matrix + (invalid_mask * (maximum + 1))
        # # adj_matrix = tf.minimum(adj_matrix, tf.expand_dims(adj_maximum, 2) )
        # adj_matrix = adj_matrix * valid_maskf + maximum * invalid_maskf
        # Overwrite invalid entries with (global max + 0.1) so top_k of the
        # negated matrix never prefers them over any in-band candidate.
        maximum = tf.reduce_max(adj_matrix, axis=None, keepdims=True) + 0.1
        maximum = tf.tile(maximum, [batch_size, num_points, num_points])
        adj_matrix = tf.where(valid_mask, adj_matrix, maximum, name='value')
    # neg_adj = -adj_matrix
    # Rank k*dilation candidates by ascending feature distance (negate for top_k).
    max_index = k * dilation
    _, nn_idx_altrous = tf.nn.top_k(-adj_matrix, k=max_index)
    # nn_idx_altrous, _ = select_top_k(max_index, adj_matrix)
    # nn_idx_altrous = tf.slice(nn_idx_altrous, [0,0,0], [-1,-1,max_index])
    if dilation > 1:
        # Keep every dilation-th candidate: positions 0, d, 2d, ..., (k-1)*d.
        selected_sequence = tf.range(k) * dilation
        selected_sequence = tf.expand_dims( tf.expand_dims(selected_sequence, axis=0), axis=0 )
        selected_sequence = tf.tile(selected_sequence, [batch_size, num_points, 1])
        # Flatten the (B, N, k*d) index tensor and gather with per-batch /
        # per-point row offsets.
        idx_ = tf.range(batch_size) * num_points * max_index
        idx_ = tf.reshape(idx_, [batch_size, 1, 1])
        idy_ = tf.range(num_points) * max_index
        idy_ = tf.reshape(idy_, [1, num_points, 1])
        nn_idx_flat = tf.reshape(nn_idx_altrous, [-1, 1])
        nn_idx_altrous = tf.gather(nn_idx_flat, selected_sequence + idx_ + idy_)
        nn_idx_altrous = tf.squeeze(nn_idx_altrous)
        if batch_size == 1:
            # squeeze also drops a singleton batch axis; restore it.
            nn_idx_altrous = tf.expand_dims(nn_idx_altrous, 0)
    if (dist_matrix is not None):
        # Double check (see Bug Notice above): look up the validity of each
        # chosen neighbor in the original invalid_mask.
        idx_ = tf.range(batch_size) * num_points * num_points
        idx_ = tf.reshape(idx_, [batch_size, 1, 1])
        idy_ = tf.range(num_points) * num_points
        idy_ = tf.reshape(idy_, [1, num_points, 1])
        invalid_mask_flat = tf.reshape(invalid_mask, [-1, 1])
        selected_invalid_mask=tf.gather(invalid_mask_flat, nn_idx_altrous + idx_ + idy_)
        selected_invalid_mask = tf.squeeze(selected_invalid_mask)
        if batch_size == 1:
            selected_invalid_mask = tf.expand_dims(selected_invalid_mask, 0)
        selected_valid_mask = tf.logical_not(selected_invalid_mask)
        # idn_ holds each point's own index, used as the fallback neighbor.
        idn_ = tf.expand_dims(tf.expand_dims(tf.range(num_points), axis=-1), axis=0)
        idn_ = tf.tile(idn_, [batch_size, 1, k])
        # selected_invalid_maskf = tf.to_float(selected_invalid_mask)
        # selected_valid_maskf = tf.to_float(selected_valid_mask)
        # nn_idx_altrous = tf.to_float(nn_idx_altrous)
        # idn_ = tf.to_float(idn_)
        # nn_idx_altrous = nn_idx_altrous * selected_valid_maskf + idn_ * selected_invalid_maskf
        # nn_idx_altrous = tf.to_int32(nn_idx_altrous)
        idn_ = tf.to_int32(idn_)
        # Replace invalid selections with the point itself.
        nn_idx_altrous = tf.where(selected_valid_mask, nn_idx_altrous, idn_, name='value')
    return nn_idx_altrous
def gather_principal_feature(featrue_map, n):
    """ Select points with most principal features in all point features.

    Seeds the selection with the point whose summed feature distance to all
    other points is largest, then delegates the remaining picks to
    principal_feature_sample.

    Args:
      featrue_map: (batch_size, num_points, channels)
      n: int, number of points to select
    Returns:
      selected index: (batch_size, n)
    """
    # NOTE(review): batch_size / num_points are computed but only used by the
    # commented-out in-graph implementation below.
    feature_map_shape = featrue_map.get_shape()
    batch_size = feature_map_shape[0]
    num_points = feature_map_shape[1]
    # Pairwise squared feature distances; row sums measure how "far" a point
    # is from all others in feature space.
    feature_dist_matrix = pairwise_distance(featrue_map)
    feature_dist_sum = tf.reduce_sum(feature_dist_matrix, axis=-1, keepdims=False)
    # naive method
    # _, nn_idx = tf.nn.top_k(feature_dist_sum, k=n)
    # novel method
    # Seed: the point with the largest total feature distance.
    cur_selected_index = tf.to_int32(tf.argmax(feature_dist_sum, axis=-1))
    # cur_selected_index = tf.expand_dims(cur_selected_index, axis=-1)
    # NOTE(review): principal_feature_sample is not defined in this chunk --
    # presumably a custom op or helper elsewhere in the project that greedily
    # picks the remaining n-1 indices; confirm against the full source.
    nn_idx = principal_feature_sample(n, feature_dist_matrix, cur_selected_index)
    # # nn_idx = np.zeros((batch_size, n), dtype=np.int32)
    # nn_idx = tf.zeros((batch_size, n), tf.int32)
    # # org_mesh = tf.constant(list(range(num_points)))
    # # feature_mesh = tf.tile(tf.expand_dims(tf.expand_dims(org_mesh, 0), 0), [batch_size, num_points, 1])
    # # points_mesh = tf.tile(tf.expand_dims(tf.expand_dims(org_mesh, 0), -1), [batch_size, 1, num_points])
    # feature_mesh, points_mesh = tf.meshgrid(list(range(num_points)), list(range(num_points)))
    # feature_mesh = tf.tile(tf.expand_dims(feature_mesh, axis=0), [batch_size, 1, 1])
    # points_mesh = tf.tile(tf.expand_dims(points_mesh, axis=0), [batch_size, 1, 1])
    # index_mesh, _ = tf.meshgrid(list(range(n)), list(range(batch_size)))
    # for i in range(n):
    #     cur_selected_index = tf.to_int32(tf.expand_dims(cur_selected_index, axis=-1))
    #     # tf.assign(tf.slice(nn_idx, [0, i], [batch_size, 1]), cur_selected_index)
    #     update_index = tf.ones([batch_size, n], tf.int32) * i
    #     valid_mask = tf.equal(index_mesh, update_index)
    #     valid_maskf = tf.to_int32(valid_mask)
    #     nn_idx = nn_idx + valid_maskf * cur_selected_index
    #     cur_selected_index = tf.expand_dims(cur_selected_index, axis=-1)
    #     valid_mask = tf.equal(feature_mesh, cur_selected_index)
    #     invalid_mask = tf.logical_not(valid_mask)
    #     invalid_maskf = tf.to_float(invalid_mask)
    #     feature_dist_matrix = feature_dist_matrix * invalid_maskf
    #     valid_mask = tf.equal(points_mesh, cur_selected_index)
    #     valid_maskf = tf.to_float(valid_mask)
    #     cur_feature_dist_matrix = feature_dist_matrix * valid_maskf
    #     feature_dist_sum = tf.reduce_sum(cur_feature_dist_matrix, axis=1, keepdims=False)
    #     cur_selected_index = tf.argmax(feature_dist_sum, axis=-1)
    return nn_idx
# def get_atrous_knn(adj_matrix, k, dilation):
# """ Select KNN based on the pairwise distance and dilation
# Args:
# pairwise distance: (batch_size, num_points, num_points)
# k: int
# dilation: int
# Returns:
# selected neighbors: (batch_size, num_points, k)
# """
# neg_adj = -adj_matrix
# max_index = k * dilation
# _, nn_idx = tf.nn.top_k(neg_adj, k=max_index)
# # selected_sequence = (np.arange(k) * dilation).astype(np.int32)
# selected_sequence = tf.range(k) * dilation
# # nn_idx_altrous = nn_idx[ :, :, selected_sequence ]
# point_cloud_shape = adj_matrix.get_shape()
# batch_size = point_cloud_shape[0].value
# num_points = point_cloud_shape[1].value
# selected_sequence = tf.expand_dims( tf.expand_dims(selected_sequence, axis=0), axis=0 )
# # print(selected_sequence.get_shape())
# selected_sequence = tf.tile(selected_sequence, [batch_size, num_points, 1])
# # print(selected_sequence.get_shape())
# idx_ = tf.range(batch_size) * num_points * max_index
# idx_ = tf.reshape(idx_, [batch_size, 1, 1])
# idy_ = tf.range(num_points) * max_index
# idy_ = tf.reshape(idy_, [1, num_points, 1])
# # print(idx_.get_shape())
# nn_idx_flat = tf.reshape(nn_idx, [-1, 1])
# nn_idx_altrous = tf.gather(nn_idx_flat, selected_sequence + idx_ + idy_)
# nn_idx_altrous = tf.squeeze(nn_idx_altrous)
# if batch_size == 1:
# nn_idx_altrous = tf.expand_dims(nn_idx_altrous, 0)
# # print(nn_idx_altrous.get_shape())
# return nn_idx_altrous
# def get_atrous_knn(adj_matrix, k, dilation, dist_matrix=None, radius=0):
# """ Select samples based on the feature distance, dilation, metric distance and search radius
# Args:
# feature distance: (batch_size, num_points, num_points)
# k: int
# dilation: int
# metric distance: (batch_size, num_points, num_points)
# radius: float
# Returns:
# selected samples: (batch_size, num_points, k)
# """
# if (dist_matrix != None) and (radius > 0):
# invalid_mask = tf.greater(dist_matrix, radius)
# valid_mask = tf.logical_not(invalid_mask)
# # adj_matrix[invalid_mask] = -1
# # False => 0; True => 1
# invalid_mask = tf.to_float(invalid_mask)
# valid_mask = tf.to_float(valid_mask)
# adj_matrix = adj_matrix * valid_mask - invalid_mask
# adj_maximum = tf.reduce_max(adj_matrix, axis=2, keepdims=False)
# maximum = tf.reduce_max(tf.reduce_max(adj_maximum, axis=1, keepdims=False), axis=0, keepdims=False)
# # adj_matrix[invalid_mask] = maximum
# adj_matrix = adj_matrix + (invalid_mask * (maximum + 1))
# adj_matrix = tf.minimum(adj_matrix, tf.expand_dims(adj_maximum, 2) )
# neg_adj = -adj_matrix
# max_index = k * dilation
# _, nn_idx = tf.nn.top_k(neg_adj, k=max_index)
# selected_sequence = tf.range(k) * dilation
# point_cloud_shape = adj_matrix.get_shape()
# batch_size = point_cloud_shape[0].value
# num_points = point_cloud_shape[1].value
# selected_sequence = tf.expand_dims( tf.expand_dims(selected_sequence, axis=0), axis=0 )
# selected_sequence = tf.tile(selected_sequence, [batch_size, num_points, 1])
# idx_ = tf.range(batch_size) * num_points * max_index
# idx_ = tf.reshape(idx_, [batch_size, 1, 1])
# idy_ = tf.range(num_points) * max_index
# idy_ = tf.reshape(idy_, [1, num_points, 1])
# nn_idx_flat = tf.reshape(nn_idx, [-1, 1])
# nn_idx_altrous = tf.gather(nn_idx_flat, selected_sequence + idx_ + idy_)
# nn_idx_altrous = tf.squeeze(nn_idx_altrous)
# if batch_size == 1:
# nn_idx_altrous = tf.expand_dims(nn_idx_altrous, 0)
# return nn_idx_altrous | [
37811,
27323,
2848,
5499,
329,
309,
22854,
37535,
11685,
13,
198,
198,
13838,
25,
7516,
371,
13,
21924,
198,
10430,
25,
3389,
1584,
198,
198,
4933,
324,
1513,
416,
32854,
15233,
290,
40633,
8800,
3825,
198,
198,
13518,
6596,
416,
43322,... | 2.317641 | 13,191 |
from flask import Flask, render_template, request, redirect, url_for, flash, jsonify
from flask_cors import CORS
from utils import waifugen
from utils.security import apiKeyIsValid
from utils.config import domainName
app = Flask(__name__,
static_url_path='',
static_folder='static',
template_folder='templates')
CORS(app)
@app.route('/')
@app.route('/settings')
@app.route('/profile', methods = ['GET', 'POST'])
@app.route('/api/profile', methods = ['GET', 'POST'])
# A small Easter Egg
@app.route('/admin') | [
6738,
42903,
1330,
46947,
11,
8543,
62,
28243,
11,
2581,
11,
18941,
11,
19016,
62,
1640,
11,
7644,
11,
33918,
1958,
198,
6738,
42903,
62,
66,
669,
1330,
327,
20673,
198,
6738,
3384,
4487,
1330,
2082,
361,
42740,
198,
6738,
3384,
4487,... | 2.68932 | 206 |
import unittest
import psycopg2
from UnoCPI import sqlfiles, settings
# Initializing the sql files
sql = sqlfiles
| [
11748,
555,
715,
395,
198,
11748,
17331,
22163,
70,
17,
198,
6738,
791,
78,
8697,
40,
1330,
19862,
1652,
2915,
11,
6460,
198,
198,
2,
20768,
2890,
262,
44161,
3696,
198,
25410,
796,
19862,
1652,
2915,
628
] | 3.135135 | 37 |
'''
Created on Jun 22, 2018
@author: moffat
'''
from django.test import TestCase, tag
from django.core.exceptions import ValidationError
from django.utils import timezone
from ..forms import IneligibleSubjectFormValidator
@tag('T') | [
7061,
6,
198,
41972,
319,
7653,
2534,
11,
2864,
198,
198,
31,
9800,
25,
285,
2364,
265,
198,
7061,
6,
198,
6738,
42625,
14208,
13,
9288,
1330,
6208,
20448,
11,
7621,
198,
6738,
42625,
14208,
13,
7295,
13,
1069,
11755,
1330,
3254,
24... | 3.295775 | 71 |
import json
| [
11748,
33918,
628
] | 4.333333 | 3 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright
# Author:
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
smorest_sfs.modules.roles.resource
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
角色权限的资源模块
"""
from typing import Any, Dict, List
from flask.views import MethodView
from flask_jwt_extended import current_user
from flask_sqlalchemy import BaseQuery
from loguru import logger
from smorest_sfs.extensions import db
from smorest_sfs.extensions.api.decorators import paginate
from smorest_sfs.extensions.marshal.bases import (
BaseIntListSchema,
BaseMsgSchema,
GeneralParam,
)
from smorest_sfs.modules.auth import PERMISSIONS
from smorest_sfs.modules.auth.decorators import doc_login_required, permission_required
from smorest_sfs.plugins.samanager import SqlaManager
from . import blp, models, schemas
samanager: SqlaManager[models.Role] = SqlaManager(db.session)
@blp.route("/options")
@blp.route("")
@blp.route(
"/<int:role_id>",
parameters=[{"in": "path", "name": "role_id", "description": "角色权限id"}],
)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
15069,
198,
2,
6434,
25,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
... | 2.926829 | 533 |
# -*- coding: utf-8 -*-
"""
@author: Guilherme Esgario
"""
import os
import sys
import imageio
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from skimage.transform import resize
from utils.measures import ChlMeasures
from utils.plot import plot_multiple_indices
from utils.seg_methods import im_threshold
import warnings
warnings.filterwarnings("ignore")
IMG_SIZE = (512, 512)
PATH = 'dataset'
BACKGROUND = 'natural_bg' # natural_bg or white_bg
CSV_PATH = os.path.join(PATH,'spad_502_measures.csv')
PLANT_NAME = ('golden papaya', 'tainung papaya')
# open dataset csv
dataset = pd.read_csv(CSV_PATH, encoding='UTF-8')
# golden papaya
spad_gp = dataset[dataset['plant']==PLANT_NAME[0]]
spad_gp = spad_gp['spad_measure'].values
# tainung papaya
spad_tp = dataset[dataset['plant']==PLANT_NAME[1]]
spad_tp = spad_tp['spad_measure'].values
# Golden papaya
images, leaf_masks, bg_masks = load_images(PATH + '/golden/' + BACKGROUND)
cme = ChlMeasures(images, leaf_masks, bg_masks, white_balance=(True if BACKGROUND == 'white_bg' else False))
result_gp, index_names = cme.compute_all_indices()
# Tainung papaya
images, leaf_masks, bg_masks = load_images(PATH + '/tainung/' + BACKGROUND)
cme.set_images(images, leaf_masks, bg_masks)
result_tp, index_names = cme.compute_all_indices()
#select_indices = ( 10, 26 )
select_indices = list(range(27))
results = (result_gp[:, select_indices], result_tp[:, select_indices])
spad_measures = (spad_gp, spad_tp)
index_names_new = [ index_names[i] for i in select_indices ]
# Plotting results
plot_multiple_indices(results, spad_measures, index_names_new, (16, 20), ('gp','tp'), 0)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
31,
9800,
25,
1962,
346,
372,
1326,
8678,
4563,
952,
198,
37811,
198,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
2939,
952,
198,
11748,
2603,
29487,
8... | 2.626183 | 634 |
#!/usr/bin/python3
# 2021 Collegiate eCTF
# SCEWL Security Server
# Ben Janis
#
# (c) 2021 The MITRE Corporation
#
# This source file is part of an example system for MITRE's 2021 Embedded System CTF (eCTF).
# This code is being provided only for educational purposes for the 2021 MITRE eCTF competition,
# and may not meet MITRE standards for quality. Use this code at your own risk!
import socket
import select
import struct
import argparse
import logging
import threading
import os
from typing import NamedTuple
from Crypto.Cipher import AES
from hashlib import sha256
AES_KEY = 'secrets/AES.key'
IV = 'secrets/IV.data'
CERT = 'secrets/register.valid'
SSS_IP = 'localhost'
SSS_ID = 1
# mirroring scewl enum at scewl.c:4
ALREADY, REG, DEREG = -1, 0, 1
logging.basicConfig(level=logging.DEBUG)
Device = NamedTuple('Device', [('id', int), ('status', int), ('csock', socket.socket), ('UUID', int), ('nUUID', int)])
# Helper functions
if __name__ == '__main__':
logging.debug('Starting main function.')
main()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
198,
2,
33448,
34890,
9386,
304,
4177,
37,
198,
2,
311,
5222,
54,
43,
4765,
9652,
198,
2,
3932,
2365,
271,
198,
2,
198,
2,
357,
66,
8,
33448,
383,
17168,
2200,
10501,
198,
2,
198,
... | 2.976945 | 347 |
import numpy as np # Matrix operations
import numpy.linalg as la # Get principal components
import pandas as pd # Open .csv files
import os # Path tools
dir = os.getcwd() + "..\\data\\"
"""
get_cov
-------
Returns the covariance matrix for a folder .csv files
Parameters:
- folder: The name (not the whole file path!) of the child folder of games to process.
Preconditions:
- All .csv files in folder are 19x19 matrices
- The first row and column of all .csv files contain the numbers 0-18 in order
"""
"""
cov_to_cor
----------
Returns the correlation matrix associated with a covariance matrix
Parameters:
- cov: A 361x361 real matrix with positive diagonal entries
"""
"""
principal_components
--------------------
Returns the principal components of a covariance matrix
"""
generate_cov = False # Whether to compute covariance matrix anew or load from save
if generate_cov:
cov_9d = get_cov("9d_csv_norm")
cov_18k = get_cov("18k_csv_norm")
frame_9d = pd.DataFrame(cov_9d)
frame_18k = pd.DataFrame(cov_18k)
frame_9d.to_csv(dir + "9d_cov_all_norm.csv")
frame_18k.to_csv(dir + "18k_cov_all_norm.csv")
else:
cov_9d = pd.read_csv(dir+"9d_cov_all_norm.csv").values[:,1:]
cov_18k = pd.read_csv(dir+"18k_cov_all_norm.csv").values[:,1:]
cor_18k = cov_to_cor(cov_18k) # Correlation matrix of 18k dataset
cor_9d = cov_to_cor(cov_9d) # Correlation matrix of 9d dataset
| [
11748,
299,
32152,
355,
45941,
1303,
24936,
4560,
201,
198,
11748,
299,
32152,
13,
75,
1292,
70,
355,
8591,
1303,
3497,
10033,
6805,
201,
198,
11748,
19798,
292,
355,
279,
67,
1303,
4946,
764,
40664,
3696,
201,
198,
11748,
28686,
1303,
... | 2.535163 | 583 |
"""
A module with a simple Point class.
This module has a simpler version of the Point class than
what we saw in previous labs. It shows off the minimum
that we need to get started with a class.
Author: Walker M. White (wmw2)
Date: October 1, 2017 (Python 3 Version)
"""
import math
class Point(object):
    """
    A class representing a single point in 3D space.

    Attribute x: The x-coordinate
    Invariant: x is a float

    Attribute y: The y-coordinate
    Invariant: y is a float

    Attribute z: The z-coordinate
    Invariant: z is a float
    """

    def __init__(self, x=0.0, y=0.0, z=4.0):
        """
        Initializes a new Point at (x, y, z).

        Parameter x: The x-coordinate (default 0.0)
        Precondition: x is a float

        Parameter y: The y-coordinate (default 0.0)
        Precondition: y is a float

        Parameter z: The z-coordinate (default 4.0)
        Precondition: z is a float
        """
        self.x = x
        self.y = y
        self.z = z

    def __str__(self):
        """
        Returns this Point as a string '(x, y, z)'
        """
        return f'({self.x}, {self.y}, {self.z})'

    def __repr__(self):
        """
        Returns an unambiguous representation of this point
        """
        return str(self.__class__) + str(self)

    def __eq__(self, other):
        """
        Returns True if other is a point equal to this one.

        Parameter other: The point to compare
        Precondition: other is a Point
        """
        assert type(other) == Point, repr(other) + ' is not a Point'
        return (self.x, self.y, self.z) == (other.x, other.y, other.z)

    def __add__(self, other):
        """
        Returns a new point that is the pointwise sum of self and other

        Parameter other: The point to add
        Precondition: other is a Point
        """
        assert type(other) == Point, repr(other) + ' is not a Point'
        return Point(self.x + other.x, self.y + other.y, self.z + other.z)

    def distance(self, other):
        """
        Returns the Euclidean distance from self to other

        Parameter other: The point to compare
        Precondition: other is a Point
        """
        assert type(other) == Point, repr(other) + ' is not a Point'
        dx = self.x - other.x
        dy = self.y - other.y
        dz = self.z - other.z
        return math.sqrt(dx * dx + dy * dy + dz * dz)
| [
37811,
198,
32,
8265,
351,
257,
2829,
6252,
1398,
13,
198,
198,
1212,
8265,
468,
257,
18599,
2196,
286,
262,
6252,
1398,
621,
198,
10919,
356,
2497,
287,
2180,
27887,
13,
220,
632,
2523,
572,
262,
5288,
198,
5562,
356,
761,
284,
651... | 2.360377 | 1,060 |
# -*- coding: utf-8 -*-
import scrapy
import urllib2
import json
import errno
from DianpingSpider.items import Restaurant, Food, User, Preference
from socket import error as SocketError
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
15881,
88,
198,
11748,
2956,
297,
571,
17,
198,
11748,
33918,
198,
11748,
11454,
3919,
198,
198,
6738,
19326,
13886,
41294,
13,
23814,
1330,
26078,
11,
7318,
... | 3.216667 | 60 |
from analizer.abstract import instruction
from analizer.typechecker import Checker
from analizer.typechecker.Metadata import Struct
from analizer.reports import Nodo
from storage.storageManager import jsonMode
| [
6738,
2037,
7509,
13,
397,
8709,
1330,
12064,
198,
6738,
2037,
7509,
13,
4906,
9122,
263,
1330,
6822,
263,
198,
6738,
2037,
7509,
13,
4906,
9122,
263,
13,
9171,
14706,
1330,
32112,
198,
6738,
2037,
7509,
13,
48922,
1330,
399,
24313,
1... | 3.711864 | 59 |
from CyberSource import *
import os
import json
from importlib.machinery import SourceFileLoader
config_file = os.path.join(os.getcwd(), "data", "Configuration.py")
configuration = SourceFileLoader("module.name", config_file).load_module()
# To delete None values in Input Request Json body
if __name__ == "__main__":
create_payment_instrument_bank_account()
| [
6738,
15101,
7416,
1330,
1635,
198,
11748,
28686,
198,
11748,
33918,
198,
6738,
1330,
8019,
13,
76,
620,
15451,
1330,
8090,
8979,
17401,
198,
198,
11250,
62,
7753,
796,
28686,
13,
6978,
13,
22179,
7,
418,
13,
1136,
66,
16993,
22784,
3... | 3.327273 | 110 |
import numpy as np
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
import re
import pymorphy2
import collections
from scipy import spatial
| [
11748,
299,
32152,
355,
45941,
198,
6738,
299,
2528,
74,
13,
30001,
1096,
1330,
1573,
62,
30001,
1096,
198,
6738,
299,
2528,
74,
13,
10215,
79,
385,
1330,
2245,
10879,
198,
11748,
302,
198,
11748,
12972,
4491,
6883,
17,
198,
11748,
17... | 3.3 | 50 |
import helpers
import random
import numpy as np
from numpy.testing import assert_array_equal
import pandas as pd
import pytest
@pytest.fixture(scope='module')
def test_df():
    """
    Test Pandas Dataframe.
    """
    return pd.DataFrame({
        'page': [1, 1, 2, 2],
        'bounds': [[1, 2, 3, 4], [5, 6, 7, 8], [9, 1, 2, 3], [4, 5, 6, 7]],
        'text': ['testing', 'one', 'two', 'testing'],
    })
@pytest.fixture(scope='module')
def test_im_data():
    """
    Test image data (randomised pixel values).
    """
    # A4-at-200dpi-sized RGB array of random uint8-range values.
    return np.random.randint(0, 255, (2339, 1653, 3))
@pytest.fixture(scope='module')
def test_boxes():
    """
    Test boundary boxes.
    """
    # Ten random 4-tuples each; red boxes are generated first so RNG
    # consumption order matches the original.
    def _boxes():
        return [[random.randrange(1, 10, 1) for _ in range(4)] for _ in range(10)]

    red_boxes = _boxes()
    green_boxes = _boxes()
    return red_boxes, green_boxes
| [
11748,
49385,
198,
11748,
4738,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
299,
32152,
13,
33407,
1330,
6818,
62,
18747,
62,
40496,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
12972,
9288,
628,
198,
31,
9078,
9288,
13,
69,
9... | 2.319797 | 394 |
from dbnd._core.tracking.schemas.base import ApiObjectSchema
from dbnd._vendor.marshmallow import fields, validate
jobs_set_archive_schema = JobsSetArchiveSchema()
| [
6738,
20613,
358,
13557,
7295,
13,
36280,
13,
1416,
4411,
292,
13,
8692,
1330,
5949,
72,
10267,
27054,
2611,
198,
6738,
20613,
358,
13557,
85,
18738,
13,
76,
5406,
42725,
1330,
7032,
11,
26571,
628,
628,
198,
43863,
62,
2617,
62,
1747... | 3.111111 | 54 |
"""
Contains KWS DAO implementations.
"""
from django.conf import settings
from restclients.mock_http import MockHTTP
from restclients.dao_implementation import get_timeout
from restclients.dao_implementation.live import get_con_pool, get_live_url
from restclients.dao_implementation.mock import get_mockdata_url
KWS_MAX_POOL_SIZE = 10
class File(object):
"""
The File DAO implementation returns generally static content. Use this
DAO with this configuration:
RESTCLIENTS_KWS_DAO_CLASS = 'restclients.dao_implementation.kws.File'
"""
class Live(object):
"""
This DAO provides real data. It requires further configuration, e.g.
RESTCLIENTS_KWS_CERT_FILE='/path/to/an/authorized/cert.cert',
RESTCLIENTS_KWS_KEY_FILE='/path/to/the/certs_key.key',
RESTCLIENTS_KWS_HOST='https://ucswseval1.cac.washington.edu:443',
"""
pool = None
| [
37811,
198,
4264,
1299,
509,
19416,
17051,
46,
25504,
13,
198,
37811,
198,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
1334,
565,
2334,
13,
76,
735,
62,
4023,
1330,
44123,
40717,
198,
6738,
1334,
565,
2334,
13,
67,
5488... | 2.723077 | 325 |
#!/Users/Anas/Desktop/MakeSchool/Term_2/BEW1.2/projects/makewiki/wikienv/bin/python3.7
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| [
2,
48443,
14490,
14,
2025,
292,
14,
36881,
14,
12050,
26130,
14,
40596,
62,
17,
14,
33,
6217,
16,
13,
17,
14,
42068,
14,
15883,
15466,
14,
20763,
2013,
85,
14,
8800,
14,
29412,
18,
13,
22,
198,
6738,
42625,
14208,
13,
7295,
1330,
... | 2.680556 | 72 |
import unittest
if __name__ == '__main__':
unittest.main() | [
11748,
555,
715,
395,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
555,
715,
395,
13,
12417,
3419
] | 2.423077 | 26 |
"""Configuration is a base class that has default values that you can change
during the instance of the client class"""
from typing import Callable
from .interface import Cache
from .lru_cache import LRUCache
BASE_URL = "https://config.ff.harness.io/api/1.0"
EVENTS_URL = "https://events.ff.harness.io/api/1.0"
MINUTE = 60
PULL_INTERVAL = 1 * MINUTE
PERSIST_INTERVAL = 1 * MINUTE
EVENTS_SYNC_INTERVAL = 1 * MINUTE
default_config = Config()
| [
37811,
38149,
318,
257,
2779,
1398,
326,
468,
4277,
3815,
326,
345,
460,
1487,
198,
42122,
262,
4554,
286,
262,
5456,
1398,
37811,
198,
198,
6738,
19720,
1330,
4889,
540,
198,
198,
6738,
764,
39994,
1330,
34088,
198,
6738,
764,
75,
62... | 2.980132 | 151 |
#!/mnt/workspace/00-GITHUB/05-Python/django-web/real-estate-linux/lenv/bin/python3
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| [
2,
48443,
76,
429,
14,
5225,
10223,
14,
405,
12,
38,
10554,
10526,
14,
2713,
12,
37906,
14,
28241,
14208,
12,
12384,
14,
5305,
12,
44146,
12,
23289,
14,
11925,
85,
14,
8800,
14,
29412,
18,
198,
6738,
42625,
14208,
13,
7295,
1330,
... | 2.661972 | 71 |
import pygame as pg
pg.init()
| [
11748,
12972,
6057,
355,
23241,
198,
6024,
13,
15003,
3419,
628,
198
] | 2.666667 | 12 |
from .traintest import * | [
6738,
764,
2213,
2913,
395,
1330,
1635
] | 3.428571 | 7 |
# -*- coding: utf-8 -*-
"""
ytelapi
This file was automatically generated by APIMATIC v2.0 ( https://apimatic.io ).
"""
class Body63(object):
"""Implementation of the 'body_63' model.
TODO: type model description here.
Attributes:
shortcode (string): List of valid shortcode to your Ytel account
friendly_name (string): User generated name of the shortcode
callback_url (string): URL that can be requested to receive
notification when call has ended. A set of default parameters will
be sent here once the call is finished.
callback_method (string): Specifies the HTTP method used to request
the required StatusCallBackUrl once call connects.
fallback_url (string): URL used if any errors occur during execution
of InboundXML or at initial request of the required Url provided
with the POST.
fallback_url_method (string): Specifies the HTTP method used to
request the required FallbackUrl once call connects.
"""
# Create a mapping from Model property names to API property names
_names = {
"shortcode":'Shortcode',
"friendly_name":'FriendlyName',
"callback_url":'CallbackUrl',
"callback_method":'CallbackMethod',
"fallback_url":'FallbackUrl',
"fallback_url_method":'FallbackUrlMethod'
}
def __init__(self,
shortcode=None,
friendly_name=None,
callback_url=None,
callback_method=None,
fallback_url=None,
fallback_url_method=None):
"""Constructor for the Body63 class"""
# Initialize members of the class
self.shortcode = shortcode
self.friendly_name = friendly_name
self.callback_url = callback_url
self.callback_method = callback_method
self.fallback_url = fallback_url
self.fallback_url_method = fallback_url_method
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
shortcode = dictionary.get('Shortcode')
friendly_name = dictionary.get('FriendlyName')
callback_url = dictionary.get('CallbackUrl')
callback_method = dictionary.get('CallbackMethod')
fallback_url = dictionary.get('FallbackUrl')
fallback_url_method = dictionary.get('FallbackUrlMethod')
# Return an object of this model
return cls(shortcode,
friendly_name,
callback_url,
callback_method,
fallback_url,
fallback_url_method)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
201,
198,
37811,
201,
198,
220,
220,
220,
331,
37524,
15042,
201,
198,
201,
198,
220,
220,
220,
770,
2393,
373,
6338,
7560,
416,
3486,
3955,
1404,
2149,
410,
17,
... | 2.319573 | 1,405 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from Tkinter import *
from base64 import b64encode
import json
LANGUAGE = "en"
with open('language.json') as json_file:
data = json.load(json_file, encoding="utf-8")
root = Tk()
root.title('Codepass')
root.iconbitmap('key.ico')
Label(text=data[LANGUAGE]['label']).pack(side=TOP,padx=10,pady=10)
entry = Entry(root, width=30)
entry.pack(side=TOP,padx=5,pady=5)
Button(root, text=data[LANGUAGE]['codeBtn'], command=encode).pack(side='left')
Button(root, text=data[LANGUAGE]['cleanBtn'], command=clean).pack(side='left')
Button(root, text=data[LANGUAGE]['cpclipboardBtn'], command=cpclipboard).pack(side='left')
entry.focus()
root.bind("<Return>", lambda event: encode())
root.resizable(0,0)
root.mainloop()
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
309,
74,
3849,
1330,
1635,
198,
6738,
2779,
2414,
1330,
275,
2414,
268,
8189,
198,
11748,
33918,
198,
19... | 2.472313 | 307 |
"""
Formatando Valores com modificadores - Aula 5
:s - Texto (strings)
:d - Inteiros (int)
:f - Números de pronto flutuante (float)
:.(NÚMERO) Quantidade de casas decimais (float)
:(cARACTERE) (< ou > ou ^) (QUANTIDADE) (TIPO - s,d, ou f)
> - Esquerda
< - Direita
^- Centro
# num1 = input('Digite um numero: ')
# num2 = input('Digite outro numero: ')
# print(num1, num2) # contatenação
__________________________________________________________________
num_1 = 10
num_2 = 3
divisao = (num_1 / num_2)
print('{:.2f}'.format(divisao)) # O : sinaliza para o Python que vai haver uma formatação. .2 significa duas casas decimais e o f é de float.
print( f'{divisao:.2f}')
"""
nome = 'Katia'
print(f'{nome:s}') | [
37811,
201,
198,
26227,
25440,
3254,
2850,
401,
953,
811,
324,
2850,
532,
317,
4712,
642,
201,
198,
201,
198,
25,
82,
532,
8255,
78,
357,
37336,
8,
201,
198,
25,
67,
532,
2558,
20295,
4951,
357,
600,
8,
201,
198,
25,
69,
532,
39... | 2.263804 | 326 |
import requests
res = requests.post('http://127.0.0.1:8000/rest-auth/login/', data={'username':'ddd', 'password':'ddd'})
key = res.json()['key']
# res = requests.post('http://127.0.0.1:8000/profile/sys_mail_lists/', headers={'Authorization': 'Token ' + key}, data={'username':"antispam"})
data = {'username': 'ddd', 'lastLearn': '0101', 'totalTime': '0202', 'VolumeInbox': 3, 'VolumeSpam': 4}
res = requests.put('http://127.0.0.1:8000/profile/sys_last_learn/', headers={'Authorization': 'Token ' + key}, data=data)
print (res.text)
res = requests.get('http://127.0.0.1:8000/profile/last_learn/', headers={'Authorization': 'Token ' + key})
print (res.text)
| [
11748,
7007,
198,
198,
411,
796,
7007,
13,
7353,
10786,
4023,
1378,
16799,
13,
15,
13,
15,
13,
16,
25,
33942,
14,
2118,
12,
18439,
14,
38235,
14,
3256,
1366,
34758,
6,
29460,
10354,
6,
1860,
67,
3256,
705,
28712,
10354,
6,
1860,
6... | 2.66129 | 248 |
# -*- coding: utf-8 -*-
""" Module: models as part of: todo_list
Created by: Reinier on 22-10-2017. A model is the single, definitive source of information about your data.
It contains the essential fields and behaviors of the data you’re storing. Generally, each model maps to a
single database table.
TODO:
- Nothing for this moment.
"""
from django.db import models
from django.urls import reverse
# Create your models here.
class Action(models.Model):
""": The class: "Action", is part of module: "models".
A Action model to represent the database table Action. A action that needs to be done to fulfill a particular
goal.
Note:
- Do not include the `self` parameter in the ``Args`` section.
- The __init__ method is documented as a docstring on the __init__ method itself.
- Class attributes, variables owned by the class itself. All values of class attributes are the same
for each Instance.
"""
#: description(CharField): Action description.
description = models.CharField(max_length=200)
#: subject(CharField): Action subject.
subject = models.CharField(max_length=64)
#: created(DateField): Creation date of action.
created = models.DateField(auto_now_add=True)
#: filed(DateField): Date when action is filed.
filed = models.DateField(null=True)
#: completed(BooleanField): Action is final.
completed = models.BooleanField(default=0)
class Detail(models.Model):
""": The class: "Detail", is part of module: "models".
A Detail model to represent the database table Detail. details holds the steps that explains how a certain
action is solved or what its actual status is.
Note:
- Do not include the `self` parameter in the ``Args`` section.
- The __init__ method is documented as a docstring on the __init__ method itself.
- Class attributes, variables owned by the class itself. All values of class attributes are the same
for each Instance.
"""
#: explanation(BooleanField): Step explained to fulfill action.
explanation = models.CharField(max_length=200, null=True)
#: edited(DateField): Date action adjusted.
edited = models.DateField(auto_now_add=True)
#: action(ForeignKey): Relation to action.
action = models.ForeignKey(Action, on_delete=models.CASCADE)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
19937,
25,
4981,
355,
636,
286,
25,
284,
4598,
62,
4868,
628,
220,
220,
220,
15622,
416,
25,
22299,
959,
319,
2534,
12,
940,
12,
5539,
13,
317,
2746,
318,
262... | 3.18 | 750 |
#!/usr/bin/env python
#
# Copyright (c) 2014 Intel Corporation. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import optparse
import os
import sys
import zipfile
if __name__ == '__main__':
sys.exit(main())
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
198,
2,
15069,
357,
66,
8,
1946,
8180,
10501,
13,
1439,
2489,
10395,
13,
198,
2,
5765,
286,
428,
2723,
2438,
318,
21825,
416,
257,
347,
10305,
12,
7635,
5964,
326,
460,
307,
198... | 3.247191 | 89 |
import os
import sys
import time
import datetime
import threading
from urlparse import urlparse
import requests
CONNECT_TIMEOUT = 30
def get_bigiq_session(host, username, password):
''' Creates a Requests Session to the BIG-IQ host configured '''
if requests.__version__ < '2.9.1':
requests.packages.urllib3.disable_warnings() # pylint: disable=no-member
bigiq = requests.Session()
bigiq.verify = False
bigiq.headers.update({'Content-Type': 'application/json'})
bigiq.timeout = CONNECT_TIMEOUT
token_auth_body = {'username': username,
'password': password,
'loginProviderName': 'local'}
login_url = "https://%s/mgmt/shared/authn/login" % (host)
response = bigiq.post(login_url,
json=token_auth_body,
verify=False,
auth=requests.auth.HTTPBasicAuth(
username, password))
response_json = response.json()
bigiq.headers.update(
{'X-F5-Auth-Token': response_json['token']['token']})
bigiq.base_url = 'https://%s/mgmt/cm/device/licensing/pool' % host
return bigiq
| [
11748,
28686,
198,
11748,
25064,
198,
11748,
640,
198,
11748,
4818,
8079,
198,
11748,
4704,
278,
198,
198,
6738,
19016,
29572,
1330,
19016,
29572,
198,
198,
11748,
7007,
198,
198,
10943,
48842,
62,
34694,
12425,
796,
1542,
628,
198,
4299,... | 2.241966 | 529 |
from django.conf.urls import url
import views
urlpatterns = [
url(r'^$', views.ListClassView.as_view(), name='classes-list',),
url(r'^(?P<class_pk>[\d]+)/$', views.DetailClassView.as_view(), name='class-detail',),
url(r'^(?P<class_pk>[\d]+)/edit$', views.UpdateClassView.as_view(), name='class-update',),
url(r'^add/$', views.CreateClassView.as_view(), name='class-create',),
url(r'^(?P<class_pk>[\d]+)/delete$', views.DeleteClassView.as_view(), name='class-delete',),
]
| [
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
19016,
198,
198,
11748,
5009,
198,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
19016,
7,
81,
6,
61,
3,
3256,
5009,
13,
8053,
9487,
7680,
13,
292,
62,
1177,
22784,
1438,
11... | 2.381643 | 207 |
import quandl
import pandas_datareader.data as web
import datetime
import pandas as pd
import sklearn
import numpy as np
import scipy as sp
from operator import methodcaller
import time
"""
# 'realistic version'. buy and sell prices are the opening prices for the next day, not the closing price. don't have access to adjusted open value
def buy(self, day, symbol):
span, _, _ = self.strategy
analyst = next(filter(lambda a:a.symbol == symbol, self.analysts))
next_day = self.period[self.period.index(day) + 1]
price = analyst.get_var(next_day, 'Open')
# print('triggered at: {:.2f}, buying at: {:.2f}'.format(self.data.loc[day, 'Adj Close'], price))
qty = self.capital / price
# update capital
self.capital -= qty * price
# open operation
self.operations.append(
Operation(symbol = symbol, price = price, qty = qty, start_date = day, span=span))
# change start and end date of operations
def sell(self, day, operation):
analyst = next(filter(lambda a:a.symbol == operation.symbol, self.analysts))
# get open price for the next day. if last day, get closing price for the day
if day == self.period[-1]:
price = analyst.get_var(day, 'Close')
else:
next_day = self.period[self.period.index(day) + 1]
price = analyst.get_var(next_day, 'Open')
# print('triggered at: {:.2f}, selling at: {:.2f}'.format(self.data.loc[day, 'Adj Close'], price))
# update capital
self.capital += operation.qty * price
# close operation
operation.close(day, price)
def check_signal(self, day):
# get X
X = self.get_X(day)
# scale and extract principal components
self.scaler.partial_fit(self.get_X(day))
X = self.scaler.transform(X)
X = self.pca.transform(X)
# get label
label = self.clf.predict(X)
return label
""" | [
11748,
627,
392,
75,
198,
11748,
19798,
292,
62,
19608,
533,
5067,
13,
7890,
355,
3992,
198,
11748,
4818,
8079,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
1341,
35720,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
629,
541,
8... | 2.366197 | 852 |
from plots.plot_utils import plot
import os
if __name__ == '__main__':
base_address = '../plots_data/sml'
accuracy = os.path.join(
base_address,
'run-sml_model-MiniImagenetModel_mbs-4_n-5_k-1_stp-5_mini_imagenet_model_feature_10000_clusters_500_logs_train-tag-Accuracy.json'
)
accuracy_val = os.path.join(
base_address,
'run-sml_model-MiniImagenetModel_mbs-4_n-5_k-1_stp-5_mini_imagenet_model_feature_10000_clusters_500_logs_val-tag-Accuracy.json'
)
colors = ['red', 'green']
names = ['Train', 'Validation']
plot(
[accuracy, accuracy_val],
colors,
names,
output_name='Accuracy.pdf'
)
| [
6738,
21528,
13,
29487,
62,
26791,
1330,
7110,
198,
11748,
28686,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
2779,
62,
21975,
796,
705,
40720,
489,
1747,
62,
7890,
14,
82,
4029,
6,
198,
2... | 2.091185 | 329 |
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator
def loadfile(filename):
"""
load training data file
:param filename: resolute path correspond to this file
:return: x: itration, y: train accurrency
"""
data = open(filename,'r')
x = []
y = []
for line in data:
x_ = int(line.split(',')[0].split(' ')[1])
y_ = float(line.split(',')[1].split(' ')[3].replace('\n', ''))
x.append(x_)
y.append(y_)
return x, y
def pltdata(x, y, x_f, y_f):
"""
use matplotlib.pylib to visualize data
:param x: take care that x equals to x_f here because they are using same itrations
:param y:
:param x_f:
:param y_f:
:return:
"""
fig = plt.figure(1,figsize=(9,6))
# tick margin
xmajorLocator = MultipleLocator(4000)
xminorLocator = MultipleLocator(100)
ymajorLocator = MultipleLocator(0.2)
yminorLocator = MultipleLocator(0.05)
# left y axis
ax = fig.subplots(1)
ax.plot(x,y,color="blue",linestyle="-",label="MNIST")
ax.legend(loc="lower left",shadow=True)
ax.set_ylabel('MNIST-Accurrency')
ax.xaxis.set_major_locator(xmajorLocator)
ax.xaxis.set_minor_locator(xminorLocator)
ax.yaxis.set_major_locator(ymajorLocator)
ax.yaxis.set_minor_locator(yminorLocator)
ax.xaxis.grid(True, which='major')
ax.yaxis.grid(True, which='minor')
# right y axis
ax_f = ax.twinx()
ax_f.plot(x_f, y_f, color="red",linestyle="-",label="fashion-MNIST")
ax_f.legend(loc="lower right",shadow=True)
ax_f.set_ylabel("fashion-MNIST-Accurrency")
ax_f.xaxis.set_major_locator(xmajorLocator)
ax_f.xaxis.set_minor_locator(xminorLocator)
ax_f.yaxis.set_major_locator(ymajorLocator)
ax_f.yaxis.set_minor_locator(yminorLocator)
# common x axis display
ax.set_xlabel('Iterations/100')
plt.title('Supervised Learning:Traing Result')
plt.savefig('result.png',dpi=100)
plt.show()
if __name__ == "__main__":
x_f, y_f = loadfile("Misc/fashion_result.txt")
x, y = loadfile("Misc/deepResult.txt")
pltdata(x,y,x_f,y_f) | [
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
6738,
2603,
29487,
8019,
13,
83,
15799,
1330,
20401,
33711,
1352,
198,
198,
4299,
3440,
7753,
7,
34345,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
3440,
3047,
136... | 2.216441 | 961 |
"""Emoji
Available Commands:
.emoji shrug
.emoji apple
.emoji :/
.emoji -_-"""
from telethon import events
from userbot.utils import admin_cmd
import asyncio
@borg.on(admin_cmd(pattern="puta"))
@borg.on(events.NewMessage(pattern=r"\.(.*)", outgoing=True))
@borg.on(admin_cmd(pattern="fottiti"))
@borg.on(events.NewMessage(pattern=r"\.(.*)", outgoing=True))
| [
37811,
36,
5908,
7285,
198,
10493,
49505,
25,
198,
13,
368,
31370,
32545,
198,
13,
368,
31370,
17180,
198,
13,
368,
31370,
1058,
14,
198,
13,
368,
31370,
532,
22955,
37811,
198,
198,
6738,
5735,
400,
261,
1330,
2995,
198,
6738,
2836,
... | 2.708955 | 134 |
from input import test,actual
# returns a list of (group_size, dict_of_questions_answered)
| [
198,
6738,
5128,
1330,
1332,
11,
50039,
198,
198,
2,
5860,
257,
1351,
286,
357,
8094,
62,
7857,
11,
8633,
62,
1659,
62,
6138,
507,
62,
31966,
8,
628
] | 3.241379 | 29 |
from .base_processing import read_data, read_data_and_merge_temporal_features
"""
Features used :
133 - Left ventricular size and function : 22420, 22421, 22422, 22423, 22424, 22425, 22426, 22427
128 - Pulse wave analysis : 12673, 12674, 12675, 12676, 12677, 12678, 12679, 12680,
12681, 12682, 12683, 12684, 12686, 12687, 12697, 12698, 12699
"""
| [
6738,
764,
8692,
62,
36948,
1330,
1100,
62,
7890,
11,
1100,
62,
7890,
62,
392,
62,
647,
469,
62,
11498,
35738,
62,
40890,
198,
37811,
198,
23595,
973,
1058,
198,
197,
16945,
532,
9578,
7435,
41001,
2546,
290,
2163,
1058,
26063,
1238,
... | 2.75 | 128 |
from typing import BinaryIO
from formats.binary import BinaryReader, BinaryWriter
from formats.filesystem import FileFormat
import io
| [
6738,
19720,
1330,
45755,
9399,
198,
198,
6738,
17519,
13,
39491,
1330,
45755,
33634,
11,
45755,
34379,
198,
6738,
17519,
13,
16624,
6781,
1330,
9220,
26227,
198,
11748,
33245,
628,
628,
628,
628,
198
] | 4.205882 | 34 |
#mdc segundo o algoritmo de Euclides
# a b a%b
# 21 15 6 (15 % 21 = 15)
# 15 6 3 menor maior resto
# 6 3 0
#a lógica a, b = b, a%b
#repito até que a%b seja 0
#quando a%b for zero mdc é o b
a = int(input('a: '))
b = int(input('b: '))
while a%b != 0:
a, b = b, a%b
print (b)
| [
2,
9132,
66,
384,
70,
41204,
267,
435,
7053,
270,
5908,
390,
48862,
1460,
198,
2,
220,
220,
220,
257,
220,
220,
220,
275,
220,
220,
220,
257,
4,
65,
198,
2,
220,
220,
2310,
220,
1315,
220,
220,
220,
220,
718,
220,
220,
220,
22... | 1.60177 | 226 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# ==============================================================================
# Copyright 2018-2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Modified from TensorFlow example:
# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/label_image/label_image.py
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import numpy as np
import tensorflow as tf
import ngraph_bridge
import time
from subprocess import check_output, call
import shlex
if __name__ == "__main__":
file_name = "grace_hopper.jpg"
model_file = "inception_v3_2016_08_28_frozen.pb"
label_file = "imagenet_slim_labels.txt"
input_height = 299
input_width = 299
input_mean = 0
input_std = 255
input_layer = "input"
output_layer = "InceptionV3/Predictions/Reshape_1"
parser = argparse.ArgumentParser()
parser.add_argument("--graph", help="graph/model to be executed")
parser.add_argument("--input_layer", help="name of input layer")
parser.add_argument("--output_layer", help="name of output layer")
parser.add_argument("--labels", help="name of file containing labels")
parser.add_argument("--image", help="image to be processed")
parser.add_argument("--input_height", type=int, help="input height")
parser.add_argument("--input_width", type=int, help="input width")
parser.add_argument("--input_mean", type=int, help="input mean")
parser.add_argument("--input_std", type=int, help="input std")
args = parser.parse_args()
if not args.graph:
download_and_prepare()
else:
model_file = args.graph
if not args.input_layer:
raise Exception("Specify input layer for this network")
else:
input_layer = args.input_layer
if not args.output_layer:
raise Exception("Specify output layer for this network")
else:
output_layer = args.output_layer
if args.labels:
label_file = args.labels
else:
label_file = None
if args.image:
file_name = args.image
if args.input_height:
input_height = args.input_height
if args.input_width:
input_width = args.input_width
if args.input_mean:
input_mean = args.input_mean
if args.input_std:
input_std = args.input_std
graph = load_graph(model_file)
t = read_tensor_from_image_file(
file_name,
input_height=input_height,
input_width=input_width,
input_mean=input_mean,
input_std=input_std)
input_name = "import/" + input_layer
output_name = "import/" + output_layer
input_operation = graph.get_operation_by_name(input_name)
output_operation = graph.get_operation_by_name(output_name)
config = tf.compat.v1.ConfigProto()
config_ngraph_enabled = ngraph_bridge.update_config(config)
with tf.compat.v1.Session(
graph=graph, config=config_ngraph_enabled) as sess:
# Warmup
results = sess.run(output_operation.outputs[0],
{input_operation.outputs[0]: t})
# Run
import time
start = time.time()
results = sess.run(output_operation.outputs[0],
{input_operation.outputs[0]: t})
elapsed = time.time() - start
print('Time taken for inference: %f seconds' % elapsed)
results = np.squeeze(results)
if label_file:
top_k = results.argsort()[-5:][::-1]
labels = load_labels(label_file)
for i in top_k:
print(labels[i], results[i])
else:
print("No label file provided. Cannot print classification results")
| [
2,
15069,
2177,
383,
309,
22854,
37535,
46665,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
... | 2.778206 | 1,817 |
from setuptools import setup
from parsl.version import VERSION
install_requires = [
'ipyparallel'
]
tests_require = [
'ipyparallel',
'mock>=1.0.0',
'nose',
'pytest'
]
setup(
name='parsl',
version=VERSION,
description='Simple data dependent workflows in Python',
long_description='Simple and easy parallel workflows system for Python',
url='https://github.com/swift-lang/swift-e-lab',
author='Yadu Nand Babuji',
author_email='yadu@uchicago.edu',
license='Apache 2.0',
download_url = 'https://github.com/swift-lang/swift-e-lab/archive/0.1.tar.gz',
package_data={'': ['LICENSE']},
packages=['parsl', 'parsl.app', 'parsl.dataflow'],
install_requires=install_requires,
classifiers = [
# Maturity
'Development Status :: 3 - Alpha',
# Intended audience
'Intended Audience :: Developers',
# Licence, must match with licence above
'License :: OSI Approved :: Apache Software License',
# Python versions supported
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
keywords = ['Workflows', 'Scientific computing'],
#tests_require=tests_reequire
)
| [
6738,
900,
37623,
10141,
1330,
9058,
198,
6738,
13544,
75,
13,
9641,
1330,
44156,
2849,
198,
198,
17350,
62,
47911,
796,
685,
198,
220,
220,
220,
705,
541,
88,
1845,
29363,
6,
198,
220,
220,
220,
2361,
198,
198,
41989,
62,
46115,
79... | 2.523422 | 491 |
from django.db import models
# Create your models here.
from django.db import models
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
198,
2,
13610,
534,
4981,
994,
13,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
628
] | 3.625 | 24 |
"""
simple sample to access S3 service
"""
import os, botocore
import config as cfg
from boto3 import client
from boto3.s3.transfer import S3Transfer
from flask import Flask, request, render_template, redirect, url_for, send_file, make_response
from werkzeug import secure_filename
# create a S3 service client
client = client('s3',
aws_access_key_id = cfg.AWS_ACCESS_ID,
aws_secret_access_key=cfg.AWS_ACCESS_KEY,
region_name=cfg.AWS_ACCESS_REGION)
app = Flask(__name__)
@app.route('/')
def show_bucket():
"""
list all buckets in your S3 service
:return: flask render template
"""
buckets = client.list_buckets()
return render_template('show_bucket.html', buckets=buckets)
@app.route('/bucket/<bucket_name>', methods=['GET', 'POST'])
def show_file(bucket_name):
"""
if request method is GET, list all files in this bucket, otherwise upload file to S3
:return: flask render template
"""
try:
if request.method == 'GET':
objects = client.list_objects(Bucket=bucket_name)
return render_template('show_file.html', files=objects, bucket_name=bucket_name)
else:
file = request.files['file']
filename = secure_filename(file.filename)
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
upload_file_to_s3(bucket_name, filename)
remove_temp_file(filename)
return redirect(url_for('show_file', bucket_name=bucket_name))
except botocore.exceptions.ClientError as e:
return render_template('show_error.html', error_msg=str(e.response['Error']))
@app.route('/download/<bucket_name>/<filename>', methods=['GET', 'POST'])
def download_file_from_s3(bucket_name, filename):
"""
download the file from S3 and return to user
:return: file object
"""
filepath = os.path.join(app.config['UPLOAD_FOLDER'], filename)
transfer = S3Transfer(client)
try:
transfer.download_file(bucket_name, filename, filepath)
return send_file(filepath, as_attachment=True)
except botocore.exceptions.ClientError as e:
return render_template('show_error.html', error_msg=str(e.response['Error']))
def upload_file_to_s3(bucket_name, filename):
"""
create a S3Transfer to upload the file to S3
"""
transfer = S3Transfer(client)
transfer.upload_file(os.path.join(app.config['UPLOAD_FOLDER'], filename),
bucket_name,
filename)
def remove_temp_file(filename):
"""
remove the temporary file
"""
filepath = os.path.join(app.config['UPLOAD_FOLDER'], filename)
if os.path.exists(filepath) and os.path.isfile(filepath):
os.unlink(filepath)
if __name__ == '__main__':
# a temp folder for uploading
app.config['UPLOAD_FOLDER'] = cfg.SITE_UPLOAD_TMP_FOLDER
# limit the upload file size
app.config['MAX_CONTENT_LENGTH'] = 64 * 1024 * 1024
# run flask app according to specific setting
app.run(host=cfg.SITE_ADDRESS,
port=cfg.SITE_PORT,
debug=cfg.SITE_DEBUG)
| [
37811,
198,
36439,
6291,
284,
1895,
311,
18,
2139,
198,
37811,
198,
11748,
28686,
11,
10214,
420,
382,
198,
11748,
4566,
355,
30218,
70,
198,
6738,
275,
2069,
18,
1330,
5456,
198,
6738,
275,
2069,
18,
13,
82,
18,
13,
39437,
1330,
31... | 2.380917 | 1,331 |
from os import getenv
from neptune_python_utils.gremlin_utils import GremlinUtils
from neptune_python_utils.endpoints import Endpoints
def get_neptune_iam_connection(neptune_host, neptune_port=8182):
"""Returns a Neptune connection using IAM authentication. It expects valid
AWS credentials and the environment variable ``AWS_REGION``.
Example:
from neptune_helper import get_neptune_iam_connection
from gremlin_python.process.anonymous_traversal import traversal
conn = get_neptune_iam_connection("neptune.example.com", 8182)
g = traversal().withRemote(conn)
"""
region = getenv('AWS_REGION', None)
if region is None:
raise EnvVarNotSetError('AWS_REGION')
endpoints = Endpoints(
neptune_endpoint=neptune_host,
neptune_port=neptune_port,
region_name=region,
)
gremlin_utils = GremlinUtils(endpoints)
return gremlin_utils.remote_connection()
class EnvVarNotSetError(Exception):
"""It is returned when an environment variable was not set."""
| [
6738,
28686,
1330,
651,
24330,
198,
198,
6738,
497,
457,
1726,
62,
29412,
62,
26791,
13,
70,
17244,
62,
26791,
1330,
402,
17244,
18274,
4487,
198,
6738,
497,
457,
1726,
62,
29412,
62,
26791,
13,
437,
13033,
1330,
5268,
13033,
628,
198... | 2.696429 | 392 |
from Greengraph.greengraph import Greengraph
import matplotlib.pyplot as plt
from argparse import ArgumentParser
if __name__ == "__main__":
runIt() | [
6738,
3469,
34960,
13,
14809,
34960,
1330,
3469,
34960,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
6738,
1822,
29572,
1330,
45751,
46677,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
22... | 3.304348 | 46 |
from simple_converge.tf_regularizers.L1Regularizer import L1Regularizer
from simple_converge.tf_regularizers.L2Regularizer import L2Regularizer
from simple_converge.tf_regularizers.L1L2Regularizer import L1L2Regularizer
regularizers_collection = {
"l1_regularizer": L1Regularizer,
"l2_regularizer": L2Regularizer,
"l1_l2_regularizer": L1L2Regularizer
}
| [
6738,
2829,
62,
1102,
332,
469,
13,
27110,
62,
16338,
11341,
13,
43,
16,
40164,
7509,
1330,
406,
16,
40164,
7509,
201,
198,
6738,
2829,
62,
1102,
332,
469,
13,
27110,
62,
16338,
11341,
13,
43,
17,
40164,
7509,
1330,
406,
17,
40164,
... | 2.636364 | 143 |
#!/usr/bin/env python
# -*- coding: utf8 -*-
__version__ = '$Id$'
# author: Michał Niklas, michal.niklas@wp.pl
USAGE = """change "TRANSLATION" into "en:English text" for untranslated texts
\tusage:
\t\tlng_prepare.py [file_name]
\t\t\t-coverts selected file and creates .lng2 file
\t\tlng_prepare.py
\t\t\t-coverts pwsafe_*.lng files and creates .lng2 files
"""
# untranslated Polish texts
"""
; START_SHOW "Show in start menu"
LangString START_SHOW ${LANG_POLISH} "TRANSLATION"
; START_SHORTCUT "Install desktop shortcut"
LangString START_SHORTCUT ${LANG_POLISH} "TRANSLATION"
"""
import sys
import glob
import re
RE_ENG_TXT = re.compile(r';\s*(\w+)\s+\"(.*)\"')
RE_TRANSLATION = re.compile(r'LangString\s+(\w+)\s+.* \"(\w+)\"')
if '--version' in sys.argv:
print(__version__)
elif '--help' in sys.argv:
print(USAGE)
else:
ac = 0
for fn in sys.argv[1:]:
if not fn.startswith('--'):
prepare_file(fn)
ac += 1
if ac < 1:
for fn in glob.glob('pwsafe_??.lng'):
prepare_file(fn)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
23,
532,
9,
12,
198,
198,
834,
9641,
834,
796,
705,
3,
7390,
3,
6,
198,
198,
2,
1772,
25,
38844,
41615,
11271,
21921,
11,
285,
488,
282,
13,... | 2.231111 | 450 |
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
from .utils import log
POST_URL = "https://api.speedcurve.com/v1/deploys"
def deploy_ping(
api_key: str, site_id: str, note: str, detail: str, dry_run: bool = False
):
"""Based on https://api.speedcurve.com/#add-a-deploy"""
data = {
"site_id": site_id,
}
if note:
data["note"] = note
if detail:
data["detail"] = detail
if dry_run:
log.info(f"Posting {data} to {POST_URL} with API key {api_key[:3] + '...'!r}")
return
adapter = HTTPAdapter(
max_retries=Retry(
backoff_factor=0.3,
status_forcelist=[429, 500, 502, 503, 504],
method_whitelist=["POST"],
)
)
session = requests.Session()
session.mount("https://", adapter)
auth = (api_key, "x")
response = session.post(POST_URL, data=data, auth=auth)
response.raise_for_status()
log.info(response.json())
| [
11748,
7007,
198,
6738,
7007,
13,
324,
12126,
1330,
14626,
47307,
198,
6738,
7007,
13,
43789,
13,
333,
297,
571,
18,
13,
22602,
13,
1186,
563,
1330,
4990,
563,
198,
198,
6738,
764,
26791,
1330,
2604,
198,
198,
32782,
62,
21886,
796,
... | 2.273942 | 449 |
import pytest
import utils
@pytest.mark.db
@pytest.mark.asyncio
async def test_without_connection(database):
"""Test maybe_acquire without an existing connection."""
async with utils.db.maybe_acquire(database, None) as connection:
value = await connection.fetchval("SELECT 1;")
assert value == 1
@pytest.mark.db
@pytest.mark.asyncio
async def test_with_connection(database):
"""Test maybe_acquire with an existing connection."""
async with database.acquire() as existing:
async with utils.db.maybe_acquire(database, existing) as connection:
value = await connection.fetchval("SELECT 1;")
assert value == 1
| [
11748,
12972,
9288,
198,
11748,
3384,
4487,
198,
198,
31,
9078,
9288,
13,
4102,
13,
9945,
198,
31,
9078,
9288,
13,
4102,
13,
292,
13361,
952,
198,
292,
13361,
825,
1332,
62,
19419,
62,
38659,
7,
48806,
2599,
198,
220,
220,
220,
3722... | 2.907895 | 228 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
import unittest
import markdown
from markdown.extensions.meta import MetaExtension
from mdx_wikilink_plus.mdx_wikilink_plus import WikiLinkPlusExtension
unittest.TestLoader.sortTestMethodsUsing = None
meta_text = """
wiki_base_url: /local
wiki_url_whitespace: _
wiki_url_case: lowercase
wiki_label_case: capitalize
wiki_html_class: wiki-lnk
wiki_image_class: wiki-img
""".strip()
text = """
[[wikilink]] `[[wikilink]]`
[[/path/to/file name]]
[[/path/to/file_name]]
[[/path/to/file-name]]
[[/path/to/file name/?a=b&b=c]]
[[/path/to/file name.html]]
[[/path/to/file name.html?a=b&b=c]]
[[https://www.example.com/?]]
[[https://www.example.com/?a=b&b=c]]
[[https://www.example.com/example-tutorial]]
[[https://www.example.com/example-tutorial | Example Tutorial]]
[[wikilink.png]]
[[/path/to/file name.jpg?a=b&b=c]]
[[https://example.jpeg?a=b&b=c]]
[[https://www.example.com/example-tutorial.jpeg]]
[[https://example.com/example-tutorial.gif | Example Tutorial]]
[[example tutorial.jpg | Example-Tutorial| alt= better example |alt=Alternate example]]
""".strip()
md_configs1 = {
'mdx_wikilink_plus': {
'base_url': '/static',
'end_url': '.html',
'url_case': 'lowercase',
'html_class': 'a-custom-class',
},
}
md_configs2 = {
'mdx_wikilink_plus': {
'base_url': '/static',
'end_url': '.html',
'url_whitespace': '-',
'url_case': 'uppercase',
'label_case': 'titlecase',
'image_class': 'wikilink',
'build_url': build_url,
},
}
if __name__ == "__main__":
unittest.main()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
11748,
555,
715,
395,
198,
11748,
1317,
2902,
... | 2.038462 | 936 |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import matplotlib.pyplot as plt
from matplotlib.testing.decorators import image_comparison
import matplotlib.patches as mpatches
@image_comparison(baseline_images=['fancyarrow_test_image'])
@image_comparison(baseline_images=['boxarrow_test_image'], extensions=['png'])
def __prepare_fancyarrow_dpi_cor_test():
"""
Convenience function that prepares and returns a FancyArrowPatch. It aims
at being used to test that the size of the arrow head does not depend on
the DPI value of the exported picture.
NB: this function *is not* a test in itself!
"""
fig2 = plt.figure("fancyarrow_dpi_cor_test", figsize=(4, 3), dpi=50)
ax = fig2.add_subplot(111)
ax.set_xlim([0, 1])
ax.set_ylim([0, 1])
ax.add_patch(mpatches.FancyArrowPatch(posA=(0.3, 0.4), posB=(0.8, 0.6),
lw=3, arrowstyle=u'->',
mutation_scale=100))
return fig2
@image_comparison(baseline_images=['fancyarrow_dpi_cor_100dpi'],
remove_text=True, extensions=['png'],
savefig_kwarg=dict(dpi=100))
def test_fancyarrow_dpi_cor_100dpi():
"""
Check the export of a FancyArrowPatch @ 100 DPI. FancyArrowPatch is
instantiated through a dedicated function because another similar test
checks a similar export but with a different DPI value.
Remark: test only a rasterized format.
"""
__prepare_fancyarrow_dpi_cor_test()
@image_comparison(baseline_images=['fancyarrow_dpi_cor_200dpi'],
remove_text=True, extensions=['png'],
savefig_kwarg=dict(dpi=200))
def test_fancyarrow_dpi_cor_200dpi():
"""
As test_fancyarrow_dpi_cor_100dpi, but exports @ 200 DPI. The relative size
of the arrow head should be the same.
"""
__prepare_fancyarrow_dpi_cor_test()
@image_comparison(baseline_images=['fancyarrow_dash'],
remove_text=True, extensions=['png'],
style='default')
| [
6738,
11593,
37443,
834,
1330,
357,
48546,
62,
11748,
11,
7297,
11,
3601,
62,
8818,
11,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
28000,
1098,
62,
17201... | 2.279095 | 928 |
import unittest
import StringIO
import argparse
from mock import patch, Mock
from appscale.tools.appscale_stats import (
get_node_stats_rows, get_process_stats_rows, get_summary_process_stats_rows, get_proxy_stats_rows,
sort_process_stats_rows, sort_proxy_stats_rows, show_stats,
INCLUDE_NODE_LIST, _get_stats
)
| [
11748,
555,
715,
395,
198,
11748,
10903,
9399,
198,
11748,
1822,
29572,
198,
198,
6738,
15290,
1330,
8529,
11,
44123,
198,
198,
6738,
598,
9888,
13,
31391,
13,
1324,
9888,
62,
34242,
1330,
357,
198,
220,
651,
62,
17440,
62,
34242,
62,... | 2.891892 | 111 |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import rospy
from std_msgs.msg import Header, ColorRGBA
from geometry_msgs.msg import Pose, Point, Quaternion, Vector3
from visualization_msgs.msg import Marker, MarkerArray
from ros947d_vmarker import create_marker
if __name__ == '__main__':
main() | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
21004,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
686,
2777,
88,
198,
198,
6738,
14367,
62,
907,
14542,
13,
19662,
1330,
48900,
11,
5315,
48192,
4339,
198,
... | 2.754545 | 110 |
# -*- coding: utf-8 -*-
import discord
import random
import math
from discord.ext import commands
"""
This extension implements basic commands based upon the use of randomly
generated numbers or choices, just to add some interactivity to the bot.
"""
class RNG:
"""
8ball is a very simple eight ball command, answering a yes or no
question with a randomly selected choice, having the possibility
of 2 uncertain answers, 4 affirmative answers and 4 negative answers.
"""
@commands.command(name="8ball",
description="Answers all your yes or no questions.",
pass_context=True)
"""
roll simulates the roll of a dice, although being able to take any
amount of sides. It allows for you to roll multiple dice and add
a value to the final result.
"""
@commands.command(name="roll",
description="Rolls a dice of your choice. Use -d to " +
"see all rolls.",
aliases=['dice', 'r'])
"""
choose has the bot choose randomly from a set of text options the user
provides, separated by commas.
"""
@commands.command(name="choose",
description="Chooses between a set of options. " +
"Separate them by a comma.",
aliases=["choice"],
pass_context=True)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
36446,
198,
11748,
4738,
198,
11748,
10688,
198,
198,
6738,
36446,
13,
2302,
1330,
9729,
198,
198,
37811,
198,
1212,
7552,
23986,
4096,
9729,
1912,
2402,
262,
... | 2.504363 | 573 |
# -*- coding: utf-8 -*-
# Used by setup.py, so minimize top-level imports.
VERSION = (2, 2, 0)
__version__ = ".".join(str(i) for i in VERSION)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
16718,
416,
9058,
13,
9078,
11,
523,
17775,
1353,
12,
5715,
17944,
13,
198,
198,
43717,
796,
357,
17,
11,
362,
11,
657,
8,
198,
198,
834,
9641,
834,
796,
366,
5... | 2.377049 | 61 |
import requests
import json
requests.Response.text = Response.text
requests.Response.json = Response.json
requests.Response.getData = Response.getData | [
11748,
7007,
198,
11748,
33918,
628,
198,
8897,
3558,
13,
31077,
13,
5239,
796,
18261,
13,
5239,
198,
8897,
3558,
13,
31077,
13,
17752,
796,
18261,
13,
17752,
198,
8897,
3558,
13,
31077,
13,
1136,
6601,
796,
18261,
13,
1136,
6601
] | 3.707317 | 41 |
# 2 sum intends to see if 2 numbers in an array can sum up to a target number
# Constraints. no element is repeated
# brute force
# Algorithm
# Start from beginning of array
# add numbers from beginning + 1 till the end looking for match(sum)
# if match is found return current start and index of match
# if match is not found do for next index
# repeat
arr = [3,4,2]
target = 6
print(find_target1(arr, target))
# # Time complexity n2(quadratic)
# # Space complexity n
# Can this algorithm be optimized? yes
# Algorithm
# Begin from array start position
# subtract value from target
# check if result is in array
# return beginning index, result index
# if not found, repeat starting at next array position
print(find_target2(arr, target))
| [
198,
2,
362,
2160,
19582,
284,
766,
611,
362,
3146,
287,
281,
7177,
460,
2160,
510,
284,
257,
2496,
1271,
198,
2,
1482,
2536,
6003,
13,
645,
5002,
318,
5100,
220,
198,
198,
2,
33908,
2700,
198,
198,
2,
978,
42289,
220,
198,
2,
7... | 3.472727 | 220 |
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 7 23:03:28 2016
@author: yxl
"""
from sciapp.action import Simple
import scipy.ndimage as ndimg
import numpy as np | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
3300,
4280,
220,
767,
2242,
25,
3070,
25,
2078,
1584,
198,
31,
9800,
25,
331,
87,
75,
198,
37811,
198,
198,
6738,
629,
544,
381,
13,
2673,
13... | 2.507692 | 65 |
# Create your views here.
from django.shortcuts import render
from django.http import HttpResponse
from django.utils import translation
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import pgettext_lazy as __
# 언어 코드를 변경하는 뷰를 만들어 보기
# 1) url named group을 통해 language code 받기
from django.conf import settings
# 2) 쿼리 스트링으로 language code 받기
# 3) 언어별 설정 변경 뷰를 별도로 만들기
| [
2,
13610,
534,
5009,
994,
13,
198,
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
198,
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
31077,
198,
6738,
42625,
14208,
13,
26791,
1330,
11059,
198,
6738,
42625,
14208,
13,
26791,
13,
41... | 1.761702 | 235 |
from __future__ import division
import numpy as np
import scipy
import pylab
from pylab import *
import random
import itertools
import cv
from Struct import Struct
import utils as ut
import pairdict as pd
import pandas as pa
import ppi_utils as pu
#COLORS = ['#4571A8', 'black', '#A8423F', '#89A64E', '#6E548D', '#3D96AE',
#'#DB843D', '#91C4D5', '#CE8E8D', '#B6CA93', '#8EA5CB', 'yellow',
#'gray', 'blue']
#COLORS_BLACK = ['#4571A8', 'white', '#A8423F', '#89A64E', '#6E548D', '#3D96AE',
#'#DB843D', '#91C4D5', '#CE8E8D', '#B6CA93', '#8EA5CB', 'yellow',
#'gray', 'blue']
COLORSTRING = "4571A8, 000000, A8423F, 89A64E, 6E548D, 3D96AE, DB843D, 91C4D5, CE8E8D, B6CA93, 8EA5CB, FFFF00, 404040, 0000FF"
COLORS_WHITE = ["#"+c for c in COLORSTRING.split(', ')]
COLORSTRING_BLACK = "4571A8, FFFFFF, A8423F, 89A64E, 6E548D, 3D96AE, DB843D, 91C4D5, CE8E8D, B6CA93, 8EA5CB, FFFF00, 404040, 0000FF"
COLORS_BLACK = ["#"+c for c in COLORSTRING_BLACK.split(', ')]
COLORS = COLORS_WHITE
def stacked_bar(names, values):
"""
values is a lol. values[0] corresponds to the values for names[0].
"""
valuesT = zip(*values)
padded = [[0]*len(valuesT[0])] + valuesT # 0s in first row is helpful
arr = np.array(padded)
arrcum = np.cumsum(arr, axis=0)
for i in range(1, arr.shape[1]):
bar(range(1,arr.shape[0]+1), arr[i], align='center', bottom=arrcum[i-1],
color=COLORS[i-1])
ax = gca()
ax.set_xticklabels(['']+names)
df = pa.DataFrame(arr[1:][::-1], columns=names)
print df
return df
# def cluster(corr):
# # corr: a matrix of similarity scores, such as a covariance matrix
# ymat = hcluster.pdist(corr)
# zmat = hcluster.linkage(ymat)
# figure()
# order = hcluster.dendrogram(zmat)['leaves']
# figure()
# imshow(corr[order,:][:,order])
# # check for failure signs
# for i in random.sample(range(len(order)),10):
# if order[i] - order[i-1] == 1:
# print 'HEY!! probable clustering failure.'
# break
# return order
def pr_plot(cv_pairs, total_trues, rescale=None, style=None, prec_test=None,
true_ints=None, return_data=False, do_plot=True, **kwargs):
"""
rescale: adjust precision values assuming rescale times as many negatives
total_trues:
- None for just displaying recall count instead of fraction
- 'auto' to calculate from the supplied tested cv_pairs
- integer to use that supplied integer as total trues
"""
if true_ints:
pdtrues = pd.PairDict(true_ints)
cv_pairs = [(p[0],p[1],p[2],1 if pdtrues.contains(tuple(p[:2])) else 0) for p
in cv_pairs]
if total_trues == 'auto':
total_trues = len([t for t in cv_pairs if t[3]==1])
recall,precision = cv.pr(cv_pairs)
if rescale:
precision = [ p / (p + (1-p) * rescale) for p in precision]
if prec_test:
kwargs['label'] = kwargs.get('label','') + (' Re:%0.2f' %
cv.calc_recall(precision,prec_test, total_trues)) + (' @ Pr:%0.2f'
% prec_test)
if total_trues:
recall = [r/total_trues for r in recall]
args = [style] if style is not None else []
if do_plot:
plot(recall, precision, *args, **kwargs)
xlabel('Recall: TP/(TP+FN)')
ylabel('Precision: TP/(TP+FP)')
ylim(-0.02,1.02)
xlim(xmin=-0.002)
legend()
if return_data:
return recall,precision
def ppis_scatter(ppis1, ppis2, useinds=range(3)):
"""
useinds: set to [0,1,3,2] to take ppi.learning_examples output into (score,
t/f) tuples; [0,1,3] to exclude the class.
"""
pd1,pd2 = [pd.PairDict([[p[i] for i in useinds] for p in ppis])
for ppis in ppis1,ppis2]
nvals = len(useinds)-2
pdcomb = pd.pd_union_disjoint_vals(pd1, pd2, adefaults=[0]*nvals,
bdefaults=[0]*nvals)
vals = zip(*ut.i1(pdcomb.d.items()))
v1s,v2s = zip(*vals[:nvals]), zip(*vals[nvals:])
v1s,v2s = [ut.i0(x) for x in v1s,v2s]
return v1s,v2s
def scatter_union_labeled(avals, alabels, bvals, blabels):
"""
vals are the columns of data to scatter (eg, el.mat[:,0]).
labels are el.prots.
"""
dfs = [pa.DataFrame(data=vals,index=labels) for vals,labels in
[(avals,alabels),(bvals,blabels)]]
dfout = dfs[0].join(dfs[1], how='outer', rsuffix='_b')
dfout = dfout.fillna(0)
return dfout.values[:,0],dfout.values[:,1]
def multi_scatter(comps,scatter_func=scatter_blake, preprocess=None,
names=None, **kwargs):
"""
Takes care of making subplots and labeling axes when comparing more than
two sets of values.
"""
total = len(comps)
for i in range(total):
for j in range(i+1,total):
n = (total-1)*i+j
print i,j,n
subplot(total-1, total-1, n)
ys,xs = comps[i],comps[j]
# this syntax is mis-interpreted, and both new values go into xs
#xs,ys = preprocess(xs,ys) if preprocess else xs,ys
if preprocess:
xs,ys = preprocess(xs,ys)
scatter_func(xs,ys, **kwargs)
if names and j==i+1:
ylabel(names[i])
xlabel(names[j])
def hist_pairs_nonpairs(scores, pairs, negmult=1, do_plot=True, **kwargs):
"""
scores: list of tuples (id1, id2, score)
pairs: list of tuples (id1, id2)
Make a histogram for scores of pairs against random sampling of non-pairs
from the set of ids making up pairs.
"""
assert len(pairs[0])==2, "Too many data points"
nonpairs = pu.nonpairs_gen(pairs, len(pairs)*negmult)
pscores, nscores = [[x for x in scorelist_pairs(l, scores)] for l in pairs, nonpairs]
if do_plot:
hist(pscores, **kwargs)
hist(nscores, **kwargs)
return pscores, nscores
| [
6738,
11593,
37443,
834,
1330,
7297,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
629,
541,
88,
198,
11748,
279,
2645,
397,
198,
6738,
279,
2645,
397,
1330,
1635,
198,
11748,
4738,
198,
11748,
340,
861,
10141,
198,
11748,
269,
85,
... | 2.058782 | 2,858 |