content
stringlengths
1
1.05M
input_ids
listlengths
1
883k
ratio_char_token
float64
1
22.9
token_count
int64
1
883k
# # Copyright (c) 2020, Andrey "Limych" Khrolenok <andrey@khrolenok.ru> # Creative Commons BY-NC-SA 4.0 International Public License # (see LICENSE.md or https://creativecommons.org/licenses/by-nc-sa/4.0/) # """ The Snowtire binary sensor. For more details about this platform, please refer to the documentation at https://github.com/Limych/ha-snowtire/ """
[ 2, 198, 2, 220, 15069, 357, 66, 8, 12131, 11, 843, 4364, 366, 19352, 88, 354, 1, 5311, 3225, 268, 482, 1279, 392, 4364, 31, 14636, 3225, 268, 482, 13, 622, 29, 198, 2, 220, 17404, 13815, 11050, 12, 7792, 12, 4090, 604, 13, 15, ...
2.828125
128
from __future__ import (division) from pomegranate import * from pomegranate.io import DataGenerator from pomegranate.io import DataFrameGenerator from nose.tools import with_setup from nose.tools import assert_almost_equal from nose.tools import assert_equal from nose.tools import assert_not_equal from nose.tools import assert_less_equal from nose.tools import assert_raises from nose.tools import assert_true from numpy.testing import assert_array_almost_equal import pandas import random import pickle import numpy as np nan = numpy.nan def test_io_fit(): X = numpy.random.randn(100, 5) + 0.5 weights = numpy.abs(numpy.random.randn(100)) y = numpy.random.randint(2, size=100) data_generator = DataGenerator(X, weights, y) mu1 = numpy.array([0, 0, 0, 0, 0]) mu2 = numpy.array([1, 1, 1, 1, 1]) cov = numpy.eye(5) d1 = MultivariateGaussianDistribution(mu1, cov) d2 = MultivariateGaussianDistribution(mu2, cov) bc1 = BayesClassifier([d1, d2]) bc1.fit(X, y, weights) d1 = MultivariateGaussianDistribution(mu1, cov) d2 = MultivariateGaussianDistribution(mu2, cov) bc2 = BayesClassifier([d1, d2]) bc2.fit(data_generator) logp1 = bc1.log_probability(X) logp2 = bc2.log_probability(X) assert_array_almost_equal(logp1, logp2) def test_io_from_samples(): X = numpy.random.randn(100, 5) + 0.5 weights = numpy.abs(numpy.random.randn(100)) y = numpy.random.randint(2, size=100) data_generator = DataGenerator(X, weights, y) d = MultivariateGaussianDistribution bc1 = BayesClassifier.from_samples(d, X=X, y=y, weights=weights) bc2 = BayesClassifier.from_samples(d, X=data_generator) logp1 = bc1.log_probability(X) logp2 = bc2.log_probability(X) assert_array_almost_equal(logp1, logp2)
[ 6738, 11593, 37443, 834, 1330, 357, 21426, 8, 198, 198, 6738, 279, 462, 46324, 378, 1330, 1635, 198, 6738, 279, 462, 46324, 378, 13, 952, 1330, 6060, 8645, 1352, 198, 6738, 279, 462, 46324, 378, 13, 952, 1330, 6060, 19778, 8645, 1352,...
2.562865
684
#! /usr/bin/python from .solution import Solution try: import gurobipy except ImportError: print("Gurobi not found: error ignored to allow tests") def callback_factory(scoring: AbstactVariableScoring): if isinstance(scoring, VariableRanking): output = __build_callback__(scoring) else: output = None return output def __build_callback__(scoring): return callback
[ 2, 0, 1220, 14629, 14, 8800, 14, 29412, 198, 198, 6738, 764, 82, 2122, 1330, 28186, 198, 198, 28311, 25, 198, 220, 220, 220, 1330, 915, 22609, 541, 88, 198, 16341, 17267, 12331, 25, 198, 220, 220, 220, 3601, 7203, 38, 1434, 8482, ...
2.881944
144
import sys from fetchcode.vcs.pip._internal.cli.main import main from fetchcode.vcs.pip._internal.utils.typing import MYPY_CHECK_RUNNING if MYPY_CHECK_RUNNING: from typing import Optional, List def _wrapper(args=None): # type: (Optional[List[str]]) -> int """Central wrapper for all old entrypoints. Historically pip has had several entrypoints defined. Because of issues arising from PATH, sys.path, multiple Pythons, their interactions, and most of them having a pip installed, users suffer every time an entrypoint gets moved. To alleviate this pain, and provide a mechanism for warning users and directing them to an appropriate place for help, we now define all of our old entrypoints as wrappers for the current one. """ sys.stderr.write( "WARNING: pip is being invoked by an old script wrapper. This will " "fail in a future version of pip.\n" "Please see https://github.com/pypa/pip/issues/5599 for advice on " "fixing the underlying issue.\n" "To avoid this problem you can invoke Python with '-m pip' instead of " "running pip directly.\n" ) return main(args)
[ 11748, 25064, 198, 198, 6738, 21207, 8189, 13, 85, 6359, 13, 79, 541, 13557, 32538, 13, 44506, 13, 12417, 1330, 1388, 198, 6738, 21207, 8189, 13, 85, 6359, 13, 79, 541, 13557, 32538, 13, 26791, 13, 774, 13886, 1330, 17615, 47, 56, 6...
3.010204
392
''' Support for generating documentation readmes for the extensions. Extracts from decorated lua block comments and xml comments. ''' from pathlib import Path from lxml import etree import sys from itertools import chain project_dir = Path(__file__).resolve().parents[1] # Set up an import from the customizer for some text processing. x4_customizer_dir = str(project_dir.parent / 'X4_Customizer') if x4_customizer_dir not in sys.path: sys.path.append(x4_customizer_dir) from Framework.Make_Documentation import Merge_Lines #from Framework.Make_Documentation import Get_BB_Text # Grab the project specifications. from Release_Specs import release_specs def Sections_To_Lines(doc_text_sections): ''' Converts a dict of {section label: text} to a list of text lines, with labelling and formatting applied. Expects the input to start with a 'title', then 'overview', then a series of names of cues or functions. ''' # Transfer to annotated/indented lines. functions_started = False title = '' ret_text_lines = [] for key, text in doc_text_sections: # Extract the title and continue; this isn't printed directly. if key == 'title': title = text.strip() continue # Header gets an 'overview' label. if key == 'overview': ret_text_lines += ['', '### {} Overview'.format(title), ''] indent = '' # Lua functions are in one lump, like overview. elif key == 'functions': ret_text_lines += ['', '### {} Functions'.format(title), ''] indent = '' # Sections may be multiple. elif key == 'section': ret_text_lines += ['',''] indent = '' # Otherwise these are md cues. else: indent = ' ' # Stick a label line when starting the function section. if not functions_started: functions_started = True ret_text_lines += ['', '### {} Cues'.format(title), ''] # Bullet the function name. ret_text_lines.append('* **{}**'.format(key)) # Process the text a bit. text = Merge_Lines(text) # Add indents to functions, and break into convenient lines. text_lines = [indent + line for line in text.splitlines()] # Record for output. ret_text_lines += text_lines return ret_text_lines def Get_XML_Cue_Text(xml_path): ''' Returns a list of lines holding the documentation extracted from a decorated MD xml file. ''' # List of tuples of (label, text) hold the extracted text lines. doc_text_sections = [] # Read the xml and pick out the cues. tree = etree.parse(str(xml_path)) root = tree.xpath('/*')[0] cues = tree.xpath('/*/cues')[0] # Stride through comments/cues in the list. # Looking for decorated comments. for node in chain(root.iterchildren(), cues.iterchildren()): # Skip non-comments. # Kinda awkward how lxml checks this (isinstance doesn't work). if node.tag is not etree.Comment: continue # Handle title declarations. if '@doc-title' in node.text: label = 'title' text = node.text.replace('@doc-title','') elif '@doc-overview' in node.text: label = 'overview' text = node.text.replace('@doc-overview','') elif '@doc-section' in node.text: label = 'section' text = node.text.replace('@doc-section','') elif '@doc-cue' in node.text: label = node.getnext().get('name') text = node.text.replace('@doc-cue','') else: # Unwanted comment; skip. continue # Record it. doc_text_sections.append((label, text)) # Process into lines and return. return Sections_To_Lines(doc_text_sections) def Get_Lua_Text(lua_path): ''' Extract documentation text from a decorated lua file. ''' text = lua_path.read_text() ret_text_lines = [] # Extract non-indented comments. # TODO: maybe regex this. comment_blocks = [] lua_lines = text.splitlines() i = 0 while i < len(lua_lines): this_line = lua_lines[i] if this_line.startswith('--[['): # Scan until the closing ]]. these_lines = [] # Record the first line. these_lines.append(this_line.replace('--[[','')) i += 1 # Only search to the end of the doc. while i < len(lua_lines): next_line = lua_lines[i] if next_line.startswith(']]'): # Found the last line; skip it. break these_lines.append(next_line) i += 1 comment_blocks.append('\n'.join(these_lines)) # Check single-line comments after block comments, to avoid # -- confusion. elif this_line.startswith('--'): comment_blocks.append(this_line.replace('--','')) # Always one increment per loop. i += 1 # Title to put on label lines. # Starts blank, filled by decorator. title = '' # List of tuples of (label, text) hold the extracted text lines. doc_text_sections = [] # Go through the comments looking for decorators. for comment in comment_blocks: # Handle title declarations. if '@doc-title' in comment: label = 'title' text = comment.replace('@doc-title','') # Text blocks are either overview or cue. elif '@doc-overview' in comment: label = 'overview' text = comment.replace('@doc-overview','') # For now, all functions are lumped together in one comment. elif '@doc-functions' in comment: label = 'functions' text = comment.replace('@doc-functions','') else: # Unwanted comment; skip. continue # Record it. doc_text_sections.append((label, text)) # Process into lines and return. return Sections_To_Lines(doc_text_sections) #-Removed; generally avoiding putting main docs on the forum. #def Make_BB_Code(doc_dir, header_lines = []): # ''' # Turn the ext_dir's readme into a bbcode txt file. # Output is placed in the release folder. # ''' # release_dir = project_dir / 'Release' # if not release_dir.exists(): # release_dir.mkdir() # # # Grab the readme contents. # doc_lines = (doc_dir / 'Readme.md').read_text().splitlines() # # Generate a bbcode version, prefixing with custom header. # bb_lines = header_lines + Get_BB_Text(doc_lines) # (release_dir / (doc_dir.name + '_bb_readme.txt')).write_text('\n'.join(bb_lines)) # return if __name__ == '__main__': Make()
[ 7061, 6, 198, 15514, 329, 15453, 10314, 1100, 6880, 329, 262, 18366, 13, 198, 11627, 974, 82, 422, 24789, 300, 6413, 2512, 3651, 290, 35555, 3651, 13, 198, 7061, 6, 198, 198, 6738, 3108, 8019, 1330, 10644, 198, 6738, 300, 19875, 1330,...
2.310561
3,030
#Addition of two numbers a = 30 b = 17 print("Sum of a and b is",a + b)
[ 2, 4550, 653, 286, 734, 3146, 198, 64, 796, 1542, 198, 65, 796, 1596, 198, 4798, 7203, 13065, 286, 257, 290, 275, 318, 1600, 64, 1343, 275, 8 ]
2.535714
28
import sys from PyQt5.QtWidgets import (QApplication, QMainWindow, QPushButton, QToolTip, QLabel, QLineEdit) from PyQt5 import QtGui if __name__ == '__main__': app = QApplication(sys.argv) janela = Janela() janela.carregar_janela() sys.exit(app.exec_())
[ 11748, 25064, 198, 6738, 9485, 48, 83, 20, 13, 48, 83, 54, 312, 11407, 1330, 357, 48, 23416, 11, 198, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, ...
1.622568
257
import base64 import os import sys import PyPDF2 svg = '''<svg id="write-document" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink"> <rect id="write-doc-background" width="100%" height="100%" fill="#808080"/> <defs id="write-defs"> <script type="text/writeconfig"> <int name="docFormatVersion" value="2" /> <int name="pageColor" value="-1" /> <int name="pageNum" value="0" /> <int name="ruleColor" value="0" /> <float name="marginLeft" value="0" /> <float name="xOffset" value="-380.701752" /> <float name="xRuling" value="0" /> <float name="yOffset" value="1536.84216" /> <float name="yRuling" value="0" /> </script> </defs> ''' pdf_path = sys.argv[1] pdf = PyPDF2.PdfFileReader(pdf_path, "rb") img_width = 720 n_pages = pdf.getNumPages() + 1 page = pdf.getPage(0) width = page.mediaBox.getWidth() height = page.mediaBox.getHeight() aspect_ratio = height/width img_height = int(aspect_ratio * img_width) os.system('mkdir -p /tmp/pdf2write') new_page_height = 0 for page in range(n_pages): print(f"Processing {page}/{n_pages}", end='\r') os.system(f'pdftoppm {pdf_path} /tmp/pdf2write/tmp{page} -png -f {page} -singlefile') with open(f'/tmp/pdf2write/tmp{page}.png', 'rb') as f: base64_data = base64.b64encode(f.read()).decode('utf-8') tmp_svg = f'''<svg class="write-page" color-interpolation="linearRGB" x="10" y="{new_page_height+10}" width="{img_width}px" height="{img_height}px" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink"> <g class="write-content write-v3" width="{img_width}" height="{img_height}" xruling="0" yruling="0" marginLeft="0" papercolor="#FFFFFF" rulecolor="#00000000"> <g class="ruleline write-std-ruling write-scale-down" fill="none" stroke="none" stroke-width="1" shape-rendering="crispEdges" vector-effect="non-scaling-stroke"> <rect class="pagerect" fill="#FFFFFF" stroke="none" x="0" y="0" width="{img_width}" height="{img_height}" /> </g> <image x="0" y="0" width="{img_width}" height="{img_height}" xlink:href="data:image/png;base64,{base64_data}"/> </g> </svg>''' new_page_height += (img_height+10) svg += tmp_svg svg += '''</svg>''' os.system('rm -rf /tmp/pdf2write') with open(f'{os.path.dirname(pdf_path)}/{os.path.basename(pdf_path).split(".")[0]}.svg', 'w') as f: f.write(svg) os.system(f'gzip -S z {os.path.dirname(pdf_path)}/{os.path.basename(pdf_path).split(".")[0]}.svg')
[ 11748, 2779, 2414, 198, 11748, 28686, 198, 11748, 25064, 198, 11748, 9485, 20456, 17, 198, 198, 21370, 70, 796, 705, 7061, 27, 21370, 70, 4686, 2625, 13564, 12, 22897, 1, 35555, 5907, 2625, 4023, 1378, 2503, 13, 86, 18, 13, 2398, 14, ...
2.390671
1,029
from typing import Dict, List, cast from py_headless_daw.project.parameter import Parameter, ParameterValueType, ParameterRangeType
[ 6738, 19720, 1330, 360, 713, 11, 7343, 11, 3350, 198, 198, 6738, 12972, 62, 2256, 1203, 62, 67, 707, 13, 16302, 13, 17143, 2357, 1330, 25139, 2357, 11, 25139, 2357, 11395, 6030, 11, 25139, 2357, 17257, 6030, 628 ]
3.526316
38
## # This class encapsulates a Region Of Interest, which may be either horizontal # (pixels) or vertical (rows/lines).
[ 2235, 198, 2, 770, 1398, 32652, 15968, 257, 17718, 3226, 12033, 11, 543, 743, 307, 2035, 16021, 220, 198, 2, 357, 79, 14810, 8, 393, 11723, 357, 8516, 14, 6615, 737, 198 ]
3.75
32
#!/usr/bin/python # Author: Zion Orent <zorent@ics.com> # Copyright (c) 2015 Intel Corporation. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # Load i2clcd display module import time, signal, sys import pyupm_i2clcd as upmLCD myLCD = upmLCD.SSD1327(0, 0x3C); logoArr = [0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xC0, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xC0, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xC0, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0x80, 0x03, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0x80, 0x01, 0xC0, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x07, 0x80, 0x01, 0xE0, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x0F, 0x80, 0x01, 0xE0, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x0F, 0x00, 0x01, 0xE0, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x0F, 0x00, 0x01, 0xE0, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x0F, 0x00, 0x01, 0xE0, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x0F, 0x00, 0x01, 0xE0, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x0F, 0x00, 0x01, 0xE0, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x0F, 0x00, 0x01, 0xE0, 0x38, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x0F, 0x80, 0x01, 0xE0, 0x38, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3C, 0x0F, 0x80, 0x01, 0xE0, 0x78, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3E, 0x0F, 0x80, 0x03, 0xE0, 0x78, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1E, 0x07, 0x80, 0x03, 0xE0, 0xF8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1E, 0x07, 0x80, 0x03, 0xE0, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1F, 0x07, 0x80, 0x03, 0xC1, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0F, 0x87, 0xC0, 0x07, 0xC1, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0F, 0x83, 0xC0, 0x07, 0x83, 0xE0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0F, 0xC3, 0xC0, 0x07, 0x87, 0xE0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0xE1, 0xE0, 0x07, 0x0F, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xF0, 0xE0, 0x0F, 0x0F, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xF8, 0xF0, 0x0E, 0x1F, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xF8, 0x70, 0x1C, 0x3F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFC, 0x30, 0x18, 0x7E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7F, 0x18, 0x30, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1F, 0x88, 0x21, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0F, 0xC4, 0x47, 0xE0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xE0, 0x0F, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF8, 0x3E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0E, 0xE0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x6C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x06, 0x00, 0x00, 0x6C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x06, 0x00, 0x00, 0x60, 0x00, 0x7E, 0x3F, 0x0F, 0xC3, 0xF0, 0xFA, 0x0F, 0xDF, 0xE1, 0x9F, 0xEC, 0x7E, 0xE6, 0x73, 0x9C, 0xE7, 0x39, 0xCE, 0x1C, 0xDF, 0xE1, 0xB9, 0xEC, 0xE7, 0xE0, 0x61, 0xD8, 0x66, 0x1B, 0x86, 0x1C, 0x06, 0x61, 0xB0, 0x6D, 0xC3, 0x7C, 0x7F, 0xFF, 0xFF, 0xFF, 0x06, 0x0F, 0x86, 0x61, 0xB0, 0x6D, 0x83, 0x3E, 0x7F, 0xFF, 0xFF, 0xFF, 0x06, 0x07, 0xC6, 0x61, 0xB0, 0x6D, 0x83, 0xC3, 0x61, 0x18, 0x46, 0x03, 0x86, 0x18, 0x66, 0x61, 0xB0, 0x6D, 0xC3, 0xFE, 0x7F, 0x9F, 0xE7, 0xF9, 0xFE, 0x1F, 0xE6, 0x3F, 0x9F, 0xEC, 0xFE, 0x7E, 0x3F, 0x0F, 0xC3, 0xF0, 0xFA, 0x0F, 0xC6, 0x3F, 0x9F, 0xEC, 0x7E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7C, 0x00, 0x00, 0x20, 0x82, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x44, 0x00, 0x00, 0x20, 0x82, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6C, 0xF3, 0xCF, 0x70, 0x9E, 0x79, 0xE7, 0x80, 0x00, 0x00, 0x00, 0x00, 0x7D, 0x9E, 0x68, 0x20, 0xB2, 0xC8, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x47, 0x9E, 0x6F, 0x20, 0xB2, 0xF9, 0xE7, 0x80, 0x00, 0x00, 0x00, 0x00, 0x46, 0x9A, 0x61, 0x20, 0xB2, 0xCB, 0x60, 0x80, 0x00, 0x00, 0x00, 0x00, 0x7C, 0xF3, 0xCF, 0x30, 0x9E, 0x79, 0xE7, 0x90, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7C, 0x02, 0x00, 0x00, 0x82, 0x60, 0x00, 0x00, 0xF8, 0x00, 0x00, 0x40, 0x40, 0x02, 0x00, 0x00, 0x83, 0x60, 0x00, 0x00, 0x8C, 0x00, 0x00, 0x40, 0x60, 0xB7, 0x79, 0xE7, 0x81, 0xC7, 0x92, 0x70, 0x89, 0xE7, 0x9E, 0x78, 0x7C, 0xE2, 0xC9, 0x2C, 0x81, 0xCC, 0xD2, 0x40, 0xFB, 0x21, 0xB2, 0x48, 0x40, 0x62, 0xF9, 0x2C, 0x80, 0x8C, 0xD2, 0x40, 0x8B, 0xE7, 0xB0, 0x48, 0x40, 0xE2, 0xC9, 0x2C, 0x80, 0x84, 0xD2, 0x40, 0x8B, 0x2D, 0x92, 0x48, 0x7D, 0xB3, 0x79, 0x27, 0x80, 0x87, 0x9E, 0x40, 0x8D, 0xE7, 0x9E, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] SeeedLogo = upmLCD.uint8Array(len(logoArr)) for x in range(len(logoArr)): SeeedLogo.__setitem__(x, logoArr[x]) # If you don't set the display to be white, the seeed logo will appear jagged myLCD.setGrayLevel(12) myLCD.draw(SeeedLogo, 96 * 96 / 8); for i in range(12): myLCD.setCursor(i, 0) myLCD.setGrayLevel(i) myLCD.write('Hello World') print "Exiting"
[ 2, 48443, 14629, 14, 8800, 14, 29412, 198, 2, 6434, 25, 16899, 440, 1156, 1279, 89, 382, 429, 31, 873, 13, 785, 29, 198, 2, 15069, 357, 66, 8, 1853, 8180, 10501, 13, 198, 2, 198, 2, 2448, 3411, 318, 29376, 7520, 11, 1479, 286, ...
1.565265
5,470
from .bernsen import bernsen_thresholding_method from .bradley_roth import bradley_thresholding_method from .contrast import contrast_thresholding_method from .feng import feng_thresholding_method from .gaussian import threshold_value_gaussian from .johannsen import johannsen_thresholding_method from .kapur import kapur_thresholding_method from .mean import threshold_value_mean from .minimum_error import minimum_err_thresholding_method from .niblack import niblack_thresholding_method from .nick import nick_thresholding_method from .otsu import otsu_thresholding_method from .p_tile import p_tile_thresholding_method from .pun import pun_thresholding_method from .rosin import rosin_thresholding_method from .sauvola import sauvola_thresholding_method from .singh import singh_thresholding_method from .two_peaks import two_peaks_thresholding_method from .wolf import wolf_thresholding_method
[ 6738, 764, 527, 5907, 268, 1330, 275, 1142, 6248, 62, 400, 10126, 278, 62, 24396, 198, 6738, 764, 1671, 324, 1636, 62, 33640, 1330, 865, 324, 1636, 62, 400, 10126, 278, 62, 24396, 198, 6738, 764, 3642, 5685, 1330, 6273, 62, 400, 101...
3.218638
279
import os from kombu import Queue, Exchange ## Broker settings. BROKER_URL = os.getenv('BROKER_URL', 'amqp://guest:guest@localhost:5672') #BROKER_URL = "amqp://guest:guest@localhost:5672/" #BROKER_URL = os.getenv('BROKER_URL', 'redis://guest@localhost:6379') #BROKER_HOST = "localhost" #BROKER_PORT = 27017 #BROKER_TRANSPORT = 'mongodb' #BROKER_VHOST = 'celery' CELERY_DEFAULT_QUEUE = 'default' CELERY_QUEUES = ( Queue('default', exchange=Exchange('default'), routing_key='default'), # Queue('aws_uploads', routing_key='video.uploads'), ) CELERY_DEFAULT_EXCHANGE = 'default' CELERY_DEFAULT_EXCHANGE_TYPE = 'direct' CELERY_DEFAULT_ROUTING_KEY = 'default' CELERY_IMPORTS = ('celeryservice.tasks',) #CELERY_RESULT_BACKEND = os.getenv('CELERY_RESULT_BACKEND', 'redis') CELERY_RESULT_BACKEND = os.getenv('CELERY_RESULT_BACKEND', 'amqp') ## Using the database to store task state and results. #CELERY_RESULT_BACKEND = "mongodb" #CELERY_MONGODB_BACKEND_SETTINGS = { # "host": "localhost", # "port": 27017, # "database": "celery", # "taskmeta_collection": "celery_taskmeta", #}
[ 11748, 28686, 198, 6738, 479, 2381, 84, 1330, 4670, 518, 11, 12516, 198, 198, 2235, 2806, 6122, 6460, 13, 198, 11473, 11380, 1137, 62, 21886, 796, 28686, 13, 1136, 24330, 10786, 11473, 11380, 1137, 62, 21886, 3256, 705, 321, 80, 79, 1...
2.334043
470
# -*- coding: utf-8 -*- # # from __future__ import print_function import csv import os import re import sys import arrow from gsheets import Sheets CURRENT_PATH = os.path.abspath(os.path.dirname(os.path.realpath(__file__))) DEBUG = os.environ.get('DEBUG', "0") == "1" AS_CSV = os.environ.get('CSV', "0") == "1" COL_DATE = 0 COL_WEEKDAY = 1 COL_TIME_START = 2 COL_TIME_END = 3 COL_LUNCH = 4 COL_TIME = 5 # includes lunch COL_TIME_FIXED = 6 # does not include lunch COL_MOVE = 7 COL_WORK_FROM_HOME = 8 COL_NOTES = 9 COL_TASKS_START = 10 SPECIAL_VALUES = ["sick", "ab", "off", "wfh", "hol"] SATURDAY = 5 SUNDAY = 6 if __name__ == "__main__": main()
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 198, 2, 198, 6738, 11593, 37443, 834, 1330, 3601, 62, 8818, 198, 198, 11748, 269, 21370, 198, 11748, 28686, 198, 11748, 302, 198, 11748, 25064, 198, 198, 11748, 15452...
2.284746
295
import numpy as np import pickle import treys import constants FULL_DECK = np.array(treys.Deck.GetFullDeck())
[ 11748, 299, 32152, 355, 45941, 198, 11748, 2298, 293, 198, 11748, 2054, 893, 198, 198, 11748, 38491, 198, 198, 37, 9994, 62, 35, 25171, 796, 45941, 13, 18747, 7, 33945, 893, 13, 5005, 694, 13, 3855, 13295, 5005, 694, 28955, 628 ]
2.756098
41
# -*- coding: utf-8 -*- from cms.models import Page, Title, CMSPlugin, Placeholder from cms.utils import get_language_from_request from django.http import Http404 from django.shortcuts import get_object_or_404
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 6738, 269, 907, 13, 27530, 1330, 7873, 11, 11851, 11, 40773, 37233, 11, 8474, 13829, 198, 6738, 269, 907, 13, 26791, 1330, 651, 62, 16129, 62, 6738, 62, 25927, 198, 67...
3.043478
69
import os from PIL import Image, ImageFilter import matplotlib.pyplot as plt import matplotlib.image as mpimg # import seaborn as sns import pandas as pd import numpy as np import random train_path = './AgriculturalDisease_trainingset/' valid_path = './AgriculturalDisease_validationset/' if __name__ == '__main__': genImage(train_path, 'train') genImage(valid_path, 'valid')
[ 11748, 28686, 220, 198, 6738, 350, 4146, 1330, 7412, 11, 7412, 22417, 198, 11748, 2603, 29487, 8019, 13, 9078, 29487, 355, 458, 83, 198, 11748, 2603, 29487, 8019, 13, 9060, 355, 29034, 9600, 198, 2, 1330, 384, 397, 1211, 355, 3013, 82...
2.804348
138
# - - - - - - - - - - - # @author like # @since 2021-02-23 11:08 # @email 980650920@qq.com # from matplotlib import pyplot as plt from matplotlib import rc from matplotlib import font_manager import random x = range(0, 120) y = [random.randint(20, 35) for i in range(120)] plt.figure(figsize=(20, 8), dpi=80) plt.plot(x, y) # chFont = font_manager.FontProperties(family="SimHei") # SimHei # chFont = font_manager.FontProperties(fname="C:/Windows/Fonts/SIMHEI.TTF") # step = 10 xLabels = ["10,{}".format(i) for i in range(60)] xLabels += ["11,{}".format(i) for i in range(60)] plt.xticks(list(x)[::step], xLabels[::step], rotation=25, fontProperties=chFont) # plt.xlabel("", fontProperties=chFont) plt.ylabel(" ()", fontProperties=chFont) plt.title("1012", fontProperties=chFont) plt.show()
[ 2, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 220, 198, 2, 2488, 9800, 588, 198, 2, 2488, 20777, 33448, 12, 2999, 12, 1954, 1367, 25, 2919, 198, 2, 2488, 12888, 32614, 17544, 37128, 31, 38227, 13, 785, 198, 2, 220, 1...
2.395833
336
import os, sys, cdms2, vcs, vcs.testing.regression as regression dataset = cdms2.open(os.path.join(vcs.sample_data,"clt.nc")) data = dataset("clt") canvas = regression.init() isoline = canvas.createisoline() isoline.label="y" texts=[] colors = [] for i in range(10): text = canvas.createtext() text.color = 50 + 12 * i text.height = 12 colors.append(100 + 12 * i) if i%2 == 0: texts.append(text.name) else: texts.append(text) isoline.text = texts # First test using isoline.text[...].color canvas.plot(data, isoline, bg=1) baseline = os.path.splitext(sys.argv[1]) baselineImage = "%s%s"%baseline ret = regression.run_wo_terminate(canvas, "test_vcs_isoline_labels.png", baselineImage) # Now set isoline.linecolors and test again. canvas.clear() isoline.linecolors = colors canvas.plot(data, isoline, bg=1) baselineImage = "%s%d%s"%(baseline[0], 2, baseline[1]) testImage = os.path.abspath("test_vcs_isoline_labels2.png") ret += regression.run_wo_terminate(canvas, testImage, baselineImage) # Now set isoline.textcolors and test again. canvas.clear() isoline.textcolors = colors canvas.plot(data, isoline, bg=1) baselineImage = "%s%d%s"%(baseline[0], 3, baseline[1]) testImage = os.path.abspath("test_vcs_isoline_labels3.png") ret += regression.run_wo_terminate(canvas, testImage, baselineImage) sys.exit(ret)
[ 11748, 28686, 11, 25064, 11, 22927, 907, 17, 11, 410, 6359, 11, 410, 6359, 13, 33407, 13, 2301, 2234, 355, 20683, 198, 198, 19608, 292, 316, 796, 22927, 907, 17, 13, 9654, 7, 418, 13, 6978, 13, 22179, 7, 85, 6359, 13, 39873, 62, ...
2.49262
542
#!/usr/bin/env python2 # -*- coding: utf-8 -*- """ Created on Mon Mar 5 05:47:03 2018 @author: zg """ import numpy as np #from scipy import io import scipy.io #import pickle from sklearn.model_selection import StratifiedKFold #import sklearn from scipy.sparse import spdiags from scipy.spatial import distance #import matplotlib.pyplot as plt from sklearn.ensemble import BaggingClassifier from sklearn import svm #from sklearn import metrics from sklearn.metrics import roc_auc_score from sklearn import tree import copy import numpy.matlib from sklearn.exceptions import NotFittedError #import FuzzyRwrBagging as frb #from joblib import Parallel, delayed #import multiprocessing def RWR(A, nSteps, laziness, p0 = None): ''' % the random walk algorithm. % A is the input net matrix, with the diag to be 0. % nSteps: how many steps to walk % laziness: the probablity to go back. % p0: the initial probability. usually it is a zero matrix with the diag to % be 1. % % for example, A could be: % A = [0,2,2,0,0,0,0;... % 2,0,1,1,0,0,0;... % 2,1,0,0,1,0,0;... % 0,1,0,0,0,1,1;... % 0,0,1,0,0,0,0;... % 0,0,0,1,0,0,1;... % 0,0,0,1,0,1,0] % % if nSteps is 1000 and laziness is 0.3, p0 is default, the result is: % [0.449, 0.207, 0.220, 0.064, 0.154, 0.034, 0.034;... % 0.207, 0.425, 0.167, 0.132, 0.117, 0.071, 0.071;... % 0.220, 0.167, 0.463, 0.052, 0.324, 0.028, 0.028;... % 0.048, 0.099, 0.039, 0.431, 0.027, 0.232, 0.232;... % 0.038, 0.029, 0.081, 0.009, 0.356, 0.004, 0.004;... % 0.017, 0.035, 0.014, 0.154, 0.009, 0.425, 0.203;... % 0.017, 0.035, 0.014, 0.154, 0.009, 0.203, 0.425] % % Each column represents the propability for each node. each element in the % column means the probability to go to that node. % This algorithm will converge. For example, for the above matrix, nSteps = % 100, 1000 or 10000, will give the same result. ''' n = len(A) if p0 == None: p0 = np.eye(n) ''' % In the example above, spdiags(sum(A)'.^(-1), 0, n, n) will be % 0.2500 0 0 0 0 0 0 % 0 0.2500 0 0 0 0 0 % 0 0 0.2500 0 0 0 0 % 0 0 0 0.3333 0 0 0 % 0 0 0 0 1.0000 0 0 % 0 0 0 0 0 0.5000 0 % 0 0 0 0 0 0 0.5000 % W will be: % 0 0.5000 0.5000 0 0 0 0 % 0.5000 0 0.2500 0.3333 0 0 0 % 0.5000 0.2500 0 0 1.0000 0 0 % 0 0.2500 0 0 0 0.5000 0.5000 % 0 0 0.2500 0 0 0 0 % 0 0 0 0.3333 0 0 0.5000 % 0 0 0 0.3333 0 0.5000 0 ''' #W = A * spdiags(sum(A)'.^(-1), 0, n, n); #W = spdiags(np.power(sum(np.float64(A)) , -1).T , 0, n, n).toarray() W = A.dot( spdiags(np.power(sum(np.float64(A)) , -1)[np.newaxis], \ 0, n, n).toarray() ) p = p0 pl2norm = np.inf unchanged = 0 for i in range(1, nSteps+1): if i % 100 == 0: print(' done rwr ' + str(i-1) ) pnew = (1-laziness) * W.dot(p) + laziness * p0 l2norm = max(np.sqrt(sum((pnew - p) ** 2) ) ) p = pnew if l2norm < np.finfo(float).eps: break else: if l2norm == pl2norm: unchanged = unchanged +1 if unchanged > 10: break else: unchanged = 0 pl2norm = l2norm return p # test RWR() ''' A = np.array([[0,2,2,0,0,0,0],\ [2,0,1,1,0,0,0],\ [2,1,0,0,1,0,0],\ [0,1,0,0,0,1,1],\ [0,0,1,0,0,0,0],\ [0,0,0,1,0,0,1],\ [0,0,0,1,0,1,0]]) nSteps = 1000 lazi = 0.3 RWR(A, nSteps, lazi, None) ''' # test #dst = distance.euclidean(A) # corrent, the same as in Matlab def f_sim_2_aRankNet(sim, k=3): ''' % Convert the similarity matrix to a network graph where each node % has k edges to other nodes (aRank). ''' # delete the diagnal values. # sim = sim-diag(diag(sim) ); np.fill_diagonal(sim, 0) # [~, I] = sort(sim-diag(diag(sim) ) ); I = np.argsort(sim, kind='mergesort') + 1 # [~, I2] = sort(I); I2 = (np.argsort(I, kind='mergesort').T + 1).T # for every column, just keep the top k edges. #aRankNet = (I2 >length(sim)-k); aRankNet = I2 > (len(sim) - k) # make it a diagonal matrix # aRankNet = max(aRankNet, aRankNet'); aRankNet = np.logical_or(aRankNet, aRankNet.T) # remove the diagonal 1s. # aRankNet = aRankNet-diag(diag(aRankNet) ); np.fill_diagonal(aRankNet, False) return aRankNet # test #sim = np.array([[0, 0.5566, 0.6448, 0.3289], \ # [0.5566, 0, -0.0842, -0.0170], \ # [0.6448, -0.0842, 0, 0.8405], \ # [0.3289, -0.0170, 0.8405, 0]]) # #f_sim_2_aRankNet(sim,1) #f_sim_2_aRankNet(sim,2) #f_sim_2_aRankNet(sim,3) # #array([[False, True, True, False], # [ True, False, False, False], # [ True, False, False, True], # [False, False, True, False]]) # #array([[False, True, True, True], # [ True, False, False, False], # [ True, False, False, True], # [ True, False, True, False]]) # #array([[False, True, True, True], # [ True, False, False, True], # [ True, False, False, True], # [ True, True, True, False]]) def f_find_centers_rwMat(rw_mat, k): ''' % on the rw_mat matrix, find some nodes as the centroids for soft % clustering. If we just random pickup some nodes as centroids, that is % not good for fuzzy clusters. % k is the number of centroids. ''' ixs = [] # 1. find the most connected center node as the first centroid. a = np.sum(rw_mat, axis=1) # axis=1 for rows; 0 for col # % most connected node. ix = np.argmax(a) ixs.append(ix) # % 2. iteratively find the rest nodes for i in range(1, k): tmp = rw_mat[:, ixs] b = np.sum(tmp, axis=1) b[ixs] = np.inf # % find the farthest node ix = np.argmin(b) ixs.append(ix) return ixs # test #tmp = f_find_centers_rwMat(rw_mat, 10) #test #>>> a = np.array([[1,2], [3,4]]) #>>> a.flatten() #array([1, 2, 3, 4]) #>>> a.flatten('F') #array([1, 3, 2, 4]) ''' a = np.array( range(0,100) ) b = np.matlib.repmat(a, 100, 1) ct = getCutoff(b, 70) ''' def f_eu_dist(X): ''' calculate the euclidean distance between instances ''' sim = np.zeros(( len(X), len(X) )) for i in range(0, len(X)): for j in range(i+1, len(X)): tmp = distance.euclidean(X[i], X[j]) sim[i][j] = tmp sim[j][i] = tmp sim = -sim np.fill_diagonal(sim, 0) return sim #test #sim = f_eu_dist(X) def f_eu_dist2(X1, X2): ''' calculate the euclidean distance between instances from two datasets ''' sim = np.zeros(( len(X1), len(X2) )) for i in range(0, len(X1) ): for j in range(0, len(X2) ): tmp = distance.euclidean(X1[i], X2[j]) sim[i][j] = tmp sim = -sim return sim #test #sim = f_eu_dist2(X_tr, X_te) def f_fuzzy_rwr_clusters(X, k=100, each_clus_sz=None): # X: data # k: number of clusters ''' The return variable clus stores the instance indices for each cluster. However, this data structure is not easy to find for a instance, which are the clusters it belongs to, thus we also need to convert clus to a true-false matrix. ''' if each_clus_sz == None: # on average, how many clusters does one inst belongs to. #overlap_factor = 2; # the estimated size of each cluster. default is half the number of # instances. each_clus_sz=len(X)/3 print('RWR-based fuzzy clustering starts...') print(' NO. clusters = '+str(k)+'; avg. cluster size = '+str(each_clus_sz) ) # sim = squareform(pdist(X)); # sim = -sim; sim = np.zeros((len(X), len(X) ) ) for i in range(0, len(X)): for j in range(i+1, len(X)): tmp = distance.euclidean(X[i], X[j]) sim[i][j] = tmp sim[j][i] = tmp sim = -sim print(' done calculating the Euclidean distance matrix') # --------------------------------------------------------------- aRank_k_neighbors = np.ceil(np.log10(len(sim)) ) ori_graph = f_sim_2_aRankNet(sim, aRank_k_neighbors) print(' done calculating the A-rank KNN graph') # % -------- RWR -------- nSteps = 1000 lazi = 0.3 rw = RWR(ori_graph, nSteps, lazi) # remove probability of returning start node np.fill_diagonal(rw, 0) rw_mat = rw print(' done RWR') # --------------------------------------------------------------- ixs_centers = f_find_centers_rwMat(rw_mat, k) ct = getCutoff(rw_mat, each_clus_sz) rw_net = rw_mat > ct # % set the diagnal to 1 np.fill_diagonal(rw_net, True) clus = [] for i in range(0, k): tmp = np.argwhere(rw_net[:, ixs_centers[i] ] ).flatten() clus.append(tmp) # --------------------------------------------------------------- # % sort the clusters lens = f_len_of_each_ele(clus) ix = np.argsort(lens)[::-1] clus_ordered = [clus[i] for i in ix] print(' center inst. index of each cluster: ') ixs_centers = np.array(ixs_centers) print(ixs_centers[ix]) print(' size of each cluster: ') print(lens[ix]) print(' done RWR clustering') return clus_ordered #test #clus = f_fuzzy_rwr_clusters(X, 100) # pass # test #tfs = f_clus_to_tfs(clus, len(X)) # pass def f_tfs_2_instClus(tfs): ''' convert the boolean table representation of clustering result to for each instance, what clusters it belongs to. ''' inst_clus = [] for i in range(0, len(tfs)): row = list( np.where(tfs[i, :] ) [0] ) inst_clus.append(row) return inst_clus # test #inst_clus = f_tfs_2_instClus(tfs) #def f_bg_svm_tr_te(X_tr, y_tr, X_te, y_te): # #bagging = BaggingClassifier(base_estimator = svm.LinearSVC(), \ # bagging = BaggingClassifier(base_estimator = tree.DecisionTreeClassifier(), \ # random_state=None, n_estimators = 100 ) # bagging.fit(X_tr, y_tr) # # y_pred = bagging.predict_proba(X_te) # y_pred = y_pred[:, 1].flatten() # # auc = roc_auc_score(y_te.flatten(), y_pred) # # return [y_pred, auc] # test ''' X_tr = X y_tr = y X_te = X y_te = y [y_pred, auc] = f_bg_svm_tr_te(X_tr, y_tr, X_te, y_te) ''' #def f_bg_tr_te(X_tr, y_tr, X_te, y_te, BaseBagging): # ''' # corresponds to f_weka_bg_svm_tr_te() in Matlab version # ''' # #bagging = BaggingClassifier(base_estimator = svm.LinearSVC(), \ # bagging = BaggingClassifier(BaseBagging, \ # random_state=None, n_estimators = 100 ) # bagging.fit(X_tr, y_tr) # # y_pred = bagging.predict_proba(X_te) # y_pred = y_pred[:, 1].flatten() # # auc = roc_auc_score(y_te.flatten(), y_pred) # # return [y_pred, auc] def f_tr_te(X_tr, y_tr, X_te, model): ''' corresponds to f_weka_bg_svm_tr_te() in Matlab version ''' #bagging = BaggingClassifier(base_estimator = svm.LinearSVC(), \ #bagging = BaggingClassifier(BaseBagging, \ # random_state=None, n_estimators = 100 ) model_inner = copy.deepcopy(model) model_inner.fit(X_tr, y_tr) y_pred = model_inner.predict_proba(X_te) y_pred = y_pred[:, 1].flatten() #auc = roc_auc_score(y_te.flatten(), y_pred) return y_pred def f_k_fo(X, y, model, k_fold=10): ''' corresponds to f_weka_bg_svm_arff_k_fo_3_parfor() in Matlab version ''' y = y.flatten() y_pred = np.zeros(y.size) skf = StratifiedKFold(n_splits=k_fold, random_state=None, shuffle=True) skf.get_n_splits(X, y) for train_index, test_index in skf.split(X, y): #print("TRAIN: ", train_index, " TEST: ", test_index) X_tr, X_te = X[train_index], X[test_index] #y_tr, y_te = y[train_index], y[test_index] y_tr = y[train_index] if np.unique(y_tr).size == 1: y_pred_fo = np.zeros( len(test_index) ) #print len(X_te) #print len(test_index) #print y_pred_fo y_pred_fo.fill(np.unique(y_tr)[0] ) #print y_pred_fo else: y_pred_fo = f_tr_te(X_tr, y_tr, X_te, model) y_pred[test_index] = y_pred_fo #auc = roc_auc_score(y.flatten(), y_pred) return y_pred # test #pa = '/Volumes/Macintosh_HD/Users/zg/bio/3_ensembF/3_scripts/2017_4_4/' ##X = scipy.io.loadmat(pa+'/data/data_all_pickle/30/data.mat')['X'] # 30:breast cancer ##y = scipy.io.loadmat(pa+'/data/data_all_pickle/30/data.mat')['y'] #X = scipy.io.loadmat(pa+'/data/data_all_pickle/11/data.mat')['X'] # 11:mesothelioma #y = scipy.io.loadmat(pa+'/data/data_all_pickle/11/data.mat')['y'] # #model = BaggingClassifier(base_estimator = tree.DecisionTreeClassifier(), \ # random_state=None, n_estimators = 100 ) #y_pred = f_k_fo(X, y, model, k_fold=10) # #print roc_auc_score(y.flatten(), y_pred) # the easy dataset mesothelioma get 1.0 CV result. # breast cancer get 0.599 # all results are correct. def f_quantileNorm(templete, target): ''' Templete is the standard, change the target to the values in the templete. Target may have a very different range than the templete. templete and target should be 1d n by 1 array. f_my_quantileNorm() ''' ix_target = np.argsort(target, kind='mergesort') ix_templete = np.argsort(templete, kind='mergesort') target[ix_target] = templete[ix_templete] new = target return new # test #templete = X[:, 0] #target = X[:, 1] #new = f_quantileNorm(templete, target) #def f_bg_k_fo_3(X, y, k_fold=10): # ''' # corresponds to f_weka_bgSvm_arff_k_fo_3_parfor() in Matlab version # corresponds to f_k_fo() # ''' # y_pred = np.zeros((y.size, 1)) # # skf = StratifiedKFold(n_splits=k_fold) # skf.get_n_splits(X, y) # # for train_index, test_index in skf.split(X, y): # #print("TRAIN:", train_index, "TEST:", test_index) # X_tr, X_te = X[train_index], X[test_index] # y_tr, y_te = y[train_index], y[test_index] def f_use_each_clus_forWhole(X, y, clus, y_pred_whole, model, fo_inner): ''' % using each cluster data to predict the whole instances, while self % prediction using 10-fold CV. corresponds to f_use_each_clus_forWhole_bg_svm() in Matlab version ''' n_clusters = len(clus) y_pred_multi = np.zeros((y.size, n_clusters) ) models = [] for j in range(0, n_clusters): # for each cluster Xj = X[clus[j].flatten(), :] yj = y[clus[j].flatten() ] model_a_clust = copy.deepcopy(model) print(' Cluster '+str(j)+' started...') #if len(yj) > 10: if len(yj) > 15 and np.unique(yj).size != 1: # ------------------ for self ------------------ #if np.unique(yj).size == 1: # y_pred = np.zeros(yj.size) # y_pred.fill(np.unique(yj)[0]) #else: try: y_pred = f_k_fo(Xj, yj, model, fo_inner) # quantileNorm templete = y_pred_whole[clus[j].flatten()] target = y_pred y_pred = f_quantileNorm(templete, target) # copy the normed prediction to the whole data. y_pred_multi[clus[j].flatten(), j] = y_pred print(' c-'+str(j)+' done predicting local instances') # ------------------ for other ----------------- ix_other = set(range(0, y.size)) - set(clus[j].flatten()) ix_other = list(ix_other) #print ix_other X_other = X[ix_other , :] #y_other = y[ix_other ] # predict #y_pred = f_tr_te(Xj, yj, X_other, model) #if np.unique(yj).size != 1: model_a_clust.fit(Xj, yj) y_pred = model_a_clust.predict_proba(X_other) y_pred = y_pred[:, 1].flatten() # quantileNorm templete = y_pred_whole[ix_other] target = y_pred y_pred = f_quantileNorm(templete, target) #else: # y_pred = np.zeros(X_other.size) # y_pred.fill(np.unique(yj)[0]) # copy to the whole array y_pred_multi[ix_other, j] = y_pred print(' c-'+str(j)+' done predicting remote instances') except ValueError as e: print(e) print(' skip this cluster') y_pred = np.zeros(y.size) y_pred.fill(np.nan) y_pred_multi[:, j] = y_pred else: if len(yj) <= 15: print (' '+str(len(yj))+' insts in cluster, <= 15, skip...') y_pred = np.zeros(y.size) y_pred.fill(np.nan) y_pred_multi[:, j] = y_pred if np.unique(yj).size == 1: print (' warning, #unique class label(s) == 1') y_pred = np.zeros(y.size) y_pred.fill(np.unique(yj)[0]) y_pred_multi[:, j] = y_pred model_a_clust = np.unique(yj)[0] models.append(model_a_clust) return [y_pred_multi, models] # test #[y_pred_multi, models] = f_use_each_clus_forWhole(X, y, clus, y_pred_whole, model) #def f_dec_tab_4_bg_svm(X, y, clus): # ''' # Calculate the decision table # % This version changed from the cluster-cluster dec_mat to instance-cluster # % dec_mat. This solution will avoid the case that if one cluster decision # % is wrong leading entrie cluster prediction is wrong, which is the reason # % of instability. However, we cannot use a systematic evaluation criteria # % such as AUC, I will try using the predicted prob at first. # # % This version 3 adds the support for fuzzy clustering - one instance may # % belongs to more than one cluster. # % This updated version also outputs the predicted values of y. # % support more than 3 clusters # % normalization take place in y_pred_self and y_pred_other, thus do not # % need normalization when predict y_pred_ICE. # % ixsp is another cluster form. # # corresponds to f_dec_tab_4_bg_svm() in Matlab version # ''' # #n_clusters = len(clus) # ## dec_mat stores the prediction error. # #pred_mat=np.zeros((y.size, n_clusters+1)) #the extra col is for whole pred # # # ## k_fold of inner cross-validation # #fo_inner = 10 # # --------------------------- WHOLE ------------------------- # # # --------------------------- SELF ------------------------- def f_err_mat(X, y, clus, model): ''' Calculate the decision table corresponds to f_dec_tab_4_bg_svm() in Matlab version ''' n_clusters = len(clus) # err_mat stores the prediction error. pred_prob_mat=np.zeros((y.size, n_clusters+1)) #the extra col is for whole pred # col 0 to col n_clusters-1 store the predictions by each cluster # the last col stores the pred by whole data #models = [] # k_fold of inner cross-validation fo_inner = 5 # --------------------------- WHOLE ------------------------- # Predict each cluster using the whole data. model_whole = copy.deepcopy(model) y_pred_whole = f_k_fo(X, y, model_whole, fo_inner) model_whole.fit(X, y) # fit a model using all data rather than only a fold pred_prob_mat[:, n_clusters] = y_pred_whole print (' Done evaluation using whole instances') print (' Start to evaluate each cluster ') # --------------------------- SELF ------------------------- # predict the whole instances using each cluster data, while self # prediction using 10-fold CV. [y_pred_multi, models] = f_use_each_clus_forWhole(X, y, clus, \ y_pred_whole, model, fo_inner) print (' Done evaluation using each cluster') models.append(model_whole) pred_prob_mat[:, 0:n_clusters] = y_pred_multi # make a tmp array a stores y tmp = np.matlib.repmat(y.reshape((y.size, 1)), 1, n_clusters+1) err_mat = abs(pred_prob_mat - tmp ) print (' Done calculating error table and fitting ICE models') return [err_mat, models] """ #mat = scipy.io.loadmat('/Volumes/Macintosh_HD/Users/zg/bio/3_ensembF/'+\ # '3_scripts/2017_4_4/data/names.mat')['names'] #mat = io.loadmat('/Users/zg/Desktop/a.mat')['names'] #test pa = '/Volumes/Macintosh_HD/Users/zg/bio/3_ensembF/3_scripts/2017_4_4/' X = scipy.io.loadmat(pa+'/data/data_all_pickle/30/data.mat')['X'] # 30:breast cancer y = scipy.io.loadmat(pa+'/data/data_all_pickle/30/data.mat')['y'] #X = scipy.io.loadmat(pa+'/data/data_all_pickle/11/data.mat')['X'] # 11:mesothelioma #y = scipy.io.loadmat(pa+'/data/data_all_pickle/11/data.mat')['y'] n_clus = 3 clus = f_fuzzy_rwr_clusters(X, n_clus) tfs = f_clus_to_tfs(clus, len(X)) y = y.astype(float) #model = BaggingClassifier(base_estimator = tree.DecisionTreeClassifier(), \ #model = BaggingClassifier(base_estimator = svm.LinearSVR(), \ #model = BaggingClassifier(base_estimator = svm.LinearSVC(), \ model = BaggingClassifier(base_estimator = svm.SVC(kernel='linear'), \ random_state=None, n_estimators = 100 ) [err_mat, models] = f_err_mat(X, y, clus, model) """ def f_err_2_decMat(err_mat, tfs, adv_whole=0.4, adv_self=0.5): ''' Convert the err table to decision table. ''' dec_mat = np.zeros(( len(err_mat), err_mat[0].size-1 ), dtype=bool) # dec_ixs: for each instance, which clusters should be used. dec_ixs = [] inst_clus = f_tfs_2_instClus(tfs) for i in range(0, len(err_mat)): # Matlab code: #dec_row = dec_mat(cur_nb_ix, :); #dec_row(:, end ) = dec_row(:, end ) - adv_whole; #dec_row(:, clus_id) = dec_row(:, clus_id) - adv_self; row = np.copy( err_mat[i, :] ) #print row row[-1] = row[-1] - adv_whole inst_i_clus = inst_clus[i] if len(inst_i_clus) > 0: row[inst_i_clus] = row[inst_i_clus] - adv_self #print row ix_good_clus = list( np.where( row < row[-1] ) [0] ) #print ix_good_clus if len(ix_good_clus) > 0: dec_mat[i, ix_good_clus] = True dec_ixs.append(ix_good_clus) else: dec_ixs.append([]) return [dec_mat, dec_ixs] #[dec_mat, dec_ixs] = f_err_2_decMat(err_mat, tfs) def f_ICE_tr_te_all_clus(X_tr, X_te, clus, models, doNorm=True): ''' Use the training data to predict the testing data. Use whole training data to predict Use each cluster of training data to predict the testing data. ''' y_pred_all = np.zeros(( len(X_te), len(clus) + 1 )) # the first col is the prediction using the whole data model_whole = models[-1] y_pred_all[:, 0] = f_te(X_te, model_whole) #y_pred_all[:, 0] = f_tr_te(X_tr, y_tr, X_te, model) #print 'whole model good ' # start from the second col, the result is by each cluster for i in range(0, len(clus)): #Xi = X_tr[clus[i].flatten(), :] #yi = y_tr[clus[i].flatten() ] model_i = models[i] #model_a_clust = copy.deepcopy(model) try: y_pred_te = f_te(X_te, model_i) except : if model_i == 0: y_pred_te = np.zeros(len(X_te)) elif model_i == 1: y_pred_te = np.ones(len(X_te)) else: y_pred_te = np.zeros(len(X_te)) y_pred_te.fill(np.nan) #except NotFittedError as e: # print(repr(e)) # y_pred_te = np.zeros(len(X_te)) # y_pred_te.fill(np.nan) #print 'model '+str(i)+' good ' #y_pred_te = f_tr_te(Xi, yi, X_te, model) if doNorm == True: templete = y_pred_all[:, 0] target = y_pred_te y_pred = f_quantileNorm(templete, target) else: y_pred = y_pred_te y_pred_all[:, i+1] = y_pred return y_pred_all # test #y_pred_all = f_ICE_tr_te_all_clus(X, X, clus, model) def f_ICE_fit(X_tr, y_tr, n_clus, model, w=0.4, s=0.5): ''' ''' # rwr based fuzzy clustering clus = f_fuzzy_rwr_clusters(X_tr, n_clus) #print clus[0] tfs = f_clus_to_tfs(clus, len(X_tr)) # train models and calculate the error-dicision tables y_tr = y_tr.astype(float) #model = BaggingClassifier(base_estimator = svm.SVC(kernel='linear'), \ # random_state=None, n_estimators = 100 ) [err_mat, models] = f_err_mat(X_tr, y_tr, clus, model) [dec_mat, dec_ixs] = f_err_2_decMat(err_mat, tfs, w, s) print (' Done calucating decision table') return [clus, models, dec_ixs] #def_deal_miss_v_1(d): ''' deal with missing values by replacing them by mean. ''' def f_ICE_fit_2(X_tr, y_tr, n_clus, model, w=0.4, s=0.5): ''' This version use the err mat to re-clustering ''' # rwr based fuzzy clustering clus = f_fuzzy_rwr_clusters(X_tr, n_clus) #print clus[0] tfs = f_clus_to_tfs(clus, len(X_tr)) # train models and calculate the error-dicision tables y_tr = y_tr.astype(float) #model = BaggingClassifier(base_estimator = svm.SVC(kernel='linear'), \ # random_state=None, n_estimators = 100 ) [err_mat, models] = f_err_mat(X_tr, y_tr, clus, model) # ******************** re-clustering ******************** n_iter = 2 for i in range(0, n_iter): clus = f_fuzzy_rwr_clusters(err_mat, n_clus) tfs = f_clus_to_tfs(clus, len(X_tr)) [err_mat, models] = f_err_mat(X_tr, y_tr, clus, model) # ******************************************************* [dec_mat, dec_ixs] = f_err_2_decMat(err_mat, tfs, w, s) print (' Done calucating decision table') return [clus, models, dec_ixs] def f_ICE_pred(X_tr, y_tr, X_te, clus, dec_ixs, models,N=5,alpha=1,beta=1): ''' clus and inst_clus contains the same information that clus is the instances ids for each cluster, while inst_clus stores that for each instance, which cluster(s) it belongs to. dec_ixs stores the good cluster(s) for each instance, which may include even a remote cluster. each instance in dec_ixs does not contain the whole set of instances. ''' # the first col is the prediction using the whole data # start from the second col, the result is by each cluster y_pred_all = f_ICE_tr_te_all_clus(X_tr, X_te, clus, models) y_pred_ICE = np.zeros( len(X_te) ) neighbour_mat = f_eu_dist2(X_tr, X_te) # ---------- for each testing instance ---------- #n_partials = np.zeros( len(X_te) ) #n_wholes = np.zeros( len(X_te) ) for j in range(0, len(X_te) ): # for each testing instance # find the top 10 neighbors for each test instance neighbour_col = neighbour_mat[:, j].flatten() ix = np.argsort(neighbour_col ) ix = ix[::-1] ix_top_neighbors = ix[0:N] #print 'testing inst ' + str(j) #print ' ix of top neighbors:' #print ix_top_neighbors # ---------- find all neighbors' picks ---------- clus_ids_to_use = [] nei_labels = [] for cur_nb in range(0, N): # for each neighbour # find each neighbour's pick cur_nb_ix = ix_top_neighbors[cur_nb] clus_id_to_use = list( dec_ixs[cur_nb_ix] ) clus_ids_to_use = clus_ids_to_use + clus_id_to_use # also find neighbor's label. maybe will be used later as KNN pred # instead of using whole to pred. nei_labels = nei_labels + list( y_tr[cur_nb_ix] ) #print ' clus_ids_to_use:' #print clus_ids_to_use # cluster id + 1 to make the ix fit the col id in y_pred_all a = clus_ids_to_use a = list( np.array(a) + 1 ) clus_ids_to_use = a # number of partial models used n_partial = len(clus_ids_to_use) # number of whole models used, based on parameters alpha, beta and N. n_whole = int( round( alpha*n_partial + beta*N ) ) clus_ids_to_use = clus_ids_to_use + [0] * n_whole #print ' clus_ids_to_use:' #print clus_ids_to_use #print nei_labels y_pred_ICE[j] = np.nanmean(y_pred_all[j, clus_ids_to_use]) print ('Done predicting testing instances.') return y_pred_ICE # test # pa = '/Volumes/Macintosh_HD/Users/zg/bio/3_ensembF/3_scripts/2017_4_4/' # pa = '/Users/zg/Dropbox/bio/ICE_2018/' # pa = './' pa = 'C:/Users/zg/Dropbox/bio/ICE_2018/' n_clus = 100 w = 0.4 s = 0.5 N = 5 alpha = 1 beta = 1 k_fold = 10 aucs_ICE = [] aucs_whole = [] # f_res = pa + 'data/res_ICE_bg_svm_1_iter.txt' #f_res = pa + 'data/res_ICE_bg_svm_py.txt' f_res = pa + 'data/res_ICE_SVM_py.txt' f = open(f_res, 'w') #for j in range(1, 50): for j in range(1, 49): try: X = scipy.io.loadmat(pa+'data/data_all/'+str(j)+'/data.mat')['X'] # 30:breast cancer y = scipy.io.loadmat(pa+'data/data_all/'+str(j)+'/data.mat')['y'] #X = scipy.io.loadmat(pa+'/data/data_all_pickle/30/data.mat')['X'] # 30:breast cancer #y = scipy.io.loadmat(pa+'/data/data_all_pickle/30/data.mat')['y'] #X = scipy.io.loadmat(pa+'/data/data_all_pickle/37/data.mat')['X'] # 37:congress #y = scipy.io.loadmat(pa+'/data/data_all_pickle/37/data.mat')['y'] #imgplot = plt.imshow(ori_graph, interpolation='nearest', aspect='auto') #plt.show() #sim = np.corrcoef(X) #np.fill_diagonal(sim, 0) #n_clus = 100 #model = BaggingClassifier(base_estimator = svm.SVC(kernel='linear'), \ # random_state=None, n_estimators = 100 ) model = svm.SVC(kernel='linear', probability = True) skf = StratifiedKFold(n_splits=k_fold) skf.get_n_splits(X, y) y_preds_ICE = np.zeros( y.size ) y_preds_whole = np.zeros( y.size ) fold_i = 1 for train_index, test_index in skf.split(X, y): # print("TRAIN:", train_index, "TEST:", test_index) X_tr, X_te = X[train_index], X[test_index] y_tr, y_te = y[train_index], y[test_index] [clus, models, dec_ixs] = f_ICE_fit(X_tr, y_tr, n_clus, model, w, s) #[clus, models, dec_ixs] = f_ICE_fit_2(X_tr, y_tr, n_clus, model, w, s) y_pred_ICE = f_ICE_pred(X_tr, y_tr, X_te, clus, dec_ixs, models,N,alpha,beta) y_preds_ICE[test_index] = y_pred_ICE y_pred_whole = f_tr_te(X_tr, y_tr, X_te, model) y_preds_whole[test_index] = y_pred_whole print( j) print( 'fold ' + str(fold_i) + ' finished') fold_i = fold_i + 1 auc_ICE = roc_auc_score(y.flatten(), y_preds_ICE.flatten() ) auc_whole = roc_auc_score(y.flatten(), y_preds_whole.flatten() ) print (auc_ICE, auc_whole) aucs_ICE.append(auc_ICE) aucs_whole.append(auc_whole) f.write(str(j) + '\t' + str(auc_ICE) + ' \t ' + str(auc_whole) + '\n') except: continue
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 17, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 37811, 198, 41972, 319, 2892, 1526, 220, 642, 8870, 25, 2857, 25, 3070, 2864, 198, 198, 31, 9800, 25, 1976, 70, ...
1.928396
17,122
#!/usr/bin/env python3 """ Imports 7-series routing fabric to the rr graph. For ROI configurations, this also connects the synthetic IO tiles to the routing node specified. Rough structure: Add rr_nodes for CHANX and CHANY from the database. IPIN and OPIN rr_nodes should already be present from the input rr_graph. Create a mapping between database graph_nodes and IPIN, OPIN, CHANX and CHANY rr_node ids in the rr_graph. Add rr_edge for each row in the graph_edge table. Import channel XML node from connection database and serialize output to rr_graph XML. """ import argparse import os.path from hilbertcurve.hilbertcurve import HilbertCurve import math import prjxray.db from prjxray.roi import Roi import prjxray.grid as grid from lib.rr_graph import graph2 from lib.rr_graph import tracks from lib.connection_database import get_wire_pkey, get_track_model import lib.rr_graph_capnp.graph2 as capnp_graph2 from prjxray_constant_site_pins import feature_when_routed from prjxray_tile_import import remove_vpr_tile_prefix import simplejson as json from lib import progressbar_utils import datetime import re import functools import pickle import sqlite3 now = datetime.datetime.now HCLK_CK_BUFHCLK_REGEX = re.compile('HCLK_CK_BUFHCLK[0-9]+') CLK_HROW_CK_MUX_REGEX = re.compile('CLK_HROW_CK_MUX_OUT_([LR])([0-9]+)') CASCOUT_REGEX = re.compile('BRAM_CASCOUT_ADDR((?:BWR)|(?:ARD))ADDRU([0-9]+)') CONNECTION_BOX_FILTER = re.compile('([^0-9]+)[0-9]*') BUFG_CLK_IN_REGEX = re.compile('CLK_HROW_CK_IN_[LR][0-9]+') BUFG_CLK_OUT_REGEX = re.compile('CLK_HROW_R_CK_GCLK[0-9]+') CCIO_ACTIVE_REGEX = re.compile('HCLK_CMT_CCIO[0-9]+') HCLK_OUT = re.compile('CLK_HROW_CK_HCLK_OUT_([LR])([0-9]+)') IOI_OCLK = re.compile('IOI_OCLK_([01])') # Regex for [LR]IOI_SING tiles IOI_SITE_PIPS = ['OLOGIC', 'ILOGIC', 'IDELAY', 'OCLK_', 'OCLKM_'] IOI_SING_REGEX = re.compile( r'([RL]IOI3_SING_X[0-9]+Y)([0-9]+)(\.IOI_)({})([01])(.*)'.format( "|".join(IOI_SITE_PIPS) ) ) def reduce_connection_box(box): """ Reduce the number of connection boxes by merging some. Examples: >>> reduce_connection_box('IMUX0') 'IMUX' >>> reduce_connection_box('IMUX1') 'IMUX' >>> reduce_connection_box('IMUX10') 'IMUX' >>> reduce_connection_box('BRAM_ADDR') 'IMUX' >>> reduce_connection_box('A_L10') 'A' >>> reduce_connection_box('B') 'B' >>> reduce_connection_box('B_L') 'B' """ box = CONNECTION_BOX_FILTER.match(box).group(1) if 'BRAM_ADDR' in box: box = 'IMUX' if box.endswith('_L'): box = box.replace('_L', '') return box REBUF_NODES = {} REBUF_SOURCES = {} def get_clk_hrow_and_rebuf_tiles_sorted(cur): """ Finds all CLK_HROW_TOP_R, CLK_HROW_BOT_T and REBUF tiles. returns them in a list sorted according to their Y coordinates. """ cur.execute( """ SELECT name FROM phy_tile WHERE name LIKE "CLK_HROW_BOT_R_%" OR name LIKE "CLK_HROW_TOP_R_%" OR name LIKE "CLK_BUFG_REBUF_%" ORDER BY grid_y DESC; """ ) return [t[0] for t in cur.fetchall()] HCLK_CMT_TILES = {} def check_feature(feature): """ Check if enabling this feature requires other features to be enabled. Some pips imply other features. Example: .HCLK_LEAF_CLK_B_BOTL0.HCLK_CK_BUFHCLK10 implies: .ENABLE_BUFFER.HCLK_CK_BUFHCLK10 """ # IOI_SING tiles have bits in common with the IOI tiles. # # The difference is that the TOP IOI_SING tile shares bits with # the bottom half of a normal IOI tile, while the BOTTOM IOI_SING # shares bits with the top half of a normal IOI TILE. # # The following, is to change the edge feature to accomodate this # need, as the IOI_SING tiles have the same wire, and pip names # despite they are found on the TOP or BOTTOM of an IOI column m = IOI_SING_REGEX.fullmatch(feature) if m: # Each clock region spans a total of 50 IOBs. # The IOI_SING are found on top or bottom of the whole # IOI/IOB column. The Y coordinate identified with the # second capture group is dived by 50 to get the relative # position of the IOI_SING within the clock region column is_bottom_sing = int(m.group(2)) % 50 == 0 # This is the value to attach to the source pip name that # changes based on which IOI_SING is selected (top or bottom) # # Example: IOI_OLOGIC0_D1.IOI_IMUX34_0 -> IOI_OLOGIC0_D1.IOI_IMUX34_1 src_value = '1' if is_bottom_sing else '0' # This is the value to attach to the IOI_SITE_PIPS names # in the destination wire of the pip # # Example: IOI_OLOGIC0 -> IOI_OLOGIC1 dst_value = '0' if is_bottom_sing else '1' unchanged_feature = "{}{}{}{}".format( m.group(1), m.group(2), m.group(3), m.group(4) ) src_wire = m.group(6).replace('_SING', '') for pip in ['IMUX', 'LOGIC_OUTS', 'CTRL', 'FAN', 'BYP']: if pip in src_wire: src_wire = src_wire.replace('_0', '_{}'.format(src_value)) if 'IOI_OCLK' in src_wire: src_wire = src_wire.replace('_0', '_{}'.format(dst_value)) changed_feature = "{}{}".format(dst_value, src_wire) feature = "{}{}".format(unchanged_feature, changed_feature) feature_path = feature.split('.') # IOB_DIFFO_OUT0->IOB_DIFFO_IN1 # # When this PIP is active the IOB operates in the differential output mode. # There is no feature assosciated with that PIP in the prjxray db but there # is a tile-wide feature named "DIFF_OUT". # # The "DIFF_OUT" cannot be set in the architecture as it is defined one # level up in the hierarchy (its tile-wide, not site-wide). So here we # map the PIP's feature to "DIFF_OUT" if feature_path[2] == "IOB_DIFFO_OUT0" and \ feature_path[1] == "IOB_DIFFO_IN1": return '{}.OUT_DIFF'.format(feature_path[0]) # IOB_PADOUT0->IOB_DIFFI_IN1 # IOB_PADOUT1->IOB_DIFFI_IN0 # # These connections are hard wires that connect IOB33M and IOB33S sites. # They are used in differential input mode. # # Vivado does not report this connection as a PIP but in the prjxray db it # is a pip. Instead of making it a pseudo-pip we simply reject fasm # features here. if feature_path[2] == "IOB_PADOUT0" and feature_path[1] == "IOB_DIFFI_IN1": return '' if feature_path[2] == "IOB_PADOUT1" and feature_path[1] == "IOB_DIFFI_IN0": return '' # REBUF stuff rebuf_key = (feature_path[0], feature_path[1]) if rebuf_key in REBUF_SOURCES: return ' '.join([feature] + REBUF_NODES[REBUF_SOURCES[rebuf_key]]) m = IOI_OCLK.fullmatch(feature_path[1]) if m: enable_oclkm_feature = '{}.IOI_OCLKM_{}.{}'.format( feature_path[0], m.group(1), feature_path[-1] ) return ' '.join((feature, enable_oclkm_feature)) if HCLK_CK_BUFHCLK_REGEX.fullmatch(feature_path[-1]): enable_buffer_feature = '{}.ENABLE_BUFFER.{}'.format( feature_path[0], feature_path[-1] ) return ' '.join((feature, enable_buffer_feature)) # BUFHCE sites are now routed through, without the need of placing them, therefore, # when the relative pip is traversed, the correct fasm feature needs to be added. # The relevant features are: # - IN_USE: to enable the BUFHCE site # - ZINV_CE: to disable the inverter on CE input which is connected to VCC. # This sets the CE signal to constant 1 m = CLK_HROW_CK_MUX_REGEX.fullmatch(feature_path[-1]) if m: x_loc_str = m.group(1) if 'L' in x_loc_str: x_loc = 0 elif 'R' in x_loc_str: x_loc = 1 else: assert False, "Impossible to determine X location of BUFHCE" y_loc = m.group(2) bufhce_loc = 'BUFHCE_X{}Y{}'.format(x_loc, y_loc) enable_bufhce_in_use = '{}.BUFHCE.{}.IN_USE'.format( feature_path[0], bufhce_loc ) enable_bufhce_zinv_ce = '{}.BUFHCE.{}.ZINV_CE=1\'b1'.format( feature_path[0], bufhce_loc ) return ' '.join((feature, enable_bufhce_in_use, enable_bufhce_zinv_ce)) if BUFG_CLK_IN_REGEX.fullmatch(feature_path[-1]): enable_feature = '{}.{}_ACTIVE'.format( feature_path[0], feature_path[-1] ) return ' '.join((feature, enable_feature)) if BUFG_CLK_OUT_REGEX.fullmatch(feature_path[-1]): enable_feature = '{}.{}_ACTIVE'.format( feature_path[0], feature_path[-1] ) return ' '.join((feature, enable_feature)) if CCIO_ACTIVE_REGEX.fullmatch(feature_path[-1]): features = [feature] features.append( '{}.{}_ACTIVE'.format(feature_path[0], feature_path[-1]) ) features.append('{}.{}_USED'.format(feature_path[0], feature_path[-1])) return ' '.join(features) m = HCLK_OUT.fullmatch(feature_path[-1]) if m: return ' '.join( [feature] + find_hclk_cmt_hclk_feature( feature_path[0], m.group(1), m.group(2) ) ) m = CASCOUT_REGEX.fullmatch(feature_path[-2]) if m: enable_cascout = '{}.CASCOUT_{}_ACTIVE'.format( feature_path[0], m.group(1) ) return ' '.join((feature, enable_cascout)) parts = feature.split('.') wire_feature = feature_when_routed(parts[1]) if wire_feature is not None: return '{} {}.{}'.format(feature, parts[0], wire_feature) return feature # CLBLL_L.CLBLL_LL_A1[0] -> (CLBLL_L, CLBLL_LL_A1) PIN_NAME_TO_PARTS = re.compile(r'^([^\.]+)\.([^\]]+)\[0\]$') def set_connection_box( graph, node_idx, grid_x, grid_y, box_id, site_pin_delay ): """ Assign a connection box to an IPIN node. """ node_dict = graph.nodes[node_idx]._asdict() node_dict['connection_box'] = graph2.ConnectionBox( x=grid_x, y=grid_y, id=box_id, site_pin_delay=site_pin_delay, ) graph.nodes[node_idx] = graph2.Node(**node_dict) def update_connection_box( conn, graph, graph_node_pkey, node_idx, connection_box_map ): """ Update connection box of IPIN node if needed. """ cur = conn.cursor() cur.execute( """ SELECT connection_box_wire_pkey FROM graph_node WHERE pkey = ?""", (graph_node_pkey, ) ) connection_box_wire_pkey = cur.fetchone()[0] if connection_box_wire_pkey is not None: cur.execute( """ SELECT grid_x, grid_y FROM phy_tile WHERE pkey = ( SELECT phy_tile_pkey FROM wire WHERE pkey = ? )""", (connection_box_wire_pkey, ) ) grid_x, grid_y = cur.fetchone() cur.execute( "SELECT wire_in_tile_pkey FROM wire WHERE pkey = ?", (connection_box_wire_pkey, ) ) wire_in_tile_pkey = cur.fetchone()[0] box_id = connection_box_map[wire_in_tile_pkey] cur.execute( """ SELECT switch.intrinsic_delay FROM switch WHERE pkey = ( SELECT site_pin_switch_pkey FROM wire_in_tile WHERE pkey = ( SELECT wire_in_tile_pkey FROM wire WHERE pkey = ( SELECT site_wire_pkey FROM node WHERE pkey = ( SELECT node_pkey FROM graph_node WHERE pkey = ? ) ) ) )""", (graph_node_pkey, ) ) site_pin_delay = cur.fetchone()[0] set_connection_box( graph, node_idx, grid_x, grid_y, box_id, site_pin_delay ) def create_connection_boxes(conn, graph): """ Assign connection box ids for all connection box types. """ cur = conn.cursor() cur.execute( """ SELECT pkey, tile_type_pkey, name FROM wire_in_tile WHERE pkey IN ( SELECT DISTINCT wire_in_tile_pkey FROM wire WHERE pkey IN ( SELECT connection_box_wire_pkey FROM graph_node WHERE connection_box_wire_pkey IS NOT NULL ) );""" ) connection_box_map = {} for wire_in_tile_pkey, tile_type_pkey, wire_name in cur: connection_box_map[wire_in_tile_pkey] = graph.maybe_add_connection_box( reduce_connection_box(wire_name) ) return connection_box_map def phy_grid_dims(conn): """ Returns physical grid dimensions. """ cur = conn.cursor() cur.execute("SELECT grid_x FROM phy_tile ORDER BY grid_x DESC LIMIT 1;") x_max = cur.fetchone()[0] cur.execute("SELECT grid_y FROM phy_tile ORDER BY grid_y DESC LIMIT 1;") y_max = cur.fetchone()[0] return x_max + 1, y_max + 1 def find_constant_network(graph): """ Find VCC and GND tiles and create synth_tiles input. All arches should have these synthetic tiles, search the input rr graph for the SYN-GND and SYN-VCC tiles. """ block_types = {} for block_type in graph.block_types: block_types[block_type.name] = block_type.id assert 'SYN-GND' in block_types assert 'SYN-VCC' in block_types gnd_block_id = block_types['SYN-GND'] vcc_block_id = block_types['SYN-VCC'] gnd_loc = None vcc_loc = None for grid_loc in graph.grid: if gnd_block_id == grid_loc.block_type_id: assert gnd_loc is None gnd_loc = (grid_loc.x, grid_loc.y) if vcc_block_id == grid_loc.block_type_id: assert vcc_loc is None vcc_loc = (grid_loc.x, grid_loc.y) assert gnd_loc is not None assert vcc_loc is not None synth_tiles = { 'tiles': { "VCC": { 'loc': vcc_loc, 'pins': [ { 'wire': 'VCC', 'pad': 'VCC', 'port_type': 'VCC', 'is_clock': False, }, ], }, "GND": { 'loc': gnd_loc, 'pins': [ { 'wire': 'GND', 'pad': 'GND', 'port_type': 'GND', 'is_clock': False, }, ], }, } } return synth_tiles if __name__ == '__main__': main()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 37811, 1846, 3742, 767, 12, 25076, 28166, 9664, 284, 262, 374, 81, 4823, 13, 198, 198, 1890, 15107, 40, 25412, 11, 428, 635, 20417, 262, 18512, 24418, 19867, 284, 262, 28166, 198, ...
2.055455
7,177
######################### ######################### # Need to account for limit in input period ######################### ######################### # Baseline M67 long script -- NO crowding # New script copied from quest - want to take p and ecc from each population (all, obs, rec) and put them into separate file # Doing this so we don't have to run analyse each time # Can write separate script for p-ecc plots # Quest paths in this version of script import pandas as pd import numpy as np import os from astropy.coordinates import SkyCoord from astropy import units, constants from astropy.modeling import models, fitting import scipy.stats from scipy.integrate import quad #for Quest import matplotlib matplotlib.use('Agg') doIndividualPlots = True from matplotlib import pyplot as plt #similar to field, but limiting by the hard-soft boundary if __name__ == "__main__": filters = ['u_', 'g_', 'r_', 'i_', 'z_', 'y_', 'all'] #get the Raghavan binary fraction fit fbFit= fitRagfb() print(fbFit) #to normalize intAll, err = quad(RagNormal, -20, 20) intCut, err = quad(RagNormal, -20, np.log10(365*10.)) intNorm = intCut/intAll #cutoff in percent error for "recovered" Pcut = 0.1 #assumed mean stellar mass mMean = 0.5 #minimum number of lines to consider in file Nlim = 3 if (doIndividualPlots): fmass, axmass = plt.subplots() fqrat, axqrat = plt.subplots() fecc, axecc = plt.subplots() flper, axlper = plt.subplots() fdist, axdist = plt.subplots() fmag, axmag = plt.subplots() frad, axrad = plt.subplots() #bins for all the histograms Nbins = 25 mbins = np.arange(0,10, 0.1, dtype='float') qbins = np.arange(0,1, 0.1, dtype='float') ebins = np.arange(0, 1.05, 0.05, dtype='float') lpbins = np.arange(-2, 10, 0.5, dtype='float') dbins = np.arange(0, 40, 1, dtype='float') magbins = np.arange(11, 25, 1, dtype='float') rbins = np.arange(0, 100, 0.2, dtype='float') #blanks for the histograms #All m1hAll = np.zeros_like(mbins)[1:] qhAll = np.zeros_like(qbins)[1:] ehAll = np.zeros_like(ebins)[1:] lphAll = np.zeros_like(lpbins)[1:] dhAll = np.zeros_like(dbins)[1:] maghAll = np.zeros_like(magbins)[1:] rhAll = np.zeros_like(rbins)[1:] #Observable m1hObs = np.zeros_like(mbins)[1:] qhObs = np.zeros_like(qbins)[1:] ehObs = np.zeros_like(ebins)[1:] lphObs = np.zeros_like(lpbins)[1:] dhObs = np.zeros_like(dbins)[1:] maghObs = np.zeros_like(magbins)[1:] rhObs = np.zeros_like(rbins)[1:] #Recovered m1hRec = dict() qhRec = dict() ehRec = dict() lphRec = dict() dhRec = dict() maghRec = dict() rhRec = dict() for f in filters: m1hRec[f] = np.zeros_like(mbins)[1:] qhRec[f] = np.zeros_like(qbins)[1:] ehRec[f] = np.zeros_like(ebins)[1:] lphRec[f] = np.zeros_like(lpbins)[1:] dhRec[f] = np.zeros_like(dbins)[1:] maghRec[f] = np.zeros_like(magbins)[1:] rhRec[f] = np.zeros_like(rbins)[1:] RA = [] Dec = [] recFrac = [] recN = [] rawN = [] obsN = [] fileN = [] fileObsN = [] fileRecN = [] allNPrsa = [] obsNPrsa = [] recNPrsa = [] # Lists for period and eccentricity for Andrew's circularization plots eccAll = [] eccObs = [] eccRec = [] pAll = [] pObs = [] pRec = [] # Using prsa dataframes for these lists because of period cutoff at 1000 days # Dataframes to write to files later; 3 files for each sub-population - append everything to these peccAll = pd.DataFrame(columns = ['e', 'p']) peccObs = pd.DataFrame(columns = ['e', 'p']) peccRec = pd.DataFrame(columns = ['e', 'p']) #Read in all the data and make the histograms d = "./input_files/" files = os.listdir(d) IDs = [] for i, f in enumerate(files): print(round(i/len(files),4), f) fl = file_len(d+f) if (fl >= 4): #read in the header header = pd.read_csv(d+f, nrows=1) ###################### #NEED TO ACCOUNT FOR THE BINARY FRACTION when combining histograms ##################### Nmult = header['clusterMass'][0]/mMean #Nmult = 1. RA.append(header['OpSimRA']) Dec.append(header['OpSimDec']) #read in rest of the file data = pd.read_csv(d+f, header = 2).fillna(-999) rF = 0. rN = 0. Nrec = 0. Nobs = 0. raN = 0. obN = 0. fiN = 0. fioN = 0. firN = 0. NallPrsa = 0. NobsPrsa = 0. NrecPrsa = 0. Nall = len(data.index)/intNorm ###is this correct? (and the only place I need to normalize?) prsa = data.loc[(data['appMagMean_r'] <= 19.5) & (data['appMagMean_r'] > 15.8) & (data['p'] < 1000) & (data['p'] > 0.5)] # Appending for Andrew eccAll.append(prsa['e'].values) pAll.append(prsa['p'].values) NallPrsa = len(prsa.index) if (Nall >= Nlim): #create histograms #All m1hAll0, m1b = np.histogram(data["m1"], bins=mbins) qhAll0, qb = np.histogram(data["m2"]/data["m1"], bins=qbins) ehAll0, eb = np.histogram(data["e"], bins=ebins) lphAll0, lpb = np.histogram(np.ma.log10(data["p"].values).filled(-999), bins=lpbins) dhAll0, db = np.histogram(data["d"], bins=dbins) maghAll0, magb = np.histogram(data["appMagMean_r"], bins=magbins) rhAll0, rb = np.histogram(data["r2"]/data["r1"], bins=rbins) if (doIndividualPlots): axmass.step(m1b[0:-1], m1hAll0/np.sum(m1hAll0), color='black', alpha=0.1) axqrat.step(qb[0:-1], qhAll0/np.sum(qhAll0), color='black', alpha=0.1) axecc.step(eb[0:-1], ehAll0/np.sum(ehAll0), color='black', alpha=0.1) axlper.step(lpb[0:-1], lphAll0/np.sum(lphAll0), color='black', alpha=0.1) axdist.step(db[0:-1], dhAll0/np.sum(dhAll0), color='black', alpha=0.1) axmag.step(magb[0:-1], maghAll0/np.sum(maghAll0), color='black', alpha=0.1) axrad.step(rb[0:-1], rhAll0/np.sum(rhAll0), color='black', alpha=0.1) #account for the binary fraction, as a function of mass dm1 = np.diff(m1b) m1val = m1b[:-1] + dm1/2. fb = np.sum(m1hAll0/len(data.index)*fbFit(m1val)) #account for the hard-soft boundary Phs = getPhs(header['clusterVdisp'].iloc[0]*units.km/units.s).to(units.day).value fb *= RagNormal(np.log10(Phs), cdf = True) print("fb, Phs = ", fb, Phs) Nmult *= fb m1hAll += m1hAll0/Nall*Nmult qhAll += qhAll0/Nall*Nmult ehAll += ehAll0/Nall*Nmult lphAll += lphAll0/Nall*Nmult dhAll += dhAll0/Nall*Nmult maghAll += maghAll0/Nall*Nmult rhAll += rhAll0/Nall*Nmult #Obs obs = data.loc[data['LSM_PERIOD'] != -999] Nobs = len(obs.index) prsaObs = data.loc[(data['appMagMean_r'] <= 19.5) & (data['appMagMean_r'] > 15.8) & (data['p'] < 1000) & (data['p'] >0.5) & (data['LSM_PERIOD'] != -999)] NobsPrsa = len(prsaObs.index) # Appending for Andrew's files eccObs.append(prsaObs['e'].values) pObs.append(prsaObs['p'].values) if (Nobs >= Nlim): m1hObs0, m1b = np.histogram(obs["m1"], bins=mbins) qhObs0, qb = np.histogram(obs["m2"]/obs["m1"], bins=qbins) ehObs0, eb = np.histogram(obs["e"], bins=ebins) lphObs0, lpb = np.histogram(np.ma.log10(obs["p"].values).filled(-999), bins=lpbins) dhObs0, db = np.histogram(obs["d"], bins=dbins) maghObs0, magb = np.histogram(obs["appMagMean_r"], bins=magbins) rhObs0, rb = np.histogram(obs["r2"]/obs["r1"], bins=rbins) m1hObs += m1hObs0/Nall*Nmult qhObs += qhObs0/Nall*Nmult ehObs += ehObs0/Nall*Nmult lphObs += lphObs0/Nall*Nmult dhObs += dhObs0/Nall*Nmult maghObs += maghObs0/Nall*Nmult rhObs += rhObs0/Nall*Nmult #Rec recCombined = pd.DataFrame() prsaRecCombined = pd.DataFrame() for filt in filters: key = filt+'LSS_PERIOD' if (filt == 'all'): key = 'LSM_PERIOD' fullP = abs(data[key] - data['p'])/data['p'] halfP = abs(data[key] - 0.5*data['p'])/(0.5*data['p']) twiceP = abs(data[key] - 2.*data['p'])/(2.*data['p']) rec = data.loc[(data[key] != -999) & ( (fullP < Pcut) | (halfP < Pcut) | (twiceP < Pcut))] prsaRec = data.loc[(data['appMagMean_r'] <= 19.5) & (data['appMagMean_r'] >15.8) & (data['p'] < 1000) & (data['p'] >0.5) & (data['LSM_PERIOD'] != -999) & ( (fullP < Pcut) | (halfP < Pcut) | (twiceP < Pcut))] Nrec = len(rec.index) #I'd like to account for all filters here to have more accurate numbers recCombined = recCombined.append(rec) prsaRecCombined = prsaRecCombined.append(prsaRec) # Going to use prsaRecCombined for ecc-p plots to account for all filters eccRec.append(prsaRec['e'].values) pRec.append(prsaRec['p'].values) if (filt == 'all'): recCombined.drop_duplicates(inplace=True) prsaRecCombined.drop_duplicates(inplace=True) if (Nrec >= Nlim): m1hRec0, m1b = np.histogram(rec["m1"], bins=mbins) qhRec0, qb = np.histogram(rec["m2"]/rec["m1"], bins=qbins) ehRec0, eb = np.histogram(rec["e"], bins=ebins) lphRec0, lpb = np.histogram(np.ma.log10(rec["p"].values).filled(-999), bins=lpbins) dhRec0, db = np.histogram(rec["d"], bins=dbins) maghRec0, magb = np.histogram(rec["appMagMean_r"], bins=magbins) rhRec0, rb = np.histogram(rec["r2"]/rec["r1"], bins=rbins) m1hRec[filt] += m1hRec0/Nall*Nmult qhRec[filt] += qhRec0/Nall*Nmult ehRec[filt] += ehRec0/Nall*Nmult lphRec[filt] += lphRec0/Nall*Nmult dhRec[filt] += dhRec0/Nall*Nmult maghRec[filt] += maghRec0/Nall*Nmult rhRec[filt] += rhRec0/Nall*Nmult #for the mollweide if (filt == 'all'): Nrec = len(recCombined.index) rF = Nrec/Nall rN = Nrec/Nall*Nmult raN = Nmult obN = Nobs/Nall*Nmult fiN = Nall fioN = Nobs firN = Nrec NrecPrsa = len(prsaRecCombined.index) NrecPrsa = NrecPrsa/Nall*Nmult NobsPrsa = NobsPrsa/Nall*Nmult NallPrsa = NallPrsa/Nall*Nmult recFrac.append(rF) recN.append(rN) rawN.append(raN) obsN.append(obN) fileN.append(fiN) fileObsN.append(fioN) fileRecN.append(firN) allNPrsa.append(NallPrsa) obsNPrsa.append(NobsPrsa) recNPrsa.append(NrecPrsa) #print(np.sum(lphRec), np.sum(recN), np.sum(lphRec)/np.sum(recN), np.sum(lphRec0), Nrec, np.sum(lphRec0)/Nrec, np.sum(lphObs), np.sum(obsN), np.sum(lphObs)/np.sum(obsN)) # Concatenating p and ecc lists eccAll = np.concatenate(eccAll) eccObs = np.concatenate(eccObs) eccRec = np.concatenate(eccRec) pAll = np.concatenate(pAll) pObs = np.concatenate(pObs) pRec = np.concatenate(pRec) # print('Ecc lists:', eccAll, eccObs, eccRec) # print('P lists:', pAll, pObs, pRec) # Appending lists with all the p/ecc values to our dataframes # All dataframe peccAll['e'] = eccAll peccAll['p'] = pAll # Observable dataframe peccObs['e'] = eccObs peccObs['p'] = pObs # Recovered dataframe peccRec['e'] = eccRec peccRec['p'] = pRec # print('Final Dataframes:', peccAll, peccObs, peccRec) # print(peccRec.columns) # 3 letter code corresponds to scenario (OC/GC, baseline/colossus, crowding/no crowding) peccAll.to_csv('./pecc/all-M67BN-ecc-p.csv', header = ['e', 'p']) peccObs.to_csv('./pecc/obs-M67BN-ecc-p.csv', header = ['e', 'p']) peccRec.to_csv('./pecc/rec-M67BN-ecc-p.csv', header = ['e', 'p']) #plot and save the histograms saveHist(m1hAll, m1hObs, m1hRec, m1b, 'm1 (Msolar)', 'EBLSST_m1hist') saveHist(qhAll, qhObs, qhRec, qb, 'q (m2/m1)', 'EBLSST_qhist') saveHist(ehAll, ehObs, ehRec, eb, 'e', 'EBLSST_ehist') saveHist(lphAll, lphObs, lphRec, lpb, 'log(P [days])', 'EBLSST_lphist') saveHist(dhAll, dhObs, dhRec, db, 'd (kpc)', 'EBLSST_dhist') saveHist(maghAll, maghObs, maghRec, magb, 'mag', 'EBLSST_maghist') saveHist(rhAll, rhObs, rhRec, rb, 'r2/r1', 'EBLSST_rhist') #make the mollweide coords = SkyCoord(RA, Dec, unit=(units.degree, units.degree),frame='icrs') lGal = coords.galactic.l.wrap_at(180.*units.degree).degree bGal = coords.galactic.b.wrap_at(180.*units.degree).degree RAwrap = coords.ra.wrap_at(180.*units.degree).degree Decwrap = coords.dec.wrap_at(180.*units.degree).degree f, ax = plt.subplots(subplot_kw={'projection': "mollweide"}, figsize=(8,5)) ax.grid(True) #ax.set_xlabel(r"$l$",fontsize=16) #ax.set_ylabel(r"$b$",fontsize=16) #mlw = ax.scatter(lGal.ravel()*np.pi/180., bGal.ravel()*np.pi/180., c=np.log10(np.array(recFrac)*100.), cmap='viridis_r', s = 4) ax.set_xlabel("RA",fontsize=16) ax.set_ylabel("Dec",fontsize=16) mlw = ax.scatter(np.array(RAwrap).ravel()*np.pi/180., np.array(Decwrap).ravel()*np.pi/180., c=np.array(recFrac)*100., cmap='viridis_r', s = 4) cbar = f.colorbar(mlw, shrink=0.7) cbar.set_label(r'% recovered') f.savefig('./plots/' + 'mollweide_pct.pdf',format='pdf', bbox_inches = 'tight') f, ax = plt.subplots(subplot_kw={'projection': "mollweide"}, figsize=(8,5)) ax.grid(True) #ax.set_xlabel(r"$l$",fontsize=16) #ax.set_ylabel(r"$b$",fontsize=16) #mlw = ax.scatter(lGal.ravel()*np.pi/180., bGal.ravel()*np.pi/180., c=np.log10(np.array(recN)), cmap='viridis_r', s = 4) ax.set_xlabel("RA",fontsize=16) ax.set_ylabel("Dec",fontsize=16) mlw = ax.scatter(np.array(RAwrap).ravel()*np.pi/180., np.array(Decwrap).ravel()*np.pi/180., c=np.log10(np.array(recN)), cmap='viridis_r', s = 4) cbar = f.colorbar(mlw, shrink=0.7) cbar.set_label(r'log10(N) recovered') f.savefig('./plots/' + 'mollweide_N.pdf',format='pdf', bbox_inches = 'tight') if (doIndividualPlots): fmass.savefig('./plots/' + 'massPDFall.pdf',format='pdf', bbox_inches = 'tight') fqrat.savefig('./plots/' + 'qPDFall.pdf',format='pdf', bbox_inches = 'tight') fecc.savefig('./plots/' + 'eccPDFall.pdf',format='pdf', bbox_inches = 'tight') flper.savefig('./plots/' + 'lperPDFall.pdf',format='pdf', bbox_inches = 'tight') fdist.savefig('./plots/' + 'distPDFall.pdf',format='pdf', bbox_inches = 'tight') fmag.savefig('./plots/' + 'magPDFall.pdf',format='pdf', bbox_inches = 'tight') frad.savefig('./plots/' + 'radPDFall.pdf',format='pdf', bbox_inches = 'tight') print("###################") print("number of binaries in input files (raw, log):",np.sum(fileN), np.log10(np.sum(fileN))) print("number of binaries in tested with gatspy (raw, log):",np.sum(fileObsN), np.log10(np.sum(fileObsN))) print("number of binaries in recovered with gatspy (raw, log):",np.sum(fileRecN), np.log10(np.sum(fileRecN))) print("recovered/observable*100 with gatspy:",np.sum(fileRecN)/np.sum(fileObsN)*100.) print("###################") print("total in sample (raw, log):",np.sum(rawN), np.log10(np.sum(rawN))) print("total observable (raw, log):",np.sum(obsN), np.log10(np.sum(obsN))) print("total recovered (raw, log):",np.sum(recN), np.log10(np.sum(recN))) print("recovered/observable*100:",np.sum(recN)/np.sum(obsN)*100.) print("###################") print("total in Prsa 15.8<r<19.5 P<1000d sample (raw, log):",np.sum(allNPrsa), np.log10(np.sum(allNPrsa))) print("total observable in Prsa 15.8<r<19.5 P<1000d sample (raw, log):",np.sum(obsNPrsa), np.log10(np.sum(obsNPrsa))) print("total recovered in Prsa 15.8<r<19.5 P<1000d sample (raw, log):",np.sum(recNPrsa), np.log10(np.sum(recNPrsa))) print("Prsa 15.8<r<19.5 P<1000d rec/obs*100:",np.sum(recNPrsa)/np.sum(obsNPrsa)*100.)
[ 14468, 7804, 2, 198, 14468, 7804, 2, 198, 2, 10664, 284, 1848, 329, 4179, 287, 5128, 2278, 198, 14468, 7804, 2, 198, 14468, 7804, 2, 198, 198, 2, 6455, 4470, 337, 3134, 890, 4226, 1377, 8005, 4315, 278, 198, 2, 968, 4226, 18984, 4...
2.109198
7,143
import FWCore.ParameterSet.Config as cms import os process = cms.Process("summary") process.MessageLogger = cms.Service( "MessageLogger", debugModules = cms.untracked.vstring( "*" ), cout = cms.untracked.PSet( threshold = cms.untracked.string( "DEBUG" ) ), destinations = cms.untracked.vstring( "cout" ) ) process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(1) ) process.source = cms.Source("EmptySource", numberEventsInRun = cms.untracked.uint32(1), firstRun = cms.untracked.uint32(1) ) process.load("CondCore.CondDB.CondDB_cfi") process.load("CondTools.BeamSpot.BeamSpotRcdPrinter_cfi") ### 2018 Prompt process.BeamSpotRcdPrinter.tagName = "BeamSpotObjects_PCL_byLumi_v0_prompt" process.BeamSpotRcdPrinter.startIOV = 1350646955507767 process.BeamSpotRcdPrinter.endIOV = 1406876667347162 process.BeamSpotRcdPrinter.output = "summary2018_Prompt.txt" ### 2017 ReReco #process.BeamSpotRcdPrinter.tagName = "BeamSpotObjects_LumiBased_v4_offline" #process.BeamSpotRcdPrinter.startIOV = 1275820035276801 #process.BeamSpotRcdPrinter.endIOV = 1316235677532161 ### 2018 ABC ReReco #process.BeamSpotRcdPrinter.tagName = "BeamSpotObjects_LumiBased_v4_offline" #process.BeamSpotRcdPrinter.startIOV = 1354018504835073 #process.BeamSpotRcdPrinter.endIOV = 1374668707594734 ### 2018D Prompt #process.BeamSpotRcdPrinter.tagName = "BeamSpotObjects_PCL_byLumi_v0_prompt" #process.BeamSpotRcdPrinter.startIOV = 1377280047710242 #process.BeamSpotRcdPrinter.endIOV = 1406876667347162 process.p = cms.Path(process.BeamSpotRcdPrinter)
[ 11748, 48849, 14055, 13, 36301, 7248, 13, 16934, 355, 269, 907, 198, 11748, 28686, 198, 198, 14681, 796, 269, 907, 13, 18709, 7203, 49736, 4943, 198, 198, 14681, 13, 12837, 11187, 1362, 796, 269, 907, 13, 16177, 7, 366, 12837, 11187, ...
2.178117
786
from django.urls import path from rest_framework_simplejwt.views import ( TokenObtainPairView, TokenRefreshView, TokenVerifyView ) urlpatterns = [ path('obtain/', TokenObtainPairView.as_view(), name='token_obtain_pair'), path('refresh/', TokenRefreshView.as_view(), name='token_refresh'), path('verify/', TokenVerifyView.as_view(), name='token_verify'), ]
[ 6738, 42625, 14208, 13, 6371, 82, 1330, 3108, 198, 6738, 1334, 62, 30604, 62, 36439, 73, 46569, 13, 33571, 1330, 357, 198, 220, 220, 220, 29130, 5944, 3153, 47, 958, 7680, 11, 198, 220, 220, 220, 29130, 8134, 3447, 7680, 11, 198, 22...
2.664336
143
from ftplib import FTP,error_perm, all_errors import posixpath from io import BytesIO,SEEK_SET from .source import DataSource import sys import re reftp = re.compile('(ssh|ftp)\:\/\/(([^@:]+)?:?([^@]+)?@)?([^:]+)(:[0-9]+)?\/(.*)')
[ 198, 6738, 10117, 489, 571, 1330, 45854, 11, 18224, 62, 16321, 11, 477, 62, 48277, 198, 11748, 1426, 844, 6978, 198, 6738, 33245, 1330, 2750, 4879, 9399, 11, 36078, 42, 62, 28480, 198, 198, 6738, 764, 10459, 1330, 6060, 7416, 198, 117...
2.363636
99
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """Tests for the knowledge base.""" import unittest from plaso.containers import artifacts from plaso.engine import knowledge_base from tests import test_lib as shared_test_lib if __name__ == '__main__': unittest.main()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 37811, 51, 3558, 329, 262, 3725, 2779, 526, 15931, 198, 198, 11748, 555, 715, 395, 198, 198, 6738, 458, 292, 78, ...
2.835052
97
# https://leetcode.com/problems/word-break-ii/ from typing import List s = Solution() print(s.wordBreak_dfs('catsanddog', ["cat", "cats", "and", "sand", "dog"])) print(s.wordBreak_dfs('pineapplepenapple', [ "apple", "pen", "applepen", "pine", "pineapple"])) # text = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" # words = ["a", "aa", "aaa", "aaaa", "aaaaa", "aaaaaa", # "aaaaaaa", "aaaaaaaa", "aaaaaaaaa", "aaaaaaaaaa"] # print(s.wordBreak(text, words))
[ 2, 3740, 1378, 293, 316, 8189, 13, 785, 14, 1676, 22143, 14, 4775, 12, 9032, 12, 4178, 14, 198, 198, 6738, 19720, 1330, 7343, 628, 198, 198, 82, 796, 28186, 3419, 198, 198, 4798, 7, 82, 13, 4775, 31737, 62, 7568, 82, 10786, 24619,...
2.675799
219
# Copyright 2012 New Dream Network, LLC (DreamHost) # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import os import re import sys import textwrap from alembic.autogenerate import api as alembic_ag_api from alembic import config as alembic_config from alembic.operations import ops as alembic_ops from alembic import script as alembic_script import fixtures import mock from neutron_lib.utils import helpers from oslo_utils import fileutils import pkg_resources import sqlalchemy as sa from testtools import matchers from neutron.conf.db import migration_cli from neutron.db import migration from neutron.db.migration import autogen from neutron.db.migration import cli from neutron.tests import base from neutron.tests import tools from neutron.tests.unit import testlib_api
[ 2, 15069, 2321, 968, 7610, 7311, 11, 11419, 357, 30571, 17932, 8, 198, 2, 1439, 6923, 33876, 13, 198, 2, 198, 2, 220, 220, 220, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 345, 743, 198, 2,...
3.51436
383
from decimal import Decimal from .abc import WithdrawalStrategy # Bengen's Floor-to-Ceiling, as described in McClung's Living Off Your Money
[ 6738, 32465, 1330, 4280, 4402, 198, 6738, 764, 39305, 1330, 2080, 19334, 282, 13290, 4338, 198, 198, 2, 14964, 268, 338, 22343, 12, 1462, 12, 34, 68, 4386, 11, 355, 3417, 287, 23780, 2150, 338, 13728, 3242, 3406, 12911, 198 ]
3.55
40
""" MicroPython driver for Bosh BME280 temperature, pressure and humidity I2C sensor: https://www.bosch-sensortec.com/bst/products/all_products/bme280 Authors: Nelio Goncalves Godoi, Roberto Colistete Jr Version: 3.1.2 @ 2018/04 License: MIT License (https://opensource.org/licenses/MIT) """ import time from ustruct import unpack, unpack_from from array import array # BME280 default address BME280_I2CADDR = 0x76 # BME280_I2CADDR = 0x77 OSAMPLE_0 = 0 OSAMPLE_1 = 1 OSAMPLE_2 = 2 OSAMPLE_4 = 3 OSAMPLE_8 = 4 OSAMPLE_16 = 5 BME280_REGISTER_STATUS = 0xF3 BME280_REGISTER_CONTROL_HUM = 0xF2 BME280_REGISTER_CONTROL = 0xF4 BME280_REGISTER_CONTROL_IIR = 0xF5 FILTER_OFF = 0 FILTER_2 = 1 FILTER_4 = 2 FILTER_8 = 3 FILTER_16 = 4 CELSIUS = 'C' FAHRENHEIT = 'F' KELVIN = 'K'
[ 37811, 198, 13031, 37906, 4639, 329, 42776, 347, 11682, 21033, 5951, 11, 3833, 290, 27716, 314, 17, 34, 12694, 25, 198, 5450, 1378, 2503, 13, 39565, 354, 12, 82, 641, 419, 721, 13, 785, 14, 65, 301, 14, 29498, 14, 439, 62, 29498, ...
2.276471
340
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Objects relating to sourcing connections & variables from Hashicorp Vault """ from typing import Optional import hvac from cached_property import cached_property from hvac.exceptions import InvalidPath, VaultError from airflow.exceptions import AirflowException from airflow.secrets import BaseSecretsBackend from airflow.utils.log.logging_mixin import LoggingMixin
[ 2, 198, 2, 49962, 284, 262, 24843, 10442, 5693, 357, 1921, 37, 8, 739, 530, 198, 2, 393, 517, 18920, 5964, 11704, 13, 220, 4091, 262, 28536, 2393, 198, 2, 9387, 351, 428, 670, 329, 3224, 1321, 198, 2, 5115, 6634, 9238, 13, 220, ...
4.041812
287
# -*- coding: utf-8 -*- """Computes distance between killmails by text similarity. Edit Distance Metrics - Levenshtein Distance - Damerau-Levenshtein Distance - Jaro Distance - Jaro-Winkler Distance - Match Rating Approach Comparison - Hamming Distance Vector Distance Metrics - Jaccard Similarity - Cosine Distance Written By: Adam Coscia Updated On: 11/09/2019 """ # Start timing import time start = time.time() total = 0 def lap(msg): """Records time elapsed.""" global start, total elapsed = (time.time() - start) - total total = time.time() - start if elapsed > 3600: print(f'(+{elapsed/3600:.2f}h|t:{total/3600:.2f}h) {msg}') elif elapsed > 60: if total > 3600: print(f'(+{elapsed/60:.2f}m|t:{total/3600:.2f}h) {msg}') else: print(f'(+{elapsed/60:.2f}m|t:{total/60:.2f}m) {msg}') else: if total > 3600: print(f'(+{elapsed:.3f}s|t:{total/3600:.2f}h) {msg}') elif total > 60: print(f'(+{elapsed:.3f}s|t:{total/60:.2f}m) {msg}') else: print(f'(+{elapsed:.3f}s|t:{total:.3f}s) {msg}') lap("Importing modules...") from ast import literal_eval from functools import reduce import os import sys import numpy as np import pandas as pd from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.metrics.pairwise import linear_kernel def get_long_text_cosine_distance(los1, los2): """Calculates cosine distance between two killmails' item lists. 1. Converts collection of long text items to raw document representation. 2. Converts the collection of raw documents to a matrix of TF-IDF features using TfidfVectorizer (combines vector counting and TF-IDF calculator). 3. Computes cosine similarity between feature vectors. Uses linear kernel since TF-IDF matrix will be normalized already. Arguments: los1: First document, a list of raw strings. los2: Second document, a list of raw strings. Returns: cosine distance as a value between 0-1, with 1 being identical. """ if type(los1) == float or type(los2) == float: return 0 if len(los1) == 0 or len(los2) == 0: return 0 doc1 = reduce(lambda x, y: f'{x} {y}', [x[0] for x in los1]) # Create bag of words doc2 = reduce(lambda x, y: f'{x} {y}', [x[0] for x in los2]) # Create bag of words tfidf = TfidfVectorizer().fit_transform([doc1, doc2]) # Vectorize the bag of words cos_dist = linear_kernel(tfidf[0:1], tfidf[1:2]).flatten()[0] # Compute cosine distance return cos_dist def get_short_text_cosine_distance(los1, los2): """Calculates cosine distance between two killmails' item lists. 1. Converts collection of short text items to raw document representation. 2. Converts the collection of raw documents to a matrix of TF-IDF features using TfidfVectorizer (combines vector counting and TF-IDF calculator). 3. Computes cosine similarity between feature vectors. Uses linear kernel since TF-IDF matrix will be normalized already. Arguments: los1: First document, a list of raw strings. los2: Second document, a list of raw strings. Returns: cosine distance as a value between 0-1, with 1 being identical and 0 being complete different. """ if type(los1) == float or type(los2) == float: return 0 if len(los1) == 0 or len(los2) == 0: return 0 doc1 = reduce(lambda x, y: f'{x} {y}', [x[1] for x in los1]) # Create bag of words doc2 = reduce(lambda x, y: f'{x} {y}', [x[1] for x in los2]) # Create bag of words tfidf = TfidfVectorizer().fit_transform([doc1, doc2]) # Vectorize the bag of words cos_dist = linear_kernel(tfidf[0:1], tfidf[1:2]).flatten()[0] # Compute cosine distance return cos_dist # Load CSV from local file lap("Loading CSV data from local file...") df = pd.read_csv(f'data/all_victims_complete_partialKD.csv', encoding='utf-8') df = df.drop(columns=['HighSlotISK', 'MidSlotISK', 'LowSlotISK', 'type', 'fill']) df = df.dropna() # Convert items column to correct data type lap("Converting 'item' column value types...") df['items'] = df['items'].apply(literal_eval) # Group DataFrame by character_id and compute distance series for each group lap("Computing cosine distances and change in kd by grouping character_id's...") groupby = df.groupby('character_id') # group dataframe by character_id num_groups = len(groupby) # get number of groups count = 0 # current group number out of number of groups groups = [] # list to append modified group dataframes to for name, gp in groupby: # Order the observations and prepare the dataframe gp = (gp.sort_values(by=['killmail_id']) .reset_index() .drop('index', axis=1)) # Generate change in kills over change in deaths and change in kd ratio kills1 = gp['k_count'] kills2 = gp['k_count'].shift() deaths1 = gp['d_count'] deaths2 = gp['d_count'].shift() idx = len(gp.columns) gp.insert(idx, 'del_kdratio', (kills2 - kills1) / (deaths2 - deaths1)) gp.insert(idx+1, 'kd_ratio_diff', gp['kd_ratio']-gp['kd_ratio'].shift()) # Generate pairs of observations sequentially to compare pairs = [] items1 = gp['items'] items2 = gp['items'].shift() for i in range(1, len(gp)): # Start from 1 to avoid adding nan pair los1 = items1.iloc[i] los2 = items2.iloc[i] pairs.append((los2, los1)) # Generate distance series using pairs list and different metrics # start distance series with nan due to starting range at 1 cos_dist_lt = [np.nan] # cosine distance b/w long text BoW cos_dist_st = [np.nan] # cosine distance b/w short text BoW for pair in pairs: cos_dist_lt.append(get_long_text_cosine_distance(pair[0], pair[1])) cos_dist_st.append(get_short_text_cosine_distance(pair[0], pair[1])) idx = len(gp.columns) gp.insert(idx, 'cos_dist_lt', cos_dist_lt) gp.insert(idx, 'cos_dist_st', cos_dist_st) groups.append(gp) # Record progress count += 1 print(f"Progress {count/num_groups:2.1%}", end="\r") lap("Concatenating resulting groups and writing to file...") df_res = pd.concat(groups) df_res.to_csv(f'data/useable_victims_distancesAndKD.csv') lap("Exit")
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 37811, 7293, 1769, 5253, 1022, 1494, 26165, 416, 2420, 26789, 13, 198, 198, 18378, 34600, 3395, 10466, 198, 12, 1004, 574, 1477, 22006, 34600, 198, 12, 360, 2382, 559, 1...
2.542869
2,496
import numpy as np from itertools import product from typing import List from src.config import ConfigChess from src.chess.board import Board from src.chess.move import Move
[ 11748, 299, 32152, 355, 45941, 198, 6738, 340, 861, 10141, 1330, 1720, 198, 6738, 19720, 1330, 7343, 198, 198, 6738, 12351, 13, 11250, 1330, 17056, 7376, 824, 198, 6738, 12351, 13, 2395, 824, 13, 3526, 1330, 5926, 198, 6738, 12351, 13, ...
3.666667
48
from random import gauss
[ 6738, 4738, 1330, 31986, 1046, 628 ]
4.333333
6
import re from typing import Any, ClassVar, Dict, List, NoReturn, Union from cryptography.exceptions import UnsupportedAlgorithm from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives import serialization from requests import Response, Session from .exc import ( AccountDoesNotExist, BankCodeClabeMismatch, ClaveRastreoAlreadyInUse, DuplicatedAccount, InvalidAccountType, InvalidAmount, InvalidField, InvalidInstitution, InvalidPassphrase, InvalidRfcOrCurp, InvalidTrackingKey, MandatoryField, NoOrdenesEncontradas, NoServiceResponse, PldRejected, SameAccount, SignatureValidationError, StpmexException, ) from .resources import CuentaFisica, Orden, Resource, Saldo from .version import __version__ as client_version DEMO_HOST = 'https://demo.stpmex.com:7024' PROD_HOST = 'https://prod.stpmex.com'
[ 11748, 302, 198, 6738, 19720, 1330, 4377, 11, 5016, 19852, 11, 360, 713, 11, 7343, 11, 1400, 13615, 11, 4479, 198, 198, 6738, 45898, 13, 1069, 11755, 1330, 791, 15999, 2348, 42289, 198, 6738, 45898, 13, 71, 1031, 6759, 13, 1891, 2412,...
2.920128
313
import sys import os.path import timeit sys.path.insert( 0, os.path.normpath(os.path.join( os.path.dirname( __file__ ), '..') )) from aql_tests import skip, AqlTestCase, runLocalTests from aql.util_types import UniqueList, SplitListType, List, ValueListType #//===========================================================================// #//===========================================================================// if __name__ == "__main__": runLocalTests()
[ 11748, 25064, 198, 11748, 28686, 13, 6978, 198, 11748, 640, 270, 198, 198, 17597, 13, 6978, 13, 28463, 7, 657, 11, 28686, 13, 6978, 13, 27237, 6978, 7, 418, 13, 6978, 13, 22179, 7, 28686, 13, 6978, 13, 15908, 3672, 7, 11593, 7753, ...
3.738095
126
from datetime import datetime from src.utils import uploaded_file import os
[ 6738, 4818, 8079, 1330, 4818, 8079, 198, 6738, 12351, 13, 26791, 1330, 19144, 62, 7753, 198, 11748, 28686, 628 ]
4.052632
19
# config params KB = 1024 MB = 1024*KB GB = 1024*MB # name of meta root dir META_DIR = ".metasync" # batching time for daemon SYNC_WAIT = 3 # blob size BLOB_UNIT = 32*MB # Increase of Paxos proposal number PAXOS_PNUM_INC = 10 # authentication directory import os AUTH_DIR = os.path.join(os.path.expanduser("~"), ".metasync")
[ 2, 4566, 42287, 198, 198, 22764, 796, 28119, 198, 10744, 796, 28119, 9, 22764, 198, 4579, 796, 28119, 9, 10744, 198, 198, 2, 1438, 286, 13634, 6808, 26672, 198, 44, 20892, 62, 34720, 796, 27071, 4164, 292, 13361, 1, 198, 198, 2, 154...
2.614173
127
import unittest from py.tests.utils import test from py import valid_parentheses as vp
[ 11748, 555, 715, 395, 198, 6738, 12972, 13, 41989, 13, 26791, 1330, 1332, 198, 6738, 12972, 1330, 4938, 62, 8000, 39815, 355, 410, 79, 628 ]
3.52
25
import tensorflow as tf import numpy as np import time import cv2 from hitnet.utils_hitnet import * drivingStereo_config = CameraConfig(0.546, 1000)
[ 11748, 11192, 273, 11125, 355, 48700, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 640, 198, 11748, 269, 85, 17, 198, 6738, 2277, 3262, 13, 26791, 62, 17945, 3262, 1330, 1635, 628, 198, 24255, 1273, 32934, 62, 11250, 796, 20432, 1693...
2.945455
55
from django import forms from fobi.base import FormFieldPlugin, form_element_plugin_registry from .forms import HouseholdTenureForm form_element_plugin_registry.register(HouseholdTenurePlugin)
[ 6738, 42625, 14208, 1330, 5107, 198, 198, 6738, 277, 13411, 13, 8692, 1330, 5178, 15878, 37233, 11, 1296, 62, 30854, 62, 33803, 62, 2301, 4592, 198, 198, 6738, 764, 23914, 1330, 37306, 24893, 495, 8479, 628, 198, 198, 687, 62, 30854, ...
3.535714
56
__all__ = ['EnemyBucketWithStar', 'Nut', 'Beam', 'Enemy', 'Friend', 'Hero', 'Launcher', 'Rotor', 'SpikeyBuddy', 'Star', 'Wizard', 'EnemyEquipedRotor', 'CyclingEnemyObject', 'Joints', 'Bomb', 'Contacts']
[ 834, 439, 834, 796, 37250, 4834, 3065, 33, 38811, 3152, 8248, 3256, 198, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 705, 49004, 3256, 198, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 705, 3856, 321, 3256, 198, 220, 22...
1.508197
244
from __future__ import print_function from six.moves import range import torchvision.transforms as transforms import torch.backends.cudnn as cudnn import torch import torch.nn as nn from torch.autograd import Variable import torch.optim as optim import torchvision.utils as vutils import numpy as np import os import time from PIL import Image, ImageFont, ImageDraw from copy import deepcopy from miscc.config import cfg from miscc.utils import mkdir_p from CaptionDatasets import * from tensorboard import summary from tensorboard import FileWriter from model import G_NET, D_NET64, D_NET128, D_NET256, D_NET512, D_NET1024, INCEPTION_V3 # ################## Shared functions ################### # ################# Text to image task############################ #
[ 6738, 11593, 37443, 834, 1330, 3601, 62, 8818, 198, 6738, 2237, 13, 76, 5241, 1330, 2837, 198, 198, 11748, 28034, 10178, 13, 7645, 23914, 355, 31408, 198, 11748, 28034, 13, 1891, 2412, 13, 66, 463, 20471, 355, 269, 463, 20471, 198, 11...
3.520179
223
from bottle import TEMPLATE_PATH, route, run, template, redirect, get, post, request, response, auth_basic, Bottle, abort, error, static_file import bottle import controller from controller import dobi_parcele_za_prikaz, dobi_info_parcele, dodaj_gosta_na_rezervacijo, naredi_rezervacijo, dobi_rezervacijo_po_id, zakljuci_na_datum_in_placaj, dobi_postavke_racuna import datetime as dt bottle.run(reloader=True, debug=True)
[ 6738, 9294, 1330, 309, 3620, 6489, 6158, 62, 34219, 11, 6339, 11, 1057, 11, 11055, 11, 18941, 11, 651, 11, 1281, 11, 2581, 11, 2882, 11, 6284, 62, 35487, 11, 33608, 11, 15614, 11, 4049, 11, 9037, 62, 7753, 198, 11748, 9294, 198, 1...
2.717949
156
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """Parameter initialization for transducer RNN/Transformer parts.""" import six from espnet.nets.pytorch_backend.initialization import lecun_normal_init_parameters from espnet.nets.pytorch_backend.initialization import set_forget_bias_to_one from espnet.nets.pytorch_backend.transformer.initializer import initialize def initializer(model, args): """Initialize transducer model. Args: model (torch.nn.Module): transducer instance args (Namespace): argument Namespace containing options """ if args.dtype != "transformer": if args.etype == "transformer": initialize(model.encoder, args.transformer_init) lecun_normal_init_parameters(model.dec) else: lecun_normal_init_parameters(model) model.dec.embed.weight.data.normal_(0, 1) for l in six.moves.range(len(model.dec.decoder)): set_forget_bias_to_one(model.dec.decoder[l].bias_ih) else: if args.etype == "transformer": initialize(model, args.transformer_init) else: lecun_normal_init_parameters(model.encoder) initialize(model.decoder, args.transformer_init)
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 37811, 36301, 37588, 329, 1007, 646, 2189, 371, 6144, 14, 8291, 16354, 3354, 526, 15931, 198, 198, 11748, 2237...
2.384913
517
import model import numpy as np import datasetReader as df import main # Number of traces loaded T T = 1 # Generate traces traces_factory = df.DatasetFactory() traces_factory.createDataset(T) traces = traces_factory.traces P0 = np.matrix("[ .02 0;" "0 0 0.5;" "0 0 0]") P1 = np.matrix("[0.1 0 0;" "0 0.5 0;" "0 0 0.9]") M = np.matrix("[0.25 0 0;" "0 0.23 0;" "0 0 0.85]")
[ 11748, 2746, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 27039, 33634, 355, 47764, 198, 11748, 1388, 198, 198, 2, 7913, 286, 20675, 9639, 309, 198, 51, 796, 352, 198, 2, 2980, 378, 20675, 198, 2213, 2114, 62, 69, 9548, 796, 47764,...
1.812261
261
#!/usr/bin/env python import vtk from vtk.util.misc import vtkGetDataRoot VTK_DATA_ROOT = vtkGetDataRoot() # Use the painter to draw using colors. # This is not a pipeline object. It will support pipeline objects. # Please do not use this object directly. imageCanvas = vtk.vtkImageCanvasSource2D() imageCanvas.SetNumberOfScalarComponents(3) imageCanvas.SetScalarTypeToUnsignedChar() imageCanvas.SetExtent(0,320,0,320,0,0) imageCanvas.SetDrawColor(0,0,0) imageCanvas.FillBox(0,511,0,511) # r, g, b imageCanvas.SetDrawColor(255,0,0) imageCanvas.FillBox(0,50,0,100) imageCanvas.SetDrawColor(128,128,0) imageCanvas.FillBox(50,100,0,100) imageCanvas.SetDrawColor(0,255,0) imageCanvas.FillBox(100,150,0,100) imageCanvas.SetDrawColor(0,128,128) imageCanvas.FillBox(150,200,0,100) imageCanvas.SetDrawColor(0,0,255) imageCanvas.FillBox(200,250,0,100) imageCanvas.SetDrawColor(128,0,128) imageCanvas.FillBox(250,300,0,100) # intensity scale imageCanvas.SetDrawColor(5,5,5) imageCanvas.FillBox(0,50,110,210) imageCanvas.SetDrawColor(55,55,55) imageCanvas.FillBox(50,100,110,210) imageCanvas.SetDrawColor(105,105,105) imageCanvas.FillBox(100,150,110,210) imageCanvas.SetDrawColor(155,155,155) imageCanvas.FillBox(150,200,110,210) imageCanvas.SetDrawColor(205,205,205) imageCanvas.FillBox(200,250,110,210) imageCanvas.SetDrawColor(255,255,255) imageCanvas.FillBox(250,300,110,210) # saturation scale imageCanvas.SetDrawColor(245,0,0) imageCanvas.FillBox(0,50,220,320) imageCanvas.SetDrawColor(213,16,16) imageCanvas.FillBox(50,100,220,320) imageCanvas.SetDrawColor(181,32,32) imageCanvas.FillBox(100,150,220,320) imageCanvas.SetDrawColor(149,48,48) imageCanvas.FillBox(150,200,220,320) imageCanvas.SetDrawColor(117,64,64) imageCanvas.FillBox(200,250,220,320) imageCanvas.SetDrawColor(85,80,80) imageCanvas.FillBox(250,300,220,320) convert = vtk.vtkImageRGBToHSV() convert.SetInputConnection(imageCanvas.GetOutputPort()) convertBack = vtk.vtkImageHSVToRGB() convertBack.SetInputConnection(convert.GetOutputPort()) cast = vtk.vtkImageCast() cast.SetInputConnection(convertBack.GetOutputPort()) cast.SetOutputScalarTypeToFloat() cast.ReleaseDataFlagOff() viewer = vtk.vtkImageViewer() viewer.SetInputConnection(convertBack.GetOutputPort()) #viewer SetInputConnection [imageCanvas GetOutputPort] viewer.SetColorWindow(256) viewer.SetColorLevel(127.5) viewer.SetSize(320,320) viewer.Render() # --- end of script --
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 11748, 410, 30488, 198, 6738, 410, 30488, 13, 22602, 13, 44374, 1330, 410, 30488, 3855, 6601, 30016, 198, 36392, 42, 62, 26947, 62, 13252, 2394, 796, 410, 30488, 3855, 6601, 30016, 3419,...
2.523659
951
import csv
[ 11748, 269, 21370, 198 ]
2.75
4
# Copyright 2020 - 2021 MONAI Consortium # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from typing import Tuple import numpy as np import torch from ignite.engine import Engine from monai.handlers import SurfaceDistance def create_spherical_seg_3d( radius: float = 20.0, centre: Tuple[int, int, int] = (49, 49, 49), im_shape: Tuple[int, int, int] = (99, 99, 99) ) -> np.ndarray: """ Return a 3D image with a sphere inside. Voxel values will be 1 inside the sphere, and 0 elsewhere. Args: radius: radius of sphere (in terms of number of voxels, can be partial) centre: location of sphere centre. im_shape: shape of image to create See also: :py:meth:`~create_test_image_3d` """ # Create image image = np.zeros(im_shape, dtype=np.int32) spy, spx, spz = np.ogrid[ -centre[0] : im_shape[0] - centre[0], -centre[1] : im_shape[1] - centre[1], -centre[2] : im_shape[2] - centre[2] ] circle = (spx * spx + spy * spy + spz * spz) <= radius * radius image[circle] = 1 image[~circle] = 0 return image sampler_sphere = torch.Tensor(create_spherical_seg_3d(radius=20, centre=(20, 20, 20))).unsqueeze(0).unsqueeze(0) # test input a list of channel-first tensor sampler_sphere_gt = [torch.Tensor(create_spherical_seg_3d(radius=20, centre=(10, 20, 20))).unsqueeze(0)] sampler_sphere_zeros = torch.zeros_like(sampler_sphere) TEST_SAMPLE_1 = [sampler_sphere, sampler_sphere_gt] TEST_SAMPLE_2 = [sampler_sphere_gt, sampler_sphere_gt] TEST_SAMPLE_3 = [sampler_sphere_zeros, sampler_sphere_gt] TEST_SAMPLE_4 = [sampler_sphere_zeros, sampler_sphere_zeros] if __name__ == "__main__": unittest.main()
[ 2, 15069, 12131, 532, 33448, 25000, 20185, 42727, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846, 351, 262, 13789, 13, 198, 2, 921...
2.659831
829
#!/usr/bin/env python3 import sys import logging import yaml import pandas as pd import numpy as np from collections import defaultdict from sklearn.model_selection import train_test_split from sklearn.ensemble import IsolationForest from sklearn.impute import SimpleImputer from anoflows.hpo import find_best_flows from data_loading import load_data logging.getLogger().setLevel(logging.INFO) if len(sys.argv) == 1: logging.error("YAML data specification missing from the command line arguments") exit(1) spec_file = sys.argv[1] df, spec = load_data(spec_file) max_rows = min(len(df), spec.get("max_rows", 40000)) novelty_detection = spec.get("novelty", True) normal_classes = spec["normal_classes"] precision = defaultdict(list) for rounds in range(spec.get("rounds", 1)): # random sampling df = df.sample(n=max_rows, replace=False) label_col = spec["label_column"] y = df[label_col].values other = df.drop(label_col, inplace=False, axis=1) X = other.values # imputing X = SimpleImputer(copy=False).fit_transform(X) # train/test split X_train, X_test, y_train, y_test = \ train_test_split(X, y, shuffle=False, test_size=0.5) if novelty_detection: keep = np.where(np.isin(y_train, normal_classes))[0] X_train = X_train[keep, :] y_train = y_train[keep] # training #flows, loss = find_best_flows(X_train, device='cpu', n_trials=1) from anoflows.anoflow_bagging import AnoFlowBagging flows = AnoFlowBagging() flows.fit(X_train) iforest = IsolationForest().fit(X_train) # prediction pred = { "anoflows": flows.likelihood(X_test), "iforest": iforest.decision_function(X_test) } # evaluation y_true = np.where(np.isin(y_test, spec["anomaly_classes"]))[0] ref = np.zeros(len(y_test)) ref[y_true] = 1 k = len(y_true) for name, y_pred in pred.items(): anomaly_indices = y_pred.argsort()[:k] prec = ref[anomaly_indices].sum() / k logging.info("%s: %.1f%% (%d anomalies / %d rows)" % (name, 100*prec, k, len(y_test))) precision[name].append(prec) logging.info("* SUMMARY %s", spec_file) for name, prec in precision.items(): prec = 100 * np.array(prec) mean = np.mean(prec) std = np.std(prec) logging.info("%s; mean=%.1f%% std=%.1f%%" % (name, mean, std))
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 198, 11748, 25064, 198, 11748, 18931, 198, 11748, 331, 43695, 198, 11748, 19798, 292, 355, 279, 67, 198, 11748, 299, 32152, 355, 45941, 198, 6738, 17268, 1330, 4277, 11600, 198, 198,...
2.413863
981
__all__ = ['VERSION', 'version_info'] VERSION = '1.4a1'
[ 834, 439, 834, 796, 37250, 43717, 3256, 705, 9641, 62, 10951, 20520, 198, 198, 43717, 796, 705, 16, 13, 19, 64, 16, 6, 628 ]
2.416667
24
from scheme import Structure __all__ = ('Configurable', 'Registry')
[ 6738, 7791, 1330, 32522, 198, 198, 834, 439, 834, 796, 19203, 16934, 11970, 3256, 705, 8081, 4592, 11537, 198 ]
3.631579
19
from .command import * from .database import * from .entrypoint import * from .group import * from .http import * from .messaging import * from .method import * from .operation import * from .stack import * from .threads import *
[ 198, 6738, 764, 21812, 1330, 1635, 198, 6738, 764, 48806, 1330, 1635, 198, 6738, 764, 13000, 4122, 1330, 1635, 198, 6738, 764, 8094, 1330, 1635, 198, 6738, 764, 4023, 1330, 1635, 198, 6738, 764, 37348, 3039, 1330, 1635, 198, 6738, 764, ...
3.609375
64
#!/usr/bin/env python3 # # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """Extracts random constraints from reference files.""" import argparse import random import sys from sacrebleu import extract_ngrams if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--number", "-n", type=int, default=1, help="number of phrases") parser.add_argument("--len", "-l", type=int, default=1, help="phrase length") parser.add_argument( "--add-sos", default=False, action="store_true", help="add <s> token" ) parser.add_argument( "--add-eos", default=False, action="store_true", help="add </s> token" ) parser.add_argument("--seed", "-s", default=0, type=int) args = parser.parse_args() Main(args)
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 2, 198, 2, 15069, 357, 66, 8, 3203, 11, 3457, 13, 290, 663, 29116, 13, 198, 2, 198, 2, 770, 2723, 2438, 318, 11971, 739, 262, 17168, 5964, 1043, 287, 262, 198, 2, 38559, 2429...
2.861199
317
# Copyright 2020 Alexis Lopez Zubieta # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation the # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or # sell copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. import os from .command import Command
[ 2, 220, 15069, 220, 12131, 31078, 22593, 47828, 1155, 64, 198, 2, 198, 2, 220, 2448, 3411, 318, 29376, 7520, 11, 1479, 286, 3877, 11, 284, 597, 1048, 16727, 257, 198, 2, 220, 4866, 286, 428, 3788, 290, 3917, 10314, 3696, 357, 1169, ...
4.267516
157
#!/usr/bin/env python3 # -*- coding: utf-8 -*- import argparse import csv import furl import json import re import sys from collections import defaultdict if __name__ == '__main__': parser = argparse.ArgumentParser( description="Process a file with zenon json records and print some information about them.") parser.add_argument("scrape_file", type=str, help="The file that contains the zenon dumps as json.") parser.add_argument("--desc-filters", type=str, help="A file to filter urls by. Excludes urls with 'desc' fields matching a line in the file.") # these are arguments to print some specific information parser.add_argument("--print-common-hosts", type=int, default=-1, help="Print hosts that appear more than n times in the records urls, then exit.") parser.add_argument("--print-host-urls", type=str, help="Print all urls for the host, then exit.") parser.add_argument("--patterns-cooccur", type=str, help="Format: 'pattern1,pattern2', print how often these occur in single records url fields, then exit.") # these are meant to work together select by a url pattern then print information about the records parser.add_argument("--select-by-url", type=str, help="Give a pattern for a url to select records by.") parser.add_argument("--print-url", action="store_true", help="Print the first of each urls for the selected records. (Ignores other urls present on the records if --select-url is given.)") parser.add_argument("--print-pub-date", action="store_true", help="Print the earliest publication year for each of the selected records.") parser.add_argument("--print-id", action="store_true", help="Print the selected records' ids") parser.add_argument("--print-languages", action="store_true", help="Print the selected records' languages") main(parser.parse_args())
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 11748, 1822, 29572, 198, 11748, 269, 21370, 198, 11748, 277, 6371, 198, 11748, 33918, 198, 11748, 302, 198, 11...
3.38
550
# ================================================================================================== # Copyright 2012 Twitter, Inc. # -------------------------------------------------------------------------------------------------- # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this work except in compliance with the License. # You may obtain a copy of the License in the LICENSE file, or at: # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ================================================================================================== __author__ = 'Brian Larson' from twitter.pants.targets.exportable_jvm_library import ExportableJvmLibrary
[ 2, 38093, 10052, 28, 198, 2, 15069, 2321, 3009, 11, 3457, 13, 198, 2, 16529, 3880, 438, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 670, 2845, 287, ...
4.99505
202
# -*- coding: utf-8 -*- #!/usr/bin/env python # # Copyright 2018-2020 BigML # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import time import json import os from datetime import datetime, timedelta from world import world from nose.tools import eq_, assert_less from bigml.api import HTTP_CREATED from bigml.api import HTTP_ACCEPTED from bigml.api import FINISHED from bigml.api import FAULTY from bigml.api import get_status from read_pca_steps import i_get_the_pca #@step(r'the pca name is "(.*)"') #@step(r'I create a PCA from a dataset$') #@step(r'I create a PCA from a dataset$') #@step(r'I update the PCA name to "(.*)"$') #@step(r'I wait until the PCA status code is either (\d) or (-\d) less than (\d+)') #@step(r'I wait until the PCA is ready less than (\d+)')
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 198, 2, 15069, 2864, 12, 42334, 4403, 5805, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 1...
3.099034
414
days_of_week = ['Monday','Tuesday','Wednesday','Thursday','Friday','Saturday', 'Sunday'] operation = '' options = ['Info', 'Check-in/Out', 'Edit games', 'Back'] admins = ['admin1_telegram_nickname', 'admin2_telegram_nickname'] avail_days = [] TOKEN = 'bot_token' group_id = id_of_group_chat
[ 12545, 62, 1659, 62, 10464, 796, 37250, 23810, 41707, 26133, 41707, 27150, 41707, 25381, 41707, 20610, 41707, 19844, 3256, 705, 21934, 20520, 198, 27184, 796, 10148, 198, 25811, 796, 37250, 12360, 3256, 705, 9787, 12, 259, 14, 7975, 3256, ...
2.959184
98
''' Austin Richards 2/20/21 sandwich-maker.py uses pyinputplus to validate user input for sandwich preferences ''' import pyinputplus as ip def get_cost(food_name): '''gets the cost of items in sandwich_builder''' food_dict = { 'sourdough':1.75, 'rye':2.0, 'wheat':1.50, 'white':1.25, 'chicken':2.0, 'turkey':1.50, 'ham':2.0, 'tofu':1.25, 'cheddar':2.0, 'swiss':2.5, 'mozzarella':2.5, 'yes':0.25, # toppings return 'yes' in sandwich_builder(), so I made them all cost 0.25 'no':0 # saying no to a topping costs nothing } return food_dict[food_name] sandwich_builder()
[ 7061, 6, 198, 40245, 14743, 362, 14, 1238, 14, 2481, 198, 198, 38142, 11451, 12, 10297, 13, 9078, 3544, 12972, 15414, 9541, 284, 26571, 2836, 5128, 329, 20433, 15387, 198, 7061, 6, 198, 11748, 12972, 15414, 9541, 355, 20966, 198, 198, ...
2.037791
344
# -*- coding: utf-8 -*- ''' HeaderUpdater class test ======================== ''' import unittest from tests.testutils import print_testtitle, validate_with_fail from builder.commands.scode import SCode, SCmd from builder.containers.chapter import Chapter from builder.containers.episode import Episode from builder.containers.scene import Scene from builder.containers.story import Story from builder.core import headerupdater as hd
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 7061, 6, 198, 39681, 4933, 67, 729, 1398, 1332, 198, 4770, 2559, 198, 7061, 6, 198, 198, 11748, 555, 715, 395, 198, 6738, 5254, 13, 9288, 26791, 1330, 3601, 62, 9288, ...
3.603306
121
import numpy as np import h5py filename = "test_vlen_datasets_np_bool.h5" rows = [np.array([np.True_, np.False_]), np.array([np.True_, np.True_, np.False_])] f = h5py.File(filename, 'x') # create file, fails if exists vlen_data_type = h5py.special_dtype(vlen=np.bool_) dset = f.create_dataset("vlen_matrix", (2,), compression="gzip", compression_opts=9, fletcher32=True, dtype=vlen_data_type) for r in range(len(rows)): dset[r] = rows[r] f.flush() f.close() f = h5py.File(filename, 'r') dsetr = f["vlen_matrix"] for r in range(dsetr.shape[0]): print(dsetr[r])
[ 11748, 299, 32152, 355, 45941, 198, 11748, 289, 20, 9078, 198, 198, 34345, 796, 366, 9288, 62, 85, 11925, 62, 19608, 292, 1039, 62, 37659, 62, 30388, 13, 71, 20, 1, 198, 198, 8516, 796, 685, 37659, 13, 18747, 26933, 37659, 13, 17821...
1.858311
367
import os, time import numpy as np import scipy.signal import scipy.misc import scipy.ndimage.filters import matplotlib.pyplot as plt import PIL from PIL import ImageDraw import angles import cv2 import SimpleITK as sitk def fig2data(fig): """ @brief Convert a Matplotlib figure to a 4D numpy array with RGBA channels and return it @param fig a matplotlib figure @return a numpy 3D array of RGBA values """ # draw the renderer fig.canvas.draw() # Get the RGBA buffer from the figure w, h = fig.canvas.get_width_height() buf = np.fromstring(fig.canvas.tostring_argb(), dtype=np.uint8) buf.shape = (w, h, 4) # canvas.tostring_argb give pixmap in ARGB mode. # Roll the ALPHA channel to have it in RGBA mode buf = np.roll(buf, 3, axis=2) return buf
[ 11748, 28686, 11, 640, 198, 198, 11748, 299, 32152, 355, 45941, 198, 198, 11748, 629, 541, 88, 13, 12683, 282, 198, 11748, 629, 541, 88, 13, 44374, 198, 11748, 629, 541, 88, 13, 358, 9060, 13, 10379, 1010, 198, 198, 11748, 2603, 294...
2.511905
336
from __future__ import absolute_import import re from email.mime.text import MIMEText from smtplib import SMTP from weasyl import define, macro EMAIL_ADDRESS = re.compile(r"^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+\Z") def normalize_address(address): """ Converts an e-mail address to a consistent representation. Returns None if the given address is not considered valid. """ address = address.strip() if not EMAIL_ADDRESS.match(address): return None local, domain = address.split("@", 1) return "%s@%s" % (local, domain.lower()) def send(mailto, subject, content): """Send an e-mail. `mailto` must be a normalized e-mail address to send this e-mail to. The system email will be designated as the sender. """ message = MIMEText(content.strip()) message["To"] = mailto message["From"] = macro.MACRO_EMAIL_ADDRESS message["Subject"] = subject # smtp.sendmail() only converts CR and LF (produced by MIMEText and our templates) to CRLF in Python 3. In Python 2, we need this: msg_crlf = re.sub(r"\r\n|[\r\n]", "\r\n", message.as_string()) smtp = SMTP(define.config_read_setting('host', "localhost", section='smtp')) try: smtp.sendmail( from_addr=macro.MACRO_EMAIL_ADDRESS, to_addrs=[mailto], msg=msg_crlf, ) finally: smtp.quit() define.metric('increment', 'emails')
[ 6738, 11593, 37443, 834, 1330, 4112, 62, 11748, 198, 198, 11748, 302, 198, 6738, 3053, 13, 76, 524, 13, 5239, 1330, 337, 3955, 2767, 2302, 198, 6738, 895, 83, 489, 571, 1330, 9447, 7250, 198, 198, 6738, 356, 292, 2645, 1330, 8160, 1...
2.425926
594
import pytest from rdflib import Graph, Namespace, Literal from rdflib.namespace import RDF, RDFS from sphinx_probs_rdf.directives import PROBS SYS = Namespace("http://example.org/system/")
[ 11748, 12972, 9288, 198, 198, 6738, 374, 67, 2704, 571, 1330, 29681, 11, 28531, 10223, 11, 25659, 1691, 198, 6738, 374, 67, 2704, 571, 13, 14933, 10223, 1330, 371, 8068, 11, 371, 8068, 50, 198, 6738, 599, 20079, 87, 62, 1676, 1443, ...
2.782609
69
#analysis function for three level game
[ 2, 20930, 2163, 329, 1115, 1241, 983, 628 ]
5.125
8
notice = """ Cone Demo ----------------------------------- | Copyright 2022 by Joel C. Alcarez | | [joelalcarez1975@gmail.com] | |-----------------------------------| | We make absolutely no warranty | | of any kind, expressed or implied | |-----------------------------------| | This graphics library outputs | | to a bitmap file. | ----------------------------------- """ from Python_BMP.BITMAPlib import( newBMP, centercoord, plot3Dsolid, getRGBfactors, rotvec3D, conevertandsurface, saveBMP ) import subprocess as proc from os import path if __name__=="__main__": main()
[ 42138, 796, 37227, 201, 198, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 327, 505, 34588, 201, 198, 20368, 6329, 201, 198, 91, 15069, 33160, 416, 18623, 327, 13, 978, 6651, 89, 930, 201, 198, 91, 685, 7639, 417, 28...
2.411371
299
""" Plot a training curve for the 6D data simulator of CT* """ import numpy as np import matplotlib.pyplot as plt from sklearn.preprocessing import StandardScaler from sklearn.gaussian_process import GaussianProcessRegressor from sklearn.gaussian_process.kernels import RBF, WhiteKernel, Matern from sklearn.metrics import mean_squared_error from sklearn.pipeline import Pipeline import sys sys.path.append(r'/home/andrewkirby72/phd_work/data_synthesis') from GP_machine_learning.GP_machine_learning_functions import * from regular_array_sampling.functions import regular_array_monte_carlo # create array to store results for plotting rmse = np.ones((25, 2)) noise = 0.01 # create array of sampled regular array layouts #cand_points = regular_array_monte_carlo(10000) # create testing points X_test, y_test = create_testing_points_regular(noise) n = 0 n_target = 0 n_train = 0 while n_train < 200: n_target = 100 +100*n # create training points X_train, y_train, n_train = \ create_training_points_irregular(n_target, noise) # fit GP regression and calculate rmse kernel = 1.0 ** 2 * RBF(length_scale=[1., 1., 1., 1., 1., 1.]) \ + WhiteKernel(noise_level=1e-5, noise_level_bounds=[1e-10, 1]) pipe = Pipeline([('scaler', StandardScaler()), ('gp', GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=20))]) pipe.fit(X_train, y_train) y_predict = pipe.predict(X_test) mse = mean_squared_error(y_test, y_predict) # report rmse print(n_train, np.sqrt(mse)) rmse[n, 0] = n_train rmse[n, 1] = np.sqrt(mse) n += 1 plt.scatter(rmse[:, 0], rmse[:, 1]) plt.yscale('log') plt.ylim([1e-3, 1e-1]) plt.xlim([0, 200]) plt.title('Training curve RBF - 6D 1% noise - irregular array training - max change halved') plt.ylabel('RMSE') plt.xlabel('Training points') plt.savefig('analysis/GP_machine_learning_plots/\ gp_training_curve_RBF_irregular_training_maxchangehalved_regular_testing.png')
[ 37811, 28114, 257, 3047, 12133, 329, 262, 718, 35, 1366, 35375, 286, 16356, 9, 198, 37811, 198, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 2603, 29487, 8019, 13, 9078, 29487, 355, 458, 83, 198, 6738, 1341, 35720, 13, 3866, 36948, ...
2.555556
783
import sqlite3 import subprocess, datetime from flask import Flask, request, session, g, redirect, url_for, \ abort, render_template, flash from contextlib import closing from tquery import get_latest_record from config import * app = Flask(__name__) app.config.from_object(__name__) # DB helper functions def init_db(): """Initializes the sqlite3 database. This function must be imported and executed from the Python interpreter before the application is first run.""" with closing(connect_db()) as db: with app.open_resource('schema.sql', mode='r') as f: db.cursor().executescript(f.read()) db.commit() # Auto-open and close DB when serving requests def validate_AC_command(user_mode, user_temperature): """Validates and sanitizes user-input command; translates command into irsend call.""" codes = dict() if user_mode not in app.config['ACMODES']: codes['mode_error'] = True else: codes['mode_error'] = False if user_mode is not 'off' and user_temperature not in app.config['ACTEMPERATURES']: codes['temperature_error'] = True else: codes['temperature_error'] = False if not codes['mode_error'] and not codes['temperature_error']: codes['mode'] = user_mode codes['temperature'] = user_temperature if codes['mode'] == 'off': command_postfix = 'off' elif codes['mode'] == 'heat': command_postfix = 'heat' + codes['temperature'] else: command_postfix = codes['temperature'] codes['command'] = command_postfix return codes def command_history(): """Returns a list of dictionaries, each containing a command issued to the AC previously. The list is ordered chronologically, from newest to oldest.""" cur = g.db.execute('select command, ts, user from commands order by id desc') command_history = [] for row in cur.fetchall(): if row[0][0] == 'h': cmd = 'heat to ' + row[0][4:] elif row[0] == 'off': cmd = 'off' else: cmd = 'cool to ' + row[0] command_history.append(dict(command=cmd, ts=row[1], user=row[2])) return command_history def last_record(): """Returns the last temperature and humidity record data. The returned object is a dict with keys ts, fahrenheit, celsius and humidity. """ db_record = get_latest_record() out_record = dict() out_record['date'] = db_record[0].strftime("%Y-%m-%d") out_record['time'] = db_record[0].strftime("%H:%M") out_record['celsius'] = db_record[1] out_record['fahrenheit'] = int(round(out_record['celsius']*9/5.0 + 32)) out_record['humidity'] = int(round(db_record[2])) return out_record if __name__ == '__main__': app.run(host='0.0.0.0')
[ 11748, 44161, 578, 18, 198, 11748, 850, 14681, 11, 4818, 8079, 198, 6738, 42903, 1330, 46947, 11, 2581, 11, 6246, 11, 308, 11, 18941, 11, 19016, 62, 1640, 11, 3467, 198, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, ...
2.507826
1,150
"""Helper functions to tests.""" import numpy as np def norm(vs: np.array) -> float: """Compute the norm of a vector.""" return np.sqrt(np.dot(vs, vs)) def create_random_matrix(size: int) -> np.array: """Create a numpy random matrix.""" return np.random.normal(size=size ** 2).reshape(size, size) def create_symmetic_matrix(size: int) -> np.array: """Create a numpy symmetric matrix.""" xs = create_random_matrix(size) return xs + xs.T def check_eigenpairs( matrix: np.ndarray, eigenvalues: np.ndarray, eigenvectors: np.ndarray) -> bool: """Check that the eigenvalue equation holds.""" for i, value in enumerate(eigenvalues): residue = np.dot( matrix, eigenvectors[:, i]) - value * eigenvectors[:, i] assert norm(residue) < 1e-8
[ 37811, 47429, 5499, 284, 5254, 526, 15931, 198, 198, 11748, 299, 32152, 355, 45941, 628, 198, 4299, 2593, 7, 14259, 25, 45941, 13, 18747, 8, 4613, 12178, 25, 198, 220, 220, 220, 37227, 7293, 1133, 262, 2593, 286, 257, 15879, 526, 1593...
2.456456
333
from typing import List if __name__ == "__main__": nums = [2, 3, 1, 0, 2, 5, 3] print(Solution().findRepeatNumber(nums))
[ 6738, 19720, 1330, 7343, 198, 198, 361, 11593, 3672, 834, 6624, 366, 834, 12417, 834, 1298, 198, 220, 220, 220, 997, 82, 796, 685, 17, 11, 513, 11, 352, 11, 657, 11, 362, 11, 642, 11, 513, 60, 198, 220, 220, 220, 3601, 7, 46344,...
2.433962
53
import numpy as np import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split from gwu_nn.gwu_network import GWUNetwork from gwu_nn.layers import Dense from gwu_nn.activation_layers import Sigmoid np.random.seed(8) num_obs = 8000 # Create our features to draw from two distinct 2D normal distributions x1 = np.random.multivariate_normal([0, 0], [[1, .75],[.75, 1]], num_obs) x2 = np.random.multivariate_normal([3, 8], [[1, .25],[.25, 1]], num_obs) # Stack our inputs into one feature space X = np.vstack((x1, x2)) print(X.shape) y = np.hstack((np.zeros(num_obs), np.ones(num_obs))) print(y.shape) # colors = ['red'] * num_obs + ['blue'] * num_obs # plt.figure(figsize=(12,8)) # plt.scatter(X[:, 0], X[:, 1], c = colors, alpha = 0.5) # Lets randomly split things into training and testing sets so we don't cheat X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42) # Create our model network = GWUNetwork() network.add(Dense(2, 1, True, 'sigmoid')) network.add(Sigmoid()) #network.set_loss('mse') network.compile('log_loss', 0.001) network.fit(X_train, y_train, epochs=100) from scipy.special import logit colors = ['red'] * num_obs + ['blue'] * num_obs plt.figure(figsize=(12, 8)) plt.scatter(X[:, 0], X[:, 1], c=colors, alpha=0.5) # Range of our X values start_x1 = -5 end_x1 = 7 weights = network.layers[0].weights.reshape(-1).tolist() bias = network.layers[0].bias[0][0] start_y = (bias + start_x1 * weights[0] - logit(0.5)) / - weights[1] end_y = (bias + end_x1 * weights[0] - logit(0.5)) / -weights[1] plt.plot([start_x1, end_x1], [start_y, end_y], color='grey')
[ 11748, 299, 32152, 355, 45941, 201, 198, 11748, 2603, 29487, 8019, 13, 9078, 29487, 355, 458, 83, 201, 198, 201, 198, 6738, 1341, 35720, 13, 19849, 62, 49283, 1330, 4512, 62, 9288, 62, 35312, 201, 198, 6738, 308, 43812, 62, 20471, 13,...
2.337912
728
import itertools as it import numpy as np import mdtraj as md from progressbar import ProgressBar from scattering.utils.utils import get_dt from scattering.utils.constants import get_form_factor def compute_van_hove(trj, chunk_length, water=False, r_range=(0, 1.0), bin_width=0.005, n_bins=None, self_correlation=True, periodic=True, opt=True, partial=False): """Compute the partial van Hove function of a trajectory Parameters ---------- trj : mdtraj.Trajectory trajectory on which to compute the Van Hove function chunk_length : int length of time between restarting averaging water : bool use X-ray form factors for water that account for polarization r_range : array-like, shape=(2,), optional, default=(0.0, 1.0) Minimum and maximum radii. bin_width : float, optional, default=0.005 Width of the bins in nanometers. n_bins : int, optional, default=None The number of bins. If specified, this will override the `bin_width` parameter. self_correlation : bool, default=True Whether or not to include the self-self correlations Returns ------- r : numpy.ndarray r positions generated by histogram binning g_r_t : numpy.ndarray Van Hove function at each time and position """ n_physical_atoms = len([a for a in trj.top.atoms if a.element.mass > 0]) unique_elements = list(set([a.element for a in trj.top.atoms if a.element.mass > 0])) partial_dict = dict() for elem1, elem2 in it.combinations_with_replacement(unique_elements[::-1], 2): print('doing {0} and {1} ...'.format(elem1, elem2)) r, g_r_t_partial = compute_partial_van_hove(trj=trj, chunk_length=chunk_length, selection1='element {}'.format(elem1.symbol), selection2='element {}'.format(elem2.symbol), r_range=r_range, bin_width=bin_width, n_bins=n_bins, self_correlation=self_correlation, periodic=periodic, opt=opt) partial_dict[(elem1, elem2)] = g_r_t_partial if partial: return partial_dict norm = 0 g_r_t = None for key, val in partial_dict.items(): elem1, elem2 = key concentration1 = trj.atom_slice(trj.top.select('element {}'.format(elem1.symbol))).n_atoms / n_physical_atoms concentration2 = trj.atom_slice(trj.top.select('element {}'.format(elem2.symbol))).n_atoms / n_physical_atoms form_factor1 = get_form_factor(element_name=elem1.symbol, water=water) form_factor2 = get_form_factor(element_name=elem2.symbol, water=water) coeff = form_factor1 * concentration1 * form_factor2 * concentration2 if g_r_t is None: g_r_t = np.zeros_like(val) g_r_t += val * coeff norm += coeff # Reshape g_r_t to better represent the discretization in both r and t g_r_t_final = np.empty(shape=(chunk_length, len(r))) for i in range(chunk_length): g_r_t_final[i, :] = np.mean(g_r_t[i::chunk_length], axis=0) g_r_t_final /= norm t = trj.time[:chunk_length] return r, t, g_r_t_final def compute_partial_van_hove(trj, chunk_length=10, selection1=None, selection2=None, r_range=(0, 1.0), bin_width=0.005, n_bins=200, self_correlation=True, periodic=True, opt=True): """Compute the partial van Hove function of a trajectory Parameters ---------- trj : mdtraj.Trajectory trajectory on which to compute the Van Hove function chunk_length : int length of time between restarting averaging selection1 : str selection to be considered, in the style of MDTraj atom selection selection2 : str selection to be considered, in the style of MDTraj atom selection r_range : array-like, shape=(2,), optional, default=(0.0, 1.0) Minimum and maximum radii. bin_width : float, optional, default=0.005 Width of the bins in nanometers. n_bins : int, optional, default=None The number of bins. If specified, this will override the `bin_width` parameter. self_correlation : bool, default=True Whether or not to include the self-self correlations Returns ------- r : numpy.ndarray r positions generated by histogram binning g_r_t : numpy.ndarray Van Hove function at each time and position """ unique_elements = ( set([a.element for a in trj.atom_slice(trj.top.select(selection1)).top.atoms]), set([a.element for a in trj.atom_slice(trj.top.select(selection2)).top.atoms]), ) if any([len(val) > 1 for val in unique_elements]): raise UserWarning( 'Multiple elements found in a selection(s). Results may not be ' 'direcitly comprable to scattering experiments.' ) # Don't need to store it, but this serves to check that dt is constant dt = get_dt(trj) pairs = trj.top.select_pairs(selection1=selection1, selection2=selection2) n_chunks = int(trj.n_frames / chunk_length) g_r_t = None pbar = ProgressBar() for i in pbar(range(n_chunks)): times = list() for j in range(chunk_length): times.append([chunk_length*i, chunk_length*i+j]) r, g_r_t_frame = md.compute_rdf_t( traj=trj, pairs=pairs, times=times, r_range=r_range, bin_width=bin_width, n_bins=n_bins, period_length=chunk_length, self_correlation=self_correlation, periodic=periodic, opt=opt, ) if g_r_t is None: g_r_t = np.zeros_like(g_r_t_frame) g_r_t += g_r_t_frame return r, g_r_t
[ 11748, 340, 861, 10141, 355, 340, 198, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 45243, 9535, 73, 355, 45243, 198, 6738, 4371, 5657, 1330, 18387, 10374, 198, 198, 6738, 45765, 13, 26791, 13, 26791, 1330, 651, 62, 28664, 198, 6738,...
2.120191
2,937
# -*- coding: utf-8 -*- # nn_benchmark # author - Quentin Ducasse # https://github.com/QDucasse # quentin.ducasse@ensta-bretagne.org from __future__ import absolute_import __all__ = ["lenet","lenet5","quant_lenet5", "quant_cnv", "quant_tfc", "mobilenetv1","quant_mobilenetv1", "vggnet", "quant_vggnet", "common", "alexnet", "quant_alexnet"] from .alexnet import * from .lenet import * from .lenet5 import * from .mobilenetv1 import * from .quant_mobilenetv1 import * from .quant_alexnet import * from .quant_lenet5 import * from .quant_cnv import * from .quant_tfc import * from .vggnet import * from .quant_vggnet import * from .common import *
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 2, 299, 77, 62, 26968, 4102, 198, 2, 1772, 532, 42447, 24165, 21612, 198, 2, 3740, 1378, 12567, 13, 785, 14, 48, 35, 1229, 21612, 198, 2, 627, 31371, 13, 6077, ...
2.023196
388
#pylint:disable=no-member import cv2 as cv import numpy as np img = cv.imread('/Users/webileapp/Desktop/niharika_files/projects/opencv_course_master/Resources/Photos/cats.jpg') cv.imshow('Cats', img) # blank = np.zeros(img.shape[:2], dtype='uint8') cv.imshow('Blank', blank) gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY) cv.imshow('Gray', gray) # blur = cv.GaussianBlur(gray, (5,5), cv.BORDER_DEFAULT) cv.imshow('Blur', blur) canny = cv.Canny(blur, 125, 175) cv.imshow('Canny Edges', canny) # ret, thresh = cv.threshold(gray, 125, 255, cv.THRESH_BINARY) cv.imshow('Thresh', thresh) # contours, hierarchies = cv.findContours(canny, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE) print(f'{len(contours)} contour(s) found!') # cv.drawContours(blank, contours, -1, (200,120,100), 1) cv.imshow('Contours Drawn', blank) cv.waitKey(0)
[ 2, 79, 2645, 600, 25, 40223, 28, 3919, 12, 19522, 198, 198, 11748, 269, 85, 17, 355, 269, 85, 198, 11748, 299, 32152, 355, 45941, 198, 198, 9600, 796, 269, 85, 13, 320, 961, 10786, 14, 14490, 14, 12384, 576, 1324, 14, 36881, 14, ...
2.222222
369
from . import fcosr_tools __all__ = ['fcosr_tools']
[ 6738, 764, 1330, 277, 6966, 81, 62, 31391, 198, 834, 439, 834, 796, 37250, 69, 6966, 81, 62, 31391, 20520 ]
2.55
20
# Copyright (c) 2022, Juve and contributors # For license information, please see license.txt # import frappe from frappe.model.document import Document
[ 2, 15069, 357, 66, 8, 33160, 11, 12585, 303, 290, 20420, 198, 2, 1114, 5964, 1321, 11, 3387, 766, 5964, 13, 14116, 198, 198, 2, 1330, 5306, 27768, 198, 6738, 5306, 27768, 13, 19849, 13, 22897, 1330, 16854, 198 ]
3.948718
39
import sys from os import path import urllib; from urllib.request import urlretrieve from subprocess import call if __name__ == "__main__": if (len(sys.argv) < 2): print("Enter a directory to install hooks") else: if (path.exists(sys.argv[1])): install_hooks(sys.argv[1])
[ 11748, 25064, 198, 6738, 28686, 1330, 3108, 198, 11748, 2956, 297, 571, 26, 422, 2956, 297, 571, 13, 25927, 1330, 19016, 1186, 30227, 198, 6738, 850, 14681, 1330, 869, 628, 198, 361, 11593, 3672, 834, 6624, 366, 834, 12417, 834, 1298, ...
2.421875
128
""" Micro webapp based on WebOb, Jinja2, WSGI with a simple router """ import os import hmac import hashlib import mimetypes from wsgiref.simple_server import WSGIServer, WSGIRequestHandler from webob import Request from webob import Response from jinja2 import Environment, FileSystemLoader
[ 37811, 198, 13031, 3992, 1324, 1912, 319, 5313, 5944, 11, 17297, 6592, 17, 11, 25290, 18878, 351, 257, 2829, 20264, 198, 37811, 198, 198, 11748, 28686, 198, 11748, 289, 20285, 198, 11748, 12234, 8019, 198, 11748, 17007, 2963, 12272, 198, ...
3.397727
88
import logging from django.core import mail from django.conf import settings from django.core.management.base import BaseCommand import amo.utils from users.models import UserProfile log = logging.getLogger('z.mailer') FROM = settings.DEFAULT_FROM_EMAIL SUBJECT = 'Instructions for Automatic Upgrade to Add-on SDK 1.0' MSG = """\ Hello Mozilla Add-ons Developer! With the final version of the Add-on SDK only a week away, we wanted to get in touch with all add-on developers who have existing SDK-based (Jetpack) add-ons. We would like you to know that going forward AMO will be auto-updating add-ons with new versions of the Add-on SDK upon release. To ensure that your add-on(s) are auto-updated with the 1.0 final version of the SDK, we would ask that you download the latest release candidate build - https://ftp.mozilla.org/pub/mozilla.org/labs/jetpack/addon-sdk-1.0rc2.tar.gz, https://ftp.mozilla.org/pub/mozilla.org/labs/jetpack/addon-sdk-1.0rc2.zip - and update your add-on(s) on AMO. After the 1.0 release, we will scan our add-ons database and automatically upgrade any SDK-based add-ons we find that are using verions 1.0RC2 or greater to the 1.0 final version of the SDK. Any add-ons we find using versions of the SDK below 1.0RC2 will not be auto-updated and you will need to upgrade them to the 1.0 version of the SDK manually. Thank you for participating in the early stages of the Add-on SDK's development. Feedback and engagement from developers like you are the foundations for success in our open source community! Sincerely, The Mozilla Add-ons Team """
[ 11748, 18931, 198, 198, 6738, 42625, 14208, 13, 7295, 1330, 6920, 198, 6738, 42625, 14208, 13, 10414, 1330, 6460, 198, 6738, 42625, 14208, 13, 7295, 13, 27604, 13, 8692, 1330, 7308, 21575, 198, 198, 11748, 716, 78, 13, 26791, 198, 6738,...
3.32914
477
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Specify and constraints to determine which targets are observable for an observer. """ from __future__ import (absolute_import, division, print_function, unicode_literals) # Standard library from abc import ABCMeta, abstractmethod import datetime import time import warnings # Third-party from astropy.time import Time import astropy.units as u from astropy.coordinates import get_body, get_sun, get_moon, Galactic, SkyCoord from astropy import table import numpy as np from numpy.lib.stride_tricks import as_strided # Package from .moon import moon_illumination from .utils import time_grid_from_range from .target import get_skycoord __all__ = ["AltitudeConstraint", "AirmassConstraint", "AtNightConstraint", "is_observable", "is_always_observable", "time_grid_from_range", "GalacticLatitudeConstraint", "SunSeparationConstraint", "MoonSeparationConstraint", "MoonIlluminationConstraint", "LocalTimeConstraint", "PrimaryEclipseConstraint", "SecondaryEclipseConstraint", "Constraint", "TimeConstraint", "observability_table", "months_observable", "max_best_rescale", "min_best_rescale", "PhaseConstraint", "is_event_observable"] _current_year = time.localtime().tm_year # needed for backward compatibility _current_year_time_range = Time( # needed for backward compatibility [str(_current_year) + '-01-01', str(_current_year) + '-12-31'] ) def _make_cache_key(times, targets): """ Make a unique key to reference this combination of ``times`` and ``targets``. Often, we wish to store expensive calculations for a combination of ``targets`` and ``times`` in a cache on an ``observer``` object. This routine will provide an appropriate, hashable, key to store these calculations in a dictionary. Parameters ---------- times : `~astropy.time.Time` Array of times on which to test the constraint. targets : `~astropy.coordinates.SkyCoord` Target or list of targets. Returns ------- cache_key : tuple A hashable tuple for use as a cache key """ # make a tuple from times try: timekey = tuple(times.jd) + times.shape except BaseException: # must be scalar timekey = (times.jd,) # make hashable thing from targets coords try: if hasattr(targets, 'frame'): # treat as a SkyCoord object. Accessing the longitude # attribute of the frame data should be unique and is # quicker than accessing the ra attribute. targkey = tuple(targets.frame.data.lon.value.ravel()) + targets.shape else: # assume targets is a string. targkey = (targets,) except BaseException: targkey = (targets.frame.data.lon,) return timekey + targkey def _get_altaz(times, observer, targets, force_zero_pressure=False): """ Calculate alt/az for ``target`` at times linearly spaced between the two times in ``time_range`` with grid spacing ``time_resolution`` for ``observer``. Cache the result on the ``observer`` object. Parameters ---------- times : `~astropy.time.Time` Array of times on which to test the constraint. targets : {list, `~astropy.coordinates.SkyCoord`, `~astroplan.FixedTarget`} Target or list of targets. observer : `~astroplan.Observer` The observer who has constraints ``constraints``. force_zero_pressure : bool Forcefully use 0 pressure. Returns ------- altaz_dict : dict Dictionary containing two key-value pairs. (1) 'times' contains the times for the alt/az computations, (2) 'altaz' contains the corresponding alt/az coordinates at those times. """ if not hasattr(observer, '_altaz_cache'): observer._altaz_cache = {} # convert times, targets to tuple for hashing aakey = _make_cache_key(times, targets) if aakey not in observer._altaz_cache: try: if force_zero_pressure: observer_old_pressure = observer.pressure observer.pressure = 0 altaz = observer.altaz(times, targets, grid_times_targets=False) observer._altaz_cache[aakey] = dict(times=times, altaz=altaz) finally: if force_zero_pressure: observer.pressure = observer_old_pressure return observer._altaz_cache[aakey] def _get_moon_data(times, observer, force_zero_pressure=False): """ Calculate moon altitude az and illumination for an array of times for ``observer``. Cache the result on the ``observer`` object. Parameters ---------- times : `~astropy.time.Time` Array of times on which to test the constraint. observer : `~astroplan.Observer` The observer who has constraints ``constraints``. force_zero_pressure : bool Forcefully use 0 pressure. Returns ------- moon_dict : dict Dictionary containing three key-value pairs. (1) 'times' contains the times for the computations, (2) 'altaz' contains the corresponding alt/az coordinates at those times and (3) contains the moon illumination for those times. """ if not hasattr(observer, '_moon_cache'): observer._moon_cache = {} # convert times to tuple for hashing aakey = _make_cache_key(times, 'moon') if aakey not in observer._moon_cache: try: if force_zero_pressure: observer_old_pressure = observer.pressure observer.pressure = 0 altaz = observer.moon_altaz(times) illumination = np.array(moon_illumination(times)) observer._moon_cache[aakey] = dict(times=times, illum=illumination, altaz=altaz) finally: if force_zero_pressure: observer.pressure = observer_old_pressure return observer._moon_cache[aakey] def _get_meridian_transit_times(times, observer, targets): """ Calculate next meridian transit for an array of times for ``targets`` and ``observer``. Cache the result on the ``observer`` object. Parameters ---------- times : `~astropy.time.Time` Array of times on which to test the constraint observer : `~astroplan.Observer` The observer who has constraints ``constraints`` targets : {list, `~astropy.coordinates.SkyCoord`, `~astroplan.FixedTarget`} Target or list of targets Returns ------- time_dict : dict Dictionary containing a key-value pair. 'times' contains the meridian_transit times. """ if not hasattr(observer, '_meridian_transit_cache'): observer._meridian_transit_cache = {} # convert times to tuple for hashing aakey = _make_cache_key(times, targets) if aakey not in observer._meridian_transit_cache: meridian_transit_times = observer.target_meridian_transit_time(times, targets) observer._meridian_transit_cache[aakey] = dict(times=meridian_transit_times) return observer._meridian_transit_cache[aakey] def compute_constraint(self, times, observer, targets): solar_altitude = self._get_solar_altitudes(times, observer, targets) mask = solar_altitude <= self.max_solar_altitude return mask class GalacticLatitudeConstraint(Constraint): """ Constrain the distance between the Galactic plane and some targets. """ def __init__(self, min=None, max=None): """ Parameters ---------- min : `~astropy.units.Quantity` or `None` (optional) Minimum acceptable Galactic latitude of target (inclusive). `None` indicates no limit. max : `~astropy.units.Quantity` or `None` (optional) Minimum acceptable Galactic latitude of target (inclusive). `None` indicates no limit. """ self.min = min self.max = max class SunSeparationConstraint(Constraint): """ Constrain the distance between the Sun and some targets. """ def __init__(self, min=None, max=None): """ Parameters ---------- min : `~astropy.units.Quantity` or `None` (optional) Minimum acceptable separation between Sun and target (inclusive). `None` indicates no limit. max : `~astropy.units.Quantity` or `None` (optional) Maximum acceptable separation between Sun and target (inclusive). `None` indicates no limit. """ self.min = min self.max = max class MoonSeparationConstraint(Constraint): """ Constrain the distance between the Earth's moon and some targets. """ def __init__(self, min=None, max=None, ephemeris=None): """ Parameters ---------- min : `~astropy.units.Quantity` or `None` (optional) Minimum acceptable separation between moon and target (inclusive). `None` indicates no limit. max : `~astropy.units.Quantity` or `None` (optional) Maximum acceptable separation between moon and target (inclusive). `None` indicates no limit. ephemeris : str, optional Ephemeris to use. If not given, use the one set with ``astropy.coordinates.solar_system_ephemeris.set`` (which is set to 'builtin' by default). """ self.min = min self.max = max self.ephemeris = ephemeris class MoonIlluminationConstraint(Constraint): """ Constrain the fractional illumination of the Earth's moon. Constraint is also satisfied if the Moon has set. """ def __init__(self, min=None, max=None, ephemeris=None): """ Parameters ---------- min : float or `None` (optional) Minimum acceptable fractional illumination (inclusive). `None` indicates no limit. max : float or `None` (optional) Maximum acceptable fractional illumination (inclusive). `None` indicates no limit. ephemeris : str, optional Ephemeris to use. If not given, use the one set with `~astropy.coordinates.solar_system_ephemeris` (which is set to 'builtin' by default). """ self.min = min self.max = max self.ephemeris = ephemeris def compute_constraint(self, times, observer, targets): # first is the moon up? cached_moon = _get_moon_data(times, observer) moon_alt = cached_moon['altaz'].alt moon_down_mask = moon_alt < 0 moon_up_mask = moon_alt >= 0 illumination = cached_moon['illum'] if self.min is None and self.max is not None: mask = (self.max >= illumination) | moon_down_mask elif self.max is None and self.min is not None: mask = (self.min <= illumination) & moon_up_mask elif self.min is not None and self.max is not None: mask = ((self.min <= illumination) & (illumination <= self.max)) & moon_up_mask else: raise ValueError("No max and/or min specified in " "MoonSeparationConstraint.") return mask class LocalTimeConstraint(Constraint): """ Constrain the observable hours. """ def __init__(self, min=None, max=None): """ Parameters ---------- min : `~datetime.time` Earliest local time (inclusive). `None` indicates no limit. max : `~datetime.time` Latest local time (inclusive). `None` indicates no limit. Examples -------- Constrain the observations to targets that are observable between 23:50 and 04:08 local time: >>> from astroplan import Observer >>> from astroplan.constraints import LocalTimeConstraint >>> import datetime as dt >>> subaru = Observer.at_site("Subaru", timezone="US/Hawaii") >>> # bound times between 23:50 and 04:08 local Hawaiian time >>> constraint = LocalTimeConstraint(min=dt.time(23,50), max=dt.time(4,8)) """ self.min = min self.max = max if self.min is None and self.max is None: raise ValueError("You must at least supply either a minimum or a maximum time.") if self.min is not None: if not isinstance(self.min, datetime.time): raise TypeError("Time limits must be specified as datetime.time objects.") if self.max is not None: if not isinstance(self.max, datetime.time): raise TypeError("Time limits must be specified as datetime.time objects.") class TimeConstraint(Constraint): """Constrain the observing time to be within certain time limits. An example use case for this class would be to associate an acceptable time range with a specific observing block. This can be useful if not all observing blocks are valid over the time limits used in calls to `is_observable` or `is_always_observable`. """ def __init__(self, min=None, max=None): """ Parameters ---------- min : `~astropy.time.Time` Earliest time (inclusive). `None` indicates no limit. max : `~astropy.time.Time` Latest time (inclusive). `None` indicates no limit. Examples -------- Constrain the observations to targets that are observable between 2016-03-28 and 2016-03-30: >>> from astroplan import Observer >>> from astropy.time import Time >>> subaru = Observer.at_site("Subaru") >>> t1 = Time("2016-03-28T12:00:00") >>> t2 = Time("2016-03-30T12:00:00") >>> constraint = TimeConstraint(t1,t2) """ self.min = min self.max = max if self.min is None and self.max is None: raise ValueError("You must at least supply either a minimum or a " "maximum time.") if self.min is not None: if not isinstance(self.min, Time): raise TypeError("Time limits must be specified as " "astropy.time.Time objects.") if self.max is not None: if not isinstance(self.max, Time): raise TypeError("Time limits must be specified as " "astropy.time.Time objects.") class PrimaryEclipseConstraint(Constraint): """ Constrain observations to times during primary eclipse. """ def __init__(self, eclipsing_system): """ Parameters ---------- eclipsing_system : `~astroplan.periodic.EclipsingSystem` System which must be in primary eclipse. """ self.eclipsing_system = eclipsing_system class SecondaryEclipseConstraint(Constraint): """ Constrain observations to times during secondary eclipse. """ def __init__(self, eclipsing_system): """ Parameters ---------- eclipsing_system : `~astroplan.periodic.EclipsingSystem` System which must be in secondary eclipse. """ self.eclipsing_system = eclipsing_system class PhaseConstraint(Constraint): """ Constrain observations to times in some range of phases for a periodic event (e.g.~transiting exoplanets, eclipsing binaries). """ def __init__(self, periodic_event, min=None, max=None): """ Parameters ---------- periodic_event : `~astroplan.periodic.PeriodicEvent` or subclass System on which to compute the phase. For example, the system could be an eclipsing or non-eclipsing binary, or exoplanet system. min : float (optional) Minimum phase (inclusive) on interval [0, 1). Default is zero. max : float (optional) Maximum phase (inclusive) on interval [0, 1). Default is one. Examples -------- To constrain observations on orbital phases between 0.4 and 0.6, >>> from astroplan import PeriodicEvent >>> from astropy.time import Time >>> import astropy.units as u >>> binary = PeriodicEvent(epoch=Time('2017-01-01 02:00'), period=1*u.day) >>> constraint = PhaseConstraint(binary, min=0.4, max=0.6) The minimum and maximum phase must be described on the interval [0, 1). To constrain observations on orbital phases between 0.6 and 1.2, for example, you should subtract one from the second number: >>> constraint = PhaseConstraint(binary, min=0.6, max=0.2) """ self.periodic_event = periodic_event if (min < 0) or (min > 1) or (max < 0) or (max > 1): raise ValueError('The minimum of the PhaseConstraint must be within' ' the interval [0, 1).') self.min = min if min is not None else 0.0 self.max = max if max is not None else 1.0 def is_always_observable(constraints, observer, targets, times=None, time_range=None, time_grid_resolution=0.5*u.hour): """ A function to determine whether ``targets`` are always observable throughout ``time_range`` given constraints in the ``constraints_list`` for a particular ``observer``. Parameters ---------- constraints : list or `~astroplan.constraints.Constraint` Observational constraint(s) observer : `~astroplan.Observer` The observer who has constraints ``constraints`` targets : {list, `~astropy.coordinates.SkyCoord`, `~astroplan.FixedTarget`} Target or list of targets times : `~astropy.time.Time` (optional) Array of times on which to test the constraint time_range : `~astropy.time.Time` (optional) Lower and upper bounds on time sequence, with spacing ``time_resolution``. This will be passed as the first argument into `~astroplan.time_grid_from_range`. time_grid_resolution : `~astropy.units.Quantity` (optional) If ``time_range`` is specified, determine whether constraints are met between test times in ``time_range`` by checking constraint at linearly-spaced times separated by ``time_resolution``. Default is 0.5 hours. Returns ------- ever_observable : list List of booleans of same length as ``targets`` for whether or not each target is observable in the time range given the constraints. """ if not hasattr(constraints, '__len__'): constraints = [constraints] applied_constraints = [constraint(observer, targets, times=times, time_range=time_range, time_grid_resolution=time_grid_resolution, grid_times_targets=True) for constraint in constraints] constraint_arr = np.logical_and.reduce(applied_constraints) return np.all(constraint_arr, axis=1) def is_observable(constraints, observer, targets, times=None, time_range=None, time_grid_resolution=0.5*u.hour): """ Determines if the ``targets`` are observable during ``time_range`` given constraints in ``constraints_list`` for a particular ``observer``. Parameters ---------- constraints : list or `~astroplan.constraints.Constraint` Observational constraint(s) observer : `~astroplan.Observer` The observer who has constraints ``constraints`` targets : {list, `~astropy.coordinates.SkyCoord`, `~astroplan.FixedTarget`} Target or list of targets times : `~astropy.time.Time` (optional) Array of times on which to test the constraint time_range : `~astropy.time.Time` (optional) Lower and upper bounds on time sequence, with spacing ``time_resolution``. This will be passed as the first argument into `~astroplan.time_grid_from_range`. time_grid_resolution : `~astropy.units.Quantity` (optional) If ``time_range`` is specified, determine whether constraints are met between test times in ``time_range`` by checking constraint at linearly-spaced times separated by ``time_resolution``. Default is 0.5 hours. Returns ------- ever_observable : list List of booleans of same length as ``targets`` for whether or not each target is ever observable in the time range given the constraints. """ if not hasattr(constraints, '__len__'): constraints = [constraints] applied_constraints = [constraint(observer, targets, times=times, time_range=time_range, time_grid_resolution=time_grid_resolution, grid_times_targets=True) for constraint in constraints] constraint_arr = np.logical_and.reduce(applied_constraints) return np.any(constraint_arr, axis=1) def is_event_observable(constraints, observer, target, times=None, times_ingress_egress=None): """ Determines if the ``target`` is observable at each time in ``times``, given constraints in ``constraints`` for a particular ``observer``. Parameters ---------- constraints : list or `~astroplan.constraints.Constraint` Observational constraint(s) observer : `~astroplan.Observer` The observer who has constraints ``constraints`` target : {list, `~astropy.coordinates.SkyCoord`, `~astroplan.FixedTarget`} Target times : `~astropy.time.Time` (optional) Array of mid-event times on which to test the constraints times_ingress_egress : `~astropy.time.Time` (optional) Array of ingress and egress times for ``N`` events, with shape (``N``, 2). Returns ------- event_observable : `~numpy.ndarray` Array of booleans of same length as ``times`` for whether or not the target is ever observable at each time, given the constraints. """ if not hasattr(constraints, '__len__'): constraints = [constraints] if times is not None: applied_constraints = [constraint(observer, target, times=times, grid_times_targets=True) for constraint in constraints] constraint_arr = np.logical_and.reduce(applied_constraints) else: times_ing = times_ingress_egress[:, 0] times_egr = times_ingress_egress[:, 1] applied_constraints_ing = [constraint(observer, target, times=times_ing, grid_times_targets=True) for constraint in constraints] applied_constraints_egr = [constraint(observer, target, times=times_egr, grid_times_targets=True) for constraint in constraints] constraint_arr = np.logical_and(np.logical_and.reduce(applied_constraints_ing), np.logical_and.reduce(applied_constraints_egr)) return constraint_arr def months_observable(constraints, observer, targets, time_range=_current_year_time_range, time_grid_resolution=0.5*u.hour): """ Determines which month the specified ``targets`` are observable for a specific ``observer``, given the supplied ``constraints``. Parameters ---------- constraints : list or `~astroplan.constraints.Constraint` Observational constraint(s) observer : `~astroplan.Observer` The observer who has constraints ``constraints`` targets : {list, `~astropy.coordinates.SkyCoord`, `~astroplan.FixedTarget`} Target or list of targets time_range : `~astropy.time.Time` (optional) Lower and upper bounds on time sequence If ``time_range`` is not specified, defaults to current year (localtime) time_grid_resolution : `~astropy.units.Quantity` (optional) If ``time_range`` is specified, determine whether constraints are met between test times in ``time_range`` by checking constraint at linearly-spaced times separated by ``time_resolution``. Default is 0.5 hours. Returns ------- observable_months : list List of sets of unique integers representing each month that a target is observable, one set per target. These integers are 1-based so that January maps to 1, February maps to 2, etc. """ # TODO: This method could be sped up a lot by dropping to the trigonometric # altitude calculations. if not hasattr(constraints, '__len__'): constraints = [constraints] times = time_grid_from_range(time_range, time_grid_resolution) # TODO: This method could be sped up a lot by dropping to the trigonometric # altitude calculations. applied_constraints = [constraint(observer, targets, times=times, grid_times_targets=True) for constraint in constraints] constraint_arr = np.logical_and.reduce(applied_constraints) months_observable = [] for target, observable in zip(targets, constraint_arr): s = set([t.datetime.month for t in times[observable]]) months_observable.append(s) return months_observable def observability_table(constraints, observer, targets, times=None, time_range=None, time_grid_resolution=0.5*u.hour): """ Creates a table with information about observability for all the ``targets`` over the requested ``time_range``, given the constraints in ``constraints_list`` for ``observer``. Parameters ---------- constraints : list or `~astroplan.constraints.Constraint` Observational constraint(s) observer : `~astroplan.Observer` The observer who has constraints ``constraints`` targets : {list, `~astropy.coordinates.SkyCoord`, `~astroplan.FixedTarget`} Target or list of targets times : `~astropy.time.Time` (optional) Array of times on which to test the constraint time_range : `~astropy.time.Time` (optional) Lower and upper bounds on time sequence, with spacing ``time_resolution``. This will be passed as the first argument into `~astroplan.time_grid_from_range`. If a single (scalar) time, the table will be for a 24 hour period centered on that time. time_grid_resolution : `~astropy.units.Quantity` (optional) If ``time_range`` is specified, determine whether constraints are met between test times in ``time_range`` by checking constraint at linearly-spaced times separated by ``time_resolution``. Default is 0.5 hours. Returns ------- observability_table : `~astropy.table.Table` A Table containing the observability information for each of the ``targets``. The table contains four columns with information about the target and it's observability: ``'target name'``, ``'ever observable'``, ``'always observable'``, and ``'fraction of time observable'``. The column ``'time observable'`` will also be present if the ``time_range`` is given as a scalar. It also contains metadata entries ``'times'`` (with an array of all the times), ``'observer'`` (the `~astroplan.Observer` object), and ``'constraints'`` (containing the supplied ``constraints``). """ if not hasattr(constraints, '__len__'): constraints = [constraints] is_24hr_table = False if hasattr(time_range, 'isscalar') and time_range.isscalar: time_range = (time_range-12*u.hour, time_range+12*u.hour) is_24hr_table = True applied_constraints = [constraint(observer, targets, times=times, time_range=time_range, time_grid_resolution=time_grid_resolution, grid_times_targets=True) for constraint in constraints] constraint_arr = np.logical_and.reduce(applied_constraints) colnames = ['target name', 'ever observable', 'always observable', 'fraction of time observable'] target_names = [target.name for target in targets] ever_obs = np.any(constraint_arr, axis=1) always_obs = np.all(constraint_arr, axis=1) frac_obs = np.sum(constraint_arr, axis=1) / constraint_arr.shape[1] tab = table.Table(names=colnames, data=[target_names, ever_obs, always_obs, frac_obs]) if times is None and time_range is not None: times = time_grid_from_range(time_range, time_resolution=time_grid_resolution) if is_24hr_table: tab['time observable'] = tab['fraction of time observable'] * 24*u.hour tab.meta['times'] = times.datetime tab.meta['observer'] = observer tab.meta['constraints'] = constraints return tab def min_best_rescale(vals, min_val, max_val, less_than_min=1): """ rescales an input array ``vals`` to be a score (between zero and one), where the ``min_val`` goes to one, and the ``max_val`` goes to zero. Parameters ---------- vals : array-like the values that need to be rescaled to be between 0 and 1 min_val : float worst acceptable value (rescales to 0) max_val : float best value cared about (rescales to 1) less_than_min : 0 or 1 what is returned for ``vals`` below ``min_val``. (in some cases anything less than ``min_val`` should also return one, in some cases it should return zero) Returns ------- array of floats between 0 and 1 inclusive rescaled so that ``vals`` equal to ``max_val`` equal 0 and those equal to ``min_val`` equal 1 Examples -------- rescale airmasses to between 0 and 1, with the best (1) and worst (2.25). All values outside the range should return 0. >>> from astroplan.constraints import min_best_rescale >>> import numpy as np >>> airmasses = np.array([1, 1.5, 2, 3, 0]) >>> min_best_rescale(airmasses, 1, 2.25, less_than_min = 0) # doctest: +FLOAT_CMP array([ 1. , 0.6, 0.2, 0. , 0. ]) """ rescaled = (vals - max_val) / (min_val - max_val) below = vals < min_val above = vals > max_val rescaled[below] = less_than_min rescaled[above] = 0 return rescaled def max_best_rescale(vals, min_val, max_val, greater_than_max=1): """ rescales an input array ``vals`` to be a score (between zero and one), where the ``max_val`` goes to one, and the ``min_val`` goes to zero. Parameters ---------- vals : array-like the values that need to be rescaled to be between 0 and 1 min_val : float worst acceptable value (rescales to 0) max_val : float best value cared about (rescales to 1) greater_than_max : 0 or 1 what is returned for ``vals`` above ``max_val``. (in some cases anything higher than ``max_val`` should also return one, in some cases it should return zero) Returns ------- array of floats between 0 and 1 inclusive rescaled so that ``vals`` equal to ``min_val`` equal 0 and those equal to ``max_val`` equal 1 Examples -------- rescale an array of altitudes to be between 0 and 1, with the best (60) going to 1 and worst (35) going to 0. For values outside the range, the rescale should return 0 below 35 and 1 above 60. >>> from astroplan.constraints import max_best_rescale >>> import numpy as np >>> altitudes = np.array([20, 30, 40, 45, 55, 70]) >>> max_best_rescale(altitudes, 35, 60) # doctest: +FLOAT_CMP array([ 0. , 0. , 0.2, 0.4, 0.8, 1. ]) """ rescaled = (vals - min_val) / (max_val - min_val) below = vals < min_val above = vals > max_val rescaled[below] = 0 rescaled[above] = greater_than_max return rescaled
[ 2, 49962, 739, 257, 513, 12, 565, 682, 347, 10305, 3918, 5964, 532, 766, 38559, 24290, 13, 81, 301, 198, 37811, 198, 22882, 1958, 290, 17778, 284, 5004, 543, 6670, 389, 42550, 329, 198, 272, 22890, 13, 198, 37811, 198, 198, 6738, 11...
2.472992
13,163
from django.contrib import messages from django.contrib.auth.decorators import login_required from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage from django.core.urlresolvers import reverse from django.shortcuts import render from django.http import HttpResponseRedirect from core.models import Post, Category, Tag from backend.forms import PostForm, CategoryForm, TagForm # Create your views here.
[ 6738, 42625, 14208, 13, 3642, 822, 1330, 6218, 198, 6738, 42625, 14208, 13, 3642, 822, 13, 18439, 13, 12501, 273, 2024, 1330, 17594, 62, 35827, 198, 6738, 42625, 14208, 13, 7295, 13, 79, 363, 20900, 1330, 31525, 20900, 11, 7873, 3673, ...
3.701754
114
import dataclasses import io import multiprocessing as _mp import uuid import zipfile from concurrent.futures import Future from multiprocessing.connection import Connection from typing import List, Optional, Tuple import numpy from tiktorch import log from tiktorch.rpc import Shutdown from tiktorch.rpc import mp as _mp_rpc from tiktorch.rpc.mp import MPServer from tiktorch.server.reader import eval_model_zip from .backend import base from .rpc_interface import IRPCModelSession def _run_model_session_process( conn: Connection, model_zip: bytes, devices: List[str], log_queue: Optional[_mp.Queue] = None ): try: # from: https://github.com/pytorch/pytorch/issues/973#issuecomment-346405667 import resource rlimit = resource.getrlimit(resource.RLIMIT_NOFILE) resource.setrlimit(resource.RLIMIT_NOFILE, (4096, rlimit[1])) except ModuleNotFoundError: pass # probably running on windows if log_queue: log.configure(log_queue) session_proc = ModelSessionProcess(model_zip, devices) srv = MPServer(session_proc, conn) srv.listen() def start_model_session_process( model_zip: bytes, devices: List[str], log_queue: Optional[_mp.Queue] = None ) -> Tuple[_mp.Process, IRPCModelSession]: client_conn, server_conn = _mp.Pipe() proc = _mp.Process( target=_run_model_session_process, name="ModelSessionProcess", kwargs={"conn": server_conn, "devices": devices, "log_queue": log_queue, "model_zip": model_zip}, ) proc.start() return proc, _mp_rpc.create_client(IRPCModelSession, client_conn)
[ 11748, 4818, 330, 28958, 198, 11748, 33245, 198, 11748, 18540, 305, 919, 278, 355, 4808, 3149, 198, 11748, 334, 27112, 198, 11748, 19974, 7753, 198, 6738, 24580, 13, 69, 315, 942, 1330, 10898, 198, 6738, 18540, 305, 919, 278, 13, 38659,...
2.705686
598
from openpype.modules.ftrack.lib import BaseEvent from openpype.modules.ftrack.lib.avalon_sync import CUST_ATTR_ID_KEY from openpype.modules.ftrack.event_handlers_server.event_sync_to_avalon import ( SyncToAvalonEvent ) def register(session): '''Register plugin. Called when used as an plugin.''' DelAvalonIdFromNew(session).register()
[ 6738, 1280, 79, 2981, 13, 18170, 13, 701, 39638, 13, 8019, 1330, 7308, 9237, 198, 6738, 1280, 79, 2981, 13, 18170, 13, 701, 39638, 13, 8019, 13, 9226, 261, 62, 27261, 1330, 327, 7759, 62, 1404, 5446, 62, 2389, 62, 20373, 198, 6738, ...
2.853659
123
import unittest import tests.settings_mock as settings_mock from tests.activity.classes_mock import FakeLogger from workflow.workflow_IngestAcceptedSubmission import workflow_IngestAcceptedSubmission
[ 11748, 555, 715, 395, 198, 11748, 5254, 13, 33692, 62, 76, 735, 355, 6460, 62, 76, 735, 198, 6738, 5254, 13, 21797, 13, 37724, 62, 76, 735, 1330, 33482, 11187, 1362, 198, 6738, 30798, 13, 1818, 11125, 62, 27682, 395, 38855, 276, 700...
3.722222
54
from urllib import urlencode import urlparse from django.shortcuts import Http404, redirect from django.contrib.auth.views import logout from django.contrib import messages from django.core.urlresolvers import reverse from django.contrib.auth.decorators import login_required from vumi.utils import load_class_by_string from go.base.utils import vumi_api
[ 6738, 2956, 297, 571, 1330, 2956, 11925, 8189, 198, 11748, 19016, 29572, 198, 198, 6738, 42625, 14208, 13, 19509, 23779, 1330, 367, 29281, 26429, 11, 18941, 198, 6738, 42625, 14208, 13, 3642, 822, 13, 18439, 13, 33571, 1330, 2604, 448, ...
3.302752
109
from typogrify.filters import amp, caps, initial_quotes, smartypants, titlecase, typogrify, widont, TypogrifyError from functools import wraps from django.conf import settings from django import template from django.utils.safestring import mark_safe from django.utils.encoding import force_unicode register = template.Library() def make_safe(f): """ A function wrapper to make typogrify play nice with django's unicode support. """ wrapper.is_safe = True return wrapper register.filter('amp', make_safe(amp)) register.filter('caps', make_safe(caps)) register.filter('initial_quotes', make_safe(initial_quotes)) register.filter('smartypants', make_safe(smartypants)) register.filter('titlecase', make_safe(titlecase)) register.filter('typogrify', make_safe(typogrify)) register.filter('widont', make_safe(widont))
[ 6738, 2170, 519, 81, 1958, 13, 10379, 1010, 1330, 20766, 11, 11022, 11, 4238, 62, 421, 6421, 11, 4451, 4464, 1187, 11, 3670, 7442, 11, 2170, 519, 81, 1958, 11, 9214, 756, 11, 17134, 519, 81, 1958, 12331, 198, 6738, 1257, 310, 10141,...
3.114391
271
"""Read, write, create Brainvoyager VMR file format.""" import struct import numpy as np from bvbabel.utils import (read_variable_length_string, write_variable_length_string) # ============================================================================= def read_vmr(filename): """Read Brainvoyager VMR file. Parameters ---------- filename : string Path to file. Returns ------- header : dictionary Pre-data and post-data headers. data : 3D numpy.array Image data. """ header = dict() with open(filename, 'rb') as f: # --------------------------------------------------------------------- # VMR Pre-Data Header # --------------------------------------------------------------------- # NOTE(Developer Guide 2.6): VMR files contain anatomical 3D data sets, # typically containing the whole brain (head) of subjects. The # intensity values are stored as a series of bytes. See the V16 format # for a version storing each intensity value with two bytes (short # integers). The VMR format contains a small header followed by the # actual data followed by a second, more extensive, header. The current # version of VMR files is "4", which is only slightly different from # version 3 (as indicated below). Version 3 added offset values to # format 2 in order to represent large data sets efficiently, e.g. in # the context of advanced segmentation processing. Compared to the # original file version "1", file versions 2 and higher contain # additional header information after the actual data ("post-data # header"). This allows to read VMR data sets with minimal header # checking if the extended information is not needed. The information # in the post-data header contains position information (if available) # and stores a series of spatial transformations, which might have been # performed to the original data set ("history record"). The # post-header data can be probably ignored for custom routines, but is # important in BrainVoyager QX for spatial transformation and # coregistration routines as well as for proper visualization. # Expected binary data: unsigned short int (2 bytes) data, = struct.unpack('<H', f.read(2)) header["File version"] = data data, = struct.unpack('<H', f.read(2)) header["DimX"] = data data, = struct.unpack('<H', f.read(2)) header["DimY"] = data data, = struct.unpack('<H', f.read(2)) header["DimZ"] = data # --------------------------------------------------------------------- # VMR Data # --------------------------------------------------------------------- # NOTE(Developer Guide 2.6): Each data element (intensity value) is # represented in 1 byte. The data is organized in three loops: # DimZ # DimY # DimX # # The axes terminology follows the internal BrainVoyager (BV) format. # The mapping to Talairach axes is as follows: # BV (X front -> back) [axis 2 after np.reshape] = Y in Tal space # BV (Y top -> bottom) [axis 1 after np.reshape] = Z in Tal space # BV (Z left -> right) [axis 0 after np.reshape] = X in Tal space # Expected binary data: unsigned char (1 byte) data_img = np.zeros((header["DimZ"] * header["DimY"] * header["DimX"]), dtype="<B") for i in range(data_img.size): data_img[i], = struct.unpack('<B', f.read(1)) data_img = np.reshape( data_img, (header["DimZ"], header["DimY"], header["DimX"])) data_img = np.transpose(data_img, (0, 2, 1)) # BV to Tal data_img = data_img[::-1, ::-1, ::-1] # Flip BV axes # --------------------------------------------------------------------- # VMR Post-Data Header # --------------------------------------------------------------------- # NOTE(Developer Guide 2.6): The first four entries of the post-data # header are new since file version "3" and contain offset values for # each dimension as well as a value indicating the size of a cube with # iso-dimensions to which the data set will be internally "expanded" # for certain operations. The axes labels are in terms of # BrainVoyager's internal format. These four entries are followed by # scan position information from the original file headers, e.g. from # DICOM files. The coordinate axes labels in these entries are not in # terms of BrainVoyager's internal conventions but follow the DICOM # standard. Then follows eventually a section listing spatial # transformations which have been eventually performed to create the # current VMR (e.g. ACPC transformation). Finally, additional # information further descries the data set, including the assumed # left-right convention, the reference space (e.g. Talairach after # normalization) and voxel resolution. if header["File version"] >= 3: # NOTE(Developer Guide 2.6): These four entries have been added in # file version "3" with BrainVoyager QX 1.7. All other entries are # identical to file version "2". # Expected binary data: short int (2 bytes) data, = struct.unpack('<h', f.read(2)) header["OffsetX"] = data data, = struct.unpack('<h', f.read(2)) header["OffsetY"] = data data, = struct.unpack('<h', f.read(2)) header["OffsetZ"] = data data, = struct.unpack('<h', f.read(2)) header["FramingCubeDim"] = data # Expected binary data: int (4 bytes) data, = struct.unpack('<i', f.read(4)) header["PosInfosVerified"] = data data, = struct.unpack('<i', f.read(4)) header["CoordinateSystem"] = data # Expected binary data: float (4 bytes) data, = struct.unpack('<f', f.read(4)) header["Slice1CenterX"] = data # First slice center X coordinate data, = struct.unpack('<f', f.read(4)) header["Slice1CenterY"] = data # First slice center Y coordinate data, = struct.unpack('<f', f.read(4)) header["Slice1CenterZ"] = data # First slice center Z coordinate data, = struct.unpack('<f', f.read(4)) header["SliceNCenterX"] = data # Last slice center X coordinate data, = struct.unpack('<f', f.read(4)) header["SliceNCenterY"] = data # Last slice center Y coordinate data, = struct.unpack('<f', f.read(4)) header["SliceNCenterZ"] = data # Last slice center Z coordinate data, = struct.unpack('<f', f.read(4)) header["RowDirX"] = data # Slice row direction vector X component data, = struct.unpack('<f', f.read(4)) header["RowDirY"] = data # Slice row direction vector Y component data, = struct.unpack('<f', f.read(4)) header["RowDirZ"] = data # Slice row direction vector Z component data, = struct.unpack('<f', f.read(4)) header["ColDirX"] = data # Slice column direction vector X component data, = struct.unpack('<f', f.read(4)) header["ColDirY"] = data # Slice column direction vector Y component data, = struct.unpack('<f', f.read(4)) header["ColDirZ"] = data # Slice column direction vector Z component # Expected binary data: int (4 bytes) data, = struct.unpack('<i', f.read(4)) header["NRows"] = data # Nr of rows of slice image matrix data, = struct.unpack('<i', f.read(4)) header["NCols"] = data # Nr of columns of slice image matrix # Expected binary data: float (4 bytes) data, = struct.unpack('<f', f.read(4)) header["FoVRows"] = data # Field of view extent in row direction [mm] data, = struct.unpack('<f', f.read(4)) header["FoVCols"] = data # Field of view extent in column dir. [mm] data, = struct.unpack('<f', f.read(4)) header["SliceThickness"] = data # Slice thickness [mm] data, = struct.unpack('<f', f.read(4)) header["GapThickness"] = data # Gap thickness [mm] # Expected binary data: int (4 bytes) data, = struct.unpack('<i', f.read(4)) header["NrOfPastSpatialTransformations"] = data if header["NrOfPastSpatialTransformations"] != 0: # NOTE(Developer Guide 2.6): For each past transformation, the # information specified in the following table is stored. The # "type of transformation" is a value determining how many # subsequent values define the transformation: # "1": Rigid body+scale (3 translation, 3 rotation, 3 scale) # "2": Affine transformation (16 values, 4x4 matrix) # "4": Talairach transformation # "5": Un-Talairach transformation (1 - 5 -> BV axes) header["PastTransformation"] = [] for i in range(header["NrOfPastSpatialTransformations"]): header["PastTransformation"].append(dict()) # Expected binary data: variable-length string data = read_variable_length_string(f) header["PastTransformation"][i]["Name"] = data # Expected binary data: int (4 bytes) data, = struct.unpack('<i', f.read(4)) header["PastTransformation"][i]["Type"] = data # Expected binary data: variable-length string data = read_variable_length_string(f) header["PastTransformation"][i]["SourceFileName"] = data # Expected binary data: int (4 bytes) data, = struct.unpack('<i', f.read(4)) header["PastTransformation"][i]["NrOfValues"] = data # Store transformation values as a list trans_values = [] for j in range(header["PastTransformation"][i]["NrOfValues"]): # Expected binary data: float (4 bytes) data, = struct.unpack('<f', f.read(4)) trans_values.append(data) header["PastTransformation"][i]["Values"] = trans_values # Expected binary data: char (1 byte) data, = struct.unpack('<B', f.read(1)) header["LeftRightConvention"] = data # modified in v4 data, = struct.unpack('<B', f.read(1)) header["ReferenceSpaceVMR"] = data # new in v4 # Expected binary data: float (4 bytes) data, = struct.unpack('<f', f.read(4)) header["VoxelSizeX"] = data # Voxel resolution along X axis data, = struct.unpack('<f', f.read(4)) header["VoxelSizeY"] = data # Voxel resolution along Y axis data, = struct.unpack('<f', f.read(4)) header["VoxelSizeZ"] = data # Voxel resolution along Z axis # Expected binary data: char (1 byte) data, = struct.unpack('<B', f.read(1)) header["VoxelResolutionVerified"] = data data, = struct.unpack('<B', f.read(1)) header["VoxelResolutionInTALmm"] = data # Expected binary data: int (4 bytes) data, = struct.unpack('<i', f.read(4)) header["VMROrigV16MinValue"] = data # 16-bit data min intensity data, = struct.unpack('<i', f.read(4)) header["VMROrigV16MeanValue"] = data # 16-bit data mean intensity data, = struct.unpack('<i', f.read(4)) header["VMROrigV16MaxValue"] = data # 16-bit data max intensity return header, data_img # ============================================================================= def write_vmr(filename, header, data_img): """Protocol to write Brainvoyager VMR file. Parameters ---------- filename : string Output filename. header : dictionary Header of VMR file. data_img : numpy.array, 3D Image. """ with open(filename, 'wb') as f: # --------------------------------------------------------------------- # VMR Pre-Data Header # --------------------------------------------------------------------- # Expected binary data: unsigned short int (2 bytes) data = header["File version"] f.write(struct.pack('<H', data)) data = header["DimX"] f.write(struct.pack('<H', data)) data = header["DimY"] f.write(struct.pack('<H', data)) data = header["DimZ"] f.write(struct.pack('<H', data)) # --------------------------------------------------------------------- # VMR Data # --------------------------------------------------------------------- # Convert axes from Nifti standard back to BV standard data_img = data_img[::-1, ::-1, ::-1] # Flip BV axes data_img = np.transpose(data_img, (0, 2, 1)) # BV to Tal # Expected binary data: unsigned char (1 byte) data_img = data_img.flatten() for i in range(data_img.size): f.write(struct.pack('<B', data_img[i])) # --------------------------------------------------------------------- # VMR Post-Data Header # --------------------------------------------------------------------- if header["File version"] >= 3: # Expected binary data: short int (2 bytes) data = header["OffsetX"] f.write(struct.pack('<h', data)) data = header["OffsetY"] f.write(struct.pack('<h', data)) data = header["OffsetZ"] f.write(struct.pack('<h', data)) data = header["FramingCubeDim"] f.write(struct.pack('<h', data)) # Expected binary data: int (4 bytes) data = header["PosInfosVerified"] f.write(struct.pack('<i', data)) data = header["CoordinateSystem"] f.write(struct.pack('<i', data)) # Expected binary data: float (4 bytes) data = header["Slice1CenterX"] f.write(struct.pack('<f', data)) data = header["Slice1CenterY"] f.write(struct.pack('<f', data)) data = header["Slice1CenterZ"] f.write(struct.pack('<f', data)) data = header["SliceNCenterX"] f.write(struct.pack('<f', data)) data = header["SliceNCenterY"] f.write(struct.pack('<f', data)) data = header["SliceNCenterZ"] f.write(struct.pack('<f', data)) data = header["RowDirX"] f.write(struct.pack('<f', data)) data = header["RowDirY"] f.write(struct.pack('<f', data)) data = header["RowDirZ"] f.write(struct.pack('<f', data)) data = header["ColDirX"] f.write(struct.pack('<f', data)) data = header["ColDirY"] f.write(struct.pack('<f', data)) data = header["ColDirZ"] f.write(struct.pack('<f', data)) # Expected binary data: int (4 bytes) data = header["NRows"] f.write(struct.pack('<i', data)) data = header["NCols"] f.write(struct.pack('<i', data)) # Expected binary data: float (4 bytes) data = header["FoVRows"] f.write(struct.pack('<f', data)) data = header["FoVCols"] f.write(struct.pack('<f', data)) data = header["SliceThickness"] f.write(struct.pack('<f', data)) data = header["GapThickness"] f.write(struct.pack('<f', data)) # Expected binary data: int (4 bytes) data = header["NrOfPastSpatialTransformations"] f.write(struct.pack('<i', data)) if header["NrOfPastSpatialTransformations"] != 0: for i in range(header["NrOfPastSpatialTransformations"]): # Expected binary data: variable-length string data = header["PastTransformation"][i]["Name"] write_variable_length_string(f, data) # Expected binary data: int (4 bytes) data = header["PastTransformation"][i]["Type"] f.write(struct.pack('<i', data)) # Expected binary data: variable-length string data = header["PastTransformation"][i]["SourceFileName"] write_variable_length_string(f, data) # Expected binary data: int (4 bytes) data = header["PastTransformation"][i]["NrOfValues"] f.write(struct.pack('<i', data)) # Transformation values are stored as a list trans_values = header["PastTransformation"][i]["Values"] for j in range(header["PastTransformation"][i]["NrOfValues"]): # Expected binary data: float (4 bytes) f.write(struct.pack('<f', trans_values[j])) # Expected binary data: char (1 byte) data = header["LeftRightConvention"] f.write(struct.pack('<B', data)) data = header["ReferenceSpaceVMR"] f.write(struct.pack('<B', data)) # Expected binary data: float (4 bytes) data = header["VoxelSizeX"] f.write(struct.pack('<f', data)) data = header["VoxelSizeY"] f.write(struct.pack('<f', data)) data = header["VoxelSizeZ"] f.write(struct.pack('<f', data)) # Expected binary data: char (1 byte) data = header["VoxelResolutionVerified"] f.write(struct.pack('<B', data)) data = header["VoxelResolutionInTALmm"] f.write(struct.pack('<B', data)) # Expected binary data: int (4 bytes) data = header["VMROrigV16MinValue"] f.write(struct.pack('<i', data)) data = header["VMROrigV16MeanValue"] f.write(struct.pack('<i', data)) data = header["VMROrigV16MaxValue"] f.write(struct.pack('<i', data)) return print("VMR saved.")
[ 37811, 5569, 11, 3551, 11, 2251, 14842, 40024, 3536, 569, 13599, 2393, 5794, 526, 15931, 198, 198, 11748, 2878, 198, 11748, 299, 32152, 355, 45941, 198, 6738, 275, 85, 65, 9608, 13, 26791, 1330, 357, 961, 62, 45286, 62, 13664, 62, 884...
2.425483
7,448
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ test pretrained models """ from __future__ import print_function import mxnet as mx from common import find_mxnet, modelzoo from score import score VAL_DATA='data/val-5k-256.rec' if __name__ == '__main__': gpus = mx.test_utils.list_gpus() assert len(gpus) > 0 batch_size = 16 * len(gpus) gpus = ','.join([str(i) for i in gpus]) kwargs = {'gpus':gpus, 'batch_size':batch_size, 'max_num_examples':500} download_data() test_imagenet1k_resnet(**kwargs) test_imagenet1k_inception_bn(**kwargs)
[ 2, 49962, 284, 262, 24843, 10442, 5693, 357, 1921, 37, 8, 739, 530, 198, 2, 393, 517, 18920, 5964, 11704, 13, 220, 4091, 262, 28536, 2393, 198, 2, 9387, 351, 428, 670, 329, 3224, 1321, 198, 2, 5115, 6634, 9238, 13, 220, 383, 7054,...
3.15625
416
# (c) Copyright [2018-2022] Micro Focus or one of its affiliates. # Licensed under the Apache License, Version 2.0 (the "License"); # You may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # |_ |~) _ _| _ /~\ _ |. # |_)\/ |_)(_|(_|| \_/|_|(_||| # / # ____________ ______ # / __ `\ / / # | \/ / / / # |______ / / / # |____/ / / # _____________ / / # \ / / / # \ / / / # \_______/ / / # ______ / / # \ / / / # \ / / / # \/ / / # / / # / / # \ / # \ / # \/ # _ # \ / _ __|_. _ _ |_) # \/ (/_| | |(_(_|| \/ # / # VerticaPy is a Python library with scikit-like functionality for conducting # data science projects on data stored in Vertica, taking advantage Verticas # speed and built-in analytics and machine learning features. It supports the # entire data science life cycle, uses a pipeline mechanism to sequentialize # data transformation operations, and offers beautiful graphical options. # # VerticaPy aims to do all of the above. The idea is simple: instead of moving # data around for processing, VerticaPy brings the logic to the data. # # # Modules # # Standard Python Modules import math, re, decimal, warnings, datetime from collections.abc import Iterable from typing import Union # VerticaPy Modules import verticapy from verticapy.utilities import * from verticapy.toolbox import * from verticapy.errors import * ## # # __ __ ______ ______ __ __ __ __ __ __ __ # /\ \ / / /\ ___\ /\ __ \ /\ \ /\ \/\ \ /\ "-./ \ /\ "-.\ \ # \ \ \'/ \ \ \____ \ \ \/\ \ \ \ \____ \ \ \_\ \ \ \ \-./\ \ \ \ \-. \ # \ \__| \ \_____\ \ \_____\ \ \_____\ \ \_____\ \ \_\ \ \_\ \ \_\\"\_\ # \/_/ \/_____/ \/_____/ \/_____/ \/_____/ \/_/ \/_/ \/_/ \/_/ # # # ---#
[ 2, 357, 66, 8, 15069, 685, 7908, 12, 1238, 1828, 60, 4527, 17061, 393, 530, 286, 663, 29116, 13, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 921, 743, 407, 779, 428, 2393, ...
2.086783
1,256
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ example.py ~~~~~~~~~ A simple command line application to run flask apps. :copyright: 2019 Miller :license: BSD-3-Clause """ # Known bugs that can't be fixed here: # - synopsis() cannot be prevented from clobbering existing # loaded modules. # - If the __file__ attribute on a module is a relative path and # the current directory is changed with os.chdir(), an incorrect # path will be displayed. from flask import render_template, redirect, request, url_for, flash,jsonify,current_app from flask_login import login_user, logout_user, login_required, current_user from . import book from flask_sqlalchemy import get_debug_queries from sqlalchemy.sql.expression import cast from datatables import ColumnDT, DataTables from .. import auth from .. import db from .forms import EditBookForm, HackmdMeta # from booktags.db.basemodels import Book from booktags.flaskapp.model.models import BookMain # --------------------------------------------------------- common routines # @book.route('/list/', methods=['GET', 'POST']) # def list_book(): # """ # # :param field: col name # :param order: asc or desc # :return: renew query # """ # books = BookMain.get_all_book() # return render_template('book/list_book.html',books=books) if __name__ == '__main__': pass
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 37811, 198, 220, 220, 220, 1672, 13, 9078, 198, 220, 220, 220, 220, 15116, 93, 198, 220, 220, 220, 317, 2829, 3...
3.034934
458
from enum import Enum
[ 6738, 33829, 1330, 2039, 388, 628 ]
3.833333
6
# -*- coding: utf-8 -*- __author__ = """Hendrix Demers""" __email__ = 'hendrix.demers@mail.mcgill.ca' __version__ = '0.1.0'
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 834, 9800, 834, 796, 37227, 39, 437, 8609, 1897, 364, 37811, 198, 834, 12888, 834, 796, 705, 15631, 8609, 13, 9536, 364, 31, 4529, 13, 23209, 70, 359, 13, 6888, ...
2.118644
59
import os import logging from tempfile import mkstemp, mkdtemp from shutil import rmtree from zipfile import ZipFile, ZIP_DEFLATED from datetime import datetime from boto.s3.connection import S3Connection from boto.s3.key import Key __version__ = "0.1.8" __author__ = "Mark Embling" __email__ = "mark@markembling.info" logger = logging.getLogger(__name__) logger.addHandler(logging.NullHandler()) LOGGING_DEFAULTS = {"level": logging.INFO, "format": "%(asctime)s [%(levelname)s]: %(message)s"} def setup_logging(**kwargs): """Convenience function for setting up some sane logging defaults""" opts = dict(LOGGING_DEFAULTS.items() + kwargs.items()) logging.basicConfig(**opts)
[ 11748, 28686, 198, 11748, 18931, 198, 6738, 20218, 7753, 1330, 33480, 927, 79, 11, 33480, 67, 29510, 198, 6738, 4423, 346, 1330, 374, 16762, 631, 198, 6738, 19974, 7753, 1330, 38636, 8979, 11, 42977, 62, 7206, 3697, 11617, 198, 6738, 48...
2.683019
265
# Generated by Django 2.0.2 on 2019-03-08 13:03 from django.db import migrations, models
[ 2, 2980, 515, 416, 37770, 362, 13, 15, 13, 17, 319, 13130, 12, 3070, 12, 2919, 1511, 25, 3070, 198, 198, 6738, 42625, 14208, 13, 9945, 1330, 15720, 602, 11, 4981, 628 ]
2.84375
32
#!/usr/bin/env python3 if __name__ == '__main__': print(date_time("01.01.2018 00:00")) assert date_time("01.01.2018 00:00") == "1 January 2018 year 0 hours 0 minutes" assert date_time("04.08.1984 08:15") == "4 August 1984 year 8 hours 15 minutes" assert date_time("17.12.1990 07:42") == "17 December 1990 year 7 hours 42 minutes"
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 198, 361, 11593, 3672, 834, 6624, 705, 834, 12417, 834, 10354, 198, 220, 220, 220, 3601, 7, 4475, 62, 2435, 7203, 486, 13, 486, 13, 7908, 3571, 25, 405, 48774, 198, 220, 220, 2...
2.710938
128
# Copyright (c) 2017, Neil Booth # # All rights reserved. # # The MIT License (MIT) # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """Representation of a peer server.""" from ipaddress import ip_address from lbry.wallet.server import util from lbry.wallet.server.util import cachedproperty from typing import Dict def update_features(self, features): """Update features in-place.""" try: tmp = Peer(self.host, features) except Exception: pass else: self.update_features_from_peer(tmp) def connection_port_pairs(self): """Return a list of (kind, port) pairs to try when making a connection.""" # Use a list not a set - it's important to try the registered # ports first. pairs = [('SSL', self.ssl_port), ('TCP', self.tcp_port)] while self.other_port_pairs: pairs.append(self.other_port_pairs.pop()) return [pair for pair in pairs if pair[1]] def mark_bad(self): """Mark as bad to avoid reconnects but also to remember for a while.""" self.bad = True def check_ports(self, other): """Remember differing ports in case server operator changed them or removed one.""" if other.ssl_port != self.ssl_port: self.other_port_pairs.add(('SSL', other.ssl_port)) if other.tcp_port != self.tcp_port: self.other_port_pairs.add(('TCP', other.tcp_port)) return bool(self.other_port_pairs) def bucket(self): if self.is_tor: return 'onion' if not self.ip_addr: return '' return tuple(self.ip_addr.split('.')[:2]) def serialize(self): """Serialize to a dictionary.""" return {attr: getattr(self, attr) for attr in self.ATTRS} def _port(self, key): hosts = self.features.get('hosts') if isinstance(hosts, dict): host = hosts.get(self.host) port = self._integer(key, host) if port and 0 < port < 65536: return port return None def _integer(self, key, d=None): d = d or self.features result = d.get(key) if isinstance(d, dict) else None if isinstance(result, str): try: result = int(result) except ValueError: pass return result if isinstance(result, int) else None def _string(self, key): result = self.features.get(key) return result if isinstance(result, str) else None def _protocol_version_string(self, key): version_str = self.features.get(key) ptuple = util.protocol_tuple(version_str) return util.version_string(ptuple) def to_tuple(self): """The tuple ((ip, host, details) expected in response to a peers subscription.""" details = self.real_name().split()[1:] return (self.ip_addr or self.host, self.host, details) def real_name(self): """Real name of this peer as used on IRC.""" parts = [self.host, 'v' + self.protocol_max] if self.pruning: parts.append(f'p{self.pruning:d}') for letter, port in (('s', self.ssl_port), ('t', self.tcp_port)): if port: parts.append(port_text(letter, port)) return ' '.join(parts)
[ 2, 15069, 357, 66, 8, 2177, 11, 15929, 36389, 198, 2, 198, 2, 1439, 2489, 10395, 13, 198, 2, 198, 2, 383, 17168, 13789, 357, 36393, 8, 198, 2, 198, 2, 2448, 3411, 318, 29376, 7520, 11, 1479, 286, 3877, 11, 284, 597, 1048, 16727,...
2.501712
1,752
"""Test deCONZ diagnostics.""" from unittest.mock import patch from pydeconz.websocket import STATE_RUNNING from homeassistant.const import Platform from .test_gateway import DECONZ_CONFIG, setup_deconz_integration from tests.components.diagnostics import get_diagnostics_for_config_entry
[ 37811, 14402, 390, 10943, 57, 6689, 34558, 526, 15931, 198, 198, 6738, 555, 715, 395, 13, 76, 735, 1330, 8529, 198, 198, 6738, 279, 5173, 721, 13569, 13, 732, 1443, 5459, 1330, 35454, 62, 49, 4944, 15871, 198, 198, 6738, 1363, 562, ...
3.206522
92
# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Code to transform functions on individual tuples of particles to sets.""" from absl import logging from functools import reduce, partial from collections import namedtuple from enum import Enum from typing import Any, Callable, Optional, Dict, Tuple, Generator, Union import math from operator import mul import numpy as onp from jax import lax from jax import ops from jax import jit, vmap, eval_shape from jax.abstract_arrays import ShapedArray from jax.interpreters import partial_eval as pe import jax.numpy as jnp from jax_md import quantity, space, dataclasses, util import jraph # Types Array = util.Array f32 = util.f32 f64 = util.f64 i32 = util.i32 i64 = util.i64 Box = space.Box DisplacementOrMetricFn = space.DisplacementOrMetricFn MetricFn = space.MetricFn # Cell List def _cell_dimensions(spatial_dimension: int, box_size: Box, minimum_cell_size: float) -> Tuple[Box, Array, Array, int]: """Compute the number of cells-per-side and total number of cells in a box.""" if isinstance(box_size, int) or isinstance(box_size, float): box_size = float(box_size) # NOTE(schsam): Should we auto-cast based on box_size? I can't imagine a case # in which the box_size would not be accurately represented by an f32. if (isinstance(box_size, onp.ndarray) and (box_size.dtype == jnp.int32 or box_size.dtype == jnp.int64)): box_size = float(box_size) cells_per_side = onp.floor(box_size / minimum_cell_size) cell_size = box_size / cells_per_side cells_per_side = onp.array(cells_per_side, dtype=jnp.int64) if isinstance(box_size, onp.ndarray): if box_size.ndim == 1 or box_size.ndim == 2: assert box_size.size == spatial_dimension flat_cells_per_side = onp.reshape(cells_per_side, (-1,)) for cells in flat_cells_per_side: if cells < 3: raise ValueError( ('Box must be at least 3x the size of the grid spacing in each ' 'dimension.')) cell_count = reduce(mul, flat_cells_per_side, 1) elif box_size.ndim == 0: cell_count = cells_per_side ** spatial_dimension else: raise ValueError('Box must either be a scalar or a vector.') else: cell_count = cells_per_side ** spatial_dimension return box_size, cell_size, cells_per_side, int(cell_count) def count_cell_filling(R: Array, box_size: Box, minimum_cell_size: float) -> Array: """Counts the number of particles per-cell in a spatial partition.""" dim = int(R.shape[1]) box_size, cell_size, cells_per_side, cell_count = \ _cell_dimensions(dim, box_size, minimum_cell_size) hash_multipliers = _compute_hash_constants(dim, cells_per_side) particle_index = jnp.array(R / cell_size, dtype=jnp.int64) particle_hash = jnp.sum(particle_index * hash_multipliers, axis=1) filling = ops.segment_sum(jnp.ones_like(particle_hash), particle_hash, cell_count) return filling def cell_list(box_size: Box, minimum_cell_size: float, cell_capacity_or_example_R: Union[int, Array], buffer_size_multiplier: float=1.1 ) -> Callable[[Array], CellList]: r"""Returns a function that partitions point data spatially. Given a set of points {x_i \in R^d} with associated data {k_i \in R^m} it is often useful to partition the points / data spatially. A simple partitioning that can be implemented efficiently within XLA is a dense partition into a uniform grid called a cell list. Since XLA requires that shapes be statically specified, we allocate fixed sized buffers for each cell. The size of this buffer can either be specified manually or it can be estimated automatically from a set of positions. Note, if the distribution of points changes significantly it is likely the buffer the buffer sizes will have to be adjusted. This partitioning will likely form the groundwork for parallelizing simulations over different accelerators. Args: box_size: A float or an ndarray of shape [spatial_dimension] specifying the size of the system. Note, this code is written for the case where the boundaries are periodic. If this is not the case, then the current code will be slightly less efficient. minimum_cell_size: A float specifying the minimum side length of each cell. Cells are enlarged so that they exactly fill the box. cell_capacity_or_example_R: Either an integer specifying the size number of particles that can be stored in each cell or an ndarray of positions of shape [particle_count, spatial_dimension] that is used to estimate the cell_capacity. buffer_size_multiplier: A floating point multiplier that multiplies the estimated cell capacity to allow for fluctuations in the maximum cell occupancy. Returns: A function `cell_list_fn(R, **kwargs)` that partitions positions, `R`, and side data specified by kwargs into a cell list. Returns a CellList containing the partition. """ if util.is_array(box_size): box_size = onp.array(box_size) if len(box_size.shape) == 1: box_size = jnp.reshape(box_size, (1, -1)) if util.is_array(minimum_cell_size): minimum_cell_size = onp.array(minimum_cell_size) cell_capacity = cell_capacity_or_example_R if _is_variable_compatible_with_positions(cell_capacity): cell_capacity = _estimate_cell_capacity( cell_capacity, box_size, minimum_cell_size, buffer_size_multiplier) elif not isinstance(cell_capacity, int): msg = ( 'cell_capacity_or_example_positions must either be an integer ' 'specifying the cell capacity or a set of positions that will be used ' 'to estimate a cell capacity. Found {}.'.format(type(cell_capacity)) ) raise ValueError(msg) return build_cells def _displacement_or_metric_to_metric_sq( displacement_or_metric: DisplacementOrMetricFn) -> MetricFn: """Checks whether or not a displacement or metric was provided.""" for dim in range(1, 4): try: R = ShapedArray((dim,), f32) dR_or_dr = eval_shape(displacement_or_metric, R, R, t=0) if len(dR_or_dr.shape) == 0: return lambda Ra, Rb, **kwargs: \ displacement_or_metric(Ra, Rb, **kwargs) ** 2 else: return lambda Ra, Rb, **kwargs: space.square_distance( displacement_or_metric(Ra, Rb, **kwargs)) except TypeError: continue except ValueError: continue raise ValueError( 'Canonicalize displacement not implemented for spatial dimension larger' 'than 4.') NeighborFn = Callable[[Array, Optional[NeighborList], Optional[int]], NeighborList] return NeighborListFns(lambda R, extra_capacity=0, **kwargs: neighbor_list_fn(R, extra_capacity=extra_capacity, **kwargs), lambda R, nbrs, **kwargs: # pytype: disable=wrong-arg-count neighbor_list_fn(R, nbrs, **kwargs)) def neighbor_list_mask(neighbor: NeighborList, mask_self: bool=False) -> Array: """Compute a mask for neighbor list.""" if is_sparse(neighbor.format): mask = neighbor.idx[0] < len(neighbor.reference_position) if mask_self: mask = mask & (neighbor.idx[0] != neighbor.idx[1]) return mask mask = neighbor.idx < len(neighbor.idx) if mask_self: N = len(neighbor.reference_position) self_mask = neighbor.idx != jnp.reshape(jnp.arange(N), (N, 1)) mask = mask & self_mask return mask def to_jraph(neighbor: NeighborList, mask: Array=None) -> jraph.GraphsTuple: """Convert a sparse neighbor list to a `jraph.GraphsTuple`. As in jraph, padding here is accomplished by adding a ficticious graph with a single node. Args: neighbor: A neighbor list that we will convert to the jraph format. Must be sparse. mask: An optional mask on the edges. Returns: A `jraph.GraphsTuple` that contains the topology of the neighbor list. """ if not is_sparse(neighbor.format): raise ValueError('Cannot convert a dense neighbor list to jraph format. ' 'Please use either NeighborListFormat.Sparse or ' 'NeighborListFormat.OrderedSparse.') receivers, senders = neighbor.idx N = len(neighbor.reference_position) _mask = neighbor_list_mask(neighbor) if mask is not None: _mask = _mask & mask cumsum = jnp.cumsum(_mask) index = jnp.where(_mask, cumsum - 1, len(receivers)) ordered = N * jnp.ones((len(receivers) + 1,), jnp.int32) receivers = ordered.at[index].set(receivers)[:-1] senders = ordered.at[index].set(senders)[:-1] mask = receivers < N return jraph.GraphsTuple( nodes=None, edges=None, receivers=receivers, senders=senders, globals=None, n_node=jnp.array([N, 1]), n_edge=jnp.array([jnp.sum(_mask), jnp.sum(~_mask)]), ) def to_dense(neighbor: NeighborList) -> Array: """Converts a sparse neighbor list to dense ids. Cannot be JIT.""" if neighbor.format is not Sparse: raise ValueError('Can only convert sparse neighbor lists to dense ones.') receivers, senders = neighbor.idx mask = neighbor_list_mask(neighbor) receivers = receivers[mask] senders = senders[mask] N = len(neighbor.reference_position) count = ops.segment_sum(jnp.ones(len(receivers), jnp.int32), receivers, N) max_count = jnp.max(count) offset = jnp.tile(jnp.arange(max_count), N)[:len(senders)] hashes = senders * max_count + offset dense_idx = N * jnp.ones((N * max_count,), jnp.int32) dense_idx = dense_idx.at[hashes].set(receivers).reshape((N, max_count)) return dense_idx Dense = NeighborListFormat.Dense Sparse = NeighborListFormat.Sparse OrderedSparse = NeighborListFormat.OrderedSparse
[ 2, 15069, 13130, 3012, 11419, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846, 351, 262, 13789, 13, 198, 2, 921, 743, 733...
2.667341
3,953
"""Run from rhucrl_experiments.evaluate folder.""" import socket from lsf_runner import init_runner, make_commands from rhucrl_experiments.evaluate.utilities import ENVIRONMENTS RARL_DIR = "../../runs/RARLAgent" ZERO_SUM_DIR = "../../runs/ZeroSumAgent" SCRIPT = "evaluate_mass_change.py" EXPERIMENTS = { "supermodularity": {"algorithm": "RARL_MF", "base-dir": RARL_DIR}, "shallow": {"algorithm": "RHUCRL", "base-dir": ZERO_SUM_DIR}, "greedy": {"algorithm": "RHUCRL", "base-dir": ZERO_SUM_DIR}, "lazy": {"algorithm": "HUCRL", "base-dir": RARL_DIR}, }.get(socket.gethostname(), {"algorithm": "RARL", "base-dir": RARL_DIR}) runner = init_runner("EvaluateMassChange.", num_threads=4) for seed in [0, 1, 2, 3, 4]: base_args = {"num-runs": 10, "seed": seed} base_args.update(**EXPERIMENTS) commands = make_commands( SCRIPT, base_args=base_args, common_hyper_args={"environment": ENVIRONMENTS} ) runner.run_batch(commands)
[ 37811, 10987, 422, 9529, 1229, 45895, 62, 23100, 6800, 13, 49786, 9483, 526, 15931, 198, 11748, 17802, 198, 198, 6738, 300, 28202, 62, 16737, 1330, 2315, 62, 16737, 11, 787, 62, 9503, 1746, 198, 198, 6738, 9529, 1229, 45895, 62, 23100, ...
2.446701
394