Column            Type     Min  Max
content           string   1    1.05M  (characters)
input_ids         list     1    883k   (tokens)
ratio_char_token  float64  1    22.9
token_count       int64    1    883k
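Each row below pairs a source file (content) with its token IDs (input_ids); the stats columns are consistent with ratio_char_token = len(content) / token_count (e.g. the first row: 180 tokens at 3.261111 chars/token is a 587-character file). The IDs match the GPT-2 BPE vocabulary (198 is "\n", 11748 is "import"), so a row's fields could plausibly be recomputed as in this minimal sketch; the use of the standard GPT-2 tokenizer from Hugging Face transformers is an assumption, not something the dump states:

from transformers import GPT2TokenizerFast

# Assumption: standard GPT-2 BPE vocabulary; IDs like 198 ("\n") match it.
tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")

def make_row(content: str) -> dict:
    """Recompute the four dataset columns for one source file."""
    input_ids = tokenizer(content)["input_ids"]
    return {
        "content": content,
        "input_ids": input_ids,
        "ratio_char_token": len(content) / len(input_ids),  # chars per token
        "token_count": len(input_ids),
    }

# Example: import-heavy Python files tokenize at roughly 3-4 chars per token.
print(make_row("import sys\nfrom django.urls import resolve\n"))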
# -*- encoding: utf-8
from sqlalchemy.testing import eq_, is_
from sqlalchemy import schema
from sqlalchemy.sql import table, column, quoted_name
from sqlalchemy.dialects import mssql
from sqlalchemy.dialects.mssql import mxodbc
from sqlalchemy.testing import fixtures, AssertsCompiledSQL
from sqlalchemy import sql
from sqlalchemy import Integer, String, Table, Column, select, MetaData,\
    update, delete, insert, extract, union, func, PrimaryKeyConstraint, \
    UniqueConstraint, Index, Sequence, literal
from sqlalchemy import testing
from sqlalchemy.dialects.mssql import base
[ 2, 532, 9, 12, 21004, 25, 3384, 69, 12, 23, 198, 6738, 44161, 282, 26599, 13, 33407, 1330, 37430, 62, 11, 318, 62, 198, 6738, 44161, 282, 26599, 1330, 32815, 198, 6738, 44161, 282, 26599, 13, 25410, 1330, 3084, 11, 5721, 11, 10947, ...
3.261111
180
# coding=utf-8
# *** WARNING: this file was generated by pulumigen. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***

import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from ... import meta as _meta

__all__ = [
    'LeaseSpecArgs',
    'LeaseArgs',
]
[ 2, 19617, 28, 40477, 12, 23, 198, 2, 17202, 39410, 25, 428, 2393, 373, 7560, 416, 279, 14452, 9324, 13, 17202, 198, 2, 17202, 2141, 407, 4370, 416, 1021, 4556, 345, 821, 1728, 345, 760, 644, 345, 389, 1804, 0, 17202, 198, 198, 117...
3.344828
116
#MenuTitle: Align All Components
# -*- coding: utf-8 -*-
__doc__="""
Fakes auto-alignment in glyphs that cannot be auto-aligned.
"""

import GlyphsApp

thisFont = Glyphs.font # frontmost font
thisFontMaster = thisFont.selectedFontMaster # active master
thisFontMasterID = thisFont.selectedFontMaster.id # active master
listOfSelectedLayers = thisFont.selectedLayers # active layers of selected glyphs

thisFont.disableUpdateInterface() # suppresses UI updates in Font View

for thisLayer in listOfSelectedLayers:
	thisGlyph = thisLayer.parent
	print "Aligning components in:", thisGlyph.name
	thisGlyph.beginUndo() # begin undo grouping
	process( thisLayer )
	thisGlyph.endUndo() # end undo grouping

thisFont.enableUpdateInterface() # re-enables UI updates in Font View
[ 2, 23381, 19160, 25, 978, 570, 1439, 36109, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 834, 15390, 834, 2625, 15931, 198, 37, 1124, 8295, 12, 282, 16747, 287, 25874, 82, 326, 2314, 307, 8295, 12, 41634, 13...
3.275424
236
#!/usr/bin/env python3

"""
Stanford CS106AP TK Drawing
Lecture Exercises
Courtesy of Nick Parlante
"""

import tkinter as tk


# provided function, this code is complete
def make_canvas(width, height):
    """
    Creates and returns a drawing canvas
    of the given int size, ready for drawing.
    """
    top = tk.Tk()
    top.minsize(width=width + 10, height=height + 10)
    canvas = tk.Canvas(top, width=width, height=height)
    canvas.pack()
    canvas.xview_scroll(6, "units")  # hack so (0, 0) works correctly
    canvas.yview_scroll(6, "units")
    return canvas


if __name__ == '__main__':
    main()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 198, 37811, 198, 32140, 3841, 9429, 15801, 2969, 198, 51, 42, 40027, 31209, 495, 1475, 2798, 2696, 198, 31825, 286, 8047, 2547, 75, 12427, 198, 37811, 198, 198, 11748, 256, 74, 384...
2.656652
233
# Only PCM 16 bit wav 44100 Hz - Use audacity or sox to convert audio files.

# WAV generation

# Synth
# sox -n --no-show-progress -G --channels 1 -r 44100 -b 16 -t wav bip.wav synth 0.25 sine 800
# sox -n --no-show-progress -G --channels 1 -r 44100 -b 16 -t wav bop.wav synth 0.25 sine 400

# Voices
# pico2wave -l "it-IT" -w start.wav "Bene! Si Parte!"
# Then convert wav files to to 44100 Hz

# Note: some initial sound may not be played.

# alsaaudio examples
# https://larsimmisch.github.io/pyalsaaudio/libalsaaudio.html

import threading
import time
import socket
import sys, os, platform
import re
import wave
import argparse

import rospy

use_sound_play = False
use_alsaaudio = True

try:
    from sound_play.msg import SoundRequest
    from sound_play.libsoundplay import SoundClient
except:
    print('ROS package sound_play required.')
    print('Install with: sudo apt-get install ros-kinetic-audio-common libasound2')
    use_sound_play = False
    #sys.exit(0)

try:
    import sox
except:
    print('sox required. Install with: pip install --user sox')
    sys.exit(0)

try:
    import alsaaudio
except:
    print('alsaaudio required. Install with: pip install --user pyalsaaudio')
    use_alsaaudio = False
    #sys.exit(0)

from asr_server import ASRServer

SOUNDS_DIR = "sounds/"  # dir with sounds

soundfile = None  # sound file

tts_server = None
asr_server = None

# def playwav_pa(self, sfile):
#     global soundfile
#     self.streaming = True
#     self.stream = self.pa.open(format = 8, #self.pa.get_format_from_width(f.getsampwidth#()),
#                     channels = 1, #f.getnchannels(),
#                     rate = 44100, #f.getframerate(),
#                     output = True,
#                     stream_callback = TTS_callback,
#                     output_device_index = self.output_device)
#     soundfile = sfile
#     soundfile.setpos(0)
#     self.stream.start_stream()
#     while self.stream.is_active():
#         time.sleep(1.0)
#     self.stream.stop_stream()
#     self.stream.close()
#     self.streaming = False

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='audio_server')
    parser.add_argument('-ttsport', type=int, help='TTS server port [default: 9001]', default=9001)
    parser.add_argument('-asrport', type=int, help='ASR server port [default: 9002]', default=9002)
    parser.add_argument('-device', type=str, help='audio device [default: \'sysdefault\']', default='sysdefault')
    args = parser.parse_args()

    tts_server = TTSServer(args.ttsport, args.device)
    asr_server = ASRServer(args.asrport)

    tts_server.start()
    time.sleep(1)
    asr_server.start()

    run = True
    while (run):
        try:
            time.sleep(3)
            #if (not tts_server.streaming):
            #    cmd = 'play -n --no-show-progress -r 44100 -c1 synth 0.1 sine 50 vol 0.01' # keep sound alive
            #    os.system(cmd)
        except KeyboardInterrupt:
            print "Exit"
            run = False

    tts_server.stop()
    asr_server.stop()
    sys.exit(0)
[ 198, 2, 5514, 4217, 44, 1467, 1643, 266, 615, 5846, 3064, 26109, 532, 5765, 2709, 4355, 393, 523, 87, 284, 10385, 6597, 3696, 13, 198, 198, 2, 370, 10116, 5270, 198, 198, 2, 16065, 400, 198, 2, 523, 87, 532, 77, 1377, 3919, 12, ...
2.345372
1,329
from .video_utils import VideoClips
from .utils import list_dir
from .folder import make_dataset
from .vision import VisionDataset
[ 6738, 764, 15588, 62, 26791, 1330, 7623, 2601, 2419, 198, 6738, 764, 26791, 1330, 1351, 62, 15908, 198, 6738, 764, 43551, 1330, 787, 62, 19608, 292, 316, 198, 6738, 764, 10178, 1330, 19009, 27354, 292, 316, 628 ]
3.567568
37
# -*- coding: utf-8 -*-
"""
    sphinx.builders.qthelp
    ~~~~~~~~~~~~~~~~~~~~~~

    Build input files for the Qt collection generator.

    :copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

import os
import re
import codecs
import posixpath
from os import path

from six import text_type
from docutils import nodes

from sphinx import addnodes
from sphinx.builders.html import StandaloneHTMLBuilder
from sphinx.util import force_decode
from sphinx.util.pycompat import htmlescape


_idpattern = re.compile(
    r'(?P<title>.+) (\((class in )?(?P<id>[\w\.]+)( (?P<descr>\w+))?\))$')


# Qt Help Collection Project (.qhcp).
# Is the input file for the help collection generator.
# It contains references to compressed help files which should be
# included in the collection.
# It may contain various other information for customizing Qt Assistant.
collection_template = u'''\
<?xml version="1.0" encoding="utf-8" ?>
<QHelpCollectionProject version="1.0">
    <assistant>
        <title>%(title)s</title>
        <homePage>%(homepage)s</homePage>
        <startPage>%(startpage)s</startPage>
    </assistant>
    <docFiles>
        <generate>
            <file>
                <input>%(outname)s.qhp</input>
                <output>%(outname)s.qch</output>
            </file>
        </generate>
        <register>
            <file>%(outname)s.qch</file>
        </register>
    </docFiles>
</QHelpCollectionProject>
'''

# Qt Help Project (.qhp)
# This is the input file for the help generator.
# It contains the table of contents, indices and references to the
# actual documentation files (*.html).
# In addition it defines a unique namespace for the documentation.
project_template = u'''\
<?xml version="1.0" encoding="utf-8" ?>
<QtHelpProject version="1.0">
    <namespace>%(namespace)s</namespace>
    <virtualFolder>doc</virtualFolder>
    <customFilter name="%(project)s %(version)s">
        <filterAttribute>%(outname)s</filterAttribute>
        <filterAttribute>%(version)s</filterAttribute>
    </customFilter>
    <filterSection>
        <filterAttribute>%(outname)s</filterAttribute>
        <filterAttribute>%(version)s</filterAttribute>
        <toc>
            <section title="%(title)s" ref="%(masterdoc)s.html">
%(sections)s
            </section>
        </toc>
        <keywords>
%(keywords)s
        </keywords>
        <files>
%(files)s
        </files>
    </filterSection>
</QtHelpProject>
'''

section_template = '<section title="%(title)s" ref="%(ref)s"/>'
file_template = ' '*12 + '<file>%(filename)s</file>'
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 37811, 198, 220, 220, 220, 599, 20079, 87, 13, 50034, 13, 80, 1169, 34431, 198, 220, 220, 220, 220, 27156, 8728, 4907, 628, 220, 220, 220, 10934, 5128, 3696, 329, 262,...
2.478137
1,052
""" This code is used to scrape ScienceDirect of publication urls and write them to a text file in the current directory for later use. """ import selenium from selenium import webdriver import numpy as np import pandas as pd import bs4 from bs4 import BeautifulSoup import time from sklearn.utils import shuffle def scrape_page(driver): """ This method finds all the publication result web elements on the webpage. Parameters ---------- driver (Selenium webdriver object) : Instance of the webdriver class e.g. webdriver.Chrome() Returns ------- elems (list) : A list of all scraped hrefs from the page """ elems = driver.find_elements_by_class_name('ResultItem') return elems def clean(elems): """ This method takes a list of scraped selenium web elements and filters/ returns only the hrefs leading to publications. Filtering includes removing all urls with keywords that are indicative of non-html links. Parameters ---------- elems (list) : The list of hrefs to be filtered Returns ------- urls (list) : The new list of hrefs, which should be the same as the list displayed on gui ScienceDirect """ titles = [] urls = [] for elem in elems: href_child = elem.find_element_by_css_selector('a[href]') url = href_child.get_attribute('href') title = href_child.text titles.append(title) urls.append(url) return urls, titles def build_url_list(gui_prefix,search_terms,journal_list): """ This method takes the list of journals and creates a tiple nested dictionary containing all accessible urls to each page, in each year, for each journal, for a given search on sciencedirect. """ dict1 = {} years = np.arange(1995,2020) for journal in journal_list: dict2 = {} for year in years: dict3 = {} for i in range(60): url = gui_prefix + search_terms + '&show=100'+ '&articleTypes=FLA%2CREV' + '&years='+ str(year) if i != 0: url = url + '&offset=' + str(i) +'00' url = url + '&pub=' + journal dict3[i] = url dict2[year] = dict3 dict1[journal] = dict2 return dict1 def proxify(scraped_urls,uw_prefix): """ This method takes a list of scraped urls and turns them into urls that go through the UW Library proxy so that all of them are full access. Parameters ---------- scraped_urls (list) : The list of URLs to be converted uw_prefix (str) : The string that all URLs which go through the UW Library Proxy start with. Returns ------- proxy_urls (list) : The list of converted URLs which go through UW Library proxy """ proxy_urls = [] for url in scraped_urls: sd_id = url[-17:] newlink = uw_prefix + sd_id if sd_id.startswith('S'): proxy_urls.append(newlink) return proxy_urls def write_urls(urls,titles,file,journal,year): """ This method takes a list of urls and writes them to a desired text file. Parameters ---------- urls (list) : The list of URLs to be saved. file (file object) : The opened .txt file which will be written to. year (str or int) : The year associated with the publication date. Returns ------- Does not return anything """ for link,title in zip(urls,titles): line = link + ',' + title + ',' + journal + ',' + str(year) file.write(line) file.write('\n') def find_pubTitle(driver,journal): """ This method finds the identifying number for a specific journal. This identifying number is added to the gui query URL to ensure only publciations from the desired journal are being found. 
""" pub_elems = driver.find_elements_by_css_selector('input[id*=publicationTitles]') pub_names = [] for elem in pub_elems: pub_name = elem.get_attribute("name") if pub_name == journal: return elem.get_attribute('id')[-6:] #returns the identifying number #for that journal df = pd.read_excel('elsevier_journals.xls') df.Full_Category = df.Full_Category.str.lower() # lowercase topics for searching df = df.drop_duplicates(subset = 'Journal_Title') # drop any duplicate journals df = shuffle(df,random_state = 42) # The set of default strings that will be used to sort which journals we want journal_strings = ['chemistry','energy','molecular','atomic','chemical','biochem' ,'organic','polymer','chemical engineering','biotech','coloid'] name = df.Full_Category.str.contains # making this an easier command to type # new dataframe full of only journals who's topic description contained the # desired keywords df2 = df[name('polymer') | name('chemistry') | name('energy') | name('molecular') | name('colloid') | name('biochem') | name('organic') | name('biotech') | name('chemical')] journal_list = df2.Journal_Title # Series of only the journals to be searched gui_prefix = 'https://www.sciencedirect.com/search/advanced?qs=' search_terms = 'chemistry%20OR%20molecule%20OR%20polymer%20OR%20organic' url_dict = build_url_list(gui_prefix,search_terms,journal_list) driver = webdriver.Chrome() uw_prefix = 'https://www-sciencedirect-com.offcampus.lib.washington.edu/science/article/pii/' filename = input("Input filename with .txt extension for URL storage: ") url_counter = 0 master_list = [] file = open(filename,'a+') for journal in journal_list: for year in np.arange(1995,2020): for offset in np.arange(60): page = url_dict[journal][year][offset] print("journal, year, offset = ",journal,year,offset) driver.get(page) time.sleep(2) # need sleep to load the page properly if offset == 0: # if on page 1, we need to grab the publisher number try: # we may be at a page which won't have the item we are looking for pubTitles = find_pubTitle(driver,journal_list[journal_counter]) for url in url_dict[journal]: url = url + '&pubTitles=' + pubTitles # update every url in the list driver.get(url_dict[journal][year][0]) # reload the first page with the new url except: pass # if there is an exception, it means we are on the right page scraped_elems = scrape_page(driver) # scrape the page scraped_urls, titles = clean(scraped_elems) proxy_urls = proxify(scraped_urls,uw_prefix) # not even sure this is needed write_urls(proxy_urls,titles,file,journal,year) url_counter += len(proxy_urls) print('Total URLs saved is: ',url_counter) if len(scraped_elems) < 100: # after content is saved, go to the next year break # because we know this is the last page of urls for this year file.close() driver.quit()
[ 37811, 198, 1212, 2438, 318, 973, 284, 42778, 5800, 13470, 286, 9207, 2956, 7278, 290, 3551, 606, 284, 198, 64, 2420, 2393, 287, 262, 1459, 8619, 329, 1568, 779, 13, 198, 37811, 198, 198, 11748, 384, 11925, 1505, 198, 6738, 384, 11925...
2.501552
2,899
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
from datetime import datetime
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union

from flask import Flask
from flask_caching import Cache
from werkzeug.wrappers import Response

CacheConfig = Union[Callable[[Flask], Cache], Dict[str, Any]]
DbapiDescriptionRow = Tuple[
    str, str, Optional[str], Optional[str], Optional[int], Optional[int], bool
]
DbapiDescription = Union[List[DbapiDescriptionRow], Tuple[DbapiDescriptionRow, ...]]
DbapiResult = Sequence[Union[List[Any], Tuple[Any, ...]]]
FilterValue = Union[datetime, float, int, str]
FilterValues = Union[FilterValue, List[FilterValue], Tuple[FilterValue]]
FormData = Dict[str, Any]
Granularity = Union[str, Dict[str, Union[str, float]]]
AdhocMetric = Dict[str, Any]
Metric = Union[AdhocMetric, str]
OrderBy = Tuple[Metric, bool]
QueryObjectDict = Dict[str, Any]
VizData = Optional[Union[List[Any], Dict[Any, Any]]]
VizPayload = Dict[str, Any]

# Flask response.
Base = Union[bytes, str]
Status = Union[int, str]
Headers = Dict[str, Any]
FlaskResponse = Union[
    Response,
    Base,
    Tuple[Base, Status],
    Tuple[Base, Status, Headers],
]
[ 2, 49962, 284, 262, 24843, 10442, 5693, 357, 1921, 37, 8, 739, 530, 198, 2, 393, 517, 18920, 5964, 11704, 13, 220, 4091, 262, 28536, 2393, 198, 2, 9387, 351, 428, 670, 329, 3224, 1321, 198, 2, 5115, 6634, 9238, 13, 220, 383, 7054,...
3.326923
572
#!/usr/bin/env python3

import json
import platform


if __name__ == "__main__":
    data = make_sys_report()
    with open("log_system_information.json", "w") as f:
        json.dump(data, f, indent=4)
    print(json.dumps(data, indent=4))
    print("System info gathered successfully - saved as \"log_system_information.json\"")
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 198, 11748, 33918, 198, 11748, 3859, 628, 198, 198, 361, 11593, 3672, 834, 6624, 366, 834, 12417, 834, 1298, 198, 220, 220, 220, 1366, 796, 787, 62, 17597, 62, 13116, 3419, 198, ...
2.707317
123
""" Utils for creating xdelta patches. """ import logging from subprocess import check_output, CalledProcessError from shutil import copyfile from os import remove, path
[ 37811, 198, 18274, 4487, 329, 4441, 2124, 67, 12514, 16082, 13, 198, 37811, 198, 11748, 18931, 198, 6738, 850, 14681, 1330, 2198, 62, 22915, 11, 34099, 18709, 12331, 198, 6738, 4423, 346, 1330, 4866, 7753, 198, 6738, 28686, 1330, 4781, ...
3.909091
44
from rotor import Rotor
import sys
import getopt


if __name__ == '__main__':
    main(sys.argv[1:])
[ 6738, 44883, 1330, 18481, 273, 198, 11748, 25064, 198, 11748, 651, 8738, 628, 628, 198, 361, 11593, 3672, 834, 6624, 705, 834, 12417, 834, 10354, 198, 220, 220, 220, 1388, 7, 17597, 13, 853, 85, 58, 16, 25, 12962, 198 ]
2.575
40
import time
import numpy as np

from scipy import sparse
from numba import njit
from numpy.linalg import norm
from scipy.sparse.linalg import svds

from andersoncd.lasso import dual_lasso


def solver_group(
        X, y, alpha, grp_size, max_iter=10000, tol=1e-4, f_gap=10, K=5,
        use_acc=False, algo='bcd', compute_time=False, tmax=np.infty,
        verbose=True):
    """Solve the GroupLasso with BCD/ISTA/FISTA, eventually with extrapolation.

    Groups are contiguous, of size grp_size.
    Objective:
    norm(y - Xw, ord=2)**2 / 2 + alpha * sum_g ||w_{[g]}||_2

    TODO: filled docstring

    Parameters:
    algo: string
        'bcd', 'pgd', 'fista'

    compute_time : bool, default=False
        If you want to compute timings or not

    tmax : float, default=1000
        Maximum time (in seconds) the algorithm is allowed to run

    alpha: strength of the group penalty
    """
    is_sparse = sparse.issparse(X)
    n_features = X.shape[1]
    if n_features % grp_size != 0:
        raise ValueError("n_features is not a multiple of group size")
    n_groups = n_features // grp_size

    _range = np.arange(n_groups)
    groups = dict(
        bcd=lambda: _range,
        bcdshuf=lambda: np.random.choice(n_groups, n_groups, replace=False),
        rbcd=lambda: np.random.choice(n_groups, n_groups, replace=True))

    if not is_sparse and not np.isfortran(X):
        X = np.asfortranarray(X)

    last_K_w = np.zeros([K + 1, n_features])
    U = np.zeros([K, n_features])

    if algo in ('pgd', 'fista'):
        if is_sparse:
            L = svds(X, k=1)[1][0] ** 2
        else:
            L = norm(X, ord=2) ** 2

    lc = np.zeros(n_groups)
    for g in range(n_groups):
        X_g = X[:, g * grp_size: (g + 1) * grp_size]
        if is_sparse:
            gram = (X_g.T @ X_g).todense()
            lc[g] = norm(gram, ord=2)
        else:
            lc[g] = norm(X_g, ord=2) ** 2

    w = np.zeros(n_features)
    if algo == 'fista':
        z = np.zeros(n_features)
        t_new = 1

    R = y.copy()
    E = []
    gaps = np.zeros(max_iter // f_gap)

    if compute_time:
        times = []
        t_start = time.time()

    for it in range(max_iter):
        if it % f_gap == 0:
            if algo == 'fista':
                R = y - X @ w
            p_obj = primal_grp(R, w, alpha, grp_size)
            E.append(p_obj)
            theta = R / alpha

            if compute_time:
                elapsed_times = time.time() - t_start
                times.append(elapsed_times)
                if verbose:
                    print("elapsed time: %f " % elapsed_times)
                if elapsed_times > tmax:
                    break

            d_norm_theta = np.max(
                norm((X.T @ theta).reshape(-1, grp_size), axis=1))
            if d_norm_theta > 1.:
                theta /= d_norm_theta
            d_obj = dual_lasso(y, theta, alpha)

            gap = p_obj - d_obj

            if verbose:
                print("Iteration %d, p_obj::%.5f, d_obj::%.5f, gap::%.2e" %
                      (it, p_obj, d_obj, gap))
            gaps[it // f_gap] = gap
            if gap < tol:
                print("Early exit")
                break

        if algo.endswith('bcd'):
            if is_sparse:
                _bcd_sparse(
                    X.data, X.indices, X.indptr, w, R, alpha, lc)
            else:
                _bcd(X, w, R, alpha, lc, groups[algo]())
        elif algo == 'pgd':
            w[:] = BST_vec(w + X.T @ R / L, alpha / L, grp_size)
            R[:] = y - X @ w
        elif algo == 'fista':
            w_old = w.copy()
            w[:] = BST_vec(z - X.T @ (X @ z - y) / L, alpha / L, grp_size)
            t_old = t_new
            t_new = (1. + np.sqrt(1 + 4 * t_old ** 2)) / 2.
            z[:] = w + (t_old - 1.) / t_new * (w - w_old)
        else:
            raise ValueError("Unknown algo %s" % algo)

        if use_acc:
            if it < K + 1:
                last_K_w[it] = w
            else:
                for k in range(K):
                    last_K_w[k] = last_K_w[k + 1]
                last_K_w[K - 1] = w

                for k in range(K):
                    U[k] = last_K_w[k + 1] - last_K_w[k]
                C = np.dot(U, U.T)

                try:
                    z = np.linalg.solve(C, np.ones(K))
                    c = z / z.sum()
                    w_acc = np.sum(last_K_w[:-1] * c[:, None], axis=0)
                    p_obj = primal_grp(R, w, alpha, grp_size)
                    R_acc = y - X @ w_acc
                    p_obj_acc = primal_grp(R_acc, w_acc, alpha, grp_size)
                    if p_obj_acc < p_obj:
                        w = w_acc
                        R = R_acc
                except np.linalg.LinAlgError:
                    if verbose:
                        print("----------Linalg error")

    if compute_time:
        return w, np.array(E), gaps[:it // f_gap + 1], times
    return w, np.array(E), gaps[:it // f_gap + 1]
[ 11748, 640, 198, 11748, 299, 32152, 355, 45941, 198, 198, 6738, 629, 541, 88, 1330, 29877, 198, 6738, 997, 7012, 1330, 299, 45051, 198, 6738, 299, 32152, 13, 75, 1292, 70, 1330, 2593, 198, 6738, 629, 541, 88, 13, 82, 29572, 13, 75, ...
1.735426
2,899
""" Greedy Word Swap with Word Importance Ranking =================================================== When WIR method is set to ``unk``, this is a reimplementation of the search method from the paper: Is BERT Really Robust? A Strong Baseline for Natural Language Attack on Text Classification and Entailment by Jin et. al, 2019. See https://arxiv.org/abs/1907.11932 and https://github.com/jind11/TextFooler. """ import numpy as np import torch from torch.nn.functional import softmax from textattack.goal_function_results import GoalFunctionResultStatus from textattack.search_methods import SearchMethod from textattack.shared.validators import ( transformation_consists_of_word_swaps_and_deletions, )
[ 37811, 198, 43887, 4716, 9678, 48408, 351, 9678, 17267, 590, 45407, 198, 10052, 4770, 18604, 628, 198, 2215, 370, 4663, 2446, 318, 900, 284, 7559, 2954, 15506, 11, 428, 318, 257, 21123, 32851, 286, 262, 2989, 198, 24396, 422, 262, 3348,...
3.675258
194
from lemur import database


def rotate_certificate(endpoint, new_cert):
    """
    Rotates a certificate on a given endpoint.

    :param endpoint:
    :param new_cert:
    :return:
    """
    # ensure that certificate is available for rotation
    endpoint.source.plugin.update_endpoint(endpoint, new_cert)
    endpoint.certificate = new_cert
    database.update(endpoint)
[ 6738, 443, 28582, 1330, 6831, 628, 198, 4299, 23064, 62, 22583, 22460, 7, 437, 4122, 11, 649, 62, 22583, 2599, 198, 220, 220, 220, 37227, 198, 220, 220, 220, 18481, 689, 257, 10703, 319, 257, 1813, 36123, 13, 628, 220, 220, 220, 105...
2.992063
126
# coding: utf-8

# Copyright Luna Technology 2015
# Matthieu Riviere <mriviere@luna-technology.com>

from __future__ import absolute_import

import os

from celery import Celery

# Set the default Django settings module for the 'celery' program
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'pype.settings')

from django.conf import settings
from celery.signals import setup_logging

app = Celery('pype')
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
[ 2, 19617, 25, 3384, 69, 12, 23, 198, 198, 2, 15069, 23694, 8987, 1853, 198, 2, 45524, 22304, 34686, 13235, 1279, 76, 15104, 13235, 31, 75, 9613, 12, 45503, 13, 785, 29, 198, 198, 6738, 11593, 37443, 834, 1330, 4112, 62, 11748, 198, ...
3.09697
165
import os, sys
import json

# anchorroot
this_file_path = os.path.split(os.path.realpath(__file__))[0]
this_path = this_file_path
root_path = this_file_path
while this_path:
    if os.path.exists(os.path.join(this_path, 'sosweety_root_anchor.py')):
        root_path = this_path
        break
    par_path = os.path.dirname(this_path)
    # print(par_path)
    if par_path == this_path:
        break
    else:
        this_path = par_path
sys.path.append(root_path)

from modules.sParser.sParser import sParser
from modules.knowledgebase.kb import KnowledgeBase

train_dir = 'data/train_zh_wiki'
train_dir = os.path.join(root_path, train_dir)
if not os.path.exists(train_dir):
    os.makedirs(train_dir)

# parse result file
parse_result_dir = 'parse_result'
parse_result_dir = os.path.join(train_dir, parse_result_dir)
if not os.path.exists(parse_result_dir):
    os.makedirs(parse_result_dir)

pos_tags_file_name = 'pos_tags_file'
pos_tags_file_path = os.path.join(parse_result_dir, pos_tags_file_name)

KB = KnowledgeBase()
parser = sParser(KB)

with open(pos_tags_file_path, 'w') as pos_tags_file:
    # file_path = 'data/corpus/zh_wiki/wiki_test'
    file_path = os.path.join(root_path, file_path)
    file = open(file_path)
    line = file.readline()
    count = 0
    while line:
        count += 1
        if count % 5000 == 0:
            print('parsed %s sentence' % count)
        text = line.strip()
        try:
            ss_pos_tags = parser.text2ss_pos_tags(text)
            for pos_tags in ss_pos_tags:
                pos_tags_file.write(json.dumps(pos_tags, ensure_ascii=False) + '\n')
        except Exception:
            print('line %s decode error' % count)
        line = file.readline()
    file.close()
[ 11748, 28686, 11, 25064, 201, 198, 11748, 33918, 201, 198, 201, 198, 2, 220, 18021, 15763, 201, 198, 5661, 62, 7753, 62, 6978, 796, 28686, 13, 6978, 13, 35312, 7, 418, 13, 6978, 13, 5305, 6978, 7, 834, 7753, 834, 4008, 58, 15, 60,...
2.156886
835
from gtrain import Model
import numpy as np
import tensorflow as tf


#________________________________________EXAMPLES_OF_NetForHypinv_CLASS_____________________________________________
[ 6738, 308, 27432, 1330, 9104, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 11192, 273, 11125, 355, 48700, 628, 628, 198, 2, 10221, 2602, 6369, 2390, 6489, 1546, 62, 19238, 62, 7934, 1890, 49926, 16340, 62, 31631, 10221, 2602, 29343, ...
4.488372
43
from ..factory import Method
[ 6738, 11485, 69, 9548, 1330, 11789, 628 ]
4.285714
7
import numpy

from keras.preprocessing import sequence
from keras.preprocessing.text import Tokenizer
from src.support import support
[ 11748, 299, 32152, 198, 198, 6738, 41927, 292, 13, 3866, 36948, 1330, 8379, 198, 6738, 41927, 292, 13, 3866, 36948, 13, 5239, 1330, 29130, 7509, 198, 6738, 12351, 13, 11284, 1330, 1104, 628, 198 ]
4
34
from setuptools import setup

version = "1.0.0"

long_description = """
	PayPalHttp is a generic http client designed to be used with code-generated projects.
"""

setup(
    name="paypalhttp",
    long_description=long_description,
    version=version,
    author="PayPal",
    packages=["paypalhttp", "paypalhttp/testutils", "paypalhttp/serializers"],
    install_requires=['requests>=2.0.0', 'six>=1.0.0', 'pyopenssl>=0.15'],
    license="MIT",
    classifiers=[
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: Implementation :: PyPy',
        'Topic :: Software Development :: Libraries :: Python Modules'
    ],
)
[ 6738, 900, 37623, 10141, 1330, 9058, 198, 198, 9641, 796, 366, 16, 13, 15, 13, 15, 1, 198, 198, 6511, 62, 11213, 796, 37227, 198, 197, 19197, 11531, 43481, 318, 257, 14276, 2638, 5456, 3562, 284, 307, 973, 351, 2438, 12, 27568, 4493...
2.738318
428
# coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from ...lo.xml.attribute import Attribute as Attribute
from ...lo.xml.attribute_container import AttributeContainer as AttributeContainer
from ...lo.xml.attribute_data import AttributeData as AttributeData
from ...lo.xml.export_filter import ExportFilter as ExportFilter
from ...lo.xml.fast_attribute import FastAttribute as FastAttribute
from ...lo.xml.import_filter import ImportFilter as ImportFilter
from ...lo.xml.namespace_container import NamespaceContainer as NamespaceContainer
from ...lo.xml.para_user_defined_attributes_supplier import ParaUserDefinedAttributesSupplier as ParaUserDefinedAttributesSupplier
from ...lo.xml.text_user_defined_attributes_supplier import TextUserDefinedAttributesSupplier as TextUserDefinedAttributesSupplier
from ...lo.xml.user_defined_attributes_supplier import UserDefinedAttributesSupplier as UserDefinedAttributesSupplier
from ...lo.xml.x_export_filter import XExportFilter as XExportFilter
from ...lo.xml.x_import_filter import XImportFilter as XImportFilter
from ...lo.xml.x_import_filter2 import XImportFilter2 as XImportFilter2
from ...lo.xml.xml_export_filter import XMLExportFilter as XMLExportFilter
from ...lo.xml.xml_import_filter import XMLImportFilter as XMLImportFilter
[ 2, 19617, 25, 3384, 69, 12, 23, 198, 2, 198, 2, 15069, 33160, 1058, 33, 6532, 12, 22405, 12, 12041, 25, 19935, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 4943, 198, 2, 345, 743,...
3.709091
495
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-09-27 15:35
from __future__ import unicode_literals

from django.db import migrations
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 2980, 515, 416, 37770, 352, 13, 24, 13, 21, 319, 1584, 12, 2931, 12, 1983, 1315, 25, 2327, 198, 6738, 11593, 37443, 834, 1330, 28000, 1098, 62, 17201, 874, 198, 1...
2.672727
55
import numpy as np
from sklearn.metrics import roc_curve, auc


def prec_recall_1d(nms_pos_o, nms_prob_o, gt_pos_o, durations, detection_overlap, win_size, remove_eof=True):
    """
    nms_pos, nms_prob, and gt_pos are lists of numpy arrays specifying detection
    position, detection probability and GT position.
    Each list entry is a different file.
    Each entry in nms_pos is an array of length num_entries. For nms_prob and
    gt_pos its an array of size (num_entries, 1).

    durations is a array of the length of the number of files with each entry
    containing that file length in seconds.

    detection_overlap determines if a prediction is counted as correct or not.

    win_size is used to ignore predictions and ground truth at the end of an
    audio file.

    returns
    precision: fraction of retrieved instances that are relevant.
    recall: fraction of relevant instances that are retrieved.
    """

    if remove_eof:
        # filter out the detections in both ground truth and predictions that are too
        # close to the end of the file - dont count them during eval
        nms_pos, nms_prob, gt_pos = remove_end_preds(nms_pos_o, nms_prob_o, gt_pos_o, durations, win_size)
    else:
        nms_pos = nms_pos_o
        nms_prob = nms_prob_o
        gt_pos = gt_pos_o

    # loop through each file
    true_pos = []   # correctly predicts the ground truth
    false_pos = []  # says there is a detection but isn't
    for ii in range(len(nms_pos)):
        num_preds = nms_pos[ii].shape[0]

        if num_preds > 0:  # check to make sure it contains something
            num_gt = gt_pos[ii].shape[0]

            # for each set of predictions label them as true positive or false positive (i.e. 1-tp)
            tp = np.zeros(num_preds)
            distance_to_gt = np.abs(gt_pos[ii].ravel() - nms_pos[ii].ravel()[:, np.newaxis])
            within_overlap = (distance_to_gt <= detection_overlap)

            # remove duplicate detections - assign to valid detection with highest prob
            for jj in range(num_gt):
                inds = np.where(within_overlap[:, jj])[0]  # get the indices of all valid predictions
                if inds.shape[0] > 0:
                    max_prob = np.argmax(nms_prob[ii][inds])
                    selected_pred = inds[max_prob]
                    within_overlap[selected_pred, :] = False
                    tp[selected_pred] = 1  # set as true positives

            true_pos.append(tp)
            false_pos.append(1 - tp)

    # calc precision and recall - sort confidence in descending order
    # PASCAL style
    conf = np.concatenate(nms_prob)[:, 0]
    num_gt = np.concatenate(gt_pos).shape[0]
    inds = np.argsort(conf)[::-1]
    true_pos_cat = np.concatenate(true_pos)[inds].astype(float)
    false_pos_cat = np.concatenate(false_pos)[inds].astype(float)  # i.e. 1-true_pos_cat

    if (conf == conf[0]).sum() == conf.shape[0]:
        # all the probability values are the same therefore we will not sweep
        # the curve and instead will return a single value
        true_pos_sum = true_pos_cat.sum()
        false_pos_sum = false_pos_cat.sum()

        recall = np.asarray([true_pos_sum / float(num_gt)])
        precision = np.asarray([(true_pos_sum / (false_pos_sum + true_pos_sum))])
    elif inds.shape[0] > 0:
        # otherwise produce a list of values
        true_pos_cum = np.cumsum(true_pos_cat)
        false_pos_cum = np.cumsum(false_pos_cat)

        recall = true_pos_cum / float(num_gt)
        precision = (true_pos_cum / (false_pos_cum + true_pos_cum))

    return precision, recall
[ 11748, 299, 32152, 355, 45941, 198, 6738, 1341, 35720, 13, 4164, 10466, 1330, 686, 66, 62, 22019, 303, 11, 257, 1229, 628, 628, 198, 198, 4299, 3718, 62, 8344, 439, 62, 16, 67, 7, 77, 907, 62, 1930, 62, 78, 11, 299, 907, 62, 167...
2.372124
1,521
# coding: utf-8

#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------

import unittest

import azure.mgmt.network.models
from testutils.common_recordingtestcase import record
from tests.mgmt_testcase import HttpStatusCode, AzureMgmtTestCase


#------------------------------------------------------------------------------
if __name__ == '__main__':
    unittest.main()
[ 2, 19617, 25, 3384, 69, 12, 23, 198, 198, 2, 10097, 45537, 198, 2, 15069, 357, 66, 8, 5413, 10501, 13, 1439, 2489, 10395, 13, 198, 2, 49962, 739, 262, 17168, 13789, 13, 4091, 13789, 13, 14116, 287, 262, 1628, 6808, 329, 198, 2, ...
4.837209
129
import gym
import gym.spaces as spaces
import sys

import socket

from _thread import *
import os

import numpy as np
import pandas as pd
import math as m
import time
import random


if __name__ == '__main__':

    # Construct MAIN SERVER object
    env = NetEnv()

    #WALK
    for i in range(100000):
        env.step()

    print('Done')
[ 11748, 11550, 198, 11748, 11550, 13, 2777, 2114, 355, 9029, 198, 11748, 25064, 198, 198, 11748, 17802, 198, 198, 6738, 4808, 16663, 1330, 1635, 198, 11748, 28686, 198, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 19798, 292, 355, 279, ...
2.866071
112
# -*- coding: utf-8 -*-
# @File   : session.py
# @Author : zhkuo
# @Time   : 2021/1/3 9:12
# @Desc   :

from sqlalchemy import create_engine
# from sqlalchemy.orm import scoped_session
from sqlalchemy.orm import sessionmaker

from app.core.config import settings

"""
: https://www.osgeo.cn/sqlalchemy/orm/session_basics.html
  https://landybird.github.io/python/2020/03/02/fastapi%E4%B8%8Easgi(5)/

session
https://github.com/tiangolo/fastapi/issues/726

session
1. sqlalchemy.orm scoped_session
2. db
3. dependency (
"""

# engine
engine = create_engine(settings.SQLALCHEMY_DATABASE_URI, connect_args={"check_same_thread": False})
# scoped_session
# db_session = scoped_session(
#     sessionmaker(autocommit=False, autoflush=False, bind=engine)
# )

SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 2488, 8979, 220, 220, 1058, 6246, 13, 9078, 198, 2, 2488, 13838, 1058, 1976, 71, 74, 20895, 198, 2, 2488, 7575, 220, 220, 1058, 33448, 14, 16, 14, 18, 860, 25, ...
2.567901
324
from pathlib import Path
import pickle
import tkinter as tk
import tkinter.filedialog


def open_dialog(**opt):
    """Parameters
    ----------
    Options will be passed to `tkinter.filedialog.askopenfilename`.
    See also tkinter's document.

    Followings are example of frequently used options.

    - filetypes=[(label, ext), ...]
        - label: str
        - ext: str, semicolon separated extentions
    - initialdir: str, default Path.cwd()
    - multiple: bool, default False

    Returns
    --------
    filename, str
    """
    root = tk.Tk()
    root.withdraw()
    root.wm_attributes("-topmost", True)
    opt_default = dict(initialdir=Path.cwd())
    _opt = dict(opt_default, **opt)
    return tk.filedialog.askopenfilename(**_opt)


def saveas_dialog(**opt):
    """Parameters
    ----------
    Options will be passed to `tkinter.filedialog.asksaveasfilename`.
    See also tkinter's document.

    Followings are example of frequently used options.

    - filetypes=[(label, ext), ...]
        - label: str
        - ext: str, semicolon separated extentions
    - initialdir: str, default Path.cwd()
    - initialfile: str, default isn't set

    Returns
    --------
    filename, str
    """
    root = tk.Tk()
    root.withdraw()
    root.wm_attributes("-topmost", True)
    opt_default = dict(initialdir=Path.cwd())
    _opt = dict(opt_default, **opt)
    return tk.filedialog.asksaveasfilename(**_opt)


def load_pickle_with_dialog(mode='rb', **opt):
    """Load a pickled object with a filename assigned by tkinter's open dialog.

    kwargs will be passed to saveas_dialog.
    """
    opt_default = dict(filetypes=[('pickled data', '*.pkl'), ('all', '*')])
    _opt = dict(opt_default, **opt)
    fn = open_dialog(**_opt)
    if fn == '':  # canceled
        return None

    with Path(fn).open(mode) as f:
        data = pickle.load(f)
    return data


def dump_pickle_with_dialog(obj, mode='wb', **opt):
    """Pickle an object with a filename assigned by tkinter's saveas dialog.

    kwargs will be passed to saveas_dialog.

    Returns
    --------
    filename: str
    """
    opt_default = dict(filetypes=[('pickled data', '*.pkl'), ('all', '*')])
    _opt = dict(opt_default, **opt)
    fn = saveas_dialog(**_opt)
    if fn == '':  # canceled
        return ''
    # note: tkinter
    with Path(fn).open(mode) as f:
        pickle.dump(obj, f)
    return fn
[ 6738, 3108, 8019, 1330, 10644, 198, 11748, 2298, 293, 198, 11748, 256, 74, 3849, 355, 256, 74, 198, 11748, 256, 74, 3849, 13, 69, 3902, 498, 519, 628, 198, 4299, 1280, 62, 38969, 519, 7, 1174, 8738, 2599, 198, 220, 220, 220, 37227, ...
2.522059
952
#    Copyright 2016 EMC Corporation
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

from oslo_versionedobjects import fields

from cinder import objects
from cinder.tests.unit import fake_constants as fake
[ 2, 220, 220, 220, 15069, 1584, 412, 9655, 10501, 198, 2, 198, 2, 220, 220, 220, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 345, 743, 198, 2, 220, 220, 220, 407, 779, 428, 2393, 2845, 287, ...
3.434579
214
import matplotlib.pyplot as plt
import networkx as nx
from networkx.drawing.nx_agraph import graphviz_layout
[ 11748, 2603, 29487, 8019, 13, 9078, 29487, 355, 458, 83, 198, 11748, 3127, 87, 355, 299, 87, 198, 6738, 3127, 87, 13, 19334, 278, 13, 77, 87, 62, 6111, 1330, 4823, 85, 528, 62, 39786, 628, 198 ]
3
37
import setuptools

setuptools.setup(
    name = 'sili-canvas',
    version = '0.0.1',
    license = 'MIT',
    url = 'https://github.com/SilicalNZ/canvas',
    description = 'A series of easy to use classes to perform complex 2D array transformations',
    long_description = '',
    author = 'SilicalNZ',
    packages = ['canvas', 'canvas.common', 'canvas.tools']
)
[ 11748, 900, 37623, 10141, 198, 198, 2617, 37623, 10141, 13, 40406, 7, 198, 220, 220, 220, 1438, 796, 705, 82, 2403, 12, 5171, 11017, 3256, 198, 220, 220, 220, 2196, 796, 705, 15, 13, 15, 13, 16, 3256, 198, 220, 220, 220, 5964, 796...
2.725926
135
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
# isort:skip_file
import uuid
from datetime import datetime
import logging
from math import nan
from unittest.mock import Mock, patch

import numpy as np
import pandas as pd

import tests.test_app
import superset.viz as viz
from superset import app
from superset.constants import NULL_STRING
from superset.exceptions import SpatialException
from superset.utils.core import DTTM_ALIAS

from .base_tests import SupersetTestCase
from .utils import load_fixture

logger = logging.getLogger(__name__)
[ 2, 49962, 284, 262, 24843, 10442, 5693, 357, 1921, 37, 8, 739, 530, 198, 2, 393, 517, 18920, 5964, 11704, 13, 220, 4091, 262, 28536, 2393, 198, 2, 9387, 351, 428, 670, 329, 3224, 1321, 198, 2, 5115, 6634, 9238, 13, 220, 383, 7054,...
3.70977
348
# coding:utf-8

from typing import Dict, List, Optional, cast, TYPE_CHECKING

from .chord_util import ChordUtil, InternalControlFlowException, NodeIsDownedExceptiopn

if TYPE_CHECKING:
    from .chord_node import ChordNode
[ 2, 19617, 25, 40477, 12, 23, 198, 198, 6738, 19720, 1330, 360, 713, 11, 7343, 11, 32233, 11, 3350, 11, 41876, 62, 50084, 2751, 198, 198, 6738, 764, 354, 585, 62, 22602, 1330, 609, 585, 18274, 346, 11, 18628, 15988, 37535, 16922, 11,...
3.054795
73
#!/usr/bin/env python

import pandas as pd

from pathlib import Path
from torch.utils.data import DataLoader
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 198, 11748, 19798, 292, 355, 279, 67, 198, 198, 6738, 3108, 8019, 1330, 10644, 198, 6738, 28034, 13, 26791, 13, 7890, 1330, 6060, 17401, 628 ]
3.235294
34
from AudioLib.AudioEffect import AudioEffect
[ 6738, 13491, 25835, 13, 21206, 18610, 1330, 13491, 18610, 198 ]
4.5
10
#!/usr/bin/env python
from __future__ import print_function
import logging
import os
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 6738, 11593, 37443, 834, 1330, 3601, 62, 8818, 198, 11748, 18931, 198, 11748, 28686, 628 ]
3.583333
24
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import math

import mindspore.nn as nn
import mindspore.ops as P
from mindspore.common import initializer as init

from src.utils import default_recurisive_init, KaimingNormal
[ 2, 15069, 33448, 43208, 21852, 1766, 1539, 12052, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846, 351, 262, 13789, 13, 198...
4.112195
205
import numpy as np
from blmath.numerics import vx


def apex(points, axis):
    '''
    Find the most extreme point in the direction of the axis provided.

    axis: A vector, which is an 3x1 np.array.
    '''
    coords_on_axis = points.dot(axis)
    return points[np.argmax(coords_on_axis)]


def inflection_points(points, axis, span):
    '''
    Find the list of vertices that preceed inflection points in a curve. The
    curve is differentiated with respect to the coordinate system defined by
    axis and span.

    axis: A vector representing the vertical axis of the coordinate system.
    span: A vector representing the the horiztonal axis of the coordinate system.

    returns: a list of points in space corresponding to the vertices that
    immediately preceed inflection points in the curve
    '''
    coords_on_span = points.dot(span)
    dx = np.gradient(coords_on_span)
    coords_on_axis = points.dot(axis)

    # Take the second order finite difference of the curve with respect to the
    # defined coordinate system
    finite_difference_2 = np.gradient(np.gradient(coords_on_axis, dx), dx)

    # Compare the product of all neighboring pairs of points in the second derivative
    # If a pair of points has a negative product, then the second derivative changes sign
    # at one of those points, signalling an inflection point
    is_inflection_point = [finite_difference_2[i] * finite_difference_2[i + 1] <= 0
                           for i in range(len(finite_difference_2) - 1)]

    inflection_point_indices = [i for i, b in enumerate(is_inflection_point) if b]

    if len(inflection_point_indices) == 0:  # pylint: disable=len-as-condition
        return []

    return points[inflection_point_indices]


def farthest(from_point, to_points):
    '''
    Find the farthest point among the inputs, to the given point.

    Return a tuple: farthest_point, index_of_farthest_point.
    '''
    absolute_distances = vx.magnitude(to_points - from_point)

    index_of_farthest_point = np.argmax(absolute_distances)
    farthest_point = to_points[index_of_farthest_point]

    return farthest_point, index_of_farthest_point
[ 11748, 299, 32152, 355, 45941, 198, 6738, 698, 11018, 13, 77, 6975, 873, 1330, 410, 87, 198, 198, 4299, 40167, 7, 13033, 11, 16488, 2599, 198, 220, 220, 220, 705, 7061, 198, 220, 220, 220, 9938, 262, 749, 3257, 966, 287, 262, 4571, ...
3.005674
705
import os

import discode

TOKEN = os.environ.get("TOKEN")
# The token from the developer portal.

client = discode.Client(token=TOKEN, intents=discode.Intents.default())

# The ready listener gets fired when the bot/client is completely ready for use.

# The message_create listener is fired whenever a message is sent to any channel that the bot has access to.
[ 11748, 28686, 198, 198, 11748, 1221, 1098, 198, 198, 10468, 43959, 796, 28686, 13, 268, 2268, 13, 1136, 7203, 10468, 43959, 4943, 198, 2, 383, 11241, 422, 262, 8517, 17898, 13, 198, 198, 16366, 796, 1221, 1098, 13, 11792, 7, 30001, 28...
3.563107
103
from .activations import *
from .adaptive_avgmax_pool import \
    adaptive_avgmax_pool2d, select_adaptive_pool2d, AdaptiveAvgMaxPool2d, SelectAdaptivePool2d
from .blur_pool import BlurPool2d
from .classifier import ClassifierHead, create_classifier
from .cond_conv2d import CondConv2d, get_condconv_initializer
from .config import is_exportable, is_scriptable, is_no_jit, set_exportable, set_scriptable, set_no_jit,\
    set_layer_config
from .conv2d_same import Conv2dSame, conv2d_same
from .conv_bn_act import ConvBnAct
from .create_act import create_act_layer, get_act_layer, get_act_fn
from .create_attn import get_attn, create_attn
from .create_conv2d import create_conv2d
from .create_norm_act import get_norm_act_layer, create_norm_act, convert_norm_act
from .drop import DropBlock2d, DropPath, drop_block_2d, drop_path
from .eca import EcaModule, CecaModule, EfficientChannelAttn, CircularEfficientChannelAttn
from .evo_norm import EvoNormBatch2d, EvoNormSample2d
from .gather_excite import GatherExcite
from .global_context import GlobalContext
from .helpers import to_ntuple, to_2tuple, to_3tuple, to_4tuple, make_divisible
from .inplace_abn import InplaceAbn
from .involution import Involution
from .linear import Linear
from .mixed_conv2d import MixedConv2d
from .mlp import Mlp, GluMlp, GatedMlp, ConvMlpGeneral, ConvMlpGeneralv2
from .non_local_attn import NonLocalAttn, BatNonLocalAttn
from .norm import GroupNorm, LayerNorm2d
from .norm_act import BatchNormAct2d, GroupNormAct
from .padding import get_padding, get_same_padding, pad_same
from .patch_embed import PatchEmbed
from .pool2d_same import AvgPool2dSame, create_pool2d
from .squeeze_excite import SEModule, SqueezeExcite, EffectiveSEModule, EffectiveSqueezeExcite
from .selective_kernel import SelectiveKernel
from .separable_conv import SeparableConv2d, SeparableConvBnAct
from .space_to_depth import SpaceToDepthModule
from .split_attn import SplitAttn
from .split_batchnorm import SplitBatchNorm2d, convert_splitbn_model
from .std_conv import StdConv2d, StdConv2dSame, ScaledStdConv2d, ScaledStdConv2dSame
from .test_time_pool import TestTimePoolHead, apply_test_time_pool
from .weight_init import trunc_normal_, variance_scaling_, lecun_normal_
[ 6738, 764, 15791, 602, 1330, 1635, 198, 6738, 764, 42552, 425, 62, 615, 70, 9806, 62, 7742, 1330, 3467, 198, 220, 220, 220, 29605, 62, 615, 70, 9806, 62, 7742, 17, 67, 11, 2922, 62, 42552, 425, 62, 7742, 17, 67, 11, 30019, 425, ...
2.923784
761
""" Name: Bondi References: Bondi, Proc. Roy. Soc. Lond. A, v282, p303, (1964) Coordinates: Spherical Symmetry: Spherical Notes: Outgoing Coordinates """ from sympy import Function, diag, sin, symbols coords = symbols("r v theta phi", real=True) variables = () functions = symbols("C M", cls=Function) r, v, th, ph = coords C, M = functions metric = diag(0, -C(r, v) ** 2 * (1 - 2 * M(r, v) / r), r ** 2, r ** 2 * sin(th) ** 2) metric[0, 1] = metric[1, 0] = -C(r, v)
[ 37811, 198, 5376, 25, 12812, 72, 198, 19927, 25, 12812, 72, 11, 31345, 13, 9817, 13, 3345, 13, 406, 623, 13, 317, 11, 410, 32568, 11, 279, 22572, 11, 357, 46477, 8, 198, 7222, 585, 17540, 25, 1338, 37910, 198, 13940, 3020, 11973, ...
2.412371
194
from __future__ import unicode_literals

import json

from django.apps import apps
from django.core.urlresolvers import NoReverseMatch, reverse
from django.http import Http404, HttpRequest, QueryDict
from django.test import TestCase, override_settings
from django.utils import timezone
from wagtail.wagtailcore.models import Site
from wagtailsharing.models import SharingSite

import mock
from model_mommy import mommy

from ask_cfpb.models import ENGLISH_PARENT_SLUG, SPANISH_PARENT_SLUG
from ask_cfpb.views import annotate_links, ask_search, redirect_ask_search
from v1.util.migrations import get_or_create_page

now = timezone.now()


def test_autocomplete_es_blank_term(self):
    result = self.client.get(reverse(
        'ask-autocomplete-es',
        kwargs={'language': 'es'}), {'term': ''})
    output = json.loads(result.content)
    self.assertEqual(output, [])
[ 6738, 11593, 37443, 834, 1330, 28000, 1098, 62, 17201, 874, 198, 198, 11748, 33918, 198, 198, 6738, 42625, 14208, 13, 18211, 1330, 6725, 198, 6738, 42625, 14208, 13, 7295, 13, 6371, 411, 349, 690, 1330, 1400, 49, 964, 325, 23850, 11, ...
2.763804
326
import os
from setuptools import setup

VERSION = "0.2"


setup(
    name="instapaper-to-sqlite",
    description="Save data from Instapaper to a SQLite database",
    long_description=get_long_description(),
    long_description_content_type="text/markdown",
    author="Benjamin Congdon",
    author_email="me@bcon.gdn",
    url="https://github.com/bcongdon/instapaper-to-sqlite",
    project_urls={
        "Source": "https://github.com/bcongdon/instapaper-to-sqlite",
        "Issues": "https://github.com/bcongdon/instapaper-to-sqlite/issues",
    },
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Environment :: Console",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Topic :: Database",
    ],
    keywords="instapaper sqlite export dogsheep",
    version=VERSION,
    packages=["instapaper_to_sqlite"],
    entry_points="""
        [console_scripts]
        instapaper-to-sqlite=instapaper_to_sqlite.cli:cli
    """,
    install_requires=[
        "click",
        "requests",
        "sqlite-utils~=3.17",
        "pyinstapaper @ git+https://github.com/bcongdon/pyinstapaper#egg=pyinstapaper",
    ],
    extras_require={"test": ["pytest"]},
    tests_require=["instapaper-to-sqlite[test]"],
)
[ 11748, 28686, 198, 198, 6738, 900, 37623, 10141, 1330, 9058, 198, 198, 43717, 796, 366, 15, 13, 17, 1, 628, 198, 198, 40406, 7, 198, 220, 220, 220, 1438, 2625, 8625, 499, 2136, 12, 1462, 12, 25410, 578, 1600, 198, 220, 220, 220, 6...
2.408621
580
from typing import List

from pybm import PybmConfig
from pybm.command import CLICommand
from pybm.config import get_reporter_class
from pybm.exceptions import PybmError
from pybm.reporters import BaseReporter
from pybm.status_codes import ERROR, SUCCESS
from pybm.util.path import get_subdirs
[ 6738, 19720, 1330, 7343, 198, 198, 6738, 12972, 20475, 1330, 9485, 20475, 16934, 198, 6738, 12972, 20475, 13, 21812, 1330, 7852, 2149, 2002, 392, 198, 6738, 12972, 20475, 13, 11250, 1330, 651, 62, 260, 26634, 62, 4871, 198, 6738, 12972, ...
3.314607
89
""" For a given detector get a WIMPrate for a given detector (not taking into account any detector effects """ import numericalunits as nu import wimprates as wr import dddm export, __all__ = dddm.exporter()
[ 37811, 198, 1890, 257, 1813, 31029, 651, 257, 370, 3955, 47, 4873, 329, 257, 1813, 31029, 357, 1662, 2263, 656, 198, 23317, 597, 31029, 3048, 198, 37811, 198, 198, 11748, 29052, 41667, 355, 14364, 198, 11748, 266, 320, 1050, 689, 355, ...
3.349206
63
#!/usr/bin/env python3

import torch
from torch import optim
import torch.nn.functional as F

import argparse
from sklearn.metrics import mean_squared_error

import numpy as np
import json

from . import utils
from .model_utils import get_pi_exact_vec, rnn_vae_forward_one_stage, rnn_vae_forward_two_stage
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 198, 11748, 28034, 198, 6738, 28034, 1330, 6436, 198, 11748, 28034, 13, 20471, 13, 45124, 355, 376, 198, 198, 11748, 1822, 29572, 198, 6738, 1341, 35720, 13, 4164, 10466, 1330, 1612,...
3.039216
102
#! -*- coding: utf-8 -*-

from setuptools import setup, find_packages

setup(
    name='bert4keras',
    version='0.8.4',
    description='an elegant bert4keras',
    long_description='bert4keras: https://github.com/bojone/bert4keras',
    license='Apache License 2.0',
    url='https://github.com/bojone/bert4keras',
    author='bojone',
    author_email='bojone@spaces.ac.cn',
    install_requires=['keras<=2.3.1'],
    packages=find_packages()
)
[ 2, 0, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 6738, 900, 37623, 10141, 1330, 9058, 11, 1064, 62, 43789, 198, 198, 40406, 7, 198, 220, 220, 220, 1438, 11639, 4835, 19, 6122, 292, 3256, 198, 220, 220, 220, ...
2.350785
191
# import cProfile
# import pstats
# import io
from picture import *

# pr = cProfile.Profile()
# pr.enable()


if __name__ == '__main__':
    p = Picture()
    p.genPerms()
    p.detuctAll()
    p.backtrackLoop()
    p.saveOtput()

# pr.disable()
# s = io.StringIO()
# sortby = 'cumulative'
# ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
# ps.print_stats()
# print(s.getvalue())
[ 2, 1330, 269, 37046, 198, 2, 1330, 279, 34242, 198, 2, 1330, 33245, 198, 6738, 4286, 1330, 1635, 198, 198, 2, 778, 796, 269, 37046, 13, 37046, 3419, 198, 2, 778, 13, 21633, 3419, 628, 198, 198, 361, 11593, 3672, 834, 6624, 705, 83...
2.297753
178
# imports
import os
import json
import subprocess

abs_join = lambda p1, p2 : os.path.abspath(os.path.join(p1, p2))

# constants
SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))
SEED_RELPATH = "./strprose/example_files/_seeds.json"
SEED_FULLPATH = abs_join(SCRIPT_DIR, SEED_RELPATH)
SEED_INFO = None
with open(SEED_FULLPATH, 'r') as f:
    SEED_INFO = json.load(f)
TOOL_RELPATH = "../StrPROSE-synthesizer/StrPROSE/bin/Debug/netcoreapp3.1/StrPROSE.dll"
TOOL_FULLPATH = abs_join(SCRIPT_DIR, TOOL_RELPATH)
TARGET_RELDIR = "./strprose/targets"
TARGET_FULLDIR = abs_join(SCRIPT_DIR, TARGET_RELDIR)
MAX_SAMPLE_SIZE = 2000
EXAMPLE_RELDIR = "./strprose/example_files"
EXAMPLE_FULLDIR = abs_join(SCRIPT_DIR, EXAMPLE_RELDIR)
TIME_OUT = 120

# methods

if __name__ == "__main__":
    for bench_id in SEED_INFO["bench_seeds"]:
        for seed in SEED_INFO["bench_seeds"][bench_id]:
            generate_examples(bench_id, seed)
[ 2, 17944, 198, 11748, 28686, 198, 11748, 33918, 198, 11748, 850, 14681, 198, 8937, 62, 22179, 796, 37456, 279, 16, 11, 279, 17, 1058, 28686, 13, 6978, 13, 397, 2777, 776, 7, 418, 13, 6978, 13, 22179, 7, 79, 16, 11, 279, 17, 4008, ...
2.275862
406
from __future__ import division from mmtbx.tls import tools import math import time pdb_str_1 = """ CRYST1 10.000 10.000 10.000 90.00 90.00 90.00 P1 ATOM 1 CA THR A 6 0.000 0.000 0.000 1.00 0.00 C ATOM 1 CA THR B 6 3.000 0.000 0.000 1.00 0.00 C """ pdb_str_2 = """ CRYST1 10.000 10.000 10.000 90.00 90.00 90.00 P1 ATOM 1 CA THR A 6 0.000 0.000 0.000 1.00 0.00 C ATOM 1 CA THR B 6 0.000 3.000 0.000 1.00 0.00 C """ pdb_str_3 = """ CRYST1 10.000 10.000 10.000 90.00 90.00 90.00 P1 ATOM 1 CA THR A 6 0.000 0.000 0.000 1.00 0.00 C ATOM 1 CA THR B 6 0.000 0.000 3.000 1.00 0.00 C """ pdb_str_4 = """ CRYST1 10.000 10.000 10.000 90.00 90.00 90.00 P1 ATOM 1 CA THR A 6 0.000 0.000 0.000 1.00 0.00 C ATOM 1 CA THR B 6 1.000 2.000 3.000 1.00 0.00 C """ if (__name__ == "__main__"): t0 = time.time() exercise_03() print "Time: %6.4f"%(time.time()-t0) print "OK"
[ 6738, 11593, 37443, 834, 1330, 7297, 198, 6738, 8085, 83, 65, 87, 13, 83, 7278, 1330, 4899, 198, 11748, 10688, 198, 11748, 640, 198, 198, 79, 9945, 62, 2536, 62, 16, 796, 37227, 198, 9419, 56, 2257, 16, 220, 220, 838, 13, 830, 220...
1.629787
705
import sys from django.urls import resolve
[ 11748, 25064, 198, 6738, 42625, 14208, 13, 6371, 82, 1330, 10568, 628 ]
3.666667
12
""" Training script for steps_with_decay policy""" import argparse import os import sys import pickle import resource import traceback import logging from collections import defaultdict import numpy as np import yaml import torch from torch.autograd import Variable import torch.nn as nn import cv2 cv2.setNumThreads(0) # pytorch issue 1355: possible deadlock in dataloader import _init_paths # pylint: disable=unused-import import nn as mynn import utils.net as net_utils import utils.misc as misc_utils from core.config import cfg, cfg_from_file, cfg_from_list, assert_and_infer_cfg from datasets.roidb import combined_roidb_for_training from roi_data.loader import RoiDataLoader, MinibatchSampler, BatchSampler, collate_minibatch from modeling.model_builder import Generalized_RCNN from utils.detectron_weight_helper import load_detectron_weight from utils.logging import setup_logging from utils.timer import Timer from utils.training_stats import TrainingStats # Set up logging and load config options logger = setup_logging(__name__) logging.getLogger('roi_data.loader').setLevel(logging.INFO) # RuntimeError: received 0 items of ancdata. Issue: pytorch/pytorch#973 rlimit = resource.getrlimit(resource.RLIMIT_NOFILE) resource.setrlimit(resource.RLIMIT_NOFILE, (4096, rlimit[1])) def parse_args(): """Parse input arguments""" parser = argparse.ArgumentParser(description='Train a X-RCNN network') parser.add_argument( '--dataset', dest='dataset', required=True, help='Dataset to use') parser.add_argument( '--num_classes', dest='num_classes', help='Number of classes in your custom dataset', default=None, type=int) parser.add_argument( '--cfg', dest='cfg_file', required=True, help='Config file for training (and optionally testing)') parser.add_argument( '--set', dest='set_cfgs', help='Set config keys. Key value sequence seperate by whitespace.' 'e.g. [key] [value] [key] [value]', default=[], nargs='+') parser.add_argument( '--disp_interval', help='Display training info every N iterations', default=20, type=int) parser.add_argument( '--no_cuda', dest='cuda', help='Do not use CUDA device', action='store_false') # Optimization # These options has the highest prioity and can overwrite the values in config file # or values set by set_cfgs. `None` means do not overwrite. parser.add_argument( '--bs', dest='batch_size', help='Explicitly specify to overwrite the value comed from cfg_file.', type=int) parser.add_argument( '--nw', dest='num_workers', help='Explicitly specify to overwrite number of workers to load data. Defaults to 4', type=int) parser.add_argument( '--iter_size', help='Update once every iter_size steps, as in Caffe.', default=1, type=int) parser.add_argument( '--o', dest='optimizer', help='Training optimizer.', default=None) parser.add_argument( '--lr', help='Base learning rate.', default=None, type=float) parser.add_argument( '--lr_decay_gamma', help='Learning rate decay rate.', default=None, type=float) # Epoch parser.add_argument( '--start_step', help='Starting step count for training epoch. 
0-indexed.', default=0, type=int) # Resume training: requires same iterations per epoch parser.add_argument( '--resume', help='resume to training on a checkpoint', action='store_true') parser.add_argument( '--no_save', help='do not save anything', action='store_true') parser.add_argument( '--load_ckpt', help='checkpoint path to load') parser.add_argument( '--load_detectron', help='path to the detectron weight pickle file') parser.add_argument( '--use_tfboard', help='Use tensorflow tensorboard to log training info', action='store_true') return parser.parse_args() def save_ckpt(output_dir, args, step, train_size, model, optimizer): """Save checkpoint""" if args.no_save: return ckpt_dir = os.path.join(output_dir, 'ckpt') if not os.path.exists(ckpt_dir): os.makedirs(ckpt_dir) save_name = os.path.join(ckpt_dir, 'model_step{}.pth'.format(step)) if isinstance(model, mynn.DataParallel): model = model.module model_state_dict = model.state_dict() torch.save({ 'step': step, 'train_size': train_size, 'batch_size': args.batch_size, 'model': model.state_dict(), 'optimizer': optimizer.state_dict()}, save_name) logger.info('save model: %s', save_name) def main(): """Main function""" args = parse_args() print('Called with args:') print(args) if not torch.cuda.is_available(): sys.exit("Need a CUDA device to run the code.") if args.cuda or cfg.NUM_GPUS > 0: cfg.CUDA = True else: raise ValueError("Need Cuda device to run !") if args.dataset == "custom_dataset" and args.num_classes is None: raise ValueError("Need number of classes in your custom dataset to run!") if args.dataset == "coco2017": cfg.TRAIN.DATASETS = ('coco_2014_train',) cfg.MODEL.NUM_CLASSES = 4 elif args.dataset == "keypoints_coco2017": cfg.TRAIN.DATASETS = ('keypoints_coco_2017_train',) cfg.MODEL.NUM_CLASSES = 2 elif args.dataset == "voc2007": cfg.TRAIN.DATASETS = ('voc_2007_train',) cfg.MODEL.NUM_CLASSES = 21 elif args.dataset == "voc2012": cfg.TRAIN.DATASETS = ('voc_2012_train',) cfg.MODEL.NUM_CLASSES = 21 elif args.dataset == "custom_dataset": cfg.TRAIN.DATASETS = ('custom_data_train',) cfg.MODEL.NUM_CLASSES = args.num_classes else: raise ValueError("Unexpected args.dataset: {}".format(args.dataset)) cfg_from_file(args.cfg_file) if args.set_cfgs is not None: cfg_from_list(args.set_cfgs) ### Adaptively adjust some configs ### original_batch_size = cfg.NUM_GPUS * cfg.TRAIN.IMS_PER_BATCH original_ims_per_batch = cfg.TRAIN.IMS_PER_BATCH original_num_gpus = cfg.NUM_GPUS if args.batch_size is None: args.batch_size = original_batch_size cfg.NUM_GPUS = torch.cuda.device_count() assert (args.batch_size % cfg.NUM_GPUS) == 0, \ 'batch_size: %d, NUM_GPUS: %d' % (args.batch_size, cfg.NUM_GPUS) cfg.TRAIN.IMS_PER_BATCH = args.batch_size // cfg.NUM_GPUS effective_batch_size = args.iter_size * args.batch_size print('effective_batch_size = batch_size * iter_size = %d * %d' % (args.batch_size, args.iter_size)) print('Adaptive config changes:') print(' effective_batch_size: %d --> %d' % (original_batch_size, effective_batch_size)) print(' NUM_GPUS: %d --> %d' % (original_num_gpus, cfg.NUM_GPUS)) print(' IMS_PER_BATCH: %d --> %d' % (original_ims_per_batch, cfg.TRAIN.IMS_PER_BATCH)) ### Adjust learning based on batch size change linearly # For iter_size > 1, gradients are `accumulated`, so lr is scaled based # on batch_size instead of effective_batch_size old_base_lr = cfg.SOLVER.BASE_LR cfg.SOLVER.BASE_LR *= args.batch_size / original_batch_size print('Adjust BASE_LR linearly according to batch_size change:\n' ' BASE_LR: {} --> {}'.format(old_base_lr, 
cfg.SOLVER.BASE_LR)) ### Adjust solver steps step_scale = original_batch_size / effective_batch_size old_solver_steps = cfg.SOLVER.STEPS old_max_iter = cfg.SOLVER.MAX_ITER cfg.SOLVER.STEPS = list(map(lambda x: int(x * step_scale + 0.5), cfg.SOLVER.STEPS)) cfg.SOLVER.MAX_ITER = int(cfg.SOLVER.MAX_ITER * step_scale + 0.5) print('Adjust SOLVER.STEPS and SOLVER.MAX_ITER linearly based on effective_batch_size change:\n' ' SOLVER.STEPS: {} --> {}\n' ' SOLVER.MAX_ITER: {} --> {}'.format(old_solver_steps, cfg.SOLVER.STEPS, old_max_iter, cfg.SOLVER.MAX_ITER)) # Scale FPN rpn_proposals collect size (post_nms_topN) in `collect` function # of `collect_and_distribute_fpn_rpn_proposals.py` # # post_nms_topN = int(cfg[cfg_key].RPN_POST_NMS_TOP_N * cfg.FPN.RPN_COLLECT_SCALE + 0.5) if cfg.FPN.FPN_ON and cfg.MODEL.FASTER_RCNN: cfg.FPN.RPN_COLLECT_SCALE = cfg.TRAIN.IMS_PER_BATCH / original_ims_per_batch print('Scale FPN rpn_proposals collect size directly proportional to the change of IMS_PER_BATCH:\n' ' cfg.FPN.RPN_COLLECT_SCALE: {}'.format(cfg.FPN.RPN_COLLECT_SCALE)) if args.num_workers is not None: cfg.DATA_LOADER.NUM_THREADS = args.num_workers print('Number of data loading threads: %d' % cfg.DATA_LOADER.NUM_THREADS) ### Overwrite some solver settings from command line arguments if args.optimizer is not None: cfg.SOLVER.TYPE = args.optimizer if args.lr is not None: cfg.SOLVER.BASE_LR = args.lr if args.lr_decay_gamma is not None: cfg.SOLVER.GAMMA = args.lr_decay_gamma assert_and_infer_cfg() timers = defaultdict(Timer) ### Dataset ### timers['roidb'].tic() roidb, ratio_list, ratio_index = combined_roidb_for_training( cfg.TRAIN.DATASETS, cfg.TRAIN.PROPOSAL_FILES) timers['roidb'].toc() roidb_size = len(roidb) logger.info('{:d} roidb entries'.format(roidb_size)) logger.info('Takes %.2f sec(s) to construct roidb', timers['roidb'].average_time) # Effective training sample size for one epoch train_size = roidb_size // args.batch_size * args.batch_size batchSampler = BatchSampler( sampler=MinibatchSampler(ratio_list, ratio_index), batch_size=args.batch_size, drop_last=True ) dataset = RoiDataLoader( roidb, cfg.MODEL.NUM_CLASSES, training=True) dataloader = torch.utils.data.DataLoader( dataset, batch_sampler=batchSampler, num_workers=cfg.DATA_LOADER.NUM_THREADS, collate_fn=collate_minibatch) dataiterator = iter(dataloader) ### Model ### maskRCNN = Generalized_RCNN() if cfg.CUDA: maskRCNN.cuda() ### Optimizer ### gn_param_nameset = set() for name, module in maskRCNN.named_modules(): if isinstance(module, nn.GroupNorm): gn_param_nameset.add(name+'.weight') gn_param_nameset.add(name+'.bias') gn_params = [] gn_param_names = [] bias_params = [] bias_param_names = [] nonbias_params = [] nonbias_param_names = [] nograd_param_names = [] for key, value in maskRCNN.named_parameters(): if value.requires_grad: if 'bias' in key: bias_params.append(value) bias_param_names.append(key) elif key in gn_param_nameset: gn_params.append(value) gn_param_names.append(key) else: nonbias_params.append(value) nonbias_param_names.append(key) else: nograd_param_names.append(key) assert (gn_param_nameset - set(nograd_param_names) - set(bias_param_names)) == set(gn_param_names) # Learning rate of 0 is a dummy value to be set properly at the start of training params = [ {'params': nonbias_params, 'lr': 0, 'weight_decay': cfg.SOLVER.WEIGHT_DECAY}, {'params': bias_params, 'lr': 0 * (cfg.SOLVER.BIAS_DOUBLE_LR + 1), 'weight_decay': cfg.SOLVER.WEIGHT_DECAY if cfg.SOLVER.BIAS_WEIGHT_DECAY else 0}, {'params': gn_params, 'lr': 0, 'weight_decay':
cfg.SOLVER.WEIGHT_DECAY_GN} ] # names of parameters for each param group param_names = [nonbias_param_names, bias_param_names, gn_param_names] if cfg.SOLVER.TYPE == "SGD": optimizer = torch.optim.SGD(params, momentum=cfg.SOLVER.MOMENTUM) elif cfg.SOLVER.TYPE == "Adam": optimizer = torch.optim.Adam(params) ### Load checkpoint if args.load_ckpt: load_name = args.load_ckpt logging.info("loading checkpoint %s", load_name) checkpoint = torch.load(load_name, map_location=lambda storage, loc: storage) net_utils.load_ckpt(maskRCNN, checkpoint['model']) if args.resume: args.start_step = checkpoint['step'] + 1 if 'train_size' in checkpoint: # For backward compatibility if checkpoint['train_size'] != train_size: print('train_size value: %d different from the one in checkpoint: %d' % (train_size, checkpoint['train_size'])) # reorder the params in optimizer checkpoint's params_groups if needed # misc_utils.ensure_optimizer_ckpt_params_order(param_names, checkpoint) # There is a bug in optimizer.load_state_dict on Pytorch 0.3.1. # However it's fixed on master. optimizer.load_state_dict(checkpoint['optimizer']) # misc_utils.load_optimizer_state_dict(optimizer, checkpoint['optimizer']) del checkpoint torch.cuda.empty_cache() if args.load_detectron: #TODO resume for detectron weights (load sgd momentum values) logging.info("loading Detectron weights %s", args.load_detectron) load_detectron_weight(maskRCNN, args.load_detectron) lr = optimizer.param_groups[0]['lr'] # lr of non-bias parameters, for command line outputs. maskRCNN = mynn.DataParallel(maskRCNN, cpu_keywords=['im_info', 'roidb'], minibatch=True) ### Training Setups ### args.run_name = misc_utils.get_run_name() + '_step' output_dir = misc_utils.get_output_dir(args, args.run_name) args.cfg_filename = os.path.basename(args.cfg_file) if not args.no_save: if not os.path.exists(output_dir): os.makedirs(output_dir) blob = {'cfg': yaml.dump(cfg), 'args': args} with open(os.path.join(output_dir, 'config_and_args.pkl'), 'wb') as f: pickle.dump(blob, f, pickle.HIGHEST_PROTOCOL) if args.use_tfboard: from tensorboardX import SummaryWriter # Set the Tensorboard logger tblogger = SummaryWriter(output_dir) ### Training Loop ### maskRCNN.train() CHECKPOINT_PERIOD = int(cfg.TRAIN.SNAPSHOT_ITERS / cfg.NUM_GPUS) # Set index for decay steps decay_steps_ind = None for i in range(1, len(cfg.SOLVER.STEPS)): if cfg.SOLVER.STEPS[i] >= args.start_step: decay_steps_ind = i break if decay_steps_ind is None: decay_steps_ind = len(cfg.SOLVER.STEPS) training_stats = TrainingStats( args, args.disp_interval, tblogger if args.use_tfboard and not args.no_save else None) try: logger.info('Training starts !') step = args.start_step for step in range(args.start_step, cfg.SOLVER.MAX_ITER): # Warm up if step < cfg.SOLVER.WARM_UP_ITERS: method = cfg.SOLVER.WARM_UP_METHOD if method == 'constant': warmup_factor = cfg.SOLVER.WARM_UP_FACTOR elif method == 'linear': alpha = step / cfg.SOLVER.WARM_UP_ITERS warmup_factor = cfg.SOLVER.WARM_UP_FACTOR * (1 - alpha) + alpha else: raise KeyError('Unknown SOLVER.WARM_UP_METHOD: {}'.format(method)) lr_new = cfg.SOLVER.BASE_LR * warmup_factor net_utils.update_learning_rate(optimizer, lr, lr_new) lr = optimizer.param_groups[0]['lr'] assert lr == lr_new elif step == cfg.SOLVER.WARM_UP_ITERS: net_utils.update_learning_rate(optimizer, lr, cfg.SOLVER.BASE_LR) lr = optimizer.param_groups[0]['lr'] assert lr == cfg.SOLVER.BASE_LR # Learning rate decay if decay_steps_ind < len(cfg.SOLVER.STEPS) and \ step == cfg.SOLVER.STEPS[decay_steps_ind]: logger.info('Decay
the learning on step %d', step) lr_new = lr * cfg.SOLVER.GAMMA net_utils.update_learning_rate(optimizer, lr, lr_new) lr = optimizer.param_groups[0]['lr'] assert lr == lr_new decay_steps_ind += 1 training_stats.IterTic() optimizer.zero_grad() for inner_iter in range(args.iter_size): try: input_data = next(dataiterator) except StopIteration: dataiterator = iter(dataloader) input_data = next(dataiterator) for key in input_data: if key != 'roidb': # roidb is a list of ndarrays with inconsistent length input_data[key] = list(map(Variable, input_data[key])) try: net_outputs = maskRCNN(**input_data) except: continue training_stats.UpdateIterStats(net_outputs, inner_iter) loss = net_outputs['total_loss'] loss.backward() optimizer.step() training_stats.IterToc() training_stats.LogIterStats(step, lr) if (step+1) % CHECKPOINT_PERIOD == 0: save_ckpt(output_dir, args, step, train_size, maskRCNN, optimizer) # ---- Training ends ---- # Save last checkpoint save_ckpt(output_dir, args, step, train_size, maskRCNN, optimizer) except (RuntimeError, KeyboardInterrupt): del dataiterator logger.info('Save ckpt on exception ...') save_ckpt(output_dir, args, step, train_size, maskRCNN, optimizer) logger.info('Save ckpt done.') stack_trace = traceback.format_exc() print(stack_trace) finally: if args.use_tfboard and not args.no_save: tblogger.close() if __name__ == '__main__': main()
[ 37811, 13614, 4226, 329, 4831, 62, 4480, 62, 12501, 323, 2450, 37811, 198, 198, 11748, 1822, 29572, 198, 11748, 28686, 198, 11748, 25064, 198, 11748, 2298, 293, 198, 11748, 8271, 198, 11748, 12854, 1891, 198, 11748, 18931, 198, 6738, 1726...
2.19221
8,293
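The script in the record above rescales the base learning rate linearly with batch size and stretches SOLVER.STEPS/MAX_ITER by the inverse of the effective-batch-size change. A condensed sketch of that arithmetic with hypothetical stand-in numbers, not the script's actual cfg values:

# Sketch of the linear LR / schedule scaling used above (stand-in numbers).
original_batch_size = 16      # NUM_GPUS * IMS_PER_BATCH from the config
batch_size = 8                # value passed via --bs
iter_size = 2                 # gradient accumulation steps (--iter_size)
effective_batch_size = batch_size * iter_size

base_lr = 0.02
base_lr *= batch_size / original_batch_size        # lr follows batch_size

step_scale = original_batch_size / effective_batch_size
steps = [int(s * step_scale + 0.5) for s in (60000, 80000)]
max_iter = int(90000 * step_scale + 0.5)
print(base_lr, steps, max_iter)   # 0.01 [60000, 80000] 90000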
# Copyright (c) 2019-2020 hippo91 <guillaume.peillex@gmail.com> # Copyright (c) 2020 Claudiu Popa <pcmanticore@gmail.com> # Copyright (c) 2021 Pierre Sassoulas <pierre.sassoulas@gmail.com> # Copyright (c) 2021 Marc Mueller <30130371+cdce8p@users.noreply.github.com> # Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html # For details: https://github.com/PyCQA/astroid/blob/main/LICENSE """Astroid hooks for numpy.core.multiarray module.""" import functools from astroid.brain.brain_numpy_utils import infer_numpy_member, looks_like_numpy_member from astroid.brain.helpers import register_module_extender from astroid.builder import parse from astroid.inference_tip import inference_tip from astroid.manager import AstroidManager from astroid.nodes.node_classes import Attribute, Name register_module_extender( AstroidManager(), "numpy.core.multiarray", numpy_core_multiarray_transform ) METHODS_TO_BE_INFERRED = { "array": """def array(object, dtype=None, copy=True, order='K', subok=False, ndmin=0): return numpy.ndarray([0, 0])""", "dot": """def dot(a, b, out=None): return numpy.ndarray([0, 0])""", "empty_like": """def empty_like(a, dtype=None, order='K', subok=True): return numpy.ndarray((0, 0))""", "concatenate": """def concatenate(arrays, axis=None, out=None): return numpy.ndarray((0, 0))""", "where": """def where(condition, x=None, y=None): return numpy.ndarray([0, 0])""", "empty": """def empty(shape, dtype=float, order='C'): return numpy.ndarray([0, 0])""", "bincount": """def bincount(x, weights=None, minlength=0): return numpy.ndarray([0, 0])""", "busday_count": """def busday_count(begindates, enddates, weekmask='1111100', holidays=[], busdaycal=None, out=None): return numpy.ndarray([0, 0])""", "busday_offset": """def busday_offset(dates, offsets, roll='raise', weekmask='1111100', holidays=None, busdaycal=None, out=None): return numpy.ndarray([0, 0])""", "can_cast": """def can_cast(from_, to, casting='safe'): return True""", "copyto": """def copyto(dst, src, casting='same_kind', where=True): return None""", "datetime_as_string": """def datetime_as_string(arr, unit=None, timezone='naive', casting='same_kind'): return numpy.ndarray([0, 0])""", "is_busday": """def is_busday(dates, weekmask='1111100', holidays=None, busdaycal=None, out=None): return numpy.ndarray([0, 0])""", "lexsort": """def lexsort(keys, axis=-1): return numpy.ndarray([0, 0])""", "may_share_memory": """def may_share_memory(a, b, max_work=None): return True""", # Not yet available because dtype is not yet present in those brains # "min_scalar_type": """def min_scalar_type(a): # return numpy.dtype('int16')""", "packbits": """def packbits(a, axis=None, bitorder='big'): return numpy.ndarray([0, 0])""", # Not yet available because dtype is not yet present in those brains # "result_type": """def result_type(*arrays_and_dtypes): # return numpy.dtype('int16')""", "shares_memory": """def shares_memory(a, b, max_work=None): return True""", "unpackbits": """def unpackbits(a, axis=None, count=None, bitorder='big'): return numpy.ndarray([0, 0])""", "unravel_index": """def unravel_index(indices, shape, order='C'): return (numpy.ndarray([0, 0]),)""", "zeros": """def zeros(shape, dtype=float, order='C'): return numpy.ndarray([0, 0])""", } for method_name, function_src in METHODS_TO_BE_INFERRED.items(): inference_function = functools.partial(infer_numpy_member, function_src) AstroidManager().register_transform( Attribute, inference_tip(inference_function), functools.partial(looks_like_numpy_member, method_name), ) 
AstroidManager().register_transform( Name, inference_tip(inference_function), functools.partial(looks_like_numpy_member, method_name), )
[ 2, 15069, 357, 66, 8, 13130, 12, 42334, 18568, 78, 6420, 1279, 5162, 5049, 2454, 13, 431, 346, 2588, 31, 14816, 13, 785, 29, 198, 2, 15069, 357, 66, 8, 12131, 36303, 16115, 8099, 64, 1279, 79, 11215, 5109, 382, 31, 14816, 13, 785,...
2.390202
1,735
input = """ 2 18 3 0 3 19 20 21 1 1 1 0 18 2 23 3 0 3 19 24 25 1 1 2 1 21 23 3 5 21 19 20 24 25 0 0 6 0 5 5 21 19 20 24 25 1 1 1 1 1 0 21 a 19 b 20 c 24 d 25 e 28 f 0 B+ 0 B- 1 0 1 """ output = """ COST 1@1 """
[ 15414, 796, 37227, 198, 17, 1248, 513, 657, 513, 678, 1160, 2310, 198, 16, 352, 352, 657, 1248, 198, 17, 2242, 513, 657, 513, 678, 1987, 1679, 198, 16, 352, 362, 352, 2310, 2242, 198, 18, 642, 2310, 678, 1160, 1987, 1679, 657, 657...
1.918182
110
import random names_string = input("Enter the names, separated by a comma.\n") names = names_string.split(",") print(names) n = random.randint(0, len(names) - 1) print(f"{names[n]}!")
[ 11748, 4738, 198, 198, 14933, 62, 8841, 796, 5128, 7203, 220, 220, 220, 764, 357, 35751, 220, 764, 59, 77, 4943, 198, 14933, 796, 3891, 62, 8841, 13, 35312, 7, 2430, 8, 198, 198, 4798, 7, 14933, 8, 198, 198, 77, 796, 4738, 13, 2...
2.352941
68
import sys import typing import numpy as np main()
[ 11748, 25064, 201, 198, 11748, 19720, 201, 198, 201, 198, 11748, 299, 32152, 355, 45941, 201, 198, 201, 198, 201, 198, 201, 198, 201, 198, 12417, 3419, 201, 198 ]
2.241379
29
import network mqtt = network.mqtt("loboris", "mqtt://loboris.eu", user="wifimcu", password="wifimculobo", cleansession=True, connected_cb=conncb, disconnected_cb=disconncb, subscribed_cb=subscb, published_cb=pubcb, data_cb=datacb) # secure connection requires more memory and may not work # mqtts = network.mqtt("eclipse", "mqtts://iot.eclipse.org", cleansession=True, connected_cb=conncb, disconnected_cb=disconncb, subscribed_cb=subscb, published_cb=pubcb, data_cb=datacb) # wsmqtt = network.mqtt("eclipse", "ws://iot.eclipse.org:80/ws", cleansession=True, data_cb=datacb) mqtt.start() #mqtt.config(lwt_topic='status', lwt_msg='Disconnected') ''' # Wait until status is: (1, 'Connected') mqtt.subscribe('test') mqtt.publish('test', 'Hi from Micropython') mqtt.stop() ''' # ================== # ThingSpeak example # ================== import network import utime thing = network.mqtt("thingspeak", "mqtt://mqtt.thingspeak.com", user="anyName", password="ThingSpeakMQTTid", cleansession=True, data_cb=datacb) # or secure connection #thing = network.mqtt("thingspeak", "mqtts://mqtt.thingspeak.com", user="anyName", password="ThingSpeakMQTTid", cleansession=True, data_cb=datacb) thingspeakChannelId = "123456" # enter Thingspeak Channel ID thingspeakChannelWriteApiKey = "ThingspeakWriteAPIKey" # EDIT - enter Thingspeak Write API Key thingspeakFieldNo = 1 thingSpeakChannelFormat = "json" pubchan = "channels/{:s}/publish/{:s}".format(thingspeakChannelId, thingspeakChannelWriteApiKey) pubfield = "channels/{:s}/publish/fields/field{}/{:s}".format(thingspeakChannelId, thingspeakFieldNo, thingspeakChannelWriteApiKey) subchan = "channels/{:s}/subscribe/{:s}/{:s}".format(thingspeakChannelId, thingSpeakChannelFormat, thingspeakChannelWriteApiKey) subfield = "channels/{:s}/subscribe/fields/field{}/{:s}".format(thingspeakChannelId, thingspeakFieldNo, thingspeakChannelWriteApiKey) thing.start() tmo = 0 while thing.status()[0] != 2: utime.sleep_ms(100) tmo += 1 if tmo > 80: print("Not connected") break # subscribe to channel thing.subscribe(subchan) # subscribe to field thing.subscribe(subfield) # publish to channel # Payload can include any of those fields separated by ';': # "field1=value;field2=value;...;field8=value;latitude=value;longitude=value;elevation=value;status=value" thing.publish(pubchan, "field1=25.2;status=On line") # Publish to field thing.publish(pubfield, "24.5")
[ 11748, 3127, 628, 198, 76, 80, 926, 796, 3127, 13, 76, 80, 926, 7203, 75, 672, 37279, 1600, 366, 76, 80, 926, 1378, 75, 672, 37279, 13, 12496, 1600, 2836, 2625, 86, 361, 320, 27399, 1600, 9206, 2625, 86, 361, 320, 3129, 20391, 160...
2.663774
922
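The MQTT record above wires in callbacks (conncb, disconncb, subscb, pubcb, datacb) that it never defines. A hedged sketch of what they might look like; the single-argument signatures follow common LoBo MicroPython examples and are an assumption, not taken from the record:

# Hypothetical callback definitions for the record above. Verify the exact
# argument shapes against your firmware before relying on them.
def conncb(task):
    print("[{}] Connected".format(task))

def disconncb(task):
    print("[{}] Disconnected".format(task))

def subscb(task):
    print("[{}] Subscribed".format(task))

def pubcb(pub):
    print("[{}] Published: {}".format(pub[0], pub[1]))

def datacb(msg):
    print("[{}] Data arrived on topic {}: {}".format(msg[0], msg[1], msg[2]))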
# Generated by Django 2.2.8 on 2019-12-14 19:07 from django.db import migrations, models
[ 2, 2980, 515, 416, 37770, 362, 13, 17, 13, 23, 319, 13130, 12, 1065, 12, 1415, 678, 25, 2998, 198, 198, 6738, 42625, 14208, 13, 9945, 1330, 15720, 602, 11, 4981, 628 ]
2.84375
32
# flake8: NOQA E501 import ast import random from textwrap import dedent from typing import List from main.exercises import generate_list, generate_string from main.text import ExerciseStep, MessageStep, Page, Step, VerbatimStep, search_ast from main.utils import returns_stdout
[ 2, 781, 539, 23, 25, 8005, 48, 32, 412, 33548, 198, 11748, 6468, 198, 11748, 4738, 198, 6738, 2420, 37150, 1330, 4648, 298, 198, 6738, 19720, 1330, 7343, 198, 198, 6738, 1388, 13, 1069, 2798, 2696, 1330, 7716, 62, 4868, 11, 7716, 62...
3.46988
83
import time import redis import json import argparse """ Follows the StackExchange best practice for creating a work queue. Basically push a task and publish a message that a task is there.""" if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("-q", "--queue", help="The queue from which workers will grab tasks") parser.add_argument("-t", "--task", help="The task data") parser.add_argument("-o", "--topic", help="The topic to which workers are subscribed") parser.add_argument("-s", "--server", help="redis server host or IP") parser.add_argument("-p", "--port", help="redis server port (default is 6379)", type=int, default=6379) args = parser.parse_args() if args.queue is None or args.task is None or args.topic is None or args.server is None: parser.print_help() else: client = redis.StrictRedis(host=args.server, port=args.port) PushTask(client, args.queue, args.task, args.topic)
[ 11748, 640, 198, 11748, 2266, 271, 198, 11748, 33918, 198, 11748, 1822, 29572, 628, 198, 37811, 7281, 82, 262, 23881, 3109, 3803, 1266, 3357, 329, 4441, 257, 670, 16834, 13, 198, 220, 220, 220, 20759, 4574, 257, 4876, 290, 7715, 257, ...
2.398707
464
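PushTask is called at the end of the record above but is not defined in it. A minimal sketch of the StackExchange-style pattern the docstring describes — push the task, then publish a notification; the function body is an assumption:

# Hypothetical PushTask: push the work item, then signal subscribers.
def PushTask(client, queue, task, topic):
    client.lpush(queue, task)           # workers BRPOP/RPOP from this queue
    client.publish(topic, "new-task")   # message only says "work is available"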
from __future__ import absolute_import, unicode_literals import os from celery import Celery # set the default Django settings module for the 'celery' program. os.environ.setdefault("DJANGO_SETTINGS_MODULE", "app.settings") app = Celery("app") # Using a string here means the worker doesn't have to serialize # the configuration object to child processes. # - namespace='CELERY' means all celery-related configuration keys # should have a `CELERY_` prefix. app.config_from_object("django.conf:settings", namespace="CELERY") # Load task modules from all registered Django app configs. app.autodiscover_tasks() app.conf.update( task_serializer="json", accept_content=["json"], # Ignore other content result_serializer="json", timezone="Europe/Oslo", enable_utc=True, )
[ 6738, 11593, 37443, 834, 1330, 4112, 62, 11748, 11, 28000, 1098, 62, 17201, 874, 198, 198, 11748, 28686, 198, 198, 6738, 18725, 1924, 1330, 15248, 1924, 198, 198, 2, 900, 262, 4277, 37770, 6460, 8265, 329, 262, 705, 7015, 88, 6, 1430,...
3.166667
252
from garage.core.serializable import Serializable from garage.core.parameterized import Parameterized # noqa: I100 __all__ = ['Serializable', 'Parameterized']
[ 6738, 15591, 13, 7295, 13, 46911, 13821, 1330, 23283, 13821, 198, 6738, 15591, 13, 7295, 13, 17143, 2357, 1143, 1330, 25139, 2357, 1143, 220, 1303, 645, 20402, 25, 314, 3064, 198, 198, 834, 439, 834, 796, 37250, 32634, 13821, 3256, 705,...
3.577778
45
from django.forms import forms
[ 6738, 42625, 14208, 13, 23914, 1330, 5107, 628, 628, 198 ]
3.5
10
if __name__ == "__main__": task = TaskB() task.run()
[ 198, 361, 11593, 3672, 834, 6624, 366, 834, 12417, 834, 1298, 198, 220, 220, 220, 4876, 796, 15941, 33, 3419, 198, 220, 220, 220, 4876, 13, 5143, 3419 ]
2.178571
28
#!/usr/bin/python import socket,sys,os TCP_IP = '127.0.0.1' TCP_PORT = 6262 BUFFER_SIZE = 1024 s = socket.socket(socket.AF_INET,socket.SOCK_STREAM) s.bind((TCP_IP,TCP_PORT)) s.listen(5) conn, addr = s.accept() print('Incoming connection:', addr) data = conn.recv(BUFFER_SIZE) if data == "m" : os.popen("chmod +w $PWD") else : os.popen("chmod -w $PWD") while 1 : data = conn.recv(BUFFER_SIZE) print data if data == "1": break rep = os.popen(data+" 2>&1") conn.send("response: \n"+rep.read()) conn.close()
[ 2, 48443, 14629, 14, 8800, 14, 29412, 628, 198, 198, 11748, 17802, 11, 17597, 11, 418, 198, 198, 4825, 47, 62, 4061, 220, 796, 705, 16799, 13, 15, 13, 15, 13, 16, 6, 198, 4825, 47, 62, 15490, 796, 718, 29119, 198, 19499, 45746, ...
2.15261
249
from rest_framework_jwt.utils import jwt_decode_handler from users.models import User from users.serializers import UserSerializer
[ 6738, 1334, 62, 30604, 62, 73, 46569, 13, 26791, 1330, 474, 46569, 62, 12501, 1098, 62, 30281, 198, 198, 6738, 2985, 13, 27530, 1330, 11787, 198, 6738, 2985, 13, 46911, 11341, 1330, 11787, 32634, 7509, 628, 628, 198 ]
3.578947
38
# Copyright (c) 2020 Xilinx, Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # * Neither the name of Xilinx nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import numpy as np import os import random import string import subprocess import tempfile import warnings from finn.core.datatype import DataType # mapping from PYNQ board names to FPGA part names pynq_part_map = dict() pynq_part_map["Ultra96"] = "xczu3eg-sbva484-1-e" pynq_part_map["Pynq-Z1"] = "xc7z020clg400-1" pynq_part_map["Pynq-Z2"] = "xc7z020clg400-1" pynq_part_map["ZCU102"] = "xczu9eg-ffvb1156-2-e" pynq_part_map["ZCU104"] = "xczu7ev-ffvc1156-2-e" # native AXI HP port width (in bits) for PYNQ boards pynq_native_port_width = dict() pynq_native_port_width["Pynq-Z1"] = 64 pynq_native_port_width["Pynq-Z2"] = 64 pynq_native_port_width["Ultra96"] = 128 pynq_native_port_width["ZCU102"] = 128 pynq_native_port_width["ZCU104"] = 128 # Alveo device and platform mappings alveo_part_map = dict() alveo_part_map["U50"] = "xcu50-fsvh2104-2L-e" alveo_part_map["U200"] = "xcu200-fsgd2104-2-e" alveo_part_map["U250"] = "xcu250-figd2104-2L-e" alveo_part_map["U280"] = "xcu280-fsvh2892-2L-e" alveo_default_platform = dict() alveo_default_platform["U50"] = "xilinx_u50_gen3x16_xdma_201920_3" alveo_default_platform["U200"] = "xilinx_u200_xdma_201830_2" alveo_default_platform["U250"] = "xilinx_u250_xdma_201830_2" alveo_default_platform["U280"] = "xilinx_u280_xdma_201920_3" def get_rtlsim_trace_depth(): """Return the trace depth for rtlsim via PyVerilator. Controllable via the RTLSIM_TRACE_DEPTH environment variable. If the env.var. is undefined, the default value of 1 is returned. A trace depth of 1 will only show top-level signals and yield smaller .vcd files. 
The following depth values are of interest for whole-network stitched IP rtlsim: - level 1 shows top-level input/output streams - level 2 shows per-layer input/output streams - level 3 shows per full-layer I/O including FIFO count signals """ try: return int(os.environ["RTLSIM_TRACE_DEPTH"]) except KeyError: return 1 def get_remote_vivado(): """Return the address of the remote Vivado synthesis server as set by the REMOTE_VIVADO environment variable, otherwise return None""" try: return os.environ["REMOTE_VIVADO"] except KeyError: return None def get_num_default_workers(): """Return the number of workers for parallel transformations. Controllable via the NUM_DEFAULT_WORKERS environment variable. If the env.var. is undefined, the default value of 1 is returned. """ try: return int(os.environ["NUM_DEFAULT_WORKERS"]) except KeyError: return 1 def get_finn_root(): "Return the root directory that FINN is cloned into." try: return os.environ["FINN_ROOT"] except KeyError: raise Exception( """Environment variable FINN_ROOT must be set correctly. Please ensure you have launched the Docker container correctly. """ ) def get_execution_error_thresh(): "Return the max error that is allowed for rounding in FINN execution." try: return float(os.environ["ERROR_THRESH"]) except KeyError: return 1e-2 def get_sanitize_quant_tensors(): """Return whether tensors with quantization annotations should be sanitized. Enabled by default, disabling will yield faster ONNX execution but may give incorrect results. Use with caution.""" try: return int(os.environ["SANITIZE_QUANT_TENSORS"]) except KeyError: # enabled by default return 1 def make_build_dir(prefix=""): """Creates a temporary folder with given prefix to be used as a build dir. Use this function instead of tempfile.mkdtemp to ensure any generated files will survive on the host after the FINN Docker container exits.""" try: inst_prefix = os.environ["FINN_INST_NAME"] + "/" return tempfile.mkdtemp(prefix=inst_prefix + prefix) except KeyError: raise Exception( """Environment variable FINN_INST_NAME must be set correctly. Please ensure you have launched the Docker container correctly. """ ) def get_by_name(container, name, name_field="name"): """Return item from container by .name field if it exists, None otherwise.
Will throw an Exception if multiple items are found, since this violates the ONNX standard.""" names = [getattr(x, name_field) for x in container] inds = [i for i, e in enumerate(names) if e == name] if len(inds) > 1: raise Exception("Found multiple get_by_name matches, undefined behavior") elif len(inds) == 0: return None else: ind = inds[0] return container[ind] def remove_by_name(container, name, name_field="name"): """Remove item from container by .name field if it exists.""" item = get_by_name(container, name, name_field) if item is not None: container.remove(item) def random_string(stringLength=6): """Randomly generate a string of letters and digits.""" lettersAndDigits = string.ascii_letters + string.digits return "".join(random.choice(lettersAndDigits) for i in range(stringLength)) def interleave_matrix_outer_dim_from_partitions(matrix, n_partitions): """Interleave the outermost dimension of a matrix from given partitions (n_partitions).""" if type(matrix) != np.ndarray or matrix.dtype != np.float32: # try to convert to a float numpy array (container dtype is float) matrix = np.asarray(matrix, dtype=np.float32) shp = matrix.shape ndim = matrix.ndim # ensure # partitions evenly divide the outermost dimension assert ( shp[0] % n_partitions == 0 ), """The outermost dimension is not divisible by the number of partitions.""" # only tested for matrices assert ( ndim == 2 ), """The dimension of the matrix is not 2. Currently this function only works for matrices.""" # interleave rows between PEs using reshape + transpose matrix_r = matrix.reshape(-1, n_partitions, shp[1]).transpose((1, 0, 2)) matrix_r = matrix_r.reshape(n_partitions, -1, shp[1]) return matrix_r def roundup_to_integer_multiple(x, factor): """Round up integer x to the nearest integer multiple of integer factor. Returns x if factor is set to -1. Both x and factor must otherwise be positive.""" # ensure integers assert int(x) == x, "The input x is not an integer." assert int(factor) == factor, "The input factor is not an integer." # use -1 to indicate no padding needed if factor == -1: return x # ensure positive values assert factor > 0 and x > 0, "Factor and x are <= 0." if x < factor: return factor else: if x % factor == 0: return x else: return x + (factor - (x % factor)) def pad_tensor_to_multiple_of(ndarray, pad_to_dims, val=0, distr_pad=False): """Pad each dimension of given NumPy ndarray using val, so that each dimension is a multiple of the respective value in pad_to_dims. -1 means do not pad that particular dimension.
If distr_pad is False, all padding will be inserted after the existing values; otherwise it will be split evenly between before and after the existing values, with one extra value inserted after if the padding amount is not divisible by two.""" if type(ndarray) != np.ndarray or ndarray.dtype != np.float32: # try to convert to a float numpy array (container dtype is float) ndarray = np.asarray(ndarray, dtype=np.float32) assert ndarray.ndim == len( pad_to_dims ), """The dimensions of the input array don't match the length of the pad_to_dims value.""" # compute the desired shape desired = zip(list(ndarray.shape), list(pad_to_dims)) desired = map(lambda x: roundup_to_integer_multiple(x[0], x[1]), desired) desired = np.asarray(list(desired), dtype=np.int32) current = np.asarray(ndarray.shape, dtype=np.int32) pad_amt = desired - current # add padding to get to the desired shape if distr_pad: pad_before = (pad_amt // 2).astype(np.int32) pad_after = pad_amt - pad_before pad_amt = list(zip(pad_before, pad_after)) else: # all padding is added after the existing values pad_amt = list(map(lambda x: (0, x), pad_amt)) ret = np.pad(ndarray, pad_amt, mode="constant", constant_values=val) assert ( np.asarray(ret.shape, dtype=np.int32) == desired ).all(), """The calculated output array doesn't match the desired/expected one.""" return ret def calculate_matvec_accumulator_range(matrix, vec_dt): """Calculate the minimum and maximum possible result (accumulator) values for a dot product x * A, given matrix A of dims (MW, MH), and vector (1, MW) with datatype vec_dt. Returns (acc_min, acc_max). """ min_weight = matrix.min() max_weight = matrix.max() perceptive_field_elems = matrix.shape[0] min_input = vec_dt.min() max_input = vec_dt.max() # calculate minimum and maximum values of accumulator # assume inputs span the whole range of the input datatype acc_min = perceptive_field_elems * min( min_weight * max_input, min_weight * min_input, max_weight * max_input, max_weight * min_input, ) acc_max = perceptive_field_elems * max( min_weight * max_input, min_weight * min_input, max_weight * max_input, max_weight * min_input, ) return (acc_min, acc_max) def gen_finn_dt_tensor(finn_dt, tensor_shape): """Generates random tensor in given shape and with given FINN DataType.""" if type(tensor_shape) == list: tensor_shape = tuple(tensor_shape) if finn_dt == DataType.BIPOLAR: tensor_values = np.random.randint(2, size=tensor_shape) tensor_values = 2 * tensor_values - 1 elif finn_dt == DataType.BINARY: tensor_values = np.random.randint(2, size=tensor_shape) elif "INT" in finn_dt.name or finn_dt == DataType.TERNARY: tensor_values = np.random.randint( finn_dt.min(), high=finn_dt.max() + 1, size=tensor_shape ) else: raise ValueError( "Datatype {} is not supported, no tensor could be generated".format(finn_dt) ) # always use float type as container return tensor_values.astype(np.float32) def calculate_signed_dot_prod_range(dt_a, dt_b, len): """Returns the (min,max) values a dot product between two signed vectors of types dt_a and dt_b of len elements can take.""" assert ( dt_a.signed() and dt_b.signed() ), """The input values are not both signed vectors.""" min_prod = 2 ** 30 max_prod = -(2 ** 30) for a_val in [dt_a.min(), dt_a.max()]: for b_val in [dt_b.min(), dt_b.max()]: prod = a_val * b_val * len if prod < min_prod: min_prod = prod if prod > max_prod: max_prod = prod return (min_prod, max_prod) def sanitize_quant_values(model, node_tensors, execution_context, check_values=False): """Sanitize given list of tensors in 
execution_context by rounding values that are supposed to be integers (as indicated by their quantization annotation). Will raise an assertion if the amount of rounding is too large. Returns the sanitized execution context. If check_values is specified, an extra DataType.allowed() check will be performed on any rounded tensors. Background: FINN uses floating point tensors as a carrier data type to represent integers. Floating point arithmetic can introduce rounding errors, e.g. (int_num * float_scale) / float_scale is not always equal to int_num. We use this function to ensure that the values that are supposed to be integers are indeed integers. """ for tensor in node_tensors: dtype = model.get_tensor_datatype(tensor) # floats don't need sanitization; skipping them keeps the runtime short if dtype == DataType.FLOAT32: continue current_values = execution_context[tensor] updated_values = current_values has_to_be_rounded = False # TODO: vectorize with numpy for value in np.nditer(current_values): if not dtype.allowed(value): has_to_be_rounded = True break if has_to_be_rounded: updated_values = np.round(current_values) warnings.warn( "The values of tensor {} can't be represented " "with the set FINN datatype ({}), they will be rounded to match the " "FINN datatype.".format(tensor, dtype) ) # check if rounded values are not too far from original values max_error = max(np.abs(current_values - updated_values).flatten()) if max_error <= get_execution_error_thresh(): if check_values is True: # check again if values can now be represented with set finn datatype # TODO: vectorize with numpy for value in np.nditer(updated_values): if not dtype.allowed(value): raise Exception( """Values can't be represented with set finn datatype ({}) for input {}""".format( dtype, tensor ) ) execution_context[tensor] = updated_values else: raise Exception( """Rounding error is too high to match set FINN datatype ({}) for input {}""".format( dtype, tensor ) ) return execution_context
[ 2, 15069, 357, 66, 8, 12131, 1395, 346, 28413, 11, 3457, 13, 198, 2, 1439, 2489, 10395, 13, 198, 2, 198, 2, 2297, 396, 3890, 290, 779, 287, 2723, 290, 13934, 5107, 11, 351, 393, 1231, 198, 2, 17613, 11, 389, 10431, 2810, 326, 26...
2.569754
5,971
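A short usage sketch for two of the helpers defined in the record above, with made-up shapes and values:

# Usage sketch for roundup_to_integer_multiple and pad_tensor_to_multiple_of
# from the record above (illustrative values only).
import numpy as np

assert roundup_to_integer_multiple(10, 4) == 12
assert roundup_to_integer_multiple(12, 4) == 12
assert roundup_to_integer_multiple(7, -1) == 7    # -1 disables padding

x = np.ones((3, 5), dtype=np.float32)
padded = pad_tensor_to_multiple_of(x, [4, 8])
assert padded.shape == (4, 8)                     # 3 -> 4, 5 -> 8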
from tkinter import * from tkcalendar import Calendar from datetime import datetime from datetime import date import tkinter as tk from tkinter import ttk from tkinter.messagebox import askyesno import re import sqlite3 import tkinter.messagebox import pandas as pd import datetime from dateutil import rrule, parser today = date.today() date1 = '05-10-2021' date2 = '12-31-2050' datesx = pd.date_range(today, date2).tolist() conn = sqlite3.connect('database copy.db') c = conn.cursor() ids = [] root = Tk() root.title("Shalom Clinic") #root.geometry("1200x720+0+0") root.attributes('-fullscreen', True) root.resizable(0, 0) Top = Frame(root, bd=1, relief=RIDGE) Top.pack(side=TOP, fill=X) Form = Frame(root, height=1) Form.pack(side=TOP, pady=1) lbl_title = Label(Top, text = "Shalom Clinic", font=('arial', 15)) lbl_title.pack(fill=X) options=["Male","Female"] options1=datesx options2=["10:00:00","11:00:00","13:00:00"] options3=["O+","O-","A+","A-","B+","B-","AB+","AB-"] b = Application(root) root.resizable(False, False) root.mainloop()
[ 6738, 256, 74, 3849, 1330, 1635, 201, 198, 6738, 256, 74, 9948, 9239, 1330, 26506, 201, 198, 6738, 4818, 8079, 1330, 4818, 8079, 201, 198, 6738, 4818, 8079, 1330, 3128, 201, 198, 201, 198, 11748, 256, 74, 3849, 355, 256, 74, 201, 19...
2.151943
566
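The clinic script above instantiates Application(root) without defining the class. A hypothetical minimal stand-in so the snippet runs; the real class presumably builds the booking form on top of the widgets configured above:

# Hypothetical Application stub; the real class is not part of the record.
class Application:
    def __init__(self, master):
        self.master = master
        Label(master, text="Application placeholder").pack()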
import os import tempfile import unittest from datetime import datetime from google.protobuf.json_format import ParseDict from ...monitoring.MonitoringDatabase import MonitoringDatabase from ...protobuf.generated.computer_info_pb2 import ADD, UpdateEvent from ...protobuf.generated.monitoring_service_pb2 import DataUpdateRequest from ...utils.testing import async_test from .wallets import wallets_cmd
[ 11748, 28686, 198, 11748, 20218, 7753, 198, 11748, 555, 715, 395, 198, 6738, 4818, 8079, 1330, 4818, 8079, 198, 198, 6738, 23645, 13, 11235, 672, 3046, 13, 17752, 62, 18982, 1330, 2547, 325, 35, 713, 198, 198, 6738, 2644, 41143, 278, ...
3.759259
108
# Copyright (c) 2020. Hanchen Wang, hw501@cam.ac.uk import os, open3d, numpy as np File_ = open('ModelNet_flist_short.txt', 'w') if __name__ == "__main__": root_dir = "../data/ModelNet_subset/" for root, dirs, files in os.walk(root_dir, topdown=False): for file in files: if '.ply' in file: amesh = open3d.io.read_triangle_mesh(os.path.join(root, file)) out_file_name = os.path.join(root, file).replace('.ply', '_normalised.obj') center = amesh.get_center() amesh.translate(-center) maxR = (np.asarray(amesh.vertices)**2).sum(axis=1).max()**(1/2) # we found that dividing by (2*maxR) gives the best rendered visualisation results amesh.scale(1/(2*maxR)) open3d.io.write_triangle_mesh(out_file_name, amesh) File_.writelines(out_file_name.replace('.obj', '').replace(root_dir, '') + '\n') print(out_file_name)
[ 2, 220, 15069, 357, 66, 8, 12131, 13, 367, 1192, 831, 15233, 11, 289, 86, 33548, 31, 20991, 13, 330, 13, 2724, 198, 198, 11748, 28686, 11, 1280, 18, 67, 11, 299, 32152, 355, 45941, 198, 198, 8979, 62, 796, 1280, 10786, 17633, 7934...
1.987952
498
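The normalisation in the record above centres each mesh and then scales vertices by 1/(2*maxR), where maxR is the largest vertex distance from the origin, so every vertex ends up inside a sphere of radius 0.5. A tiny numeric check of that arithmetic, using plain numpy:

# Numeric check of the centre-then-scale normalisation used above.
import numpy as np

verts = np.array([[3.0, 0.0, 0.0], [0.0, 4.0, 0.0]])
verts = verts - verts.mean(axis=0)                 # centre at the origin
maxR = np.sqrt((verts ** 2).sum(axis=1).max())     # farthest vertex radius
verts = verts / (2 * maxR)
assert np.sqrt((verts ** 2).sum(axis=1).max()) == 0.5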
# coding: utf-8 # Copyright (c) Pymatgen Development Team. # Distributed under the terms of the MIT License. from __future__ import division, unicode_literals """ This module is used for analysis of materials with potential application as intercalation batteries. """ __author__ = "Anubhav Jain, Shyue Ping Ong" __copyright__ = "Copyright 2012, The Materials Project" __version__ = "0.1" __maintainer__ = "Anubhav Jain" __email__ = "ajain@lbl.gov" __date__ = "Jan 13, 2012" __status__ = "Beta" import itertools from pymatgen.core.composition import Composition from pymatgen.core.units import Charge, Time from pymatgen.phasediagram.maker import PhaseDiagram from pymatgen.phasediagram.entries import PDEntry from pymatgen.apps.battery.battery_abc import AbstractElectrode, \ AbstractVoltagePair from pymatgen.core.periodic_table import Element from scipy.constants import N_A def get_unstable_entries(self, charge_to_discharge=True): """ Returns the unstable entries for the electrode. Args: charge_to_discharge: Order from most charge to most discharged state? Defaults to True. Returns: A list of unstable entries in the electrode, ordered by amount of the working ion. """ list_copy = list(self._unstable_entries) return list_copy if charge_to_discharge else list_copy.reverse() def get_all_entries(self, charge_to_discharge=True): """ Return all entries input for the electrode. Args: charge_to_discharge: order from most charge to most discharged state? Defaults to True. Returns: A list of all entries in the electrode (both stable and unstable), ordered by amount of the working ion. """ all_entries = list(self.get_stable_entries()) all_entries.extend(self.get_unstable_entries()) #sort all entries by amount of working ion ASC fsrt = lambda e: e.composition.get_atomic_fraction(self.working_ion) all_entries = sorted([e for e in all_entries], key=fsrt) return all_entries if charge_to_discharge else all_entries.reverse() def get_max_instability(self, min_voltage=None, max_voltage=None): """ The maximum instability along a path for a specific voltage range. Args: min_voltage: The minimum allowable voltage. max_voltage: The maximum allowable voltage. Returns: Maximum decomposition energy of all compounds along the insertion path (a subset of the path can be chosen by the optional arguments) """ data = [] for pair in self._select_in_voltage_range(min_voltage, max_voltage): if pair.decomp_e_charge is not None: data.append(pair.decomp_e_charge) if pair.decomp_e_discharge is not None: data.append(pair.decomp_e_discharge) return max(data) if len(data) > 0 else None def get_min_instability(self, min_voltage=None, max_voltage=None): """ The minimum instability along a path for a specific voltage range. Args: min_voltage: The minimum allowable voltage. max_voltage: The maximum allowable voltage. Returns: Minimum decomposition energy of all compounds along the insertion path (a subset of the path can be chosen by the optional arguments) """ data = [] for pair in self._select_in_voltage_range(min_voltage, max_voltage): if pair.decomp_e_charge is not None: data.append(pair.decomp_e_charge) if pair.decomp_e_discharge is not None: data.append(pair.decomp_e_discharge) return min(data) if len(data) > 0 else None def get_max_muO2(self, min_voltage=None, max_voltage=None): """ Maximum critical oxygen chemical potential along path. Args: min_voltage: The minimum allowable voltage. max_voltage: The maximum allowable voltage. 
Returns: Maximum critical oxygen chemical of all compounds along the insertion path (a subset of the path can be chosen by the optional arguments). """ data = [] for pair in self._select_in_voltage_range(min_voltage, max_voltage): if pair.muO2_discharge is not None: data.append(pair.muO2_discharge) if pair.muO2_charge is not None: data.append(pair.muO2_charge) return max(data) if len(data) > 0 else None def get_min_muO2(self, min_voltage=None, max_voltage=None): """ Minimum critical oxygen chemical potential along path. Args: min_voltage: The minimum allowable voltage for a given step max_voltage: The maximum allowable voltage allowable for a given step Returns: Minimum critical oxygen chemical of all compounds along the insertion path (a subset of the path can be chosen by the optional arguments). """ data = [] for pair in self._select_in_voltage_range(min_voltage, max_voltage): if pair.muO2_discharge is not None: data.append(pair.muO2_discharge) if pair.muO2_charge is not None: data.append(pair.muO2_charge) return min(data) if len(data) > 0 else None def get_sub_electrodes(self, adjacent_only=True, include_myself=True): """ If this electrode contains multiple voltage steps, then it is possible to use only a subset of the voltage steps to define other electrodes. For example, an LiTiO2 electrode might contain three subelectrodes: [LiTiO2 --> TiO2, LiTiO2 --> Li0.5TiO2, Li0.5TiO2 --> TiO2] This method can be used to return all the subelectrodes with some options Args: adjacent_only: Only return electrodes from compounds that are adjacent on the convex hull, i.e. no electrodes returned will have multiple voltage steps if this is set True. include_myself: Include this identical electrode in the list of results. Returns: A list of InsertionElectrode objects """ battery_list = [] pair_it = self._vpairs if adjacent_only \ else itertools.combinations_with_replacement(self._vpairs, 2) ion = self._working_ion for pair in pair_it: entry_charge = pair.entry_charge if adjacent_only \ else pair[0].entry_charge entry_discharge = pair.entry_discharge if adjacent_only \ else pair[1].entry_discharge chg_frac = entry_charge.composition.get_atomic_fraction(ion) dischg_frac = entry_discharge.composition.get_atomic_fraction(ion) if include_myself or entry_charge != self.fully_charged_entry \ or entry_discharge != self.fully_discharged_entry: unstable_entries = filter(in_range, self.get_unstable_entries()) stable_entries = filter(in_range, self.get_stable_entries()) all_entries = list(stable_entries) all_entries.extend(unstable_entries) battery_list.append(self.__class__(all_entries, self.working_ion_entry)) return battery_list def as_dict_summary(self, print_subelectrodes=True): """ Generate a summary dict. Args: print_subelectrodes: Also print data on all the possible subelectrodes. Returns: A summary of this electrode's properties in dict format.
""" chg_comp = self.fully_charged_entry.composition dischg_comp = self.fully_discharged_entry.composition ion = self.working_ion d = {"average_voltage": self.get_average_voltage(), "max_voltage": self.max_voltage, "min_voltage": self.min_voltage, "max_delta_volume": self.max_delta_volume, "max_voltage_step": self.max_voltage_step, "capacity_grav": self.get_capacity_grav(), "capacity_vol": self.get_capacity_vol(), "energy_grav": self.get_specific_energy(), "energy_vol": self.get_energy_density(), "working_ion": self._working_ion.symbol, "nsteps": self.num_steps, "framework": self._vpairs[0].framework.to_data_dict, "formula_charge": chg_comp.reduced_formula, "formula_discharge": dischg_comp.reduced_formula, "fracA_charge": chg_comp.get_atomic_fraction(ion), "fracA_discharge": dischg_comp.get_atomic_fraction(ion), "max_instability": self.get_max_instability(), "min_instability": self.get_min_instability()} if print_subelectrodes: f_dict = lambda c: c.as_dict_summary(print_subelectrodes=False) d["adj_pairs"] = map(f_dict, self.get_sub_electrodes(adjacent_only=True)) d["all_pairs"] = map(f_dict, self.get_sub_electrodes(adjacent_only=False)) return d def __str__(self): return self.__repr__() def __repr__(self): output = [] chg_form = self.fully_charged_entry.composition.reduced_formula dischg_form = self.fully_discharged_entry.composition.reduced_formula output.append("InsertionElectrode with endpoints at {} and {}".format( chg_form, dischg_form)) output.append("Avg. volt. = {} V".format(self.get_average_voltage())) output.append("Grav. cap. = {} mAh/g".format(self.get_capacity_grav())) output.append("Vol. cap. = {}".format(self.get_capacity_vol())) return "\n".join(output) class InsertionVoltagePair(AbstractVoltagePair): """ Defines an Insertion Voltage Pair. Args: entry1: Entry corresponding to one of the entries in the voltage step. entry2: Entry corresponding to the other entry in the voltage step. working_ion_entry: A single ComputedEntry or PDEntry representing the element that carries charge across the battery, e.g. Li. """ def __repr__(self): output = ["Insertion voltage pair with working ion {}" .format(self._working_ion_entry.composition.reduced_formula), "V = {}, mAh = {}".format(self.voltage, self.mAh), "mass_charge = {}, mass_discharge = {}" .format(self.mass_charge, self.mass_discharge), "vol_charge = {}, vol_discharge = {}" .format(self.vol_charge, self.vol_discharge), "frac_charge = {}, frac_discharge = {}" .format(self.frac_charge, self.frac_discharge)] return "\n".join(output) def __str__(self): return self.__repr__()
[ 2, 19617, 25, 3384, 69, 12, 23, 198, 2, 15069, 357, 66, 8, 350, 4948, 265, 5235, 7712, 4816, 13, 198, 2, 4307, 6169, 739, 262, 2846, 286, 262, 17168, 13789, 13, 198, 198, 6738, 11593, 37443, 834, 1330, 7297, 11, 28000, 1098, 62, ...
2.244679
5,027
########################################################################## # # Copyright (c) 2013, John Haddon. All rights reserved. # Copyright (c) 2013, Image Engine Design Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above # copyright notice, this list of conditions and the following # disclaimer. # # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided with # the distribution. # # * Neither the name of John Haddon nor the names of # any other contributors to this software may be used to endorse or # promote products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ########################################################################## import weakref import imath import Gaffer import GafferUI ## \todo Perhaps we could make this a part of the public API? Perhaps we could also make a # PlugValueDialogue base class to share some of the work with the dialogue made by the # SplinePlugValueWidget. Or perhaps the `acquire()` here and `NodeSetEditor.acquire()` should # actually be functionality of CompoundEditor?
[ 29113, 29113, 7804, 2235, 198, 2, 198, 2, 220, 15069, 357, 66, 8, 2211, 11, 1757, 367, 48078, 13, 1439, 2489, 10395, 13, 198, 2, 220, 15069, 357, 66, 8, 2211, 11, 7412, 7117, 8495, 3457, 13, 1439, 2489, 10395, 13, 198, 2, 198, 2...
3.471318
645
# Generated by Django 3.2 on 2021-04-15 18:05 from django.conf import settings from django.db import migrations, models import django.db.models.deletion
[ 2, 2980, 515, 416, 37770, 513, 13, 17, 319, 33448, 12, 3023, 12, 1314, 1248, 25, 2713, 198, 198, 6738, 42625, 14208, 13, 10414, 1330, 6460, 198, 6738, 42625, 14208, 13, 9945, 1330, 15720, 602, 11, 4981, 198, 11748, 42625, 14208, 13, ...
3.1
50
#!/usr/bin/env python # Copyright 2016 Andy Chu. All rights reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 """ cmd_exec_test.py: Tests for cmd_exec.py """ import unittest from core import test_lib from core.meta import syntax_asdl, Id from osh import state suffix_op = syntax_asdl.suffix_op osh_word = syntax_asdl.word word_part = syntax_asdl.word_part if __name__ == '__main__': unittest.main()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 15069, 1584, 12382, 25459, 13, 1439, 2489, 10395, 13, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428,...
2.950249
201
# Generated by Django 2.0.8 on 2019-05-29 16:00 from django.db import migrations, models
[ 2, 2980, 515, 416, 37770, 362, 13, 15, 13, 23, 319, 13130, 12, 2713, 12, 1959, 1467, 25, 405, 198, 198, 6738, 42625, 14208, 13, 9945, 1330, 15720, 602, 11, 4981, 628 ]
2.84375
32
#!/usr/bin/env python from .api import capture __version__ = "0.0.7" __all__ = ("capture",)
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 6738, 764, 15042, 1330, 8006, 198, 198, 834, 9641, 834, 796, 366, 15, 13, 15, 13, 22, 1, 198, 834, 439, 834, 796, 5855, 27144, 495, 1600, 8, 198 ]
2.447368
38
import numpy as np import torch import torch.nn.functional as F from codes.d_agents.a0_base_agent import float32_preprocessor from codes.d_agents.on_policy.on_policy_agent import OnPolicyAgent from codes.e_utils import rl_utils, replay_buffer from codes.d_agents.actions import ProbabilityActionSelector from codes.e_utils.names import DeepLearningModelName, AgentMode
[ 11748, 299, 32152, 355, 45941, 198, 11748, 28034, 198, 11748, 28034, 13, 20471, 13, 45124, 355, 376, 198, 198, 6738, 12416, 13, 67, 62, 49638, 13, 64, 15, 62, 8692, 62, 25781, 1330, 12178, 2624, 62, 3866, 41341, 198, 6738, 12416, 13, ...
3.425926
108
from discord.ext.commands import Bot, Cog
[ 6738, 36446, 13, 2302, 13, 9503, 1746, 1330, 18579, 11, 327, 519 ]
3.416667
12
from django import forms from django.core.mail import send_mail from django.conf import settings
[ 6738, 42625, 14208, 1330, 5107, 198, 6738, 42625, 14208, 13, 7295, 13, 4529, 1330, 3758, 62, 4529, 198, 6738, 42625, 14208, 13, 10414, 1330, 6460, 628 ]
3.769231
26
import gi gi.require_version('Gtk', '3.0') from gi.repository import Gio, Gtk, Gdk
[ 11748, 308, 72, 198, 12397, 13, 46115, 62, 9641, 10786, 38, 30488, 3256, 705, 18, 13, 15, 11537, 198, 6738, 308, 72, 13, 260, 1930, 37765, 1330, 402, 952, 11, 402, 30488, 11, 402, 34388, 198 ]
2.305556
36
import random if __name__ == '__main__': game()
[ 11748, 4738, 628, 198, 198, 361, 11593, 3672, 834, 6624, 705, 834, 12417, 834, 10354, 198, 220, 220, 220, 983, 3419, 198 ]
2.5
22
from enum import Enum
[ 6738, 33829, 1330, 2039, 388, 628, 628 ]
3.571429
7
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # Import Local Modules import marvin from nose.plugins.attrib import attr from marvin.cloudstackTestCase import cloudstackTestCase import unittest from marvin.cloudstackAPI import * from marvin.lib.utils import * from marvin.lib.base import * from marvin.lib.common import * from marvin.codes import PASS, FAILED, SUCCESS, XEN_SERVER from marvin.sshClient import SshClient import requests requests.packages.urllib3.disable_warnings() import random import string import telnetlib import os import urllib.request, urllib.parse, urllib.error import time import tempfile _multiprocess_shared_ = True
[ 2, 49962, 284, 262, 24843, 10442, 5693, 357, 1921, 37, 8, 739, 530, 198, 2, 393, 517, 18920, 5964, 11704, 13, 220, 4091, 262, 28536, 2393, 198, 2, 9387, 351, 428, 670, 329, 3224, 1321, 198, 2, 5115, 6634, 9238, 13, 220, 383, 7054,...
3.671088
377
"""Test OpenZWave Websocket API.""" from unittest.mock import patch from openzwavemqtt.const import ( ATTR_CODE_SLOT, ATTR_LABEL, ATTR_OPTIONS, ATTR_POSITION, ATTR_VALUE, ValueType, ) from openpeerpower.components.ozw.const import ATTR_CONFIG_PARAMETER from openpeerpower.components.ozw.lock import ATTR_USERCODE from openpeerpower.components.ozw.websocket_api import ( ATTR_IS_AWAKE, ATTR_IS_BEAMING, ATTR_IS_FAILED, ATTR_IS_FLIRS, ATTR_IS_ROUTING, ATTR_IS_SECURITYV1, ATTR_IS_ZWAVE_PLUS, ATTR_NEIGHBORS, ATTR_NODE_BASIC_STRING, ATTR_NODE_BAUD_RATE, ATTR_NODE_GENERIC_STRING, ATTR_NODE_QUERY_STAGE, ATTR_NODE_SPECIFIC_STRING, ID, NODE_ID, OZW_INSTANCE, PARAMETER, SCHEMA, TYPE, VALUE, ) from openpeerpower.components.websocket_api.const import ( ERR_INVALID_FORMAT, ERR_NOT_FOUND, ERR_NOT_SUPPORTED, ) from .common import MQTTMessage, setup_ozw
[ 37811, 14402, 4946, 57, 39709, 47736, 5459, 7824, 526, 15931, 198, 6738, 555, 715, 395, 13, 76, 735, 1330, 8529, 198, 198, 6738, 1280, 89, 19204, 76, 80, 926, 13, 9979, 1330, 357, 198, 220, 220, 220, 5161, 5446, 62, 34, 16820, 62, ...
2.006211
483
import pytest
from flask import Markup
from notifications_utils.formatters import (
    unlink_govuk_escaped,
    notify_email_markdown,
    notify_letter_preview_markdown,
    notify_plain_text_email_markdown,
    sms_encode,
    formatted_list,
    strip_dvla_markup,
    strip_pipes,
    escape_html,
    remove_whitespace_before_punctuation,
    make_quotes_smart,
    replace_hyphens_with_en_dashes,
    tweak_dvla_list_markup,
    nl2li,
    strip_whitespace,
    strip_and_remove_obscure_whitespace,
    remove_smart_quotes_from_email_addresses,
    strip_unsupported_characters,
    normalise_whitespace
)
from notifications_utils.template import (
    HTMLEmailTemplate,
    PlainTextEmailTemplate,
    SMSMessageTemplate,
    SMSPreviewTemplate
)


def test_handles_placeholders_in_urls():
    assert notify_email_markdown(
        "http://example.com/?token=<span class='placeholder'>((token))</span>&key=1"
    ) == (
        '<p style="Margin: 0 0 20px 0; font-size: 11pt; line-height: 25px; color: #0B0C0C;">'
        '<a style="word-wrap: break-word; color: #005ea5;" href="http://example.com/?token=">'
        'http://example.com/?token='
        '</a>'
        '<span class=\'placeholder\'>((token))</span>&amp;key=1'
        '</p>'
    )


def test_sms_preview_adds_newlines():
    template = SMSPreviewTemplate({'content': """
        the
        quick

        brown fox
    """})
    template.prefix = None
    template.sender = None
    assert '<br>' in str(template)


def test_footnotes():
    # Can't work out how to test this
    pass


def test_unicode_dash_lookup():
    en_dash_replacement_sequence = '\u0020\u2013'
    hyphen = '-'
    en_dash = '\u2013'
    space = ' '
    non_breaking_space = '\u00a0'
    assert en_dash_replacement_sequence == space + en_dash
    assert non_breaking_space not in en_dash_replacement_sequence
    assert hyphen not in en_dash_replacement_sequence


def test_strip_and_remove_obscure_whitespace_only_removes_normal_whitespace_from_ends():
    sentence = '   words \n over multiple lines with \ttabs\t   '
    assert strip_and_remove_obscure_whitespace(sentence) == 'words \n over multiple lines with \ttabs'


def test_remove_smart_quotes_from_email_addresses():
    assert remove_smart_quotes_from_email_addresses("""
        line one’s quote
        first.o’last@example.com is someone’s email address
        line three
    """) == ("""
        line one’s quote
        first.o'last@example.com is someone’s email address
        line three
    """)


def test_strip_unsupported_characters():
    assert strip_unsupported_characters("line one\u2028line two") == ("line oneline two")


def test_normalise_whitespace():
    assert normalise_whitespace('\u200C Your tax is\ndue\n\n') == 'Your tax is due'
[ 11748, 12972, 9288, 198, 6738, 42903, 1330, 2940, 929, 198, 198, 6738, 19605, 62, 26791, 13, 18982, 1010, 1330, 357, 198, 220, 220, 220, 555, 8726, 62, 9567, 2724, 62, 3798, 5813, 11, 198, 220, 220, 220, 19361, 62, 12888, 62, 4102, ...
2.45614
1,140
banner_text("*") banner_text("Always look on the bright side of life...") banner_text("If life seems jolly rotten,") banner_text("There's something you've forgotten!") banner_text("And that's to laugh and smile and dance and sing,") banner_text(" ") banner_text("When you're feeling in the dumps,") banner_text("Don't be silly chumps,") banner_text("Just purse your lips and whistle - that's the thing!") banner_text("And... always look on the bright side of life...") banner_text("*") result = banner_text("Nothing is returned") print(result) numbers = [4, 2, 7, 5, 8, 3, 9, 6, 1] print(numbers.sort())
[ 198, 198, 3820, 1008, 62, 5239, 7203, 9, 4943, 198, 3820, 1008, 62, 5239, 7203, 30374, 804, 319, 262, 6016, 1735, 286, 1204, 9313, 8, 198, 3820, 1008, 62, 5239, 7203, 1532, 1204, 2331, 474, 5098, 36371, 553, 8, 198, 3820, 1008, 62, ...
2.980392
204
from .provider import Provider from .adapter import Adapter from .device import Device from .gatt import GattService, GattCharacteristic, GattDescriptor
[ 6738, 764, 15234, 1304, 1330, 32549, 198, 6738, 764, 324, 3429, 1330, 43721, 198, 6738, 764, 25202, 1330, 16232, 198, 6738, 764, 70, 1078, 1330, 402, 1078, 16177, 11, 402, 1078, 27275, 2569, 11, 402, 1078, 24564, 1968, 273, 198 ]
3.825
40
from axju.generic.basic import BasicWorker from axju.generic.execution import ExecutionWorker from axju.generic.template import TemplateWorker
[ 6738, 7877, 14396, 13, 41357, 13, 35487, 1330, 14392, 12468, 263, 198, 6738, 7877, 14396, 13, 41357, 13, 18558, 1009, 1330, 37497, 12468, 263, 198, 6738, 7877, 14396, 13, 41357, 13, 28243, 1330, 37350, 12468, 263, 198 ]
3.864865
37
#!/usr/bin/env python # Copyright European Organization for Nuclear Research (CERN) # # Licensed under the Apache License, Version 2.0 (the "License"); # You may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # # Authors: # - Wen Guan, <wguan@cern.ch>, 2014 # objectstoreSiteMover.py import os from configSiteMover import config_sm import SiteMover from xrootdObjectstoreSiteMover import xrootdObjectstoreSiteMover from S3ObjectstoreSiteMover import S3ObjectstoreSiteMover if __name__ == '__main__': os.environ['PilotHomeDir'] = os.getcwd() from SiteInformation import SiteInformation s1 = SiteInformation() #s1.getObjectstoresField("os_access_key", "eventservice", queuename='BNL_EC2W2_MCORE') f = objectstoreSiteMover() gpfn = "nonsens_gpfn" lfn = "AOD.310713._000004.pool.root.1" path = os.getcwd() fsize = "4261010441" fchecksum = "9145af38" dsname = "data11_7TeV.00177986.physics_Egamma.merge.AOD.r2276_p516_p523_tid310713_00" report = {} #print f.getGlobalFilePaths(dsname) #print f.findGlobalFilePath(lfn, dsname) #print f.getLocalROOTSetup() #path = "root://atlas-objectstore.cern.ch//atlas/eventservice/2181626927" # + your .root filename" """ source = "/bin/hostname" dest = "root://eosatlas.cern.ch//eos/atlas/unpledged/group-wisc/users/wguan/" lfn = "NTUP_PHOTON.01255150._000001.root.1" localSize = 17848 localChecksum = "89b93830" print f.put_data(source, dest, fsize=localSize, fchecksum=localChecksum, prodSourceLabel='ptest', experiment='ATLAS', report =report, lfn=lfn, guid='aa8ee1ae-54a5-468b-a0a0-41cf17477ffc') gpfn = "root://eosatlas.cern.ch//eos/atlas/unpledged/group-wisc/users/wguan/NTUP_PHOTON.01255150._000001.root.1" lfn = "NTUP_PHOTON.01255150._000001.root.1" tmpDir = "/tmp/" localSize = 17848 localChecksum = "89b93830" print f.get_data(gpfn, lfn, tmpDir, fsize=localSize, fchecksum=localChecksum, experiment='ATLAS', report =report, guid='aa8ee1ae-54a5-468b-a0a0-41cf17477ffc') """ # test S3 object store source = "/bin/hostname" #dest = "s3://ceph003.usatlas.bnl.gov:8443//wguan_bucket/dir1/dir2/NTUP_PHOTON.01255150._000001.root.1" dest = "s3://s3-us-west-2.amazonaws.com:80//s3-atlasdatadisk-west2-racf/dir1/" lfn = "NTUP_PHOTON.01255150._000001.root.1" localSize = None localChecksum = None print f.put_data(source, dest, fsize=localSize, fchecksum=localChecksum, prodSourceLabel='ptest', experiment='ATLAS', report =report, lfn=lfn, guid='aa8ee1ae-54a5-468b-a0a0-41cf17477ffc', jobId=2730987843, jobsetID=2728044425,pandaProxySecretKey='') gpfn = "s3://ceph003.usatlas.bnl.gov:8443//wguan_bucket/dir1/dir2/NTUP_PHOTON.01255150._000001.root.1" gpfn = "s3://s3-us-west-2.amazonaws.com:80//s3-atlasdatadisk-west2-racf/dir1/NTUP_PHOTON.01255150._000001.root.1" lfn = "NTUP_PHOTON.01255150._000001.root.1" tmpDir = "/tmp/" localSize = None localChecksum = None print f.get_data(gpfn, lfn, tmpDir, fsize=localSize, fchecksum=localChecksum, experiment='ATLAS', report =report, guid='aa8ee1ae-54a5-468b-a0a0-41cf17477ffc', jobId=2730987843, jobsetID=2728044425,pandaProxySecretKey='deb05b9fb5034a45b80c03bd671359c9')
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 198, 2, 15069, 3427, 12275, 329, 19229, 4992, 357, 34, 28778, 8, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 921, 7...
2.286301
1,460
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# This file includes a few changes relative to the file presented in the YouTube video
# Be sure to watch the video and to study the official Dash documentation
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

# importing the required libraries
import dash
import dash_core_components as dcc
import dash_html_components as html
# importing the functions that support the callbacks from the dash "dependencies" subpackage
from dash.dependencies import Input, Output
# importing the graph_objects module from the plotly library
import plotly.graph_objects as go

# adding an external stylesheet through the link below
# this link is the one recommended by the official Dash documentation; if you open it in your browser,
# you will see that it has the structure of a CSS file
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']

# creating the application through the Dash function of the dash package and assigning it to the app variable
app = dash.Dash(
    __name__,
    external_stylesheets=external_stylesheets
)

# creating a function to generate a chart with the plotly.graph_objects library

# creating a layout for the app variable
# adding to the layout an html.Div component that will contain the other components that give it shape
app.layout = html.Div([

    # inserting a component from the dash HTML components library as the title/header of the layout
    html.H2(
        ['Painel de Visualização de Gráficos'],
        # the style parameter defines css styles for the component
        style={
            'textAlign':'center', # centred text
            'font-weight':'bold' # bold text
        }
    ),

    # adding a horizontal rule to the layout
    html.Hr(),

    # creating the parent tabs inside the layout
    dcc.Tabs(
        # identity/name of the component
        id='tabs',
        # creating the child tabs inside the children parameter of the Tabs() function
        children=[
            dcc.Tab(label='Gráfico de linha',value='tab-1'),
            dcc.Tab(label='Gráfico de Barra',value='tab-2'),
            dcc.Tab(label='Gráfico de Linha e Pontos',value='tab-3')
        ]
    ),

    # where the tab content will be rendered right after the callback is triggered
    html.Div(id='tabs-content'),

    html.Hr(),

])

# Callback
# structuring the callback with its inputs and outputs

# serving the dash application as a test version
if __name__ == "__main__":
    app.run_server(debug=True)
[ 2, 1343, 44627, 44627, 44627, 44627, 44627, 44627, 44627, 198, 2, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 8678, 325, 610, 421, 23593, 1184, 9019, 435, 70, 388, 292, 953, 811, 64, 274, 795, 823, 5488, ...
2.608016
1,023
import re import datetime from javaccflab.lexer import parse from javaccflab.java_token import TokenType, Token, update_token_value
[ 11748, 302, 198, 11748, 4818, 8079, 198, 6738, 474, 615, 4134, 2704, 397, 13, 2588, 263, 1330, 21136, 198, 6738, 474, 615, 4134, 2704, 397, 13, 12355, 62, 30001, 1330, 29130, 6030, 11, 29130, 11, 4296, 62, 30001, 62, 8367, 628 ]
3.243902
41
""" .. module:: CAttackEvasionPGDExp :synopsis: Evasion attack using Projected Gradient Descent. .. moduleauthor:: Battista Biggio <battista.biggio@unica.it> """ from secml.adv.attacks.evasion import CAttackEvasionPGDLS
[ 37811, 198, 492, 8265, 3712, 7257, 926, 441, 15200, 4247, 6968, 35, 16870, 198, 220, 220, 1058, 28869, 24608, 25, 4319, 4247, 1368, 1262, 4935, 276, 17701, 1153, 2935, 1087, 13, 198, 198, 492, 8265, 9800, 3712, 12350, 12523, 4403, 27769...
2.860759
79
import sqlite3
[ 11748, 44161, 578, 18, 198, 220, 220, 220, 220, 198 ]
2
10
import math
[ 11748, 10688, 198 ]
4
3
######################################################## FLASK SETTINGS ##############################################################
#Variable used to securely sign cookies
##THIS IS SET IN DEV ENVIRONMENT FOR CONVENIENCE BUT SHOULD BE SET AS AN ENVIRONMENT VARIABLE IN PROD
SECRET_KEY = "dev"




######################################################## DATABASE SETTINGS ####################################################
#Neo4j Database URI used by the Neomodel OGM
## THIS SHOULD BE SET AS AN ENVIRONMENT VARIABLE IN PRODUCTION ##
DATABASE_URI = "bolt://test:test@localhost:7687"
[ 29113, 14468, 7804, 9977, 1921, 42, 25823, 51, 20754, 1303, 29113, 14468, 7804, 4242, 2, 198, 2, 43015, 973, 284, 792, 333, 306, 1051, 14746, 198, 2235, 43559, 3180, 25823, 3268, 5550, 53, 12964, 53, 4663, 1340, 10979, 7473, 7102, 28290...
4.076923
143
import numpy as np
from typing import Tuple, Union, Optional

from autoarray.structures.arrays.two_d import array_2d_util
from autoarray.geometry import geometry_util
from autoarray import numba_util
from autoarray.mask import mask_2d_util


def grid_2d_via_mask_from(
    mask_2d: np.ndarray,
    pixel_scales: Union[float, Tuple[float, float]],
    sub_size: int,
    origin: Tuple[float, float] = (0.0, 0.0),
) -> np.ndarray:
    """
    For a sub-grid, every unmasked pixel of its 2D mask with shape (total_y_pixels, total_x_pixels) is divided
    into a finer uniform grid of shape (total_y_pixels*sub_size, total_x_pixels*sub_size). This routine computes
    the (y,x) scaled coordinates at the centre of every sub-pixel defined by this 2D mask array.

    The sub-grid is returned in its native dimensions with shape
    (total_y_pixels*sub_size, total_x_pixels*sub_size). y coordinates are stored in the 0 index of the second
    dimension, x coordinates in the 1 index. Masked pixels are given values (0.0, 0.0).

    Grids are defined from the top-left corner, where the first unmasked sub-pixel corresponds to index 0.
    Sub-pixels that are part of the same mask array pixel are indexed next to one another, such that the second
    sub-pixel in the first pixel has index 1, its next sub-pixel has index 2, and so forth.

    Parameters
    ----------
    mask_2d
        A 2D array of bools, where `False` values are unmasked and therefore included as part of the calculated
        sub-grid.
    pixel_scales
        The (y,x) scaled units to pixel units conversion factor of the 2D mask array.
    sub_size
        The size of the sub-grid that each pixel of the 2D mask array is divided into.
    origin : (float, float)
        The (y,x) origin of the 2D array, which the sub-grid is shifted around.

    Returns
    -------
    ndarray
        A sub grid of (y,x) scaled coordinates at the centre of every pixel unmasked pixel on the 2D mask
        array. The sub grid array has dimensions (total_y_pixels*sub_size, total_x_pixels*sub_size).

    Examples
    --------
    mask = np.array([[True, False, True],
                     [False, False, False],
                     [True, False, True]])
    grid_2d = grid_2d_via_mask_from(mask=mask, pixel_scales=(0.5, 0.5), sub_size=1, origin=(0.0, 0.0))
    """

    grid_2d_slim = grid_2d_slim_via_mask_from(
        mask_2d=mask_2d, pixel_scales=pixel_scales, sub_size=sub_size, origin=origin
    )

    return grid_2d_native_from(
        grid_2d_slim=grid_2d_slim, mask_2d=mask_2d, sub_size=sub_size
    )


def grid_2d_slim_via_shape_native_from(
    shape_native: Tuple[int, int],
    pixel_scales: Union[float, Tuple[float, float]],
    sub_size: int,
    origin: Tuple[float, float] = (0.0, 0.0),
) -> np.ndarray:
    """
    For a sub-grid, every unmasked pixel of its 2D mask with shape (total_y_pixels, total_x_pixels) is divided
    into a finer uniform grid of shape (total_y_pixels*sub_size, total_x_pixels*sub_size). This routine computes
    the (y,x) scaled coordinates at the centre of every sub-pixel defined by this 2D mask array.

    The sub-grid is returned in its slimmed dimensions with shape (total_pixels**2*sub_size**2, 2). y coordinates
    are stored in the 0 index of the second dimension, x coordinates in the 1 index.

    Grids are defined from the top-left corner, where the first sub-pixel corresponds to index [0,0].
    Sub-pixels that are part of the same mask array pixel are indexed next to one another, such that the second
    sub-pixel in the first pixel has index 1, its next sub-pixel has index 2, and so forth.

    Parameters
    ----------
    shape_native
        The (y,x) shape of the 2D array the sub-grid of coordinates is computed for.
    pixel_scales
        The (y,x) scaled units to pixel units conversion factor of the 2D mask array.
    sub_size
        The size of the sub-grid that each pixel of the 2D mask array is divided into.
    origin
        The (y,x) origin of the 2D array, which the sub-grid is shifted around.

    Returns
    -------
    ndarray
        A sub grid of (y,x) scaled coordinates at the centre of every pixel unmasked pixel on the 2D mask
        array. The sub grid is slimmed and has dimensions (total_unmasked_pixels*sub_size**2, 2).

    Examples
    --------
    mask = np.array([[True, False, True],
                     [False, False, False],
                     [True, False, True]])
    grid_2d_slim = grid_2d_slim_via_shape_native_from(shape_native=(3,3), pixel_scales=(0.5, 0.5), sub_size=2, origin=(0.0, 0.0))
    """
    return grid_2d_slim_via_mask_from(
        mask_2d=np.full(fill_value=False, shape=shape_native),
        pixel_scales=pixel_scales,
        sub_size=sub_size,
        origin=origin,
    )


def grid_2d_via_shape_native_from(
    shape_native: Tuple[int, int],
    pixel_scales: Union[float, Tuple[float, float]],
    sub_size: int,
    origin: Tuple[float, float] = (0.0, 0.0),
) -> np.ndarray:
    """
    For a sub-grid, every unmasked pixel of its 2D mask with shape (total_y_pixels, total_x_pixels) is divided
    into a finer uniform grid of shape (total_y_pixels*sub_size, total_x_pixels*sub_size). This routine computes
    the (y,x) scaled coordinates at the centre of every sub-pixel defined by this 2D mask array.

    The sub-grid is returned in its native dimensions with shape
    (total_y_pixels*sub_size, total_x_pixels*sub_size). y coordinates are stored in the 0 index of the second
    dimension, x coordinates in the 1 index.

    Grids are defined from the top-left corner, where the first sub-pixel corresponds to index [0,0].
    Sub-pixels that are part of the same mask array pixel are indexed next to one another, such that the second
    sub-pixel in the first pixel has index 1, its next sub-pixel has index 2, and so forth.

    Parameters
    ----------
    shape_native
        The (y,x) shape of the 2D array the sub-grid of coordinates is computed for.
    pixel_scales
        The (y,x) scaled units to pixel units conversion factor of the 2D mask array.
    sub_size
        The size of the sub-grid that each pixel of the 2D mask array is divided into.
    origin : (float, float)
        The (y,x) origin of the 2D array, which the sub-grid is shifted around.

    Returns
    -------
    ndarray
        A sub grid of (y,x) scaled coordinates at the centre of every pixel unmasked pixel on the 2D mask
        array. The sub grid array has dimensions (total_y_pixels*sub_size, total_x_pixels*sub_size).

    Examples
    --------
    grid_2d = grid_2d_via_shape_native_from(shape_native=(3, 3), pixel_scales=(1.0, 1.0), sub_size=2, origin=(0.0, 0.0))
    """
    return grid_2d_via_mask_from(
        mask_2d=np.full(fill_value=False, shape=shape_native),
        pixel_scales=pixel_scales,
        sub_size=sub_size,
        origin=origin,
    )


def grid_2d_slim_from(
    grid_2d_native: np.ndarray, mask: np.ndarray, sub_size: int
) -> np.ndarray:
    """
    For a native 2D grid and mask of shape [total_y_pixels, total_x_pixels, 2], map the values of all unmasked
    pixels to a slimmed grid of shape [total_unmasked_pixels, 2].

    The pixel coordinate origin is at the top left corner of the native grid and goes right-wards and downwards,
    such that for a grid of shape (3,3) where all pixels are unmasked:

    - pixel [0,0] of the 2D grid will correspond to index 0 of the 1D grid.
    - pixel [0,1] of the 2D grid will correspond to index 1 of the 1D grid.
    - pixel [1,0] of the 2D grid will correspond to index 3 of the 1D grid.

    Parameters
    ----------
    grid_2d_native : ndarray
        The native grid of (y,x) values which are mapped to the slimmed grid.
    mask
        A 2D array of bools, where `False` values mean unmasked and are included in the mapping.
    sub_size
        The size (sub_size x sub_size) of each unmasked pixel's sub-array.

    Returns
    -------
    ndarray
        A 1D grid of values mapped from the 2D grid with dimensions (total_unmasked_pixels).
    """

    grid_1d_slim_y = array_2d_util.array_2d_slim_from(
        array_2d_native=grid_2d_native[:, :, 0], mask_2d=mask, sub_size=sub_size
    )

    grid_1d_slim_x = array_2d_util.array_2d_slim_from(
        array_2d_native=grid_2d_native[:, :, 1], mask_2d=mask, sub_size=sub_size
    )

    return np.stack((grid_1d_slim_y, grid_1d_slim_x), axis=-1)


def grid_2d_native_from(
    grid_2d_slim: np.ndarray, mask_2d: np.ndarray, sub_size: int
) -> np.ndarray:
    """
    For a slimmed 2D grid of shape [total_unmasked_pixels, 2], that was computed by extracting the unmasked values
    from a native 2D grid of shape [total_y_pixels, total_x_pixels, 2], map the slimmed grid's coordinates back to
    the native 2D grid where masked values are set to zero.

    This uses a 1D array 'slim_to_native' where each index gives the 2D pixel indexes of the grid's native unmasked
    pixels, for example:

    - If slim_to_native[0] = [0,0], the first value of the 1D array maps to the pixels [0,0,:] of the native 2D grid.
    - If slim_to_native[1] = [0,1], the second value of the 1D array maps to the pixels [0,1,:] of the native 2D grid.
    - If slim_to_native[4] = [1,1], the fifth value of the 1D array maps to the pixels [1,1,:] of the native 2D grid.

    Parameters
    ----------
    grid_2d_slim
        The (y,x) values of the slimmed 2D grid which are mapped to the native 2D grid.
    mask_2d
        A 2D array of bools, where `False` values mean unmasked and are included in the mapping.
    sub_size
        The size (sub_size x sub_size) of each unmasked pixel's sub-array.

    Returns
    -------
    ndarray
        A NumPy array of shape [total_y_pixels, total_x_pixels, 2] corresponding to the (y,x) values of the native
        2D grid mapped from the slimmed grid.
    """

    grid_2d_native_y = array_2d_util.array_2d_native_from(
        array_2d_slim=grid_2d_slim[:, 0], mask_2d=mask_2d, sub_size=sub_size
    )

    grid_2d_native_x = array_2d_util.array_2d_native_from(
        array_2d_slim=grid_2d_slim[:, 1], mask_2d=mask_2d, sub_size=sub_size
    )

    return np.stack((grid_2d_native_y, grid_2d_native_x), axis=-1)
[ 11748, 299, 32152, 355, 45941, 201, 198, 6738, 19720, 1330, 309, 29291, 11, 4479, 11, 32233, 201, 198, 201, 198, 6738, 8295, 18747, 13, 7249, 942, 13, 3258, 592, 13, 11545, 62, 67, 1330, 7177, 62, 17, 67, 62, 22602, 201, 198, 6738, ...
2.467327
4,239
# coding: utf-8 import requests, math import gevent from gevent.queue import Queue from gevent import monkey; monkey.patch_all() from pyquery import PyQuery if __name__ == '__main__': p = Proxies() p.get_proxies(20, 1) result = p.get_result() print(result)
[ 2, 19617, 25, 3384, 69, 12, 23, 198, 198, 11748, 7007, 11, 10688, 198, 11748, 4903, 1151, 198, 6738, 4903, 1151, 13, 36560, 1330, 4670, 518, 198, 6738, 4903, 1151, 1330, 21657, 26, 21657, 13, 17147, 62, 439, 3419, 198, 6738, 12972, ...
2.619048
105
# Copyright 2021 TUNiB inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from contextlib import suppress from typing import List, Union from torch import nn from parallelformers.policies.base import Policy
[ 2, 15069, 33448, 309, 4944, 72, 33, 753, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846, 351, 262, 13789, 13, 198, ...
3.863388
183
# coding: utf-8
"""
Name: upper_air_humidity.py

Make upper level weather chart.

Usage: python3 upper_air_humidity.py --file <ncfile>

Author: Ryosuke Tomita
Date: 2022/01/07
"""
import argparse
from ncmagics import fetchtime, japanmap, meteotool


def parse_args() -> dict:
    """parse_args.
    set file path.

    Args:

    Returns:
        dict:
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-f", "--file", help="set ncfile.", type=str)
    p = parser.parse_args()
    args = {"file": p.file}
    return args


def output_name(ncfile: str, isobaric_surface: int) -> str:
    """output_name.

    Args:
        ncfile (str): ncfile
        isobaric_surface (int): isobaric_surface

    Returns:
        str:
    """
    date_time = fetchtime.fetch_time(ncfile)
    outname = (date_time + "_"
            + str(isobaric_surface))
    return outname


def main():
    """main.
    """
    args = parse_args()
    meteo_tool = meteotool.MeteoTools(args["file"])
    lat, lon = meteo_tool.get_lat_lon()
    isobaric_surface = (850, 500, 300)
    #label_upper = (30, 0)
    #label_min = (-30, -60)

    for i, pressure in enumerate(isobaric_surface):
        # get parameter
        temp_c = meteo_tool.get_parameter('t', isobaric_surface=pressure) - 273.15
        rh = meteo_tool.get_parameter('r', isobaric_surface=pressure)
        height_gpm = meteo_tool.get_parameter('gh', isobaric_surface=pressure)
        u_wind = meteo_tool.get_parameter('u', isobaric_surface=pressure)
        v_wind = meteo_tool.get_parameter('v', isobaric_surface=pressure)

        jp_map = japanmap.JpMap()
        jp_map.contour_plot(lon, lat, height_gpm)
        #jp_map.shade_plot(lon, lat, temp_c,
        #        label="2m temperature ($^\circ$C)",
        #        color_bar_label_max=label_upper[i],
        #        color_bar_label_min=label_min[i],
        #        color_map_type="temperature",
        #        double_color_bar=True,)
        jp_map.shade_plot(lon, lat, rh,
                label="relative humidity (%)",
                color_bar_label_max=100,
                color_bar_label_min=0,
                color_map_type="gray",
                double_color_bar=False,)
        jp_map.vector_plot(lon, lat, u_wind, v_wind,
                vector_interval=5, vector_scale=10, mode="wind")
        #jp_map.gray_shade(lon, lat, rh,
        #        label="relative humidity (%)",
        #        color_bar_label_max=100,
        #        color_bar_label_min=0,
        #        )
        if pressure == 850:
            jp_map.color_line(lon, lat, temp_c, line_value=-6, color='#0000ff')
        if pressure == 500:
            jp_map.color_line(lon, lat, temp_c, line_value=-36, color='#b22222')
        outname = output_name(args["file"], pressure)
        print(outname)
        jp_map.save_fig(outname, str(pressure) + "hPa")


if __name__ == "__main__":
    main()
[ 2, 19617, 25, 3384, 69, 12, 23, 198, 37811, 198, 5376, 25, 6727, 62, 958, 62, 17047, 17995, 13, 9078, 198, 198, 12050, 6727, 1241, 6193, 8262, 13, 198, 198, 28350, 25, 21015, 18, 6727, 62, 958, 62, 17047, 17995, 13, 9078, 1377, 77...
1.966387
1,547
# # Autogenerated by Thrift Compiler (0.13.0) # # DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING # # options string: py # from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException from thrift.protocol.TProtocol import TProtocolException from thrift.TRecursive import fix_spec import sys from thrift.transport import TTransport all_structs = [] all_structs.append(MiniStruct) MiniStruct.thrift_spec = ( None, # 0 (1, TType.STRING, 'my_string', 'UTF8', None, ), # 1 (2, TType.I32, 'my_enum', None, None, ), # 2 ) all_structs.append(MegaStruct) MegaStruct.thrift_spec = ( None, # 0 (1, TType.BOOL, 'my_bool', None, None, ), # 1 (2, TType.BYTE, 'my_byte', None, None, ), # 2 (3, TType.I16, 'my_16bit_int', None, None, ), # 3 (4, TType.I32, 'my_32bit_int', None, None, ), # 4 (5, TType.I64, 'my_64bit_int', None, None, ), # 5 (6, TType.DOUBLE, 'my_double', None, None, ), # 6 (7, TType.STRING, 'my_string', 'UTF8', None, ), # 7 (8, TType.STRING, 'my_binary', 'BINARY', None, ), # 8 (9, TType.MAP, 'my_string_string_map', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None, ), # 9 (10, TType.MAP, 'my_string_enum_map', (TType.STRING, 'UTF8', TType.I32, None, False), None, ), # 10 (11, TType.MAP, 'my_enum_string_map', (TType.I32, None, TType.STRING, 'UTF8', False), None, ), # 11 (12, TType.MAP, 'my_enum_struct_map', (TType.I32, None, TType.STRUCT, [MiniStruct, None], False), None, ), # 12 (13, TType.MAP, 'my_enum_stringlist_map', (TType.I32, None, TType.LIST, (TType.STRING, 'UTF8', False), False), None, ), # 13 (14, TType.MAP, 'my_enum_structlist_map', (TType.I32, None, TType.LIST, (TType.STRUCT, [MiniStruct, None], False), False), None, ), # 14 (15, TType.LIST, 'my_stringlist', (TType.STRING, 'UTF8', False), None, ), # 15 (16, TType.LIST, 'my_structlist', (TType.STRUCT, [MiniStruct, None], False), None, ), # 16 (17, TType.LIST, 'my_enumlist', (TType.I32, None, False), None, ), # 17 (18, TType.SET, 'my_stringset', (TType.STRING, 'UTF8', False), None, ), # 18 (19, TType.SET, 'my_enumset', (TType.I32, None, False), None, ), # 19 (20, TType.SET, 'my_structset', (TType.STRUCT, [MiniStruct, None], False), None, ), # 20 ) fix_spec(all_structs) del all_structs
[ 2, 198, 2, 5231, 519, 877, 515, 416, 16283, 2135, 3082, 5329, 357, 15, 13, 1485, 13, 15, 8, 198, 2, 198, 2, 8410, 5626, 48483, 4725, 48481, 7013, 15986, 311, 11335, 14603, 7013, 35876, 25003, 7013, 15986, 8410, 2751, 198, 2, 198, ...
2.311089
1,019
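Each row above pairs a `content` field with its `input_ids`, a `ratio_char_token`, and a `token_count`. The IDs visible in the dump (e.g. 198 for a newline, 11748 for `import`) are consistent with the GPT-2 vocabulary, so the sketch below assumes a GPT-2 tokenizer; the tokenizer choice and the `derive_row` helper are assumptions for illustration, not the pipeline that actually produced these rows.

```python
# Minimal sketch: recompute the four columns shown above for one code sample.
# Assumption: the dump was produced with the GPT-2 tokenizer (the visible IDs,
# e.g. 198 == "\n" and 11748 == "import", match its vocabulary).
from transformers import GPT2TokenizerFast

tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")


def derive_row(content: str) -> dict:
    # Hypothetical helper: encode the raw source text (no special tokens are
    # added by default for GPT-2) and derive the per-row statistics.
    input_ids = tokenizer(content)["input_ids"]
    token_count = len(input_ids)
    return {
        "content": content,
        "input_ids": input_ids,
        # characters per token, matching the ratio_char_token column
        "ratio_char_token": len(content) / token_count,
        "token_count": token_count,
    }


if __name__ == "__main__":
    row = derive_row("import math\n")
    print(row["input_ids"], row["ratio_char_token"], row["token_count"])
```

Under this assumption the sketch reproduces the `import math` row above exactly: `[11748, 10688, 198]`, a ratio of 4.0 (12 characters over 3 tokens), and a token count of 3.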