Dataset columns (column, dtype summary, min, max):

content            stringlengths   1   1.05M
input_ids          listlengths     1   883k
ratio_char_token   float64         1   22.9
token_count        int64           1   883k
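The input_ids in the records below appear consistent with a GPT-2-style BPE vocabulary (for example, id 198 encodes a newline), though the preview never names the tokenizer. A minimal sketch of how the derived columns could be recomputed, assuming the HuggingFace "gpt2" tokenizer (an assumption, not something stated by the dataset):

# Sketch: recompute the derived columns for one record.
# ASSUMPTION: a GPT-2-style tokenizer; "gpt2" is an illustrative guess,
# the dataset preview does not name the tokenizer it used.
from transformers import GPT2TokenizerFast

tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")

def derive_columns(content: str) -> dict:
    input_ids = tokenizer(content)["input_ids"]
    return {
        "content": content,
        "input_ids": input_ids,
        "token_count": len(input_ids),
        "ratio_char_token": len(content) / len(input_ids),  # chars per token
    }

record = derive_columns("from django.db import models\n")
print(record["token_count"], record["ratio_char_token"])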
from typing import List, Tuple, Union

import numpy as np
import scipy.special
from PIL import Image, ImageFilter


if __name__ == "__main__":
    import argparse

    from PIL import ImageOps

    parser = argparse.ArgumentParser()
    parser.add_argument("--operation", choices=("dilate", "erode"), default="dilate")
    parser.add_argument("images", type=argparse.FileType("rb"), nargs="+")
    args = parser.parse_args()

    transformer = Dilate() if args.operation == "dilate" else Erode()
    for f in args.images:
        x = Image.open(f, "r").convert("L")
        x = ImageOps.invert(x)
        y = transformer(x)
        w, h = x.size
        z = Image.new("L", (w, 2 * h))
        z.paste(x, (0, 0))
        z.paste(y, (0, h))
        z = z.resize(size=(w // 2, h), resample=Image.BICUBIC)
        z.show()
        input()
[ 6738, 19720, 1330, 7343, 11, 309, 29291, 11, 4479, 198, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 629, 541, 88, 13, 20887, 198, 6738, 350, 4146, 1330, 7412, 11, 7412, 22417, 628, 628, 198, 198, 361, 11593, 3672, 834, 6624, 366, ...
2.282609
368
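The record above drives Dilate and Erode classes that the sample truncates away. A hypothetical sketch of what such transformers could look like, using PIL's rank filters (grayscale dilation is a local maximum, erosion a local minimum); the class shapes and the 3x3 kernel size are assumptions, not recovered from the dataset:

# Hypothetical stand-ins for the truncated Dilate/Erode classes.
from PIL import Image, ImageFilter

class Dilate:
    def __call__(self, img: Image.Image) -> Image.Image:
        return img.filter(ImageFilter.MaxFilter(3))  # 3x3 maximum = dilation

class Erode:
    def __call__(self, img: Image.Image) -> Image.Image:
        return img.filter(ImageFilter.MinFilter(3))  # 3x3 minimum = erosion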
import sys

from fabric.utils import error, puts
from git import RemoteProgress
[ 11748, 25064, 198, 198, 6738, 9664, 13, 26791, 1330, 4049, 11, 7584, 198, 6738, 17606, 1330, 21520, 32577, 628, 628, 198 ]
4
21
# -*- coding: utf-8 -*-
from collections import Counter
from konlpy.tag import Okt
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 6738, 17268, 1330, 15034, 198, 6738, 479, 261, 75, 9078, 13, 12985, 1330, 6762, 83, 628 ]
2.741935
31
# Copyright (c) 2014, Fundacion Dr. Manuel Sadosky
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:

# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.

# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

from __future__ import absolute_import

import unittest

from barf.arch import ARCH_X86_MODE_32
from barf.arch import ARCH_X86_MODE_64
from barf.arch.x86.parser import X86Parser


def main():
    unittest.main()


if __name__ == '__main__':
    main()
[ 2, 15069, 357, 66, 8, 1946, 11, 7557, 49443, 1583, 13, 25995, 14668, 418, 2584, 198, 2, 1439, 2489, 10395, 13, 198, 198, 2, 2297, 396, 3890, 290, 779, 287, 2723, 290, 13934, 5107, 11, 351, 393, 1231, 198, 2, 17613, 11, 389, 10431,...
3.368421
475
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# sema_signal.py
#
# An example of using a semaphore for signaling between threads

import threading
import time

done = threading.Semaphore(0)  # Resource control.
item = None

t1 = threading.Thread(target=producer)
t2 = threading.Thread(target=consumer)
t1.start()
t2.start()

"""
Semaphore Uses:

1. Resource control
   You can limit the number of threads performing certain operations. For example,
   performing database queries, making network connections.

2. Signaling
   Semaphores can be used to send "signals" between threads. For example,
   having one thread wake up another thread.
"""
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 2, 532, 9, 12, 19617, 25, 40477, 12, 23, 532, 9, 12, 198, 2, 5026, 64, 62, 12683, 282, 13, 9078, 198, 2, 198, 2, 1052, 1672, 286, 1262, 257, 5026, 6570, 382, 329, 22049, 10...
2.918803
234
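The record above starts producer and consumer threads that the sample truncates away. A minimal sketch of the signaling pattern its closing comment block describes; the bodies of producer and consumer are illustrative assumptions:

import threading

done = threading.Semaphore(0)
item = None

def producer():
    global item
    item = "hello"    # produce a value
    done.release()    # signal the consumer that the item is ready

def consumer():
    done.acquire()    # block until the producer signals
    print("got:", item)

t1 = threading.Thread(target=producer)
t2 = threading.Thread(target=consumer)
t2.start()
t1.start()
t1.join()
t2.join()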
'''Analysis utility functions.

:Author: Jonathan Karr <karr@mssm.edu>
:Date: 2016-03-26
:Copyright: 2016-2018, Karr Lab
:License: MIT
'''

# TODO(Arthur): IMPORTANT: refactor and replace

from matplotlib import pyplot
from matplotlib import ticker
from wc_lang import Model, Submodel
from scipy.constants import Avogadro
import numpy as np
import re
[ 7061, 6, 32750, 10361, 5499, 13, 198, 198, 25, 13838, 25, 11232, 509, 3258, 1279, 74, 3258, 31, 76, 824, 76, 13, 15532, 29, 198, 25, 10430, 25, 1584, 12, 3070, 12, 2075, 198, 25, 15269, 25, 1584, 12, 7908, 11, 509, 3258, 3498, 1...
2.94958
119
from setuptools import setup, find_packages

with open("README.md", "r") as readme_file:
    readme = readme_file.read()

requirements = [
    'xgboost>=0.90',
    'catboost>=0.26',
    'bayesian-optimization>=1.2.0',
    'numpy>=1.19.5',
    'pandas>=1.1.5',
    'matplotlib>=3.2.2',
    'seaborn>=0.11.1',
    'plotly>=4.4.1',
    'pyyaml>=5.4.1'
]

setup(
    name="bonsai-tree",
    version="1.2",
    author="Landon Buechner",
    author_email="mechior.magi@gmail.com",
    description="Bayesian Optimization + Gradient Boosted Trees",
    long_description=readme,
    url="https://github.com/magi-1/bonsai",
    packages=find_packages(),
    package_data={'': ['*.yml']},
    install_requires=requirements,
    license='MIT',
    classifiers=[
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
    ],
)
[ 6738, 900, 37623, 10141, 1330, 9058, 11, 1064, 62, 43789, 198, 198, 4480, 1280, 7203, 15675, 11682, 13, 9132, 1600, 366, 81, 4943, 355, 1100, 1326, 62, 7753, 25, 198, 220, 220, 220, 1100, 1326, 796, 1100, 1326, 62, 7753, 13, 961, 34...
2.201456
412
""" increment_version.py written in Python3 author: C. Lockhart <chris@lockhartlab.org> """ import yaml # Read in version with open('version.yml', 'r') as f: version = yaml.safe_load(f.read()) # Strip "dev" out of micro version['micro'] = int(str(version['micro']).replace('dev', '')) # Update patch version['micro'] += 1 # Add "dev" back to patch if version['micro'] != 0: version['micro'] = 'dev' + str(version['micro']) # Output version with open('version.yml', 'w') as f: yaml.safe_dump(version, f, sort_keys=False) # Transform version dict to string version = '.'.join([str(version[key]) for key in ['major', 'minor', 'micro']]) # Write version string to pathogen/_version.py with open('pathogen/version.py', 'w') as f: f.write("__version__ = '{}'\n".format(version)) # Return print(version)
[ 37811, 198, 24988, 434, 62, 9641, 13, 9078, 198, 15266, 287, 11361, 18, 198, 9800, 25, 327, 13, 13656, 18647, 1279, 354, 2442, 31, 5354, 18647, 23912, 13, 2398, 29, 198, 37811, 628, 198, 11748, 331, 43695, 198, 198, 2, 4149, 287, 21...
2.761745
298
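A short worked illustration of the bump scheme the record above implements, using a hypothetical starting value of 'dev3' for micro:

# Worked example of the bump scheme: micro 'dev3' -> 'dev4'.
version = {'major': 1, 'minor': 2, 'micro': 'dev3'}
version['micro'] = int(str(version['micro']).replace('dev', ''))  # 3
version['micro'] += 1                                             # 4
if version['micro'] != 0:
    version['micro'] = 'dev' + str(version['micro'])              # 'dev4'
print('.'.join(str(version[k]) for k in ['major', 'minor', 'micro']))  # 1.2.dev4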
import sys
import numpy

try:
    from collections.abc import Iterable
except ImportError:
    from collections import Iterable

from .. import util
from ..element import Element
from ..ndmapping import NdMapping, item_check, sorted_context
from .interface import Interface
from . import pandas
from .util import cached

Interface.register(IbisInterface)
[ 11748, 25064, 198, 11748, 299, 32152, 198, 198, 28311, 25, 198, 220, 220, 220, 422, 17268, 13, 39305, 1330, 40806, 540, 198, 16341, 17267, 12331, 25, 198, 220, 220, 220, 422, 17268, 1330, 40806, 540, 198, 198, 6738, 11485, 1330, 7736, ...
3.747368
95
# -*- coding: utf-8 -*-

"""
Handles the tournament logic
"""

import datetime

from chess.utils.utils import get_new_id
from chess.models.actors import Player
from chess.models.round import Round

TOURNAMENT_ID_WIDTH = 8
NB_ROUND = 4
NB_PLAYERS = 8
NB_MATCH = 4
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 628, 198, 37811, 198, 12885, 829, 262, 7756, 9156, 198, 37811, 628, 198, 11748, 4818, 8079, 198, 198, 6738, 19780, 13, 26791, 13, 26791, 1330, 651, 62, 3605, 62, 312, 198, ...
2.68
100
# !/usr/bin python

"""
#
# set-config - a small python program to setup the configuration environment for data-collect.py
# data-collect.py contain the python program to gather Metrics from vROps
# Author Sajal Debnath <sdebnath@vmware.com>
#
"""

# Importing the required modules
import json
import base64
import os, sys

# Getting the absolute path from where the script is being run
# Getting the path where config.json file should be kept
path = get_script_path()
fullpath = path + "/" + "config.json"

# Getting the data for the config.json file
final_data = get_the_inputs()

# Saving the data to config.json file
with open(fullpath, 'w') as outfile:
    json.dump(final_data, outfile, sort_keys=True, indent=2, separators=(',', ':'), ensure_ascii=False)
[ 2, 5145, 14, 14629, 14, 8800, 21015, 198, 198, 37811, 198, 2, 198, 2, 900, 12, 11250, 532, 257, 1402, 21015, 1430, 284, 9058, 262, 8398, 2858, 329, 1366, 12, 33327, 13, 9078, 198, 2, 1366, 12, 33327, 13, 9078, 3994, 262, 21015, 14...
3.200837
239
""" Copyright 2018 Inmanta Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Contact: code@inmanta.com """ import os import re import signal import subprocess import sys from subprocess import TimeoutExpired from threading import Timer import pytest import inmanta.util from inmanta import const def run_with_tty(args, killtime=3, termtime=2): """Could not get code for actual tty to run stable in docker, so we are faking it """ env = {const.ENVIRON_FORCE_TTY: "true"} return run_without_tty(args, env=env, killtime=killtime, termtime=termtime) def test_verify_that_colorama_package_is_not_present(): """ The colorama package turns the colored characters in TTY-based terminal into uncolored characters. As such, this package should not be present. """ assert not is_colorama_package_available() def check_logs(log_lines, regexes_required_lines, regexes_forbidden_lines, timed): compiled_regexes_requires_lines = get_compiled_regexes(regexes_required_lines, timed) compiled_regexes_forbidden_lines = get_compiled_regexes(regexes_forbidden_lines, timed) for line in log_lines: print(line) for regex in compiled_regexes_requires_lines: if not any(regex.match(line) for line in log_lines): pytest.fail("Required pattern was not found in log lines: %s" % (regex.pattern,)) for regex in compiled_regexes_forbidden_lines: if any(regex.match(line) for line in log_lines): pytest.fail("Forbidden pattern found in log lines: %s" % (regex.pattern,)) def test_init_project(tmpdir): args = [sys.executable, "-m", "inmanta.app", "project", "init", "-n", "test-project", "-o", tmpdir, "--default"] (stdout, stderr, return_code) = run_without_tty(args, killtime=15, termtime=10) test_project_path = os.path.join(tmpdir, "test-project") assert return_code == 0 assert os.path.exists(test_project_path) (stdout, stderr, return_code) = run_without_tty(args, killtime=15, termtime=10) assert return_code != 0 assert len(stderr) == 1 assert "already exists" in stderr[0]
[ 37811, 198, 220, 220, 220, 15069, 2864, 554, 76, 4910, 628, 220, 220, 220, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 220, 220, 220, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846, 351...
2.853896
924
n = int(input())
row = 0
for i in range(100):
    if 2 ** i <= n <= 2 ** (i + 1) - 1:
        row = i
        break

k = 0
if row % 2 != 0:
    k = 2
    cri = seki(k, row // 2)
    if n < cri:
        print("Aoki")
    else:
        print("Takahashi")
else:
    k = 1
    cri = seki(k, row // 2)
    if n < cri:
        print("Takahashi")
    else:
        print("Aoki")
[ 77, 796, 493, 7, 15414, 28955, 198, 808, 796, 657, 198, 1640, 1312, 287, 2837, 7, 3064, 2599, 198, 220, 220, 220, 611, 362, 12429, 1312, 19841, 299, 19841, 362, 12429, 357, 72, 1343, 352, 8, 532, 352, 25, 198, 220, 220, 220, 220, ...
1.772512
211
# CoDVote plugin for BigBrotherBot(B3) (www.bigbrotherbot.net)
# Copyright (C) 2015 ph03n1x
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# Changelog:
# v1.0.1 - Fixed vote remaining in progress if requirements for vote unmet.
# v1.0.2 - Added "!vote maps" to show what maps can be called into vote.
#        - Fixed issue where person who called vote needed to vote as well. Changed to automatic yes vote.

__version__ = '1.0.2'
__author__ = 'ph03n1x'

import b3, threading
import b3.plugin
import b3.events
[ 2, 1766, 35, 37394, 13877, 329, 4403, 39461, 20630, 7, 33, 18, 8, 357, 2503, 13, 14261, 37343, 13645, 13, 3262, 8, 198, 2, 15069, 357, 34, 8, 1853, 872, 3070, 77, 16, 87, 198, 2, 198, 2, 770, 1430, 318, 1479, 3788, 26, 345, 46...
3.442478
339
# Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Utility functions for manipulating variables in Federated personalization."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf

TRAIN_NAME = "Train"
VALIDATION_NAME = "Validation"
TEST_NAME = "Test"
LOSS_NAME = "loss"
LOSS_SUMMARY_NAME = "perplexity"

# Vars type.
VARS_TYPE_ALL = "all"
VARS_TYPE_SHARED = "shared"
VARS_TYPE_PERSONAL = "personal"


def get_var_dict(vars_):
  """Gets a dict of var base_name (e.g. 'w') to the variable."""
  var_dict = {}
  for v in vars_:
    var_base_name = get_base_name(v)
    var_dict[var_base_name] = v
  return var_dict


def generate_update_ops(vars_):
  """Generates update ops and placeholders.

  For each var, it generates a placeholder to feed in the new values.
  Then it takes the mean of the inputs along dimension 0.

  Args:
    vars_: Vars for which the update ops will be generated.

  Returns:
    update_ops: A list of update ops.
    dict_update_placeholders: A dict of var base name to its update-placeholder.
  """
  update_ops = []
  dict_update_placeholders = {}
  for v in vars_:
    # For every var in the scope, add a placeholder to feed in the new values.
    # The placeholder may need to hold multiple values, this happens
    # when updating the server from many clients.
    var_in_shape = [None] + v.shape.as_list()
    var_in_name = get_update_placeholder_name(v)
    var_in = tf.placeholder(v.dtype, shape=var_in_shape, name=var_in_name)
    var_in_mean = tf.reduce_mean(var_in, 0)
    update_op = v.assign(var_in_mean)
    update_ops.append(update_op)
    dict_update_placeholders[get_base_name(v)] = var_in
  return update_ops, dict_update_placeholders


def add_prefix(prefix, name):
  """Adds prefix to name."""
  return "/".join((prefix, name))


def add_suffix(suffix, name):
  """Adds suffix to name."""
  return "/".join((name, suffix))


def get_attribute_dict(class_instance):
  """Gets a dict of attributes of a class instance."""
  # first start by grabbing the Class items
  attribute_dict = dict((x, y)
                        for x, y in class_instance.__class__.__dict__.items()
                        if x[:2] != "__")
  # then update the class items with the instance items
  attribute_dict.update(class_instance.__dict__)
  return attribute_dict
[ 2, 15069, 2864, 3012, 3457, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846, 351, 262, 13789, 13, 198, 2, 921, 743, ...
2.929648
995
# Assumptions: validate_crud_functions available
# Assumes __uripwd is defined as <user>:<pwd>@<host>:<plugin_port>
from __future__ import print_function
from mysqlsh import mysqlx

mySession = mysqlx.get_session(__uripwd)

ensure_schema_does_not_exist(mySession, 'js_shell_test')

schema = mySession.create_schema('js_shell_test')

# Creates a test collection and inserts data into it
collection = schema.create_collection('collection1')

result = collection.add({"_id": "3C514FF38144B714E7119BCF48B4CA01", "name": 'jack', "age": 17, "gender": 'male'}).execute()
result = collection.add({"_id": "3C514FF38144B714E7119BCF48B4CA02", "name": 'adam', "age": 15, "gender": 'male'}).execute()
result = collection.add({"_id": "3C514FF38144B714E7119BCF48B4CA03", "name": 'brian', "age": 14, "gender": 'male'}).execute()
result = collection.add({"_id": "3C514FF38144B714E7119BCF48B4CA04", "name": 'alma', "age": 13, "gender": 'female'}).execute()
result = collection.add({"_id": "3C514FF38144B714E7119BCF48B4CA05", "name": 'carol', "age": 14, "gender": 'female'}).execute()
result = collection.add({"_id": "3C514FF38144B714E7119BCF48B4CA06", "name": 'donna', "age": 16, "gender": 'female'}).execute()
result = collection.add({"_id": "3C514FF38144B714E7119BCF48B4CA07", "name": 'angel', "age": 14, "gender": 'male'}).execute()

# ------------------------------------------------
# collection.remove Unit Testing: Dynamic Behavior
# ------------------------------------------------

#@ CollectionRemove: valid operations after remove
crud = collection.remove('some_condition')
validate_crud_functions(crud, ['sort', 'limit', 'bind', 'execute'])

#@ CollectionRemove: valid operations after sort
crud = crud.sort(['name'])
validate_crud_functions(crud, ['limit', 'bind', 'execute'])

#@ CollectionRemove: valid operations after limit
crud = crud.limit(1)
validate_crud_functions(crud, ['bind', 'execute'])

#@ CollectionRemove: valid operations after bind
crud = collection.remove('name = :data').bind('data', 'donna')
validate_crud_functions(crud, ['bind', 'execute'])

#@ CollectionRemove: valid operations after execute
result = crud.execute()
validate_crud_functions(crud, ['limit', 'bind', 'execute'])

#@ Reusing CRUD with binding
print('Deleted donna:', result.affected_items_count, '\n')
result = crud.bind('data', 'alma').execute()
print('Deleted alma:', result.affected_items_count, '\n')

# ----------------------------------------------
# collection.remove Unit Testing: Error Conditions
# ----------------------------------------------

#@# CollectionRemove: Error conditions on remove
crud = collection.remove()
crud = collection.remove(' ')
crud = collection.remove(5)
crud = collection.remove('test = "2')

#@# CollectionRemove: Error conditions sort
crud = collection.remove('some_condition').sort()
crud = collection.remove('some_condition').sort(5)
crud = collection.remove('some_condition').sort([])
crud = collection.remove('some_condition').sort(['name', 5])
crud = collection.remove('some_condition').sort('name', 5)

#@# CollectionRemove: Error conditions on limit
crud = collection.remove('some_condition').limit()
crud = collection.remove('some_condition').limit('')

#@# CollectionRemove: Error conditions on bind
crud = collection.remove('name = :data and age > :years').bind()
crud = collection.remove('name = :data and age > :years').bind(5, 5)
crud = collection.remove('name = :data and age > :years').bind('another', 5)

#@# CollectionRemove: Error conditions on execute
crud = collection.remove('name = :data and age > :years').execute()
crud = collection.remove('name = :data and age > :years').bind('years', 5).execute()

# ---------------------------------------
# collection.remove Unit Testing: Execution
# ---------------------------------------

#@ CollectionRemove: remove under condition
//! [CollectionRemove: remove under condition]
result = collection.remove('age = 15').execute()
print('Affected Rows:', result.affected_items_count, '\n')

docs = collection.find().execute().fetch_all()
print('Records Left:', len(docs), '\n')
//! [CollectionRemove: remove under condition]

#@ CollectionRemove: remove with binding
//! [CollectionRemove: remove with binding]
result = collection.remove('gender = :heorshe').limit(2).bind('heorshe', 'male').execute()
print('Affected Rows:', result.affected_items_count, '\n')
//! [CollectionRemove: remove with binding]

docs = collection.find().execute().fetch_all()
print('Records Left:', len(docs), '\n')

#@ CollectionRemove: full remove
//! [CollectionRemove: full remove]
result = collection.remove('1').execute()
print('Affected Rows:', result.affected_items_count, '\n')

docs = collection.find().execute().fetch_all()
print('Records Left:', len(docs), '\n')
//! [CollectionRemove: full remove]

# Cleanup
mySession.drop_schema('js_shell_test')
mySession.close()
[ 2, 2195, 388, 8544, 25, 26571, 62, 6098, 463, 62, 12543, 2733, 1695, 198, 2, 2195, 8139, 11593, 333, 541, 16993, 318, 5447, 355, 1279, 7220, 31175, 27, 79, 16993, 29, 31, 27, 4774, 31175, 27, 33803, 62, 634, 29, 198, 6738, 11593, ...
3.180683
1,522
import turtle
turtle.bgcolor('black')
wn = turtle.Screen()
tr = turtle.Turtle()

move = 1
tr.speed("fastest")

for i in range(360):
    tr.write("ADITYA", 'false', 'center', font=('Showcard gothic', 50))
    tr.penup()
    tr.goto(-200, 100)
    tr.pendown()
    tr.color("orange")
    tr.right(move)
    tr.forward(100)
    tr.penup()
    tr.color("white")
    tr.pendown()
    tr.right(30)
    tr.forward(60)
    tr.pendown()
    tr.color("light green")
    tr.left(10)
    tr.forward(50)
    tr.right(70)
    tr.penup()
    tr.pendown()
    tr.color('light blue')
    tr.forward(50)
    tr.color('light green')
    tr.pu()
    tr.pd()
    tr.color("light blue")
    tr.forward(100)
    tr.color('brown')
    tr.forward(200)
    tr.pu()
    tr.pd()
    tr.color('light green')
    tr.circle(2)
    tr.color('light blue')
    tr.circle(4)
    tr.pu()
    tr.fd(20)
    tr.pd()
    tr.circle(6)
    tr.pu()
    tr.fd(40)
    tr.pd()
    tr.circle(8)
    tr.pu()
    tr.fd(80)
    tr.pd()
    tr.circle(10)
    tr.pu()
    tr.fd(120)
    tr.pd()
    tr.circle(20)
    tr.color('yellow')
    tr.circle(10)
    tr.pu()
    tr.pd()
    tr.color('white')
    tr.forward(150)
    tr.color('red')
    tr.fd(50)
    tr.color('blue')
    tr.begin_fill()
    tr.penup()
    tr.home()
    move = move + 1
    tr.penup()
    tr.forward(50)

turtle.done()
[ 11748, 28699, 198, 83, 17964, 13, 35904, 8043, 10786, 13424, 11537, 198, 675, 28, 83, 17964, 13, 23901, 3419, 198, 2213, 28, 83, 17964, 13, 51, 17964, 3419, 198, 198, 21084, 28, 16, 198, 2213, 13, 12287, 7203, 7217, 395, 4943, 198, ...
1.917391
690
""", , """ from .group import Group from .user import User from .user import UserIndex from .auth import Authentication from .accesspoint import AccessPoint
[ 15931, 1600, 837, 37227, 198, 198, 6738, 764, 8094, 1330, 4912, 198, 6738, 764, 7220, 1330, 11787, 198, 6738, 764, 7220, 1330, 11787, 15732, 198, 6738, 764, 18439, 1330, 48191, 198, 6738, 764, 15526, 4122, 1330, 8798, 12727, 198 ]
4.051282
39
bino = int(input())
cino = int(input())
if (bino+cino)%2==0:
    print("Bino")
else:
    print("Cino")
[ 65, 2879, 796, 493, 7, 15414, 28955, 198, 66, 2879, 796, 493, 7, 15414, 28955, 198, 361, 357, 65, 2879, 10, 66, 2879, 8, 4, 17, 855, 15, 25, 198, 220, 220, 220, 3601, 7203, 33, 2879, 4943, 198, 17772, 25, 198, 220, 220, 220, 3...
2.019608
51
""" Script updates `README.md` with respect to files at ./easy and ./medium folders. """ import os curr_dir = os.path.dirname(__file__) with open(os.path.join(curr_dir, "README.md"), 'w') as readme: readme.write("# LeetCode\nDeliberate practice in coding.\n") langs = [l for l in os.listdir(curr_dir) if os.path.isdir(os.path.join(curr_dir, l)) and l[0] != '.'] for lang in langs: readme.write("## {}\n".format(lang)) readme.write("### Easy\n") easy = sorted(os.listdir(f"{curr_dir}/{lang}/easy")) easy = [x.split("_")[0] for x in easy] easy_solved = "" for el in easy: easy_solved += "{}, ".format(el) readme.write(easy_solved[:-2] + "\n") readme.write("### Medium\n") medium = sorted(os.listdir(f"{curr_dir}/{lang}/medium")) medium = [x.split("_")[0] for x in medium] medium_solved = "" for el in medium: medium_solved += "{}, ".format(el) readme.write(medium_solved[:-2] + '\n')
[ 37811, 198, 7391, 5992, 4600, 15675, 11682, 13, 9132, 63, 351, 2461, 284, 3696, 379, 24457, 38171, 290, 24457, 24132, 24512, 13, 198, 37811, 198, 11748, 28686, 198, 198, 22019, 81, 62, 15908, 796, 28686, 13, 6978, 13, 15908, 3672, 7, ...
2.104938
486
# -*- coding: utf-8 -*-
#retriever

import csv
from pkg_resources import parse_version

from retriever.lib.models import Table
from retriever.lib.templates import Script

try:
    from retriever.lib.defaults import VERSION
    try:
        from retriever.lib.tools import open_fr, open_fw, open_csvw
    except ImportError:
        from retriever.lib.scripts import open_fr, open_fw
except ImportError:
    from retriever import open_fr, open_fw, VERSION

SCRIPT = main()
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 1186, 380, 964, 198, 198, 11748, 269, 21370, 198, 6738, 279, 10025, 62, 37540, 1330, 21136, 62, 9641, 198, 198, 6738, 37715, 964, 13, 8019, 13, 27530, 1330, 8655, 1...
2.777778
171
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json

from alipay.aop.api.constant.ParamConstants import *
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 11748, 33918, 198, 198, 6738, 435, 541, 323, 13, 64, 404, 13, 15042, 13, 9979, 415, 13, 22973, 34184, 1187, 1330, 163...
2.446809
47
# -*- coding: utf-8 -*-
"""
Created on Mon Dec  2 11:06:59 2019

@author: Paul
"""

def read_data(filename):
    """
    Reads csv file into a list, and converts to ints
    """
    data = []
    f = open(filename, 'r')
    for line in f:
        data += line.strip('\n').split(',')
    int_data = [int(i) for i in data]
    f.close()
    return int_data

def run_intcode(program, input_int):
    """
    Takes data, list of ints to run int_code on.
    Returns list of ints after intcode program has been run.

    Running the Intcode program reads in the integers sequentially in sets of 4:
        data[i]   == Parameter Mode + Opcode (last two digits)
        data[i+1] == Entry 1
        data[i+2] == Entry 2
        data[i+3] == Entry 3

    If Opcode == 1, the values at entry 1 and 2 are summed and stored at the
    index location of entry 3.
    If Opcode == 2, the values at entry 1 and 2 are multiplied and stored at
    the index location of entry 3.
    If Opcode == 3, the single integer (input) is saved to the position given
    by entry 1.
    If Opcode == 4, the program outputs the value of its only parameter.
    E.g. 4,50 would output the value at address 50.
    If Opcode == 5 and entry 1 is != 0, the intcode position moves to the
    index stored at entry 2. Otherwise it does nothing.
    If Opcode == 6 and entry 1 is 0, the intcode position moves to the index
    stored at entry 2. Otherwise it does nothing.
    If Opcode == 7 and entry 1 < entry 2, store 1 in position given by third
    param, otherwise store 0 at position given by third param.
    If Opcode == 8 and entry 1 == entry 2, store 1 in position given by third
    param, otherwise store 0 at position given by third param.
    If Opcode == 99, the program is completed and will stop running.

    Parameters are digits to the left of the opcode, read left to right:
    Parameter 0 -> Position mode - the entry is treated as an index location
    Parameter 1 -> Immediate mode - the entry is treated as a value
    """
    data = program[:]
    answer = -1
    params = [0, 0, 0]
    param_modes = ['', '', '']
    i = 0
    while (i < len(program)):
        #print("i = ", i)
        # Determine Opcode and parameter codes:
        opcode_str = "{:0>5d}".format(data[i])
        opcode = int(opcode_str[3:])
        param_modes[0] = opcode_str[2]
        param_modes[1] = opcode_str[1]
        param_modes[2] = opcode_str[0]
        #print(opcode_str)
        for j in range(2):
            if param_modes[j] == '0':
                try:
                    params[j] = data[data[i+j+1]]
                except IndexError:
                    continue
            else:
                try:
                    params[j] = data[i+j+1]
                except IndexError:
                    continue
        #print(params, param_modes)
        # If opcode is 1, add relevant entries:
        if opcode == 1:
            data[data[i+3]] = params[0] + params[1]
            i += 4
        # If opcode is 2, multiply the relevant entries:
        elif opcode == 2:
            data[data[i+3]] = params[0] * params[1]
            i += 4
        # If opcode is 3, store input value at required location.
        elif opcode == 3:
            data[data[i+1]] = input_int
            i += 2
        # If opcode is 4, print out the input stored at specified location.
        elif opcode == 4:
            answer = data[data[i+1]]
            print("Program output: ", data[data[i+1]])
            i += 2
        # If the opcode is 5 and the next parameter != 0, jump forward
        elif opcode == 5:
            if params[0] != 0:
                i = params[1]
            else:
                i += 3
        # If the opcode is 6 and next parameter is 0, jump forward
        elif opcode == 6:
            if params[0] == 0:
                i = params[1]
            else:
                i += 3
        # If the opcode is 7, carry out less than comparison and store 1/0 at loc 3
        elif opcode == 7:
            if params[0] < params[1]:
                data[data[i+3]] = 1
            else:
                data[data[i+3]] = 0
            i += 4
        # If the opcode is 8, carry out equality comparison and store 1/0 at loc 3
        elif opcode == 8:
            if params[0] == params[1]:
                data[data[i+3]] = 1
            else:
                data[data[i+3]] = 0
            i += 4
        # If the opcode is 99, halt the intcode
        elif opcode == 99:
            print("Program ended by halt code")
            break
        # If opcode is anything else something has gone wrong!
        else:
            print("Problem with the Program")
            break
    return data, answer

program = read_data("day5input.txt")
#print(program)
result1, answer1 = run_intcode(program, 1)
#print(result1)
print("Part 1: Answer is: ", answer1)
result2, answer2 = run_intcode(program, 5)
#print(result2)
print("Part 2: Answer is: ", answer2)

#test_program = [1002,4,3,4,33]
#test_program2 = [3,0,4,0,99]
#test_program3 = [1101,100,-1,4,0]
#test_program4 = [3,9,8,9,10,9,4,9,99,-1,8]  # 1 if input = 8, 0 otherwise
#test_program5 = [3,9,7,9,10,9,4,9,99,-1,8]  # 1 if input < 8, 0 otherwise
#test_program6 = [3,3,1108,-1,8,3,4,3,99]  # 1 if input = 8, 0 otherwise
#test_program7 = [3,3,1107,-1,8,3,4,3,99]  # 1 if input < 8, 0 otherwise
#test_program8 = [3,12,6,12,15,1,13,14,13,4,13,99,-1,0,1,9]  # 0 if input = 0, 1 otherwise
#test_program9 = [3,3,1105,-1,9,1101,0,0,12,4,12,99,1]  # 0 if input = 0, 1 otherwise
#test_program10 = [3,21,1008,21,8,20,1005,20,22,107,8,21,20,1006,20,31,1106,0,
#36,98,0,0,1002,21,125,20,4,20,1105,1,46,104,999,1105,1,46,1101,1000,1,20,4,20,
#1105,1,46,98,99]  # 999 if input < 8, 1000 if input = 8, 1001 if input > 8
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 201, 198, 37811, 201, 198, 41972, 319, 2892, 4280, 220, 362, 1367, 25, 3312, 25, 3270, 13130, 201, 198, 201, 198, 31, 9800, 25, 3362, 201, 198, 37811, 201, 198, 201, 198, ...
1.9733
3,221
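A quick worked illustration of the instruction decoding used in the record above: instruction 1002 zero-pads to "01002", the last two digits give opcode 02 (multiply), and the remaining digits, read right to left, give parameter modes 0, 1, 0:

# Worked example of the opcode/parameter-mode decoding shown above.
opcode_str = "{:0>5d}".format(1002)                    # "01002"
opcode = int(opcode_str[3:])                           # 2 -> multiply
modes = [opcode_str[2], opcode_str[1], opcode_str[0]]  # ['0', '1', '0']
print(opcode, modes)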
from django.db import models

# Create your models here.
[ 6738, 42625, 14208, 13, 9945, 1330, 4981, 198, 198, 2, 13610, 534, 4981, 994, 13, 628, 220, 220, 220, 220, 220, 220, 220, 220, 628, 198 ]
2.653846
26
# -*- coding: utf-8 -*-

# Copyright 2019 Open End AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys
if (sys.version_info >= (3, 0)):
    PYT3 = True
    import urllib.request
    import urllib.parse
else:
    PYT3 = False
    import urllib2
    import urlparse

import contextlib
import json
import os
import py
import subprocess
import time
import uuid

from . import support

here = os.path.dirname(__file__)
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 2, 15069, 13130, 4946, 5268, 9564, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 7...
3.195876
291
""" Copyright 2020 InfAI (CC SES) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ __all__ = ("Router", ) from ..util import conf, get_logger, mqtt import threading import cc_lib logger = get_logger(__name__.split(".", 1)[-1])
[ 37811, 198, 220, 220, 15069, 12131, 4806, 20185, 357, 4093, 311, 1546, 8, 628, 220, 220, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 220, 220, 345, 743, 407, 779, 428, 2393, 2845, 287, 1...
3.392694
219
# Generated by Django 3.2.3 on 2021-05-27 13:34

from django.db import migrations, models
[ 2, 2980, 515, 416, 37770, 513, 13, 17, 13, 18, 319, 33448, 12, 2713, 12, 1983, 1511, 25, 2682, 198, 198, 6738, 42625, 14208, 13, 9945, 1330, 15720, 602, 11, 4981, 628 ]
2.84375
32
#!/usr/bin/env python

import unittest
from day07 import has_abba, get_abba_allowed_strings, get_abba_disallowed_strings
from day07 import supports_tls, count_tls_addresses
from day07 import find_abas, supports_ssl, count_ssl_addresses

if __name__ == '__main__':
    unittest.main()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 198, 11748, 555, 715, 395, 198, 6738, 1110, 2998, 1330, 468, 62, 48910, 11, 651, 62, 48910, 62, 40845, 62, 37336, 11, 651, 62, 48910, 62, 6381, 40845, 62, 37336, 198, 6738, 1110, 299...
2.852941
102
import torch
import numpy as np
import torch.nn.functional as F
from torch.autograd import Variable
from basenets.MLP import MLP
from basenets.Conv import Conv
from torch import nn

# TODO: support multi-layer value function in which action is concat before the final layer
[ 11748, 28034, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 28034, 13, 20471, 13, 45124, 355, 376, 198, 6738, 28034, 13, 2306, 519, 6335, 1330, 35748, 198, 6738, 1615, 268, 1039, 13, 5805, 47, 1330, 10373, 47, 198, 6738, 1615, 268, ...
3.618421
76
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SparseTensorsMap."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np

from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import test

# pylint: disable=protected-access
add_sparse_to_tensors_map = sparse_ops._add_sparse_to_tensors_map
add_many_sparse_to_tensors_map = sparse_ops._add_many_sparse_to_tensors_map
take_many_sparse_from_tensors_map = (
    sparse_ops._take_many_sparse_from_tensors_map)
# pylint: enable=protected-access

if __name__ == "__main__":
  test.main()
[ 2, 15069, 1853, 383, 309, 22854, 37535, 46665, 13, 1439, 6923, 33876, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846, ...
3.492537
469
""" Copyright 2015 Rackspace Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import calendar import time import zlib from hashlib import md5 import unittest from cafe.drivers.unittest.decorators import ( DataDrivenFixture, data_driven_test) from cloudcafe.objectstorage.objectstorage_api.common.constants import \ Constants from cloudroast.objectstorage.fixtures import ObjectStorageFixture from cloudroast.objectstorage.generators import ( ObjectDatasetList, CONTENT_TYPES) CONTAINER_DESCRIPTOR = 'object_smoke_test' STATUS_CODE_MSG = ('{method} expected status code {expected}' ' received status code {received}')
[ 37811, 198, 15269, 1853, 37927, 13200, 198, 198, 26656, 15385, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 5832, 743, 407, 779, 428, 2393, 2845, 287, 11846, 351, 262, 13789, 13, 198, 1639, 743, 7...
3.386228
334
# (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import os

import pytest

from datadog_checks.dev import docker_run
from datadog_checks.dev.conditions import CheckDockerLogs
from datadog_checks.dev.subprocess import run_command

from .common import BASIC_CONFIG, HERE

E2E_METADATA = {
    'start_commands': [
        'apt-get update',
        'apt-get install -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" -y docker.io',
    ],
    'docker_volumes': ['/var/run/docker.sock:/var/run/docker.sock'],
}
[ 2, 357, 34, 8, 16092, 324, 519, 11, 3457, 13, 2864, 12, 25579, 198, 2, 1439, 2489, 10395, 198, 2, 49962, 739, 257, 513, 12, 565, 682, 347, 10305, 3918, 5964, 357, 3826, 38559, 24290, 8, 198, 198, 11748, 28686, 198, 198, 11748, 129...
2.618421
228
import json
from typing import Dict, Optional

import requests

from federation.hostmeta.parsers import (
    parse_nodeinfo_document, parse_nodeinfo2_document, parse_statisticsjson_document,
    parse_mastodon_document, parse_matrix_document, parse_misskey_document)
from federation.utils.network import fetch_document

HIGHEST_SUPPORTED_NODEINFO_VERSION = 2.1
[ 11748, 33918, 198, 6738, 19720, 1330, 360, 713, 11, 32233, 198, 198, 11748, 7007, 198, 198, 6738, 36986, 13, 4774, 28961, 13, 79, 945, 364, 1330, 357, 198, 220, 220, 220, 21136, 62, 17440, 10951, 62, 22897, 11, 21136, 62, 17440, 10951...
3.256637
113
import numpy as np
from typing import Any, Dict, List, Tuple, NoReturn

import argparse
import os

def parse_arguments() -> Any:
    """Parse command line arguments."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--data_dir",
        default="",
        type=str,
        help="Directory where the features (npy files) are saved",
    )
    parser.add_argument("--mode",
                        required=True,
                        type=str,
                        help="train/val/test/sample",
                        choices=['train', 'test', 'val', 'sample'])
    parser.add_argument("--obs_len",
                        default=2,
                        type=int,
                        help="Observed length of the trajectory in seconds",
                        choices=[1, 2, 3, 4, 5])
    parser.add_argument("--filter",
                        default='ekf',
                        type=str,
                        help="Filter to process the data noise. (ekf/none/ekf-savgol/savgol)",
                        choices=['ekf', 'none', 'ekf-savgol', 'savgol'])
    return parser.parse_args()

if __name__ == '__main__':
    #_filters = ['none', 'ekf', 'savgol', 'ekf-savgol']
    #_modes = ['train', 'val', 'test', 'sample']
    #_obs_len = [2,5]
    #seg = _obs_len[0]
    #mode = _modes[3]
    #filter_name = _filters[0]

    args = parse_arguments()

    if args.mode == 'test':
        args.obs_len = 2

    assert os.path.exists(args.data_dir), \
        f'[Analysis][main][ERROR] data_dir not found!({args.data_dir})'

    data_file = 'features_{}_{}s_{}.npy'.format(args.mode, args.obs_len, args.filter)
    assert os.path.exists(os.path.join(args.data_dir, data_file)), \
        f'[Analysis][main][ERROR] data_file not found!({data_file})'

    print('[Analysis] loading dataset....')
    # (m, 4)
    # [mean_v, mean_acc, mean_deac, std_jy]
    data = np.load(os.path.join(args.data_dir, data_file))

    print('[Analysis] mode:{} | filter:{} | obs_len:{}'.format(args.mode, args.filter, args.obs_len))
    print('[Analysis] data shape:{}'.format(data.shape))
    print('[Analysis] stats:')
    stats(data)
[ 11748, 299, 32152, 355, 45941, 198, 6738, 19720, 1330, 4377, 11, 360, 713, 11, 7343, 11, 309, 29291, 11, 1400, 13615, 628, 198, 11748, 1822, 29572, 198, 11748, 28686, 198, 198, 4299, 21136, 62, 853, 2886, 3419, 4613, 4377, 25, 198, 19...
2.255924
844
# -*- encoding: utf-8 -*-
# Copyright (c) 2017 Servionica
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import datetime

import freezegun
import mock
import oslo_messaging as om

from watcher.common import rpc
from watcher import notifications
from watcher.objects import service as w_service
from watcher.tests.db import base
from watcher.tests.objects import utils
[ 2, 532, 9, 12, 21004, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 15069, 357, 66, 8, 2177, 3116, 295, 3970, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, ...
3.625
240
# https://leetcode.com/problems/best-time-to-buy-and-sell-stock-ii/
#
# Say you have an array for which the ith element is the price of a given stock on day i.
#
# Design an algorithm to find the maximum profit.
# You may complete as many transactions as you like (ie, buy one and sell one share of the stock multiple times).
# However, you may not engage in multiple transactions at the same time (ie, you must sell the stock before you buy again).

if __name__ == '__main__':
    prices = [8,9,2,5]
    s = Solution()
    print s.maxProfit(prices)
[ 2, 3740, 1378, 293, 316, 8189, 13, 785, 14, 1676, 22143, 14, 13466, 12, 2435, 12, 1462, 12, 17846, 12, 392, 12, 7255, 12, 13578, 12, 4178, 14, 198, 2, 198, 2, 13816, 345, 423, 281, 7177, 329, 543, 262, 340, 71, 5002, 318, 262, ...
3.222222
171
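The record above calls a Solution class the sample truncates away. A minimal sketch of the standard greedy solution to this problem (sum every positive day-over-day price difference), written in Python 3 as an illustration rather than the record's original code:

class Solution(object):
    def maxProfit(self, prices):
        # Every upward price move can be captured by buying the day before
        # and selling the day of, so summing the positive deltas gives the
        # maximum total profit.
        return sum(max(prices[i + 1] - prices[i], 0)
                   for i in range(len(prices) - 1))

print(Solution().maxProfit([8, 9, 2, 5]))  # 1 + 3 = 4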
import json
import os

import responses
from django.urls import reverse

from .. import TestAdminMixin, TestLociMixin
[ 11748, 33918, 198, 11748, 28686, 198, 198, 11748, 9109, 198, 6738, 42625, 14208, 13, 6371, 82, 1330, 9575, 198, 198, 6738, 11485, 1330, 6208, 46787, 35608, 259, 11, 6208, 43, 1733, 35608, 259, 628 ]
3.5
34
import torch
from torch import nn
from torch.nn.parameter import Parameter

from einops import rearrange, reduce, repeat
[ 11748, 28034, 198, 6738, 28034, 1330, 299, 77, 198, 6738, 28034, 13, 20471, 13, 17143, 2357, 1330, 25139, 2357, 198, 198, 6738, 304, 259, 2840, 1330, 37825, 858, 11, 4646, 11, 9585, 198 ]
3.666667
33
from . import test_helpers
from . import test_image_opener
from . import test_image_metrick
from . import test_compare_tools
from . import test_compare_api
[ 6738, 764, 1330, 1332, 62, 16794, 364, 198, 6738, 764, 1330, 1332, 62, 9060, 62, 404, 877, 198, 6738, 764, 1330, 1332, 62, 9060, 62, 4164, 5557, 198, 6738, 764, 1330, 1332, 62, 5589, 533, 62, 31391, 198, 6738, 764, 1330, 1332, 62, ...
3.297872
47
from django.contrib import admin
from django.urls import path
from .views import index, email, post_detail, posts, hot_takes, take_detail
from . import views

app_name = "core"

urlpatterns = [
    path('', views.index, name="index"),
    path('email/', views.email, name="email"),
    path('post/<slug>/', views.post_detail, name='post'),
    path('posts/', views.posts, name='posts'),
    path('takes/', views.hot_takes, name='takes'),
    path('take/<slug>/', views.take_detail, name='take'),
]
[ 6738, 42625, 14208, 13, 3642, 822, 1330, 13169, 198, 6738, 42625, 14208, 13, 6371, 82, 1330, 3108, 198, 6738, 764, 33571, 1330, 6376, 11, 3053, 11, 1281, 62, 49170, 11, 6851, 11, 3024, 62, 83, 1124, 11, 1011, 62, 49170, 198, 6738, 7...
2.664835
182
from .grid import render_table
[ 6738, 764, 25928, 1330, 8543, 62, 11487 ]
4.285714
7
from mongoengine import *
from dotenv import load_dotenv
from os import getenv
from cassandra.cluster import Cluster
from cassandra.auth import PlainTextAuthProvider
from cassandra.cqlengine import connection
from cassandra.cqlengine.management import sync_table
from cassandra.query import ordered_dict_factory

from model.discover import *
from model.blueprint import *
from model.disk import *
from model.storage import *
from model.project import *
from model.network import *
from model.user import *

load_dotenv()

cass_db = getenv("CASS_DB")
cass_password = getenv("CASS_PASSWORD")
cass_user = getenv("CASS_USER")
[ 6738, 285, 25162, 18392, 1330, 1635, 198, 6738, 16605, 24330, 1330, 3440, 62, 26518, 24330, 198, 6738, 28686, 1330, 651, 24330, 198, 6738, 30606, 15918, 13, 565, 5819, 1330, 38279, 198, 6738, 30606, 15918, 13, 18439, 1330, 28847, 8206, 30...
3.412088
182
# stdlib
import importlib
import sys
from typing import Any
from typing import Any as TypeAny
from typing import Dict as TypeDict
from typing import Optional

# third party
from packaging import version

# syft relative
from ..ast.globals import Globals
from ..lib.python import create_python_ast
from ..lib.torch import create_torch_ast
from ..lib.torchvision import create_torchvision_ast
from ..logger import critical
from ..logger import traceback_and_raise
from .misc import create_union_ast

# now we need to load the relevant frameworks onto the node
lib_ast = create_lib_ast(None)
[ 2, 14367, 8019, 198, 11748, 1330, 8019, 198, 11748, 25064, 198, 6738, 19720, 1330, 4377, 198, 6738, 19720, 1330, 4377, 355, 5994, 7149, 198, 6738, 19720, 1330, 360, 713, 355, 5994, 35, 713, 198, 6738, 19720, 1330, 32233, 198, 198, 2, ...
3.562874
167
#!/usr/bin/env python
# coding: utf-8

# In[ ]:

import pysam
import os
import pandas as pd
import numpy as np
import time
import argparse
import sys
from multiprocessing import Pool

# In[ ]:

# ##arguments for testing
# bam_file_path = '/fh/scratch/delete90/ha_g/realigned_bams/cfDNA_MBC_ULP_hg38/realign_bam_paired_snakemake-master/results/MBC_1041_1_ULP/MBC_1041_1_ULP_recalibrated.bam'
# bam_file_name = 'MBC_1041_1_ULP'
# mapable_path = '../../downloads/genome/repeat_masker.mapable.k50.Umap.hg38.bedGraph'
# ref_seq_path = '/fh/fast/ha_g/grp/reference/GRCh38/GRCh38.fa'
# chrom_sizes_path = '/fh/fast/ha_g/grp/reference/GRCh38/hg38.standard.chrom.sizes'
# out_dir = './tmp/'
# map_q = 20
# size_range = [15,500]
# CPU = 4

# In[ ]:

parser = argparse.ArgumentParser()
parser.add_argument('--bam_file', help='sample_bam_file', required=True)
parser.add_argument('--bam_file_name', help='sample name (does not need to match actual file name)', required=True)
parser.add_argument('--mapable_regions', help='highly mapable regions to be used in GC correction, bedGraph or bed format', required=True)
parser.add_argument('--ref_seq', help='reference sequence (fasta format)', required=True)
parser.add_argument('--chrom_sizes', help='path to chromosome sizes for the reference seq', required=True)
parser.add_argument('--out_dir', help='folder for GC bias results', required=True)
parser.add_argument('--map_q', help='minimum mapping quality for reads to be considered', type=int, required=True)
parser.add_argument('--size_range', help='range of read sizes to be included', nargs=2, type=int, required=True)
parser.add_argument('--CPU', help='number of CPU for parallelizing', type=int, required=True)

args = parser.parse_args()

bam_file_path = args.bam_file
bam_file_name = args.bam_file_name
mapable_path = args.mapable_regions
ref_seq_path = args.ref_seq
chrom_sizes_path = args.chrom_sizes
out_dir = args.out_dir
map_q = args.map_q
size_range = args.size_range
CPU = args.CPU

# In[ ]:

print('arguments provided:')
print('\tbam_file_path = "'+bam_file_path+'"')
print('\tbam_file_name = "'+bam_file_name+'"')
print('\tmapable_regions = "'+mapable_path+'"')
print('\tref_seq_path = "'+ref_seq_path+'"')
print('\tchrom_sizes_path = "'+chrom_sizes_path+'"')
print('\tout_dir = "'+out_dir+'"')
print('\tmap_q = '+str(map_q))
print('\tsize_range = '+str(size_range))
print('\tCPU = '+str(CPU))

# In[ ]:

mapable_name = mapable_path.rsplit('/',1)[1].rsplit('.',1)[0]
out_file = out_dir +'/'+mapable_name+'/GC_counts/'+ bam_file_name+'.GC_counts.txt'
print('out_file', out_file)

# In[ ]:

# create a directory for the GC data
if not os.path.exists(out_dir +'/'+mapable_name):
    os.mkdir(out_dir +'/'+mapable_name)
if not os.path.exists(out_dir +'/'+mapable_name+'/GC_counts/'):
    os.mkdir(out_dir +'/'+mapable_name+'/GC_counts/')

# In[ ]:

# import filter
mapable_intervals = pd.read_csv(mapable_path, sep='\t', header=None)

# remove non standard chromosomes and X and Y
chroms = ['chr'+str(m) for m in range(1,23)]
mapable_intervals = mapable_intervals[mapable_intervals[0].isin(chroms)]

print('chroms:', chroms)
print('number_of_intervals:', len(mapable_intervals))
sys.stdout.flush()

# In[ ]:

start_time = time.time()
p = Pool(processes=CPU)  # use the available CPU
sublists = np.array_split(mapable_intervals, CPU)  # split the list into sublists, one per CPU
GC_dict_list = p.map(collect_reads, sublists, 1)

# In[ ]:

all_GC_df = pd.DataFrame()
for i, GC_dict in enumerate(GC_dict_list):
    GC_df = pd.DataFrame()
    for length in GC_dict.keys():
        current = pd.Series(GC_dict[length]).reset_index()
        current = current.rename(columns={'index':'num_GC', 0:'number_of_fragments'})
        current['length'] = length
        current = current[['length','num_GC','number_of_fragments']]
        GC_df = GC_df.append(current, ignore_index=True)
    GC_df = GC_df.set_index(['length','num_GC'])
    all_GC_df[i] = GC_df['number_of_fragments']
    del(GC_df, GC_dict)

all_GC_df = all_GC_df.sum(axis=1)
all_GC_df = pd.DataFrame(all_GC_df).rename(columns={0:'number_of_fragments'})
all_GC_df = all_GC_df.reset_index()
all_GC_df.to_csv(out_file, sep='\t', index=False)

# In[ ]:

print('done')
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 19617, 25, 3384, 69, 12, 23, 198, 198, 2, 554, 58, 2361, 25, 628, 198, 11748, 279, 893, 321, 198, 11748, 28686, 198, 198, 11748, 19798, 292, 355, 279, 67, 198, 11748, 299, 32152...
2.407969
1,782
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar  1 18:17:07 2021

@author: jm
"""

# %% required libraries
import numpy as np
import pandas as pd
from sqlalchemy import create_engine

# %% connect to DB
# create connection using pymssql
engine = create_engine('mssql+pymssql://sa:<YourStrong@Passw0rd>@localhost:1433/rga')
connection = engine.connect()

# %% read data sets from where I will build the dimension tables
# read employee roster data
employee_roster = pd.read_excel("datasources/Employee_Roster_Data.xlsx", sheet_name='Sheet1')

# read skills data
skills = pd.read_excel("datasources/skills.xlsx", sheet_name="Sheet1")

# read hours data
hours = pd.read_excel("datasources/hours.xlsx", sheet_name="Sheet1")

# %% dimensions created from source employee_roster

# %% create DIM_Currency
# get unique values
currencies = sorted(employee_roster['Currency'].unique())

# create a data frame
DIM_Currency = pd.DataFrame({'id_currency': (np.arange(len(currencies)) + 1),
                             'currency': currencies})

# send data frame to DB
DIM_Currency.to_sql('DIM_Currency', con=connection, if_exists='append', index=False)

# %% create DIM_Department
# get unique values
departments = sorted(pd.concat([employee_roster['Department'], skills['Department']], axis=0).unique())

# create a data frame
DIM_Department = pd.DataFrame({'id_department': (np.arange(len(departments)) + 1),
                               'department': departments})

# send data frame to DB
DIM_Department.to_sql('DIM_Department', con=connection, if_exists='append', index=False)

# %% create DIM_Gender
# get unique values
genders = sorted(pd.concat([employee_roster['Gender'], skills['Gender']], axis=0).unique())

# create a data frame
DIM_Gender = pd.DataFrame({'id_gender': (np.arange(len(genders)) + 1),
                           'gender': genders})

# send data frame to DB
DIM_Gender.to_sql('DIM_Gender', con=connection, if_exists='append', index=False)

# %% create DIM_User
# check if 'UserId' values in 'skills' are in 'User_ID' in 'employee_roster'
# we get 20134 'True' values, meaning that all 'UserId' in 'skills' are already
# in 'User_ID' in employee_roster
users_check_1 = np.isin(skills['UserId'], employee_roster['User_ID']).sum()

# check if 'UserId' values in 'hours' are in 'User_ID' in 'employee_roster'
# we get 7659 'True' values, meaning that NOT all 'UserId' in 'hours' are already
# in 'User_ID' in employee_roster
users_check_2 = np.isin(hours['UserId'], employee_roster['User_ID']).sum()

# get unique values
users = sorted(pd.concat([employee_roster['User_ID'], skills['UserId'], hours['UserId']], axis=0).unique())

# create a data frame to use pd.merge()
df_users = pd.DataFrame({'User_ID': users})

# left join 'df_user' with 'employee_roster' on 'UserID'
users_final = pd.merge(df_users, employee_roster, on='User_ID', how='left')

# select only columns I need
users_final = users_final[['User_ID', 'Email_ID', 'Fullname']]

# rename columns
users_final.rename(columns={'User_ID': 'id_user', 'Email_ID': 'id_email', 'Fullname': 'fullname'}, inplace=True)

# send data frame to DB
users_final.to_sql('DIM_User', con=connection, if_exists='append', index=False)

# %% dimensions created from source skills

# %% create DIM_AttributeGroup
# get unique values
att_group = sorted(skills['Attribute Group'].unique())

# create a data frame
DIM_AttributeGroup = pd.DataFrame({'id_att_group': (np.arange(len(att_group)) + 1),
                                   'attribute_group': att_group})

# send data frame to DB
DIM_AttributeGroup.to_sql('DIM_AttributeGroup', con=connection, if_exists='append', index=False)

# %% create DIM_AttributeSubGroup
# get unique values
att_sub_group = sorted(skills['Attribute Sub-Group'].unique())

# create a data frame
DIM_AttributeSubGroup = pd.DataFrame({'id_att_sub_group': (np.arange(len(att_sub_group)) + 1),
                                      'attribute_sub_group': att_sub_group})

# send data frame to DB
DIM_AttributeSubGroup.to_sql('DIM_AttributeSubGroup', con=connection, if_exists='append', index=False)

# %% create DIM_AttributeName
# get unique values
att_name = sorted(skills['Attribute Name'].unique())

# create a data frame
DIM_AttributeName = pd.DataFrame({'id_att_name': (np.arange(len(att_name)) + 1),
                                  'attribute_name': att_name})

# send data frame to DB
DIM_AttributeName.to_sql('DIM_AttributeName', con=connection, if_exists='append', index=False)
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 37811, 198, 41972, 319, 2892, 1526, 220, 352, 1248, 25, 1558, 25, 2998, 33448, 198, 198, 31, 9800, 25, 474, 76, ...
2.882004
1,517
import discord
from discord.ext import commands

arrow = "<a:right:877425183839891496>"
kwee = "<:kannawee:877036162122924072>"
kdance = "<a:kanna_dance:877038778798207016>"
kbored = "<:kanna_bored:877036162827583538>"
ksmug = "<:kanna_smug:877038777896427560>"
heart = "<a:explosion_heart:877426228775227392>"
[ 11748, 36446, 198, 6738, 36446, 13, 2302, 1330, 9729, 198, 198, 6018, 796, 33490, 64, 25, 3506, 25, 42802, 32114, 1507, 2548, 2670, 4531, 1415, 4846, 24618, 198, 74, 732, 68, 796, 33490, 25, 74, 1236, 707, 1453, 25, 5774, 2154, 2623, ...
2.313433
134
for ch in "Hello world!": d = ord(ch) h = hex(d) o = oct(d) b = bin(d) print ch, d, h, o, b
[ 1640, 442, 287, 366, 15496, 995, 0, 1298, 198, 220, 220, 288, 796, 2760, 7, 354, 8, 198, 220, 220, 289, 796, 17910, 7, 67, 8, 198, 220, 220, 267, 796, 19318, 7, 67, 8, 198, 220, 220, 275, 796, 9874, 7, 67, 8, 628, 220, 220, ...
1.87931
58
# Copyright (c) 2016 Roger Light <roger@atchoo.org>
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
# and Eclipse Distribution License v1.0 which accompany this distribution.
#
# The Eclipse Public License is available at
#    http://www.eclipse.org/legal/epl-v10.html
# and the Eclipse Distribution License is available at
#    http://www.eclipse.org/org/documents/edl-v10.php.
#
# Contributors:
#    Roger Light - initial API and implementation

"""
This module provides some helper functions to allow straightforward subscribing
to topics and retrieving messages. The two functions are simple(), which
returns one or more messages matching a set of topics, and callback() which
allows you to pass a callback for processing of messages.
"""

import paho.mqtt.client as paho
import paho.mqtt as mqtt
import ssl


def _on_connect(c, userdata, flags, rc):
    """Internal callback"""
    if rc != 0:
        raise mqtt.MQTTException(paho.connack_string(rc))

    if type(userdata['topics']) is list:
        for t in userdata['topics']:
            c.subscribe(t, userdata['qos'])
    else:
        c.subscribe(userdata['topics'], userdata['qos'])


def _on_message_callback(c, userdata, message):
    """Internal callback"""
    userdata['callback'](c, userdata['userdata'], message)


def _on_message_simple(c, userdata, message):
    """Internal callback"""
    if userdata['msg_count'] == 0:
        return

    # Don't process stale retained messages if 'retained' was false
    if userdata['retained'] == False and message.retain == True:
        return

    userdata['msg_count'] = userdata['msg_count'] - 1

    if userdata['messages'] is None and userdata['msg_count'] == 0:
        userdata['messages'] = message
        c.disconnect()
        return

    userdata['messages'].append(message)

    if userdata['msg_count'] == 0:
        c.disconnect()


def callback(callback, topics, qos=0, userdata=None, hostname="localhost",
             port=1883, client_id="", keepalive=60, will=None, auth=None,
             tls=None, protocol=paho.MQTTv311, transport="tcp"):
    """Subscribe to a list of topics and process them in a callback function.

    This function creates an MQTT client, connects to a broker and subscribes
    to a list of topics. Incoming messages are processed by the user provided
    callback. This is a blocking function and will never return.

    callback : function of the form "on_message(client, userdata, message)"
               for processing the messages received.

    topics : either a string containing a single topic to subscribe to, or a
             list of topics to subscribe to.

    qos : the qos to use when subscribing. This is applied to all topics.

    userdata : passed to the callback

    hostname : a string containing the address of the broker to connect to.
               Defaults to localhost.

    port : the port to connect to the broker on. Defaults to 1883.

    client_id : the MQTT client id to use. If "" or None, the Paho library will
                generate a client id automatically.

    keepalive : the keepalive timeout value for the client. Defaults to 60
                seconds.

    will : a dict containing will parameters for the client:
           will = {'topic': "<topic>", 'payload':"<payload>", 'qos':<qos>,
                   'retain':<retain>}.
           Topic is required, all other parameters are optional and will
           default to None, 0 and False respectively.
           Defaults to None, which indicates no will should be used.

    auth : a dict containing authentication parameters for the client:
           auth = {'username':"<username>", 'password':"<password>"}
           Username is required, password is optional and will default to None
           if not provided.
           Defaults to None, which indicates no authentication is to be used.

    tls : a dict containing TLS configuration parameters for the client:
          dict = {'ca_certs':"<ca_certs>", 'certfile':"<certfile>",
                  'keyfile':"<keyfile>", 'tls_version':"<tls_version>",
                  'ciphers':"<ciphers>"}
          ca_certs is required, all other parameters are optional and will
          default to None if not provided, which results in the client using
          the default behaviour - see the paho.mqtt.client documentation.
          Defaults to None, which indicates that TLS should not be used.

    transport : set to "tcp" to use the default setting of transport which is
                raw TCP. Set to "websockets" to use WebSockets as the transport.
    """
    if qos < 0 or qos > 2:
        raise ValueError('qos must be in the range 0-2')

    callback_userdata = {
        'callback':callback,
        'topics':topics,
        'qos':qos,
        'userdata':userdata}

    client = paho.Client(client_id=client_id, userdata=callback_userdata,
                         protocol=protocol, transport=transport)
    client.on_message = _on_message_callback
    client.on_connect = _on_connect

    if auth is not None:
        username = auth['username']
        try:
            password = auth['password']
        except KeyError:
            password = None
        client.username_pw_set(username, password)

    if will is not None:
        will_topic = will['topic']
        try:
            will_payload = will['payload']
        except KeyError:
            will_payload = None
        try:
            will_qos = will['qos']
        except KeyError:
            will_qos = 0
        try:
            will_retain = will['retain']
        except KeyError:
            will_retain = False

        client.will_set(will_topic, will_payload, will_qos, will_retain)

    if tls is not None:
        ca_certs = tls['ca_certs']
        try:
            certfile = tls['certfile']
        except KeyError:
            certfile = None
        try:
            keyfile = tls['keyfile']
        except KeyError:
            keyfile = None
        try:
            tls_version = tls['tls_version']
        except KeyError:
            tls_version = ssl.PROTOCOL_SSLv23
        try:
            ciphers = tls['ciphers']
        except KeyError:
            ciphers = None
        client.tls_set(ca_certs, certfile, keyfile, tls_version=tls_version,
                       ciphers=ciphers)

    client.connect(hostname, port, keepalive)
    client.loop_forever()


def simple(topics, qos=0, msg_count=1, retained=True, hostname="localhost",
           port=1883, client_id="", keepalive=60, will=None, auth=None,
           tls=None, protocol=paho.MQTTv311, transport="tcp"):
    """Subscribe to a list of topics and return msg_count messages.

    This function creates an MQTT client, connects to a broker and subscribes
    to a list of topics. Once "msg_count" messages have been received, it
    disconnects cleanly from the broker and returns the messages.

    topics : either a string containing a single topic to subscribe to, or a
             list of topics to subscribe to.

    qos : the qos to use when subscribing. This is applied to all topics.

    msg_count : the number of messages to retrieve from the broker.
                if msg_count == 1 then a single MQTTMessage will be returned.
                if msg_count > 1 then a list of MQTTMessages will be returned.

    retained : If set to True, retained messages will be processed the same as
               non-retained messages. If set to False, retained messages will
               be ignored. This means that with retained=False and msg_count=1,
               the function will return the first message received that does
               not have the retained flag set.

    hostname : a string containing the address of the broker to connect to.
               Defaults to localhost.

    port : the port to connect to the broker on. Defaults to 1883.

    client_id : the MQTT client id to use. If "" or None, the Paho library will
                generate a client id automatically.

    keepalive : the keepalive timeout value for the client. Defaults to 60
                seconds.

    will : a dict containing will parameters for the client:
           will = {'topic': "<topic>", 'payload':"<payload>", 'qos':<qos>,
                   'retain':<retain>}.
           Topic is required, all other parameters are optional and will
           default to None, 0 and False respectively.
           Defaults to None, which indicates no will should be used.

    auth : a dict containing authentication parameters for the client:
           auth = {'username':"<username>", 'password':"<password>"}
           Username is required, password is optional and will default to None
           if not provided.
           Defaults to None, which indicates no authentication is to be used.

    tls : a dict containing TLS configuration parameters for the client:
          dict = {'ca_certs':"<ca_certs>", 'certfile':"<certfile>",
                  'keyfile':"<keyfile>", 'tls_version':"<tls_version>",
                  'ciphers':"<ciphers>"}
          ca_certs is required, all other parameters are optional and will
          default to None if not provided, which results in the client using
          the default behaviour - see the paho.mqtt.client documentation.
          Defaults to None, which indicates that TLS should not be used.

    transport : set to "tcp" to use the default setting of transport which is
                raw TCP. Set to "websockets" to use WebSockets as the transport.
    """
    if msg_count < 1:
        raise ValueError('msg_count must be > 0')

    # Set ourselves up to return a single message if msg_count == 1, or a list
    # if > 1.
    if msg_count == 1:
        messages = None
    else:
        messages = []

    userdata = {'retained':retained, 'msg_count':msg_count, 'messages':messages}

    callback(_on_message_simple, topics, qos, userdata, hostname, port,
             client_id, keepalive, will, auth, tls, protocol, transport)

    return userdata['messages']
[ 2, 15069, 357, 66, 8, 1584, 13637, 4401, 1279, 305, 1362, 31, 963, 2238, 13, 2398, 29, 198, 2, 198, 2, 1439, 2489, 10395, 13, 770, 1430, 290, 262, 19249, 5696, 198, 2, 389, 925, 1695, 739, 262, 2846, 286, 262, 30991, 5094, 13789, ...
2.62199
3,820
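A minimal usage sketch for the two helpers in the row above (not part of the dataset row; it assumes a broker on localhost and that the module is importable as paho.mqtt.subscribe, which is where this file lives in the Paho distribution):

import paho.mqtt.subscribe as subscribe

# Block until a single non-retained message arrives on the topic.
msg = subscribe.simple("sensors/temperature", qos=1, retained=False,
                       hostname="localhost")
print(msg.topic, msg.payload)

# Or hand every matching message to a callback (blocks forever).
def on_message(client, userdata, message):
    print(message.topic, message.payload)

subscribe.callback(on_message, "sensors/#", hostname="localhost")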
from collections import defaultdict


if __name__ == "__main__":
    d = defaultdict(set)

    # with open('aoc_day_24_sample.txt') as f:
    with open("aoc_day_24_input.txt") as f:
        sample = f.readlines()

    # sample = [
    #     '0/1',
    #     '1/2',
    #     '1/3',
    #     '1/4',
    #     '5/0',
    #     '2/5',
    #     '3/6',
    #     '4/500'
    # ]

    for component in sample:
        a, b = map(int, component.split("/"))
        d[a].add(component)
        d[b].add(component)

    solution()
[ 6738, 17268, 1330, 4277, 11600, 628, 628, 198, 198, 361, 11593, 3672, 834, 6624, 366, 834, 12417, 834, 1298, 198, 220, 220, 220, 288, 796, 4277, 11600, 7, 2617, 8, 198, 220, 220, 220, 1303, 351, 1280, 10786, 64, 420, 62, 820, 62, ...
1.841549
284
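The snippet above calls a solution() that it never defines; the original file presumably defines it elsewhere. A hedged sketch of what it plausibly does for this puzzle (Advent of Code 2017 day 24: build the strongest "bridge" of port-matching components; the DFS below is an illustrative assumption, and the call site would need to pass the map, e.g. print(solution(d))):

def solution(d, port=0, used=frozenset()):
    # Try every unused component whose ports include `port`, and
    # return the maximum total strength reachable from here.
    best = 0
    for component in d[port] - used:
        a, b = map(int, component.split("/"))
        nxt = b if a == port else a
        best = max(best, a + b + solution(d, nxt, used | {component}))
    return best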
import numpy as np
import network


if __name__ == "__main__":
    main()
[ 11748, 299, 32152, 355, 45941, 198, 11748, 3127, 628, 198, 198, 361, 11593, 3672, 834, 6624, 366, 834, 12417, 834, 1298, 198, 220, 220, 220, 1388, 3419, 198 ]
2.678571
28
#draw the predictions from real-time.py

import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib import style

style.use('fivethirtyeight')

fig = plt.figure()
ax1 = fig.add_subplot(1,1,1)

ani = animation.FuncAnimation(fig, animate, interval=1000)
plt.show()
[ 2, 19334, 262, 16277, 422, 1103, 12, 2435, 13, 9078, 198, 198, 11748, 2603, 29487, 8019, 13, 9078, 29487, 355, 458, 83, 198, 11748, 2603, 29487, 8019, 13, 11227, 341, 355, 11034, 198, 6738, 2603, 29487, 8019, 1330, 3918, 198, 7635, 13...
2.92
100
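FuncAnimation above is handed an animate function that the snippet does not define. A minimal sketch of the usual pattern (illustrative assumption; the data file name is hypothetical, standing in for whatever real-time.py writes):

def animate(i):
    # Re-read the latest predictions and redraw the axes on every tick.
    xs, ys = [], []
    with open("predictions.txt") as f:  # hypothetical output of real-time.py
        for line in f:
            x, y = line.strip().split(",")
            xs.append(float(x))
            ys.append(float(y))
    ax1.clear()
    ax1.plot(xs, ys)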
""" Code to load a policy and generate rollout data. Adapted from https://github.com/berkeleydeeprlcourse. Example usage: python run_policy.py ../trained_policies/Humanoid-v1/policy_reward_11600/lin_policy_plus.npz Humanoid-v1 --render \ --num_rollouts 20 """ import numpy as np import gym if __name__ == '__main__': main()
[ 37811, 198, 198, 10669, 284, 3440, 257, 2450, 290, 7716, 38180, 1366, 13, 30019, 276, 422, 3740, 1378, 12567, 13, 785, 14, 527, 13490, 22089, 45895, 17319, 13, 198, 16281, 8748, 25, 198, 220, 220, 220, 21015, 1057, 62, 30586, 13, 9078...
2.710938
128
""" Generates Tisserand plots """ from enum import Enum import numpy as np from astropy import units as u from matplotlib import pyplot as plt from poliastro.plotting._base import BODY_COLORS from poliastro.twobody.mean_elements import get_mean_elements from poliastro.util import norm
[ 37811, 2980, 689, 309, 747, 263, 392, 21528, 37227, 198, 6738, 33829, 1330, 2039, 388, 198, 198, 11748, 299, 32152, 355, 45941, 198, 6738, 6468, 28338, 1330, 4991, 355, 334, 198, 6738, 2603, 29487, 8019, 1330, 12972, 29487, 355, 458, 83...
3.11828
93
from KeyValueTree import KeyValueTree
from truth.models import KeyValue as TruthKeyValue, Truth
from systems.models import KeyValue as KeyValue
from django.test.client import RequestFactory
from api_v2.keyvalue_handler import KeyValueHandler
import json

factory = RequestFactory()
[ 6738, 7383, 11395, 27660, 1330, 7383, 11395, 27660, 198, 6738, 3872, 13, 27530, 1330, 7383, 11395, 355, 14056, 9218, 11395, 11, 14056, 198, 6738, 3341, 13, 27530, 1330, 7383, 11395, 355, 7383, 11395, 198, 6738, 42625, 14208, 13, 9288, 13,...
3.985915
71
from .trim import trim
from .sample import sample
from .sort import sort


function_map = {
    'trim': trim,
    'sample': sample,
    'sort': sort
}
[ 6738, 764, 2213, 320, 1330, 15797, 198, 6738, 764, 39873, 1330, 6291, 198, 6738, 764, 30619, 1330, 3297, 628, 198, 8818, 62, 8899, 796, 1391, 198, 220, 220, 220, 705, 2213, 320, 10354, 15797, 11, 198, 220, 220, 220, 705, 39873, 10354,...
2.745455
55
from .nirspec import divspec
from .nirspec import gluespec
[ 6738, 764, 32986, 16684, 1330, 2659, 16684, 198, 6738, 764, 32986, 16684, 1330, 1278, 947, 43106, 198 ]
3.470588
17
from __future__ import absolute_import
from __future__ import print_function
import datetime
import os
import random
import sys
import uuid
import base64
import yaml
import re

try:
    import en
except:
    print("DOWNLOAD NODECUBE")
    print("""wget https://www.nodebox.net/code/data/media/linguistics.zip
unzip linguistics.zip""")

VERSION = "1.1"
THEME_PROB = 0

bnf = bnfDictionary('brain.yaml')


if __name__ == '__main__':
    poemtype = 'poem'
    if 'mushy' in sys.argv[1:]:
        poemtype = 'mushypoem'
    p, seed_str = generate_poem(poemtype)
    print(("*"*30 + "\n"*5))
    filtered = []
    for line in re.sub("<.*?>", " ", p).split("\n"):
        if len(line.strip()) > 0:
            filtered.append(line.strip())
        else:
            filtered.append("pause")
    print(p)
[ 6738, 11593, 37443, 834, 1330, 4112, 62, 11748, 198, 6738, 11593, 37443, 834, 1330, 3601, 62, 8818, 198, 11748, 4818, 8079, 198, 11748, 28686, 198, 11748, 4738, 198, 11748, 25064, 198, 11748, 334, 27112, 198, 11748, 2779, 2414, 198, 11748...
2.267806
351
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from openstack.block_storage.v2 import _proxy
from openstack.block_storage.v2 import snapshot
from openstack.block_storage.v2 import stats
from openstack.block_storage.v2 import type
from openstack.block_storage.v2 import volume
from openstack.tests.unit import test_proxy_base
[ 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 345, 743, 198, 2, 407, 779, 428, 2393, 2845, 287, 11846, 351, 262, 13789, 13, 921, 743, 7330, 198, 2, 257, 4866, 286, 262, 13789, 379, 198, 2,...
3.716216
222
#!/usr/bin/env python

import argparse
import CppHeaderParser
import re
import sys
import yaml
import copy
import six
import os.path
import traceback


def main():
    a = Application()
    sys.exit(a.exec_())


if __name__ == "__main__":
    # Execute only if run as a script
    main()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 198, 11748, 1822, 29572, 198, 11748, 327, 381, 39681, 46677, 198, 11748, 302, 198, 11748, 25064, 198, 11748, 331, 43695, 198, 11748, 4866, 198, 11748, 2237, 198, 11748, 28686, 13, 6978, ...
2.803922
102
import typing as t
from http.server import HTTPServer, BaseHTTPRequestHandler

from . import response as resp
[ 11748, 19720, 355, 256, 198, 6738, 2638, 13, 15388, 1330, 38288, 18497, 11, 7308, 40717, 18453, 25060, 198, 198, 6738, 764, 1330, 2882, 355, 1217, 628, 628 ]
4.185185
27
#!/usr/bin/env python3

from fairseq.modules import multihead_attention as fair_multihead
from pytorch_translate.attention import (
    BaseAttention,
    attention_utils,
    register_attention,
)
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 628, 198, 6738, 3148, 41068, 13, 18170, 1330, 5021, 2256, 62, 1078, 1463, 355, 3148, 62, 41684, 2256, 198, 6738, 12972, 13165, 354, 62, 7645, 17660, 13, 1078, 1463, 1330, 357, 198, 220,...
2.941176
68
"""Support for Purrsong LavvieBot S""" import asyncio import logging import voluptuous as vol from lavviebot import LavvieBotApi import homeassistant.helpers.config_validation as cv from homeassistant import config_entries from homeassistant.const import EVENT_HOMEASSISTANT_STOP from homeassistant.exceptions import ConfigEntryNotReady from homeassistant.const import ( CONF_PASSWORD, CONF_USERNAME ) from .const import DOMAIN _LOGGER = logging.getLogger(__name__) def setup(hass, config): """Setup of the component""" return True
[ 37811, 15514, 329, 9330, 3808, 506, 21438, 85, 494, 20630, 311, 37811, 198, 11748, 30351, 952, 198, 11748, 18931, 198, 11748, 2322, 37623, 5623, 355, 2322, 198, 6738, 21606, 85, 494, 13645, 1330, 21438, 85, 494, 20630, 32, 14415, 198, 1...
3.129944
177
from mars import main_loop
import numpy as np
from mars.settings import *


if __name__ == "__main__":
    main_loop(Problem())
[ 198, 6738, 48962, 1330, 1388, 62, 26268, 198, 11748, 299, 32152, 355, 45941, 198, 6738, 48962, 13, 33692, 1330, 1635, 628, 198, 361, 11593, 3672, 834, 6624, 366, 834, 12417, 834, 1298, 198, 220, 220, 220, 1388, 62, 26268, 7, 40781, 28...
3
43
# -*- coding: utf-8 -*-
"""
Check if it is Thai text
"""
import string

_DEFAULT_IGNORE_CHARS = string.whitespace + string.digits + string.punctuation


def isthaichar(ch: str) -> bool:
    """
    Check if a character is Thai

    :param str ch: input character
    :return: True or False
    """
    ch_val = ord(ch)
    if ch_val >= 3584 and ch_val <= 3711:
        return True
    return False


def isthai(word: str, ignore_chars: str = ".") -> bool:
    """
    Check if all characters are Thai

    :param str word: input text
    :param str ignore_chars: characters to be ignored (i.e. will be considered as Thai)
    :return: True or False
    """
    if not ignore_chars:
        ignore_chars = ""
    for ch in word:
        if ch not in ignore_chars and not isthaichar(ch):
            return False
    return True


def countthai(text: str, ignore_chars: str = _DEFAULT_IGNORE_CHARS) -> float:
    """
    :param str text: input text
    :return: float, proportion of characters in the text that are Thai characters
    """
    if not text or not isinstance(text, str):
        return 0

    if not ignore_chars:
        ignore_chars = ""

    num_thai = 0
    num_ignore = 0

    for ch in text:
        if ch in ignore_chars:
            num_ignore += 1
        elif isthaichar(ch):
            num_thai += 1

    num_count = len(text) - num_ignore
    return (num_thai / num_count) * 100
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 37811, 198, 9787, 611, 340, 318, 18933, 2420, 198, 37811, 198, 11748, 4731, 198, 198, 62, 7206, 38865, 62, 16284, 6965, 62, 3398, 27415, 796, 4731, 13, 1929, 2737, 10223...
2.39322
590
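A quick usage sketch for the three helpers above; the expected results follow directly from the Unicode range checked in isthaichar:

print(isthaichar("ก"))      # True: ord("ก") == 3585 falls within 3584..3711
print(isthai("สวัสดี."))     # True: "." is ignored by default
print(isthai("Thai"))        # False: Latin letters are not Thai
print(countthai("สวัสดี"))   # 100.0: every character is Thai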
print(b)
print(c)
print(d)
print(e)
print(f)
print(g)
[ 198, 4798, 7, 65, 8, 198, 4798, 7, 66, 8, 198, 4798, 7, 67, 8, 198, 4798, 7, 68, 8, 198, 4798, 7, 69, 8, 198, 4798, 7, 70, 8 ]
1.8
30
import numpy as np
from scipy.signal import savgol_filter
import matplotlib.pyplot as plt
import MadDog

x = []
y = []

# Generating the noisy signal
x, y = fill_data()
print(len(y))

# Savitzky-Golay filter
x_filtered, y_filtered = savitzky(x, y, 2)

print("X unfiltered>> ", x)
print("Y unfiltered>> ", y)
print("X filtered>> ", x_filtered)
print("Y filtered>> ", y_filtered)

show(x_filtered, y_filtered, x, y)
[ 11748, 299, 32152, 355, 45941, 198, 6738, 629, 541, 88, 13, 12683, 282, 1330, 6799, 70, 349, 62, 24455, 198, 11748, 2603, 29487, 8019, 13, 9078, 29487, 355, 458, 83, 198, 11748, 4627, 32942, 198, 198, 87, 796, 17635, 198, 88, 796, 1...
2.527273
165
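The row above leans on helpers (MadDog, fill_data, savitzky, show) that are not shown. A self-contained sketch of the same idea using only scipy's public savgol_filter (window length and polynomial order are arbitrary illustrative choices):

import numpy as np
from scipy.signal import savgol_filter
import matplotlib.pyplot as plt

x = np.linspace(0, 2 * np.pi, 100)
y = np.sin(x) + np.random.normal(0, 0.2, x.size)  # noisy signal

# Savitzky-Golay: fit a quadratic in a sliding 11-sample window.
y_filtered = savgol_filter(y, window_length=11, polyorder=2)

plt.plot(x, y, label="noisy")
plt.plot(x, y_filtered, label="filtered")
plt.legend()
plt.show()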
#!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Adds an ad customizer feed.

Associates the feed with customer and adds an ad that uses the feed to populate
dynamic data.

Tags: CustomerFeedService.mutate, FeedItemService.mutate
Tags: FeedMappingService.mutate, FeedService.mutate
Tags: AdGroupAdService.mutate
"""

__author__ = ('api.msaniscalchi@gmail.com (Mark Saniscalchi)',
              'yufeng.dev@gmail.com (Yufeng Guo)')

# Import appropriate classes from the client library.
from googleads import adwords

# See the Placeholder reference page for a list of all the placeholder types
# and fields:
# https://developers.google.com/adwords/api/docs/appendix/placeholders
PLACEHOLDER_AD_CUSTOMIZER = '10'
PLACEHOLDER_FIELD_INTEGER = '1'
PLACEHOLDER_FIELD_FLOAT = '2'
PLACEHOLDER_FIELD_PRICE = '3'
PLACEHOLDER_FIELD_DATE = '4'
PLACEHOLDER_FIELD_STRING = '5'

ADGROUPS = [
    'INSERT_ADGROUP_ID_HERE',
    'INSERT_ADGROUP_ID_HERE'
]
FEEDNAME = 'INSERT_FEED_NAME_HERE'


if __name__ == '__main__':
  # Initialize client object.
  adwords_client = adwords.AdWordsClient.LoadFromStorage()

  main(adwords_client, ADGROUPS)
[ 2, 48443, 14629, 14, 8800, 14, 29412, 198, 2, 198, 2, 15069, 1946, 3012, 3457, 13, 1439, 6923, 33876, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, ...
2.98594
569
import pytest

from pyminhash import MinHash
from pyminhash.datasets import load_data
[ 11748, 12972, 9288, 198, 198, 6738, 12972, 1084, 17831, 1330, 1855, 26257, 198, 6738, 12972, 1084, 17831, 13, 19608, 292, 1039, 1330, 3440, 62, 7890, 628, 628, 628, 198 ]
3.172414
29
from datetime import datetime, timedelta
from typing import final

from tools import localize_time

RSS_URL_PREFIX: final = 'https://www.youtube.com/feeds/videos.xml?channel_id={0}'
LOCATION_ARGUMENT_PREFIX: final = '--location='
CHANNEL_ARGUMENT_PREFIX: final = '--channels='
LAST_CHECK_ARGUMENT_PREFIX: final = '--last-check='
TWO_WEEKS_IN_DAYS: final = 14
DEFAULT_LAST_CHECK: final = localize_time(datetime.now() - timedelta(days=TWO_WEEKS_IN_DAYS))
EMPTY: final = ''
CHANNEL_POSTS_LIMIT: final = 20
[ 6738, 4818, 8079, 1330, 4818, 8079, 11, 28805, 12514, 198, 6738, 19720, 1330, 2457, 198, 198, 6738, 4899, 1330, 1957, 1096, 62, 2435, 198, 198, 49, 5432, 62, 21886, 62, 47, 31688, 10426, 25, 2457, 796, 705, 5450, 1378, 2503, 13, 11604...
2.515
200
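A one-liner showing how the RSS prefix constant above is meant to be used (the channel id is a made-up placeholder):

feed_url = RSS_URL_PREFIX.format("UCxxxxxxxxxxxxxxxxxxxxxx")
# -> https://www.youtube.com/feeds/videos.xml?channel_id=UCxxxxxxxxxxxxxxxxxxxxxx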
# SPDX-License-Identifier: BSD-3-Clause
from amaranth import Elaboratable, Module, Signal, ResetInserter, EnableInserter

__all__ = (
    'PIC16Caravel',
)
[ 2, 30628, 55, 12, 34156, 12, 33234, 7483, 25, 347, 10305, 12, 18, 12, 2601, 682, 198, 6738, 716, 19173, 400, 1330, 2574, 4820, 21156, 11, 19937, 11, 26484, 11, 30027, 818, 2655, 353, 11, 27882, 818, 2655, 353, 198, 198, 834, 439, ...
2.59322
59
from discord.ext import commands
import discord
[ 6738, 36446, 13, 2302, 1330, 9729, 198, 11748, 36446, 628, 198 ]
4.545455
11
from mrs.bucket import WriteBucket
from mrs import BinWriter, HexWriter

# vim: et sw=4 sts=4
[ 6738, 285, 3808, 13, 27041, 316, 1330, 19430, 33, 38811, 198, 6738, 285, 3808, 1330, 20828, 34379, 11, 22212, 34379, 198, 198, 2, 43907, 25, 2123, 1509, 28, 19, 39747, 28, 19, 198 ]
2.848485
33
""" An agent which uses demonstrations and preferences. Code adapted from Learning Reward Functions by Integrating Human Demonstrations and Preferences. """ import itertools import os import time from pathlib import Path from typing import Dict, List import arviz as az from inquire.agents.agent import Agent from inquire.environments.environment import Environment from inquire.interactions.feedback import Query, Trajectory from inquire.interactions.modalities import Preference import matplotlib.pyplot as plt import numpy as np import pandas as pd import pymc3 as pm import pymc3.distributions.transforms as tr import scipy.optimize as opt import theano.tensor as tt
[ 37811, 198, 2025, 5797, 543, 3544, 18721, 290, 15387, 13, 198, 198, 10669, 16573, 422, 18252, 32307, 40480, 198, 1525, 15995, 8821, 5524, 7814, 2536, 602, 290, 49780, 13, 198, 37811, 198, 11748, 340, 861, 10141, 198, 11748, 28686, 198, ...
3.681081
185
"""This module contains all public learners and learner interfaces.""" from coba.learners.primitives import Learner, SafeLearner from coba.learners.bandit import EpsilonBanditLearner, UcbBanditLearner, FixedLearner, RandomLearner from coba.learners.corral import CorralLearner from coba.learners.vowpal import VowpalMediator from coba.learners.vowpal import VowpalArgsLearner, VowpalEpsilonLearner, VowpalSoftmaxLearner, VowpalBagLearner from coba.learners.vowpal import VowpalCoverLearner, VowpalRegcbLearner, VowpalSquarecbLearner, VowpalOffPolicyLearner from coba.learners.linucb import LinUCBLearner __all__ = [ 'Learner', 'SafeLearner', 'RandomLearner', 'FixedLearner', 'EpsilonBanditLearner', 'UcbBanditLearner', 'CorralLearner', 'LinUCBLearner', 'VowpalArgsLearner', 'VowpalEpsilonLearner', 'VowpalSoftmaxLearner', 'VowpalBagLearner', 'VowpalCoverLearner', 'VowpalRegcbLearner', 'VowpalSquarecbLearner', 'VowpalOffPolicyLearner', 'VowpalMediator' ]
[ 37811, 1212, 8265, 4909, 477, 1171, 46184, 290, 22454, 1008, 20314, 526, 15931, 198, 198, 6738, 269, 19981, 13, 35720, 364, 13, 19795, 20288, 1330, 8010, 1008, 11, 19978, 14961, 1008, 198, 6738, 269, 19981, 13, 35720, 364, 13, 3903, 270...
2.468235
425
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license

# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

"""DNS Zones."""

import re
import sys

import dns.exception
import dns.name
import dns.node
import dns.rdataclass
import dns.rdatatype
import dns.rdata
import dns.rdtypes.ANY.SOA
import dns.rrset
import dns.tokenizer
import dns.transaction
import dns.ttl
import dns.grange


def _check_cname_and_other_data(txn, name, rdataset):
    rdataset_kind = dns.node.NodeKind.classify_rdataset(rdataset)
    node = txn.get_node(name)
    if node is None:
        # empty nodes are neutral.
        return
    node_kind = node.classify()
    if node_kind == dns.node.NodeKind.CNAME and \
       rdataset_kind == dns.node.NodeKind.REGULAR:
        raise CNAMEAndOtherData('rdataset type is not compatible with a '
                                'CNAME node')
    elif node_kind == dns.node.NodeKind.REGULAR and \
         rdataset_kind == dns.node.NodeKind.CNAME:
        raise CNAMEAndOtherData('CNAME rdataset is not compatible with a '
                                'regular data node')
    # Otherwise at least one of the node and the rdataset is neutral, so
    # adding the rdataset is ok


def read_rrsets(text, name=None, ttl=None, rdclass=dns.rdataclass.IN,
                default_rdclass=dns.rdataclass.IN,
                rdtype=None, default_ttl=None, idna_codec=None,
                origin=dns.name.root, relativize=False):
    """Read one or more rrsets from the specified text, possibly subject
    to restrictions.

    *text*, a file object or a string, is the input to process.

    *name*, a string, ``dns.name.Name``, or ``None``, is the owner name of
    the rrset.  If not ``None``, then the owner name is "forced", and the
    input must not specify an owner name.  If ``None``, then any owner names
    are allowed and must be present in the input.

    *ttl*, an ``int``, string, or None.  If not ``None``, the TTL is forced
    to be the specified value and the input must not specify a TTL.  If
    ``None``, then a TTL may be specified in the input.  If it is not
    specified, then the *default_ttl* will be used.

    *rdclass*, a ``dns.rdataclass.RdataClass``, string, or ``None``.  If
    not ``None``, then the class is forced to the specified value, and the
    input must not specify a class.  If ``None``, then the input may specify
    a class that matches *default_rdclass*.  Note that it is not possible to
    return rrsets with differing classes; specifying ``None`` for the class
    simply allows the user to optionally type a class as that may be
    convenient when cutting and pasting.

    *default_rdclass*, a ``dns.rdataclass.RdataClass`` or string.  The class
    of the returned rrsets.

    *rdtype*, a ``dns.rdatatype.RdataType``, string, or ``None``.  If not
    ``None``, then the type is forced to the specified value, and the input
    must not specify a type.  If ``None``, then a type must be present for
    each RR.

    *default_ttl*, an ``int``, string, or ``None``.  If not ``None``, then
    if the TTL is not forced and is not specified, then this value will be
    used.  If ``None``, then if the TTL is not forced an error will occur
    if the TTL is not specified.

    *idna_codec*, a ``dns.name.IDNACodec``, specifies the IDNA
    encoder/decoder.  If ``None``, the default IDNA 2003 encoder/decoder
    is used.  Note that codecs only apply to the owner name; dnspython does
    not do IDNA for names in rdata, as there is no IDNA zonefile format.

    *origin*, a string, ``dns.name.Name``, or ``None``, is the origin for any
    relative names in the input, and also the origin to relativize to if
    *relativize* is ``True``.

    *relativize*, a bool.  If ``True``, names are relativized to the
    *origin*; if ``False`` then any relative names in the input are made
    absolute by appending the *origin*.
    """
    if isinstance(origin, str):
        origin = dns.name.from_text(origin, dns.name.root, idna_codec)
    if isinstance(name, str):
        name = dns.name.from_text(name, origin, idna_codec)
    if isinstance(ttl, str):
        ttl = dns.ttl.from_text(ttl)
    if isinstance(default_ttl, str):
        default_ttl = dns.ttl.from_text(default_ttl)
    if rdclass is not None:
        rdclass = dns.rdataclass.RdataClass.make(rdclass)
    default_rdclass = dns.rdataclass.RdataClass.make(default_rdclass)
    if rdtype is not None:
        rdtype = dns.rdatatype.RdataType.make(rdtype)
    manager = RRSetsReaderManager(origin, relativize, default_rdclass)
    with manager.writer(True) as txn:
        tok = dns.tokenizer.Tokenizer(text, '<input>', idna_codec=idna_codec)
        reader = Reader(tok, default_rdclass, txn, allow_directives=False,
                        force_name=name, force_ttl=ttl, force_rdclass=rdclass,
                        force_rdtype=rdtype, default_ttl=default_ttl)
        reader.read()
    return manager.rrsets
[ 2, 15069, 357, 34, 8, 360, 77, 2777, 7535, 25767, 669, 11, 766, 38559, 24290, 329, 2420, 286, 3180, 34, 5964, 198, 198, 2, 15069, 357, 34, 8, 5816, 12, 12726, 11, 3717, 12, 9804, 399, 6351, 388, 11, 3457, 13, 198, 2, 198, 2, 2...
2.627361
2,171
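A short sketch of how read_rrsets above is called (in released dnspython this function is exposed as dns.zonefile.read_rrsets from version 2.1 on; the records are placeholders):

import dns.zonefile

text = "www 3600 IN A 192.0.2.1\nwww 3600 IN A 192.0.2.2"
rrsets = dns.zonefile.read_rrsets(text, origin="example.", relativize=False)
for rrset in rrsets:
    print(rrset)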
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import errno
import os
import sys
import time
import signal
from re import sub

import eventlet.debug
from eventlet.hubs import use_hub

from swift.common import utils


def run_daemon(klass, conf_file, section_name='', once=False, **kwargs):
    """
    Loads settings from conf, then instantiates daemon ``klass`` and runs the
    daemon with the specified ``once`` kwarg.  The section_name will be derived
    from the daemon ``klass`` if not provided (e.g. ObjectReplicator =>
    object-replicator).

    :param klass: Class to instantiate, subclass of :class:`Daemon`
    :param conf_file: Path to configuration file
    :param section_name: Section name from conf file to load config from
    :param once: Passed to daemon :meth:`Daemon.run` method
    """
    # very often the config section_name is based on the class name
    # the None singleton will be passed through to readconf as is
    if section_name == '':
        section_name = sub(r'([a-z])([A-Z])', r'\1-\2',
                           klass.__name__).lower()

    try:
        conf = utils.readconf(conf_file, section_name,
                              log_name=kwargs.get('log_name'))
    except (ValueError, IOError) as e:
        # The message will be printed to stderr
        # and results in an exit code of 1.
        sys.exit(e)

    use_hub(utils.get_hub())

    # once on command line (i.e. daemonize=false) will over-ride config
    once = once or not utils.config_true_value(conf.get('daemonize', 'true'))

    # pre-configure logger
    if 'logger' in kwargs:
        logger = kwargs.pop('logger')
    else:
        logger = utils.get_logger(conf, conf.get('log_name', section_name),
                                  log_to_console=kwargs.pop('verbose', False),
                                  log_route=section_name)

    # optional nice/ionice priority scheduling
    utils.modify_priority(conf, logger)

    # disable fallocate if desired
    if utils.config_true_value(conf.get('disable_fallocate', 'no')):
        utils.disable_fallocate()
    # set utils.FALLOCATE_RESERVE if desired
    utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
        utils.config_fallocate_value(conf.get('fallocate_reserve', '1%'))

    # By default, disable eventlet printing stacktraces
    eventlet_debug = utils.config_true_value(conf.get('eventlet_debug', 'no'))
    eventlet.debug.hub_exceptions(eventlet_debug)

    # Ensure TZ environment variable exists to avoid stat('/etc/localtime') on
    # some platforms. This locks in reported times to UTC.
    os.environ['TZ'] = 'UTC+0'
    time.tzset()

    logger.notice('Starting %s', os.getpid())
    try:
        DaemonStrategy(klass(conf), logger).run(once=once, **kwargs)
    except KeyboardInterrupt:
        logger.info('User quit')
    logger.notice('Exited %s', os.getpid())
[ 2, 15069, 357, 66, 8, 3050, 12, 6999, 4946, 25896, 5693, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846, 351, 262, 13789...
2.659657
1,284
import resource_files

resources = resource_files.ResourceFiles()

# sample use case of getting yamls
print(resources.get_yaml("Pod", "jumpy-shark-gbapp-frontend-844fdccf55-ggkbf", "default", "mycluster"))

# sample use case of getting events
print(resources.get_events('mycluster', 'default', '78abd8c9-ac06-11e9-b68f-0e70a6ce6d3a'))

# sample use case of getting logs
print(resources.get_logs('mycluster', 'default', "jumpy-shark-gbapp-frontend-844fdccf55-ggkbf"))
[ 11748, 8271, 62, 16624, 198, 198, 37540, 796, 8271, 62, 16624, 13, 26198, 25876, 3419, 198, 198, 2, 6291, 779, 1339, 286, 1972, 331, 321, 7278, 198, 4798, 7, 37540, 13, 1136, 62, 88, 43695, 7203, 41565, 1600, 366, 73, 32152, 12, 147...
2.693182
176
""" Project: flask-rest Author: Saj Arora Description: Handle auth endpoints such as auth/signup, auth/login """ from api.v1 import make_json_ok_response, SageController, SageMethod from api.v1.fundamentals import helper from .auth_controller import AuthController auth_controller = { 'signup': SageController(sage_auth_signup_function, SageMethod.POST, authenticate=False), 'authenticate': SageController(sage_auth_authenticate_function, SageMethod.POST, authenticate=False) }
[ 37811, 198, 16775, 25, 42903, 12, 2118, 198, 13838, 25, 311, 1228, 943, 5799, 198, 11828, 25, 33141, 6284, 886, 13033, 884, 355, 6284, 14, 12683, 929, 11, 6284, 14, 38235, 198, 37811, 198, 6738, 40391, 13, 85, 16, 1330, 787, 62, 177...
3.349315
146
import random as rn

import numpy as np

# open system dynamics of a qubit and compare numerical results with the analytical calculations
# NOTE these are also TUTORIALS of the library, so see the Tutorials for what these are doing and analytical
# calculations.
# currently includes 2 cases: (i) decay only, and (ii) unitary evolution by calling Liouville method without giving
# any collapse operators. For now, only looks at excited state populations
# TODO this is an unfinished test. below two tests are the same and it actually is not testing open system dynamics.

decayRateSM = rn.random()
excitedPopulation = lambda t: 0.5*np.exp(-(0.00001*(decayRateSM+1)*2+1j)*50*t)
populations = {'excitedAnalytical':[], 'excitedNumerical':[]}

# this is used as the calculate attribute of the qubit, and the singleQubit fixture evolve method calls this at every
# step of the evolution. It stores both numerical and analytical excited state populations into the dictionary above.
[ 11748, 4738, 355, 374, 77, 198, 11748, 299, 32152, 355, 45941, 198, 198, 2, 1280, 1080, 17262, 286, 257, 627, 2545, 290, 8996, 29052, 2482, 351, 262, 30063, 16765, 198, 2, 24550, 777, 389, 635, 309, 3843, 1581, 12576, 50, 286, 262, ...
3.835294
255
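A tiny sketch of how the analytical branch of the dictionary above could be filled in (illustrative only; the numerical branch needs the library's singleQubit fixture, which is not part of this snippet, and storing |amplitude|^2 as the population is an assumption):

import numpy as np

for t in np.linspace(0, 1, 11):
    # squared magnitude of the decaying amplitude defined above
    populations['excitedAnalytical'].append(abs(excitedPopulation(t)) ** 2)
print(populations['excitedAnalytical'][:3])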
# this file will contain functions related to vector states

from .density import *    # we may use some functions from them and dependencies
[ 2, 5661, 2393, 481, 3994, 2163, 326, 3519, 284, 15879, 1181, 198, 198, 6738, 764, 43337, 1330, 1635, 220, 220, 220, 1303, 732, 743, 779, 617, 5499, 422, 606, 290, 20086, 198 ]
4.46875
32
#! /usr/bin/env python
"""
Description:
    Gather Metadata for the uncover-ml prediction output results:

Reference: email 2019-05-24

Overview
    Creator: (person who generated the model)
    Model;
        Name:
        Type and date:
        Algorithm:
        Extent: Lat/long - location on Australia map?

    SB Notes: None of the above is required as this information will be captured in the yaml file.

Model inputs:

    1. Covariates - list (in full)
    2. Targets: path to shapefile: csv file
    SB Notes: Only covariate list file. Targets and path to shapefile is not required as this is
    available in the yaml file. Maybe the full path to the shapefile has some merit as one can
    specify a partial path.

Model performance
    JSON file (in full)
    SB Notes: Yes

Model outputs

    1. Prediction grid including path
    2. Quantiles Q5; Q95
    3. Variance:
    4. Entropy:
    5. Feature rank file
    6. Raw covariates file (target value - covariate value)
    7. Optimisation output
    8. Others ??
    SB Notes: Not required as these are model dependent, and the metadata will be contained in
    each of the output geotif files.

Model parameters:
    1. YAML file (in full)
    2. .SH file (in full)
    SB Notes: The .sh file is not required. YAML file is read as a python dictionary in uncoverml
    which can be dumped in the metadata.

CreationDate:   31/05/19
Developer:      fei.zhang@ga.gov.au

Revision History:
    LastUpdate:     31/05/19   FZ
    LastUpdate:     dd/mm/yyyy  Who     Optional description
"""

# import section
import os
import sys
import json
import pickle
import datetime
import getpass
import socket

from ppretty import ppretty

import uncoverml
[ 2, 0, 1220, 14629, 14, 8800, 14, 24330, 21015, 198, 37811, 198, 11828, 25, 198, 220, 220, 220, 402, 1032, 3395, 14706, 329, 262, 23658, 12, 4029, 17724, 5072, 2482, 25, 198, 198, 26687, 25, 3053, 13130, 12, 2713, 12, 1731, 198, 2906...
2.957371
563
print("hiiiiiiiiiiiiiiiix")
[ 4798, 7203, 71, 4178, 4178, 4178, 4178, 4178, 4178, 15479, 844, 4943, 628 ]
2.230769
13
import numpy as np
import pandas as pd
from pandas.util import testing as tm
[ 11748, 299, 32152, 355, 45941, 198, 11748, 19798, 292, 355, 279, 67, 198, 6738, 19798, 292, 13, 22602, 1330, 4856, 355, 256, 76, 628, 198 ]
3.16
25
#!/usr/bin/env python
#=============================================================================#
#                                                                             #
# NAME:     do_RMsynth_1D.py                                                  #
#                                                                             #
# PURPOSE:  API for running RM-synthesis on an ASCII Stokes I, Q & U spectrum.#
#                                                                             #
# MODIFIED: 16-Nov-2018 by J. West                                            #
# MODIFIED: 23-October-2019 by A. Thomson                                     #
#                                                                             #
#=============================================================================#
#                                                                             #
# The MIT License (MIT)                                                       #
#                                                                             #
# Copyright (c) 2015 - 2018 Cormac R. Purcell                                 #
#                                                                             #
# Permission is hereby granted, free of charge, to any person obtaining a     #
# copy of this software and associated documentation files (the "Software"),  #
# to deal in the Software without restriction, including without limitation   #
# the rights to use, copy, modify, merge, publish, distribute, sublicense,    #
# and/or sell copies of the Software, and to permit persons to whom the       #
# Software is furnished to do so, subject to the following conditions:        #
#                                                                             #
# The above copyright notice and this permission notice shall be included in  #
# all copies or substantial portions of the Software.                         #
#                                                                             #
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR  #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,    #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER      #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING     #
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER         #
# DEALINGS IN THE SOFTWARE.                                                   #
#                                                                             #
#=============================================================================#

import sys
import os
import time
import traceback
import json
import math as m

import numpy as np
import matplotlib.pyplot as plt

from RMutils.util_RM import do_rmsynth
from RMutils.util_RM import do_rmsynth_planes
from RMutils.util_RM import get_rmsf_planes
from RMutils.util_RM import measure_FDF_parms
from RMutils.util_RM import measure_qu_complexity
from RMutils.util_RM import measure_fdf_complexity
from RMutils.util_misc import nanmedian
from RMutils.util_misc import toscalar
from RMutils.util_misc import create_frac_spectra
from RMutils.util_misc import poly5
from RMutils.util_misc import MAD
from RMutils.util_plotTk import plot_Ipqu_spectra_fig
from RMutils.util_plotTk import plot_rmsf_fdf_fig
from RMutils.util_plotTk import plot_complexity_fig
from RMutils.util_plotTk import CustomNavbar
from RMutils.util_plotTk import plot_rmsIQU_vs_nu_ax

if sys.version_info.major == 2:
    print('RM-tools will no longer run with Python 2! Please use Python 3.')
    exit()

C = 2.997924538e8  # Speed of light [m/s]


#-----------------------------------------------------------------------------#
def run_rmsynth(data, polyOrd=3, phiMax_radm2=None, dPhi_radm2=None,
                nSamples=10.0, weightType="variance", fitRMSF=False,
                noStokesI=False, phiNoise_radm2=1e6, nBits=32, showPlots=False,
                debug=False, verbose=False, log=print, units='Jy/beam',
                prefixOut="prefixOut", args=None):
    """Run RM synthesis on 1D data.

    Args:
        data (list): Contains frequency and polarization data as either:
            [freq_Hz, I, Q, U, dI, dQ, dU]
                freq_Hz (array_like): Frequency of each channel in Hz.
                I (array_like): Stokes I intensity in each channel.
                Q (array_like): Stokes Q intensity in each channel.
                U (array_like): Stokes U intensity in each channel.
                dI (array_like): Error in Stokes I intensity in each channel.
                dQ (array_like): Error in Stokes Q intensity in each channel.
                dU (array_like): Error in Stokes U intensity in each channel.
            or
            [freq_Hz, q, u, dq, du]
                freq_Hz (array_like): Frequency of each channel in Hz.
                q (array_like): Fractional Stokes Q intensity (Q/I) in each
                    channel.
                u (array_like): Fractional Stokes U intensity (U/I) in each
                    channel.
                dq (array_like): Error in fractional Stokes Q intensity in
                    each channel.
                du (array_like): Error in fractional Stokes U intensity in
                    each channel.

    Kwargs:
        polyOrd (int): Order of polynomial to fit to Stokes I spectrum.
        phiMax_radm2 (float): Maximum absolute Faraday depth (rad/m^2).
        dPhi_radm2 (float): Faraday depth channel size (rad/m^2).
        nSamples (float): Number of samples across the RMSF.
        weightType (str): Can be "variance" or "uniform"
            "variance" -- Weight by uncertainty in Q and U.
            "uniform" -- Weight uniformly (i.e. with 1s)
        fitRMSF (bool): Fit a Gaussian to the RMSF?
        noStokesI (bool): Is Stokes I data provided?
        phiNoise_radm2 (float): ????
        nBits (int): Precision of floating point numbers.
        showPlots (bool): Show plots?
        debug (bool): Turn on debugging messages & plots?
        verbose (bool): Verbosity.
        log (function): Which logging function to use.
        units (str): Units of data.

    Returns:
        mDict (dict): Summary of RM synthesis results.
        aDict (dict): Data output by RM synthesis.
    """
    # Sanity checks
    if not os.path.exists(args.dataFile[0]):
        print("File does not exist: '%s'." % args.dataFile[0])
        sys.exit()
    prefixOut, ext = os.path.splitext(args.dataFile[0])

    # Default data types
    dtFloat = "float" + str(nBits)
    dtComplex = "complex" + str(2*nBits)

    # freq_Hz, I, Q, U, dI, dQ, dU
    try:
        if verbose: log("> Trying [freq_Hz, I, Q, U, dI, dQ, dU]", end=' ')
        (freqArr_Hz, IArr, QArr, UArr, dIArr, dQArr, dUArr) = data
        if verbose: log("... success.")
    except Exception:
        if verbose: log("...failed.")
        # freq_Hz, q, u, dq, du
        try:
            if verbose: log("> Trying [freq_Hz, q, u, dq, du]", end=' ')
            (freqArr_Hz, QArr, UArr, dQArr, dUArr) = data
            if verbose: log("... success.")
            noStokesI = True
        except Exception:
            if verbose: log("...failed.")
            if debug:
                log(traceback.format_exc())
            sys.exit()
    if verbose: log("Successfully read in the Stokes spectra.")

    # If no Stokes I present, create a dummy spectrum = unity
    if noStokesI:
        if verbose: log("Warn: no Stokes I data in use.")
        IArr = np.ones_like(QArr)
        dIArr = np.zeros_like(QArr)

    # Convert to GHz for convenience
    freqArr_GHz = freqArr_Hz / 1e9
    dQUArr = (dQArr + dUArr)/2.0

    # Fit the Stokes I spectrum and create the fractional spectra
    IModArr, qArr, uArr, dqArr, duArr, fitDict = \
        create_frac_spectra(freqArr=freqArr_GHz,
                            IArr=IArr,
                            QArr=QArr,
                            UArr=UArr,
                            dIArr=dIArr,
                            dQArr=dQArr,
                            dUArr=dUArr,
                            polyOrd=polyOrd,
                            verbose=True,
                            debug=debug)

    # Plot the data and the Stokes I model fit
    if verbose: log("Plotting the input data and spectral index fit.")
    freqHirArr_Hz = np.linspace(freqArr_Hz[0], freqArr_Hz[-1], 10000)
    IModHirArr = poly5(fitDict["p"])(freqHirArr_Hz/1e9)
    specFig = plt.figure(figsize=(12.0, 8))
    plot_Ipqu_spectra_fig(freqArr_Hz=freqArr_Hz,
                          IArr=IArr,
                          qArr=qArr,
                          uArr=uArr,
                          dIArr=dIArr,
                          dqArr=dqArr,
                          duArr=duArr,
                          freqHirArr_Hz=freqHirArr_Hz,
                          IModArr=IModHirArr,
                          fig=specFig,
                          units=units)

    # Use the custom navigation toolbar (does not work on Mac OS X)
    # try:
    #     specFig.canvas.toolbar.pack_forget()
    #     CustomNavbar(specFig.canvas, specFig.canvas.toolbar.window)
    # except Exception:
    #     pass

    # Display the figure
    # if not plt.isinteractive():
    #     specFig.show()

    # DEBUG (plot the Q, U and average RMS spectrum)
    if debug:
        rmsFig = plt.figure(figsize=(12.0, 8))
        ax = rmsFig.add_subplot(111)
        ax.plot(freqArr_Hz/1e9, dQUArr, marker='o', color='k', lw=0.5,
                label='rms <QU>')
        ax.plot(freqArr_Hz/1e9, dQArr, marker='o', color='b', lw=0.5,
                label='rms Q')
        ax.plot(freqArr_Hz/1e9, dUArr, marker='o', color='r', lw=0.5,
                label='rms U')
        xRange = (np.nanmax(freqArr_Hz)-np.nanmin(freqArr_Hz))/1e9
        ax.set_xlim(np.min(freqArr_Hz)/1e9 - xRange*0.05,
                    np.max(freqArr_Hz)/1e9 + xRange*0.05)
        ax.set_xlabel('$\\nu$ (GHz)')
        ax.set_ylabel('RMS ' + units)
        ax.set_title("RMS noise in Stokes Q, U and <Q,U> spectra")
        # rmsFig.show()

    #-------------------------------------------------------------------------#

    # Calculate some wavelength parameters
    lambdaSqArr_m2 = np.power(C/freqArr_Hz, 2.0)
    dFreq_Hz = np.nanmin(np.abs(np.diff(freqArr_Hz)))
    lambdaSqRange_m2 = (np.nanmax(lambdaSqArr_m2) -
                        np.nanmin(lambdaSqArr_m2))
    dLambdaSqMin_m2 = np.nanmin(np.abs(np.diff(lambdaSqArr_m2)))
    dLambdaSqMax_m2 = np.nanmax(np.abs(np.diff(lambdaSqArr_m2)))

    # Set the Faraday depth range
    fwhmRMSF_radm2 = 2.0 * m.sqrt(3.0) / lambdaSqRange_m2
    if dPhi_radm2 is None:
        dPhi_radm2 = fwhmRMSF_radm2 / nSamples
    if phiMax_radm2 is None:
        phiMax_radm2 = m.sqrt(3.0) / dLambdaSqMax_m2
        phiMax_radm2 = max(phiMax_radm2, fwhmRMSF_radm2*10.)  # Force the minimum phiMax to 10 FWHM

    # Faraday depth sampling. Zero always centred on middle channel
    nChanRM = int(round(abs((phiMax_radm2 - 0.0) / dPhi_radm2)) * 2.0 + 1.0)
    startPhi_radm2 = - (nChanRM-1.0) * dPhi_radm2 / 2.0
    stopPhi_radm2 = + (nChanRM-1.0) * dPhi_radm2 / 2.0
    phiArr_radm2 = np.linspace(startPhi_radm2, stopPhi_radm2, nChanRM)
    phiArr_radm2 = phiArr_radm2.astype(dtFloat)
    if verbose:
        log("PhiArr = %.2f to %.2f by %.2f (%d chans)." % (phiArr_radm2[0],
                                                           phiArr_radm2[-1],
                                                           float(dPhi_radm2),
                                                           nChanRM))

    # Calculate the weighting as 1/sigma^2 or all 1s (uniform)
    if weightType == "variance":
        weightArr = 1.0 / np.power(dQUArr, 2.0)
    else:
        weightType = "uniform"
        weightArr = np.ones(freqArr_Hz.shape, dtype=dtFloat)
    if verbose: log("Weight type is '%s'." % weightType)

    startTime = time.time()

    # Perform RM-synthesis on the spectrum
    dirtyFDF, lam0Sq_m2 = do_rmsynth_planes(dataQ=qArr,
                                            dataU=uArr,
                                            lambdaSqArr_m2=lambdaSqArr_m2,
                                            phiArr_radm2=phiArr_radm2,
                                            weightArr=weightArr,
                                            nBits=nBits,
                                            verbose=verbose,
                                            log=log)

    # Calculate the Rotation Measure Spread Function
    RMSFArr, phi2Arr_radm2, fwhmRMSFArr, fitStatArr = \
        get_rmsf_planes(lambdaSqArr_m2=lambdaSqArr_m2,
                        phiArr_radm2=phiArr_radm2,
                        weightArr=weightArr,
                        mskArr=~np.isfinite(qArr),
                        lam0Sq_m2=lam0Sq_m2,
                        double=True,
                        fitRMSF=fitRMSF,
                        fitRMSFreal=False,
                        nBits=nBits,
                        verbose=verbose,
                        log=log)
    fwhmRMSF = float(fwhmRMSFArr)

    # ALTERNATE RM-SYNTHESIS CODE --------------------------------------------#

    #dirtyFDF, [phi2Arr_radm2, RMSFArr], lam0Sq_m2, fwhmRMSF = \
    #          do_rmsynth(qArr, uArr, lambdaSqArr_m2, phiArr_radm2, weightArr)

    #-------------------------------------------------------------------------#

    endTime = time.time()
    cputime = (endTime - startTime)
    if verbose: log("> RM-synthesis completed in %.2f seconds." % cputime)

    # Determine the Stokes I value at lam0Sq_m2 from the Stokes I model
    # Multiply the dirty FDF by Ifreq0 to recover the PI
    freq0_Hz = C / m.sqrt(lam0Sq_m2)
    Ifreq0 = poly5(fitDict["p"])(freq0_Hz/1e9)
    dirtyFDF *= (Ifreq0)  # FDF is in fracpol units initially, convert back to flux

    # Calculate the theoretical noise in the FDF
    # !!Old formula only works for variance weights!
    weightArr = np.where(np.isnan(weightArr), 0.0, weightArr)
    dFDFth = np.sqrt(np.sum(weightArr**2 * np.nan_to_num(dQUArr)**2) /
                     (np.sum(weightArr))**2)

    # Measure the parameters of the dirty FDF
    # Use the theoretical noise to calculate uncertainties
    mDict = measure_FDF_parms(FDF=dirtyFDF,
                              phiArr=phiArr_radm2,
                              fwhmRMSF=fwhmRMSF,
                              dFDF=dFDFth,
                              lamSqArr_m2=lambdaSqArr_m2,
                              lam0Sq=lam0Sq_m2)
    mDict["Ifreq0"] = toscalar(Ifreq0)
    mDict["polyCoeffs"] = ",".join([str(x) for x in fitDict["p"]])
    mDict["IfitStat"] = fitDict["fitStatus"]
    mDict["IfitChiSqRed"] = fitDict["chiSqRed"]
    mDict["lam0Sq_m2"] = toscalar(lam0Sq_m2)
    mDict["freq0_Hz"] = toscalar(freq0_Hz)
    mDict["fwhmRMSF"] = toscalar(fwhmRMSF)
    mDict["dQU"] = toscalar(nanmedian(dQUArr))
    mDict["dFDFth"] = toscalar(dFDFth)
    mDict["units"] = units

    if fitDict["fitStatus"] >= 128:
        log("WARNING: Stokes I model contains negative values!")
    elif fitDict["fitStatus"] >= 64:
        log("Caution: Stokes I model has low signal-to-noise.")

    # Add information on nature of channels:
    good_channels = np.where(np.logical_and(weightArr != 0,
                                            np.isfinite(qArr)))[0]
    mDict["min_freq"] = float(np.min(freqArr_Hz[good_channels]))
    mDict["max_freq"] = float(np.max(freqArr_Hz[good_channels]))
    mDict["N_channels"] = good_channels.size
    mDict["median_channel_width"] = float(np.median(np.diff(freqArr_Hz)))

    # Measure the complexity of the q and u spectra
    mDict["fracPol"] = mDict["ampPeakPIfit"]/(Ifreq0)
    mD, pD = measure_qu_complexity(freqArr_Hz=freqArr_Hz,
                                   qArr=qArr,
                                   uArr=uArr,
                                   dqArr=dqArr,
                                   duArr=duArr,
                                   fracPol=mDict["fracPol"],
                                   psi0_deg=mDict["polAngle0Fit_deg"],
                                   RM_radm2=mDict["phiPeakPIfit_rm2"])
    mDict.update(mD)

    # Debugging plots for spectral complexity measure
    if debug:
        tmpFig = plot_complexity_fig(xArr=pD["xArrQ"],
                                     qArr=pD["yArrQ"],
                                     dqArr=pD["dyArrQ"],
                                     sigmaAddqArr=pD["sigmaAddArrQ"],
                                     chiSqRedqArr=pD["chiSqRedArrQ"],
                                     probqArr=pD["probArrQ"],
                                     uArr=pD["yArrU"],
                                     duArr=pD["dyArrU"],
                                     sigmaAdduArr=pD["sigmaAddArrU"],
                                     chiSqReduArr=pD["chiSqRedArrU"],
                                     probuArr=pD["probArrU"],
                                     mDict=mDict)
        if saveOutput:
            if verbose: print("Saving debug plots:")
            outFilePlot = prefixOut + ".debug-plots.pdf"
            if verbose: print("> " + outFilePlot)
            tmpFig.savefig(outFilePlot, bbox_inches='tight')
        else:
            tmpFig.show()

    # add array dictionary
    aDict = dict()
    aDict["phiArr_radm2"] = phiArr_radm2
    aDict["phi2Arr_radm2"] = phi2Arr_radm2
    aDict["RMSFArr"] = RMSFArr
    aDict["freqArr_Hz"] = freqArr_Hz
    aDict["weightArr"] = weightArr
    aDict["dirtyFDF"] = dirtyFDF

    if verbose:
        # Print the results to the screen
        log()
        log('-'*80)
        log('RESULTS:\n')
        log('FWHM RMSF = %.4g rad/m^2' % (mDict["fwhmRMSF"]))

        log('Pol Angle = %.4g (+/-%.4g) deg' % (mDict["polAngleFit_deg"],
                                                mDict["dPolAngleFit_deg"]))
        log('Pol Angle 0 = %.4g (+/-%.4g) deg' % (mDict["polAngle0Fit_deg"],
                                                  mDict["dPolAngle0Fit_deg"]))
        log('Peak FD = %.4g (+/-%.4g) rad/m^2' % (mDict["phiPeakPIfit_rm2"],
                                                  mDict["dPhiPeakPIfit_rm2"]))
        log('freq0_GHz = %.4g ' % (mDict["freq0_Hz"]/1e9))
        log('I freq0 = %.4g %s' % (mDict["Ifreq0"], units))
        log('Peak PI = %.4g (+/-%.4g) %s' % (mDict["ampPeakPIfit"],
                                             mDict["dAmpPeakPIfit"], units))
        log('QU Noise = %.4g %s' % (mDict["dQU"], units))
        log('FDF Noise (theory) = %.4g %s' % (mDict["dFDFth"], units))
        log('FDF Noise (Corrected MAD) = %.4g %s' % (mDict["dFDFcorMAD"], units))
        log('FDF Noise (rms) = %.4g %s' % (mDict["dFDFrms"], units))
        log('FDF SNR = %.4g ' % (mDict["snrPIfit"]))
        log('sigma_add(q) = %.4g (+%.4g, -%.4g)' % (mDict["sigmaAddQ"],
                                                    mDict["dSigmaAddPlusQ"],
                                                    mDict["dSigmaAddMinusQ"]))
        log('sigma_add(u) = %.4g (+%.4g, -%.4g)' % (mDict["sigmaAddU"],
                                                    mDict["dSigmaAddPlusU"],
                                                    mDict["dSigmaAddMinusU"]))
        log()
        log('-'*80)

    # Plot the RM Spread Function and dirty FDF
    if showPlots or saveOutput:
        fdfFig = plt.figure(figsize=(12.0, 8))
        plot_rmsf_fdf_fig(phiArr=phiArr_radm2,
                          FDF=dirtyFDF,
                          phi2Arr=phi2Arr_radm2,
                          RMSFArr=RMSFArr,
                          fwhmRMSF=fwhmRMSF,
                          vLine=mDict["phiPeakPIfit_rm2"],
                          fig=fdfFig,
                          units=units)

        # Use the custom navigation toolbar
        # try:
        #     fdfFig.canvas.toolbar.pack_forget()
        #     CustomNavbar(fdfFig.canvas, fdfFig.canvas.toolbar.window)
        # except Exception:
        #     pass

        # Display the figure
        # fdfFig.show()

    # Pause if plotting enabled
    if showPlots:
        plt.show()
    elif saveOutput or debug:
        if verbose: print("Saving RMSF and dirty FDF plot:")
        outFilePlot = prefixOut + ".RMSF-dirtyFDF-plots.pdf"
        if verbose: print("> " + outFilePlot)
        fdfFig.savefig(outFilePlot, bbox_inches='tight')
        # #if verbose: print "Press <RETURN> to exit ...",
        # input()

    return mDict, aDict


def readFile(dataFile, nBits, verbose=True, debug=False):
    """
    Read the I, Q & U data from the ASCII file.

    Inputs:
        datafile (str): relative or absolute path to file.
        nBits (int): number of bits to store the data as.
        verbose (bool): Print verbose messages to terminal?
        debug (bool): Print full traceback in case of failure?

    Returns:
        data (list of arrays): List containing the columns found in the file.
            If Stokes I is present, this will be [freq_Hz, I, Q, U, dI, dQ, dU],
            else [freq_Hz, q, u, dq, du].
    """

    # Default data types
    dtFloat = "float" + str(nBits)
    dtComplex = "complex" + str(2*nBits)

    # Output prefix is derived from the input file name

    # Read the data-file. Format=space-delimited, comments="#".
    if verbose: print("Reading the data file '%s':" % dataFile)
    # freq_Hz, I, Q, U, dI, dQ, dU
    try:
        if verbose: print("> Trying [freq_Hz, I, Q, U, dI, dQ, dU]", end=' ')
        (freqArr_Hz, IArr, QArr, UArr, dIArr, dQArr, dUArr) = \
            np.loadtxt(dataFile, unpack=True, dtype=dtFloat)
        if verbose: print("... success.")
        data = [freqArr_Hz, IArr, QArr, UArr, dIArr, dQArr, dUArr]
    except Exception:
        if verbose: print("...failed.")
        # freq_Hz, q, u, dq, du
        try:
            if verbose: print("> Trying [freq_Hz, q, u, dq, du]", end=' ')
            (freqArr_Hz, QArr, UArr, dQArr, dUArr) = \
                np.loadtxt(dataFile, unpack=True, dtype=dtFloat)
            if verbose: print("... success.")
            data = [freqArr_Hz, QArr, UArr, dQArr, dUArr]
            noStokesI = True
        except Exception:
            if verbose: print("...failed.")
            if debug:
                print(traceback.format_exc())
            sys.exit()
    if verbose: print("Successfully read in the Stokes spectra.")
    return data


#-----------------------------------------------------------------------------#
#-----------------------------------------------------------------------------#
if __name__ == "__main__":
    main()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 23926, 25609, 46249, 198, 2, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 2...
1.80334
12,753
from .message_passing import MessagePassing

from .gcn_conv import GCNConv
from .gat_conv import GATConv
from .se_layer import SELayer
from .aggregator import Meanaggregator
from .maggregator import meanaggr

__all__ = [
    'MessagePassing',
    'GCNConv',
    'GATConv',
    'SELayer',
    'Meanaggregator'
]
[ 6738, 764, 20500, 62, 6603, 278, 1330, 16000, 14478, 278, 198, 6738, 764, 70, 31522, 62, 42946, 1330, 20145, 45, 3103, 85, 198, 6738, 764, 41268, 62, 42946, 1330, 402, 1404, 3103, 85, 198, 6738, 764, 325, 62, 29289, 1330, 311, 3698, ...
2.5
124
# Copyright 2021 The NetKet Authors - All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import Callable

from . import struct


def get_afun_if_module(mod_or_fun) -> Callable:
    """Returns the apply function if it's a module. Does nothing otherwise."""
    if hasattr(mod_or_fun, "apply"):
        return mod_or_fun.apply
    else:
        return mod_or_fun


def wrap_afun(mod_or_fun):
    """Wraps a callable to be a module-like object with the method `apply`.

    Does nothing if it already has an apply method.
    """
    if hasattr(mod_or_fun, "apply"):
        return mod_or_fun
    else:
        return WrappedApplyFun(mod_or_fun)
[ 2, 15069, 33448, 383, 3433, 42, 316, 46665, 532, 1439, 2489, 10395, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846, ...
3.08971
379
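A small, self-contained sketch of how the two helpers above behave (WrappedApplyFun is defined elsewhere in the package, so only the module-like branch is exercised here):

class FakeModule:
    @staticmethod
    def apply(x):
        return x * 2

fn = get_afun_if_module(FakeModule)  # has .apply, so the bound apply is returned
print(fn(3))                         # 6

mod = wrap_afun(FakeModule)          # already module-like, returned unchanged
print(mod is FakeModule)             # True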
# coding=utf-8 import ee from . import utils import json import csv from .. import tools def fromShapefile(filename, crs=None, start=None, end=None): """ Convert an ESRI file (.shp and .dbf must be present) to a ee.FeatureCollection At the moment only works for shapes with less than 1000 records and doesn't handle complex shapes. :param filename: the name of the filename. If the shape is not in the same path than the script, specify a path instead. :type filename: str :param start: :return: the FeatureCollection :rtype: ee.FeatureCollection """ import shapefile wgs84 = ee.Projection('EPSG:4326') # read the filename reader = shapefile.Reader(filename) fields = reader.fields[1:] field_names = [field[0] for field in fields] field_types = [field[1] for field in fields] types = dict(zip(field_names, field_types)) features = [] projection = utils.getProjection(filename) if not crs else crs # catch a string with format "EPSG:XXX" if isinstance(projection, str): if 'EPSG:' in projection: projection = projection.split(':')[1] projection = 'EPSG:{}'.format(projection) # filter records with start and end start = start if start else 0 if not end: records = reader.shapeRecords() end = len(records) else: end = end + 1 if (end-start)>1000: msg = "Can't process more than 1000 records at a time. Found {}" raise ValueError(msg.format(end-start)) for i in range(start, end): # atr = dict(zip(field_names, sr.record)) sr = reader.shapeRecord(i) atr = {} for fld, rec in zip(field_names, sr.record): fld_type = types[fld] if fld_type == 'D': value = ee.Date(rec.isoformat()).millis().getInfo() elif fld_type in ['C', 'N', 'F']: value = rec else: continue atr[fld] = value geom = sr.shape.__geo_interface__ if projection is not None: geometry = ee.Geometry(geom, projection) \ .transform(wgs84, 1) else: geometry = ee.Geometry(geom) feat = ee.Feature(geometry, atr) features.append(feat) return ee.FeatureCollection(features) def fromGeoJSON(filename=None, data=None, crs=None): """ Create a list of Features from a GeoJSON file. Return a python tuple with ee.Feature inside. This is due to failing when attempting to create a FeatureCollection (Broken Pipe ERROR) out of the list. You can try creating it yourself casting the result of this function to a ee.List or using it directly as a FeatureCollection argument. :param filename: the name of the file to load :type filename: str :param crs: a coordinate reference system in EPSG format. If not specified it will try to get it from the geoJSON, and if not there it will rise an error :type: crs: str :return: a tuple of features. 
""" if filename: with open(filename, 'r') as geoj: content = geoj.read() geodict = json.loads(content) else: geodict = data features = [] # Get crs from GeoJSON if not crs: filecrs = geodict.get('crs') if filecrs: name = filecrs.get('properties').get('name') splitcrs = name.split(':') cleancrs = [part for part in splitcrs if part] try: if cleancrs[-1] == 'CRS84': crs = 'EPSG:4326' elif cleancrs[-2] == 'EPSG': crs = '{}:{}'.format(cleancrs[-2], cleancrs[-1]) else: raise ValueError('{} not recognized'.format(name)) except IndexError: raise ValueError('{} not recognized'.format(name)) else: crs = 'EPSG:4326' for n, feat in enumerate(geodict.get('features')): properties = feat.get('properties') geom = feat.get('geometry') ty = geom.get('type') coords = geom.get('coordinates') if ty == 'GeometryCollection': ee_geom = utils.GEOMETRY_TYPES.get(ty)(geom, opt_proj=crs) else: if ty == 'Polygon': coords = utils.removeZ(coords) if utils.hasZ(coords) else coords ee_geom = utils.GEOMETRY_TYPES.get(ty)(coords, proj=ee.Projection(crs)) ee_feat = ee.feature.Feature(ee_geom, properties) features.append(ee_feat) return tuple(features) def fromKML(filename=None, data=None, crs=None, encoding=None): """ Create a list of Features from a KML file. Return a python tuple with ee.Feature inside. This is due to failing when attempting to create a FeatureCollection (Broken Pipe ERROR) out of the list. You can try creating it yourself casting the result of this function to a ee.List or using it directly as a FeatureCollection argument. :param filename: the name of the file to load :type filename: str :param crs: a coordinate reference system in EPSG format. If not specified it will try to get it from the geoJSON, and if not there it will rise an error :type: crs: str :return: a tuple of features. """ geojsondict = utils.kmlToGeoJsonDict(filename, data, encoding) features = geojsondict['features'] for feat in features: # remove styleUrl prop = feat['properties'] if 'styleUrl' in prop: prop.pop('styleUrl') # remove Z value if needed geom = feat['geometry'] ty = geom['type'] if ty == 'GeometryCollection': geometries = geom['geometries'] for g in geometries: c = g['coordinates'] utils.removeZ(c) else: coords = geom['coordinates'] utils.removeZ(coords) return fromGeoJSON(data=geojsondict, crs=crs) def toDict(collection, split_at=4000): """ Get the FeatureCollection as a dict object """ size = collection.size() condition = size.gte(4999) collections = ee.List( ee.Algorithms.If(condition, greater(), ee.List([collection]))) collections_size = collections.size().getInfo() col = ee.FeatureCollection(collections.get(0)) content = col.getInfo() feats = content['features'] for i in range(0, collections_size): c = ee.FeatureCollection(collections.get(i)) content_c = c.getInfo() feats_c = content_c['features'] feats = feats + feats_c content['features'] = feats return content def toGeoJSON(collection, name, path=None, split_at=4000): """ Export a FeatureCollection to a GeoJSON file :param collection: The collection to export :type collection: ee.FeatureCollection :param name: name of the resulting file :type name: str :param path: The path where to save the file. If None, will be saved in the current folder :type path: str :param split_at: limit to avoid an EE Exception :type split_at: int :return: A GeoJSON (.geojson) file. 
    :rtype: file
    """
    import json
    import os

    if not path:
        path = os.getcwd()

    # add the extension if it is not already there
    if not name.endswith('.geojson'):
        fname = name + '.geojson'
    else:
        fname = name

    content = toDict(collection, split_at)

    with open(os.path.join(path, fname), 'w') as thefile:
        thefile.write(json.dumps(content))

    return thefile


def toCSV(collection, filename, split_at=4000):
    """ Alternative to download a FeatureCollection as a CSV """
    d = toDict(collection, split_at)

    fields = list(d['columns'].keys())
    fields.append('geometry')

    features = d['features']

    ext = filename[-4:]
    if ext != '.csv':
        filename += '.csv'

    with open(filename, 'w') as thecsv:
        writer = csv.DictWriter(thecsv, fields)
        writer.writeheader()
        # write rows
        for feature in features:
            properties = feature['properties']
            fid = feature['id']
            geom = feature['geometry']['type']

            # match fields
            properties['system:index'] = fid
            properties['geometry'] = geom

            # write row
            writer.writerow(properties)

    return thecsv


def toLocal(collection, filename, filetype=None, selectors=None, path=None):
    """ Download a FeatureCollection to a local CSV or GeoJSON file. This
    uses a different method than `toGeoJSON` and `toCSV`

    :param filetype: The filetype of download, either CSV or JSON. Defaults
        to CSV.
    :param selectors: The selectors that should be used to determine which
        attributes will be downloaded.
    :param filename: The name of the file to be downloaded
    """
    if not filetype:
        filetype = 'CSV'

    url = collection.getDownloadURL(filetype, selectors, filename)
    thefile = utils.downloadFile(url, filename, filetype, path)
    return thefile


def toAsset(table, assetPath, name=None, create=True, verbose=False, **kwargs):
    """ This function can create folders and ImageCollections on the fly.
    The rest is the same as Export.table.toAsset. You can pass the same
    params as the original function

    :param table: the feature collection to upload
    :type table: ee.FeatureCollection
    :param assetPath: path to upload the table to (only PATH, without
        filename)
    :type assetPath: str
    :param name: filename for the asset (AssetID will be assetPath + name)
    :type name: str
    :return: the task
    :rtype: ee.batch.Task
    """
    # Check if the user is specified in the asset path
    is_user = (assetPath.split('/')[0] == 'users')
    if not is_user:
        user = ee.batch.data.getAssetRoots()[0]['id']
        assetPath = "{}/{}".format(user, assetPath)

    if create:
        # Recursively create the path
        path2create = assetPath  # '/'.join(assetPath.split('/')[:-1])
        utils.createAssets([path2create], 'Folder', True)

    # Asset ID (Path + name)
    assetId = '/'.join([assetPath, name])
    # Description
    description = utils.matchDescription(name)
    # Init task
    task = ee.batch.Export.table.toAsset(table, assetId=assetId,
                                         description=description, **kwargs)
    task.start()

    if verbose:
        print('Exporting {} to {}'.format(name, assetPath))

    return task
[ 2, 19617, 28, 40477, 12, 23, 198, 11748, 304, 68, 198, 6738, 764, 1330, 3384, 4487, 198, 11748, 33918, 198, 11748, 269, 21370, 198, 6738, 11485, 1330, 4899, 628, 198, 4299, 422, 33383, 7753, 7, 34345, 11, 1067, 82, 28, 14202, 11, 92...
2.40635
4,378
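A minimal usage sketch for the conversion helpers above, assuming an authenticated Earth Engine session; the shapefile path and output name are hypothetical:

import ee
ee.Initialize()

# hypothetical input; the .shp and .dbf files must sit side by side
fc = fromShapefile('watersheds.shp', start=0, end=100)

# write the collection back to disk as GeoJSON
toGeoJSON(fc, 'watersheds_ee', path='/tmp')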
# Rock-paper-scissors-lizard-Spock template

# The key idea of this program is to equate the strings
# "rock", "paper", "scissors", "lizard", "Spock" to numbers
# as follows:
#
# 0 - rock
# 1 - Spock
# 2 - paper
# 3 - lizard
# 4 - scissors

import random

# This ordering is what makes the number trick work: each choice beats the
# two names that precede it (cyclically) and loses to the two that follow.
NAMES = ["rock", "Spock", "paper", "lizard", "scissors"]


def rpsls(player_choice):
    # convert the player's string to a number via the mapping above
    player_number = NAMES.index(player_choice)
    # random choice for the computer
    comp_number = random.randrange(0, 5)
    print("Player chooses", player_choice)
    print("Computer chooses", NAMES[comp_number])
    # difference mod 5 decides: 1 or 2 -> computer wins,
    # 3 or 4 -> player wins, 0 -> tie
    difference = (comp_number - player_number) % 5
    if difference == 0:
        print("Player and computer tie!\n")
    elif difference <= 2:
        print("Computer wins!\n")
    else:
        print("Player wins!\n")


rpsls("rock")
rpsls("Spock")
rpsls("paper")
rpsls("lizard")
rpsls("scissors")
[ 2, 4631, 12, 20189, 12, 1416, 32555, 12, 75, 8669, 12, 4561, 735, 11055, 198, 198, 2, 383, 1994, 2126, 286, 428, 1430, 318, 284, 45423, 262, 13042, 198, 2, 366, 10823, 1600, 366, 20189, 1600, 366, 1416, 32555, 1600, 366, 75, 8669, ...
2.650794
126
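A quick sanity check of the modulo-5 rule used above (a hypothetical helper, not part of the original template): enumerating all matchups confirms that every choice beats exactly two others.

NAMES = ["rock", "Spock", "paper", "lizard", "scissors"]
for p in range(5):
    beats = [NAMES[c] for c in range(5) if (p - c) % 5 in (1, 2)]
    print(NAMES[p], "beats", beats)
# rock beats ['lizard', 'scissors']
# Spock beats ['rock', 'scissors'], and so on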
""" Contains functions to generate and combine a clustering ensemble. """ import numpy as np import pandas as pd from sklearn.metrics import pairwise_distances from sklearn.metrics import adjusted_rand_score as ari from sklearn.metrics import adjusted_mutual_info_score as ami from sklearn.metrics import normalized_mutual_info_score as nmi from tqdm import tqdm from clustering.utils import reset_estimator, compare_arrays def generate_ensemble(data, clusterers: dict, attributes: list, affinity_matrix=None): """ It generates an ensemble from the data given a set of clusterers (a clusterer is an instance of a clustering algorithm with a fixed set of parameters). Args: data: A numpy array, pandas dataframe, or any other structure supported by the clusterers as data input. clusterers: A dictionary with clusterers specified in this format: { 'k-means #1': KMeans(n_clusters=2), ... } attributes: A list of attributes to save in the final dataframe; for example, including "n_clusters" will extract this attribute from the estimator and include it in the final dataframe returned. affinity_matrix: If the clustering algorithm is AgglomerativeClustering (from sklearn) and the linkage method is different than ward (which only support euclidean distance), the affinity_matrix is given as data input to the estimator instead of data. Returns: A pandas DataFrame with all the partitions generated by the clusterers. Columns include the clusterer name/id, the partition, the estimator parameters (obtained with the get_params() method) and any other attribute specified. """ ensemble = [] for clus_name, clus_obj in tqdm(clusterers.items(), total=len(clusterers)): # get partition # # for agglomerative clustering both data and affinity_matrix should be # given; for ward linkage, data is used, and for the other linkage # methods the affinity_matrix is used if (type(clus_obj).__name__ == "AgglomerativeClustering") and ( clus_obj.linkage != "ward" ): partition = clus_obj.fit_predict(affinity_matrix).astype(float) else: partition = clus_obj.fit_predict(data).astype(float) # remove from partition noisy points (for example, if using DBSCAN) partition[partition < 0] = np.nan # get number of clusters partition_no_nan = partition[~np.isnan(partition)] n_clusters = np.unique(partition_no_nan).shape[0] # stop if n_clusters <= 1 if n_clusters <= 1: reset_estimator(clus_obj) continue res = pd.Series( { "clusterer_id": clus_name, "clusterer_params": str(clus_obj.get_params()), "partition": partition, } ) for attr in attributes: if attr == "n_clusters" and not hasattr(clus_obj, attr): res[attr] = n_clusters else: res[attr] = getattr(clus_obj, attr) ensemble.append(res) # for some estimators such as DBSCAN this is needed, because otherwise # the estimator saves references of huge data structures not needed in # this context reset_estimator(clus_obj) return pd.DataFrame(ensemble).set_index("clusterer_id") def get_ensemble_distance_matrix(ensemble, n_jobs=1): """ Given an ensemble, it computes the coassociation matrix (a distance matrix for all objects using the ensemble information). For each object pair, the coassociation matrix contains the percentage of times the pair of objects was clustered together in the ensemble. Args: ensemble: A numpy array representing a set of clustering solutions on the same data. Each row is a clustering solution (partition) and columns are objects. n_jobs: The number of jobs used by the pairwise_distance matrix from sklearn. 
    Returns:
        A numpy array representing a square distance matrix for all objects
        (coassociation matrix).
    """
    return pairwise_distances(
        ensemble.T, metric=_compare, n_jobs=n_jobs, force_all_finite="allow-nan"
    )


def _compare(x, y):
    """
    Compares the cluster labels of two objects across all partitions and
    returns the fraction of partitions in which they are NOT clustered
    together (a distance), ignoring partitions where either label is NaN.

    NOTE: this helper was missing from the excerpt; the body is a minimal
    reconstruction based on the coassociation matrix description above.
    """
    xy = np.array([x, y]).T
    xy = xy[~np.isnan(xy).any(axis=1)]
    return (xy[:, 0] != xy[:, 1]).sum() / xy.shape[0]


def supraconsensus(ensemble, k, methods, selection_criterion, n_jobs=1, use_tqdm=False):
    """
    It combines a clustering ensemble using a set of methods that the user can
    specify. Each of these methods combines the ensemble and returns a single
    partition. This function returns the combined partition that maximizes the
    selection criterion.

    Args:
        ensemble: a clustering ensemble (rows are partitions, columns are
            objects).
        k: the final number of clusters for the combined partition.
        methods: a list of methods to apply on the ensemble; each returns a
            combined partition.
        selection_criterion: a function that represents the selection criterion;
            this function has to accept an ensemble as the first argument, and a
            partition as the second one.
        n_jobs: number of jobs.
        use_tqdm: enables/disables the use of tqdm to show a progress bar.

    Returns:
        Returns a tuple: (partition, best method name, best criterion value)
    """
    from concurrent.futures import ProcessPoolExecutor, as_completed

    methods_results = {}

    with ProcessPoolExecutor(max_workers=n_jobs) as executor:
        tasks = {executor.submit(m, ensemble, k): m.__name__ for m in methods}

        for future in tqdm(
            as_completed(tasks),
            total=len(tasks),
            disable=(not use_tqdm),
            ncols=100,
        ):
            method_name = tasks[future]
            part = future.result()

            criterion_value = selection_criterion(ensemble, part)

            methods_results[method_name] = {
                "partition": part,
                "criterion_value": criterion_value,
            }

    # select the best performing method according to the selection criterion
    best_method = max(
        methods_results, key=lambda x: methods_results[x]["criterion_value"]
    )
    best_method_results = methods_results[best_method]

    return (
        best_method_results["partition"],
        best_method,
        best_method_results["criterion_value"],
    )


def run_method_and_compute_agreement(method_func, ensemble_data, ensemble, k, **kwargs):
    """
    Runs a consensus clustering method on the ensemble data, obtains the
    consolidated partition with the desired number of clusters, and computes a
    series of performance measures.

    Args:
        method_func: A consensus function (first argument is either the
            ensemble or the coassociation matrix derived from the ensemble).
        ensemble_data: A numpy array with the ensemble data that will be given
            to the specified method. For evidence accumulation methods, this
            is the coassociation matrix (a square matrix with the distance
            between object pairs derived from the ensemble).
        ensemble: A numpy array representing the ensemble (partitions in rows,
            objects in columns).
        k: The number of clusters to obtain from the ensemble data using the
            specified method.
        kwargs: Other parameters passed to `method_func`.

    Returns:
        It returns a tuple with the data partition derived from the ensemble
        data using the specified method, and some performance measures of this
        partition.
""" part = method_func(ensemble_data, k, **kwargs) nmi_values = np.array( [ compare_arrays(ensemble_member, part, nmi, use_weighting=True) for ensemble_member in ensemble ] ) ami_values = np.array( [ compare_arrays(ensemble_member, part, ami, use_weighting=True) for ensemble_member in ensemble ] ) ari_values = np.array( [ compare_arrays(ensemble_member, part, ari, use_weighting=True) for ensemble_member in ensemble ] ) performance_values = { "ari_mean": np.mean(ari_values), "ari_median": np.median(ari_values), "ari_std": np.std(ari_values), "ami_mean": np.mean(ami_values), "ami_median": np.median(ami_values), "ami_std": np.std(ami_values), "nmi_mean": np.mean(nmi_values), "nmi_median": np.median(nmi_values), "nmi_std": np.std(nmi_values), } return part, performance_values
[ 37811, 198, 4264, 1299, 5499, 284, 7716, 290, 12082, 257, 32966, 1586, 34549, 13, 198, 37811, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 19798, 292, 355, 279, 67, 198, 6738, 1341, 35720, 13, 4164, 10466, 1330, 5166, 3083, 62, 17080...
2.48577
3,584
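A minimal end-to-end sketch of the pipeline above; the toy data and the parameter grid are hypothetical:

import numpy as np
from sklearn.cluster import KMeans

data = np.random.rand(100, 5)  # 100 objects, 5 features (toy data)

# a small grid of k-means clusterers with different numbers of clusters
clusterers = {
    'k-means #{}'.format(k): KMeans(n_clusters=k, random_state=0)
    for k in range(2, 6)
}

ensemble_df = generate_ensemble(data, clusterers, attributes=['n_clusters'])

# stack the partitions into shape (n_partitions, n_objects) and build the
# coassociation (distance) matrix for the whole ensemble
ensemble = np.vstack(ensemble_df['partition'].values)
dist_matrix = get_ensemble_distance_matrix(ensemble, n_jobs=2)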
import logging

from django.db import transaction, connection
from django.utils import timezone
from django.utils.timezone import localtime

from chart.application.enums.department_type import DepartmentType
from chart.application.enums.gender_type import GenderType
from chart.application.service.app_logic_base import AppLogicBaseService
from chart.models import Employees, Departments


# NOTE: the enclosing class was elided from this excerpt; the name
# "EmployeesService" is an assumption made so the two methods below
# (which clearly expect `self`) are valid code again.
class EmployeesService(AppLogicBaseService):
    """
    Service logic for employees.
    """

    def _regist_departments(self, department_no, department_name):
        """
        Insert a new Departments record.
        """
        self.regist_model = Departments()
        self.regist_model.department_no = department_no
        self.regist_model.department_name = department_name
        self.regist_model.delete_flag = 0
        self.regist_model.regist_dt = localtime(timezone.now())
        self.regist_model.update_dt = localtime(timezone.now())
        self.regist_model.save()

    def _update_employees_department(self, employees_id, department_id, department_date_from):
        """
        Move an existing employee to another department.
        """
        self.update_model = Employees()
        self.update_model.pk = employees_id
        self.update_model.department_id = department_id
        self.update_model.department_date_from = department_date_from
        self.update_model.update_dt = localtime(timezone.now())
        self.update_model.save(update_fields=['department_id', 'department_date_from', 'update_dt'])
[ 11748, 18931, 198, 198, 6738, 42625, 14208, 13, 9945, 1330, 8611, 11, 4637, 198, 6738, 42625, 14208, 13, 26791, 1330, 640, 11340, 198, 6738, 42625, 14208, 13, 26791, 13, 2435, 11340, 1330, 1957, 2435, 198, 198, 6738, 8262, 13, 31438, 13...
2.676983
517
from flask import render_template
[ 6738, 42903, 1330, 8543, 62, 28243, 628, 198 ]
4.5
8
# -*- coding: utf-8 -*-

# Form implementation generated from reading ui file '.\hastakayit_gui.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!

from PyQt5 import QtCore, QtGui, QtWidgets
import mysql.connector
from PyQt5.QtWidgets import QMessageBox, QWidget, QMainWindow
from PyQt5.QtCore import Qt, QDate, QDateTime

# The database connection and the SQL cursor are created here.
# (translated from the original Turkish comment)
db = mysql.connector.connect(
    host="localhost",
    user="root",
    passwd="12345",
    database="cilth_vt"
)
cursor = db.cursor()

if __name__ == "__main__":
    import sys

    app = QtWidgets.QApplication(sys.argv)
    MainWindow = QtWidgets.QMainWindow()
    # Ui_MainWindow2 belongs to the generated UI code elided from this excerpt
    ui = Ui_MainWindow2()
    ui.setupUi2(MainWindow)
    MainWindow.show()
    sys.exit(app.exec_())
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 2, 5178, 7822, 7560, 422, 3555, 334, 72, 2393, 45302, 59, 71, 459, 461, 323, 270, 62, 48317, 13, 9019, 6, 198, 2, 198, 2, 15622, 416, 25, 9485, 48, 83, 20, ...
2.450617
324
#!/usr/bin/env python3

'''
Copyright (c) 2021, Collins Aerospace.
Developed with the sponsorship of Defense Advanced Research Projects Agency (DARPA).

Permission is hereby granted, free of charge, to any person obtaining a copy of this data,
including any software or models in source or binary form, as well as any drawings,
specifications, and documentation (collectively "the Data"), to deal in the Data without
restriction, including without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Data, and to permit persons to whom the
Data is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Data.

THE DATA IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS, SPONSORS, DEVELOPERS, CONTRIBUTORS, OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE DATA OR THE
USE OR OTHER DEALINGS IN THE DATA.
'''

import os
import re
import sys

from github3 import GitHub

from pprint import pformat

GITHUB_API = 'https://api.github.com/repos'
GITHUB_RELEASES = 'releases'

AUTH_TOKEN = os.environ['GH_TOKEN'] if 'GH_TOKEN' in os.environ.keys() else None

REPOSITORY_OWNER = 'loonwerks'
REPOSITORY_REPO = 'AGREE'

PRODUCT_ASSET_PATTERN = re.compile(r'com.rockwellcollins.atc.agree.repository-\d+\.\d+\.\d+(-(\d{12}))?-.*')

if __name__ == '__main__':
    manage_daily_builds(sys.argv[1])
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 198, 7061, 6, 198, 15269, 357, 66, 8, 33448, 11, 14006, 43226, 13, 198, 19246, 276, 351, 262, 27418, 286, 5947, 13435, 4992, 29898, 7732, 357, 35, 1503, 4537, 737, 198, 198, 5990...
3.153982
565
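The script above calls manage_daily_builds() without defining it in this excerpt. A hedged sketch of what such a function might look like with github3.py; the retention policy and the role of the branch argument are assumptions, not the script's actual behavior:

def manage_daily_builds(branch_name, keep=2):
    """Prune old daily-build releases, keeping the `keep` most recent.

    Hypothetical reconstruction: the real body is not shown in the excerpt.
    """
    gh = GitHub(token=AUTH_TOKEN)
    repo = gh.repository(REPOSITORY_OWNER, REPOSITORY_REPO)
    # daily builds are recognizable by their timestamped product assets
    dailies = [
        r for r in repo.releases()
        if branch_name in r.tag_name
        and any(PRODUCT_ASSET_PATTERN.match(a.name) for a in r.assets())
    ]
    dailies.sort(key=lambda r: r.created_at, reverse=True)
    for release in dailies[keep:]:
        print('Deleting old daily build %s' % release.tag_name)
        release.delete()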
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Test components of specific crowdsourcing tasks. """ import json import os import unittest import pandas as pd import parlai.utils.testing as testing_utils try: from parlai.crowdsourcing.tasks.turn_annotations_static.analysis.compile_results import ( TurnAnnotationsStaticResultsCompiler, ) from parlai.crowdsourcing.utils.tests import check_stdout except ImportError: pass if __name__ == "__main__": unittest.main()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 198, 2, 15069, 357, 66, 8, 3203, 11, 3457, 13, 290, 663, 29116, 13, 198, 2, 770, 2723, 2438, 318, 11971, 739, 262, 17168, 5964, 1043, 287, 262, 198, 2, 38559, 24290, 2393, 287,...
3.103774
212
#!/usr/bin/python
# -*- coding: utf-8 -*-

# (c) 2013, Chatham Financial <oss@chathamfinancial.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

DOCUMENTATION = '''
---
module: rabbitmq_plugin
short_description: Adds or removes plugins to RabbitMQ
description:
  - Enables or disables RabbitMQ plugins
version_added: "1.1"
author: Chris Hoffman
options:
  names:
    description:
      - Comma-separated list of plugin names
    required: true
    default: null
    aliases: [name]
  new_only:
    description:
      - Only enable missing plugins
      - Does not disable plugins that are not in the names list
    required: false
    default: "no"
    choices: [ "yes", "no" ]
  state:
    description:
      - Specify if plugins are to be enabled or disabled
    required: false
    default: enabled
    choices: [enabled, disabled]
  prefix:
    description:
      - Specify a custom install prefix of a RabbitMQ installation
    required: false
    version_added: "1.3"
    default: null
'''

EXAMPLES = '''
# Enables the rabbitmq_management plugin
- rabbitmq_plugin: names=rabbitmq_management state=enabled
'''

# import module snippets
from ansible.module_utils.basic import *
main()
[ 2, 48443, 14629, 14, 8800, 14, 29412, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 2, 357, 66, 8, 2211, 11, 609, 37520, 11302, 1279, 793, 31, 354, 37520, 46921, 13, 785, 29, 198, 2, 198, 2, 770, 239...
3.131579
570
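The snippet ends by calling main() without defining it in this excerpt. A minimal hedged sketch of the missing body, derived from the DOCUMENTATION block above; the rabbitmq-plugins invocation details are assumptions, not the module's actual implementation:

def main():
    arg_spec = dict(
        names=dict(required=True, aliases=['name']),
        new_only=dict(default='no', choices=['yes', 'no']),
        state=dict(default='enabled', choices=['enabled', 'disabled']),
        prefix=dict(required=False, default=None),
    )
    module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True)

    names = module.params['names'].split(',')
    new_only = module.params['new_only'] == 'yes'
    state = module.params['state']
    prefix = module.params['prefix']

    # locate the rabbitmq-plugins binary, honoring a custom prefix
    if prefix:
        binary = '{}/sbin/rabbitmq-plugins'.format(prefix)
    else:
        binary = module.get_bin_path('rabbitmq-plugins', required=True)

    def run(args):
        rc, out, err = module.run_command([binary] + args, check_rc=True)
        return out

    # '-E -m' lists only explicitly enabled plugins, minimally formatted
    enabled = set(run(['list', '-E', '-m']).splitlines())
    wanted = set(names)

    changed = False
    if state == 'enabled':
        for plugin in wanted - enabled:
            if not module.check_mode:
                run(['enable', plugin])
            changed = True
        if not new_only:
            for plugin in enabled - wanted:
                if not module.check_mode:
                    run(['disable', plugin])
                changed = True
    else:
        for plugin in wanted & enabled:
            if not module.check_mode:
                run(['disable', plugin])
            changed = True

    module.exit_json(changed=changed, state=state)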
# Copyright 2016 - Nokia, ZTE # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from itertools import chain from six.moves import reduce from oslo_log import log from vitrage.common.constants import DatasourceProperties as DSProps from vitrage.common.constants import GraphAction from vitrage.datasources.driver_base import DriverBase from vitrage.datasources.static import STATIC_DATASOURCE from vitrage.datasources.static import StaticFields from vitrage.utils import file as file_utils LOG = log.getLogger(__name__)
[ 2, 15069, 1584, 532, 26182, 11, 1168, 9328, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 345, 743, 198, 2, 407, 779, 428, 2393, 2845, 287, 11846, 351, 262, 13789, 13, 921, 74...
3.611307
283
import numpy as np
[ 11748, 299, 32152, 355, 45941, 628 ]
3.333333
6
import sys
[ 11748, 25064, 198 ]
3.666667
3
#!/usr/bin/python3
# vim:se tw=0 sts=4 ts=4 et ai:
"""
Copyright © 2014 Osamu Aoki

Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import argparse
import os
import pwd
import sys
import time
import debmake.read

###########################################################################
# undefined environment variable -> ''
###########################################################################
# Initialize parameters
###########################################################################
# Test code
###########################################################################
if __name__ == '__main__':
    for p, v in para().items():
        print("para['{}'] = \"{}\"".format(p, v))
[ 2, 48443, 14629, 14, 8800, 14, 29412, 18, 198, 2, 43907, 25, 325, 665, 28, 15, 39747, 28, 19, 40379, 28, 19, 2123, 257, 72, 25, 198, 37811, 198, 15269, 220, 1946, 8834, 321, 84, 317, 18228, 198, 198, 5990, 3411, 318, 29376, 7520, ...
4.200483
414
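The test block above calls para(), which is elided from this excerpt. A minimal hypothetical stand-in consistent with the "undefined environment variable -> ''" note; the real debmake builds a far larger parameter dictionary:

def para():
    # hypothetical stand-in; the real implementation lives in debmake
    parameters = {}
    # GECOS field of the current user as the default full name
    gecos = pwd.getpwuid(os.getuid())[4].split(',')[0]
    parameters['fullname'] = os.environ.get('DEBFULLNAME', gecos)
    parameters['email'] = os.environ.get('DEBEMAIL', '')
    parameters['date'] = time.strftime('%a, %d %b %Y %H:%M:%S %z')
    return parameters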
from django.core.exceptions import NON_FIELD_ERRORS
from rest_framework import status, viewsets, serializers
from rest_framework.decorators import list_route
from rest_framework.response import Response
from rest_framework.serializers import ModelSerializer

from jet_django.filters.model_aggregate import AggregateFilter
from jet_django.filters.model_group import GroupFilter
from jet_django.pagination import CustomPageNumberPagination
from jet_django.permissions import HasProjectPermissions, ModifyNotInDemo
from jet_django.serializers.reorder import reorder_serializer_factory


def model_viewset_factory(build_model, build_filter_class, build_serializer_class,
                          build_detail_serializer_class, build_queryset,
                          build_actions, ordering_field):
    ReorderSerializer = reorder_serializer_factory(build_queryset, ordering_field)

    # The Viewset class body was elided from this excerpt; this minimal
    # stand-in only restores enough structure for the factory to be valid.
    class Viewset(viewsets.ModelViewSet):
        model = build_model
        queryset = build_queryset
        serializer_class = build_serializer_class
        filter_class = build_filter_class
        pagination_class = CustomPageNumberPagination
        permission_classes = (HasProjectPermissions, ModifyNotInDemo)

    def make_route(action):
        # Each custom action becomes a POST list route on the viewset.
        # The action-invocation API below is an assumption: the original
        # route body is not shown in the excerpt.
        def route(self, request):
            result = action.run(request.data)  # hypothetical action API
            return Response(result, status=status.HTTP_200_OK)
        return route

    for action in build_actions:
        decorator = list_route(methods=['post'])
        route = decorator(make_route(action))
        setattr(Viewset, action._meta.name, route)

    return Viewset
[ 6738, 42625, 14208, 13, 7295, 13, 1069, 11755, 1330, 44521, 62, 44603, 62, 24908, 50, 198, 6738, 1334, 62, 30604, 1330, 3722, 11, 5009, 1039, 11, 11389, 11341, 198, 6738, 1334, 62, 30604, 13, 12501, 273, 2024, 1330, 1351, 62, 38629, 1...
3.248408
314