content stringlengths 1 1.05M | input_ids listlengths 1 883k | ratio_char_token float64 1 22.9 | token_count int64 1 883k |
|---|---|---|---|
bluelabs_format_hints = {
'field-delimiter': ',',
'record-terminator': "\n",
'compression': 'GZIP',
'quoting': None,
'quotechar': '"',
'doublequote': False,
'escape': '\\',
'encoding': 'UTF8',
'dateformat': 'YYYY-MM-DD',
'timeonlyformat': 'HH24:MI:SS',
'datetimeformattz': 'YYYY-MM-DD HH:MI:SSOF',
'datetimeformat': 'YYYY-MM-DD HH24:MI:SS',
'header-row': False,
}
csv_format_hints = {
'field-delimiter': ',',
'record-terminator': "\n",
'compression': 'GZIP',
'quoting': 'minimal',
'quotechar': '"',
'doublequote': True,
'escape': None,
'encoding': 'UTF8',
'dateformat': 'MM/DD/YY',
'timeonlyformat': 'HH24:MI:SS',
'datetimeformattz': 'MM/DD/YY HH24:MI',
'datetimeformat': 'MM/DD/YY HH24:MI',
'header-row': True,
}
vertica_format_hints = {
'field-delimiter': '\001',
'record-terminator': '\002',
'compression': None,
'quoting': None,
'quotechar': '"',
'doublequote': False,
'escape': None,
'encoding': 'UTF8',
'dateformat': 'YYYY-MM-DD',
'timeonlyformat': 'HH24:MI:SS',
'datetimeformat': 'YYYY-MM-DD HH:MI:SS',
'datetimeformattz': 'YYYY-MM-DD HH:MI:SSOF',
'header-row': False,
}
| [
65,
2290,
417,
8937,
62,
18982,
62,
71,
29503,
796,
1391,
198,
220,
220,
220,
705,
3245,
12,
12381,
320,
2676,
10354,
46083,
3256,
198,
220,
220,
220,
705,
22105,
12,
23705,
1352,
10354,
37082,
77,
1600,
198,
220,
220,
220,
705,
558... | 2.120069 | 583 |
# FROM THE OP PAPER-ISH
MINI_BATCH_SIZE = 32
MEMORY_SIZE = 10**6
BUFFER_SIZE = 100
LHIST = 4
GAMMA = 0.99
UPDATE_FREQ_ONlINE = 4
UPDATE_TARGET = 2500 # This was 10**4 but is measured in actor steps, so it's divided update_freq_online
TEST_FREQ = 5*10**4 # Measure in updates
TEST_STEPS = 10**4
LEARNING_RATE = 0.00025
G_MOMENTUM = 0.95
EPSILON_INIT = 1.0
EPSILON_FINAL = 0.1
EPSILON_TEST = 0.05
EPSILON_LIFE = 10**6
REPLAY_START = 5*10**4
NO_OP_MAX = 30
UPDATES = 5*10**6
CLIP_REWARD = 1.0
CLIP_ERROR = 1.0
# MISC
PLAY_STEPS = 3000
BUFFER_SAMPLES = 20
CROP = (0, -1)
FRAMESIZE = [84,84]
FRAMESIZETP = (84,84)
#DROPS = [0.0,0.15,0.1,0.0]
DROPS = [0.0, 0.0, 0.0, 0.0]
Games = ['air_raid', 'alien', 'amidar', 'assault', 'asterix', 'asteroids', 'atlantis',
'bank_heist', 'battle_zone', 'beam_rider', 'bowling', 'boxing', 'breakout', 'carnival',
'centipede', 'chopper_command', 'crazy_climber', 'demon_attack', 'double_dunk',
'enduro', 'fishing_derby', 'freeway', 'frostbite', 'gopher', 'gravitar',
'hero', 'ice_hockey', 'jamesbond', 'kangaroo', 'krull', 'kung_fu_master',
'montezuma_revenge', 'ms_pacman', 'name_this_game', 'pong',
'private_eye', 'qbert', 'riverraid', 'road_runner', 'robotank', 'seaquest',
'space_invaders', 'star_gunner', 'tennis', 'time_pilot', 'tutankham', 'up_n_down',
'venture', 'video_pinball', 'wizard_of_wor', 'zaxxon']
GamesExtras = ['defender','phoenix','berzerk','skiing','yars_revenge','solaris','pitfall',]
ACTION_MEANING = {
0: "NOOP",
1: "FIRE",
2: "UP",
3: "RIGHT",
4: "LEFT",
5: "DOWN",
6: "UPRIGHT",
7: "UPLEFT",
8: "DOWNRIGHT",
9: "DOWNLEFT",
10: "UPFIRE",
11: "RIGHTFIRE",
12: "LEFTFIRE",
13: "DOWNFIRE",
14: "UPRIGHTFIRE",
15: "UPLEFTFIRE",
16: "DOWNRIGHTFIRE",
17: "DOWNLEFTFIRE",
} | [
2,
16034,
3336,
13349,
350,
2969,
1137,
12,
18422,
198,
23678,
40,
62,
33,
11417,
62,
33489,
796,
3933,
198,
44,
3620,
15513,
62,
33489,
796,
838,
1174,
21,
198,
19499,
45746,
62,
33489,
796,
1802,
198,
43,
39,
8808,
796,
604,
198,
... | 2.07605 | 881 |
import os
import json
import logging
from collections import defaultdict
import numpy as np
import networkx as nx
import torch
from torch.utils.data import DataLoader
from torch.optim import Adam
from tqdm import tqdm
from milieu.util.util import place_on_cpu, place_on_gpu
from milieu.paper.methods.method import DPPMethod
| [
198,
11748,
28686,
198,
11748,
33918,
198,
11748,
18931,
198,
6738,
17268,
1330,
4277,
11600,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
3127,
87,
355,
299,
87,
198,
11748,
28034,
220,
198,
6738,
28034,
13,
26791,
13,
7890,
13... | 3.367347 | 98 |
import os
import distutils.util
from telegram import Update
from telegram.ext import Updater, CommandHandler, Filters, CallbackContext
from dotenv import load_dotenv
from scripts.get_info import get_ratio
from scripts.terra import get_balances, execute_swap
load_dotenv(override=True)
notify_telegram = bool(distutils.util.strtobool(os.getenv("NOTIFY_TELEGRAM")))
if notify_telegram:
telegram_chat_id = int(os.getenv("TELEGRAM_CHAT_ID"))
token = os.getenv("TELEGRAM_TOKEN")
def ping_command(update: Update, context: CallbackContext) -> None:
"""Send a message when the command /ping is issued."""
update.message.reply_text("pong")
def help_command(update: Update, context: CallbackContext) -> None:
"""Send list of commands when /help is issued."""
update.message.reply_text(
"Commands:\n/ping check if thebot is online\n/luna get the bluna -> luna ratio\n/bluna get the luna -> bluna ratio\n/ust get the ust ratio\n/balance get the balances\n/swap_to_bluna_command to force a swap from luna to bluna\n/swap_to_luna_command to force a swap from bluna to luna"
)
def bluna_command(update: Update, context: CallbackContext) -> None:
"""Send the current luna to bluna ratio."""
luna_balance, bluna_balance, ust_balance = get_balances(notify_balance=False)
bluna_price = get_ratio("bluna", luna_balance)
update.message.reply_text(f"Luna -> bLuna ratio: {bluna_price}")
def luna_command(update: Update, context: CallbackContext) -> None:
"""Send the current luna to bluna ratio."""
luna_balance, bluna_balance, ust_balance = get_balances(notify_balance=False)
bluna_price = get_ratio("luna", bluna_balance)
update.message.reply_text(f"bLuna -> Luna ratio: {bluna_price}")
def ust_command(update: Update, context: CallbackContext) -> None:
"""Send the current luna to bluna ratio."""
luna_balance, bluna_balance, ust_balance = get_balances(notify_balance=False)
ust_price = get_ratio("ust", luna_balance)
update.message.reply_text(f"Luna -> UST price: {ust_price}")
def balance_command(update: Update, context: CallbackContext) -> None:
"""Send the current balances of the account."""
get_balances()
def swap_to_bluna_command(update: Update, context: CallbackContext) -> None:
"""Force swap to bluna."""
luna_balance, bluna_balance, ust_balance = get_balances()
price = get_ratio("bluna", luna_balance)
if luna_balance > 0 and ust_balance > 0.15:
execute_swap(luna_balance, "bluna", price)
else:
raise Exception(f"Not enough Luna {luna_balance} or UST {ust_balance}")
def swap_to_luna_command(update: Update, context: CallbackContext) -> None:
"""Force swap to luna."""
luna_balance, bluna_balance, ust_balance = get_balances()
price = get_ratio("luna", bluna_balance)
if bluna_balance > 0 and ust_balance > 0.15:
execute_swap(bluna_balance, "luna", price)
else:
raise Exception(f"Not enough bLuna {bluna_balance} or UST {ust_balance}")
| [
11748,
28686,
198,
11748,
1233,
26791,
13,
22602,
198,
198,
6738,
573,
30536,
1330,
10133,
198,
6738,
573,
30536,
13,
2302,
1330,
3205,
67,
729,
11,
9455,
25060,
11,
7066,
1010,
11,
4889,
1891,
21947,
198,
6738,
16605,
24330,
1330,
3440... | 2.718272 | 1,111 |
import os
import zipfile
import requests
def check_and_download(name, google_id, files=None, force_download=False):
"""
Checks if the meshes folder exists in the xml directory
If not it will ask the user if they want to download them
to be able to proceed
Parameters
----------
name: string
the file or directory to download
google_id: string
the google id that points to the location of the zip file.
This should be stored in the xml or config file
force_download: boolean, Optional (Default: False)
True to skip checking if the file or folder exists
"""
files_missing = False
if force_download:
files_missing = True
else:
# check if the provided name is a file or folder
if not os.path.isfile(name) and not os.path.isdir(name):
print("Checking for mesh files in : ", name)
files_missing = True
elif files is not None:
mesh_files = [
f for f in os.listdir(name) if os.path.isfile(os.path.join(name, f))
]
# files_missing = all(elem in sorted(mesh_files) for elem in sorted(files))
files_missing = set(files).difference(set(mesh_files))
if files_missing:
print("Checking for mesh files in : ", name)
print("The following files are missing: ", files_missing)
if files_missing:
yes = ["y", "yes"]
no = ["n", "no"]
answered = False
question = "Download mesh and texture files to run sim? (y/n): "
while not answered:
reply = str(input(question)).lower().strip()
if reply[0] in yes:
print("Downloading files...")
name = name.split("/")
name = "/".join(s for s in name[:-1])
download_files(google_id, name + "/tmp")
print("Sim files saved to %s" % name)
answered = True
elif reply[0] in no:
raise Exception("Please download the required files to run the demo")
else:
question = "Please Enter (y/n) "
if __name__ == "__main__":
check_and_download('/home/zhimin/code/6_all_old_code/0_abr_control/abr_control/arms/',
'1doam-DgkW7OSPnwWZQM84edzX84ot-GK', files=None, force_download=True)
# download_files('1SjWRUl-D1FZ5fB2cy4jF4X9wTsQ5LWzo', '/home/zhimin/code/6_all_old_code/0_abr_control/abr_control/arms')
| [
11748,
28686,
198,
11748,
19974,
7753,
198,
11748,
7007,
628,
198,
4299,
2198,
62,
392,
62,
15002,
7,
3672,
11,
23645,
62,
312,
11,
3696,
28,
14202,
11,
2700,
62,
15002,
28,
25101,
2599,
198,
197,
37811,
198,
197,
7376,
4657,
611,
2... | 2.66127 | 803 |
import os
import uuid
from django.conf import settings
from django.db import models
from django.utils.translation import ugettext_lazy as _
| [
11748,
28686,
198,
11748,
334,
27112,
198,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
26791,
13,
41519,
1330,
334,
1136,
5239,
62,
75,
12582,
355,
4808,
62... | 3.404762 | 42 |
__author__ = 'Chong-U Lim, culim@mit.edu'
import uinput
| [
834,
9800,
834,
796,
705,
1925,
506,
12,
52,
7576,
11,
10845,
320,
31,
2781,
13,
15532,
6,
198,
198,
11748,
334,
15414,
628,
628,
628,
628,
628
] | 2.357143 | 28 |
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Top-level presubmit script for catapult.
See https://www.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools.
"""
import re
import sys
_EXCLUDED_PATHS = (
r'(.*[\\/])?\.git[\\/].*',
r'.+\.png$',
r'.+\.svg$',
r'.+\.skp$',
r'.+\.gypi$',
r'.+\.gyp$',
r'.+\.gn$',
r'.*\.gitignore$',
r'.*codereview.settings$',
r'.*AUTHOR$',
r'^CONTRIBUTORS\.md$',
r'.*LICENSE$',
r'.*OWNERS$',
r'.*README\.md$',
r'^dashboard[\\/]dashboard[\\/]api[\\/]examples[\\/].*.js',
r'^dashboard[\\/]dashboard[\\/]templates[\\/].*',
r'^experimental[\\/]heatmap[\\/].*',
r'^experimental[\\/]trace_on_tap[\\/]third_party[\\/].*',
r'^perf_insights[\\/]test_data[\\/].*',
r'^perf_insights[\\/]third_party[\\/].*',
r'^telemetry[\\/]third_party[\\/].*',
r'^third_party[\\/].*',
r'^tracing[\\/]\.allow-devtools-save$',
r'^tracing[\\/]bower\.json$',
r'^tracing[\\/]\.bowerrc$',
r'^tracing[\\/]tracing_examples[\\/]string_convert\.js$',
r'^tracing[\\/]test_data[\\/].*',
r'^tracing[\\/]third_party[\\/].*',
r'^py_vulcanize[\\/]third_party[\\/].*',
r'^common/py_vulcanize[\\/].*', # TODO(hjd): Remove after fixing long lines.
)
_GITHUB_BUG_ID_RE = re.compile(r'#[1-9]\d*')
_MONORAIL_BUG_ID_RE = re.compile(r'[1-9]\d*')
_MONORAIL_PROJECT_NAMES = frozenset({'chromium', 'v8', 'angleproject'})
| [
2,
15069,
1853,
383,
18255,
1505,
46665,
13,
1439,
2489,
10395,
13,
198,
2,
5765,
286,
428,
2723,
2438,
318,
21825,
416,
257,
347,
10305,
12,
7635,
5964,
326,
460,
307,
198,
2,
1043,
287,
262,
38559,
24290,
2393,
13,
198,
198,
37811... | 2.050826 | 787 |
# author: jamie
# email: jinjiedeng.jjd@gmail.com
if __name__ == "__main__":
infix = input()
T = int(input())
for _ in range(T):
value = list(map(int, input().split()))
postfix = []
InfixToPostfix(infix, postfix)
print(Evaluate(postfix, value)) | [
2,
1772,
25,
16853,
494,
198,
2,
3053,
25,
474,
259,
73,
798,
1516,
13,
41098,
67,
31,
14816,
13,
785,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
1167,
844,
796,
5128,
3419,
198,
220,
... | 2.140741 | 135 |
from django.contrib import admin
from .models import Target, Exercise, Workout
# Register your models here.
admin.site.register(Target)
admin.site.register(Exercise)
admin.site.register(Workout)
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
201,
198,
6738,
764,
27530,
1330,
12744,
11,
32900,
11,
5521,
448,
201,
198,
201,
198,
2,
17296,
534,
4981,
994,
13,
201,
198,
28482,
13,
15654,
13,
30238,
7,
21745,
8,
201,
198,
28482... | 3.171875 | 64 |
Ciclo().nuevoCiclo() | [
198,
34,
291,
5439,
22446,
77,
518,
13038,
34,
291,
5439,
3419
] | 1.75 | 12 |
import rumps
import sys
import icon_manager
from datetime import timedelta
import timekeeper
import os
# pyinstaller --onefile -w --add-data "Icons/:Icons" --icon="Icons/timeglass.png" --clean timeglass.spec
# rumps.debug_mode(True)
if __name__ == "__main__":
default_secounds = 60 * 60
TimerApp(default_secounds).run()
| [
11748,
7440,
862,
198,
11748,
25064,
198,
11748,
7196,
62,
37153,
198,
6738,
4818,
8079,
1330,
28805,
12514,
198,
11748,
640,
13884,
198,
11748,
28686,
198,
198,
2,
12972,
17350,
263,
1377,
505,
7753,
532,
86,
1377,
2860,
12,
7890,
366,... | 2.82906 | 117 |
import os
from discord.ext.commands import Bot
from Flare import Flare
bot = Bot("~~")
bot.add_cog(Flare(bot))
bot.run(os.environ.get("BOT_TOKEN"))
| [
11748,
28686,
198,
198,
6738,
36446,
13,
2302,
13,
9503,
1746,
1330,
18579,
198,
6738,
39833,
1330,
39833,
198,
198,
13645,
796,
18579,
7203,
4907,
4943,
198,
198,
13645,
13,
2860,
62,
66,
519,
7,
7414,
533,
7,
13645,
4008,
628,
198,
... | 2.52459 | 61 |
import logging
import os
import sys
import time
project_name = os.getcwd().split('/')[-1]
_logger = logging.getLogger(project_name)
_logger.addHandler(logging.StreamHandler())
| [
11748,
18931,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
640,
628,
198,
16302,
62,
3672,
796,
28686,
13,
1136,
66,
16993,
22446,
35312,
10786,
14,
11537,
58,
12,
16,
60,
198,
62,
6404,
1362,
796,
18931,
13,
1136,
11187,
1362,
7... | 2.875 | 64 |
import setuptools
ver = {}
with open('OpenControl/_version.py') as fd:
exec(fd.read(), ver)
version = ver.get('__version__')
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="OpenControl",
version=version,
author="VNOpenAI",
author_email="phi9b2@gmail.com",
description="A python control systems package",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://opencontrol.readthedocs.io/en/latest/",
project_urls={
"Bug Tracker": "https://github.com/VNOpenAI/OpenControl/issues",
},
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
packages=setuptools.find_packages(),
python_requires=">=3.7",
) | [
11748,
900,
37623,
10141,
198,
198,
332,
796,
23884,
198,
4480,
1280,
10786,
11505,
15988,
47835,
9641,
13,
9078,
11537,
355,
277,
67,
25,
198,
220,
220,
220,
220,
220,
220,
220,
2452,
7,
16344,
13,
961,
22784,
3326,
8,
198,
9641,
7... | 2.578947 | 342 |
'''See the shared Google Drive documentation for an inheritance diagram that
shows the relationships between the classes defined in this file.
'''
import numpy as np
import socket
import time
from riglib import source
from ismore import settings, udp_feedback_client
import ismore_bmi_lib
from utils.constants import *
#import armassist
#import rehand
from riglib.filter import Filter
from riglib.plants import Plant
import os
################################################
UDP_PLANT_CLS_DICT = {
'ArmAssist': ArmAssistPlantUDP,
'ReHand': ReHandPlantUDP,
'IsMore': IsMorePlantUDP,
'IsMoreEMGControl': IsMorePlantEMGControl,
'IsMoreHybridControl': IsMorePlantHybridBMI,
'IsMorePlantHybridBMISoftSafety': IsMorePlantHybridBMISoftSafety,
'DummyPlant': DummyPlantUDP,
}
###########################
##### Deprecated code #####
###########################
| [
7061,
6,
6214,
262,
4888,
3012,
9974,
10314,
329,
281,
24155,
16362,
326,
198,
49596,
262,
6958,
1022,
262,
6097,
5447,
287,
428,
2393,
13,
198,
7061,
6,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
17802,
198,
11748,
640,
198... | 3.089041 | 292 |
#-*- coding: utf-8 -*-
""" EOSS catalog system
functionality for the catalog endpoint
"""
from utilities.web_utils import remote_file_exists
__author__ = "Thilo Wehrmann, Steffen Gebhardt"
__copyright__ = "Copyright 2016, EOSS GmbH"
__credits__ = ["Thilo Wehrmann", "Steffen Gebhardt"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Thilo Wehrmann"
__email__ = "twehrmann@eoss.cloud"
__status__ = "Production"
import datetime
import ujson
import time
import dateparser
import falcon
try:
import cStringIO as StringIO
except ImportError:
import StringIO
import csv
from xlsxwriter import Workbook
from dateutil.parser import parse
import numpy
from sqlalchemy import and_
import logging
from collections import defaultdict
from model.orm import Catalog_Dataset, Spatial_Reference
from api import General_Structure
from .db_calls import Persistance
from . import getKeysFromDict
from .tools import get_base_url, can_zip_response, compress_body, serialize, make_GeoJson
from api_logging import logger
GRID_SYSTEMS = {'Sentinel - 2A': 10,
'LANDSAT_ETM': 11,
'LANDSAT_ETM_SLC_OFF': 11,
'OLI_TIRS': 11,
'TIRS': 11}
| [
2,
12,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
412,
18420,
18388,
1080,
198,
11244,
329,
262,
18388,
36123,
198,
37811,
198,
6738,
20081,
13,
12384,
62,
26791,
1330,
6569,
62,
7753,
62,
1069,
1023,
198,
19... | 2.534836 | 488 |
import logging
import pytest
from ocs_ci.framework.testlib import tier1, skipif_ui_not_support, ui
from ocs_ci.ocs.ui.pvc_ui import PvcUI
from ocs_ci.framework.testlib import skipif_ocs_version
from ocs_ci.framework.pytest_customization.marks import green_squad
from ocs_ci.ocs.resources.pvc import get_all_pvc_objs, get_pvc_objs
from ocs_ci.ocs import constants
from ocs_ci.helpers import helpers
from ocs_ci.helpers.helpers import wait_for_resource_state, create_unique_resource_name
from ocs_ci.utility.utils import get_ocp_version
from ocs_ci.ocs.ui.views import locators
from ocs_ci.ocs.resources.pod import get_fio_rw_iops
logger = logging.getLogger(__name__)
| [
11748,
18931,
198,
11748,
12972,
9288,
198,
198,
6738,
267,
6359,
62,
979,
13,
30604,
13,
9288,
8019,
1330,
14249,
16,
11,
14267,
361,
62,
9019,
62,
1662,
62,
11284,
11,
334,
72,
198,
6738,
267,
6359,
62,
979,
13,
420,
82,
13,
901... | 2.697581 | 248 |
import pandas as pd
from feature_generation.datasets.Timeseries import Timeseries
from os.path import basename
| [
11748,
19798,
292,
355,
279,
67,
198,
6738,
3895,
62,
20158,
13,
19608,
292,
1039,
13,
28595,
10640,
1330,
3782,
10640,
198,
6738,
28686,
13,
6978,
1330,
1615,
12453,
628
] | 3.733333 | 30 |
from sys import exit
from colour import *
from rs.reaction_system import ReactionSystem
# EOF
| [
6738,
25064,
1330,
8420,
198,
6738,
9568,
1330,
1635,
198,
198,
6738,
44608,
13,
260,
2673,
62,
10057,
1330,
39912,
11964,
628,
628,
198,
2,
412,
19238,
198
] | 3.535714 | 28 |
import os
import time
import fire
import torch
import random
import numpy as np
import pandas as pd
import torch.nn.functional as F
## to detach from monitor
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from trainer import Trainer
from model import ConvNet1D
from helpers import get_dataset, test_epoch, ready, save_obj, load_obj
def read_interim_data(file_name, index_col='Policy_Number'):
'''
In: file_name
Out: interim_data
Description: read data from directory /data/interim
'''
# set the path of raw data
interim_data_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)), os.path.pardir, os.path.pardir, 'data', 'interim'
)
file_path = os.path.join(interim_data_path, file_name)
interim_data = pd.read_csv(file_path, index_col=index_col)
return(interim_data)
def write_precessed_data(df, suffix=None):
'''
In:
DataFrame(df),
str(file_name),
Out:
None
Description:
Write sample data to directory /data/interim
'''
precessed_data_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)), os.path.pardir, os.path.pardir, 'data', 'processed'
)
if suffix is None:
file_name = 'testing-set.csv'
else:
file_name = 'testing-set_{}.csv'.format(suffix)
write_sample_path = os.path.join(precessed_data_path, file_name)
df.to_csv(write_sample_path)
# empirical scale: weight_decay=0.0001
if __name__ == '__main__':
# Example usage: "python nn_train.py --epochs 100"
fire.Fire(demo) | [
11748,
28686,
201,
198,
11748,
640,
201,
198,
11748,
2046,
201,
198,
11748,
28034,
201,
198,
11748,
4738,
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,
11748,
19798,
292,
355,
279,
67,
201,
198,
11748,
28034,
13,
20471,
13,
45124,
... | 2.315568 | 713 |
import unittest
from src.fetchers.hvdcLineCktOwnersFetcher import getOwnersForHvdcLineCktIds
import datetime as dt
from src.appConfig import getConfig
| [
11748,
555,
715,
395,
198,
6738,
12351,
13,
34045,
3533,
13,
71,
85,
17896,
13949,
34,
21841,
23858,
364,
37,
316,
2044,
1330,
651,
23858,
364,
1890,
39,
85,
17896,
13949,
34,
21841,
7390,
82,
198,
11748,
4818,
8079,
355,
288,
83,
1... | 2.923077 | 52 |
# Copyright David Abrahams 2004. Distributed under the Boost
# Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
from __future__ import print_function
'''
>>> from iterator_ext import *
>>> from input_iterator import *
>>> x = list_int()
>>> x.push_back(1)
>>> x.back()
1
>>> x.push_back(3)
>>> x.push_back(5)
>>> for y in x:
... print(y)
1
3
5
>>> z = range(x)
>>> for y in z:
... print(y)
1
3
5
Range2 wraps a transform_iterator which doubles the elements it
traverses. This proves we can wrap input iterators
>>> z2 = range2(x)
>>> for y in z2:
... print(y)
2
6
10
>>> l2 = two_lists()
>>> for y in l2.primes:
... print(y)
2
3
5
7
11
13
>>> for y in l2.evens:
... print(y)
2
4
6
8
10
12
>>> ll = list_list()
>>> ll.push_back(x)
>>> x.push_back(7)
>>> ll.push_back(x)
>>> for a in ll: #doctest: +NORMALIZE_WHITESPACE
... for b in a:
... print(b, end='')
... print('')
...
1 3 5
1 3 5 7
'''
if __name__ == '__main__':
print("running...")
import sys
status = run()[0]
if (status == 0): print("Done.")
sys.exit(status)
| [
2,
15069,
3271,
16660,
82,
5472,
13,
4307,
6169,
739,
262,
19835,
198,
2,
10442,
13789,
11,
10628,
352,
13,
15,
13,
357,
6214,
19249,
198,
2,
2393,
38559,
24290,
62,
16,
62,
15,
13,
14116,
393,
4866,
379,
2638,
1378,
2503,
13,
395... | 2.287402 | 508 |
# -*- Python -*-
# license
# license.
# ======================================================================
"""Looks name up in the [geonames database](http://www.geonames.org/).
[GeoNames Search Webservice API](http://www.geonames.org/export/geonames-search.html)
"""
import sys, os, urllib.request, json, time
from pathlib import Path
import logging; module_logger = logging.getLogger(__name__)
from .utilities import is_chinese
# ======================================================================
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# ======================================================================
### Local Variables:
### eval: (if (fboundp 'eu-rename-buffer) (eu-rename-buffer))
### End:
| [
2,
532,
9,
12,
11361,
532,
9,
12,
198,
2,
5964,
198,
2,
5964,
13,
198,
2,
38093,
1421,
28,
198,
198,
37811,
41102,
1438,
510,
287,
262,
685,
6281,
1047,
6831,
16151,
4023,
1378,
2503,
13,
6281,
1047,
13,
2398,
14,
737,
198,
58,
... | 5.925234 | 214 |
import subprocess
import sys
import django.conf
import django.utils.encoding
subprocess.Popen
sys.argv
plt.<caret> | [
11748,
850,
14681,
198,
11748,
25064,
198,
198,
11748,
42625,
14208,
13,
10414,
198,
11748,
42625,
14208,
13,
26791,
13,
12685,
7656,
198,
198,
7266,
14681,
13,
47,
9654,
198,
198,
17597,
13,
853,
85,
198,
489,
83,
29847,
6651,
83,
29... | 2.785714 | 42 |
# 271. Encode and Decode Strings
'''
Design an algorithm to encode a list of strings to a string. The encoded string is then sent over the network and is decoded back to the original list of strings.
Machine 1 (sender) has the function:
string encode(vector<string> strs) {
// ... your code
return encoded_string;
}
Machine 2 (receiver) has the function:
vector<string> decode(string s) {
//... your code
return strs;
}
So Machine 1 does:
string encoded_string = encode(strs);
and Machine 2 does:
vector<string> strs2 = decode(encoded_string);
strs2 in Machine 2 should be the same as strs in Machine 1.
Implement the encode and decode methods.
Note:
The string may contain any possible characters out of 256 valid ascii characters. Your algorithm should be generalized enough to work on any possible characters.
Do not use class member/global/static variables to store states. Your encode and decode algorithms should be stateless.
Do not rely on any library method such as eval or serialize methods. You should implement your own encode/decode algorithm.
'''
from common import *
'''
Encode to numbers and decode from numbers.
O(N) runtime for both encode and decode, in which N is total # of characters in strs. O(N) storage.
Beat 5% runtime, 29% storage of all Leetcode submissions.
'''
# Your Codec object will be instantiated and called as such:
# codec = Codec()
# codec.decode(codec.encode(strs))
# Tests.
codec = Codec()
strs = ['Great','Nice']
encoded = codec.encode(strs)
assert(codec.decode(encoded) == strs)
strs = ['{}leading'.format(chr(0)),'Nice']
encoded = codec.encode(strs)
assert(codec.decode(encoded) == strs)
strs = ['{}l:eadi.ng'.format(chr(0)),'{}leading,{}'.format(chr(0),chr(1))]
encoded = codec.encode(strs)
assert(codec.decode(encoded) == strs)
strs = []
encoded = codec.encode(strs)
assert(codec.decode(encoded) == strs)
| [
2,
33797,
13,
2039,
8189,
290,
4280,
1098,
4285,
654,
198,
7061,
6,
198,
23067,
281,
11862,
284,
37773,
257,
1351,
286,
13042,
284,
257,
4731,
13,
383,
30240,
4731,
318,
788,
1908,
625,
262,
3127,
290,
318,
875,
9043,
736,
284,
262,... | 3.180272 | 588 |
from typing import Optional
import xml.etree.ElementTree as ET
from ...xml.XmlReader import XmlReader as XR
from ..namespaces import API
from ..namespaces import DATA
from ...deserialization.create_enum import create_enum
from ..dto.InvoiceNumberQuery import InvoiceNumberQuery
from ..dto.InvoiceDirection import InvoiceDirection
| [
6738,
19720,
1330,
32233,
198,
11748,
35555,
13,
316,
631,
13,
20180,
27660,
355,
12152,
198,
6738,
2644,
19875,
13,
55,
4029,
33634,
1330,
1395,
4029,
33634,
355,
1395,
49,
198,
6738,
11485,
14933,
43076,
1330,
7824,
198,
6738,
11485,
... | 3.719101 | 89 |
# Script to generate the necessary grammar rules for the
# markov generator output type
# Dataset:
# http://www.drmaciver.com/2009/12/i-want-one-meelyun-sentences/
import re
ALPHA = ' abcdefghijklmnopqrstuvwxyz'
# read data from file
with open('sentences', 'r', encoding="utf8") as f:
content = f.read().splitlines()
n = len(content)
freq = {}
# process sentences
for i in range(n):
content[i] = re.sub('[^a-z]+', ' ', content[i].lower())
for word in content[i].split(' '):
if len(word) < 1: continue
word = ' ' + word + ' '
# sum up next-letter frequencies
pc = ''
for j in range(len(word) - 1):
c = word[j]
if pc != ' ': c = pc + c
nc = word[j+1]
if c not in freq:
freq[c] = {}
for a in ALPHA:
freq[c][a] = 0
freq[c][nc] += 1
pc = word[j]
# normalize frequencies
for c, d in freq.items():
sum_ = sum(d.values())
for nc in d:
d[nc] /= sum_
# helper functions for printing rulesets
# print rulesets
for c, d in freq.items():
rule = make_name(c) + '='
pc = c[:-1]
c = c[-1]
for nc in d:
if d[nc] <= 0.0055: continue
mult = max(1, int(d[nc] / 0.01))
rule += make_option(pc, c, nc) * mult
print(rule[:-1])
| [
2,
12327,
284,
7716,
262,
3306,
23491,
3173,
329,
262,
198,
2,
1317,
709,
17301,
5072,
2099,
198,
198,
2,
16092,
292,
316,
25,
198,
2,
2638,
1378,
2503,
13,
7109,
20285,
1428,
13,
785,
14,
10531,
14,
1065,
14,
72,
12,
42949,
12,
... | 2.011923 | 671 |
# -*- encoding: utf-8 -*-
import dsl
from shapely.wkt import loads as wkt_loads
from . import FixtureTest
| [
2,
532,
9,
12,
21004,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
288,
6649,
198,
6738,
5485,
306,
13,
86,
21841,
1330,
15989,
355,
266,
21841,
62,
46030,
198,
198,
6738,
764,
1330,
376,
9602,
14402,
628
] | 2.7 | 40 |
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from openvino.tools.mo.front.common.partial_infer.utils import mo_array
from openvino.tools.mo.ops.proposal import ProposalOp
from openvino.tools.mo.front.caffe.collect_attributes import merge_attrs
from openvino.tools.mo.front.extractor import FrontExtractorOp
| [
2,
15069,
357,
34,
8,
2864,
12,
1238,
1828,
8180,
10501,
198,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
24843,
12,
17,
13,
15,
198,
198,
6738,
1280,
85,
2879,
13,
31391,
13,
5908,
13,
8534,
13,
11321,
13,
47172,
62,
259,
223... | 3.061947 | 113 |
# -*- coding: utf-8 -*-
# mk42
# mk42/apps/users/migrations/0003_auto_20170614_0038.py
# Generated by Django 1.11.2 on 2017-06-14 00:38
from __future__ import unicode_literals
from django.db import (
migrations,
models,
)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
33480,
3682,
198,
2,
33480,
3682,
14,
18211,
14,
18417,
14,
76,
3692,
602,
14,
830,
18,
62,
23736,
62,
1264,
35402,
1415,
62,
405,
2548,
13,
9078,
198,
198,
... | 2.303922 | 102 |
import pickle
import pandas as pd
import torch
import torch.nn as nn
import torchvision.transforms as T
from torch.utils import data
from torch.utils.data import random_split
from torch.utils.data.dataloader import DataLoader
from torch.utils.data.dataset import Dataset
from datasets.celeba import CelebA1000
from datasets.facescrub import FaceScrub
from datasets.stanford_dogs import StanfordDogs
| [
11748,
2298,
293,
198,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
10178,
13,
7645,
23914,
355,
309,
198,
6738,
28034,
13,
26791,
1330,
1366,
198,
6738,
28034,
... | 3.453782 | 119 |
from __future__ import print_function, absolute_import, division #makes KratosMultiphysics backward compatible with python 2.6 and 2.7
from KratosMultiphysics import *
from KratosMultiphysics.IncompressibleFluidApplication import *
from KratosMultiphysics.FluidDynamicsApplication import *
from KratosMultiphysics.ExternalSolversApplication import *
from KratosMultiphysics.MeshingApplication import *
import KratosMultiphysics.MappingApplication as KratosMapping
# In this example two domains are solved, a coarse background mesh and a fine mesh around
# an obstacle. The fine domain receives the values from the coarse domain as input on it's boundary
######################################################################################
######################################################################################
######################################################################################
##PARSING THE PARAMETERS
#import define_output
# NOTE(review): parameter files are opened without a context manager and never
# closed explicitly; harmless for a batch script, but `with open(...)` would be safer.
parameter_file_background = open("ProjectParameters_Background.json",'r')
Projectparameters_BG = Parameters( parameter_file_background.read())
parameter_file_bodyfitted = open("ProjectParameters_BodyFitted.json",'r')
Projectparameters_BF = Parameters( parameter_file_bodyfitted.read())
## Fluid model part definition
# Two independent model parts: "bg" = coarse background mesh, "bf" = fine body-fitted mesh.
main_model_part_bg = ModelPart(Projectparameters_BG["problem_data"]["model_part_name"].GetString())
main_model_part_bg.ProcessInfo.SetValue(DOMAIN_SIZE, Projectparameters_BG["problem_data"]["domain_size"].GetInt())
main_model_part_bf = ModelPart(Projectparameters_BF["problem_data"]["model_part_name"].GetString())
main_model_part_bf.ProcessInfo.SetValue(DOMAIN_SIZE, Projectparameters_BF["problem_data"]["domain_size"].GetInt())
###TODO replace this "model" for real one once available
Model_BG = {Projectparameters_BG["problem_data"]["model_part_name"].GetString() : main_model_part_bg}
Model_BF = {Projectparameters_BF["problem_data"]["model_part_name"].GetString() : main_model_part_bf}
## Solver construction
# The solver module name comes from the JSON settings; `solver_module` is reused
# for both domains, so its value after this section belongs to the BF solver.
solver_module = __import__(Projectparameters_BG["solver_settings"]["solver_type"].GetString())
solver_bg = solver_module.CreateSolver(main_model_part_bg, Projectparameters_BG["solver_settings"])
solver_bg.AddVariables()
solver_module = __import__(Projectparameters_BF["solver_settings"]["solver_type"].GetString())
solver_bf = solver_module.CreateSolver(main_model_part_bf, Projectparameters_BF["solver_settings"])
solver_bf.AddVariables()
## Read the model - note that SetBufferSize is done here
solver_bg.ImportModelPart()
solver_bf.ImportModelPart()
## Add AddDofs
solver_bg.AddDofs()
solver_bf.AddDofs()
## Initialize GiD I/O
from gid_output_process import GiDOutputProcess
gid_output_bg = GiDOutputProcess(solver_bg.GetComputingModelPart(),
                                 Projectparameters_BG["problem_data"]["problem_name"].GetString() ,
                                 Projectparameters_BG["output_configuration"])
gid_output_bg.ExecuteInitialize()
gid_output_bf = GiDOutputProcess(solver_bf.GetComputingModelPart(),
                                 Projectparameters_BF["problem_data"]["problem_name"].GetString() ,
                                 Projectparameters_BF["output_configuration"])
gid_output_bf.ExecuteInitialize()
##here all of the allocation of the strategies etc is done
solver_bg.Initialize()
solver_bf.Initialize()
##TODO: replace MODEL for the Kratos one ASAP
## Get the list of the skin submodel parts in the object Model
for i in range(Projectparameters_BG["solver_settings"]["skin_parts"].size()):
    skin_part_name = Projectparameters_BG["solver_settings"]["skin_parts"][i].GetString()
    Model_BG.update({skin_part_name: main_model_part_bg.GetSubModelPart(skin_part_name)})
for i in range(Projectparameters_BF["solver_settings"]["skin_parts"].size()):
    skin_part_name = Projectparameters_BF["solver_settings"]["skin_parts"][i].GetString()
    Model_BF.update({skin_part_name: main_model_part_bf.GetSubModelPart(skin_part_name)})
## Get the list of the initial conditions submodel parts in the object Model
for i in range(Projectparameters_BF["initial_conditions_process_list"].size()):
    initial_cond_part_name = Projectparameters_BF["initial_conditions_process_list"][i]["Parameters"]["model_part_name"].GetString()
    Model_BF.update({initial_cond_part_name: main_model_part_bf.GetSubModelPart(initial_cond_part_name)})
## Processes construction
import process_factory
# "list_of_processes_bg" contains all the processes already constructed (boundary conditions, initial conditions and gravity)
# Note that the conditions are firstly constructed. Otherwise, they may overwrite the BCs information.
list_of_processes_bg = process_factory.KratosProcessFactory(Model_BG).ConstructListOfProcesses( Projectparameters_BG["initial_conditions_process_list"] )
list_of_processes_bg += process_factory.KratosProcessFactory(Model_BG).ConstructListOfProcesses( Projectparameters_BG["boundary_conditions_process_list"] )
list_of_processes_bf = process_factory.KratosProcessFactory(Model_BF).ConstructListOfProcesses( Projectparameters_BF["initial_conditions_process_list"] )
list_of_processes_bf += process_factory.KratosProcessFactory(Model_BF).ConstructListOfProcesses( Projectparameters_BF["boundary_conditions_process_list"] )
## Processes initialization
for process in list_of_processes_bg:
    process.ExecuteInitialize()
for process in list_of_processes_bf:
    process.ExecuteInitialize()
# Mapper initialization
# Three mappers carry VELOCITY from the coarse background solution onto the
# fine domain's inlet, lateral sides and outlet boundaries each time step.
mapper_settings_file = open("MapperSettings.json",'r')
Projectparameters_Mapper = Parameters( mapper_settings_file.read())["mapper_settings"]
inlet_mapper = KratosMapping.MapperFactory.CreateMapper(main_model_part_bg,
                                                        main_model_part_bf,
                                                        Projectparameters_Mapper[0])
sides_mapper = KratosMapping.MapperFactory.CreateMapper(main_model_part_bg,
                                                        main_model_part_bf,
                                                        Projectparameters_Mapper[1])
outlet_mapper = KratosMapping.MapperFactory.CreateMapper(main_model_part_bg,
                                                         main_model_part_bf,
                                                         Projectparameters_Mapper[2])
## Stepping and time settings
Dt = Projectparameters_BG["problem_data"]["time_step"].GetDouble()
end_time = Projectparameters_BG["problem_data"]["end_time"].GetDouble()
time = 0.0
step = 0
out = 0.0
gid_output_bg.ExecuteBeforeSolutionLoop()
gid_output_bf.ExecuteBeforeSolutionLoop()
for process in list_of_processes_bg:
    process.ExecuteBeforeSolutionLoop()
for process in list_of_processes_bf:
    process.ExecuteBeforeSolutionLoop()
while(time <= end_time):
    time = time + Dt
    step = step + 1
    main_model_part_bg.CloneTimeStep(time)
    main_model_part_bf.CloneTimeStep(time)
    print("STEP = ", step)
    print("TIME = ", time)
    # The first two steps are skipped so the solvers' history buffers are filled
    # before solving (buffer size set during ImportModelPart above).
    if(step >= 3):
        for process in list_of_processes_bg:
            process.ExecuteInitializeSolutionStep()
        for process in list_of_processes_bf:
            process.ExecuteInitializeSolutionStep()
        gid_output_bg.ExecuteInitializeSolutionStep()
        gid_output_bf.ExecuteInitializeSolutionStep()
        # One-way coupling: solve coarse domain, map its VELOCITY onto the fine
        # domain's boundaries, then solve the fine domain.
        solver_bg.Solve()
        inlet_mapper.Map(VELOCITY, VELOCITY)
        sides_mapper.Map(VELOCITY, VELOCITY)
        outlet_mapper.Map(VELOCITY, VELOCITY)
        solver_bf.Solve()
        for process in list_of_processes_bg:
            process.ExecuteFinalizeSolutionStep()
        for process in list_of_processes_bf:
            process.ExecuteFinalizeSolutionStep()
        gid_output_bg.ExecuteFinalizeSolutionStep()
        gid_output_bf.ExecuteFinalizeSolutionStep()
        #TODO: decide if it shall be done only when output is processed or not
        for process in list_of_processes_bg:
            process.ExecuteBeforeOutputStep()
        for process in list_of_processes_bf:
            process.ExecuteBeforeOutputStep()
        if gid_output_bg.IsOutputStep():
            gid_output_bg.PrintOutput()
            gid_output_bf.PrintOutput()
        for process in list_of_processes_bg:
            process.ExecuteAfterOutputStep()
        for process in list_of_processes_bf:
            process.ExecuteAfterOutputStep()
        out = out + Dt
for process in list_of_processes_bg:
    process.ExecuteFinalize()
for process in list_of_processes_bf:
    process.ExecuteFinalize()
gid_output_bg.ExecuteFinalize()
gid_output_bf.ExecuteFinalize()
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
11,
4112,
62,
11748,
11,
7297,
1303,
49123,
509,
10366,
418,
15205,
13323,
23154,
19528,
11670,
351,
21015,
362,
13,
21,
290,
362,
13,
22,
198,
198,
6738,
509,
10366,
418,
15205,
13323,
2... | 2.651651 | 3,241 |
"""
Annotate VCF files with VRS
Input Format: VCF
Output Format: VCF
The user should pass arguments for the VCF input, VCF output, &
the vrs object file name.
ex. python3 src/ga4gh/vrs/extras/vcf_annotation.py input.vcf.gz --out
./output.vcf.gz --vrs-file ./vrs_objects.pkl
"""
import argparse
import sys
import pickle
import time
from biocommons.seqrepo import SeqRepo
import pysam
from ga4gh.vrs.dataproxy import SeqRepoDataProxy
from ga4gh.vrs.extras.translator import Translator
def parse_args(argv):
    """Parse the user-supplied command line.

    :param argv: argument tokens, typically ``sys.argv[1:]``
    :return: an ``argparse.Namespace`` with ``VCF_IN``, ``out`` and
        ``vrs_file`` attributes
    """
    cli = argparse.ArgumentParser()
    cli.add_argument("VCF_IN")
    cli.add_argument("--out", "-o", default="-")
    cli.add_argument("--vrs-file", default="-")
    return cli.parse_args(argv)
if __name__ == "__main__":
    # Wall-clock timing around the whole annotation run.
    start_time = time.time()
    options = parse_args(sys.argv[1:])
    print(f"These are the options that you have selected: {options}\n")
    # Local seqrepo snapshot; the path is hard-coded, so the host must have
    # seqrepo installed at this location.
    data_proxy = SeqRepoDataProxy(SeqRepo("/usr/local/share/seqrepo/latest"))
    tlr = Translator(data_proxy)
    # NOTE(review): `VCFAnnotator` is neither defined nor imported in this
    # excerpt -- presumably defined elsewhere in the module; confirm before use.
    vcf_annotator = VCFAnnotator(tlr)
    vcf_annotator.annotate(options.VCF_IN, options.out, options.vrs_file)
    end_time = time.time()
    total_time = (float(end_time) - float(start_time))
    total_time_minutes = (total_time / 60)
    print(f"This program took {total_time} seconds to run.")
    print(f"This program took {total_time_minutes} minutes to run.")
| [
37811,
198,
2025,
1662,
378,
569,
22495,
3696,
351,
6453,
50,
198,
198,
20560,
18980,
25,
569,
22495,
198,
26410,
18980,
25,
569,
22495,
198,
198,
464,
2836,
815,
1208,
7159,
329,
262,
569,
22495,
5128,
11,
569,
22495,
5072,
11,
1222,... | 2.672697 | 608 |
import glob
import hashlib
import logging
import os
import shutil
import subprocess
from functools import wraps
from tempfile import gettempdir
from threading import Thread
import requests
from timeout_decorator import timeout
from utils.constants import Constant
from utils.format import Format
logger = logging.getLogger('testrunner')
_stepdepth = 0
def collect_remote_logs(self, ip_address, logs, store_path):
    """Best-effort collection of logs from a remote machine.

    :param ip_address: (str) IP of the machine to collect the logs from
    :param logs: (dict of lists) what to collect, keyed by
        ``"files"``, ``"dirs"`` and ``"services"``
    :param store_path: (str) local path to copy the logs into
    :return: (bool) True if at least one collection step failed
    """
    failures = []

    def _attempt(target, action):
        # Run a single collection step; on failure record the target and
        # keep going instead of aborting the whole collection.
        try:
            action()
        except Exception as ex:
            logger.debug(
                f"Error while collecting {target} from {ip_address}\n {ex}")
            failures.append(target)

    for log in logs.get("files", []):
        _attempt(log, lambda log=log: self.scp_file(ip_address, log, store_path))
    for log in logs.get("dirs", []):
        _attempt(log, lambda log=log: self.rsync(ip_address, log, store_path))
    for service in logs.get("services", []):
        def _fetch_service_log(service=service):
            # Dump the unit's journal to a file remotely, then copy it over.
            self.ssh_run(
                ip_address, f"sudo journalctl -xeu {service} > {service}.log")
            self.scp_file(ip_address, f"{service}.log", store_path)
        _attempt(f"{service}.log", _fetch_service_log)
    return bool(failures)
def scp_file(self, ip_address, remote_file_path, local_file_path):
    """Copy one file from a remote node to a local path via scp.

    :param ip_address: (str) IP address of the node to copy from
    :param remote_file_path: (str) path of the remote file to be copied
    :param local_file_path: (str) local destination path
    :return:
    """
    tf = self.conf.terraform
    self.runshellcommand(
        f"scp {Constant.SSH_OPTS} -i {tf.ssh_key}"
        f" {tf.nodeuser}@{ip_address}:{remote_file_path} {local_file_path}")
def rsync(self, ip_address, remote_dir_path, local_dir_path):
    """Copy a whole directory from a remote node via rsync over ssh.

    :param ip_address: (str) IP address of the node to copy from
    :param remote_dir_path: (str) path of the remote directory to be copied
    :param local_dir_path: (str) local destination directory
    :return:
    """
    tf = self.conf.terraform
    # Remote side runs rsync under sudo so root-owned logs are readable.
    ssh_cmd = f"ssh {Constant.SSH_OPTS} -i {tf.ssh_key}"
    self.runshellcommand(
        f'rsync -avz --no-owner --no-perms -e "{ssh_cmd}" '
        f'--rsync-path="sudo rsync" --ignore-missing-args {tf.nodeuser}@{ip_address}:{remote_dir_path} '
        f'{local_dir_path}')
def runshellcommand(self, cmd, cwd=None, env={}, ignore_errors=False, stdin=None):
    """Running shell command in {workspace} if cwd == None
       Eg) cwd is "skuba", cmd will run shell in {workspace}/skuba/
           cwd is None, cmd will run in {workspace}
           cwd is abs path, cmd will run in cwd
    Keyword arguments:
    cmd -- command to run
    cwd -- dir to run the cmd
    env -- environment variables
    ignore_errors -- don't raise exception if command fails
    stdin -- standard input for the command in bytes
    """
    # NOTE(review): `env={}` is a mutable default argument (never mutated here,
    # so benign) and an empty dict means the child runs with an *empty*
    # environment, not the inherited one -- presumably intentional; confirm.
    if not cwd:
        cwd = self.conf.workspace
    # Relative cwd is resolved against the configured workspace.
    if not os.path.isabs(cwd):
        cwd = os.path.join(self.conf.workspace, cwd)
    if not os.path.exists(cwd):
        raise FileNotFoundError(Format.alert("Directory {} does not exists".format(cwd)))
    if logging.DEBUG >= logger.level:
        logger.debug("Executing command\n"
                     " cwd: {} \n"
                     " env: {}\n"
                     " cmd: {}".format(cwd, str(env) if env else "{}", cmd))
    else:
        logger.info("Executing command {}".format(cmd))
    stdout, stderr = [], []
    # shell=True: `cmd` is a full shell command line, not an argv list.
    p = subprocess.Popen(
        cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd,
        stdin=subprocess.PIPE if stdin else None, shell=True, env=env
    )
    if stdin:
        p.stdin.write(stdin)
        p.stdin.close()
    # Drain both pipes concurrently to avoid deadlock on full pipe buffers;
    # read_fd appends raw lines to the lists and logs each stripped line.
    stdoutStreamer = Thread(target = self.read_fd, args = (p, p.stdout, logger.debug, stdout))
    stderrStreamer = Thread(target = self.read_fd, args = (p, p.stderr, logger.error, stderr))
    stdoutStreamer.start()
    stderrStreamer.start()
    stdoutStreamer.join()
    stderrStreamer.join()
    # this is redundant, at this point threads were joined and they waited for the subprocess
    # to exit, however it should not hurt to explicitly wait for it again (no-op).
    p.wait()
    stdout, stderr = "".join(stdout), "".join(stderr)
    if p.returncode != 0:
        if not ignore_errors:
            raise RuntimeError("Error executing command {}".format(cmd))
        else:
            # Best-effort mode: hand back stderr so the caller can inspect it.
            return stderr
    return stdout
def ssh_sock_fn(self):
    """Return a short, deterministic path for the ssh-agent socket.

    Unix socket paths are limited to 107 characters on Linux, so the
    workspace path is reduced to an md5 digest and the socket is placed
    under $TMPDIR (usually /tmp), giving a predictable, test-unique,
    fixed-length location.
    """
    digest = hashlib.md5(self.conf.workspace.encode()).hexdigest()
    sock_path = os.path.join(gettempdir(), digest, "ssh-agent-sock")
    limit = 107
    if len(sock_path) > limit:
        raise Exception(f"Socket path '{sock_path}' len {len(sock_path)} > {limit}")
    return sock_path
def read_fd(self, proc, fd, logger_func, output):
    """Drain *fd* line by line until *proc* has terminated.

    Every decoded non-empty line is appended (raw) to *output* and also
    passed, stripped, to *logger_func* for live logging.
    """
    while True:
        line = fd.readline().decode()
        if not line:
            # Empty read: stop only once the process is actually gone,
            # otherwise keep polling for more output.
            if proc.poll() is not None:
                return
            continue
        output.append(line)
        logger_func(line.strip())
| [
11748,
15095,
198,
11748,
12234,
8019,
198,
11748,
18931,
198,
11748,
28686,
198,
11748,
4423,
346,
198,
11748,
850,
14681,
198,
6738,
1257,
310,
10141,
1330,
27521,
198,
6738,
20218,
7753,
1330,
651,
29510,
15908,
198,
6738,
4704,
278,
1... | 2.186124 | 2,998 |
"""
: 1.3.0+
"""
import re
from nonebot import CommandSession
from nonebot.helpers import render_expression
def handle_cancellation(session: CommandSession):
    """Return a handler that aborts *session* on a user cancellation message.

    NOTE(review): the original (non-English) docstring was garbled in this
    copy; judging by the imports, the returned callable is expected to call
    ``session.finish(render_expression(...))`` when the incoming value is a
    cancellation -- confirm against the upstream nonebot helper.

    :param session: the active CommandSession being guarded
    """
    # NOTE(review): `control` is not defined anywhere in this excerpt --
    # presumably a nested ``def control(value): ...`` was lost; as written
    # this raises NameError at call time. TODO: restore the inner function.
    return control
# Explicit public API of this module.
__all__ = [
    'handle_cancellation',
]
| [
37811,
628,
628,
198,
25,
352,
13,
18,
13,
15,
10,
198,
37811,
198,
11748,
302,
198,
198,
6738,
4844,
13645,
1330,
9455,
36044,
198,
6738,
4844,
13645,
13,
16794,
364,
1330,
8543,
62,
38011,
628,
198,
4299,
5412,
62,
66,
590,
297,
... | 2.438849 | 139 |
"""Class to handle the dipoles."""
# Authors: Mainak Jas <mjas@mgh.harvard.edu>
# Sam Neymotin <samnemo@gmail.com>
import warnings
import numpy as np
from copy import deepcopy
from .viz import plot_dipole, plot_psd, plot_tfr_morlet
def simulate_dipole(net, tstop, dt=0.025, n_trials=None, record_vsoma=False,
                    record_isoma=False, postproc=False):
    """Simulate a dipole given the experiment parameters.
    Parameters
    ----------
    net : Network object
        The Network object specifying how cells are
        connected.
    tstop : float
        The simulation stop time (ms).
    dt : float
        The integration time step of h.CVode (ms)
    n_trials : int | None
        The number of trials to simulate. If None, the 'N_trials' value
        of the ``params`` used to create ``net`` is used (must be >0)
    record_vsoma : bool
        Option to record somatic voltages from cells
    record_isoma : bool
        Option to record somatic currents from cells
    postproc : bool
        If True, smoothing (``dipole_smooth_win``) and scaling
        (``dipole_scalefctr``) values are read from the parameter file, and
        applied to the dipole objects before returning. Note that this setting
        only affects the dipole waveforms, and not somatic voltages, possible
        extracellular recordings etc. The preferred way is to use the
        :meth:`~hnn_core.dipole.Dipole.smooth` and
        :meth:`~hnn_core.dipole.Dipole.scale` methods instead. Default: False.
    Returns
    -------
    dpls: list
        List of dipole objects for each trials
    """
    # Imported here (not at module top) to avoid a circular import with the
    # backends module.
    from .parallel_backends import _BACKEND, JoblibBackend
    if _BACKEND is None:
        # Fall back to a serial Joblib backend when none was configured.
        _BACKEND = JoblibBackend(n_jobs=1)
    if n_trials is None:
        n_trials = net._params['N_trials']
    if n_trials < 1:
        raise ValueError("Invalid number of simulations: %d" % n_trials)
    if not net.connectivity:
        warnings.warn('No connections instantiated in network. Consider using '
                      'net = jones_2009_model() or net = law_2021_model() to '
                      'create a predefined network from published models.',
                      UserWarning)
    # Drives/biases with an open-ended tstop inherit the simulation stop time.
    for drive_name, drive in net.external_drives.items():
        if 'tstop' in drive['dynamics']:
            if drive['dynamics']['tstop'] is None:
                drive['dynamics']['tstop'] = tstop
    for bias_name, bias in net.external_biases.items():
        for cell_type, bias_cell_type in bias.items():
            if bias_cell_type['tstop'] is None:
                bias_cell_type['tstop'] = tstop
            if bias_cell_type['tstop'] < 0.:
                raise ValueError('End time of tonic input cannot be negative')
            duration = bias_cell_type['tstop'] - bias_cell_type['t0']
            if duration < 0.:
                raise ValueError('Duration of tonic input cannot be negative')
    net._instantiate_drives(n_trials=n_trials, tstop=tstop)
    net._reset_rec_arrays()
    # Recording flags are validated strictly as bools before being stored on
    # the network's parameter dict (read later by the backend).
    if isinstance(record_vsoma, bool):
        net._params['record_vsoma'] = record_vsoma
    else:
        raise TypeError("record_vsoma must be bool, got %s"
                        % type(record_vsoma).__name__)
    if isinstance(record_isoma, bool):
        net._params['record_isoma'] = record_isoma
    else:
        raise TypeError("record_isoma must be bool, got %s"
                        % type(record_isoma).__name__)
    if postproc:
        warnings.warn('The postproc-argument is deprecated and will be removed'
                      ' in a future release of hnn-core. Please define '
                      'smoothing and scaling explicitly using Dipole methods.',
                      DeprecationWarning)
    dpls = _BACKEND.simulate(net, tstop, dt, n_trials, postproc)
    return dpls
def read_dipole(fname):
    """Load dipole values from a text file into a Dipole instance.

    Parameters
    ----------
    fname : str
        Full path to the input file (.txt)
    Returns
    -------
    dpl : Dipole
        The instance of Dipole class
    """
    # Column 0 holds the time axis; the remaining columns hold the data.
    values = np.loadtxt(fname, dtype=float)
    return Dipole(values[:, 0], values[:, 1:])
def average_dipoles(dpls):
    """Compute dipole averages over a list of Dipole objects.
    Parameters
    ----------
    dpls: list of Dipole objects
        Contains list of dipole objects, each with a `data` member containing
        'L2', 'L5' and 'agg' components
    Returns
    -------
    dpl: instance of Dipole
        A new dipole object with each component of `dpl.data` representing the
        average over the same components in the input list
    """
    # NOTE(review): dpls[0] raises IndexError on an empty list -- presumably
    # callers guarantee at least one dipole; confirm.
    scale_applied = dpls[0].scale_applied
    for dpl_idx, dpl in enumerate(dpls):
        if dpl.scale_applied != scale_applied:
            raise RuntimeError('All dipoles must be scaled equally!')
        if not isinstance(dpl, Dipole):
            raise ValueError(
                f"All elements in the list should be instances of "
                f"Dipole. Got {type(dpl)}")
        if dpl.nave > 1:
            raise ValueError("Dipole at index %d was already an average of %d"
                             " trials. Cannot reaverage" %
                             (dpl_idx, dpl.nave))
    avg_data = list()
    # `dpl` here is the loop variable leaked from the validation loop above,
    # i.e. the *last* dipole; this assumes all dipoles share the same layer
    # keys -- TODO confirm.
    layers = dpl.data.keys()
    for layer in layers:
        # Element-wise mean of this layer across all dipoles.
        avg_data.append(
            np.mean(np.array([dpl.data[layer] for dpl in dpls]), axis=0)
        )
    # Stack the per-layer means as columns (time along axis 0).
    avg_data = np.c_[avg_data].T
    avg_dpl = Dipole(dpls[0].times, avg_data)
    # The averaged scale should equal all scals in the input dpl list.
    avg_dpl.scale_applied = scale_applied
    # set nave to the number of trials averaged in this dipole
    avg_dpl.nave = len(dpls)
    return avg_dpl
def _rmse(dpl, exp_dpl, tstart=0.0, tstop=0.0, weights=None):
    """ Calculates RMSE between data in dpl and exp_dpl
    Parameters
    ----------
    dpl: instance of Dipole
        A dipole object with simulated data
    exp_dpl: instance of Dipole
        A dipole object with experimental data
    tstart | None: float
        Time at beginning of range over which to calculate RMSE
    tstop | None: float
        Time at end of range over which to calculate RMSE
    weights | None: array
        An array of weights to be applied to each point in
        simulated dpl. Must have length >= dpl.data
        If None, weights will be replaced with 1's for typical RMSE
        calculation.
    Returns
    -------
    err: float
        Weighted RMSE between data in dpl and exp_dpl
    """
    # scipy imported lazily so the module loads without scipy installed.
    from scipy import signal
    exp_times = exp_dpl.times
    sim_times = dpl.times
    # do tstart and tstop fall within both datasets?
    # if not, use the closest data point as the new tstop/tstart
    for tseries in [exp_times, sim_times]:
        if tstart < tseries[0]:
            tstart = tseries[0]
        if tstop > tseries[-1]:
            tstop = tseries[-1]
    # make sure start and end times are valid for both dipoles
    # Nearest-sample indices for the clamped [tstart, tstop] window.
    exp_start_index = (np.abs(exp_times - tstart)).argmin()
    exp_end_index = (np.abs(exp_times - tstop)).argmin()
    exp_length = exp_end_index - exp_start_index
    sim_start_index = (np.abs(sim_times - tstart)).argmin()
    sim_end_index = (np.abs(sim_times - tstop)).argmin()
    sim_length = sim_end_index - sim_start_index
    if weights is None:
        # weighted RMSE with weights of all 1's is equivalent to
        # normal RMSE
        weights = np.ones(len(sim_times[0:sim_end_index]))
    weights = weights[sim_start_index:sim_end_index]
    dpl1 = dpl.data['agg'][sim_start_index:sim_end_index]
    dpl2 = exp_dpl.data['agg'][exp_start_index:exp_end_index]
    if (sim_length > exp_length):
        # downsample simulation timeseries to match exp data
        dpl1 = signal.resample(dpl1, exp_length)
        weights = signal.resample(weights, exp_length)
        # Resampling can produce tiny/negative weights; zero them out.
        indices = np.where(weights < 1e-4)
        weights[indices] = 0
    elif (sim_length < exp_length):
        # downsample exp timeseries to match simulation data
        dpl2 = signal.resample(dpl2, sim_length)
    # Weighted root-mean-square error over the common window.
    return np.sqrt((weights * ((dpl1 - dpl2) ** 2)).sum() / weights.sum())
| [
37811,
9487,
284,
5412,
262,
19550,
4316,
526,
15931,
198,
198,
2,
46665,
25,
8774,
461,
21961,
1279,
76,
28121,
31,
76,
456,
13,
9869,
10187,
13,
15532,
29,
198,
2,
220,
220,
220,
220,
220,
220,
220,
220,
220,
3409,
34506,
27926,
... | 2.36185 | 3,460 |
from billy.scrape.bills import BillScraper, Bill
from billy.scrape.votes import Vote
from collections import defaultdict
from .util import get_client, get_url, backoff
# Methods (7):
# GetLegislationDetail(xs:int LegislationId, )
#
# GetLegislationDetailByDescription(ns2:DocumentType DocumentType,
# xs:int Number, xs:int SessionId)
#
# GetLegislationForSession(xs:int SessionId, )
#
# GetLegislationRange(ns2:LegislationIndexRangeSet Range, )
#
# GetLegislationRanges(xs:int SessionId,
# ns2:DocumentType DocumentType, xs:int RangeSize, )
#
# GetLegislationSearchResultsPaged(ns2:LegislationSearchConstraints
# Constraints, xs:int PageSize,
# xs:int StartIndex, )
# GetTitles()
# Module-level cache keyed by member -- presumably filled by scraper code
# outside this excerpt; verify against the rest of the module.
member_cache = {}
# Page template for a single bill; `session` and `bid` are interpolated per bill.
SOURCE_URL = "http://www.legis.ga.gov/Legislation/en-US/display/{session}/{bid}"
| [
6738,
2855,
88,
13,
1416,
13484,
13,
65,
2171,
1330,
3941,
3351,
38545,
11,
3941,
198,
6738,
2855,
88,
13,
1416,
13484,
13,
29307,
1330,
19175,
198,
6738,
17268,
1330,
4277,
11600,
198,
6738,
764,
22602,
1330,
651,
62,
16366,
11,
651,... | 1.971319 | 523 |
import argparse
import numpy as np
import glob
import re
from log import print_to_file
from scipy.fftpack import fftn, ifftn
from skimage.feature import peak_local_max, canny
from skimage.transform import hough_circle
import pickle as pickle
from paths import TRAIN_DATA_PATH, LOGS_PATH, PKL_TRAIN_DATA_PATH, PKL_TEST_DATA_PATH
from paths import TEST_DATA_PATH
# NOTE(review): the two constant groups below overlap -- `kernel_width`,
# `center_margin`, `num_peaks`, `num_circles` and `radstep` are assigned twice,
# so the "#ira" values win at import time (e.g. num_circles ends up 20, not 10).
# The module-level values are superseded anyway by extract_roi's own defaults
# for callers that rely on keyword defaults.
#joni
minradius = 15
maxradius = 65
kernel_width = 5
center_margin = 8
num_peaks = 10
num_circles = 10 # 20
radstep = 2
#ira
minradius_mm=25
maxradius_mm=45
kernel_width=5
center_margin=8
num_peaks=10
num_circles=20
radstep=2
def extract_roi(data, pixel_spacing, minradius_mm=15, maxradius_mm=65, kernel_width=5, center_margin=8, num_peaks=10,
                num_circles=10, radstep=2):
    """
    Returns center and radii of ROI region in (i,j) format

    Parameters: `data` is a sequence of slice dicts whose 'data' entry is an
    array indexed as [frame, i, j] (assumed from the shape[1]/shape[2] and
    ff1[1, :, :] accesses below -- TODO confirm); `pixel_spacing` converts the
    mm radius bounds to pixels.
    """
    # radius of the smallest and largest circles in mm estimated from the train set
    # convert to pixel counts
    minradius = int(minradius_mm / pixel_spacing)
    maxradius = int(maxradius_mm / pixel_spacing)
    ximagesize = data[0]['data'].shape[1]
    yimagesize = data[0]['data'].shape[2]
    # Coordinate grids used to accumulate a Gaussian "likelihood" surface.
    xsurface = np.tile(list(range(ximagesize)), (yimagesize, 1)).T
    ysurface = np.tile(list(range(yimagesize)), (ximagesize, 1))
    lsurface = np.zeros((ximagesize, yimagesize))
    allcenters = []
    allaccums = []
    allradii = []
    for dslice in data:
        # First temporal Fourier component highlights periodically moving
        # structures (the beating heart) in this slice.
        ff1 = fftn(dslice['data'])
        fh = np.absolute(ifftn(ff1[1, :, :]))
        fh[fh < 0.1 * np.max(fh)] = 0.0
        image = 1. * fh / np.max(fh)
        # find hough circles and detect two radii
        edges = canny(image, sigma=3)
        hough_radii = np.arange(minradius, maxradius, radstep)
        hough_res = hough_circle(edges, hough_radii)
        if hough_res.any():
            centers = []
            accums = []
            radii = []
            for radius, h in zip(hough_radii, hough_res):
                # For each radius, extract num_peaks circles
                peaks = peak_local_max(h, num_peaks=num_peaks)
                centers.extend(peaks)
                accums.extend(h[peaks[:, 0], peaks[:, 1]])
                radii.extend([radius] * num_peaks)
            # Keep the most prominent num_circles circles
            sorted_circles_idxs = np.argsort(accums)[::-1][:num_circles]
            for idx in sorted_circles_idxs:
                center_x, center_y = centers[idx]
                allcenters.append(centers[idx])
                allradii.append(radii[idx])
                allaccums.append(accums[idx])
                brightness = accums[idx]
                # Deposit a Gaussian bump weighted by the Hough accumulator.
                lsurface = lsurface + brightness * np.exp(
                    -((xsurface - center_x) ** 2 + (ysurface - center_y) ** 2) / kernel_width ** 2)
    # NOTE(review): if no circles were found at all, lsurface.max() is 0 and
    # this divides 0/0 -- presumably never happens on real data; confirm.
    lsurface = lsurface / lsurface.max()
    # select most likely ROI center
    roi_center = np.unravel_index(lsurface.argmax(), lsurface.shape)
    # determine ROI radius
    roi_x_radius = 0
    roi_y_radius = 0
    for idx in range(len(allcenters)):
        xshift = np.abs(allcenters[idx][0] - roi_center[0])
        yshift = np.abs(allcenters[idx][1] - roi_center[1])
        # Only circles whose centres lie close to the chosen ROI centre
        # contribute to the final radius estimate.
        if (xshift <= center_margin) & (yshift <= center_margin):
            roi_x_radius = np.max((roi_x_radius, allradii[idx] + xshift))
            roi_y_radius = np.max((roi_y_radius, allradii[idx] + yshift))
    if roi_x_radius > 0 and roi_y_radius > 0:
        roi_radii = roi_x_radius, roi_y_radius
    else:
        roi_radii = None
    return roi_center, roi_radii
def group_slices(slice_stack):
    """Partition *slice_stack* into sub-stacks sharing one image orientation.

    :param slice_stack: list of slice dicts carrying
        ``['metadata']['ImageOrientationPatient']``
    :return: list of slice stacks, one per distinct orientation
    """
    def orientation_of(s):
        return tuple(s['metadata']['ImageOrientationPatient'])

    distinct = list(set(orientation_of(s) for s in slice_stack))
    if len(distinct) == 1:
        # Single orientation: the input stack is already one group.
        return [slice_stack]
    groups = [[] for _ in distinct]
    for s in slice_stack:
        groups[distinct.index(orientation_of(s))].append(s)
    return groups
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description=__doc__)
    required = parser.add_argument_group('required arguments')
    #required.add_argument('-c', '--config',
    #                      help='configuration to run',
    #                      required=True)
    # No arguments are currently defined; parse_args still rejects unknown ones.
    args = parser.parse_args()
    data_paths = [PKL_TRAIN_DATA_PATH, PKL_TEST_DATA_PATH]
    # NOTE(review): LOGS_PATH is concatenated without a path separator --
    # presumably it already ends in '/'; confirm in paths.py.
    log_path = LOGS_PATH + "generate_roi.log"
    # All stdout inside this block is redirected into the log file.
    with print_to_file(log_path):
        for d in data_paths:
            # NOTE(review): get_slice2roi is not defined in this excerpt --
            # presumably defined elsewhere in the module; confirm.
            get_slice2roi(d, plot=True)
        print("log saved to '%s'" % log_path)
| [
11748,
1822,
29572,
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,
11748,
15095,
201,
198,
11748,
302,
201,
198,
6738,
2604,
1330,
3601,
62,
1462,
62,
7753,
201,
198,
6738,
629,
541,
88,
13,
487,
83,
8002,
1330,
277,
701,
77,
11... | 2.070381 | 2,387 |
import feedparser
from bs4 import BeautifulSoup
from dateutil.parser import parse
from datetime import timedelta
import pytz
from apiclient.discovery import build
from httplib2 import Http
from oauth2client import file, client, tools
from oauth2client.service_account import ServiceAccountCredentials
# OAuth scope granting full calendar access for the service account.
scopes = 'https://www.googleapis.com/auth/calendar'
credentials = ServiceAccountCredentials.from_json_keyfile_name(
    'client_secret.json', scopes)
# NOTE(review): http_auth is never used below -- CAL builds its own authorized
# Http(); this line looks redundant.
http_auth = credentials.authorize(Http())
CAL = build('calendar', 'v3', http=credentials.authorize(Http()))
# NOTE(review): get_calendar_data and sync_to_google_calendar are not defined
# in this excerpt -- presumably defined elsewhere in the script; confirm.
events = get_calendar_data()
sync_to_google_calendar(events)
| [
11748,
3745,
48610,
198,
6738,
275,
82,
19,
1330,
23762,
50,
10486,
198,
6738,
3128,
22602,
13,
48610,
1330,
21136,
198,
6738,
4818,
8079,
1330,
28805,
12514,
198,
11748,
12972,
22877,
198,
198,
6738,
2471,
291,
75,
1153,
13,
67,
40821,... | 3.139303 | 201 |
import os
import sys
LOCALPATH = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, LOCALPATH + '/../../../../')
from app.api.v2.db import Db
def format_book(book):
    """Map a raw book row onto a serializable dictionary.

    :param book: sequence of 11 column values in table order
        (id, title, description, category, price, quantity, minimum,
        image_url, created_by, updated_by, created_at)
    :return: dict keyed by column name; created_at is stringified
    """
    keys = ("id", "title", "description", "category", "price",
            "quantity", "minimum", "image_url", "created_by", "updated_by")
    formatted = {key: book[i] for i, key in enumerate(keys)}
    formatted["created_at"] = str(book[10])
    return formatted
| [
11748,
28686,
198,
11748,
25064,
198,
198,
29701,
1847,
34219,
796,
28686,
13,
6978,
13,
15908,
3672,
7,
418,
13,
6978,
13,
397,
2777,
776,
7,
834,
7753,
834,
4008,
198,
17597,
13,
6978,
13,
28463,
7,
15,
11,
37347,
1847,
34219,
134... | 2.085714 | 280 |
# -*- coding: utf-8 -*-
"""Main module."""
import os
from google.cloud import bigquery
from pbq.query import Query
from google.cloud import bigquery_storage_v1beta1
from google.cloud.exceptions import NotFound
from google.api_core.exceptions import BadRequest
import pandas as pd
import datetime
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
13383,
8265,
526,
15931,
198,
11748,
28686,
198,
6738,
23645,
13,
17721,
1330,
1263,
22766,
198,
6738,
279,
65,
80,
13,
22766,
1330,
43301,
198,
6738,
23645,
... | 3.274725 | 91 |
import re
import types
from functools import partial
# NOTE(review): Python 2 only -- `types.StringTypes` and `long` do not exist on
# Python 3; a port would use `(str, bytes)` and `int` instead.
LITERAL_TYPE = types.StringTypes + (int, float, long, bool, )
| [
11748,
302,
198,
11748,
3858,
198,
6738,
1257,
310,
10141,
1330,
13027,
628,
198,
43,
2043,
27130,
62,
25216,
796,
3858,
13,
10100,
31431,
1343,
357,
600,
11,
12178,
11,
890,
11,
20512,
11,
1267,
628
] | 3.277778 | 36 |
from setuptools import setup
# Packaging metadata for the single-module `lsearch` distribution.
setup(
    name='lsearch',
    version='1.0',
    description='The Head First Python Search Tools', author='HF Python 2e', author_email='hfpy2e@gmail.com',
    url='headfirstlabs.com',
    py_modules=['lsearch'],
)
| [
6738,
900,
37623,
10141,
1330,
9058,
198,
198,
40406,
7,
198,
220,
220,
220,
1438,
11639,
75,
12947,
3256,
198,
220,
220,
220,
2196,
11639,
16,
13,
15,
3256,
198,
220,
220,
220,
6764,
11639,
464,
7123,
3274,
11361,
11140,
20003,
3256,... | 2.663043 | 92 |
from desktop_local_tests.public_ip_during_disruption import PublicIPDuringDisruptionTestCase
from desktop_local_tests.windows.windows_reorder_adapters_disrupter import WindowsReorderAdaptersDisrupter
| [
6738,
11364,
62,
12001,
62,
41989,
13,
11377,
62,
541,
62,
42122,
62,
6381,
6417,
1330,
5094,
4061,
7191,
7279,
6417,
14402,
20448,
198,
6738,
11364,
62,
12001,
62,
41989,
13,
28457,
13,
28457,
62,
260,
2875,
62,
324,
12126,
62,
6381,... | 3.703704 | 54 |
import json
import logging
from http import HTTPStatus
from typing import Any, Dict, List, Optional, Tuple, Type, Union
import werkzeug
from flask import Blueprint, Flask, Response, abort, jsonify
from flask.views import MethodView
from flask_cors import CORS
from gevent.pywsgi import WSGIServer
from geventwebsocket import Resource as WebsocketResource, WebSocketServer
from marshmallow import Schema
from marshmallow.exceptions import ValidationError
from webargs.flaskparser import parser
from werkzeug.exceptions import NotFound
from rotkehlchen.api.rest import RestAPI, api_response, wrap_in_fail_result
from rotkehlchen.api.v1.parser import ignore_kwarg_parser, resource_parser
from rotkehlchen.api.v1.resources import (
AaveBalancesResource,
AaveHistoryResource,
AccountingReportDataResource,
AccountingReportsResource,
AdexBalancesResource,
AdexHistoryResource,
AllAssetsResource,
AllBalancesResource,
AssetIconsResource,
AssetMovementsResource,
AssetsReplaceResource,
AssetsTypesResource,
AssetUpdatesResource,
AssociatedLocations,
AsyncTasksResource,
AvalancheTransactionsResource,
BalancerBalancesResource,
BalancerEventsHistoryResource,
BalancerTradesHistoryResource,
BinanceAvailableMarkets,
BinanceUserMarkets,
BlockchainBalancesResource,
BlockchainsAccountsResource,
BTCXpubResource,
CompoundBalancesResource,
CompoundHistoryResource,
CounterpartiesResource,
CurrentAssetsPriceResource,
DatabaseBackupsResource,
DatabaseInfoResource,
DataImportResource,
DBSnapshotDeletingResource,
DBSnapshotDownloadingResource,
DBSnapshotExportingResource,
DBSnapshotImportingResource,
DefiBalancesResource,
ERC20TokenInfo,
ERC20TokenInfoAVAX,
Eth2DailyStatsResource,
Eth2StakeDepositsResource,
Eth2StakeDetailsResource,
Eth2ValidatorsResource,
EthereumAirdropsResource,
EthereumAssetsResource,
EthereumModuleDataResource,
EthereumModuleResource,
EthereumTransactionsResource,
ExchangeBalancesResource,
ExchangeRatesResource,
ExchangesDataResource,
ExchangesResource,
ExternalServicesResource,
HistoricalAssetsPriceResource,
HistoryActionableItemsResource,
HistoryBaseEntryResource,
HistoryDownloadingResource,
HistoryExportingResource,
HistoryProcessingResource,
HistoryStatusResource,
IgnoredActionsResource,
IgnoredAssetsResource,
InfoResource,
LedgerActionsResource,
LiquityStakingHistoryResource,
LiquityStakingResource,
LiquityTrovesHistoryResource,
LiquityTrovesResource,
LoopringBalancesResource,
MakerdaoDSRBalanceResource,
MakerdaoDSRHistoryResource,
MakerdaoVaultDetailsResource,
MakerdaoVaultsResource,
ManuallyTrackedBalancesResource,
MessagesResource,
NamedEthereumModuleDataResource,
NamedOracleCacheResource,
NFTSBalanceResource,
NFTSResource,
OraclesResource,
OwnedAssetsResource,
PeriodicDataResource,
PickleDillResource,
PingResource,
QueriedAddressesResource,
ReverseEnsResource,
SettingsResource,
StakingResource,
StatisticsAssetBalanceResource,
StatisticsNetvalueResource,
StatisticsRendererResource,
StatisticsValueDistributionResource,
SushiswapBalancesResource,
SushiswapEventsHistoryResource,
SushiswapTradesHistoryResource,
TagsResource,
TradesResource,
UniswapBalancesResource,
UniswapEventsHistoryResource,
UniswapTradesHistoryResource,
UserAssetsResource,
UserPasswordChangeResource,
UserPremiumKeyResource,
UserPremiumSyncResource,
UsersByNameResource,
UsersResource,
WatchersResource,
YearnVaultsBalancesResource,
YearnVaultsHistoryResource,
YearnVaultsV2BalancesResource,
YearnVaultsV2HistoryResource,
create_blueprint,
)
from rotkehlchen.api.websockets.notifier import RotkiNotifier, RotkiWSApp
from rotkehlchen.logging import RotkehlchenLogsAdapter
URLS = List[
Union[
Tuple[str, Type[MethodView]],
Tuple[str, Type[MethodView], str],
]
]
URLS_V1: URLS = [
('/users', UsersResource),
('/watchers', WatchersResource),
('/users/<string:name>', UsersByNameResource),
('/users/<string:name>/password', UserPasswordChangeResource),
('/premium', UserPremiumKeyResource),
('/premium/sync', UserPremiumSyncResource),
('/settings', SettingsResource),
('/tasks/', AsyncTasksResource),
('/tasks/<int:task_id>', AsyncTasksResource, 'specific_async_tasks_resource'),
('/exchange_rates', ExchangeRatesResource),
('/external_services/', ExternalServicesResource),
('/oracles', OraclesResource),
('/oracles/<string:oracle>/cache', NamedOracleCacheResource),
('/exchanges', ExchangesResource),
('/exchanges/balances', ExchangeBalancesResource),
(
'/exchanges/balances/<string:location>',
ExchangeBalancesResource,
'named_exchanges_balances_resource',
),
('/assets/<string:asset>/icon', AssetIconsResource),
('/trades', TradesResource),
('/ledgeractions', LedgerActionsResource),
('/asset_movements', AssetMovementsResource),
('/tags', TagsResource),
('/exchanges/binance/pairs', BinanceAvailableMarkets),
('/exchanges/binance/pairs/<string:name>', BinanceUserMarkets),
('/exchanges/data/', ExchangesDataResource),
('/exchanges/data/<string:location>', ExchangesDataResource, 'named_exchanges_data_resource'),
('/balances/blockchains', BlockchainBalancesResource),
(
'/balances/blockchains/<string:blockchain>',
BlockchainBalancesResource,
'named_blockchain_balances_resource',
),
('/balances/', AllBalancesResource),
('/balances/manual', ManuallyTrackedBalancesResource),
('/statistics/netvalue', StatisticsNetvalueResource),
('/statistics/balance/<string:asset>', StatisticsAssetBalanceResource),
('/statistics/value_distribution', StatisticsValueDistributionResource),
('/statistics/renderer', StatisticsRendererResource),
('/messages/', MessagesResource),
('/periodic/', PeriodicDataResource),
('/history/', HistoryProcessingResource),
('/history/status', HistoryStatusResource),
('/history/export/', HistoryExportingResource),
('/history/download/', HistoryDownloadingResource),
('/history/events', HistoryBaseEntryResource),
('/history/actionable_items', HistoryActionableItemsResource),
('/reports/', AccountingReportsResource),
(
'/reports/<int:report_id>',
AccountingReportsResource,
'per_report_resource',
),
(
'/reports/<int:report_id>/data',
AccountingReportDataResource,
'per_report_data_resource',
),
('/queried_addresses', QueriedAddressesResource),
('/blockchains/ETH/transactions', EthereumTransactionsResource),
(
'/blockchains/ETH/transactions/<string:address>',
EthereumTransactionsResource,
'per_address_ethereum_transactions_resource',
),
('/blockchains/ETH2/validators', Eth2ValidatorsResource),
('/blockchains/ETH2/stake/deposits', Eth2StakeDepositsResource),
('/blockchains/ETH2/stake/details', Eth2StakeDetailsResource),
('/blockchains/ETH2/stake/dailystats', Eth2DailyStatsResource),
('/blockchains/ETH/defi', DefiBalancesResource),
('/blockchains/ETH/airdrops', EthereumAirdropsResource),
('/blockchains/ETH/erc20details/', ERC20TokenInfo),
('/blockchains/ETH/modules/<string:module_name>/data', NamedEthereumModuleDataResource),
('/blockchains/ETH/modules/data', EthereumModuleDataResource),
('/blockchains/ETH/modules/data/counterparties', CounterpartiesResource),
('/blockchains/ETH/modules/', EthereumModuleResource),
('/blockchains/ETH/modules/makerdao/dsrbalance', MakerdaoDSRBalanceResource),
('/blockchains/ETH/modules/makerdao/dsrhistory', MakerdaoDSRHistoryResource),
('/blockchains/ETH/modules/makerdao/vaults', MakerdaoVaultsResource),
('/blockchains/ETH/modules/makerdao/vaultdetails', MakerdaoVaultDetailsResource),
('/blockchains/ETH/modules/aave/balances', AaveBalancesResource),
('/blockchains/ETH/modules/aave/history', AaveHistoryResource),
('/blockchains/ETH/modules/adex/balances', AdexBalancesResource),
('/blockchains/ETH/modules/adex/history', AdexHistoryResource),
('/blockchains/ETH/modules/balancer/balances', BalancerBalancesResource),
('/blockchains/ETH/modules/balancer/history/trades', BalancerTradesHistoryResource),
('/blockchains/ETH/modules/balancer/history/events', BalancerEventsHistoryResource),
('/blockchains/ETH/modules/compound/balances', CompoundBalancesResource),
('/blockchains/ETH/modules/compound/history', CompoundHistoryResource),
('/blockchains/ETH/modules/uniswap/balances', UniswapBalancesResource),
('/blockchains/ETH/modules/uniswap/history/events', UniswapEventsHistoryResource),
('/blockchains/ETH/modules/uniswap/history/trades', UniswapTradesHistoryResource),
('/blockchains/ETH/modules/sushiswap/balances', SushiswapBalancesResource),
('/blockchains/ETH/modules/sushiswap/history/events', SushiswapEventsHistoryResource),
('/blockchains/ETH/modules/sushiswap/history/trades', SushiswapTradesHistoryResource),
('/blockchains/ETH/modules/yearn/vaults/balances', YearnVaultsBalancesResource),
('/blockchains/ETH/modules/yearn/vaults/history', YearnVaultsHistoryResource),
('/blockchains/ETH/modules/yearn/vaultsv2/balances', YearnVaultsV2BalancesResource),
('/blockchains/ETH/modules/yearn/vaultsv2/history', YearnVaultsV2HistoryResource),
('/blockchains/ETH/modules/liquity/balances', LiquityTrovesResource),
('/blockchains/ETH/modules/liquity/events/trove', LiquityTrovesHistoryResource),
('/blockchains/ETH/modules/liquity/events/staking', LiquityStakingHistoryResource),
('/blockchains/ETH/modules/liquity/staking', LiquityStakingResource),
('/blockchains/ETH/modules/pickle/dill', PickleDillResource),
('/blockchains/ETH/modules/loopring/balances', LoopringBalancesResource),
('/blockchains/<string:blockchain>', BlockchainsAccountsResource),
('/blockchains/BTC/xpub', BTCXpubResource),
('/blockchains/AVAX/transactions', AvalancheTransactionsResource),
(
'/blockchains/AVAX/transactions/<string:address>',
AvalancheTransactionsResource,
'per_address_avalanche_transactions_resource',
),
('/blockchains/AVAX/erc20details/', ERC20TokenInfoAVAX),
('/assets', OwnedAssetsResource),
('/assets/types', AssetsTypesResource),
('/assets/replace', AssetsReplaceResource),
('/assets/all', AllAssetsResource),
('/assets/ethereum', EthereumAssetsResource),
('/assets/prices/current', CurrentAssetsPriceResource),
('/assets/prices/historical', HistoricalAssetsPriceResource),
('/assets/ignored', IgnoredAssetsResource),
('/assets/updates', AssetUpdatesResource),
('/assets/user', UserAssetsResource),
('/actions/ignored', IgnoredActionsResource),
('/info', InfoResource),
('/ping', PingResource),
('/import', DataImportResource),
('/nfts', NFTSResource),
('/nfts/balances', NFTSBalanceResource),
('/database/info', DatabaseInfoResource),
('/database/backups', DatabaseBackupsResource),
('/locations/associated', AssociatedLocations),
('/staking/kraken', StakingResource),
('/snapshot/download', DBSnapshotDownloadingResource),
('/snapshot/export', DBSnapshotExportingResource),
('/snapshot/import', DBSnapshotImportingResource),
('/snapshot/delete', DBSnapshotDeletingResource),
('/ens/reverse', ReverseEnsResource),
]
logger = logging.getLogger(__name__)
log = RotkehlchenLogsAdapter(logger)
| [
11748,
33918,
198,
11748,
18931,
198,
6738,
2638,
1330,
14626,
19580,
198,
6738,
19720,
1330,
4377,
11,
360,
713,
11,
7343,
11,
32233,
11,
309,
29291,
11,
5994,
11,
4479,
198,
198,
11748,
266,
9587,
2736,
1018,
198,
6738,
42903,
1330,
... | 2.887203 | 4,087 |
import numpy as np
pos = []
normals = []
p = [[-0.4722227, -0.24517583, -0.6370031]]
n = [[2.02215104e-04, -3.23903880e-05, 9.99999979e-01]]
pos.append(p)
normals.append(n)
p = [[-0.2549828, -0.24587737, -0.63704705]]
n = [[2.02215104e-04, -3.23903880e-05, 9.99999979e-01]]
pos.append(p)
normals.append(n)
p = [[-0.25787751, -0.38255749, -0.63705089]]
n = [[2.02215104e-04, -3.23903880e-05, 9.99999979e-01]]
pos.append(p)
normals.append(n)
p = [[-0.47206733, -0.38317576, -0.6370076]]
n = [[2.02215104e-04, -3.23903880e-05, 9.99999979e-01]]
pos.append(p)
normals.append(n)
#Contact lgripper/handrail
#Left
p = [[0.3651077, 0.33419711, 0.63609439]]
n = [[-3.39491173e-05, 9.99999875e-01, 4.99472000e-04]]
pos.append(p)
normals.append(n)
#Right
#p = [[0.36510907, 0.29419711, 0.63607441]]
#p = [[0.3651077, 0.33419711, 0.63609439]]
#n = [[3.44761855e-05, -9.99999874e-01, -5.00077386e-04]]
#pos.append(p)
#normals.append(n)
#Bottom
#p = [[0.34212609, 0.31418314, 0.66248165]]
#n = [[-6.56636734e-01, -3.99160434e-04, 7.54206895e-01]]
#pos.append(p)
#normals.append(n)
#
##Top
p = [[0.38480749, 0.31420908, 0.61345819]]
n = [[6.56636734e-01, 4.00439950e-04, -7.54206894e-01]]
pos.append(p)
normals.append(n)
pos = [np.array(px).T for px in pos]
#for p in pos:
# p[2, 0] = 0.0
normals = [np.array(nx).T for nx in normals]
| [
11748,
299,
32152,
355,
45941,
198,
198,
1930,
796,
17635,
198,
27237,
874,
796,
17635,
198,
198,
79,
796,
16410,
12,
15,
13,
2857,
1828,
24403,
11,
532,
15,
13,
22995,
1558,
46239,
11,
532,
15,
13,
21,
2718,
405,
3132,
11907,
198,
... | 1.833103 | 725 |
import inspect
import re
import types
from collections import namedtuple
from typing import List, Union, Dict
from flask_restplus import fields
from ...common import MetaData, Entry, Arg, ArgSource, RpcType,\
type_def, rpc_doc_args_key, rpc_doc_resp_key, rpc_impl_rename
from ...common.web.namespace import get_namespace, NamespaceInfo
function_type = frozenset([staticmethod, classmethod, types.FunctionType])
func_obj_types = frozenset([staticmethod, classmethod])
method_reg = re.compile(r"^[\s\S]+?(?=:param|:return:|$)")
def extract_methods(cls):
"""
Class, api doc
:param cls:
:return:
"""
# process cls' s apidoc if exists
base_entries_arg: List[Arg] = process_cls_args(cls)
entries = []
for (attr_name, attr) in cls.__dict__.items():
attr_type = type(attr)
if attr_name.startswith('_') or attr_type not in function_type:
continue
rpc_doc_args: type_def.Dict = getattr(attr, rpc_doc_args_key, None)
rpc_doc_resp: type_def.RpcType = getattr(attr, rpc_doc_resp_key, None)
if attr_type in func_obj_types:
# extract real method from method object
attr = getattr(attr, "__func__", None)
api_doc = getattr(attr, '__apidoc__', None)
# TODO: ,
entry = None
if api_doc:
entry = analyse_doc(cls, attr, attr_name, api_doc, base_entries_arg)
entry.args = base_entries_arg + entry.args
args = list(base_entries_arg)
result = type_def.Void()
if rpc_doc_args:
# args model RpcType.Dict
# attr_name ArgSource, http method,
#
for name, value in rpc_doc_args.get_elem_info().items():
source = get_source_type(attr_name, value)
args.append(Arg(name, value, value.default_value,
value.description, value.required, source))
if rpc_doc_resp:
result = rpc_doc_resp
if not entry:
raw_doc = inspect.getdoc(attr) or ""
method_doc = method_reg.search(raw_doc)
if method_doc:
method_doc = method_doc.group(0)
args = sorted(args, key=lambda a: a.name.lower())
entry = Entry(attr_name, args, result, method_doc)
entries.append(entry)
return sorted(entries, key=lambda e: e.name.lower())
def get_source_type(method_name: str, field: RpcType) -> ArgSource:
"""
field http
get params, post body
field source, field
"""
source = ArgSource.UNKNOWN
if method_name == "get":
source = ArgSource.PARAMS
elif method_name == "post":
source = ArgSource.BODY
if field.source != ArgSource.UNKNOWN:
source = field.get_source()
return source
def process_cls_args(cls) -> List[Arg]:
"""
cls api_doc
"""
cls_api_doc = getattr(cls, "__apidoc__", {})
params: dict = cls_api_doc.get("params", {})
args: List[Arg] = []
for key, value in params.items():
field_type = switch_type(value.get("type", "str"), value)
source_in = value.get("in", "path")
source = ArgSource.PARAMS
if source_in == "path":
source = ArgSource.PATH
elif source_in == "body":
source = ArgSource.BODY
elif source_in == "header":
source = ArgSource.HEADER
args.append(Arg(key, field_type, field_type.default_value, source=source, description=field_type.description))
# Flask CommonBase
ns_info: Union[NamespaceInfo, None] = get_namespace(cls)
if ns_info is not None:
for arg_name, arg_value in ns_info.params.items():
args.append(
Arg(
arg_name,
arg_value,
arg_value.default_value,
description=arg_value.description,
required=arg_value.required,
source=ArgSource.PATH
)
)
return args
def analyse_doc(cls, method, name, api_doc, class_args: List[Arg]) -> Entry:
"""
cls method api_doc
:param cls
:param method
:param name:
:param api_doc:
:param class_args:
:return:
"""
#
# wrapper,
while hasattr(method, "__wrapped__"):
method = getattr(method, "__wrapped__")
# entry
method_doc_raw = inspect.getdoc(method) or ""
method_doc = method_reg.search(method_doc_raw) or ""
if method_doc:
method_doc = method_doc.group(0)
args = analyse_args(cls, method, method_doc_raw, api_doc, class_args)
entry = Entry(name, args, type_def.Void(), method_doc)
status_codes = api_doc.get("responses", {}).keys()
for status_code in status_codes:
result = analyse_result(api_doc, status_code)
if result:
entry.set_result(status_code, result)
return entry
def analyse_result(api_doc, status_code: int) -> type_def.RpcType:
"""
api_doc
:param api_doc:
:param status_code:
:return:
"""
(desc, data_meta) = api_doc.get("responses", {}).get(status_code, (None, None))
if not desc and not data_meta:
return type_def.Void() #
if isinstance(data_meta, dict):
#
result = type_def.fields.Dict(required=True)
for (key, type_info) in data_meta.items():
result.add_field(key, switch_type(type_info))
else:
#
result = switch_type(data_meta)
return result
def analyse_args(cls, method, method_doc_raw, api_doc, class_args: List[Arg]) -> List[Arg]:
"""
cls method api_doc method
:param cls:
:param method:
:param method_doc_raw:
:param api_doc:
:param class_args:
:return:
"""
frame_info = inspect.getfullargspec(method)
method_args = frame_info.args
if len(frame_info.args) > 0 and frame_info.args[0] == "self":
method_args = method_args[1:]
if len(method_args) > len(frame_info.annotations):
#
raise Exception(
" %s %s %s %s . \n"
" %s , eg: id \n\t"
"def hello(id: int): pass" %
(
cls.__name__,
method.__name__,
len(frame_info.args),
len(frame_info.annotations),
frame_info.args
)
)
params: List[Arg] = []
params_dict = api_doc.get("params", {})
params.extend(analyse_flask_args(method, params_dict, False) or [])
# post
expect_list = api_doc.get("expect", [])
for expect in expect_list:
params.extend(analyse_flask_args(method, expect, True) or [])
#
func_params: List[Arg] = []
for (index, arg) in enumerate(method_args):
arg_type = switch_type(frame_info.annotations[arg])
if isinstance(arg_type, (type_def.Void, )):
continue
# try to extract documentation from doc
arg_doc = re.search(
r":param %s:(?P<doc>[\s\S]+?)(?=:param|:return|$)" % arg, method_doc_raw)
if arg_doc:
arg_doc = arg_doc.group("doc")
arg_info = Arg(arg, arg_type, None, arg_doc or "")
func_params.append(arg_info)
args_len = len(func_params) - 1
for (index, default) in enumerate(frame_info.defaults or []):
func_params[len(args_len - index)].default = default
#
for p in func_params:
is_dup: bool = False
for pp in params:
if p.name == pp.name:
is_dup = True
break
if not is_dup:
for pp in class_args:
if p.name == pp.name:
is_dup = True
break
if not is_dup:
params.append(p)
return params
def analyse_flask_args(method, type_dict, in_body: bool) -> List[Arg]:
"""
type_dict flask
:param method
:param type_dict:
:param in_body: in_body body, in
in header header, get
:return:
"""
params = []
for (key, value) in type_dict.items():
if isinstance(value, dict):
# param flask doc
attr_type = value.get("type", None)
attr_type = switch_type(attr_type, value)
if not attr_type or isinstance(attr_type, type_def.Void):
raise Exception("%s %s " % (method, key))
if in_body:
source = ArgSource.BODY
else:
if value.get("in", "params") == "params":
source = ArgSource.PARAMS
else:
source = ArgSource.HEADER
arg = Arg(key, attr_type, default=value.get("default", None),
description=value.get("description", ""), source=source,
required=attr_type.required)
params.append(arg)
else:
attr_type = switch_type(value)
if attr_type:
if in_body:
source = ArgSource.BODY
else:
source = ArgSource.PARAMS
required = True
if value.required is not None:
required = not not value.required
arg = Arg(key, attr_type, attr_type.default_value, value.description,
required=required, source=source)
params.append(arg)
return params
str_literal = ["str", "string"]
number_literal = ["int", "integer"]
base_mapping_fields = {
"default": None,
"required": True,
"default_value": None,
"maximum": None,
"minimal": None,
}
sm = namedtuple(
"DefaultMapping",
[
"description", "required", "min_length",
"max_length", "min_items", "max_items",
"default_value", "must_true",
"must_false", "minimum", "maximum"
]
)(
("description", ""),
("required", True),
("min_length", None),
("max_length", None),
("min_items", None),
("max_items", None),
("default_value", None),
("must_true", None),
("must_false", None),
("minimum", None),
("maximum", None)
)
field_adaptor = {
"minimum": "min",
"maximum": "max",
"default_value": "default"
}
base_sm = [sm.description, sm.default_value, sm.required]
number_sm = build_sm(sm.minimum, sm.maximum)
str_sm = build_sm(sm.min_length, sm.max_length)
bool_sm = build_sm(sm.must_true, sm.must_false)
list_sm = build_sm(sm.description, sm.min_items, sm.max_items, need_base=False)
type_switch_mapping = {
"int": number_sm,
int: number_sm,
"integer": number_sm,
fields.Integer: number_sm,
"float": number_sm,
float: number_sm,
fields.Float: number_sm,
"str": str_sm,
"string": str_sm,
str: str_sm,
fields.String: str_sm,
"bool": bool_sm,
bool: bool_sm,
fields.Boolean: bool_sm,
"list": list_sm,
fields.List: list_sm,
}
type_convert_mapping = {
int: type_def.fields.Integer,
str: type_def.fields.String,
float: type_def.fields.Float,
bool: type_def.fields.Bool,
fields.Integer: type_def.fields.Integer,
fields.String: type_def.fields.String,
fields.Boolean: type_def.fields.Boolean,
fields.Float: type_def.fields.Float
}
def switch_type(from_type, addition: Union[dict, None] = None) -> type_def.RpcType:
"""
,
addition flask maximum, default, max_items
:param from_type:
:param addition
:return:
"""
map_func = type_switch_mapping.get(from_type, None)
if map_func is None:
map_func = type_switch_mapping.get(type(from_type), lambda _: {})
kwargs = map_func(addition)
if isinstance(from_type, str):
if from_type in str_literal:
return type_def.fields.String(**kwargs)
elif from_type in number_literal:
return type_def.fields.Integer(**kwargs)
# flask
from_type_constructor = type_convert_mapping.get(from_type, None)
if not from_type_constructor:
from_type_constructor = type_convert_mapping.get(type(from_type), None)
if from_type_constructor:
return from_type_constructor(**kwargs)
if isinstance(from_type, fields.List):
elem_type = switch_type(from_type.container)
return type_def.fields.List(
elem_type, **kwargs)
elif isinstance(from_type, fields.Nested):
field_dict = {}
for (field, field_value) in from_type.model.items():
field_dict[field] = switch_type(field_value, field_value)
return type_def.fields.Dict(field_dict, from_type.description, from_type.required)
else:
return type_def.Void()
| [
11748,
10104,
198,
11748,
302,
198,
11748,
3858,
198,
198,
6738,
17268,
1330,
3706,
83,
29291,
198,
6738,
19720,
1330,
7343,
11,
4479,
11,
360,
713,
198,
6738,
42903,
62,
2118,
9541,
1330,
7032,
198,
198,
6738,
2644,
11321,
1330,
30277,... | 2.118312 | 6,018 |
import logging
import random
from time import time
from threading import RLock, Lock, Thread
from bot import LOGGER, download_dict, download_dict_lock, app, STOP_DUPLICATE, STORAGE_THRESHOLD
from bot.helper.ext_utils.bot_utils import get_readable_file_size
from ..status_utils.telegram_download_status import TelegramDownloadStatus
from bot.helper.telegram_helper.message_utils import sendMarkup, sendMessage, sendStatusMessage
from bot.helper.mirror_utils.upload_utils.gdriveTools import GoogleDriveHelper
from bot.helper.ext_utils.fs_utils import check_storage_threshold
global_lock = Lock()
GLOBAL_GID = set()
logging.getLogger("pyrogram").setLevel(logging.WARNING)
| [
11748,
18931,
198,
11748,
4738,
198,
198,
6738,
640,
1330,
640,
198,
6738,
4704,
278,
1330,
371,
25392,
11,
13656,
11,
14122,
198,
198,
6738,
10214,
1330,
41605,
30373,
11,
4321,
62,
11600,
11,
4321,
62,
11600,
62,
5354,
11,
598,
11,
... | 3.29902 | 204 |
import setuptools
setuptools.setup(
name="qualityforward",
version="1.1",
author="Atsushi Nakatsugawa",
author_email="atsushi@moongift.jp",
description="Python library for QualityForward API",
long_description="This is python library for QualityForward API. QualityForward is cloud based test management service.",
long_description_content_type="text/markdown",
url="https://cloud.veriserve.co.jp/",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3.7",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
]
)
| [
11748,
900,
37623,
10141,
198,
220,
198,
2617,
37623,
10141,
13,
40406,
7,
198,
220,
220,
220,
1438,
2625,
13237,
11813,
1600,
198,
220,
220,
220,
2196,
2625,
16,
13,
16,
1600,
198,
220,
220,
220,
1772,
2625,
32,
912,
17731,
22255,
... | 2.923077 | 221 |
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin, AbstractUser
from django.utils import timezone
from django.utils.translation import gettext as _
from django import forms
from django.contrib.auth.hashers import make_password
from django.contrib.auth import get_user_model
from django.contrib.auth.models import User
from phonenumber_field.modelfields import PhoneNumberField
from datetime import datetime
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
27741,
14881,
12982,
11,
2448,
8481,
35608,
259,
11,
27741,
12982,
198,
6738,
42625,
14208,
13,
26791,
1330,
640,
11340,
198,
... | 3.717742 | 124 |
'''
Run lexical substitution experiments
'''
import sys
import time
import argparse
import re
import numpy
from jcs.jcs_io import extract_word_weight
from jcs.data.context_instance import ContextInstance
from jcs.jcs_io import vec_to_str
from jcs.jcs_io import vec_to_str_generated
from jcs.cs_embedding_inferrer import CsEmbeddingInferrer
from jcs.context2vec_inferrer import Context2vecInferrer
target_re = re.compile(".*__(.*)__.*")
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='JCS utility')
parser.add_argument('--inferrer', choices=['lstm', 'emb'],
default='lstm',
help='context type ("lstm", "emb")')
# Only for Context2vecInferrer
parser.add_argument('-lstm_config', action="store", dest="lstm_config", default=None, help="config file of lstm context model and respective word embeddings")
# Only for CsEmbeddingInferrer
parser.add_argument('-embeddingpath', action="store", dest="embeddingpath", default=None, help="prefix to files containing word embeddings")
parser.add_argument('-embeddingpathc', action="store", dest="embeddingpathc", default=None, help="prefix to files containing context word embeddings")
parser.add_argument('-vocabfile', action="store", dest="vocabfile")
parser.add_argument('-bow',action='store',dest='bow_size', default=-1, type=int, help="context bag-of-words window size. 0 means entire sentence. -1 means syntactic dependency contexts.")
# Common
parser.add_argument('-targetsfile', action="store", dest="targetsfile", default=None)
parser.add_argument('-testfile', action="store", dest="testfile")
parser.add_argument('-testfileconll', action="store", dest="testfileconll", default=None, help="test file with sentences parsed in conll format")
parser.add_argument('-candidatesfile', action="store", dest="candidatesfile", default=None)
parser.add_argument('-resultsfile', action="store", dest="resultsfile")
parser.add_argument('-contextmath', action="store", dest="contextmath", default=None, help="arithmetics used to consider context [add|mult|geomean|none]")
parser.add_argument('--ignoretarget', action="store_true", dest="ignoretarget", default=False, help="ignore lhs target. compute only context compatibility.")
parser.add_argument('--nopos',action='store_true',dest='no_pos', default=False, help="ignore part-of-speech of target word")
parser.add_argument('-topgenerated', action="store", dest="topgenerated", type=int, default=10, help="top entries to print in generated parvecs")
parser.add_argument('--debug',action='store_true',dest='debug')
args = parser.parse_args(sys.argv[1:])
config_file_name = args.resultsfile + ".CONFIG"
cf = open(config_file_name, 'w')
cf.write(' '.join(sys.argv)+'\n')
cf.close()
numpy.seterr(all='raise', divide='raise', over='raise', under='raise', invalid='raise')
run(args)
| [
7061,
6,
201,
198,
10987,
31191,
605,
32097,
10256,
201,
198,
7061,
6,
201,
198,
11748,
25064,
201,
198,
11748,
640,
201,
198,
11748,
1822,
29572,
201,
198,
11748,
302,
201,
198,
11748,
299,
32152,
201,
198,
201,
198,
6738,
474,
6359,... | 2.743521 | 1,119 |
__version__ = 'unknown'
try:
__version__ = __import__('pkg_resources').get_distribution('django_richenum').version
except Exception as e:
pass
| [
834,
9641,
834,
796,
705,
34680,
6,
198,
28311,
25,
198,
220,
220,
220,
11593,
9641,
834,
796,
11593,
11748,
834,
10786,
35339,
62,
37540,
27691,
1136,
62,
17080,
3890,
10786,
28241,
14208,
62,
1173,
831,
388,
27691,
9641,
198,
16341,
... | 2.960784 | 51 |
import numpy
with open ("dic.txt", "w", encoding="utf-8") as dic:
for x in range(5, 790, 1):
if 92 < x <= 113:
dic.write('"'+str(x)+'"'+":"+ '"'+'1'+'",')
elif 113 < x <= 133:
dic.write('"'+str(x)+'"'+":"+ '"'+'2'+'",')
elif 133 < x <= 153:
dic.write('"'+str(x)+'"'+":"+ '"'+'3'+'",')
elif 153 < x <= 173:
dic.write('"'+str(x)+'"'+":"+ '"'+'4'+'",')
elif 173 < x <= 193:
dic.write('"'+str(x)+'"'+":"+ '"'+'5'+'",')
elif 193 < x <= 213:
dic.write('"'+str(x)+'"'+":"+ '"'+'6'+'",')
elif 213 < x <= 233:
dic.write('"'+str(x)+'"'+":"+ '"'+'7'+'",')
elif 233 < x <= 253:
dic.write('"'+str(x)+'"'+":"+ '"'+'8'+'",')
elif 253 < x <= 273:
dic.write('"'+str(x)+'"'+":"+ '"'+'9'+'",')
elif 273 < x <= 293:
dic.write('"'+str(x)+'"'+":"+ '"'+'10'+'",')
elif 293 < x <= 313:
dic.write('"'+str(x)+'"'+":"+ '"'+'11'+'",')
elif 313 < x <= 333:
dic.write('"'+str(x)+'"'+":"+ '"'+'12'+'",')
elif 333 < x <= 353:
dic.write('"'+str(x)+'"'+":"+ '"'+'13'+'",')
elif 353 < x <= 373:
dic.write('"'+str(x)+'"'+":"+ '"'+'14'+'",')
elif 373 < x <= 393:
dic.write('"'+str(x)+'"'+":"+ '"'+'15'+'",')
elif 393 < x <= 413:
dic.write('"'+str(x)+'"'+":"+ '"'+'16'+'",')
elif 413 < x <= 433:
dic.write('"'+str(x)+'"'+":"+ '"'+'17'+'",')
elif 433 < x <= 453:
dic.write('"'+str(x)+'"'+":"+ '"'+'18'+'",')
elif 453 < x <= 473:
dic.write('"'+str(x)+'"'+":"+ '"'+'19'+'",')
elif 473 < x <= 493:
dic.write('"'+str(x)+'"'+":"+ '"'+'20'+'",')
elif 493 < x <= 513:
dic.write('"'+str(x)+'"'+":"+ '"'+'21'+'",')
elif 513 < x <= 533:
dic.write('"'+str(x)+'"'+":"+ '"'+'22'+'",')
elif 533 < x <= 553:
dic.write('"'+str(x)+'"'+":"+ '"'+'23'+'",')
elif 553 < x <= 573:
dic.write('"'+str(x)+'"'+":"+ '"'+'24'+'",')
elif 573 < x <= 593:
dic.write('"'+str(x)+'"'+":"+ '"'+'25'+'",')
elif 593 < x <= 613:
dic.write('"'+str(x)+'"'+":"+ '"'+'26'+'",')
elif 613 < x <= 633:
dic.write('"'+str(x)+'"'+":"+ '"'+'27'+'",')
elif 633 < x <= 653:
dic.write('"'+str(x)+'"'+":"+ '"'+'28'+'",')
elif 653 < x <= 673:
dic.write('"'+str(x)+'"'+":"+ '"'+'29'+'",')
elif 673 < x <= 693:
dic.write('"'+str(x)+'"'+":"+ '"'+'30'+'",')
elif 693 < x <= 713:
dic.write('"'+str(x)+'"'+":"+ '"'+'31'+'",')
elif 713 < x <= 733:
dic.write('"'+str(x)+'"'+":"+ '"'+'32'+'",')
elif 733 < x <= 753:
dic.write('"'+str(x)+'"'+":"+ '"'+'33'+'",')
elif 753 < x <= 773:
dic.write('"'+str(x)+'"'+":"+ '"'+'34'+'",')
elif 773 < x <= 793:
dic.write('"'+str(x)+'"'+":"+ '"'+'35'+'",')
elif 4 < x <= 15:
dic.write('"'+str(x)+'"'+":"+ '"'+'36'+'",')
elif 15 < x <= 25:
dic.write('"'+str(x)+'"'+":"+ '"'+'37'+'",')
elif 25 < x <= 35:
dic.write('"'+str(x)+'"'+":"+ '"'+'38'+'",')
elif 35 < x <= 45:
dic.write('"'+str(x)+'"'+":"+ '"'+'39'+'",')
elif 45 < x <= 55:
dic.write('"'+str(x)+'"'+":"+ '"'+'40'+'",')
elif 55 < x <= 65:
dic.write('"'+str(x)+'"'+":"+ '"'+'41'+'",')
elif 65 < x <= 75:
dic.write('"'+str(x)+'"'+":"+ '"'+'42'+'",')
elif 75 < x <= 85:
dic.write('"'+str(x)+'"'+":"+ '"'+'43'+'",')
elif 85 < x <= 92:
dic.write('"'+str(x)+'"'+":"+ '"'+'44'+'",')
with open ("time.txt", "w", encoding="utf-8") as duree:
for y in numpy.arange(0, 1.7, 0.01):
if 0 < y <= 0.1:
duree.write('"'+str(y)+'"'+":"+ '"'+'80'+'",')
elif 0.1 < y <= 0.2:
duree.write('"'+str(y)+'"'+":"+ '"'+'81'+'",')
elif 0.2 < y <= 0.3:
duree.write('"'+str(y)+'"'+":"+ '"'+'82'+'",')
elif 0.3 < y <= 0.4:
duree.write('"'+str(y)+'"'+":"+ '"'+'83'+'",')
elif 0.4 < y <= 0.5:
duree.write('"'+str(y)+'"'+":"+ '"'+'84'+'",')
elif 0.5 < y <= 0.6:
duree.write('"'+str(y)+'"'+":"+ '"'+'85'+'",')
elif 0.6 < y <= 0.7:
duree.write('"'+str(y)+'"'+":"+ '"'+'86'+'",')
elif 0.7 < y <= 0.8:
duree.write('"'+str(y)+'"'+":"+ '"'+'87'+'",')
elif 0.8 < y <= 0.9:
duree.write('"'+str(y)+'"'+":"+ '"'+'88'+'",')
elif 0.9 < y <= 1:
duree.write('"'+str(y)+'"'+":"+ '"'+'89'+'",')
elif 1 < y <= 1.1:
duree.write('"'+str(y)+'"'+":"+ '"'+'90'+'",')
elif 1.1 < y <= 1.2:
duree.write('"'+str(y)+'"'+":"+ '"'+'91'+'",')
elif 1.2 < y <= 1.3:
duree.write('"'+str(y)+'"'+":"+ '"'+'92'+'",')
elif 1.3 < y <= 1.4:
duree.write('"'+str(y)+'"'+":"+ '"'+'93'+'",')
elif 1.4 < y <= 1.5:
duree.write('"'+str(y)+'"'+":"+ '"'+'94'+'",')
elif 1.5 < y <= 1.6:
duree.write('"'+str(y)+'"'+":"+ '"'+'95'+'",')
elif 1.6 < y <= 1.7:
duree.write('"'+str(y)+'"'+":"+ '"'+'96'+'",') | [
11748,
299,
32152,
201,
198,
4480,
1280,
5855,
67,
291,
13,
14116,
1600,
366,
86,
1600,
21004,
2625,
40477,
12,
23,
4943,
355,
288,
291,
25,
201,
198,
201,
198,
220,
220,
220,
329,
2124,
287,
2837,
7,
20,
11,
767,
3829,
11,
352,
... | 1.534141 | 3,632 |
# -*- coding: utf-8 -*-
'''
=============
scrim.globals
=============
Defines variables passed into the python script via Environment Variables by
scrim scripts. If SCRIM_SHELL is None, then the python script was not executed
by a scrim script.
SHELLS (list): list of available shells
SCRIM_SHELL (str): Parent shell, one of the above SHELLS
SCRIM_PATH (str): Path to output shell script
SCRIM_AUTO_WRITE (bool): Write to SCRIM_PATH when python exits?
SCRIM_SCRIPT (str): Path to the scrim script that invoked python
SCRIM_DEBUG (bool): Is scrim script running in debug mode?
'''
from __future__ import absolute_import
import os
__all__ = [
'SHELLS', 'SCRIM_SHELL', 'SCRIM_PATH', 'SCRIM_AUTO_WRITE',
'SCRIM_SCRIPT', 'SCRIM_DEBUG'
]
SHELLS = [
    'powershell.exe',
    'cmd.exe',
    'bash'
]


def _env_flag(name):
    """Read environment variable *name* as a boolean flag.

    Environment variables are always strings, so ``bool(os.environ.get(...))``
    is True for ANY non-empty value, including '0' and 'false'.  Treat the
    common "false" spellings (and an unset or empty variable) as False.
    """
    value = os.environ.get(name, '')
    return value.strip().lower() not in ('', '0', 'false', 'no', 'off')


# Shell context exported by the invoking scrim script (None / False when
# python was not launched by a scrim script).
SCRIM_SHELL = os.environ.get('SCRIM_SHELL', None)
SCRIM_PATH = os.environ.get('SCRIM_PATH', None)
# The previous bool(os.environ.get(..., False)) was True for any non-empty
# string such as '0' or 'False'; _env_flag parses the value instead.
SCRIM_AUTO_WRITE = _env_flag('SCRIM_AUTO_WRITE')
SCRIM_SCRIPT = os.environ.get('SCRIM_SCRIPT', None)
SCRIM_DEBUG = _env_flag('SCRIM_DEBUG')
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
7061,
6,
198,
25609,
28,
198,
1416,
3036,
13,
4743,
672,
874,
198,
25609,
28,
198,
7469,
1127,
9633,
3804,
656,
262,
21015,
4226,
2884,
9344,
15965,
2977,
416,
198,
14... | 2.548611 | 432 |
import csnd6
| [
11748,
50115,
358,
21,
198,
220,
220,
220,
220,
220,
220,
220,
220
] | 1.615385 | 13 |
# -*- coding: utf-8 -*-
"""Query the Google Books (JSON API v1) service for metadata."""
import logging
from .dev import stdmeta
from .dev._bouth23 import u
from .dev._exceptions import ISBNNotConsistentError, RecordMappingError
from .dev.webquery import query as wquery
UA = 'isbnlib (gzip)'
SERVICE_URL = (
'https://www.googleapis.com/books/v1/volumes?q=isbn:{isbn}'
'&fields=items/volumeInfo(title,subtitle,authors,publisher,publishedDate,'
'language,industryIdentifiers)&maxResults=1')
LOGGER = logging.getLogger(__name__)
# pylint: disable=broad-except
def _mapper(isbn, records):
    """Map the raw Google Books volume info onto the canonical record."""
    # canonical fields: ISBN-13, Title, Authors, Publisher, Year, Language
    try:
        meta = {'ISBN-13': u(isbn)}
        main_title = records.get('title', u('')).replace(' :', ':')
        sub_title = records.get('subtitle', u(''))
        if sub_title:
            main_title = main_title + ' - ' + sub_title
        meta['Title'] = main_title
        meta['Authors'] = records.get('authors', [u('')])
        # see issue #64
        meta['Publisher'] = records.get('publisher', u('')).strip('"')
        published = records.get('publishedDate', u(''))
        if len(published) >= 4:
            meta['Year'] = published[0:4]
        else:  # pragma: no cover
            meta['Year'] = u('')
        meta['Language'] = records.get('language', u(''))
    except Exception:  # pragma: no cover
        LOGGER.debug('RecordMappingError for %s with data %s', isbn, records)
        raise RecordMappingError(isbn)
    # call stdmeta for extra cleaning and validation
    return stdmeta(meta)
def _records(isbn, data):
    """Select the volume info from the parsed reply and map it canonically."""
    # Pick the relevant sub-dictionary out of the parsed JSON reply.
    try:
        volume = data['items'][0]['volumeInfo']
    except Exception:  # pragma: no cover
        # don't raise exception!
        LOGGER.debug('No data from "goob" for isbn %s', isbn)
        return {}
    if not volume:
        return {}  # pragma: no cover
    # consistency check (isbn request = isbn response)
    identifiers = volume.get('industryIdentifiers', '')
    ids_repr = repr(identifiers)
    if u('ISBN_13') in ids_repr and isbn not in ids_repr:  # pragma: no cover
        LOGGER.debug('ISBNNotConsistentError for %s (%s)', isbn, ids_repr)
        raise ISBNNotConsistentError('{0} not in {1}'.format(isbn, ids_repr))
    # map canonical <- records
    return _mapper(isbn, volume)
def query(isbn):
    """Query the Google Books (JSON API v1) service for metadata."""
    # Fetch the JSON reply for this isbn, then classify/map it.
    raw = wquery(SERVICE_URL.format(isbn=isbn), user_agent=UA)
    return _records(isbn, raw)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
20746,
262,
3012,
13661,
357,
40386,
7824,
410,
16,
8,
2139,
329,
20150,
526,
15931,
198,
198,
11748,
18931,
198,
198,
6738,
764,
7959,
1330,
14367,
28961,
198,
6... | 2.493519 | 1,080 |
# [LICENSE]
# Copyright (c) 2020, Alliance for Sustainable Energy.
# All rights reserved.
#
# Redistribution and use in source and binary forms,
# with or without modification, are permitted provided
# that the following conditions are met:
#
# 1. Redistributions of source code must retain the above
# copyright notice, this list of conditions and the
# following disclaimer.
#
# 2. Redistributions in binary form must reproduce the
# above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or
# promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# [/LICENSE]
import pytest
| [
2,
685,
43,
2149,
24290,
60,
198,
2,
15069,
357,
66,
8,
12131,
11,
10302,
329,
45276,
6682,
13,
198,
2,
1439,
2489,
10395,
13,
198,
2,
220,
198,
2,
2297,
396,
3890,
290,
779,
287,
2723,
290,
13934,
5107,
11,
220,
198,
2,
351,
... | 3.394191 | 482 |
import os
import time
from pathlib import Path
import numpy as np
import pytest
from jina import Document, DocumentArray, Executor, Flow
from jina.logging.profile import TimeContext
from jina_commons.indexers.dump import import_metas, import_vectors
from ..postgres_indexer import PostgreSQLStorage
from ..postgreshandler import doc_without_embedding
# Fixed reference embeddings used by the fixtures/tests in this module.
d_embedding = np.array([1, 1, 1, 1, 1, 1, 1])
c_embedding = np.array([2, 2, 2, 2, 2, 2, 2])
# Paths resolved relative to this test file's directory.
cur_dir = os.path.dirname(os.path.abspath(__file__))
compose_yml = os.path.abspath(os.path.join(cur_dir, 'docker-compose.yml'))
def validate_db_side(postgres_indexer, expected_data):
    """Assert every (id, vector, metadata) triple is stored in postgres."""
    expected_ids, expected_vecs, expected_metas = zip(*expected_data)
    with postgres_indexer.handler as handler:
        cursor = handler.connection.cursor()
        # Rows must come back in numeric id order for the element-wise
        # comparison below.
        cursor.execute(
            f'SELECT doc_id, embedding, doc from {postgres_indexer.table} ORDER BY '
            f'doc_id::int'
        )
        rows = cursor.fetchall()
        for i, (doc_id, vec, meta) in enumerate(
            zip(expected_ids, expected_vecs, expected_metas)
        ):
            row = rows[i]
            np.testing.assert_equal(doc_id, str(row[0]))
            stored_vec = np.frombuffer(row[1], dtype=postgres_indexer.dump_dtype)
            np.testing.assert_equal(vec, stored_vec)
            np.testing.assert_equal(meta, bytes(row[2]))
def test_config():
    """The bundled config.yml must load with the default postgres username."""
    config_path = Path(__file__).parents[1] / 'config.yml'
    executor = Executor.load_config(
        str(config_path), override_with={'dry_run': True}
    )
    assert executor.username == 'postgres'
def test_postgres_shard_distribution():
    """_vshards_to_get must split 5 virtual shards over the real shards."""
    # Three real shards: the last one takes the remainder.
    assert PostgreSQLStorage._vshards_to_get(0, 3, 5) == ['0']
    assert PostgreSQLStorage._vshards_to_get(1, 3, 5) == ['1']
    assert PostgreSQLStorage._vshards_to_get(2, 3, 5) == ['2', '3', '4']
    # A single real shard owns every virtual shard.
    assert PostgreSQLStorage._vshards_to_get(0, 1, 5) == [str(v) for v in range(5)]
    # A shard id outside the shard count is rejected.
    with pytest.raises(ValueError):
        PostgreSQLStorage._vshards_to_get(1, 1, 5)
| [
11748,
28686,
198,
11748,
640,
198,
6738,
3108,
8019,
1330,
10644,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
12972,
9288,
198,
6738,
474,
1437,
1330,
16854,
11,
16854,
19182,
11,
8393,
38409,
11,
27782,
198,
6738,
474,
1437,
... | 2.383929 | 784 |
from django.db import models
# Create your models here.
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
628,
198,
2,
13610,
534,
4981,
994,
13,
628,
628,
628,
198
] | 3.368421 | 19 |
from WhatSender.sender import SendMessage
| [
6738,
1867,
50,
2194,
13,
82,
2194,
1330,
16290,
12837,
198
] | 3.818182 | 11 |
import pickle
from time import sleep
import googleapiclient.errors
from transliterate import translit
from logs.logging import get_logger
from api_google.google_api_sheets import get_sheets_service, get_multiple_ranges
from api_google.google_api_directory import get_directory_service, get_users_for_domain, \
get_groups_for_domain, create_group, add_user_to_group
from api_google.google_api_groupsettings import get_groupsettings_service, \
get_group_settings, update_group_settings
from config.config import sync_sheets_and_groups, path_data_directory
# Script entry point.
# NOTE(review): main() is not defined in this visible part of the file —
# confirm it is defined elsewhere in the module before running as a script.
if __name__ == '__main__':
    main()
| [
11748,
2298,
293,
198,
6738,
640,
1330,
3993,
198,
198,
11748,
23645,
499,
291,
75,
1153,
13,
48277,
198,
6738,
4779,
2676,
378,
1330,
4779,
270,
198,
198,
6738,
17259,
13,
6404,
2667,
1330,
651,
62,
6404,
1362,
198,
6738,
40391,
62,
... | 3.235294 | 187 |
# -*- coding: utf-8 -*-
################################################################################
## Form generated from reading UI file 'gui.ui'
##
## Created by: Qt User Interface Compiler version 5.15.2
##
## WARNING! All changes made in this file will be lost when recompiling UI file!
################################################################################
from PySide2.QtCore import *
from PySide2.QtGui import *
from PySide2.QtWidgets import *
from .matplotlibwidget import MatplotlibWidget
from .icons_rc import *
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
29113,
29113,
14468,
198,
2235,
5178,
7560,
422,
3555,
12454,
2393,
705,
48317,
13,
9019,
6,
198,
2235,
198,
2235,
15622,
416,
25,
33734,
11787,
26491,
3082,
5329,
... | 4.052239 | 134 |
#!/usr/bin/env python
# Copyright (c) 2020 Computer Vision Center (CVC) at the Universitat Autonoma de
# Barcelona (UAB).
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
"""
Lidar/BB check for CARLA
This script obtains the LiDAR's point cloud corresponding to all the vehicles
of the scene and make sure that they are inside the bounding box of the
corresponding actor.
This is done in a predefined route in Town03 with a high speed and several agressive
turns.
In a nutshell, the script have a queue that is filled in each frame with a lidar point
cloud and an structure for storing the Bounding Boxes. This last one is emulated as a
sensor filling the queue in the on_tick callback of the carla.world. In this way, we make
sure that we are correctly syncronizing the lidar point cloud and BB/actor transformations.
Then, we select the points corresponding to each actor (car) in the scene and check they
are inside the bounding boxes of that actor, all in each vehicle frame of reference.
Important Data structure description:
+ Lidar data structure: four element tuple with:
- [0] Frame
- [1] Sensor name: 'semlidar'
- [2] Point cloud in the form of a numpy dictionary with all semantic lidar information
- [3] Global transformation of the sensor
+ Bounding box data structure: four element tuple with:
- [0] Frame
- [1] Sensor name: 'bb'
- [2] List of actor information: each a tuple with:
- [0] Actor id
- [1] Actor type (blueprint's name)
- [0] Actor's global transformation
- [0] Actor's bounding box
+ ActorTrace class: Takes the Lidar data structure and one actor information and
check if all the data points related with this actor are inside its BB.
This is done in the local coordinate frame of the actor and should be done like:
trace = ActorTrace(actor_info, lidar_data)
trace.process()
trace.check_lidar_data()
"""
import glob
import os
import sys
import numpy as np
from queue import Queue
from queue import Empty
# Make the CARLA Python API importable: find the .egg built for the running
# interpreter version and platform and append it to sys.path.
try:
    sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % (
        sys.version_info.major,
        sys.version_info.minor,
        'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])
except IndexError:
    # No matching egg found — fall through and rely on an installed carla
    # package instead (the `import carla` below fails otherwise).
    pass
import carla
# Sensor callback.
# This is where you receive the sensor data and
# process it as you liked and the important part is that,
# at the end, it should include an element into the sensor queue.
# Scene description: the vehicles to spawn for the Lidar/bounding-box
# consistency check (spawn location, heading, blueprint filter, autopilot).
# NOTE(review): SpawnCar is not defined in this visible part of the file —
# presumably a small helper/record type; confirm against the full source.
CarPropList = [
    SpawnCar(carla.Location(x=83, y= -40, z=5), carla.Rotation(yaw=-90), filter= "*lincoln*", autopilot=True),
    SpawnCar(carla.Location(x=83, y= -30, z=3), carla.Rotation(yaw=-90), filter= "*ambulance*", autopilot=True),
    SpawnCar(carla.Location(x=83, y= -20, z=3), carla.Rotation(yaw=-90), filter= "*etron*", autopilot=True),
    SpawnCar(carla.Location(x=120, y= -3.5, z=2), carla.Rotation(yaw=+180), filter= "*isetta*", autopilot=True),
    SpawnCar(carla.Location(x=100, y= -3.5, z=2), carla.Rotation(yaw=+180), filter= "*etron*", autopilot=True),
    SpawnCar(carla.Location(x=140, y= -3.5, z=2), carla.Rotation(yaw=+180), filter= "*model3*", autopilot=True),
    SpawnCar(carla.Location(x=160, y= -3.5, z=2), carla.Rotation(yaw=+180), filter= "*impala*", autopilot=False),
    SpawnCar(carla.Location(x=180, y= -3.5, z=2), carla.Rotation(yaw=+180), filter= "*a2*", autopilot=True),
    SpawnCar(carla.Location(x=60, y= +6, z=2), carla.Rotation(yaw=+00), filter= "*model3*", autopilot=True),
    SpawnCar(carla.Location(x=80, y= +6, z=2), carla.Rotation(yaw=+00), filter= "*etron*", autopilot=True),
    SpawnCar(carla.Location(x=100, y= +6, z=2), carla.Rotation(yaw=+00), filter= "*mustan*", autopilot=True),
    SpawnCar(carla.Location(x=120, y= +6, z=2), carla.Rotation(yaw=+00), filter= "*isetta*", autopilot=True),
    SpawnCar(carla.Location(x=140, y= +6, z=2), carla.Rotation(yaw=+00), filter= "*impala*", autopilot=True),
    SpawnCar(carla.Location(x=160, y= +6, z=2), carla.Rotation(yaw=+00), filter= "*prius*", autopilot=True),
    SpawnCar(carla.Location(x=234, y= +20,z=2), carla.Rotation(yaw=+90), filter= "*dodge*", autopilot=True),
    SpawnCar(carla.Location(x=234, y= +40,z=2), carla.Rotation(yaw=+90), filter= "*isetta*", autopilot=True),
    SpawnCar(carla.Location(x=234, y= +80,z=2), carla.Rotation(yaw=+90), filter= "*tt*", autopilot=True),
    SpawnCar(carla.Location(x=243, y= -40,z=2), carla.Rotation(yaw=-90), filter= "*etron*", autopilot=True),
    SpawnCar(carla.Location(x=243, y= -20,z=2), carla.Rotation(yaw=-90), filter= "*mkz2017*", autopilot=True),
    SpawnCar(carla.Location(x=243, y= +00,z=2), carla.Rotation(yaw=-90), filter= "*mustan*", autopilot=True),
    SpawnCar(carla.Location(x=243, y= +20,z=2), carla.Rotation(yaw=-90), filter= "*dodge*", autopilot=True),
    SpawnCar(carla.Location(x=243, y= +40,z=2), carla.Rotation(yaw=-90), filter= "*charger2020*", autopilot=True),
    SpawnCar(carla.Location(x=243, y= +60,z=2), carla.Rotation(yaw=-90), filter= "*lincoln2020*", autopilot=True),
    SpawnCar(carla.Location(x=243, y= +80,z=2), carla.Rotation(yaw=-90), filter= "*tt*", autopilot=True),
    SpawnCar(carla.Location(x=243, y=+100,z=2), carla.Rotation(yaw=-90), filter= "*a2*", autopilot=True),
    SpawnCar(carla.Location(x=243, y=+120,z=2), carla.Rotation(yaw=-90), filter= "*wrangler_rubicon*", autopilot=True),
    SpawnCar(carla.Location(x=243, y=+140,z=2), carla.Rotation(yaw=-90), filter= "*c3*", autopilot=True)
]
# Script entry point; Ctrl-C exits cleanly instead of dumping a traceback.
# NOTE(review): main() is not defined in this visible part of the file.
if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        print(' - Exited by user.')
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
2,
15069,
357,
66,
8,
12131,
13851,
19009,
3337,
357,
34,
15922,
8,
379,
262,
26986,
270,
265,
5231,
261,
6086,
390,
198,
2,
15142,
357,
52,
6242,
737,
198,
2,
198,
2,
770,
... | 2.481322 | 2,329 |
#!/usr/bin/env python3
""" Text files created on DOS/Windows machines have different line endings than
files created on Unix/Linux. DOS uses carriage return and new line ("\r\n")
as a line ending, while Unix uses just new line ("\n"). The purpose of this
script is to have a quick, on the go, shell friendly solution to convert one
to the other.
"""
import sys
import argparse
def main():
    """Parse the command line and convert every file named on it."""
    # Suppress traceback noise so users only see the exception message.
    sys.tracebacklimit = 0
    options = commands()
    for path in options.filenames:
        convert(path, options.dos)
def commands():
    """Build the argument parser and return the parsed command line.

    Returns:
        argparse.Namespace with ``dos`` (bool) and ``filenames`` (list of str).
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        '-dos', action='store_true', help="converts file to DOS")
    arg_parser.add_argument(
        'filenames', metavar='filename', type=str, nargs='+',
        help="file to be converted")
    return arg_parser.parse_args()
def convert(filename, flag):
    """Convert the file's line endings in place.

    Args:
        filename (string): the file being converted
        flag (bool): defaults to UNIX. If flag is true, converts line
            endings to DOS (CRLF); otherwise to Unix (LF).
    """
    unix, dos = '\n', '\r\n'
    style = 'UNIX'
    with open(filename, 'rb') as f:
        content = f.read().decode('UTF-8')
    # Normalize to Unix endings first so that converting an already-DOS
    # file to DOS does not produce "CR CR LF" (the old code replaced LF
    # with CRLF without stripping an existing CR), and so the conversion
    # is idempotent in both directions.
    content = content.replace(dos, unix)
    if flag:
        style = 'DOS'
        content = content.replace(unix, dos)
    with open(filename, 'wb') as f:
        f.write(content.encode('UTF-8'))
    print("converting file '{}' to {} ...".format(filename, style))
# Script entry point.
if __name__ == '__main__':
    main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
37811,
8255,
3696,
2727,
319,
43036,
14,
11209,
8217,
423,
1180,
1627,
38168,
621,
198,
220,
220,
220,
3696,
2727,
319,
33501,
14,
19314,
13,
43036,
3544,
25739,
1441,
290,
64... | 2.585612 | 695 |
import sys
from moviepy.editor import *
# Extract the audio track from the video given as argv[1] and write it to
# the audio file path given as argv[2].
video_clip = VideoFileClip(sys.argv[1])
video_clip.audio.write_audiofile(sys.argv[2])
| [
11748,
25064,
198,
6738,
3807,
9078,
13,
35352,
1330,
1635,
198,
198,
15036,
796,
7623,
8979,
2601,
541,
7,
17597,
13,
853,
85,
58,
16,
12962,
198,
198,
31330,
38679,
541,
796,
10651,
13,
24051,
198,
31330,
38679,
541,
13,
13564,
62,
... | 2.603774 | 53 |
# -*- coding: utf-8 -*-
"""Mix-in classes for `Qt` types."""
from ._mixins import (
OQAbstractItemModelMixin,
OQAbstractItemViewMixin,
OQObjectMixin,
OQWidgetMixin,
)
from ._views import OQListViewMixin
# Public API of this package: the re-exported mix-in classes.
__all__ = [
    "OQObjectMixin",
    "OQWidgetMixin",
    "OQAbstractItemModelMixin",
    "OQAbstractItemViewMixin",
    "OQListViewMixin",
]
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
35608,
12,
259,
6097,
329,
4600,
48,
83,
63,
3858,
526,
15931,
198,
198,
6738,
47540,
19816,
1040,
1330,
357,
198,
220,
220,
220,
440,
48,
23839,
7449,
17633,
3... | 2.240741 | 162 |
#-*-coding:utf8-*-
import logging
from datetime import datetime
| [
2,
12,
9,
12,
66,
7656,
25,
40477,
23,
12,
9,
12,
198,
11748,
18931,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198
] | 2.782609 | 23 |
# Oliver Keen
# Software Engineering 001
# jumble_solver.py
# 2/17/2021
# Assignment:
# Consider the game "Jumble"
# https://www.sandiegouniontribune.com/sd-jumble-daily-htmlstory.html
# Create a Python program to find the individual words in Jumble puzzles such
# that INJURE prints after entering the following: solve("JNUIER")
from PyDictionary import PyDictionary # Installation: pip install PyDictionary
from math import factorial
from random import shuffle
| [
2,
15416,
32444,
198,
2,
10442,
14044,
3571,
16,
198,
2,
474,
10344,
62,
82,
14375,
13,
9078,
198,
2,
362,
14,
1558,
14,
1238,
2481,
198,
198,
2,
50144,
25,
198,
2,
12642,
262,
983,
366,
41,
10344,
1,
198,
2,
3740,
1378,
2503,
... | 3.549618 | 131 |
# Copyright (c) 2018 Mycroft AI, Inc.
#
# This file is part of Mycroft Skills Manager
# (see https://github.com/MycroftAI/mycroft-skills-manager).
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
import logging
import os
import shutil
import subprocess
import yaml
from contextlib import contextmanager
from difflib import SequenceMatcher
from functools import wraps
from git import Repo, GitError
from git.exc import GitCommandError
from lazy import lazy
from os.path import exists, join, basename, dirname, isfile
from shutil import rmtree, move
from subprocess import PIPE, Popen
from tempfile import mktemp, gettempdir
from threading import Lock
from typing import Callable
from pako import PakoManager
from msm import SkillRequirementsException, git_to_msm_exceptions
from msm.exceptions import PipRequirementsException, \
SystemRequirementsException, AlreadyInstalled, SkillModified, \
AlreadyRemoved, RemoveException, CloneException, NotInstalled, GitException
from msm.util import cached_property, Git
# Module-level logger for skill-entry operations.
LOG = logging.getLogger(__name__)
# Branches which can be switched from when updating.
# TODO Make this configurable
SWITCHABLE_BRANCHES = ['master']
# Default pip constraints file used when none is given explicitly.
DEFAULT_CONSTRAINTS = '/etc/mycroft/constraints.txt'
# Five minutes, in seconds.
FIVE_MINUTES = 300
def _backup_previous_version(func: Callable = None):
    """Private decorator to back up previous skill folder.

    NOTE(review): ``wrapper`` is not defined in this visible part of the
    file — the decorator's inner function appears to be missing from this
    excerpt, so calling this as shown would raise NameError.  Confirm
    against the full source before editing.
    """
    return wrapper
def attach(self, remote_entry):
    """Copy the identifying fields of a remote entry onto this local entry.

    Returns self so the call can be chained.
    """
    for field in ('name', 'sha', 'url', 'author'):
        setattr(self, field, getattr(remote_entry, field))
    return self
def match(self, query, author=None):
    """Score how well this entry matches *query* (and optionally *author*).

    Returns a weighted similarity value; higher means a better match.
    """
    stop_words = ['skill', 'fallback', 'mycroft']
    search, search_tokens, search_common = self._extract_tokens(
        query, stop_words
    )
    name, name_tokens, name_common = self._extract_tokens(
        self.name, stop_words
    )
    # Weighted similarity components: full-name comparisons dominate,
    # shared "common" words contribute only a little.
    weights = [
        (9, self._compare(name, search)),
        (9, self._compare(name.split(' '), search_tokens)),
        (2, self._compare(name_common, search_common)),
    ]
    if author:
        author_weight = self._compare(self.author, author)
        weights.append((5, author_weight))
    else:
        author_weight = 1.0
    weighted_sum = sum(weight * value for weight, value in weights)
    total_weight = sum(weight for weight, value in weights)
    return author_weight * (weighted_sum / total_weight)
def run_pip(self, constraints=None):
    """Install this skill's Python dependencies one by one via pip.

    Args:
        constraints: optional path to a pip constraints file limiting the
            versions pip may install.
    Returns:
        True when packages were installed; False when there is nothing to
        install or the given constraints file does not exist.
    Raises:
        PipRequirementsException: when a pip invocation fails.
    """
    if not self.dependent_python_packages:
        return False
    # Use constraints to limit the installed versions
    # NOTE(review): when DEFAULT_CONSTRAINTS exists it replaces a
    # caller-supplied *constraints* path as well — confirm this override
    # of the explicit argument is intended.
    if constraints and not exists(constraints):
        LOG.error('Couldn\'t find the constraints file')
        return False
    elif exists(DEFAULT_CONSTRAINTS):
        constraints = DEFAULT_CONSTRAINTS
    LOG.info('Installing requirements.txt for ' + self.name)
    # Can we write to the interpreter's bin directory, or is sudo needed?
    can_pip = os.access(dirname(sys.executable), os.W_OK | os.X_OK)
    pip_args = [sys.executable, '-m', 'pip', 'install']
    if constraints:
        pip_args += ['-c', constraints]
    if not can_pip:
        # -n: non-interactive sudo — fail instead of prompting for a password.
        pip_args = ['sudo', '-n'] + pip_args
    with self.pip_lock:
        """
        Iterate over the individual Python packages and
        install them one by one to enforce the order specified
        in the manifest.
        """
        for dependent_python_package in self.dependent_python_packages:
            pip_command = pip_args + [dependent_python_package]
            proc = Popen(pip_command, stdout=PIPE, stderr=PIPE)
            pip_code = proc.wait()
            if pip_code != 0:
                stderr = proc.stderr.read().decode()
                # Distinguish a sudo permission failure from a real pip error.
                if pip_code == 1 and 'sudo:' in stderr and pip_args[0] == 'sudo':
                    raise PipRequirementsException(
                        2, '', 'Permission denied while installing pip '
                        'dependencies. Please run in virtualenv or use sudo'
                    )
                raise PipRequirementsException(
                    pip_code, proc.stdout.read().decode(), stderr
                )
    return True
def install_system_deps(self):
    """Install system packages and verify the required executables exist.

    Returns True when the package manager reported success.  Raises
    SkillRequirementsException when required executables are still
    missing afterwards.
    """
    self.run_requirements_sh()
    # Per-package-manager package lists, split into individual names.
    overrides = {}
    for exe, packages in self.dependent_system_packages.items():
        overrides[exe] = (packages or '').split()
    LOG.info('Installing system requirements...')
    # 'all' holds the manager-agnostic package list.
    common_packages = overrides.pop('all', [])
    try:
        success = PakoManager().install(common_packages, overrides=overrides)
    except RuntimeError as e:
        LOG.warning('Failed to launch package manager: {}'.format(e))
        success = False
    required_exes = self.dependencies.get('exes') or []
    missing_exes = [exe for exe in required_exes if not shutil.which(exe)]
    if missing_exes:
        if not success:
            LOG.warning('Failed to install dependencies.')
            if common_packages:
                LOG.warning('Please install manually: {}'.format(
                    ' '.join(common_packages)
                ))
        raise SkillRequirementsException('Could not find exes: {}'.format(
            ', '.join(missing_exes)
        ))
    return success
def run_requirements_sh(self):
    """Run the skill's requirements.sh, if present, from the skill folder.

    Returns False when there is no script, True on success.  Raises
    SystemRequirementsException when the script exits non-zero.
    """
    script = join(self.path, "requirements.sh")
    if not exists(script):
        return False
    with work_dir(self.path):
        exit_code = subprocess.call(["bash", script])
    if exit_code != 0:
        LOG.error("Requirements.sh failed with error code: " + str(exit_code))
        raise SystemRequirementsException(exit_code)
    LOG.info("Successfully ran requirements.sh for " + self.name)
    return True
def run_skill_requirements(self):
    """Install every skill this skill depends on via the attached msm.

    Raises:
        ValueError: when no msm instance was attached to this entry.
        SkillRequirementsException: when installing a dependency fails.
    """
    if not self.msm:
        raise ValueError('Pass msm to SkillEntry to install skill deps')
    try:
        for dependency in self.dependent_skills:
            LOG.info("Installing skill dependency: {}".format(dependency))
            try:
                self.msm.install(dependency)
            except AlreadyInstalled:
                # Already present — nothing to do for this dependency.
                pass
    except Exception as e:
        raise SkillRequirementsException(e)
def verify_info(self, info, fmt):
    """Recursively warn about manifest.yml values that don't match *fmt*.

    Only logs warnings; never raises and returns nothing.
    """
    if not info:
        return
    if not isinstance(info, type(fmt)):
        LOG.warning('Invalid value type manifest.yml for {}: {}'.format(
            self.name, type(info)
        ))
        return
    # Only dictionaries are checked key-by-key; leaves stop the recursion.
    if not isinstance(info, dict) or not fmt:
        return
    for key, value in info.items():
        if key in fmt:
            self.verify_info(value, fmt[key])
        else:
            LOG.warning('Unknown key in manifest.yml for {}: {}'.format(
                self.name, key
            ))
def __repr__(self):
    """Debug representation showing name, author and locality."""
    details = ' '.join(
        '{}={}'.format(field, self.__dict__[field])
        for field in ['name', 'author', 'is_local']
    )
    return '<SkillEntry {}>'.format(details)
| [
2,
15069,
357,
66,
8,
2864,
2011,
36714,
9552,
11,
3457,
13,
198,
2,
198,
2,
770,
2393,
318,
636,
286,
2011,
36714,
20389,
9142,
198,
2,
357,
3826,
3740,
1378,
12567,
13,
785,
14,
3666,
36714,
20185,
14,
1820,
36714,
12,
8135,
217... | 2.31523 | 3,480 |
"""
SleekXMPP: The Sleek XMPP Library
Copyright (C) 2012 Nathanael C. Fritz, Lance J.T. Stout
This file is part of SleekXMPP.
See the file LICENSE for copying permission.
"""
from sleekxmpp.xmlstream import ElementBase
| [
37811,
198,
220,
220,
220,
19498,
988,
55,
7378,
47,
25,
383,
19498,
988,
1395,
7378,
47,
10074,
198,
220,
220,
220,
15069,
357,
34,
8,
2321,
32607,
2271,
417,
327,
13,
45954,
11,
18990,
449,
13,
51,
13,
40275,
198,
220,
220,
220,... | 2.811765 | 85 |
#tshark -r input.pcap -qz "follow,tcp,raw,0"
import struct
import sys
import binascii
import subprocess
# Replay the first TCP stream of the pcap given as argv[1] through tshark's
# "follow" facility and re-encode it in a compact "FPC" framing on stdout.
result = subprocess.Popen( ["tshark", "-r", sys.argv[1], "-qz", "follow,tcp,raw,0"],
        stdout=subprocess.PIPE)
# Magic header of the output format.
sys.stdout.buffer.write(b"FPC\x80")
# Skip the first four header lines of tshark's follow output.
for i in range(4):
    result.stdout.readline()
# Next two lines presumably describe the two endpoints; the third
# ':'-separated field is taken as the port — TODO confirm against the
# tshark "follow,tcp" output format.
dp=result.stdout.readline().split(b":")[2]
sp=result.stdout.readline().split(b":")[2]
# Source then destination port as big-endian 16-bit values.
sys.stdout.buffer.write(struct.pack('>H', int(sp)))
sys.stdout.buffer.write(struct.pack('>H', int(dp)))
for l in result.stdout.readlines():
    # Lines starting with a TAB (0x09) are treated as server-to-client.
    s2c = 0
    if l[0] == 9:
        l = l[1:]
        s2c = 1
    try:
        # Payload is hex-encoded; drop the trailing newline before decoding.
        r = binascii.unhexlify(l[:-1])
    except:
        # Not a hex payload line (summary/footer) — skip it.
        continue
    # One direction byte followed by the raw payload bytes.
    sys.stdout.buffer.write(struct.pack('>B', int(s2c)))
    sys.stdout.buffer.write(r)
# Trailer marker.
sys.stdout.buffer.write(b"FPC0")
| [
2,
912,
71,
668,
532,
81,
5128,
13,
79,
11128,
532,
80,
89,
366,
27780,
11,
83,
13155,
11,
1831,
11,
15,
1,
198,
11748,
2878,
198,
11748,
25064,
198,
11748,
9874,
292,
979,
72,
198,
11748,
850,
14681,
628,
198,
20274,
796,
850,
... | 2.047382 | 401 |
"""
Created: November 11, 2020
Author: Kyle Koeller
Python Version 3.9
This program is meant to make the process of collecting the different filters from AIJ excel spreadsheets faster.
The user enters however many nights they have and the program goes through and checks those text files for the
different columns for,HJD, Amag, and Amag error for the B and V filters.
The program will also calculate the R magnitude from the rel flux of T1.
There are error catching statements within the program so if the user mistypes, the program will not crash and
close on them.
"""
import pandas as pd
from os import path
def get_filters(n):
    """
    Collects the HJD, AMag, and AMag error columns from n nights of AIJ
    measurement files, flattens them across nights, and writes the combined
    table to a user-named tab-separated text file (for use in Peranso or
    PHOEBE).  File paths and the output name are read interactively.

    :param n: Number of observation nights
    :return: None; the combined data is written to disk
    """
    total_hjd = []
    total_amag = []
    total_error = []
    # checks for either the b, v, r filter as either upper or lowercase will work
    for i in range(n):
        while True:
            # makes sure the file pathway is real and points to some file
            # (does not check if that file is the correct one though)
            try:
                # an example pathway for the files
                # E:\Research\Data\NSVS_254037\2018.10.12-reduced\Check\V\2018.10.12.APASS.V_measurements.txt
                file = input("Enter night %d file path: " % (i+1))
                if path.exists(file):
                    break
                else:
                    continue
            except FileNotFoundError:
                # NOTE(review): neither input() nor path.exists() raises
                # FileNotFoundError, so this handler appears unreachable.
                print("Please enter a correct file path")
        # noinspection PyUnboundLocalVariable
        df = pd.read_csv(file, delimiter="\t")
        # set parameters to lists from the file by the column header
        hjd = []
        amag = []
        amag_error = []
        try:
            hjd = list(df["HJD"])
            amag = list(df["Source_AMag_T1"])
            amag_error = list(df["Source_AMag_Err_T1"])
        except KeyError:
            print("The file you entered does not have the columns of HJD, Source_AMag_T1, or Source_AMag_Err_T1. "
                  "Please re-enter the file path and make sure its the correct file.")
            c = 1
            # NOTE(review): main() is defined elsewhere in the file; this
            # restarts the whole collection rather than re-prompting for
            # just this night — confirm that is intended.
            main(c)
        total_hjd.append(hjd)
        total_amag.append(amag)
        total_error.append(amag_error)
    # converts the Dataframe embedded lists into a normal flat list
    new_hjd = [item for elem in total_hjd for item in elem]
    new_amag = [item for elem in total_amag for item in elem]
    new_error = [item for elem in total_error for item in elem]
    # outputs the new file to dataframe and then into a text file for use in Peranso or PHOEBE
    data = pd.DataFrame({
        "HJD": new_hjd,
        "AMag": new_amag,
        "AMag Error": new_error
    })
    print("")
    output = input("What is the file output name (with file extension .txt): ")
    data.to_csv(output, index=False, header=False, sep='\t')
    print("")
    print("Fished saving the file to the same location as this program.")
# Press the green button in the gutter to run the script.
# NOTE(review): main() is not defined in this visible part of the file —
# presumably it prompts for the night count and calls get_filters; confirm.
if __name__ == '__main__':
    count = 0
    main(count)
| [
37811,
198,
41972,
25,
3389,
1367,
11,
12131,
198,
13838,
25,
14316,
17634,
12368,
198,
37906,
10628,
513,
13,
24,
198,
198,
1212,
1430,
318,
4001,
284,
787,
262,
1429,
286,
13157,
262,
1180,
16628,
422,
9552,
41,
27336,
4104,
42011,
... | 2.47705 | 1,329 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-09-02 05:23
from __future__ import unicode_literals
from django.db import migrations
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
1157,
13,
16,
319,
2177,
12,
2931,
12,
2999,
8870,
25,
1954,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
... | 2.690909 | 55 |
# Width, in characters, that break_lines wraps to — presumably the in-game
# textbox width; TODO confirm.
CHARACTERS_PER_LINE = 39
# Manual smoke test of the line breaker.
# NOTE(review): break_lines is not defined in this visible part of the file.
if __name__ == '__main__':
    print(break_lines('The <y<Spirit of the Sword>> guides the goddess\' chosen hero to <r<Skyloft Village>>'))
    print(break_lines('Hey, you look like you have a Questions?'))
    print(break_lines('Skyloft Peater/Peatrice\'s Crystals has Bug Net'))
| [
38019,
10659,
4877,
62,
18973,
62,
24027,
796,
5014,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
3601,
7,
9032,
62,
6615,
10786,
464,
1279,
88,
27,
41910,
286,
262,
11535,
4211,
17555,
... | 2.851852 | 108 |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
# Script entry point; the exit status is propagated from main().
# NOTE(review): main() is not defined in this visible part of the file.
if __name__ == '__main__':
    sys.exit(main())
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
15069,
357,
66,
8,
2321,
383,
18255,
1505,
46665,
13,
1439,
2489,
10395,
13,
198,
2,
5765,
286,
428,
2723,
2438,
318,
21825,
416,
257,
347,
10305,
12,
7635,
5964,
326,
460,
307,
... | 3.197531 | 81 |
"""Loading MNIST dataset.
"""
import struct
import numpy as np
# Manual smoke test: point at the local MNIST data and run the self-checks.
# NOTE(review): _test and _test_numpy are not defined in this visible part
# of the file — confirm they exist at module level.
if __name__ == "__main__":
    import random
    import matplotlib.pyplot as plt
    # Local MNIST data
    MNIST_PATH = "../../machine-learning/data/mnist/"
    _test()
    _test_numpy()
| [
37811,
19031,
29060,
8808,
27039,
13,
198,
37811,
198,
198,
11748,
2878,
198,
11748,
299,
32152,
355,
45941,
628,
628,
628,
628,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
1330,
4738,
198,
2... | 2.578431 | 102 |
import threading, queue, time, os, pickle
# from queue import Queue
import numpy as np
import tensorflow as tf
import sarnet_td3.common.tf_util as U
from tensorflow.python.keras.backend import set_session
# Guards state shared between the reinforcement-learning threads below.
lock = threading.Lock()
"""
REINFORCE Threads
"""
| [
11748,
4704,
278,
11,
16834,
11,
640,
11,
28686,
11,
2298,
293,
198,
2,
422,
16834,
1330,
4670,
518,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
264,
1501,
316,
62,
8671,
18,
13,
11321,
... | 2.965909 | 88 |
from flask import Flask, request, render_template
from sklearn.externals import joblib
from feature import *
pipeline = joblib.load('pipeline.sav')
app = Flask(__name__)
if __name__ == '__main__':
app.run(port=8080, debug=True)
| [
6738,
42903,
1330,
46947,
11,
2581,
11,
8543,
62,
28243,
198,
6738,
1341,
35720,
13,
1069,
759,
874,
1330,
1693,
8019,
198,
6738,
3895,
1330,
1635,
628,
198,
79,
541,
4470,
796,
1693,
8019,
13,
2220,
10786,
79,
541,
4470,
13,
39308,
... | 2.833333 | 84 |
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
| [
2,
30396,
329,
1702,
306,
12,
25614,
1351,
13,
198,
2,
1398,
7343,
19667,
25,
198,
2,
220,
220,
220,
220,
825,
11593,
15003,
834,
7,
944,
11,
2124,
2599,
198,
2,
220,
220,
220,
220,
220,
220,
220,
220,
2116,
13,
2100,
796,
2124,... | 2.193548 | 62 |
# -*- coding: utf-8 -*-
"""
Create: 2014/5/20
Update: 2017/11/22
"""
from .WXError import *
from .WXMenu import *
from .WXUtils import *
from .event import *
from .request import MPCenter
__date__ = '2017/3/12'
__version__ = '1.0.1'
__license__ = 'The MIT License'
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
198,
16447,
25,
1946,
14,
20,
14,
1238,
198,
10260,
25,
2177,
14,
1157,
14,
1828,
198,
37811,
198,
198,
6738,
764,
54,
55,
12331,
1330,
1635,
198,
6738,
... | 2.458716 | 109 |
def is_field(token):
"""Checks if the token is a valid ogc type field
"""
return token in ["name", "description", "encodingType", "location", "properties", "metadata",
"definition", "phenomenonTime", "resultTime", "observedArea", "result", "id", "@iot.id",
"resultQuality", "validTime", "time", "parameters", "feature"]
def tokenize_parentheses(tokens):
""" Finds non parsed parentheses in tokens (ex.: ['x(y']['z)'] -> ['x']['(']['y']['z'][')']
:param tokens: a list of tokens
:return: the list with unchecked parenteses tokenized
"""
for index, token in enumerate(tokens):
if ("(" in token or ")" in token) and len(token) > 1:
parenthesis_index = token.find("(")
parenthesis = "("
if parenthesis_index < 0:
parenthesis_index = token.find(")")
parenthesis = ")"
left_side = token[:parenthesis_index]
right_side = token[parenthesis_index + 1:]
del tokens[index]
if bool(left_side):
tokens.insert(index, left_side)
index += 1
tokens.insert(index, parenthesis)
if bool(right_side):
index += 1
tokens.insert(index, right_side)
| [
4299,
318,
62,
3245,
7,
30001,
2599,
198,
220,
220,
220,
37227,
7376,
4657,
611,
262,
11241,
318,
257,
4938,
267,
36484,
2099,
2214,
198,
220,
220,
220,
37227,
628,
220,
220,
220,
1441,
11241,
287,
14631,
3672,
1600,
366,
11213,
1600,... | 2.18802 | 601 |
# -*- coding: utf-8 -*-
'''
A runner to access data from the salt mine
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import Python Libs
import logging
# Import salt libs
import salt.utils.minions
log = logging.getLevelName(__name__)
def get(tgt, fun, tgt_type='glob'):
'''
Gathers the data from the specified minions' mine, pass in the target,
function to look up and the target type
CLI Example:
.. code-block:: bash
salt-run mine.get '*' network.interfaces
'''
ret = salt.utils.minions.mine_get(tgt, fun, tgt_type, __opts__)
return ret
def update(tgt,
tgt_type='glob',
clear=False,
mine_functions=None):
'''
.. versionadded:: 2017.7.0
Update the mine data on a certain group of minions.
tgt
Which minions to target for the execution.
tgt_type: ``glob``
The type of ``tgt``.
clear: ``False``
Boolean flag specifying whether updating will clear the existing
mines, or will update. Default: ``False`` (update).
mine_functions
Update the mine data on certain functions only.
This feature can be used when updating the mine for functions
that require refresh at different intervals than the rest of
the functions specified under ``mine_functions`` in the
minion/master config or pillar.
CLI Example:
.. code-block:: bash
salt-run mine.update '*'
salt-run mine.update 'juniper-edges' tgt_type='nodegroup'
'''
ret = __salt__['salt.execute'](tgt,
'mine.update',
tgt_type=tgt_type,
clear=clear,
mine_functions=mine_functions)
return ret
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
7061,
6,
198,
32,
17490,
284,
1895,
1366,
422,
262,
8268,
6164,
198,
7061,
6,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
11,
3601,
62,
8818,
11,
28000,
1098,... | 2.340153 | 782 |
from plaid import Client
from backend.link_token import LinkToken
from general_falcon_webserver import WebApp
client = Client(client_id='5e2e3527dd6924001167e8e8', secret='0b89f518880456b6f60020f481b3d7', environment='sandbox')
app = WebApp()
app.add_route('link', LinkToken(client))
app.launch_webserver()
| [
6738,
458,
1698,
1330,
20985,
198,
198,
6738,
30203,
13,
8726,
62,
30001,
1330,
7502,
30642,
198,
6738,
2276,
62,
42932,
1102,
62,
732,
1443,
18497,
1330,
5313,
4677,
198,
198,
16366,
796,
20985,
7,
16366,
62,
312,
11639,
20,
68,
17,
... | 2.689655 | 116 |
"""If you're using Mu, this example will plot the light levels from the light sensor (located next
to the eye) on your Circuit Playground. Try shining a flashlight on your Circuit Playground, or
covering the light sensor to see the plot increase and decrease."""
import time
from adafruit_circuitplayground import cp
while True:
print("Light:", cp.light)
print((cp.light,))
time.sleep(0.1)
| [
37811,
1532,
345,
821,
1262,
8252,
11,
428,
1672,
481,
7110,
262,
1657,
2974,
422,
262,
1657,
12694,
357,
75,
10533,
1306,
198,
1462,
262,
4151,
8,
319,
534,
13588,
3811,
2833,
13,
9993,
22751,
257,
38371,
319,
534,
13588,
3811,
2833,... | 3.474138 | 116 |
"""Exception utilities."""
| [
37811,
16922,
20081,
526,
15931,
628,
628,
628
] | 4 | 8 |
#
# Copyright 2018, 2020 Lars Pastewka
# 2019-2020 Antoine Sanner
# 2015-2016 Till Junge
#
# ### MIT license
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
"""
Tests to understand the difficulties in extracting hurst from noisy data
"""
import numpy as np
import scipy
import matplotlib.pyplot as plt
import PyCo.Tools as Tools
import SurfaceTopography as Surf
if __name__ == "__main__":
main()
plt.show()
| [
2,
198,
2,
15069,
2864,
11,
12131,
31239,
11303,
413,
4914,
198,
2,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
13130,
12,
42334,
3738,
42722,
2986,
1008,
198,
2,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
1853,
12,
... | 3.502415 | 414 |
import threading
import requests
import json
import os
from nose.tools import *
from server import Httpd
app_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "app")
| [
11748,
4704,
278,
198,
11748,
7007,
198,
11748,
33918,
198,
11748,
28686,
198,
6738,
9686,
13,
31391,
1330,
1635,
198,
6738,
4382,
1330,
367,
29281,
67,
198,
198,
1324,
62,
6978,
796,
28686,
13,
6978,
13,
22179,
7,
418,
13,
6978,
13,
... | 3.05 | 60 |
from setuptools import setup, find_packages
# read the contents of your README file
from os import path
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='pyva',
packages=find_packages(),
version='0.4.1',
license='MIT',
description='Simple and flexible python data validation library',
long_description=long_description,
long_description_content_type='text/markdown',
author='Artak',
author_email='artaksafaryanc@gmail.com',
url='https://github.com/holoyan/python-data-validation',
keywords=['data', 'validation', 'validator', 'data validator'],
install_requires=[ # I get to this in a second
'python-dateutil',
],
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
)
| [
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
198,
198,
2,
1100,
262,
10154,
286,
534,
20832,
11682,
2393,
198,
6738,
28686,
1330,
3108,
198,
5661,
62,
34945,
796,
3108,
13,
397,
2777,
776,
7,
6978,
13,
15908,
3672,
7,
8... | 2.767544 | 456 |
# Generated by Django 3.1.12 on 2021-06-24 18:12
from django.db import migrations, models
import django.db.models.deletion
| [
2,
2980,
515,
416,
37770,
513,
13,
16,
13,
1065,
319,
33448,
12,
3312,
12,
1731,
1248,
25,
1065,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14208,
13,
9945,
13,
27530,
13,
2934,
1616,
295,... | 2.840909 | 44 |
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import numpy as np
from scipy.constants import epsilon_0
from scipy.ndimage.measurements import center_of_mass
from ipywidgets import IntSlider, FloatSlider, FloatText, ToggleButtons
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
from matplotlib.ticker import LogFormatter
from matplotlib.path import Path
import matplotlib.patches as patches
from pymatsolver import Pardiso
from discretize import TensorMesh
from SimPEG import maps, SolverLU, utils
from SimPEG.utils import ExtractCoreMesh
from SimPEG.electromagnetics.static import resistivity as DC
from ..base import widgetify
# Mesh, mapping can be globals global
npad = 15
growrate = 2.0
cs = 0.5
hx = [(cs, npad, -growrate), (cs, 200), (cs, npad, growrate)]
hy = [(cs, npad, -growrate), (cs, 100)]
mesh = TensorMesh([hx, hy], "CN")
expmap = maps.ExpMap(mesh)
mapping = expmap
dx = 5
xr = np.arange(-40, 41, dx)
dxr = np.diff(xr)
xmin = -40.0
xmax = 40.0
ymin = -40.0
ymax = 8.0
xylim = np.c_[[xmin, ymin], [xmax, ymax]]
indCC, meshcore = ExtractCoreMesh(xylim, mesh)
indx = (
(mesh.gridFx[:, 0] >= xmin)
& (mesh.gridFx[:, 0] <= xmax)
& (mesh.gridFx[:, 1] >= ymin)
& (mesh.gridFx[:, 1] <= ymax)
)
indy = (
(mesh.gridFy[:, 0] >= xmin)
& (mesh.gridFy[:, 0] <= xmax)
& (mesh.gridFy[:, 1] >= ymin)
& (mesh.gridFy[:, 1] <= ymax)
)
indF = np.concatenate((indx, indy))
_cache = {
"A": None,
"B": None,
"dx": None,
"dz": None,
"xc": None,
"zc": None,
"rotAng": None,
"sigplate": None,
"sighalf": None,
}
# The only thing we need to make it work is a 2.5D field object in SimPEG
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
629,
541,
88,
... | 2.372312 | 744 |
try:
__PHASE_DIAGRAMS_IMPORTED__
except NameError:
__PHASE_DIAGRAMS_IMPORTED__= False
if not __PHASE_DIAGRAMS_IMPORTED__:
from .phase_portrait import PhasePortrait
from .funcion1D import Funcion1D
from .nullclines import Nullcline2D
__PHASE_DIAGRAMS_IMPORTED__ = True | [
28311,
25,
198,
220,
220,
220,
11593,
11909,
11159,
62,
35,
3539,
10761,
40834,
62,
3955,
15490,
1961,
834,
198,
16341,
6530,
12331,
25,
198,
220,
220,
220,
11593,
11909,
11159,
62,
35,
3539,
10761,
40834,
62,
3955,
15490,
1961,
834,
... | 2.307087 | 127 |
#!/usr/bin/python
import sys
import os
import shutil
from glob import glob
from PyQt5.QtCore import (Qt, qInstallMessageHandler, QtInfoMsg, QtCriticalMsg, QtDebugMsg,
QtWarningMsg, QtFatalMsg, QSettings, pyqtSlot, QStandardPaths, QUrl)
from PyQt5.QtGui import QIcon, QDesktopServices
from PyQt5.QtWidgets import (QApplication, QMessageBox, QMainWindow, QDockWidget, QAction,
QFileDialog, QProgressDialog)
from P13pt.spectrumfitter.dataloader import DataLoader
from P13pt.spectrumfitter.navigator import Navigator
from P13pt.spectrumfitter.fitter import Fitter
from P13pt.spectrumfitter.plotter import Plotter
from P13pt.spectrumfitter.load_fitresults import load_fitresults
from P13pt.params_from_filename import params_from_filename
def msghandler(type, context, message):
if type == QtInfoMsg:
QMessageBox.information(None, 'Info', message)
elif type == QtDebugMsg:
QMessageBox.information(None, 'Debug', message)
elif type == QtCriticalMsg:
QMessageBox.critical(None, 'Critical', message)
elif type == QtWarningMsg:
QMessageBox.warning(None, 'Warning', message)
elif type == QtFatalMsg:
QMessageBox.critical(None, 'Fatal error', message)
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
11748,
25064,
198,
11748,
28686,
198,
11748,
4423,
346,
198,
6738,
15095,
1330,
15095,
198,
198,
6738,
9485,
48,
83,
20,
13,
48,
83,
14055,
1330,
357,
48,
83,
11,
10662,
15798,
12837,
25060,
... | 2.61167 | 497 |
#!/usr/bin/env python
# encoding=utf-8
#
# lock_tests.py: testing versioned properties
#
# Subversion is a tool for revision control.
# See http://subversion.apache.org for more information.
#
# ====================================================================
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
######################################################################
# General modules
import re, os, stat, logging
logger = logging.getLogger()
# Our testing module
import svntest
# (abbreviation)
Skip = svntest.testcase.Skip_deco
SkipUnless = svntest.testcase.SkipUnless_deco
XFail = svntest.testcase.XFail_deco
Issues = svntest.testcase.Issues_deco
Issue = svntest.testcase.Issue_deco
Wimp = svntest.testcase.Wimp_deco
Item = svntest.wc.StateItem
######################################################################
# Helpers
def is_writable(path):
"Raise if PATH is not writable."
check_writability(path, True)
def is_readonly(path):
"Raise if PATH is not readonly."
check_writability(path, False)
######################################################################
# Tests
#----------------------------------------------------------------------
# Each test refers to a section in
# notes/locking/locking-functional-spec.txt
# II.A.2, II.C.2.a: Lock a file in wc A as user FOO and make sure we
# have a representation of it. Checkout wc B as user BAR. Verify
# that user BAR cannot commit changes to the file nor its properties.
def lock_file(sbox):
"lock a file and verify that it's locked"
sbox.build()
wc_dir = sbox.wc_dir
# Make a second copy of the working copy
wc_b = sbox.add_wc_path('_b')
svntest.actions.duplicate_dir(wc_dir, wc_b)
# lock a file as wc_author
file_path = sbox.ospath('iota')
file_path_b = sbox.ospath('iota', wc_dir=wc_b)
svntest.main.file_append(file_path, "This represents a binary file\n")
svntest.main.run_svn(None, 'commit',
'-m', '', file_path)
svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
'-m', '', file_path)
# --- Meanwhile, in our other working copy... ---
err_re = "(svn\: E195022\: File '.*iota' is locked in another)|" + \
"(svn\: E160039: User '?jconstant'? does not own lock on path.*iota')"
svntest.main.run_svn(None, 'update', wc_b)
# -- Try to change a file --
# change the locked file
svntest.main.file_append(file_path_b, "Covert tweak\n")
# attempt (and fail) to commit as user Sally
svntest.actions.run_and_verify_commit(wc_b, None, None, err_re,
'--username',
svntest.main.wc_author2,
'-m', '', file_path_b)
# Revert our change that we failed to commit
svntest.main.run_svn(None, 'revert', file_path_b)
# -- Try to change a property --
# change the locked file's properties
svntest.main.run_svn(None, 'propset', 'sneakyuser', 'Sally', file_path_b)
err_re = "(svn\: E195022\: File '.*iota' is locked in another)|" + \
"(svn\: E160039\: User '?jconstant'? does not own lock on path)"
# attempt (and fail) to commit as user Sally
svntest.actions.run_and_verify_commit(wc_b, None, None, err_re,
'--username',
svntest.main.wc_author2,
'-m', '', file_path_b)
#----------------------------------------------------------------------
# II.C.2.b.[12]: Lock a file and commit using the lock. Make sure the
# lock is released. Repeat, but request that the lock not be
# released. Make sure the lock is retained.
def commit_file_keep_lock(sbox):
"commit a file and keep lock"
sbox.build()
wc_dir = sbox.wc_dir
# lock 'A/mu' as wc_author
svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
'-m', 'some lock comment',
sbox.ospath('A/mu'))
# make a change and commit it, holding lock
sbox.simple_append('A/mu', 'Tweak!\n')
svntest.main.run_svn(None, 'commit', '-m', '', '--no-unlock',
sbox.ospath('A/mu'))
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.tweak('A/mu', wc_rev=2, writelocked='K')
# Make sure the file is still locked
svntest.actions.run_and_verify_status(wc_dir, expected_status)
def commit_file_unlock(sbox):
"commit a file and release lock"
sbox.build()
wc_dir = sbox.wc_dir
# lock A/mu and iota as wc_author
svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
'-m', 'some lock comment',
sbox.ospath('A/mu'),
sbox.ospath('iota'))
# make a change and commit it, allowing lock to be released
sbox.simple_append('A/mu', 'Tweak!\n')
sbox.simple_commit()
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.tweak('A/mu', wc_rev=2)
expected_status.tweak('iota', wc_rev=2)
# Make sure the file is unlocked
svntest.actions.run_and_verify_status(wc_dir, expected_status)
#----------------------------------------------------------------------
def commit_propchange(sbox):
"commit a locked file with a prop change"
sbox.build()
wc_dir = sbox.wc_dir
# lock A/mu as wc_author
svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
'-m', 'some lock comment',
sbox.ospath('A/mu'))
# make a property change and commit it, allowing lock to be released
sbox.simple_propset('blue', 'azul', 'A/mu')
sbox.simple_commit('A/mu')
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.tweak('A/mu', wc_rev=2)
# Make sure the file is unlocked
svntest.actions.run_and_verify_status(wc_dir, expected_status)
#----------------------------------------------------------------------
# II.C.2.c: Lock a file in wc A as user FOO. Attempt to unlock same
# file in same wc as user BAR. Should fail.
#
# Attempt again with --force. Should succeed.
#
# II.C.2.c: Lock a file in wc A as user FOO. Attempt to unlock same
# file in wc B as user FOO. Should fail.
#
# Attempt again with --force. Should succeed.
def break_lock(sbox):
"lock a file and verify lock breaking behavior"
sbox.build()
wc_dir = sbox.wc_dir
# Make a second copy of the working copy
wc_b = sbox.add_wc_path('_b')
svntest.actions.duplicate_dir(wc_dir, wc_b)
# lock a file as wc_author
file_path = sbox.ospath('iota')
file_path_b = sbox.ospath('iota', wc_dir=wc_b)
svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
'-m', '', file_path)
# --- Meanwhile, in our other working copy... ---
svntest.main.run_svn(None, 'update', wc_b)
# attempt (and fail) to unlock file
# This should give a "iota' is not locked in this working copy" error
svntest.actions.run_and_verify_svn(None, None, ".*not locked",
'unlock',
file_path_b)
svntest.actions.run_and_verify_svn(None, ".*unlocked", [],
'unlock', '--force',
file_path_b)
#----------------------------------------------------------------------
# II.C.2.d: Lock a file in wc A as user FOO. Attempt to lock same
# file in wc B as user BAR. Should fail.
#
# Attempt again with --force. Should succeed.
#
# II.C.2.d: Lock a file in wc A as user FOO. Attempt to lock same
# file in wc B as user FOO. Should fail.
#
# Attempt again with --force. Should succeed.
def steal_lock(sbox):
"lock a file and verify lock stealing behavior"
sbox.build()
wc_dir = sbox.wc_dir
# Make a second copy of the working copy
wc_b = sbox.add_wc_path('_b')
svntest.actions.duplicate_dir(wc_dir, wc_b)
# lock a file as wc_author
file_path = sbox.ospath('iota')
file_path_b = sbox.ospath('iota', wc_dir=wc_b)
svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
'-m', '', file_path)
# --- Meanwhile, in our other working copy... ---
svntest.main.run_svn(None, 'update', wc_b)
# attempt (and fail) to lock file
# This should give a "iota' is already locked... error, but exits 0.
svntest.actions.run_and_verify_svn2(None, None,
".*already locked", 0,
'lock',
'-m', 'trying to break', file_path_b)
svntest.actions.run_and_verify_svn(None, ".*locked by user", [],
'lock', '--force',
'-m', 'trying to break', file_path_b)
#----------------------------------------------------------------------
# II.B.2, II.C.2.e: Lock a file in wc A. Query wc for the
# lock and verify that all lock fields are present and correct.
def examine_lock(sbox):
"examine the fields of a lockfile for correctness"
sbox.build()
# lock a file as wc_author
svntest.actions.run_and_validate_lock(sbox.ospath('iota'),
svntest.main.wc_author)
#----------------------------------------------------------------------
# II.C.1: Lock a file in wc A. Check out wc B. Break the lock in wc
# B. Verify that wc A gracefully cleans up the lock via update as
# well as via commit.
def handle_defunct_lock(sbox):
"verify behavior when a lock in a wc is defunct"
sbox.build()
wc_dir = sbox.wc_dir
# set up our expected status
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
# lock the file
svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
'-m', '', sbox.ospath('iota'))
# Make a second copy of the working copy
wc_b = sbox.add_wc_path('_b')
svntest.actions.duplicate_dir(wc_dir, wc_b)
file_path_b = sbox.ospath('iota', wc_dir=wc_b)
# --- Meanwhile, in our other working copy... ---
# Try unlocking the file in the second wc.
svntest.actions.run_and_verify_svn(None, ".*unlocked", [], 'unlock',
file_path_b)
# update the 1st wc, which should clear the lock there
sbox.simple_update()
# Make sure the file is unlocked
svntest.actions.run_and_verify_status(wc_dir, expected_status)
#----------------------------------------------------------------------
# II.B.1: Set "svn:needs-lock" property on file in wc A. Checkout wc
# B and verify that that file is set as read-only.
#
# Tests propset, propdel, lock, and unlock
def enforce_lock(sbox):
"verify svn:needs-lock read-only behavior"
sbox.build()
wc_dir = sbox.wc_dir
iota_path = sbox.ospath('iota')
lambda_path = sbox.ospath('A/B/lambda')
mu_path = sbox.ospath('A/mu')
# svn:needs-lock value should be forced to a '*'
svntest.actions.set_prop('svn:needs-lock', 'foo', iota_path)
svntest.actions.set_prop('svn:needs-lock', '*', lambda_path)
expected_err = ".*svn: warning: W125005: To turn off the svn:needs-lock property,.*"
svntest.actions.set_prop('svn:needs-lock', ' ', mu_path, expected_err)
# Check svn:needs-lock
svntest.actions.check_prop('svn:needs-lock', iota_path, ['*'])
svntest.actions.check_prop('svn:needs-lock', lambda_path, ['*'])
svntest.actions.check_prop('svn:needs-lock', mu_path, ['*'])
svntest.main.run_svn(None, 'commit',
'-m', '', iota_path, lambda_path, mu_path)
# Now make sure that the perms were flipped on all files
if os.name == 'posix':
mode = stat.S_IWGRP | stat.S_IWOTH | stat.S_IWRITE
if ((os.stat(iota_path)[0] & mode)
or (os.stat(lambda_path)[0] & mode)
or (os.stat(mu_path)[0] & mode)):
logger.warn("Setting 'svn:needs-lock' property on a file failed to set")
logger.warn("file mode to read-only.")
raise svntest.Failure
# obtain a lock on one of these files...
svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
'-m', '', iota_path)
# ...and verify that the write bit gets set...
if not (os.stat(iota_path)[0] & mode):
logger.warn("Locking a file with 'svn:needs-lock' failed to set write bit.")
raise svntest.Failure
# ...and unlock it...
svntest.actions.run_and_verify_svn(None, ".*unlocked", [], 'unlock',
iota_path)
# ...and verify that the write bit gets unset
if (os.stat(iota_path)[0] & mode):
logger.warn("Unlocking a file with 'svn:needs-lock' failed to unset write bit.")
raise svntest.Failure
# Verify that removing the property restores the file to read-write
svntest.main.run_svn(None, 'propdel', 'svn:needs-lock', iota_path)
if not (os.stat(iota_path)[0] & mode):
logger.warn("Deleting 'svn:needs-lock' failed to set write bit.")
raise svntest.Failure
#----------------------------------------------------------------------
# Test that updating a file with the "svn:needs-lock" property works,
# especially on Windows, where renaming A to B fails if B already
# exists and has its read-only bit set. See also issue #2278.
#----------------------------------------------------------------------
# Tests update / checkout with changing props
def defunct_lock(sbox):
"verify svn:needs-lock behavior with defunct lock"
sbox.build()
wc_dir = sbox.wc_dir
# Make a second copy of the working copy
wc_b = sbox.add_wc_path('_b')
svntest.actions.duplicate_dir(wc_dir, wc_b)
iota_path = sbox.ospath('iota')
iota_path_b = sbox.ospath('iota', wc_dir=wc_b)
mode = stat.S_IWGRP | stat.S_IWOTH | stat.S_IWRITE
# Set the prop in wc a
sbox.simple_propset('svn:needs-lock', 'foo', 'iota')
# commit r2
sbox.simple_commit('iota')
# update wc_b
svntest.main.run_svn(None, 'update', wc_b)
# lock iota in wc_b
svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
'-m', '', iota_path_b)
# break the lock iota in wc a
svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock', '--force',
'-m', '', iota_path)
# update wc_b
svntest.main.run_svn(None, 'update', wc_b)
# make sure that iota got set to read-only
if (os.stat(iota_path_b)[0] & mode):
logger.warn("Upon removal of a defunct lock, a file with 'svn:needs-lock'")
logger.warn("was not set back to read-only")
raise svntest.Failure
#----------------------------------------------------------------------
# Tests dealing with a lock on a deleted path
def deleted_path_lock(sbox):
"verify lock removal on a deleted path"
sbox.build()
wc_dir = sbox.wc_dir
iota_path = sbox.ospath('iota')
iota_url = sbox.repo_url + '/iota'
svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
'-m', '', iota_path)
sbox.simple_rm('iota')
svntest.actions.run_and_verify_svn(None, None, [], 'commit',
'--no-unlock',
'-m', '', iota_path)
# Now make sure that we can delete the lock from iota via a URL
svntest.actions.run_and_verify_svn(None, ".*unlocked", [], 'unlock',
iota_url)
#----------------------------------------------------------------------
# Tests dealing with locking and unlocking
def lock_unlock(sbox):
"lock and unlock some files"
sbox.build()
wc_dir = sbox.wc_dir
pi_path = sbox.ospath('A/D/G/pi')
rho_path = sbox.ospath('A/D/G/rho')
tau_path = sbox.ospath('A/D/G/tau')
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.tweak('A/D/G/pi', 'A/D/G/rho', 'A/D/G/tau', writelocked='K')
svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
'-m', '', pi_path, rho_path, tau_path)
svntest.actions.run_and_verify_status(wc_dir, expected_status)
expected_status.tweak('A/D/G/pi', 'A/D/G/rho', 'A/D/G/tau', writelocked=None)
svntest.actions.run_and_verify_svn(None, ".*unlocked", [], 'unlock',
pi_path, rho_path, tau_path)
svntest.actions.run_and_verify_status(wc_dir, expected_status)
#----------------------------------------------------------------------
# Tests dealing with directory deletion and locks
def deleted_dir_lock(sbox):
"verify removal of a directory with locks inside"
sbox.build()
wc_dir = sbox.wc_dir
pi_path = sbox.ospath('A/D/G/pi')
rho_path = sbox.ospath('A/D/G/rho')
tau_path = sbox.ospath('A/D/G/tau')
svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
'-m', '', pi_path, rho_path, tau_path)
sbox.simple_rm('A/D/G') # the parent directory
svntest.actions.run_and_verify_svn(None, None, [], 'commit',
'--no-unlock',
'-m', '', sbox.ospath('A/D/G'))
#----------------------------------------------------------------------
# III.c : Lock a file and check the output of 'svn stat' from the same
# working copy and another.
def lock_status(sbox):
"verify status of lock in working copy"
sbox.build()
wc_dir = sbox.wc_dir
# Make a second copy of the working copy
wc_b = sbox.add_wc_path('_b')
svntest.actions.duplicate_dir(wc_dir, wc_b)
# lock a file as wc_author
fname = 'iota'
file_path = os.path.join(sbox.wc_dir, fname)
sbox.simple_append('iota', "This is a spreadsheet\n")
sbox.simple_commit('iota')
svntest.main.run_svn(None, 'lock', '-m', '', sbox.ospath('iota'))
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.tweak('iota', wc_rev=2, writelocked='K')
svntest.actions.run_and_verify_status(wc_dir, expected_status)
# Verify status again after modifying the file
sbox.simple_append('iota', 'check stat output after mod')
expected_status.tweak('iota', status='M ')
svntest.actions.run_and_verify_status(wc_dir, expected_status)
# Verify status of lock from another working copy
svntest.main.run_svn(None, 'update', wc_b)
expected_status = svntest.actions.get_virginal_state(wc_b, 2)
expected_status.tweak('iota', writelocked='O')
svntest.actions.run_and_verify_status(wc_b, expected_status)
#----------------------------------------------------------------------
# III.c : Steal lock on a file from another working copy with 'svn lock
# --force', and check the status of lock in the repository from the
# working copy in which the file was initially locked.
def stolen_lock_status(sbox):
"verify status of stolen lock"
sbox.build()
wc_dir = sbox.wc_dir
# Make a second copy of the working copy
wc_b = sbox.add_wc_path('_b')
svntest.actions.duplicate_dir(wc_dir, wc_b)
# lock a file as wc_author
fname = 'iota'
file_path = os.path.join(sbox.wc_dir, fname)
file_path_b = os.path.join(wc_b, fname)
svntest.main.file_append(file_path, "This is a spreadsheet\n")
svntest.main.run_svn(None, 'commit',
'-m', '', file_path)
svntest.main.run_svn(None, 'lock',
'-m', '', file_path)
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.tweak(fname, wc_rev=2)
expected_status.tweak(fname, writelocked='K')
svntest.actions.run_and_verify_status(wc_dir, expected_status)
# Forcibly lock same file (steal lock) from another working copy
svntest.main.run_svn(None, 'update', wc_b)
svntest.main.run_svn(None, 'lock',
'-m', '', '--force', file_path_b)
# Verify status from working copy where file was initially locked
expected_status.tweak(fname, writelocked='T')
svntest.actions.run_and_verify_status(wc_dir, expected_status)
#----------------------------------------------------------------------
# III.c : Break lock from another working copy with 'svn unlock --force'
# and verify the status of the lock in the repository with 'svn stat -u'
# from the working copy in the file was initially locked
def broken_lock_status(sbox):
  "verify status of broken lock"
  # Lock a file in one working copy, break the lock from a second working
  # copy with 'svn unlock --force', then confirm the first working copy
  # reports the lock as broken ('B') in 'svn status'.
  sbox.build()
  wc_dir = sbox.wc_dir
  # Make a second copy of the working copy
  wc_b = sbox.add_wc_path('_b')
  svntest.actions.duplicate_dir(wc_dir, wc_b)
  # lock a file as wc_author
  fname = 'iota'
  file_path = os.path.join(sbox.wc_dir, fname)
  file_path_b = os.path.join(wc_b, fname)
  svntest.main.file_append(file_path, "This is a spreadsheet\n")
  svntest.main.run_svn(None, 'commit',
                       '-m', '', file_path)
  svntest.main.run_svn(None, 'lock',
                       '-m', '', file_path)
  # The commit above created r2, so iota must show wc_rev=2 and a held
  # lock ('K') here.
  expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
  expected_status.tweak(fname, wc_rev=2)
  expected_status.tweak(fname, writelocked='K')
  svntest.actions.run_and_verify_status(wc_dir, expected_status)
  # Forcibly unlock the same file (break lock) from another working copy
  svntest.main.run_svn(None, 'update', wc_b)
  svntest.main.run_svn(None, 'unlock',
                       '--force', file_path_b)
  # Verify status from working copy where file was initially locked:
  # 'B' marks the lock recorded here as broken in the repository.
  expected_status.tweak(fname, writelocked='B')
  svntest.actions.run_and_verify_status(wc_dir, expected_status)
#----------------------------------------------------------------------
# Invalid input test - lock non-existent file
def lock_non_existent_file(sbox):
  "verify error on locking non-existent file"
  # Locking a path that does not exist in the repository must fail, and
  # stderr must carry the "node not found" message for that path.
  sbox.build()
  target = os.path.join(sbox.wc_dir, 'A/foo')
  exit_code, output, error = svntest.main.run_svn(1, 'lock',
                                                  '-m', '', target)
  expected_err = "The node '%s' was not found." % os.path.abspath(target)
  if not any(expected_err in line for line in error):
    logger.warn("Error: %s : not found in: %s" % (expected_err, error))
    raise svntest.Failure
#----------------------------------------------------------------------
# Check that locking an out-of-date file fails.
def out_of_date(sbox):
  "lock an out-of-date file and ensure failure"
  # Locking must be refused when the working copy's revision of the file
  # is older than HEAD (a newer version of the node exists).
  sbox.build()
  wc_dir = sbox.wc_dir
  # Make a second copy of the working copy
  wc_b = sbox.add_wc_path('_b')
  svntest.actions.duplicate_dir(wc_dir, wc_b)
  fname = 'iota'
  file_path = os.path.join(sbox.wc_dir, fname)
  file_path_b = os.path.join(wc_b, fname)
  # Make a new revision of the file in the first WC.
  svntest.main.file_append(file_path, "This represents a binary file\n")
  svntest.main.run_svn(None, 'commit',
                       '-m', '', file_path)
  # --- Meanwhile, in our other working copy... ---
  # wc_b is still at r1, so this lock attempt must produce the
  # "newer version exists" warning.
  svntest.actions.run_and_verify_svn2(None, None,
                                      ".*newer version of '/iota' exists", 0,
                                      'lock',
                                      '--username', svntest.main.wc_author2,
                                      '-m', '', file_path_b)
#----------------------------------------------------------------------
# Tests reverting a svn:needs-lock file
def revert_lock(sbox):
  "verify svn:needs-lock behavior with revert"
  # 'svn revert' must restore the correct read-only state of an
  # svn:needs-lock file: read-only while unlocked, writable while this
  # working copy holds the lock.
  # (Removed an unused leftover local, 'extra_name', and its stale
  # comment -- nothing in this test runs from a different directory.)
  sbox.build()
  wc_dir = sbox.wc_dir
  iota_path = sbox.ospath('iota')
  # any write-permission bit set means "writable"
  mode = stat.S_IWGRP | stat.S_IWOTH | stat.S_IWRITE
  # set the prop in wc
  svntest.actions.run_and_verify_svn(None, None, [], 'propset',
                                     'svn:needs-lock', 'foo', iota_path)
  # commit r2
  svntest.actions.run_and_verify_svn(None, None, [], 'commit',
                                     '-m', '', iota_path)
  # make sure that iota got set to read-only
  if (os.stat(iota_path)[0] & mode):
    logger.warn("Committing a file with 'svn:needs-lock'")
    logger.warn("did not set the file to read-only")
    raise svntest.Failure
  # verify status is as we expect
  expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
  expected_status.tweak('iota', wc_rev=2)
  svntest.actions.run_and_verify_status(wc_dir, expected_status)
  # remove read-only-ness
  svntest.actions.run_and_verify_svn(None, None, [], 'propdel',
                                     'svn:needs-lock', iota_path)
  # make sure that iota got read-only-ness removed
  if (os.stat(iota_path)[0] & mode == 0):
    logger.warn("Deleting the 'svn:needs-lock' property ")
    logger.warn("did not remove read-only-ness")
    raise svntest.Failure
  # revert the change
  svntest.actions.run_and_verify_svn(None, None, [], 'revert', iota_path)
  # make sure that iota got set back to read-only
  if (os.stat(iota_path)[0] & mode):
    logger.warn("Reverting a file with 'svn:needs-lock'")
    logger.warn("did not set the file back to read-only")
    raise svntest.Failure
  # now lock the file
  svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
                                     '-m', '', iota_path)
  # modify it
  svntest.main.file_append(iota_path, "This line added\n")
  expected_status.tweak(wc_rev=1)
  expected_status.tweak('iota', wc_rev=2)
  expected_status.tweak('iota', status='M ', writelocked='K')
  svntest.actions.run_and_verify_status(wc_dir, expected_status)
  # revert it
  svntest.actions.run_and_verify_svn(None, None, [], 'revert', iota_path)
  # make sure it is still writable since we have the lock
  if (os.stat(iota_path)[0] & mode == 0):
    logger.warn("Reverting a 'svn:needs-lock' file (with lock in wc) ")
    logger.warn("did not leave the file writable")
    raise svntest.Failure
#----------------------------------------------------------------------
def examine_lock_via_url(sbox):
  "examine the fields of a lock from a URL"
  # Lock a file via its repository URL (as wc_author2) and have
  # run_and_validate_lock() verify the lock fields reported by 'svn info'.
  # (Removed dead locals 'wc_dir', 'comment' and 'file_path', which were
  # computed but never used.)
  sbox.build()
  fname = 'iota'
  file_url = sbox.repo_url + '/' + fname
  # lock the file url and check the contents of lock
  svntest.actions.run_and_validate_lock(file_url,
                                        svntest.main.wc_author2)
#----------------------------------------------------------------------
def lock_several_files(sbox):
  "lock/unlock several files in one go"
  # Pass several targets to a single 'svn lock' / 'svn unlock' invocation
  # and verify all of them change lock state together.
  sbox.build()
  wc_dir = sbox.wc_dir
  # Deliberately have no direct child of A as a target
  iota_path = os.path.join(sbox.wc_dir, 'iota')
  lambda_path = os.path.join(sbox.wc_dir, 'A', 'B', 'lambda')
  alpha_path = os.path.join(sbox.wc_dir, 'A', 'B', 'E', 'alpha')
  svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
                                     '--username', svntest.main.wc_author2,
                                     '-m', 'lock several',
                                     iota_path, lambda_path, alpha_path)
  # All three targets must now show a held lock ('K')
  expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
  expected_status.tweak('iota', 'A/B/lambda', 'A/B/E/alpha', writelocked='K')
  svntest.actions.run_and_verify_status(wc_dir, expected_status)
  svntest.actions.run_and_verify_svn(None, ".*unlocked", [], 'unlock',
                                     '--username', svntest.main.wc_author2,
                                     iota_path, lambda_path, alpha_path)
  # ... and all three must be lock-free again after the single unlock
  expected_status.tweak('iota', 'A/B/lambda', 'A/B/E/alpha', writelocked=None)
  svntest.actions.run_and_verify_status(wc_dir, expected_status)
#----------------------------------------------------------------------
def lock_switched_files(sbox):
  "lock/unlock switched files"
  # Lock and unlock files that are switched to other repository nodes;
  # the lock must be recorded for the switched-to node as well.
  sbox.build()
  wc_dir = sbox.wc_dir
  gamma_path = sbox.ospath('A/D/gamma')
  lambda_path = sbox.ospath('A/B/lambda')
  iota_URL = sbox.repo_url + '/iota'
  alpha_URL = sbox.repo_url + '/A/B/E/alpha'
  # Switch gamma -> iota and lambda -> alpha.
  svntest.actions.run_and_verify_svn(None, None, [], 'switch',
                                     iota_URL, gamma_path,
                                     '--ignore-ancestry')
  svntest.actions.run_and_verify_svn(None, None, [], 'switch',
                                     alpha_URL, lambda_path,
                                     '--ignore-ancestry')
  expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
  expected_status.tweak('A/D/gamma', 'A/B/lambda', switched='S')
  svntest.actions.run_and_verify_status(wc_dir, expected_status)
  svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
                                     '-m', 'lock several',
                                     gamma_path, lambda_path)
  expected_status.tweak('A/D/gamma', 'A/B/lambda', writelocked='K')
  # In WC-NG locks are kept per working copy, not per file
  expected_status.tweak('A/B/E/alpha', 'iota', writelocked='K')
  svntest.actions.run_and_verify_status(wc_dir, expected_status)
  svntest.actions.run_and_verify_svn(None, ".*unlocked", [], 'unlock',
                                     gamma_path, lambda_path)
  expected_status.tweak('A/D/gamma', 'A/B/lambda', writelocked=None)
  expected_status.tweak('A/B/E/alpha', 'iota', writelocked=None)
  svntest.actions.run_and_verify_status(wc_dir, expected_status)
def lock_uri_encoded(sbox):
  "lock and unlock a file with an URI-unsafe name"
  # The filename contains a space, which needs URI encoding; locking and
  # unlocking must work both via the WC path and via the repository URL.
  sbox.build()
  wc_dir = sbox.wc_dir
  # lock a file as wc_author
  fname = 'amazing space'
  file_path = sbox.ospath(fname)
  svntest.main.file_append(file_path, "This represents a binary file\n")
  svntest.actions.run_and_verify_svn(None, None, [], "add", file_path)
  expected_output = svntest.wc.State(wc_dir, {
    fname : Item(verb='Adding'),
    })
  expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
  expected_status.add({ fname: Item(wc_rev=2, status='  ') })
  # Commit the file.
  svntest.actions.run_and_verify_commit(wc_dir,
                                        expected_output,
                                        expected_status,
                                        None,
                                        file_path)
  svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
                                     '-m', '', file_path)
  # Make sure that the file was locked.
  expected_status.tweak(fname, writelocked='K')
  svntest.actions.run_and_verify_status(wc_dir, expected_status)
  svntest.actions.run_and_verify_svn(None, ".*unlocked", [], 'unlock',
                                     file_path)
  # Make sure it was successfully unlocked again.
  expected_status.tweak(fname, writelocked=None)
  svntest.actions.run_and_verify_status(wc_dir, expected_status)
  # And now the URL case.
  file_url = sbox.repo_url + '/' + fname
  svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
                                     '-m', '', file_url)
  # Make sure that the file was locked.
  # A lock taken via the URL leaves no token in this WC, hence 'O'
  # rather than 'K' in status.
  expected_status.tweak(fname, writelocked='O')
  svntest.actions.run_and_verify_status(wc_dir, expected_status)
  svntest.actions.run_and_verify_svn(None, ".*unlocked", [], 'unlock',
                                     file_url)
  # Make sure it was successfully unlocked again.
  expected_status.tweak(fname, writelocked=None)
  svntest.actions.run_and_verify_status(wc_dir, expected_status)
#----------------------------------------------------------------------
# A regression test for a bug when svn:needs-lock and svn:executable
# interact badly. The bug was fixed in trunk @ r854933.
#----------------------------------------------------------------------
# A variant of lock_and_exebit1: same test without unlock
def commit_xml_unsafe_file_unlock(sbox):
  "commit file with xml-unsafe name and release lock"
  # The filename contains '&', which must be escaped in the XML-based
  # protocols; committing the locked file must still release its lock.
  sbox.build()
  wc_dir = sbox.wc_dir
  fname = 'foo & bar'
  file_path = os.path.join(sbox.wc_dir, fname)
  svntest.main.file_append(file_path, "Initial data.\n")
  svntest.main.run_svn(None, 'add', file_path)
  svntest.main.run_svn(None,
                       'commit', '-m', '', file_path)
  # lock fname as wc_author
  svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
                                     '-m', 'some lock comment', file_path)
  # make a change and commit it, allowing lock to be released
  svntest.main.file_append(file_path, "Followup data.\n")
  svntest.main.run_svn(None,
                       'commit', '-m', '', file_path)
  expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
  expected_status.add({ fname : Item(status='  ', wc_rev=3), })
  # Make sure the file is unlocked
  svntest.actions.run_and_verify_status(wc_dir, expected_status)
#----------------------------------------------------------------------
def repos_lock_with_info(sbox):
  "verify info path@X or path -rY return repos lock"
  # 'svn info' with an operative revision (-r) or a peg revision (@N)
  # must report the lock currently recorded in the repository.
  sbox.build()
  wc_dir = sbox.wc_dir
  fname = 'iota'
  comment = 'This is a lock test.'
  file_path = os.path.join(sbox.wc_dir, fname)
  file_url = sbox.repo_url + '/' + fname
  # lock wc file
  svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
                                     '--username', svntest.main.wc_author2,
                                     '-m', comment, file_path)
  expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
  expected_status.tweak(fname, writelocked='K')
  svntest.actions.run_and_verify_status(wc_dir, expected_status)
  # Steal lock on wc file
  # (a URL lock with --force supersedes our WC lock, hence 'T' below)
  svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
                                     '--username', svntest.main.wc_author2,
                                     '--force',
                                     '-m', comment, file_url)
  expected_status.tweak(fname, writelocked='T')
  svntest.actions.run_and_verify_status(wc_dir, expected_status)
  # Get repository lock token
  repos_lock_token \
    = svntest.actions.run_and_parse_info(file_url)[0]['Lock Token']
  # info with revision option
  expected_infos = [
    { 'Lock Token' : repos_lock_token },
  ]
  svntest.actions.run_and_verify_info(expected_infos, file_path, '-r1')
  # info with peg revision
  svntest.actions.run_and_verify_info(expected_infos, file_path + '@1')
#----------------------------------------------------------------------
#----------------------------------------------------------------------
def info_moved_path(sbox):
  "show correct lock info on moved path"
  # After 'iota' is moved away (r2) and an unrelated new 'iota' is added
  # (r3) and locked, 'svn info' on the moved-away node at r1 must NOT
  # report the new node's lock.
  sbox.build()
  wc_dir = sbox.wc_dir
  fname = sbox.ospath("iota")
  fname2 = sbox.ospath("iota2")
  # Move iota, creating r2.
  svntest.actions.run_and_verify_svn(None, None, [],
                                     "mv", fname, fname2)
  expected_output = svntest.wc.State(wc_dir, {
    'iota2' : Item(verb='Adding'),
    'iota' : Item(verb='Deleting'),
    })
  expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
  expected_status.add({
    "iota2" : Item(status='  ', wc_rev=2)
    })
  expected_status.remove("iota")
  svntest.actions.run_and_verify_commit(wc_dir,
                                        expected_output,
                                        expected_status,
                                        None,
                                        wc_dir)
  # Create a new, unrelated iota, creating r3.
  svntest.main.file_append(fname, "Another iota")
  svntest.actions.run_and_verify_svn(None, None, [],
                                     "add", fname)
  expected_output = svntest.wc.State(wc_dir, {
    'iota' : Item(verb='Adding'),
    })
  expected_status.add({
    "iota" : Item(status='  ', wc_rev=3)
    })
  svntest.actions.run_and_verify_commit(wc_dir,
                                        expected_output,
                                        expected_status,
                                        None,
                                        wc_dir)
  # Lock the new iota.
  svntest.actions.run_and_verify_svn(None, ".*locked by user", [],
                                     "lock", fname)
  expected_status.tweak("iota", writelocked="K")
  svntest.actions.run_and_verify_status(wc_dir, expected_status)
  # Get info for old iota at r1. This shouldn't give us any lock info.
  expected_infos = [
    { 'URL' : '.*' ,
      'Lock Token' : None },
  ]
  svntest.actions.run_and_verify_info(expected_infos, fname2, '-r1')
#----------------------------------------------------------------------
def ls_url_encoded(sbox):
  "ls locked path needing URL encoding"
  # A lock on a path whose URL needs encoding ("space dir/f") must still
  # show up in verbose 'svn list' output.
  sbox.build()
  wc_dir = sbox.wc_dir
  dirname = sbox.ospath("space dir")
  fname = os.path.join(dirname, "f")
  # Create a dir with a space in its name and a file therein.
  svntest.actions.run_and_verify_svn(None, None, [],
                                     "mkdir", dirname)
  svntest.main.file_append(fname, "someone was here")
  svntest.actions.run_and_verify_svn(None, None, [],
                                     "add", fname)
  expected_output = svntest.wc.State(wc_dir, {
    'space dir' : Item(verb='Adding'),
    'space dir/f' : Item(verb='Adding'),
    })
  expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
  expected_status.add({
    "space dir" : Item(status='  ', wc_rev=2),
    "space dir/f" : Item(status='  ', wc_rev=2),
    })
  svntest.actions.run_and_verify_commit(wc_dir,
                                        expected_output,
                                        expected_status,
                                        None,
                                        wc_dir)
  # Lock the file.
  svntest.actions.run_and_verify_svn("Lock space dir/f", ".*locked by user",
                                     [], "lock", fname)
  # Make sure ls shows it being locked.
  # The regex matches either the locked file line (with the 'O' lock
  # column) or the directory line itself.
  expected_output = " +2 " + re.escape(svntest.main.wc_author) + " +O .+f|" \
                    " +2 " + re.escape(svntest.main.wc_author) + " .+\./"
  svntest.actions.run_and_verify_svn("List space dir",
                                     expected_output, [],
                                     "list", "-v", dirname)
#----------------------------------------------------------------------
# Make sure unlocking a path with the wrong lock token fails.
#----------------------------------------------------------------------
# Verify that info shows lock info for locked files with URI-unsafe names
# when run in recursive mode.
def examine_lock_encoded_recurse(sbox):
  "verify recursive info shows lock info"
  # Lock a file whose name needs URI encoding ("one iota") and let
  # run_and_validate_lock() verify the lock fields via 'svn info'.
  sbox.build()
  wc_dir = sbox.wc_dir
  fname = 'A/B/F/one iota'
  file_path = os.path.join(sbox.wc_dir, fname)
  svntest.main.file_append(file_path, "This represents a binary file\n")
  svntest.actions.run_and_verify_svn(None, None, [], "add", file_path)
  expected_output = svntest.wc.State(wc_dir, {
    fname : Item(verb='Adding'),
    })
  expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
  expected_status.add({ fname: Item(wc_rev=2, status='  ') })
  # Commit the file.
  svntest.actions.run_and_verify_commit(wc_dir,
                                        expected_output,
                                        expected_status,
                                        None,
                                        file_path)
  # lock the file and validate the contents
  svntest.actions.run_and_validate_lock(file_path,
                                        svntest.main.wc_author)
# Trying to unlock someone else's lock with --force should fail.
#----------------------------------------------------------------------
def lock_funky_comment_chars(sbox):
  "lock a file using a comment with xml special chars"
  # The lock comment 'lock & load' contains an XML metacharacter; the
  # lock operation must still succeed.
  sbox.build()
  wc_dir = sbox.wc_dir
  # lock a file as wc_author
  iota_path = os.path.join(sbox.wc_dir, 'iota')
  svntest.main.file_append(iota_path, "This represents a binary file\n")
  svntest.main.run_svn(None, 'commit',
                       '-m', '', iota_path)
  svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
                                     '-m', 'lock & load', iota_path)
#----------------------------------------------------------------------
# Check that the svn:needs-lock usage applies to a specific location
# in a working copy, not to the working copy overall.
def lock_twice_in_one_wc(sbox):
  "try to lock a file twice in one working copy"
  # With the same node switched in at two paths of one working copy, a
  # second lock attempt must fail; but since the WC database already owns
  # the lock, a commit from the other path still succeeds.
  sbox.build()
  wc_dir = sbox.wc_dir
  mu_path = sbox.ospath('A/mu')
  mu2_path = sbox.ospath('A/B/mu')
  # Create a needs-lock file
  svntest.actions.set_prop('svn:needs-lock', '*', mu_path)
  svntest.actions.run_and_verify_svn(None, None, [],
                                     'commit', wc_dir, '-m', '')
  # Mark the file readonly
  svntest.actions.run_and_verify_svn(None, None, [],
                                     'update', wc_dir)
  # Switch a second location for the same file in the same working copy
  svntest.actions.run_and_verify_svn(None, None, [],
                                     'switch', sbox.repo_url + '/A',
                                     sbox.ospath('A/B'),
                                     '--ignore-ancestry')
  # Lock location 1
  svntest.actions.run_and_verify_svn(None, None, [],
                                     'lock', mu_path, '-m', 'Locked here')
  # Locking in location 2 should fail ### Currently returns exitcode 0
  svntest.actions.run_and_verify_svn2(None, None, ".*is already locked.*", 0,
                                      'lock', '-m', '', mu2_path)
  # Change the file anyway
  # stat.S_IRWXU (owner rwx) replaces the Python-2-only octal literal
  # 0700, which is a syntax error under Python 3.
  os.chmod(mu2_path, stat.S_IRWXU)
  svntest.main.file_append(mu2_path, "Updated text")
  # Commit will just succeed as the DB owns the lock. It's a user decision
  # to commit the other target instead of the one originally locked
  svntest.actions.run_and_verify_svn(None, None, [],
                                     'commit', mu2_path, '-m', '')
#----------------------------------------------------------------------
# Test for issue #3524 'Locking path via ra_serf which doesn't exist in
# HEAD triggers assert'
#----------------------------------------------------------------------
def verify_path_escaping(sbox):
  "verify escaping of lock paths"
  # Lock/unlock targets given both as WC paths and as pre-encoded URLs
  # ('%20' = space, '%23' = '#') and check the resulting lock states.
  sbox.build()
  wc_dir = sbox.wc_dir
  # Add test paths using two characters that need escaping in a url, but
  # are within the normal ascii range
  file1 = sbox.ospath('file #1')
  file2 = sbox.ospath('file #2')
  file3 = sbox.ospath('file #3')
  svntest.main.file_write(file1, 'File 1')
  svntest.main.file_write(file2, 'File 2')
  svntest.main.file_write(file3, 'File 3')
  svntest.main.run_svn(None, 'add', file1, file2, file3)
  svntest.main.run_svn(None, 'ci', '-m', 'commit', wc_dir)
  # #1: locked via WC path; #2: locked via URL only; #3: locked via WC
  # path, then that lock is broken via URL.
  svntest.main.run_svn(None, 'lock', '-m', 'lock 1', file1)
  svntest.main.run_svn(None, 'lock', '-m', 'lock 2', sbox.repo_url + '/file%20%232')
  svntest.main.run_svn(None, 'lock', '-m', 'lock 3', file3)
  svntest.main.run_svn(None, 'unlock', sbox.repo_url + '/file%20%233')
  expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
  expected_status.add(
    {
      'file #1' : Item(status='  ', writelocked='K', wc_rev='2'),
      'file #2' : Item(status='  ', writelocked='O', wc_rev='2'),
      'file #3' : Item(status='  ', writelocked='B', wc_rev='2')
    })
  # Make sure the file locking is reported correctly
  svntest.actions.run_and_verify_status(wc_dir, expected_status)
#----------------------------------------------------------------------
# Issue #3674: Replace + propset of locked file fails over DAV
#----------------------------------------------------------------------
def cp_isnt_ro(sbox):
  "uncommitted svn:needs-lock add/cp not read-only"
  # svn:needs-lock files must remain writable while the add/copy is still
  # uncommitted, and become read-only once committed.
  sbox.build()
  mu_URL = sbox.repo_url + '/A/mu'
  mu_path = sbox.ospath('A/mu')
  mu2_path = sbox.ospath('A/mu2')
  mu3_path = sbox.ospath('A/mu3')
  kappa_path = sbox.ospath('kappa')
  # Use the test helper (as elsewhere in this suite) rather than a bare
  # open().write(), which leaked the file handle by relying on
  # refcounting to close it.
  svntest.main.file_write(kappa_path, "This is the file 'kappa'.\n")
  ## added file
  sbox.simple_add('kappa')
  svntest.actions.set_prop('svn:needs-lock', 'yes', kappa_path)
  is_writable(kappa_path)
  sbox.simple_commit('kappa')
  is_readonly(kappa_path)
  ## versioned file
  svntest.actions.set_prop('svn:needs-lock', 'yes', mu_path)
  is_writable(mu_path)
  sbox.simple_commit('A/mu')
  is_readonly(mu_path)
  # At this point, mu has 'svn:needs-lock' set
  ## wc->wc copied file
  svntest.main.run_svn(None, 'copy', mu_path, mu2_path)
  is_writable(mu2_path)
  sbox.simple_commit('A/mu2')
  is_readonly(mu2_path)
  ## URL->wc copied file
  svntest.main.run_svn(None, 'copy', mu_URL, mu3_path)
  is_writable(mu3_path)
  sbox.simple_commit('A/mu3')
  is_readonly(mu3_path)
#----------------------------------------------------------------------
# Issue #3525: Locked file which is scheduled for delete causes tree
# conflict
#----------------------------------------------------------------------
def block_unlock_if_pre_unlock_hook_fails(sbox):
  "block unlock operation if pre-unlock hook fails"
  # A failing pre-unlock hook must make 'svn unlock' fail and leave the
  # lock in place.
  sbox.build()
  wc_dir = sbox.wc_dir
  repo_dir = sbox.repo_dir
  svntest.actions.create_failing_hook(repo_dir, "pre-unlock", "error text")
  # lock a file.
  pi_path = sbox.ospath('A/D/G/pi')
  expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
  expected_status.tweak('A/D/G/pi', writelocked='K')
  svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
                                     '-m', '', pi_path)
  svntest.actions.run_and_verify_status(wc_dir, expected_status)
  # Make sure the unlock operation fails as pre-unlock hook blocks it.
  # (The '500 Internal Server Error' alternative covers the error text
  # produced over HTTP/DAV.)
  expected_unlock_fail_err_re = ".*error text|.*500 Internal Server Error"
  svntest.actions.run_and_verify_svn2(None, None, expected_unlock_fail_err_re,
                                      1, 'unlock', pi_path)
  svntest.actions.run_and_verify_status(wc_dir, expected_status)
#----------------------------------------------------------------------
def lock_invalid_token(sbox):
  "verify pre-lock hook returning invalid token"
  # A pre-lock hook that emits an empty (hence invalid) lock token must
  # produce a W160037 warning about the 'opaquelocktoken' scheme.
  sbox.build()
  hook_path = os.path.join(sbox.repo_dir, 'hooks', 'pre-lock')
  # The hook script writes an empty string as the token and exits 0.
  svntest.main.create_python_hook_script(hook_path,
                                         '# encoding=utf-8\n'
                                         'import sys\n'
                                         'sys.stdout.write("")\n'
                                         'sys.exit(0)\n')
  fname = 'iota'
  file_path = os.path.join(sbox.wc_dir, fname)
  svntest.actions.run_and_verify_svn2(None, None,
                                      "svn: warning: W160037: " \
                                      ".*scheme.*'opaquelocktoken'", 0,
                                      'lock', '-m', '', file_path)
# When removing directories, the locks of contained files were not
# correctly removed from the working copy database, thus they later
# magically reappeared when new files or directories with the same
# paths were added.
def non_root_locks(sbox):
  "locks for working copies not at repos root"
  # Work in a WC switched to /X (not the repository root) and verify that
  # locks survive updates, and that broken locks are noticed by subdir
  # and root updates alike.
  sbox.build()
  wc_dir = sbox.wc_dir
  svntest.actions.run_and_verify_svn(None, None, [],
                                     'cp', sbox.repo_url, sbox.repo_url + '/X',
                                     '-m', 'copy greek tree')
  sbox.simple_switch(sbox.repo_url + '/X')
  expected_status = svntest.actions.get_virginal_state(wc_dir, 2)
  svntest.actions.run_and_verify_status(wc_dir, expected_status)
  # Lock a file
  svntest.actions.run_and_verify_svn(None, ".*locked by user", [],
                                     'lock', sbox.ospath('A/D/G/pi'),
                                     '-m', '')
  expected_status.tweak('A/D/G/pi', writelocked='K')
  svntest.actions.run_and_verify_status(wc_dir, expected_status)
  # Updates don't break the lock
  sbox.simple_update('A/D')
  svntest.actions.run_and_verify_status(wc_dir, expected_status)
  sbox.simple_update('')
  svntest.actions.run_and_verify_status(wc_dir, expected_status)
  # Break the lock
  svntest.actions.run_and_verify_svn(None, None, [],
                                     'unlock', sbox.repo_url + '/X/A/D/G/pi')
  # Subdir update reports the break
  sbox.simple_update('A/D')
  expected_status.tweak('A/D/G/pi', writelocked=None)
  svntest.actions.run_and_verify_status(wc_dir, expected_status)
  # Relock and break
  svntest.actions.run_and_verify_svn(None, ".*locked by user", [],
                                     'lock', sbox.ospath('A/D/G/pi'),
                                     '-m', '')
  expected_status.tweak('A/D/G/pi', writelocked='K')
  svntest.actions.run_and_verify_status(wc_dir, expected_status)
  svntest.actions.run_and_verify_svn(None, None, [],
                                     'unlock', sbox.repo_url + '/X/A/D/G/pi')
  # Root update reports the break
  sbox.simple_update('')
  expected_status.tweak('A/D/G/pi', writelocked=None)
  svntest.actions.run_and_verify_status(wc_dir, expected_status)
########################################################################
# Run the tests
# list all tests here, starting with None:
# Table of every test in this suite, in the order the harness numbers
# them; the leading None placeholder keeps the entries 1-based for
# svntest.main.run_tests() below.
test_list = [ None,
              lock_file,
              commit_file_keep_lock,
              commit_file_unlock,
              commit_propchange,
              break_lock,
              steal_lock,
              examine_lock,
              handle_defunct_lock,
              enforce_lock,
              defunct_lock,
              deleted_path_lock,
              lock_unlock,
              deleted_dir_lock,
              lock_status,
              stolen_lock_status,
              broken_lock_status,
              lock_non_existent_file,
              out_of_date,
              update_while_needing_lock,
              revert_lock,
              examine_lock_via_url,
              lock_several_files,
              lock_switched_files,
              lock_uri_encoded,
              lock_and_exebit1,
              lock_and_exebit2,
              commit_xml_unsafe_file_unlock,
              repos_lock_with_info,
              unlock_already_unlocked_files,
              info_moved_path,
              ls_url_encoded,
              unlock_wrong_token,
              examine_lock_encoded_recurse,
              unlocked_lock_of_other_user,
              lock_funky_comment_chars,
              lock_twice_in_one_wc,
              lock_path_not_in_head,
              verify_path_escaping,
              replace_and_propset_locked_path,
              cp_isnt_ro,
              update_locked_deleted,
              block_unlock_if_pre_unlock_hook_fails,
              lock_invalid_token,
              lock_multi_wc,
              locks_stick_over_switch,
              lock_unlock_deleted,
              commit_stolen_lock,
              drop_locks_on_parent_deletion,
              dav_lock_timeout,
              non_root_locks,
              dav_lock_refresh,
              delete_locked_file_with_percent,
              delete_dir_with_lots_of_locked_files,
            ]
if __name__ == '__main__':
  svntest.main.run_tests(test_list)
# NOTREACHED
### End of file.
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
21004,
28,
40477,
12,
23,
198,
2,
198,
2,
220,
5793,
62,
41989,
13,
9078,
25,
220,
4856,
2196,
276,
6608,
198,
2,
198,
2,
220,
3834,
9641,
318,
257,
2891,
329,
18440,
1630,
13... | 2.257411 | 22,870 |
#!/usr/bin/env python
"""
Copyright (C) 2006 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
$URL$
$Revision$
$Date$
"""
import unittest
import kml.model
import kml.resourcemap
import xml.dom.minidom
def suite():
  """Assemble the test suite for resource-map and textures.txt handling."""
  cases = [
      SimpleResourceMapItemTestCase(),
      NoMidResourceMapItemTestCase(),
      ResourceMapTestCase("testResourceMapSize"),
      ResourceMapTestCase("testResourceMapIterator"),
      ResourceMapTestCase("testResourceMapLookup"),
      TexturesTxtTestCase("testSize"),
      TexturesTxtTestCase("testGeomLookup"),
      TexturesTxtTestCase("testKmzLookup"),
      TexturesTxtTestCase("testLookupAll"),
      TestConvertTexturesTxt(),
  ]
  test_suite = unittest.TestSuite()
  for case in cases:
    test_suite.addTest(case)
  return test_suite
# Module-level execution: the suite runs whenever this file is executed.
# NOTE(review): there is no __main__ guard, so importing this module also
# runs the tests -- confirm this is intended before reusing it as a library.
runner = unittest.TextTestRunner()
runner.run(suite())
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
37811,
198,
15269,
357,
34,
8,
4793,
3012,
3457,
13,
198,
198,
26656,
15385,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
5832,
743,
407,
7... | 3.276442 | 416 |
from pycket.error import SchemeException
from rpython.tool.pairtype import extendabletype
from rpython.rlib import jit, objectmodel
| [
6738,
12972,
694,
316,
13,
18224,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
1330,
32448,
16922,
198,
6738,
374,
29412,
13,
25981,
13,
24874,
4906,
220,
220,
220,
1330,
9117,
540,
4906,
198,
6738,
374,
29412,
13,
81... | 2.901961 | 51 |
import numpy as np
from stable_baselines import PPO2
from stable_baselines.common.policies import CnnPolicy
from stable_baselines.a2c.utils import conv, linear, conv_to_fc
from src.envs import CMDP, FrozenLakeEnvCustomMap
from src.envs.frozen_lake.frozen_maps import MAPS
from src.students import LagrangianStudent, identity_transfer
from src.online_learning import ExponetiatedGradient
from src.teacher import FrozenLakeEvaluationLogger, TeacherFrozenEnv, \
create_intervention, SmallFrozenTeacherEnv
from src.teacher.frozen_lake_env import SmallFrozenTrainingObservation, SmallFrozenNonStationaryBandits
from src.envs.frozen_lake.utils import create_intervention_from_map, \
OptimalAgent, add_teacher
import tensorflow as tf
# Silence TensorFlow 1.x-style INFO/WARNING log output.
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
# Names exported by 'from <module> import *'.
__all__ = ['create_teacher_env', 'small_base_cenv_fn']
# Base CMDP
############################## TEACHER ENV ###################################
| [
11748,
299,
32152,
355,
45941,
198,
198,
6738,
8245,
62,
12093,
20655,
1330,
350,
16402,
17,
198,
6738,
8245,
62,
12093,
20655,
13,
11321,
13,
79,
4160,
444,
1330,
327,
20471,
36727,
198,
6738,
8245,
62,
12093,
20655,
13,
64,
17,
66,
... | 3.106796 | 309 |
name = "scanapi"
import click
import logging
from scanapi.tree.api_tree import APITree
from scanapi.reporter import Reporter
from scanapi.requests_maker import RequestsMaker
from scanapi.settings import SETTINGS
from scanapi.yaml_loader import load_yaml
| [
3672,
796,
366,
35836,
15042,
1,
198,
198,
11748,
3904,
198,
11748,
18931,
198,
198,
6738,
9367,
15042,
13,
21048,
13,
15042,
62,
21048,
1330,
3486,
2043,
631,
198,
6738,
9367,
15042,
13,
260,
26634,
1330,
25869,
198,
6738,
9367,
15042,... | 3.426667 | 75 |
# Given a binary tree and a node, find the level order successor of the given node in the tree.
# The level order successor is the node that appears right after the given node in the level order traversal.
from collections import deque
# Build the sample tree:
#         12
#        /  \
#       7    1
#      /    / \
#     9   10   5
# NOTE(review): TreeNode and level_order_successor are not defined in this
# chunk -- presumably provided elsewhere in the file; confirm before running.
root = TreeNode(12)
root.left = TreeNode(7)
root.right = TreeNode(1)
root.left.left = TreeNode(9)
root.right.left = TreeNode(10)
root.right.right = TreeNode(5)
# Per the definition above, level order is 12, 7, 1, 9, 10, 5, so the
# successor of 12 is 7 and the successor of 9 is 10.
print(level_order_successor(root, 12))
print(level_order_successor(root, 9))
| [
2,
11259,
257,
13934,
5509,
290,
257,
10139,
11,
1064,
262,
1241,
1502,
17270,
286,
262,
1813,
10139,
287,
262,
5509,
13,
220,
198,
2,
383,
1241,
1502,
17270,
318,
262,
10139,
326,
3568,
826,
706,
262,
1813,
10139,
287,
262,
1241,
1... | 3.244898 | 147 |
# Barba, Lorena A., and Forsyth, Gilbert F. (2018).
# CFD Python: the 12 steps to Navier-Stokes equations.
# Journal of Open Source Education, 1(9), 21,
# https://doi.org/10.21105/jose.00021
# TODO: License
# (c) 2017 Lorena A. Barba, Gilbert F. Forsyth.
# All content is under Creative Commons Attribution CC-BY 4.0,
# and all code is under BSD-3 clause (previously under MIT, and changed on March 8, 2018).
import legate.numpy as np
| [
2,
2409,
7012,
11,
15639,
2616,
317,
1539,
290,
27325,
5272,
11,
24023,
376,
13,
357,
7908,
737,
198,
2,
18551,
35,
11361,
25,
262,
1105,
4831,
284,
13244,
959,
12,
1273,
3369,
27490,
13,
198,
2,
4913,
286,
4946,
8090,
7868,
11,
3... | 3.006849 | 146 |