content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
import os
from setuptools import setup
import pysmartprice
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme:
README = readme.read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='python-smartprice',
version=pysmartprice.VERSION,
packages=['pysmartprice'],
include_package_data=True,
license='BSD License',
description='A simple scraping-based python library for MySmartPrice',
long_description=README,
url='https://github.com/asifpy/python-smartprice',
author='Asif Jamadar',
author_email='saluasif@gmail.com',
keywords=['smartprice', 'price comparision', 'scrapping'],
install_requires=[
'requests>=2.5.3',
'beautifulsoup4>4.4.0',
'pytest>=2.8.5',
'lxml>=4.1.0'
],
)
| [
11748,
28686,
198,
6738,
900,
37623,
10141,
1330,
9058,
198,
11748,
279,
893,
13822,
20888,
198,
198,
4480,
1280,
7,
418,
13,
6978,
13,
22179,
7,
418,
13,
6978,
13,
15908,
3672,
7,
834,
7753,
834,
828,
705,
15675,
11682,
13,
81,
301... | 2.379781 | 366 |
#import built-in modules
import os
import cPickle
from collections import defaultdict
#installed module or modules in this folder
from lxml import etree
import utils
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
'''
IAA
(1) sval2 http://www.aclweb.org/anthology/S01-1005: NA
(2) sval3 http://www.aclweb.org/anthology/W/W04/W04-0811.pdf 72.5
(3) sval2007 http://www.aclweb.org/anthology/S/S07/S07-1016.pdf verbs 72 nouns 86
(4) sval2010 http://www.aclweb.org/anthology/S10-1013: one annotator: not available
(5) sval2013 not available
'''
filename2official_name = {
'1.GAMBL-AW.all-words-test-predictions' : 'GAMBL',
'PNNL.task-17.aw.txt' : 'PNNL',
'128-627_Run-1000.txt' : 'CFILT-2',
'keys-wn.2' : 'UMCC-DLSI',
'SMUaw-' : 'SMUaw'
}
old2new = {'sval2' : 'se2-aw',
'sval3' : 'se3-aw',
'sval2007': 'se7-aw',
'sval2010': 'se10-aw',
'sval2013' : 'se13-aw'}
class MFS_or_not_MFS():
'''
using the global variables defined in ../plots_mfs_vs_not_mfs.sh
(1) a plot (.pdf) is created
(2) stats about the competitions are written to file
'''
def loop(self):
'''
this method loops over the competitions in the folder
'sval_systems' and updates the class attributes
'''
answer_is_u = defaultdict(set)
#loop
for competition,xml_file in self.com_to_xml.iteritems():
#set lists
acc_mfs = []
acc_notmfs = []
bin_file = self.com_to_bin[competition]
com,info = cPickle.load(open(bin_file))
rankings = utils.open_rankings_file(competition)
doc = etree.parse(xml_file)
#loop over tokens and create list with values
for token_el in doc.iterfind("token"):
#obtain gold keys
identifier = token_el.get("token_id")
gold = [key.get("value")
for key in token_el.iterfind("gold_keys/key")]
if any([gold == ['U'],
identifier == 'd001.s044.t009']):
answer_is_u[competition].add(identifier)
continue
#check if gold is mfs
mfs = info[identifier]['MFS'] == "Yes_MFS"
#loop over systems and write to file
for system_el in token_el.iterfind('systems/system'):
system_keys = [answer_el.get('value')
for answer_el in system_el.iterfind('answer')]
system_name = system_el.get("id")
try:
system_rank = rankings[system_name]
except KeyError:
pass
allowed = False
if all([isinstance(system_rank, int),
system_rank <= 1]):
allowed = True
# check if answer was correct
answer = 0
if any([system_key in gold
for system_key in system_keys]):
answer = 1
if allowed:
assert system_name in filename2official_name
if mfs:
acc_mfs.append(answer)
else:
acc_notmfs.append(answer)
#if competition != "sval2010":
self.mfs[competition] = 100 * sum(acc_mfs)/float(len(acc_mfs))
self.notmfs[competition] = 100 * sum(acc_notmfs)/float(len(acc_notmfs))
for competition, ids in answer_is_u.items():
print('# of U answers', competition, len(ids))
def plot_it(self):
'''
given self.mfs and self.notmfs and self.labels a plot is created
'''
matplotlib.rc('font', family='sans-serif')
matplotlib.rc('font', serif='times')
matplotlib.rc('text', usetex='false')
matplotlib.rcParams.update({'font.size': 20})
fig = plt.figure(figsize=(16, 10))
ax = fig.add_subplot(111)
## the data
N = 5
menMeans = [self.mfs[comp] for comp in self.labels]
menStd = [0, 0, 0, 0, 0]
womenMeans = [self.notmfs[comp] for comp in self.labels]
womenStd = [0, 0, 0, 0, 0]
## necessary variables
ind = np.arange(N) # the x locations for the groups
width = 0.35 # the width of the bars
## the bars
rects1 = ax.bar(ind, menMeans, width,
color='black')
rects2 = ax.bar(ind+width, womenMeans, width,
color='red')
# axes and labels
ax.set_xlim(-width,len(ind)+width)
ax.set_ylim(0,100)
ax.set_xlabel('Senseval/SemEval competition')
ax.set_ylabel('Recall')
ax.set_title('Recall on MFS vs LFS of top ranked systems')
xTickMarks = [old2new[label] for label in self.labels]
ax.set_xticks(ind+width)
xtickNames = ax.set_xticklabels(xTickMarks)
plt.setp(xtickNames, rotation=60, fontsize=20)
## add a legend
ax.legend( (rects1[0], rects2[0]), ('MFS', 'LFS') )
#save
plt.savefig(os.environ['output_pdf'], bbox_inches='tight')
if __name__ == "__main__":
MFS_or_not_MFS() | [
2,
11748,
3170,
12,
259,
13103,
198,
11748,
28686,
198,
11748,
269,
31686,
293,
198,
6738,
17268,
1330,
4277,
11600,
198,
198,
2,
37050,
8265,
393,
13103,
287,
428,
9483,
198,
6738,
300,
19875,
1330,
2123,
631,
198,
11748,
3384,
4487,
... | 1.799499 | 3,192 |
import logging
from db.cache import get_cache
import settings
import api.v1
from api import app
application = app
# Clear memcached cache on startup
cache = get_cache()
cache.invalidate(True)
if settings.SENTRY_CONNECTION_STRING is not None:
from raven.contrib.flask import Sentry
sentry = Sentry(dsn=settings.SENTRY_CONNECTION_STRING, logging=True, level=logging.WARN)
sentry.init_app(application)
| [
11748,
18931,
198,
6738,
20613,
13,
23870,
1330,
651,
62,
23870,
198,
11748,
6460,
198,
11748,
40391,
13,
85,
16,
198,
6738,
40391,
1330,
598,
198,
198,
31438,
796,
598,
198,
2,
11459,
1066,
66,
2317,
12940,
319,
13693,
198,
23870,
79... | 2.95 | 140 |
from Beam import Beam
#from Empty import Optical_element
from OpticalElement import Optical_element
from SurfaceConic import SurfaceConic
from Shape import BoundaryRectangle
import numpy as np
from SurfaceConic import SurfaceConic
import matplotlib.pyplot as plt
from Vector import Vector
| [
6738,
25855,
1330,
25855,
198,
2,
6738,
33523,
1330,
49593,
62,
30854,
198,
6738,
49593,
20180,
1330,
49593,
62,
30854,
198,
6738,
20321,
3103,
291,
1330,
20321,
3103,
291,
198,
6738,
25959,
1330,
30149,
560,
45474,
9248,
198,
11748,
299,... | 4.171429 | 70 |
# class Interaction:
# def __init__(self, user, question, answer, score, date, transcript_id, interaction_id, display_seq):
# self.user = user
# self.question = question
# self.answer = answer
# self.score = score
# self.date = date
# self.transcript_id = transcript_id
# self.interaction_id = interaction_id
# self.display_seq = display_seq
#
# def __repr__(self):
# return "<Interaction {}>".format(self.interaction_id) | [
628,
198,
2,
1398,
4225,
2673,
25,
198,
2,
220,
220,
220,
220,
825,
11593,
15003,
834,
7,
944,
11,
2836,
11,
1808,
11,
3280,
11,
4776,
11,
3128,
11,
14687,
62,
312,
11,
10375,
62,
312,
11,
3359,
62,
41068,
2599,
198,
2,
220,
2... | 2.311927 | 218 |
import json
import requests
from django.conf import settings
from bot.enums import HttpMethods
from bot.exceptions import DiscordApiException
NO_ERROR_STATUS = (200, 201, 202, 100, 101)
def discord_api_request(url, method=HttpMethods.GET, data=None):
"""
Makes a request to the URL with the current Bot got from settings.
"""
headers = {
'Content-Type': 'application/json',
'Authorization': f'Bot {settings.BOT_TOKEN}'
}
if data:
data = json.dumps(data)
if method == HttpMethods.GET:
response = requests.get(url, headers=headers, data=data)
return response
if method == HttpMethods.POST:
response = requests.post(url, headers=headers, data=data)
return response
if method == HttpMethods.PATCH:
response = requests.patch(url, headers=headers, data=data)
return response
def discord_api_post(url, data=None):
"""
Makes a POST request to the give URL.
"""
response = discord_api_request(url=url, method=HttpMethods.POST, data=data)
if response.status_code not in NO_ERROR_STATUS:
raise DiscordApiException(response)
return response
def discord_api_get(url):
"""
Makes a GET request to the given URL.
"""
response = discord_api_request(url=url, method=HttpMethods.GET)
if response.status_code not in NO_ERROR_STATUS:
raise DiscordApiException(response)
return response
def discord_api_patch(url, data=None):
"""
Makes a PATCH request to the given URL.
"""
response = discord_api_request(url=url, method=HttpMethods.PATCH, data=data)
if response.status_code not in NO_ERROR_STATUS:
raise DiscordApiException(response)
return response
| [
11748,
33918,
198,
198,
11748,
7007,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
198,
6738,
10214,
13,
268,
5700,
1330,
367,
29281,
46202,
198,
6738,
10214,
13,
1069,
11755,
1330,
39462,
32,
14415,
16922,
198,
198,
15285,
62,
2... | 2.742188 | 640 |
import matplotlib.pyplot as plt
import random
import pickle
from skimage.transform import rotate
from scipy import ndimage
from skimage.util import img_as_ubyte
from joblib import Parallel, delayed
from sklearn.ensemble.forest import _generate_unsampled_indices
from sklearn.ensemble.forest import _generate_sample_indices
import numpy as np
from sklearn.ensemble import BaggingClassifier
from sklearn.tree import DecisionTreeClassifier
from itertools import product
from joblib import Parallel, delayed
from multiprocessing import Pool
from proglearn.progressive_learner import ProgressiveLearner
from proglearn.deciders import SimpleAverage
from proglearn.transformers import TreeClassificationTransformer, NeuralClassificationTransformer
from proglearn.voters import TreeClassificationVoter, KNNClassificationVoter
### MAIN HYPERPARAMS ###
granularity = 1
reps = 10
########################
angles = np.arange(0,90 + granularity,granularity)
Parallel(n_jobs=-1, verbose = 1)(delayed(LF_experiment)(angle, reps=reps, ntrees=10) for angle in angles) | [
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
4738,
198,
11748,
2298,
293,
198,
6738,
1341,
9060,
13,
35636,
1330,
23064,
198,
6738,
629,
541,
88,
1330,
299,
67,
9060,
198,
6738,
1341,
9060,
13,
22602,
1330,
337... | 3.495082 | 305 |
import argparse
import glob, os, shutil, sys
import numpy as np
import pytest
from ..qcSTR import *
from ..qcSTR import _QualityTypes
# Set up base argparser
# Just confirm that the method doesn't throw an error
# Just confirm that the method doesn't throw an error
# Just confirm that the method doesn't throw an error
# Just confirm that the method doesn't throw an error
# From https://stackoverflow.com/a/1073382/2966505
| [
11748,
1822,
29572,
198,
11748,
15095,
11,
28686,
11,
4423,
346,
11,
25064,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
12972,
9288,
198,
198,
6738,
11485,
80,
66,
18601,
1330,
1635,
198,
6738,
11485,
80,
66,
18601,
1330,
4808,
3501... | 3.464567 | 127 |
# Standard libraries
from dataclasses import dataclass, field
from typing import List, Optional
# Third party libraries
from dataclasses_json import dataclass_json
# Project
from tilda_wrapper_api.dataclasses.base import BaseObject
@dataclass_json
@dataclass(frozen=True)
@dataclass_json
@dataclass(frozen=True)
@dataclass_json
@dataclass(frozen=True)
| [
2,
8997,
12782,
198,
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
11,
2214,
198,
6738,
19720,
1330,
7343,
11,
32233,
198,
198,
2,
10467,
2151,
12782,
198,
6738,
4818,
330,
28958,
62,
17752,
1330,
4818,
330,
31172,
62,
17752,
198,
1... | 3.02521 | 119 |
#!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import importlib
import pkgutil
import sys
import traceback
import warnings
from inspect import isclass
from typing import List, Set, Tuple
from warnings import WarningMessage
from rich import print
def import_all_classes(
paths: List[str],
prefix: str,
provider_ids: List[str] = None,
print_imports: bool = False,
print_skips: bool = False,
) -> Tuple[List[str], List[WarningMessage]]:
"""
Imports all classes in providers packages. This method loads and imports
all the classes found in providers, so that we can find all the subclasses
of operators/sensors etc.
:param paths: list of paths to look the provider packages in
:param prefix: prefix to add
:param provider_ids - provider ids that should be loaded.
:param print_imports - if imported class should also be printed in output
:param print_skips - if skipped classes should also be printed in output
:return: tuple of list of all imported classes and all warnings generated
"""
imported_classes = []
tracebacks = []
printed_packages: Set[str] = set()
if provider_ids:
provider_prefixes = [mk_prefix(provider_id) for provider_id in provider_ids]
else:
provider_prefixes = [prefix]
all_warnings: List[WarningMessage] = []
for modinfo in pkgutil.walk_packages(path=paths, prefix=prefix, onerror=onerror):
if not any(modinfo.name.startswith(provider_prefix) for provider_prefix in provider_prefixes):
if print_skips:
print(f"Skipping module: {modinfo.name}")
continue
if print_imports:
package_to_print = ".".join(modinfo.name.split(".")[:-1])
if package_to_print not in printed_packages:
printed_packages.add(package_to_print)
print(f"Importing package: {package_to_print}")
try:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always", category=DeprecationWarning)
_module = importlib.import_module(modinfo.name)
for attribute_name in dir(_module):
class_name = modinfo.name + "." + attribute_name
attribute = getattr(_module, attribute_name)
if isclass(attribute):
imported_classes.append(class_name)
if w:
all_warnings.extend(w)
except Exception:
exception_str = traceback.format_exc()
tracebacks.append(exception_str)
if tracebacks:
print(
"""
[red]ERROR: There were some import errors[/]
""",
file=sys.stderr,
)
for trace in tracebacks:
print("[red]----------------------------------------[/]", file=sys.stderr)
print(trace, file=sys.stderr)
print("[red]----------------------------------------[/]", file=sys.stderr)
sys.exit(1)
else:
return imported_classes, all_warnings
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Perform import of all provider classes.')
parser.add_argument('--path', action='append', help='paths to search providers in')
parser.add_argument('--prefix', help='prefix to add in front of the class', default='airflow.providers.')
args = parser.parse_args()
print()
print(f"Walking all packages in {args.path} with prefix {args.prefix}")
print()
classes, warns = import_all_classes(
print_imports=True, print_skips=True, paths=args.path, prefix=args.prefix
)
if len(classes) == 0:
print("[red]Something is seriously wrong - no classes imported[/]")
sys.exit(1)
if warns:
print("[yellow]There were warnings generated during the import[/]")
for w in warns:
one_line_message = str(w.message).replace('\n', ' ')
print(f"[yellow]{w.filename}:{w.lineno}: {one_line_message}[/]")
print()
print(f"[green]SUCCESS: All provider packages are importable! Imported {len(classes)} classes.[/]")
print()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
49962,
284,
262,
24843,
10442,
5693,
357,
1921,
37,
8,
739,
530,
198,
2,
393,
517,
18920,
5964,
11704,
13,
220,
4091,
262,
28536,
2393,
198,
2,
9387,
351,
428,
670,
329,
322... | 2.675734 | 1,838 |
import os
import re
import shutil
import subprocess
import textwrap
import traceback
import unittest
from amaranth.hdl.ast import *
from amaranth.hdl.ir import *
from amaranth.back import rtlil
from amaranth._toolchain import require_tool
__all__ = ["FHDLTestCase"]
| [
11748,
28686,
198,
11748,
302,
198,
11748,
4423,
346,
198,
11748,
850,
14681,
198,
11748,
2420,
37150,
198,
11748,
12854,
1891,
198,
11748,
555,
715,
395,
198,
198,
6738,
716,
19173,
400,
13,
71,
25404,
13,
459,
1330,
1635,
198,
6738,
... | 3.068182 | 88 |
# from /RelValQCD_Pt_3000_3500/CMSSW_2_1_0_pre6-RelVal-1213987236-IDEAL_V2-2nd/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO
import FWCore.ParameterSet.Config as cms
# from
| [
2,
422,
1220,
6892,
7762,
48,
8610,
62,
47,
83,
62,
23924,
62,
2327,
405,
14,
24187,
5432,
54,
62,
17,
62,
16,
62,
15,
62,
3866,
21,
12,
6892,
7762,
12,
1065,
1485,
44183,
24940,
12,
14114,
1847,
62,
53,
17,
12,
17,
358,
14,
... | 1.987654 | 81 |
# Generated by Django 3.1.1 on 2020-12-05 10:48
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
16,
13,
16,
319,
12131,
12,
1065,
12,
2713,
838,
25,
2780,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2014 PHYTEC Messtechnik GmbH
# adapted from a script by Jan Lübbe
import sys
import re
import argparse
import os
def is_dir(dirname):
"""Checks if a path is an actual directory"""
if not os.path.isdir(dirname):
msg = "{0} is not a directory".format(dirname)
raise argparse.ArgumentTypeError(msg)
else:
return dirname
if __name__ == "__main__":
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
15069,
1946,
9370,
56,
51,
2943,
14937,
4169,
1349,
1134,
402,
2022,
39,
198,
2,
16573,
422,
257,
4226,
416,
2365,... | 2.494505 | 182 |
# <pep8-80 compliant>
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
__author__ = "Nutti <nutti.metro@gmail.com>"
__status__ = "production"
__version__ = "5.1"
__date__ = "24 Feb 2018"
import bpy
import bmesh
from bpy.props import (
StringProperty,
BoolProperty,
)
from .. import common
class MUV_CPUVObjCopyUV(bpy.types.Operator):
"""
Operation class: Copy UV coordinate per object
"""
bl_idname = "object.muv_cpuv_obj_copy_uv"
bl_label = "Copy UV"
bl_description = "Copy UV coordinate"
bl_options = {'REGISTER', 'UNDO'}
uv_map = StringProperty(options={'HIDDEN'})
@memorize_view_3d_mode
class MUV_CPUVObjCopyUVMenu(bpy.types.Menu):
"""
Menu class: Copy UV coordinate per object
"""
bl_idname = "object.muv_cpuv_obj_copy_uv_menu"
bl_label = "Copy UV"
bl_description = "Copy UV coordinate per object"
class MUV_CPUVObjPasteUV(bpy.types.Operator):
"""
Operation class: Paste UV coordinate per object
"""
bl_idname = "object.muv_cpuv_obj_paste_uv"
bl_label = "Paste UV"
bl_description = "Paste UV coordinate"
bl_options = {'REGISTER', 'UNDO'}
uv_map = StringProperty(options={'HIDDEN'})
copy_seams = BoolProperty(
name="Copy Seams",
description="Copy Seams",
default=True
)
@memorize_view_3d_mode
class MUV_CPUVObjPasteUVMenu(bpy.types.Menu):
"""
Menu class: Paste UV coordinate per object
"""
bl_idname = "object.muv_cpuv_obj_paste_uv_menu"
bl_label = "Paste UV"
bl_description = "Paste UV coordinate per object"
| [
2,
1279,
431,
79,
23,
12,
1795,
31332,
29,
198,
198,
2,
46424,
347,
43312,
38644,
38559,
24290,
9878,
11290,
46424,
198,
2,
198,
2,
220,
770,
1430,
318,
1479,
3788,
26,
345,
460,
17678,
4163,
340,
290,
14,
273,
198,
2,
220,
13096,... | 2.733645 | 856 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 16 20:28:10 2020
@author: Xuheng Ding
"""
import numpy as np
import matplotlib.pyplot as plt
import astropy.io.fits as pyfits
from matplotlib.colors import LogNorm
from galight.tools.measure_tools import SB_profile
import copy, matplotlib
from matplotlib.ticker import ScalarFormatter
my_cmap = copy.copy(matplotlib.cm.get_cmap('gist_heat')) # copy the default cmap
my_cmap.set_bad('black')
import matplotlib as matt
matt.rcParams['font.family'] = 'STIXGeneral'
def total_compare(flux_list_2d, label_list_2d, flux_list_1d, label_list_1d,
deltaPix = 1., zp=27.0, target_ID = 'target_ID',
mask_image=None, if_annuli=False,
arrows=False, show_plot = True):
"""
Make quick plots to compare the flux profiles in a list and show the normalized residual.
Parameter
--------
flux_list_2d:
A list of 2D flux array, that will use plt.imshow() to plot and show.
e.g., [data, pointsource_list, galaxy_model_list, normalized residual]
label_list_2d:
A list of lables for flux_list_2d.
e.g., ['data', 'model', 'point source(s)', 'galaxy(s)']
flux_list_1d:
A list of 2D flux array, that will be plot as 1D profile in the very right panel.
label_list_1d:
The labels for flux_list_1d.
mask_image:
A 2D mask for the flux_list_2d image.
arrows: bool.
If show the arrows for pointing the North and East.
if_annuli: bool.
If True, the 1D profile will show the surface brightness in the annuli apertures.
"""
norm = LogNorm() #ImageNormalize(stretch=SqrtStretch())
cl_num = len(flux_list_2d) + 1
f = plt.figure(0, figsize=(6.5+ (cl_num-1)*3.5,4))
# f = plt.figure(0, figsize=(17.0,4)) #3
# f = plt.figure(0, figsize=(20.5,4)) #4
# f = plt.figure(0, figsize=(24.0,4)) #5
ax_l = [plt.subplot2grid((6,cl_num), (0,i), rowspan=6) for i in range(len(flux_list_2d)-1)] #The image plot
ax_r = plt.subplot2grid((6,cl_num), (0,cl_num-2), rowspan=6) #The residual plot
ax_rt = plt.subplot2grid((6,cl_num), (0,cl_num-1), rowspan=5)
ax_rb = plt.subplot2grid((6,cl_num), (5,cl_num-1), rowspan=1)
frame_size = len(flux_list_2d[0])
mask = np.ones_like(flux_list_2d[0])
if mask_image is not None:
mask = mask * mask_image
for i in range(len(flux_list_2d)-1):
if i >1:
flux_list_2d[i] = flux_list_2d[i] * mask
if i == 0:
im_i = ax_l[i].imshow(flux_list_2d[i],origin='lower',cmap=my_cmap, norm=norm,
vmax = flux_list_2d[0].max(), vmin = 1.e-4)
clim=im_i.properties()['clim'] #To uniform the color bar scale.
ax_l[i].set_ylabel(target_ID, fontsize=15, weight='bold')
else:
im_i = ax_l[i].imshow(flux_list_2d[i],origin='lower',cmap=my_cmap, norm=norm, clim=clim)
ax_l[i].get_yaxis().set_visible(False)
ax_l[i].get_xaxis().set_visible(False)
scale_bar(ax_l[i], frame_size, dist=1/deltaPix, text='1"', color = 'white')
if arrows == True:
coordinate_arrows(ax_l[i], frame_size, arrow_size=0.03, color = 'white')
cb_i = f.colorbar(im_i, ax=ax_l[i], shrink=0.48, pad=0.01, orientation="horizontal", aspect=15, ticks= [1.e-4, 1.e-3, 1.e-2,1.e-1,0, 10])
cb_i.set_ticks([1.e-5, 1.e-4, 1.e-3, 1.e-2,1.e-1,0,1,10,100])
if len(label_list_2d[i])>10:
fontsize = 17
else:
fontsize = 20
ax_l[i].text(frame_size*0.05, frame_size*0.9, label_list_2d[i],fontsize=fontsize, weight='bold', color='white')
#Plot normalized residual map:
norm_residual = flux_list_2d[-1]
im_r = ax_r.imshow(norm_residual * mask, origin='lower',cmap='bwr', vmin=-6, vmax=6)
scale_bar(ax_r, frame_size, dist=1/deltaPix, text='1"')
if arrows == True:
coordinate_arrows(ax_r, frame_size, arrow_size=0.03)
ax_r.get_xaxis().set_visible(False)
ax_r.get_yaxis().set_visible(False)
f.colorbar(im_r, ax=ax_r, shrink=0.48, pad=0.01, orientation="horizontal", aspect=15)
ax_r.text(frame_size*0.05, frame_size*0.9, 'normalized residual',fontsize=17, weight='bold', color='black')
plt.subplots_adjust(wspace=-0.5, hspace=0)
#Plot the 1D profile:
label_SB_list = label_list_1d #Not show the residual, in order of data, model, QSO, galaxy in principle.
flux_SB_list = flux_list_1d
radi = len(flux_list_1d[0])/2
if if_annuli == False:
for i in range(len(label_SB_list)):
center = len(flux_SB_list[i])/2, len(flux_SB_list[i])/2
if label_SB_list[i] == 'data':
r_SB, r_grids = SB_profile(flux_SB_list[i], center, x_gridspace = 'log',
radius= radi, grids = 50,
mask_image=mask_image, fits_plot=False)
else:
r_SB, r_grids = SB_profile(flux_SB_list[i], center, x_gridspace = 'log', radius= radi,
grids = 30, mask_image = mask_image)
r_mag = - 2.5 * np.log10(r_SB) + zp
if label_SB_list[i] == 'data':
ind = len(r_mag)-(r_mag == r_mag[-1]).sum()
ax_rt.plot(r_grids[:ind], r_mag[:ind], 'o', color = 'whitesmoke',markeredgecolor="black", label=label_SB_list[i])
else:
ax_rt.plot(r_grids, r_mag, '-', label=label_SB_list[i])
ax_rt.set_ylabel('$\mu$(mag, pixel$^{-2}$)', fontsize=12)
ax_rt.invert_yaxis()
r_mag_0 = 2.5 * np.log10(SB_profile(flux_SB_list[0], center, x_gridspace = 'log', radius= radi,
grids = 30, mask_image=mask_image)[0])
r_mag_1 = 2.5 * np.log10(SB_profile(flux_SB_list[1], center, x_gridspace = 'log', grids = 30,radius= radi)[0])
ind = len(r_mag_0)-(r_mag_0 == r_mag_0[-1]).sum()
ax_rb.plot(r_grids[:ind]*deltaPix, (r_mag_0-r_mag_1)[:ind], 'ro')
ax_rb.set_yticks([-0.5,-0.25, 0., 0.25])
ax_rb.set_ylabel('$\Delta\mu$', fontsize=15)
plt.ylim([-0.5,0.5])
elif if_annuli == True:
for i in range(len(label_SB_list)):
center = len(flux_SB_list[i])/2, len(flux_SB_list[i])/2
if label_SB_list[i] == 'data':
r_SB, r_grids = SB_profile(flux_SB_list[i], center, x_gridspace = 'log',
radius = radi, grids = 50,
mask_image = mask_image, fits_plot=False, if_annuli = if_annuli)
ax_rt.plot(r_grids, r_SB, 'o', color = 'whitesmoke',markeredgecolor="black", label=label_SB_list[i])
else:
r_SB, r_grids = SB_profile(flux_SB_list[i], center, x_gridspace = 'log',
radius=radi,grids = 30, mask_image=mask_image, if_annuli = if_annuli)
ax_rt.plot(r_grids, r_SB, '-', label=label_SB_list[i])
ax_rt.set_ylabel('$SB_{annuli}$(counts, pixel$^{-2}$)', fontsize=12)
r_SB_0 = (SB_profile(flux_SB_list[0], center, x_gridspace = 'log', radius= radi, if_annuli = if_annuli,
grids = 30,
mask_image = mask_image)[0])
r_SB_1 = (SB_profile(flux_SB_list[1], center, x_gridspace = 'log', grids = 30, if_annuli = if_annuli,radius= radi)[0])
ax_rb.plot(r_grids*deltaPix, (r_SB_0- r_SB_1), 'ro')
ax_rb.set_yticks([-5,-2.5, 0., 2.5])
ax_rb.set_ylabel('$\Delta SB$', fontsize=15)
plt.ylim([-5,5])
ax_rt.set_xlabel('pixel', fontsize=15)
ax_rt.xaxis.set_label_position('top')
ax_rt.xaxis.tick_top()
ax_rt.set_xscale('log')
ax_rt.set_xticks([2,4,6,10,15,20,30,50,100,150])
ax_rt.xaxis.set_major_formatter(ScalarFormatter())
ax_rt.set_xlim([(r_grids).min()*0.85,r_grids.max()+6])
ax_rt.yaxis.set_label_position('right')
ax_rt.yaxis.tick_right()
ax_rt.yaxis.set_ticks_position('both')
ax_rt.legend()
x = np.linspace((r_grids*deltaPix).min()*0.85, (r_grids.max()+6)*deltaPix)
y = x * 0
ax_rb.set_xlabel('arcsec', fontsize=15)
ax_rb.set_xscale('log')
ax_rb.set_xticks([0.1, 0.2, 0.5, 1, 2,5,10,20])
ax_rb.get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
ax_rb.plot(x, y, 'k--')
ax_rb.yaxis.set_label_position('right')
ax_rb.yaxis.tick_right()
ax_rb.yaxis.set_ticks_position('both')
ax_rb.set_xlim([(r_grids*deltaPix).min()*0.85, (r_grids.max()+6)*deltaPix])
pos4_o = ax_rt.get_position() # get the original position
pos5_o = ax_rb.get_position() # get the original position
pos4 = [pos4_o.x0+0.112 - 0.009 * cl_num , pos4_o.y0 + 0.10, pos4_o.width*0.72, pos4_o.height*0.8]
pos5 = [pos5_o.x0+0.112 - 0.009 * cl_num , pos5_o.y0+0.08, pos5_o.width*0.72, pos5_o.height*1.1]
ax_rt.set_position(pos4) # set a new position
ax_rb.set_position(pos5) # set a new position
if show_plot == True:
plt.show()
else:
plt.close()
return f
def profile_plots(flux_list_2d, label_list_2d, flux_list_1d, label_list_1d,
deltaPix = 1., zp=27.0, target_ID = 'target_ID',
mask_image=None, if_annuli=False,
arrows=False, show_plot = True):
"""
Similar to total_compare(), i.e., to compare a list of light profiles but without showing normlized residual.
"""
norm = LogNorm() #ImageNormalize(stretch=SqrtStretch())
cl_num = len(flux_list_2d) + 1
f = plt.figure(0, figsize=(6.5+ (cl_num-1)*3.5,4))
# f = plt.figure(0, figsize=(17.0,4)) #3
# f = plt.figure(0, figsize=(20.5,4)) #4
# f = plt.figure(0, figsize=(24.0,4)) #5
ax_l = [plt.subplot2grid((6,cl_num), (0,i), rowspan=6) for i in range(len(flux_list_2d))] #The image plot
ax_rt = plt.subplot2grid((6,cl_num), (0,cl_num-1), rowspan=6)
# ax_rb = plt.subplot2grid((6,cl_num), (5,cl_num-1), rowspan=1)
frame_size = len(flux_list_2d[0])
mask = np.ones_like(flux_list_2d[0])
if mask_image is not None:
mask = mask * mask_image
for i in range(len(flux_list_2d)):
if i >1:
flux_list_2d[i] = flux_list_2d[i] * mask
if i == 0:
im_i = ax_l[i].imshow(flux_list_2d[i] * mask ,origin='lower',cmap=my_cmap, norm=norm,
vmax = flux_list_2d[0].max(), vmin = 1.e-4)
clim=im_i.properties()['clim'] #To uniform the color bar scale.
ax_l[i].set_ylabel(target_ID, fontsize=15, weight='bold')
else:
im_i = ax_l[i].imshow(flux_list_2d[i],origin='lower',cmap=my_cmap, norm=norm, clim=clim)
ax_l[i].get_yaxis().set_visible(False)
ax_l[i].get_xaxis().set_visible(False)
scale_bar(ax_l[i], frame_size, dist=1/deltaPix, text='1"', color = 'white')
if arrows == True:
coordinate_arrows(ax_l[i], frame_size, arrow_size=0.03, color = 'white')
cb_i = f.colorbar(im_i, ax=ax_l[i], shrink=0.48, pad=0.01, orientation="horizontal", aspect=15, ticks= [1.e-4, 1.e-3, 1.e-2,1.e-1,0, 10])
cb_i.set_ticks([1.e-5, 1.e-4, 1.e-3, 1.e-2,1.e-1,0,1,10,100])
if len(label_list_2d[i])>10:
fontsize = 17
else:
fontsize = 20
ax_l[i].text(frame_size*0.05, frame_size*0.9, label_list_2d[i],fontsize=fontsize, weight='bold', color='white')
plt.subplots_adjust(wspace=-0.45, hspace=0)
#Plot the 1D profile:
label_SB_list = label_list_1d #Not show the residual, in order of data, model, QSO, galaxy in principle.
flux_SB_list = flux_list_1d
radi = len(flux_list_1d[0])/2
if if_annuli == False:
for i in range(len(label_SB_list)):
center = len(flux_SB_list[i])/2, len(flux_SB_list[i])/2
if label_SB_list[i] == 'data':
r_SB, r_grids = SB_profile(flux_SB_list[i], center, x_gridspace = 'log',
radius= radi, grids = 50,
mask_image=mask_image, fits_plot=False)
else:
r_SB, r_grids = SB_profile(flux_SB_list[i], center, x_gridspace = 'log', radius= radi,
grids = 30, mask_image = mask_image)
r_mag = - 2.5 * np.log10(r_SB) + zp
if label_SB_list[i] == 'data':
ind = len(r_mag)-(r_mag == r_mag[-1]).sum()
ax_rt.plot(r_grids[:ind], r_mag[:ind], 'o', color = 'whitesmoke',markeredgecolor="black", label=label_SB_list[i])
else:
ax_rt.plot(r_grids, r_mag, '-', label=label_SB_list[i])
ax_rt.set_ylabel('$\mu$(mag, pixel$^{-2}$)', fontsize=12)
ax_rt.invert_yaxis()
elif if_annuli == True:
max_y = 0
for i in range(len(label_SB_list)):
center = len(flux_SB_list[i])/2, len(flux_SB_list[i])/2
if label_SB_list[i] == 'data':
r_SB, r_grids = SB_profile(flux_SB_list[i], center, x_gridspace = 'log',
radius = radi, grids = 50,
mask_image = mask_image, fits_plot=False, if_annuli = if_annuli)
ax_rt.plot(r_grids, r_SB, 'o', color = 'whitesmoke',markeredgecolor="black", label=label_SB_list[i])
else:
r_SB, r_grids = SB_profile(flux_SB_list[i], center, x_gridspace = 'log',
radius=radi,grids = 30, mask_image=mask_image, if_annuli = if_annuli)
ax_rt.plot(r_grids, r_SB, '-', label=label_SB_list[i])
if max_y < np.max(r_SB):
max_y = np.max(r_SB)
ax_rt.set_ylabel('$SB_{annuli}$(counts, pixel$^{-2}$)', fontsize=12)
ax_rt.set_yscale('log')
ax_rt.set_ylim([10**(-3), max_y])
ax_rt.set_xlabel('pixel', fontsize=15)
ax_rt.set_xscale('log')
ax_rt.set_xticks([2,4,6,10,15,20,30,50,100,150])
ax_rt.xaxis.set_major_formatter(ScalarFormatter())
ax_rt.set_xlim([(r_grids).min()*0.85,r_grids.max()+6])
#TODO!!!: Update the axis in arcsec on upside:
# ax_rt2 = ax_rt.twiny()
# new_tick_locations = np.array([ax_rt.get_xlim()[0], 0.2, 0.5, 1, 2,5 , ax_rt.get_xlim()[1]])*deltaPix
# ax_rt2.set_xticks(new_tick_locations)
# ax_rt2.set_xticklabels([0.1, 0.2, 0.5, 1, 2,5])
# ax_rt2.set_xlabel("arcsec")
ax_rt.yaxis.set_label_position('right')
ax_rt.yaxis.tick_right()
ax_rt.yaxis.set_ticks_position('both')
ax_rt.legend()
pos4_o = ax_rt.get_position() # get the original position
pos4 = [pos4_o.x0+0.112 - 0.009 * cl_num , pos4_o.y0 + 0.10, pos4_o.width*0.72, pos4_o.height*0.8]
ax_rt.set_position(pos4) # set a new position
if show_plot == True:
plt.show()
else:
plt.close()
return f
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
3300,
8621,
1467,
1160,
25,
2078,
25,
940,
12131,
198,
198,
31,
9800,
25,
33591,
31753,
4... | 1.848677 | 8,201 |
########################################################################
# $Header: /var/local/cvsroot/4Suite/Ft/Xml/XPath/CoreFunctions.py,v 1.31.4.1 2006/09/18 13:49:49 uogbuji Exp $
"""
The implementation of the core functions from XPath 1.0.
Copyright 2006 Fourthought, Inc. (USA).
Detailed license and copyright information: http://4suite.org/COPYRIGHT
Project home, documentation, distributions: http://4suite.org/
"""
import warnings
from xml.dom import Node
from Ft import TranslateMessage as _
from Ft.Lib import number, boolean, Set
from Ft.Xml import EMPTY_NAMESPACE, XML_NAMESPACE
from Ft.Xml.XPath import NAMESPACE_NODE
from Ft.Xml.XPath import Conversions, RuntimeException
from Ft.Xml.XPath.XPathTypes import NodesetType, NumberType
from Ft.Xml.XPath.XPathTypes import StringType as XPathStringType
### Node Set Functions ###
def Last(context):
    """Function: <number> last()

    Returns the size of the evaluation context (i.e. the position of the
    last node in the context node-set) as an XPath number.
    """
    size = context.size
    return float(size)
def Position(context):
    """Function: <number> position()

    Returns the context position (1-based index of the context node in
    the current node-set) as an XPath number.
    """
    position = context.position
    return float(position)
def Count(context, nodeSet):
    """Function: <number> count(<node-set>)

    Returns the number of nodes in *nodeSet* as an XPath number.
    Raises RuntimeException if the argument is not a node-set.
    """
    if isinstance(nodeSet, NodesetType):
        return float(len(nodeSet))
    raise RuntimeException(RuntimeException.WRONG_ARGUMENTS, 'count',
                           _("expected node-set argument"))
def Id(context, object_):
    """Function: <node-set> id(<object>)

    Resolves a whitespace-separated list of IDs (or, for a node-set
    argument, the string-value of each node) to the elements they
    identify in the context node's document.  Unmatched IDs are ignored.
    """
    if not isinstance(object_, NodesetType):
        st = Conversions.StringValue(object_)
        id_list = st.split()
    else:
        id_list = [Conversions.StringValue(n) for n in object_]
    # Each element may appear at most once in the result.
    id_list = Set.Unique(id_list)
    doc = context.node.rootNode
    nodeset = []
    # Renamed loop variable from `id` to avoid shadowing the builtin.
    for id_value in id_list:
        element = doc.getElementById(id_value)
        if element:
            nodeset.append(element)
    return nodeset
def LocalName(context, nodeSet=None):
    """Function: <string> local-name(<node-set>?)

    Returns the local part of the expanded-name of the first node (in
    document order) of *nodeSet*, or of the context node when *nodeSet*
    is omitted.  Returns the empty string for an empty node-set or for
    node types without a local name.
    """
    if nodeSet is None:
        node = context.node
    else:
        if not isinstance(nodeSet, NodesetType):
            raise RuntimeException(RuntimeException.WRONG_ARGUMENTS,
                                   'local-name', _("expected node-set"))
        if not nodeSet:
            return u''
        nodeSet.sort()
        node = nodeSet[0]
    node_type = getattr(node, 'nodeType', None)
    if node_type in (Node.ELEMENT_NODE, Node.ATTRIBUTE_NODE, NAMESPACE_NODE):
        # localName may be null for these node types
        return node.localName or u''
    if node_type == Node.PROCESSING_INSTRUCTION_NODE:
        # a processing instruction's target is never null
        return node.target
    return u''
def NamespaceUri(context, nodeSet=None):
    """Function: <string> namespace-uri(<node-set>?)

    Returns the namespace URI of the first node (in document order) of
    *nodeSet*, or of the context node when *nodeSet* is omitted.  Only
    elements and attributes can yield a non-empty namespace URI.
    """
    if nodeSet is None:
        node = context.node
    else:
        if not isinstance(nodeSet, NodesetType):
            raise RuntimeException(RuntimeException.WRONG_ARGUMENTS,
                                   'namespace-uri', _("expected node-set"))
        if not nodeSet:
            return u''
        nodeSet.sort()
        node = nodeSet[0]
    node_type = getattr(node, 'nodeType', None)
    if node_type not in (Node.ELEMENT_NODE, Node.ATTRIBUTE_NODE):
        return u''
    return node.namespaceURI or u''
def Name(context, nodeSet=None):
    """Function: <string> name(<node-set>?)

    Returns the QName of the first node (in document order) of
    *nodeSet*, or of the context node when *nodeSet* is omitted.
    """
    if nodeSet is None:
        node = context.node
    else:
        if not isinstance(nodeSet, NodesetType):
            raise RuntimeException(RuntimeException.WRONG_ARGUMENTS, 'name',
                                   _("expected node-set"))
        if not nodeSet:
            return u''
        nodeSet.sort()
        node = nodeSet[0]
    node_type = getattr(node, 'nodeType', None)
    if node_type in (Node.ELEMENT_NODE, Node.ATTRIBUTE_NODE):
        return node.nodeName
    if node_type == NAMESPACE_NODE:
        # localName may be null
        return node.localName or u''
    if node_type == Node.PROCESSING_INSTRUCTION_NODE:
        # target is never null
        return node.target
    return u''
### String Functions ###
def String(context, object_=None):
    """Function: <string> string(<object>?)

    Converts *object_* to its XPath string-value; with no argument the
    context node is converted.  A string argument is returned unchanged.
    """
    if object_ is None:
        return Conversions.StringValue(context.node)
    if isinstance(object_, XPathStringType):
        return object_
    return Conversions.StringValue(object_)
def Concat(context, *args):
    """Function: <string> concat(<string>, <string>, ...)

    Returns the concatenation of the string-values of all arguments.
    XPath 1.0 defines concat() for two or more arguments.
    """
    # The original guard (len(args) < 1) contradicted both its own error
    # message and the XPath 1.0 spec; require at least two arguments.
    if len(args) < 2:
        raise RuntimeException(RuntimeException.WRONG_ARGUMENTS, 'concat',
                               _("at least 2 arguments expected"))
    # join() replaces the former reduce(); reduce is not a builtin in
    # Python 3 and join is clearer in any case.
    return u''.join([Conversions.StringValue(arg) for arg in args])
def StartsWith(context, outer, inner):
    """Function: <string> starts-with(<string>, <string>)

    Returns XPath true if *outer* begins with *inner*; every string
    starts with the empty string.
    """
    if not isinstance(outer, XPathStringType):
        outer = Conversions.StringValue(outer)
    if not isinstance(inner, XPathStringType):
        inner = Conversions.StringValue(inner)
    if not inner:
        return boolean.true
    if outer[:len(inner)] == inner:
        return boolean.true
    return boolean.false
def Contains(context, outer, inner):
    """Function: <string> contains(<string>, <string>)

    Returns XPath true if *inner* occurs as a substring of *outer*;
    the empty string is contained in every string.
    """
    if not isinstance(outer, XPathStringType):
        outer = Conversions.StringValue(outer)
    if not isinstance(inner, XPathStringType):
        inner = Conversions.StringValue(inner)
    if not inner:
        return boolean.true
    if inner in outer:
        return boolean.true
    return boolean.false
def SubstringBefore(context, outer, inner):
    """Function: <string> substring-before(<string>, <string>)

    Returns the part of *outer* preceding the first occurrence of
    *inner*; the empty string if *inner* is empty or does not occur.
    """
    if not isinstance(outer, XPathStringType):
        outer = Conversions.StringValue(outer)
    if not isinstance(inner, XPathStringType):
        inner = Conversions.StringValue(inner)
    if inner:
        index = outer.find(inner)
        if index != -1:
            return outer[:index]
    return u''
def SubstringAfter(context, outer, inner):
    """Function: <string> substring-after(<string>, <string>)

    Returns the part of *outer* following the first occurrence of
    *inner*; the empty string if *inner* is empty or does not occur.
    """
    if not isinstance(outer, XPathStringType):
        outer = Conversions.StringValue(outer)
    if not isinstance(inner, XPathStringType):
        inner = Conversions.StringValue(inner)
    if inner:
        index = outer.find(inner)
        if index != -1:
            return outer[index + len(inner):]
    return u''
def Substring(context, st, start, length=None):
    """Function: <string> substring(<string>, <number>, <number>?)

    Returns the substring of *st* beginning at (1-based) position
    *start*, of the given *length* (to the end of the string when
    *length* is omitted).  Positions are rounded per XPath rules; NaN
    and infinite arguments are handled explicitly below.
    """
    if not isinstance(st, XPathStringType):
        st = Conversions.StringValue(st)
    if not isinstance(start, NumberType):
        start = Conversions.NumberValue(start)
    # start == NaN: spec doesn't say; assume no substring to return
    # start == +Inf or -Inf: no substring to return
    if number.isnan(start) or number.isinf(start):
        return u''
    # start is finite, safe for int() and round().
    start = int(round(start))
    # convert to 0-based index for python string slice
    if start < 1:
        # positions before the string start are clamped to the beginning
        startidx = 0
    else:
        startidx = start - 1
    # length undefined: return chars startidx to end
    if length is None:
        return st[startidx:]
    elif not isinstance(length, NumberType):
        length = Conversions.NumberValue(length)
    # length == NaN: spec doesn't say; assume no substring to return
    if number.isnan(length):
        return u''
    # length == +Inf: return chars startidx to end
    # length == -Inf: no substring to return
    elif number.isinf(length):
        if length > 0:
            return st[startidx:]
        else:
            return u''
    # length is finite, safe for int() and round().
    length = int(round(length))
    # return value must end before position (start+length)
    # which is (start+length-1) in 0-based index
    # (note: computed from the *unclamped* start, so a negative start
    # shortens the returned substring, as the spec requires)
    endidx = start + length - 1
    if endidx > startidx:
        return st[startidx:endidx]
    else:
        return u''
def StringLength(context, st=None):
    """Function: <number> string-length(<string>?)

    Returns the number of characters in *st* (or in the context node's
    string-value when *st* is omitted) as an XPath number.
    """
    if st is None:
        st = Conversions.StringValue(context.node)
    elif not isinstance(st, XPathStringType):
        st = Conversions.StringValue(st)
    return float(len(st))
def Normalize(context, st=None):
    """Function: <string> normalize-space(<string>?)

    Strips leading and trailing whitespace and collapses internal runs
    of whitespace to single spaces.
    """
    if st is None:
        st = Conversions.StringValue(context.node)
    elif not isinstance(st, XPathStringType):
        st = Conversions.StringValue(st)
    return u' '.join(st.split())
def Translate(context, source, fromChars, toChars):
    """Function: <string> translate(<string>, <string>, <string>)

    Returns *source* with every character that occurs in *fromChars*
    replaced by the character at the corresponding position of
    *toChars*; characters of *fromChars* without a counterpart in
    *toChars* are deleted from the result.
    """
    if not isinstance(source, XPathStringType):
        source = Conversions.StringValue(source)
    if not isinstance(fromChars, XPathStringType):
        fromChars = Conversions.StringValue(fromChars)
    if not isinstance(toChars, XPathStringType):
        toChars = Conversions.StringValue(toChars)
    # Build the replacement map.  Only the first occurrence of a
    # character in fromChars is significant; positions are counted over
    # the de-duplicated characters.  A None value means "delete".
    # (This replaces the former reduce()/map(None, ...) pair, both of
    # which are Python 2-only constructs.)
    translate = {}
    position = 0
    for from_char in fromChars:
        key = ord(from_char)
        if key in translate:
            continue
        if position < len(toChars):
            translate[key] = toChars[position]
        else:
            translate[key] = None
        position = position + 1
    result_chars = []
    for char in source:
        mapped = translate.get(ord(char), char)
        if mapped:
            result_chars.append(mapped)
    return u''.join(result_chars)
### Boolean Functions ###
def Boolean(context, object_):
    """Function: <boolean> boolean(<object>)

    Converts *object_* to an XPath boolean value.
    """
    return Conversions.BooleanValue(object_)
def Not(context, object_):
    """Function: <boolean> not(<boolean>)

    Returns the XPath boolean negation of *object_*.
    """
    if Conversions.BooleanValue(object_):
        return boolean.false
    return boolean.true
def True(context):
    """Function: <boolean> true()

    Always returns the XPath true value.  NOTE: 'True' is a reserved
    word from Python 2.4's successor onward (a keyword in Python 3), so
    this definition is only valid under the Python 2 interpreters this
    library targets.
    """
    return boolean.true
def False(context):
    """Function: <boolean> false()

    Always returns the XPath false value.  NOTE: 'False' is a keyword in
    Python 3; this definition is only valid under the Python 2
    interpreters this library targets.
    """
    return boolean.false
def Lang(context, lang):
    """Function: <boolean> lang(<string>)

    Tests whether the language of the context node — as declared by the
    nearest xml:lang attribute on the node or an ancestor — is *lang*
    or a sublanguage of it.  The comparison is case-insensitive.
    """
    wanted = Conversions.StringValue(lang).lower()
    node = context.node
    while node.parentNode:
        for attribute in node.attributes.values():
            # Look for an xml:lang declaration on this node.
            if (attribute.localName == 'lang'
                and attribute.namespaceURI == XML_NAMESPACE):
                declared = attribute.nodeValue.lower()
                # Exact match (primary part plus any subpart).
                if declared == wanted:
                    return boolean.true
                # Primary-part match: 'en' matches 'en-us'.
                hyphen = declared.find('-')
                if hyphen != -1 and declared[:hyphen] == wanted:
                    return boolean.true
                # A declaration was found but does not match.
                return boolean.false
        node = node.parentNode
    # No xml:lang declaration on the node or any ancestor.
    return boolean.false
### Number Functions ###
def Number(context, object_=None):
    """Function: <number> number(<object>?)

    Converts *object_* to an XPath number; with no argument the context
    node is converted (as a one-node node-set).
    """
    if object_ is None:
        return Conversions.NumberValue([context.node])
    return Conversions.NumberValue(object_)
def Sum(context, nodeSet):
    """Function: <number> sum(<node-set>)

    Returns the sum of the number-values of the nodes in *nodeSet*
    (0 for an empty node-set).
    """
    if not isinstance(nodeSet, NodesetType):
        raise RuntimeException(RuntimeException.WRONG_ARGUMENTS, 'sum',
                               _("expected node-set argument"))
    # sum() replaces the former map()/reduce() pair; reduce is no longer
    # a builtin in Python 3 and sum() is clearer in any case.
    return sum([Conversions.NumberValue(node) for node in nodeSet])
def Floor(context, object_):
    """Function: <number> floor(<number>)

    Returns the largest integer (as a float) not greater than the
    argument.  NaN and the infinities are returned unchanged.
    """
    num = Conversions.NumberValue(object_)
    if number.isnan(num) or number.isinf(num):
        return num
    if int(num) == num:
        # already integral
        return num
    if num > 0:
        return float(int(num))
    # negative non-integral values truncate towards zero, so step down
    return float(int(num)) - 1.0
def Ceiling(context, object_):
    """Function: <number> ceiling(<number>)

    Returns the smallest integer (as a float) not less than the
    argument.  NaN and the infinities are returned unchanged.
    """
    num = Conversions.NumberValue(object_)
    if number.isnan(num) or number.isinf(num):
        return num
    if int(num) == num:
        # already integral
        return num
    if num > 0:
        # positive non-integral values truncate towards zero, so step up
        return float(int(num)) + 1.0
    return float(int(num))
def Round(context, object_):
    """Function: <number> round(<number>)

    Returns the integer closest to the argument; halves round towards
    positive infinity, as XPath requires.  NaN and the infinities are
    returned unchanged.
    """
    num = Conversions.NumberValue(object_)
    if number.isnan(num) or number.isinf(num):
        return num
    elif num < 0 and num % 1.0 == 0.5:
        # e.g. -2.5 % 1.0 == 0.5 in Python.  round() rounds halves away
        # from zero, which for negatives goes towards -infinity, so add
        # 1 to round towards +infinity instead (-2.5 -> -2.0).
        return round(num, 0) + 1
    else:
        # NOTE(review): relies on Python 2's round-half-away-from-zero;
        # Python 3's banker's rounding would change results for positive
        # halves — confirm before porting.
        return round(num, 0)
### Function Mappings ###
# Dispatch table mapping (namespace, function-name) pairs to the
# implementations above.  All XPath 1.0 core functions live in the null
# (empty) namespace.
CoreFunctions = {
    (EMPTY_NAMESPACE, 'last'): Last,
    (EMPTY_NAMESPACE, 'position'): Position,
    (EMPTY_NAMESPACE, 'count'): Count,
    (EMPTY_NAMESPACE, 'id'): Id,
    (EMPTY_NAMESPACE, 'local-name'): LocalName,
    (EMPTY_NAMESPACE, 'namespace-uri'): NamespaceUri,
    (EMPTY_NAMESPACE, 'name'): Name,
    (EMPTY_NAMESPACE, 'string'): String,
    (EMPTY_NAMESPACE, 'concat'): Concat,
    (EMPTY_NAMESPACE, 'starts-with'): StartsWith,
    (EMPTY_NAMESPACE, 'contains'): Contains,
    (EMPTY_NAMESPACE, 'substring-before'): SubstringBefore,
    (EMPTY_NAMESPACE, 'substring-after'): SubstringAfter,
    (EMPTY_NAMESPACE, 'substring'): Substring,
    (EMPTY_NAMESPACE, 'string-length'): StringLength,
    (EMPTY_NAMESPACE, 'normalize-space'): Normalize,
    (EMPTY_NAMESPACE, 'translate'): Translate,
    (EMPTY_NAMESPACE, 'boolean'): Boolean,
    (EMPTY_NAMESPACE, 'not'): Not,
    (EMPTY_NAMESPACE, 'true'): True,
    (EMPTY_NAMESPACE, 'false'): False,
    (EMPTY_NAMESPACE, 'lang'): Lang,
    (EMPTY_NAMESPACE, 'number'): Number,
    (EMPTY_NAMESPACE, 'sum'): Sum,
    (EMPTY_NAMESPACE, 'floor'): Floor,
    (EMPTY_NAMESPACE, 'ceiling'): Ceiling,
    (EMPTY_NAMESPACE, 'round'): Round,
    }
| [
29113,
29113,
7804,
198,
2,
720,
39681,
25,
1220,
7785,
14,
12001,
14,
66,
14259,
15763,
14,
19,
5606,
578,
14,
37,
83,
14,
55,
4029,
14,
55,
15235,
14,
14055,
24629,
2733,
13,
9078,
11,
85,
352,
13,
3132,
13,
19,
13,
16,
4793,
... | 2.453539 | 5,510 |
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
| [
2,
30396,
329,
1702,
306,
12,
25614,
1351,
13,
198,
2,
1398,
7343,
19667,
7,
15252,
2599,
198,
2,
220,
220,
220,
220,
825,
11593,
15003,
834,
7,
944,
11,
2124,
2599,
198,
2,
220,
220,
220,
220,
220,
220,
220,
220,
2116,
13,
2100... | 2.253968 | 63 |
import sys
import serial
import time
import numpy as np
import copy
try:
from urllib.parse import quote
from urllib.request import urlopen
except ImportError:
# from urlparse import quote
from urllib import quote
from urllib2 import urlopen
# https://github.com/markjones112358/pyInstruments
class eScope:
    """Base class for instruments reached over an HTTP "eScope" gateway.

    Commands are submitted as GET requests to the device's Comm.html
    endpoint; responses are read back line by line.  Instances are
    expected to provide ``self.ip`` and ``self.debug``.
    """

    def write(self, command):
        """Send *command* to the instrument via an HTTP GET request."""
        url = "http://%s/Comm.html?COMMAND=%s" % (self.ip, quote(command))
        self.request = urlopen(url, timeout=2)
        if self.debug:
            print("scope: >> %s" % command)

    def read(self, raw=False):
        """Read one response line; blocks until the instrument answers.

        Returns raw bytes when *raw* is true, otherwise an ASCII-decoded
        string.
        """
        answer = self.request.readline()
        if self.debug:
            print("scope: <<%s" % answer)
        if raw:
            return answer
        return answer.decode("ascii")

    def query(self, command):
        """Send *command* and return the instrument's reply."""
        self.write(command)
        return self.read()

    def getName(self):
        """Return "vendor model" parsed from the *IDN? identification."""
        fields = self.query("*IDN?").split(",")
        return "%s %s" % (fields[0], fields[1])

    def sendReset(self):
        """Reset the instrument (*RST)."""
        self.write("*RST")
class serialInstrument:
    """Base class for an instrument with a serial interface.

    Extend this class to implement other serial instruments.  Subclasses
    are expected to provide ``self.inst`` (the open serial port wrapper)
    and ``self.debug``.
    """

    def write(self, command):
        """Send *command* to the instrument, newline-terminated."""
        self.inst.write(bytearray(command + "\n", "ascii"))
        if self.debug:
            print(command)

    def read(self, raw=False):
        """Read a response from the instrument.

        This blocks until the instrument responds.  (An unused local
        accumulator was removed here; the port's value is returned
        directly.)
        """
        return self.inst.read(raw=raw)

    def query(self, command):
        """Send *command* and poll until a response arrives."""
        self.write(command + "\n")
        response = self.read()
        while response is False:
            response = self.read()
        return response

    def getName(self):
        """Return the instrument's *IDN? identification string."""
        return self.query("*IDN?")

    def sendReset(self):
        """Reset the instrument (*RST)."""
        self.inst.write("*RST")
class Scope:
    """Driver for the Tektronix TPS2024 oscilloscope.

    Handles functionality that is not specific to a particular channel,
    e.g. horizontal scale and acquisition control.  Instances are
    expected to provide ``self.inst`` plus ``write``/``read``/``query``/
    ``issueCommand``/``wait`` helpers (defined elsewhere in this file).
    """

    x_incr = False
    x_num = False
    numAvg = 0            # configured number of averages (0 = sample mode)
    selectedChannel = 1
    debug = False

    # Available time-per-division settings in seconds, largest first.
    available_tdivs = [50, 25, 10, 5, 2.5, 1,
                       0.5, 0.25, 0.1, 0.05, 0.025, 0.01,
                       0.005, 0.0025, 0.001, 0.0005, 0.00025, 0.0001,
                       0.00005, 0.000025, 0.00001, 0.000005, 0.0000025,
                       0.000001, 0.0000005, 0.00000025, 0.0000001,
                       0.00000005, 0.000000025, 0.00000001, 0.000000005,
                       0.0000000025]

    # Average counts accepted by the instrument.
    available_averageSettings = [128, 64, 16, 4]

    def getName(self):
        """Return the instrument identifier string (*IDN?)."""
        return self.inst.getName()

    def set_averaging(self, averages):
        """Enable or disable averaging (applies to all channels).

        *averages* must be one of 4, 16, 64 or 128; a value of 0 or
        False disables averaging (sample mode).
        """
        if averages in self.available_averageSettings:
            self.write("ACQuire:MODe AVERage")
            self.write("ACQuire:NUMAVg " + str(averages))
            self.numAvg = averages
        elif averages == 0 or averages is False:
            self.write("ACQuire:MODe SAMPLE")
            self.write("ACQuire:NUMAVg " + str(0))
            self.numAvg = 0
        else:
            # Invalid setting; keep the original fail-fast behaviour.
            sys.exit()

    def set_autoRange(self, mode):
        """Enable or disable auto-ranging for the device.

        mode = False | 'vertical' | 'horizontal' | 'both'
        (False disables auto-ranging.)
        """
        if mode is False:
            self.issueCommand("AUTORange:STATE OFF", "Disabling auto ranging")
        elif mode.find("or") != -1:       # h'or'izontal
            self.issueCommand("AUTORANGE:SETTINGS HORizontal",
                              "Setting auto range mode to horizontal")
            self.issueCommand("AUTORange:STATE ON", "Enabling auto ranging")
        elif mode.find("er") != -1:       # v'er'tical
            self.issueCommand("AUTORANGE:SETTINGS VERTICAL",
                              "Setting auto range mode to vertical")
            self.issueCommand("AUTORange:STATE ON", "Enabling auto ranging")
        elif mode.find("th") != -1:       # bo'th'
            self.issueCommand("AUTORANGE:SETTINGS BOTH",
                              "Setting auto range mode to both")
            self.issueCommand("AUTORange:STATE ON", "Enabling auto ranging")
        self.wait()

    def set_single_acquisition(self):
        """Configure the scope to stop after a single acquisition."""
        self.issueCommand("ACQuire:STOPAfter SEQuence",
                          "Starting waveform acquisition")

    def is_running(self):
        """Return True while an acquisition is in progress."""
        if int(self.query("ACQuire:STATE?")):
            return True
        else:
            return False

    def acquisition(self, enable):
        """Start (*enable* true) or stop waveform acquisition."""
        if enable:
            self.issueCommand("ACQuire:STATE ON",
                              "Starting waveform acquisition")
        else:
            self.issueCommand("ACQuire:STATE OFF",
                              "Stopping waveform acquisition")

    def get_numAcquisitions(self):
        """Return the number of acquisitions since acquisition started.

        The counter (max 2**31 - 1) is reset to zero by most
        Acquisition/Horizontal/Vertical/Trigger changes that affect the
        waveform.
        """
        num = self.query("ACQuire:NUMACq?")
        while num is False:
            num = self.read()
        return int(num)

    def waitForAcquisitions(self, num=False):
        """Block until the required number of acquisitions has occurred.

        Waits for *num* acquisitions when given, otherwise for the
        configured average count, otherwise for a single acquisition.
        """
        # BUG FIX: the conditions were inverted ("elif num is False:
        # until = num"), so an explicit *num* was ignored and `until`
        # could end up False.
        if num is not False:
            until = num
        elif self.numAvg:
            until = self.numAvg
        else:
            until = 1
        last = 0
        done = self.get_numAcquisitions()
        while done < until:
            if done != last:
                last = done
            done = self.get_numAcquisitions()
            time.sleep(0.1)

    def set_hScale(self,
                   tdiv=False,
                   frequency=False,
                   cycles=False):
        """Set the horizontal scale (s/div).

        Either pass *tdiv* directly (the smallest available setting that
        is >= *tdiv* is used), or pass *frequency* — optionally with
        *cycles* — to pick a timebase that captures that many cycles.
        Returns the resulting capture-window length in seconds, or False
        when no usable parameter was supplied.
        """
        # BUG FIX: the guards were inverted ("if tdiv is False" followed
        # by use of tdiv), and the descending scan always selected the
        # largest setting; scan ascending like find_minTdiv does.
        set_div = False
        if tdiv is not False:
            for candidate in reversed(self.available_tdivs):
                if float(tdiv) <= candidate:
                    set_div = candidate
                    break
        elif frequency is not False:
            if cycles is not False:
                set_div = self.find_minTdiv(frequency, cycles)
            else:
                set_div = self.find_minTdiv(frequency)
        if set_div is not False:
            self.issueCommand("HORizontal:SCAle " + str(set_div),
                              "Setting horizontal scale to "
                              + str(set_div) + " s/div")
            return set_div * 10.0
        return False

    def get_timeToCapture(self, frequency, cycles, averaging=1):
        """Return (min, max) capture time in seconds.

        Based on the given frequency, number of cycles and number of
        averages.  The minimum assumes every acquisition triggers
        instantly; the maximum allows one extra wavelength per average.
        """
        if averaging == 0:
            averaging = 1
        tdiv = self.find_minTdiv(frequency, cycles)
        windowlength = float(tdiv) * 10.0
        wavelength = 1.0 / frequency
        time_min = windowlength * averaging
        time_max = (windowlength * averaging) + (wavelength * averaging)
        return (time_min, time_max)

    def find_minTdiv(self, frequency, min_cycles=2):
        """Return the smallest s/div fitting *min_cycles* cycles of
        *frequency* into the 10-division capture window."""
        ascending = copy.copy(self.available_tdivs)
        ascending.reverse()
        wavelength = 1.0 / float(frequency)
        min_div = (wavelength * min_cycles) / 10.0
        for tdiv in ascending:
            if min_div <= tdiv:
                return tdiv
        # Nothing fits; fall back to the largest available setting.
        return ascending[len(ascending) - 1]

    def set_trigger_position(self, position):
        """Set the pretrigger amount to *position* percent of the record
        length.  Out-of-range values are silently ignored."""
        if position >= 0 and position <= 100:
            self.issueCommand("HORizontal:TRIGger:POSition %d" % position,
                              "Setting trigger position")
def get_channels_autoRange(channels, wait=True, averages=False, max_adjustments=5):
    """Capture waveforms from several channels, auto-adjusting V/div.

    Between captures, every channel that clipped (V/div too small) or
    under-filled its window (V/div too large) is rescaled, then a single
    re-acquisition serves all channels — cheaper than adjusting each
    channel independently.  At most *max_adjustments* re-acquisition
    rounds are performed.  Returns a list of (xs, ys) tuples, one per
    channel, in the order given.
    """
    channels_data = [False for _ in range(len(channels))]
    channels_rescale = [False for _ in range(len(channels))]
    reset = False
    to_wait = wait
    for channel_number, channel in enumerate(channels):
        xs, ys = channel.get_waveform(False, wait=to_wait)
        to_wait = False
        if channel.did_clip():
            # Clipped: move to the next larger range if one is available.
            set_vdiv = channel.get_yScale()
            if channel.available_vdivs.index(set_vdiv) > 0:
                temp_index = channel.available_vdivs.index(set_vdiv) - 1
                temp1 = channel.available_vdivs[temp_index]
                temp2 = 'Decreasing channel ' + str(channel_number) + ' to '
                temp2 += str(temp1)
                temp2 += ' V/div'
                print(temp2)
                channels_rescale[channel_number] = temp1
                reset = True
            else:
                print()
                print('===================================================')
                print('WARN: Scope Y-scale maxed out! THIS IS BAD!!!!!!!!!')
                print('===================================================')
                print('Aborting!')
                sys.exit()
        else:
            # Not clipped: check whether a smaller window still fits the data.
            tmp_max = 0
            tmp_min = 0
            for y in ys:
                if y > tmp_max:
                    tmp_max = y
                elif y < tmp_min:
                    tmp_min = y
            datarange = tmp_max - tmp_min
            set_range = channel.get_yScale()
            set_window = set_range * 8.0
            # find the best (minimum no-clip) range
            best_window = 0
            tmp_range = copy.copy(channel.available_vdivs)
            available_windows = map(lambda x: x * 8.0, tmp_range)
            for available_window in available_windows:
                if datarange <= (available_window * 0.95):
                    best_window = available_window
            # if it's not the range we're already using, schedule it
            if best_window < set_window:
                temp = 'Increasing channel ' + str(channel_number)
                temp += ' to ' + str(best_window / 8.0) + ' V/div'
                print(temp)
                channels_rescale[channel_number] = best_window / 8.0
                reset = True
        channels_data[channel_number] = (xs, ys)
    if max_adjustments > 0 and reset:
        max_adjustments -= 1
        temp = 'A channels range has been altered, data will need to be'
        temp += ' re-acquired'
        print(temp)
        temp = 'The maximum remaining adjustments to the channels is '
        temp += str(max_adjustments)
        print(temp)
        enumerated_data = enumerate(zip(channels_rescale, channels))
        for channel_number, (channel_scale, channel) in enumerated_data:
            # BUG FIX: was "if channel_scale is False", which adjusted
            # exactly the channels that did NOT need rescaling.
            if channel_scale is not False:
                temp = 'Adjusting channel ' + str(channel_number) + ' to '
                temp += str(channel_scale) + ' V/div'
                print(temp)
                channel.set_vScale(channel_scale)
        # Toggle averaging to flush data captured at the old scale.
        channels[0].set_averaging(False)
        time.sleep(1)
        channels[0].set_averaging(averages)
        return get_channels_autoRange(channels,
                                      wait,
                                      averages,
                                      max_adjustments=max_adjustments)
    else:
        return channels_data
class channel(Scope):
    """One physical oscilloscope channel.

    Implements vertical (V/div) control and waveform download for a
    single channel; acquisition and horizontal control are inherited
    from Scope.
    """

    channel = False  # channel number; assigned externally

    # Available volts-per-division settings, largest first.
    available_vdivs = [50.0, 20.0, 10.0, 5.0, 2.0, 1.0,
                       0.5, 0.2, 0.1, 0.05, 0.02]

    def set_vScale(self, s, debug=False):
        """Set this channel's V/div to the smallest setting >= *s*.

        Falls back to the smallest available setting (with a warning)
        when *s* is below the instrument's range.
        """
        candidates = copy.copy(self.available_vdivs)
        setVdiv = False
        for vdiv in candidates:
            if s <= vdiv:
                setVdiv = vdiv
        if setVdiv is False:
            print()
            print('===================================================')
            print('WARN: ' + str(s) + ' V/div is outside of scope range ')
            print('Will use ' + str(candidates[len(candidates) - 1]) + ' V/div instead,')
            print('===================================================')
            print()
            # BUG FIX: the fallback promised above was never applied and
            # "SCAle False" was sent to the instrument.
            setVdiv = candidates[len(candidates) - 1]
        self.issueCommand("CH" + str(self.channel) + ":SCAle " + str(setVdiv),
                          "Setting channel " + str(self.channel) +
                          " scale to " + str(setVdiv) + " V/div")
        self.y_mult = setVdiv

    def did_clip(self, debug=False):
        """Return True if the last acquisition contained clipped samples.

        A clip is a run of two or more consecutive raw values pinned
        near the converter rails (>250 or <5), indicating the V/div is
        set too small for the signal.
        """
        consecutive = 0
        for point in self.signal_raw:
            if point > 250 or point < 5:
                consecutive += 1
                # BUG FIX: the run-length test previously sat after the
                # loop, so only clipping at the *end* of the record was
                # detected.
                if consecutive > 1:
                    return True
            else:
                consecutive = 0
        return False

    def get_yScale(self):
        """Query and return this channel's current V/div setting."""
        answer = self.inst.query('CH' + str(self.channel) + ':SCAle?')
        return float(answer)

    def get_waveform_autoRange(self, debug=False, wait=True, averages=False):
        """Download a waveform, auto-adjusting this channel's V/div.

        Increases the range while data clips, decreases it while the
        window is under-filled, re-requesting captures until the data
        fits.  Returns [xs, ys].
        """
        xs, ys = self.get_waveform(False, wait=wait)
        # Check if this waveform contained clipped data
        if self.did_clip():
            clipped = True
            while clipped:
                # Increase V/div until no clipped data
                set_vdiv = self.get_yScale()
                if self.available_vdivs.index(set_vdiv) > 0:
                    best_div = self.available_vdivs[self.available_vdivs.index(set_vdiv) - 1]
                    if debug:
                        temp = 'Setting Y-scale on channel '
                        temp += str(self.channel) + ' to '
                        temp += str(best_div)
                        temp += ' V/div'
                        # BUG FIX: message was built but never printed
                        print(temp)
                    self.set_vScale(best_div)
                    self.waitForAcquisitions(self.numAvg)
                    xs, ys = self.get_waveform(debug=False)
                    clipped = self.did_clip()
                else:
                    print()
                    print('===================================================')
                    print('WARN: Scope Y-scale maxed out! THIS IS BAD!!!!!!!!!')
                    print('===================================================')
                    print()
                    clipped = False
        else:
            # Detect whether a smaller V/div would still contain the data
            tmp_max = 0
            tmp_min = 0
            for y in ys:
                if y > tmp_max:
                    tmp_max = y
                elif y < tmp_min:
                    tmp_min = y
            datarange = tmp_max - tmp_min
            set_range = self.get_yScale()
            set_window = set_range * 8.0
            # find the best (minimum no-clip) range
            best_window = 0
            tmp_range = copy.copy(self.available_vdivs)
            available_windows = map(lambda x: x * 8.0, tmp_range)
            for available_window in available_windows:
                if datarange <= (available_window * 0.90):
                    best_window = available_window
            # if it's not the range we're already using, set it
            if best_window != set_window:
                self.set_vScale(best_window / 8.0)
                print('Disabling averaging')
                self.set_averaging(False)
                time.sleep(1)
                print('Enabling averaging, setting to ' + str(averages))
                self.set_averaging(averages)
                time.sleep(1)
                return self.get_waveform_autoRange(averages=averages)
        return [xs, ys]

    def set_waveformParams(self, encoding='RPBinary', start=0, stop=2500, width=1):
        """Configure waveform transfer parameters for this channel.

        encoding: 'ASCII' or binary 'RPBinary' (ASCII is easiest but slowest)
        start:    first data point to transfer (0-2499)
        stop:     last data point to transfer (1-2500)
        width:    bytes per data point to transfer (default 1)
        """
        self.issueCommand("DATA:SOUrce CH" + str(self.channel),
                          "Setting data source to channel " + str(self.channel),
                          False)
        if encoding == 'ASCII':
            self.issueCommand("DATA:ENCdg ASCIi", "Setting data encoding to ASCII", False)
            self.encoding = 'ASCII'
        else:
            self.issueCommand("DATA:ENCdg RPBinary", "Setting data encoding to RPBinary", False)
            self.encoding = 'RPBinary'
        self.issueCommand("DATA:STARt " + str(start), "Setting start data point to " + str(start), False)
        self.issueCommand("DATA:STOP " + str(stop), "Setting stop data point to " + str(stop), False)
        self.issueCommand("DATA:WIDTH " + str(width), "Setting of bytes to transfer per waveform point to " + str(width), False)
        self.checkComplete()

    def get_waveform(self, debug=False, wait=True):
        """Download this channel's waveform; returns [data_x, data_y].

        No V/div adjustment is performed here.  BUG FIX: in-file callers
        (get_waveform_autoRange, get_channels_autoRange) pass ``debug``
        and ``wait`` arguments which the old zero-argument signature
        rejected with a TypeError; both are now accepted.  They are
        currently not acted upon — TODO: wire ``wait`` through to
        waitForAcquisitions if pre-capture waiting is wanted here.
        """
        self.issueCommand("DAT:ENC RPB")
        self.issueCommand("DATA:SOUrce CH" + str(self.channel),
                          "Setting data source to channel " + str(self.channel))
        self.input_range = self.get_yScale()
        encoding = self.inst.query("WFMPre:ENCdg?")
        self.y_offset = float(self.inst.query("WFMP:YOF?"))
        self.y_mult = float(self.inst.query("WFMP:YMU?"))
        self.y_zero = float(self.inst.query("WFMP:YZE?"))
        self.x_zero = float(self.inst.query("WFMPre:XZE?")) or 0
        self.x_incr = float(self.inst.query("WFMP:XIN?"))
        self.inst.write("CURVE?")
        ans = self.inst.read(raw=True)[: -1]  # drop trailing newline
        num_bytes = int(ans[1])  # length of the point-count header field
        # NOTE(review): the record length is hard-coded; the header parse
        # (int(ans[2: 2 + num_bytes])) was disabled — confirm against the
        # instrument before relying on the mismatch warning below.
        self.x_num = 10000
        tmp = ans[7:]  # signal payload
        if encoding == 'ascii':
            out = tmp.split(":CURVE ")[1]
            data = out.split(',')
        else:
            data = np.frombuffer(tmp, dtype=np.uint8)
        self.signal_raw = data
        # Scale raw 8-bit codes to volts, and build the time axis.
        data_y = ((data - self.y_offset) * self.y_mult) + self.y_zero
        data_x = self.x_zero + np.arange(data.size) * self.x_incr
        if self.x_num != data.size:
            print("======================================================")
            print("WARNING: Data payload was stated as " + str(self.x_num) + " points")
            print("but " + str(data.size) + " points were returned for CH" + str(self.channel))
            print("======================================================")
        return [data_x, data_y]
| [
11748,
25064,
198,
11748,
11389,
198,
11748,
640,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
4866,
628,
198,
28311,
25,
198,
220,
220,
220,
422,
2956,
297,
571,
13,
29572,
1330,
9577,
198,
220,
220,
220,
422,
2956,
297,
571,
13,
... | 2.117028 | 11,023 |
from .aggregation import aggregate_with_sparse
from .array_mapper import ArrayMapper
from .indexers import Indexer, Proxy
from bw_processing import DatapackageBase
from stats_arrays import MCRandomNumberGenerator
from typing import Any, Union, Callable
import numpy as np
class ResourceGroup:
"""A class that handles a resource group - a collection of files in data package which define one matrix. A resource group can contain the following:
* data array or interface (required)
* indices structured array (required)
* flip array (optional)
* csv metadata (optional)
* json metadata (optional)
After instantiation, the ``MappedMatrix`` class will add an indexer (an instance of ``matrix_utils.indexers.Indexer``), using either ``add_indexer`` or ``add_combinatorial_indexer``. It will also add one or two array mappers, using ``add_mapper``. Only one mapper is needed if the matrix is diagonal.
One easy source of confusion is the difference between ``.row_original`` and ``.row``. Both of these are one-dimensional vectors which have matrix row indices (i.e. they have already been mapped). ``.row_original`` are the values as given in the data package, whereas ``.row`` are the values actually used in matrix data insertion (and the same for column indices).There are two possible modifications that can be applied from ``.row_original`` to ``.row``; first, we can have internal aggregation, which will shrink the size of the row or column indices, as duplicate elements are eliminated. Second, if the array mapper was already instantiated, we might need to delete some elements from the row or column indices, as these values are not used in this calculation. For example, in life cycle assessment, LCIA method implementations often contain characterization factors for flows not present in the biosphere (as they were not used by an of the activities). In this case, we would need to eliminate these factors, as our characterization matrix must match exactly to the biosphere matrix already built.
Here is an example for row indices:
.. code-block:: python
row_input_indices = [0, 17, 99, 42, 17]
row_original = [0, 1, -1, 2, 1]
after_aggregation = [0, 1, -1, 2] # -1 is missing data point
after_masking = [0, 1, 2]
row = [0, 1, 2]
Any data coming into this class, with through instantiation or via method calls such as ``.calculate``, should follow the length and order of ``.row_original``.
The current data, as entered into the matrix, is given by ``.current_data``.
"""
@property
@property
@property
def raw_flip(self):
"""The source data for the flip array."""
return self.get_resource_by_suffix("flip")
@property
def flip(self):
"""The flip array, with the custom filter mask applied if necessary."""
if self.filter_mask is None:
return self.raw_flip
else:
return self.raw_flip[self.filter_mask]
@property
def raw_indices(self):
"""The source data for the indices array."""
return self.get_resource_by_suffix("indices")
@property
def indices(self):
"""The indices array, with the custom filter mask applied if necessary."""
if self.filter_mask is None:
return self.raw_indices
else:
return self.raw_indices[self.filter_mask]
def is_vector(self) -> bool:
"""Determine if this is a vector or array resource"""
metadata = self.package.get_resource(self.label + ".data")[1]
return metadata.get("category") == "vector"
@property
def build_mask(self, row, col):
"""Build boolean array mask where ``False`` means that a data element is not present, and should be ignored. See discussion above."""
mask = (row != -1) * (col != -1)
if (~mask).sum():
return mask
else:
return None
def unique_row_indices(self):
"""Return array of unique indices that respect aggregation policy"""
return np.unique(self.indices["row"])
def unique_col_indices(self):
"""Return array of unique indices that respect aggregation policy"""
return np.unique(self.indices["col"])
def calculate(self, vector: np.ndarray = None):
"""Generate row and column indices and a data vector. If ``.data`` is an iterator, draw the next value. If ``.data`` is an array, use the column given by ``.indexer``.
``vector`` is an optional input that overrides the data. It must be in the same order and have the same length as the data package indices (before possible aggregation and masking); see discussion above.
"""
if self.empty:
self.current_data = np.array([])
return self.row, self.col, self.current_data
if vector is not None:
data = vector
elif self.vector:
if self.use_distributions:
data = next(self.rng)
else:
try:
data = next(self.data)
except TypeError:
data = self.data
else:
data = self.data[:, self.indexer.index % self.ncols]
# Copy to avoid modifying original data
data = data.copy()
if self.filter_mask is not None:
data = data[self.filter_mask]
try:
data[self.flip] *= -1
except KeyError:
# No flip array
pass
# Second copy because we want to store the data before aggregation
self.current_data = data.copy()
if self.aggregate:
if self.mask is not None:
return aggregate_with_sparse(
self.row_original[self.mask],
self.col_original[self.mask],
data[self.mask],
self.count,
)
else:
return aggregate_with_sparse(
self.row_original, self.col_original, data, self.count
)
else:
return self.row, self.col, data[self.mask]
| [
6738,
764,
9460,
43068,
1330,
19406,
62,
4480,
62,
82,
29572,
198,
6738,
764,
18747,
62,
76,
11463,
1330,
15690,
44,
11463,
198,
6738,
764,
9630,
364,
1330,
12901,
263,
11,
38027,
198,
6738,
275,
86,
62,
36948,
1330,
16092,
499,
441,
... | 2.6835 | 2,297 |
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry import decorators
from telemetry.page import page as page_module
from telemetry.testing import browser_test_case
from telemetry.testing import options_for_unittests
from telemetry.testing import page_test_test_case
from telemetry.timeline import chrome_trace_category_filter
from telemetry.util import wpr_modes
from telemetry.web_perf import timeline_based_measurement as tbm_module
from telemetry.web_perf.metrics import gpu_timeline
from telemetry.web_perf.metrics import smoothness
| [
2,
15069,
1853,
383,
18255,
1505,
46665,
13,
1439,
2489,
10395,
13,
198,
2,
5765,
286,
428,
2723,
2438,
318,
21825,
416,
257,
347,
10305,
12,
7635,
5964,
326,
460,
307,
198,
2,
1043,
287,
262,
38559,
24290,
2393,
13,
198,
198,
6738,... | 3.63388 | 183 |
from collections import defaultdict
class LazyDefaultDict(defaultdict):
"""
LazyDefaultDict(default_factory[, ...]) --> dict with default factory
The default factory is call with the key argument to produce
a new value when a key is not present, in __getitem__ only.
A LazyDefaultDict compares equal to a dict with the same items.
All remaining arguments are treated the same as if they were
passed to the dict constructor, including keyword arguments.
"""
def __missing__(self, key):
"""
__missing__(key) # Called by __getitem__ for missing key; pseudo-code:
if self.default_factory is None: raise KeyError((key,))
self[key] = value = self.default_factory(key)
return value
"""
self[key] = self.default_factory(key)
return self[key]
| [
6738,
17268,
1330,
4277,
11600,
628,
198,
4871,
406,
12582,
19463,
35,
713,
7,
12286,
11600,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
406,
12582,
19463,
35,
713,
7,
12286,
62,
69,
9548,
58,
11,
2644,
12962,
14610,
8633,
... | 2.851351 | 296 |
import time
import numpy as np
import io
import struct
import os
import ctypes
import sys
from .fast_file import *
from .dxgi_97 import process_image_97
from .dxgi_94_95_96 import process_image_94_95_96
from .dxgi_types import *
from numba import njit
process_image_func = None
c_process_image_lib = None
c_process_image_func = None
# https://docs.microsoft.com/en-us/windows/desktop/direct3d9/opaque-and-1-bit-alpha-textures
# https://msdn.microsoft.com/ja-jp/library/bb173059(v=vs.85).aspx
# https://docs.microsoft.com/en-us/windows/win32/api/dxgiformat/ne-dxgiformat-dxgi_format
# less than 32 bit floats
# https://docs.microsoft.com/en-us/windows/win32/direct3d10/d3d10-graphics-programming-guide-resources-float-rules
@njit(inline='always')
@njit(inline='always')
@njit(inline='always')
@njit(inline='always')
@njit
@njit
@njit
@njit
@njit
@njit
@njit
@njit
@njit
@njit(fastmath=True)
@njit
@njit
| [
11748,
640,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
33245,
198,
11748,
2878,
198,
11748,
28686,
198,
11748,
269,
19199,
198,
11748,
25064,
198,
6738,
764,
7217,
62,
7753,
1330,
1635,
198,
6738,
764,
34350,
12397,
62,
5607,
1330,
... | 2.525469 | 373 |
# 入力
N = int(input())
A = list(map(int, input().split()))
S = sum(A)
# A_{i + 1} - A_i から解を求める
ans = (
'YES' if (2 * S) % (N * (N + 1)) == 0 and all(
k % N == 0 and k >= 0
for k in (
(2 * S) // (N * (N + 1)) - (A[i] - A[i - 1])
for i in range(N)
)
) else
'NO'
)
# 出力
print(ans)
| [
2,
10263,
227,
98,
27950,
249,
198,
45,
796,
493,
7,
15414,
28955,
198,
32,
796,
1351,
7,
8899,
7,
600,
11,
5128,
22446,
35312,
3419,
4008,
198,
198,
50,
796,
2160,
7,
32,
8,
198,
198,
2,
317,
23330,
72,
1343,
352,
92,
532,
31... | 1.571429 | 217 |
from conans import ConanFile, CMake, tools
| [
6738,
369,
504,
1330,
31634,
8979,
11,
327,
12050,
11,
4899,
628
] | 3.666667 | 12 |
import pytest
import deepdiff
import typing
from sqlalchemy.orm import Session
from mlrun.api import schemas
from mlrun.api.db.base import DBInterface
from mlrun.api.db.sqldb.models import (
_classes,
Function,
Project,
Run,
Artifact,
FeatureSet,
Feature,
Entity,
Schedule,
)
from tests.api.db.conftest import dbs
# running only on sqldb cause filedb is not really a thing anymore, will be removed soon
@pytest.mark.parametrize(
"db,db_session", [(dbs[0], dbs[0])], indirect=["db", "db_session"]
)
| [
11748,
12972,
9288,
198,
11748,
2769,
26069,
198,
11748,
19720,
198,
6738,
44161,
282,
26599,
13,
579,
1330,
23575,
198,
198,
6738,
25962,
5143,
13,
15042,
1330,
3897,
5356,
198,
6738,
25962,
5143,
13,
15042,
13,
9945,
13,
8692,
1330,
2... | 2.688119 | 202 |
import time
import requests
import click
from . import decorators
from .. import jenkins_api
from .. import utils
RESULT_COLORS = {
'SUCCESS': 'green',
'UNSTABLE': 'yellow',
'FAILURE': 'red',
}
@click.command()
@click.option('--block/--no-block', default=False, help='Block until builds '
'are done and show their outcome.')
@decorators.build_command
@decorators.repos_command
@decorators.jobs_command(dirty_flag=True)
@decorators.handle_all_errors()
@click.pass_context
def build(context, jobs_names, base_dir, block, build_parameters):
"""
Trigger builds for JOBS.
"""
session = jenkins_api.auth(base_dir)
exit_code = do_build(session, jobs_names, base_dir, block,
build_parameters)
context.exit(exit_code)
def trigger_builds(session, jobs_names, base_dir, parameters):
"""
Trigger builds for *jobs_names*.
Return a *queue_urls*, which can be passed to :func:`wait_for_builds` to
wait for jobs completion.
"""
queue_urls = {}
for name in jobs_names:
queue_url = jenkins_api.build_job(session, name, parameters)
queue_urls[name] = queue_url
return queue_urls
def wait_for_builds(session, queue_urls):
"""
Wait until builds corresponding to *queue_urls* are done.
Return a dict indexed by job names, containing ``(build_url, result,
runs_urls)`` tuples.
*build_url* is the location of the build, e.g.
"http://jenkins.example.com/job/myjob/51", and *result* a string
representing the build result ("SUCCESS", "UNSTABLE" or "FAILURE").
*runs_urls* is a (possibly empty) list of sub runs URLs for multi
configuration projects.
"""
builds_urls = _get_builds_urls(session, queue_urls)
return _poll_builds(session, builds_urls)
def print_build_result(session, base_dir, job_name, build_url, result=None,
runs_urls=None, prefix='', suffix='',
only_log_failures=True):
"""
Print build results of a job.
"""
# Get result and/or runs URLs if not given in arguments
if result is None or runs_urls is None:
build_infos = jenkins_api.get_object(session, build_url)
if result is None:
result = build_infos['result']
if runs_urls is None:
runs_urls = _get_runs_urls(build_infos)
# A null result means the build is in progress
if result is None:
click.secho('%s%s: build is in progress%s' %
(prefix, job_name, suffix), fg='yellow')
# Print results
color = RESULT_COLORS[result]
click.secho('%s%s: %s%s' % (prefix, job_name, result.lower(), suffix),
fg=color)
if not only_log_failures or result != 'SUCCESS':
if not runs_urls:
log = jenkins_api.get_build_log(session, build_url)
_print_marker('Beginning of "%s" logs' % job_name)
print(log.rstrip())
_print_marker('End of "%s" logs' % job_name)
for run_url in runs_urls:
run_info = jenkins_api.get_object(session, run_url)
print_build_result(session,
base_dir,
run_info['fullDisplayName'],
run_url,
run_info['result'],
[],
prefix=' ',
only_log_failures=only_log_failures)
| [
11748,
640,
198,
198,
11748,
7007,
198,
11748,
3904,
198,
198,
6738,
764,
1330,
11705,
2024,
198,
6738,
11485,
1330,
474,
268,
5331,
62,
15042,
198,
6738,
11485,
1330,
3384,
4487,
628,
198,
19535,
16724,
62,
25154,
20673,
796,
1391,
198... | 2.207247 | 1,573 |
__version__ = "9.3.0"
| [
834,
9641,
834,
796,
366,
24,
13,
18,
13,
15,
1,
198
] | 1.833333 | 12 |
from django.conf import settings
from django.contrib.sites.shortcuts import get_current_site
from django.core.mail import send_mail
from django.forms.models import BaseModelFormSet, modelformset_factory
from django.template import loader
from django.utils.crypto import get_random_string
from django.utils.translation import ugettext_lazy as _
import floppyforms as forms
from argus.models import (Group, Transaction, Party, Share, Category,
URL_SAFE_CHARS)
from argus.tokens import token_generators
GroupCreateFormSet = modelformset_factory(
Party,
form=PartyForm,
formset=BaseGroupCreateFormSet,
extra=0,
min_num=1,
fields=('name',))
| [
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
49315,
13,
19509,
23779,
1330,
651,
62,
14421,
62,
15654,
198,
6738,
42625,
14208,
13,
7295,
13,
4529,
1330,
3758,
62,
4529,
198,
6738,
42625,
14208,... | 2.856557 | 244 |
# -*- python2 -*-
#
# Copyright 2010 The Native Client Authors. All rights reserved. Use
# of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'includes': [
'../../../build/common.gypi',
],
'target_defaults': {
'variables':{
'target_base': 'none',
},
'target_conditions': [
['target_base=="nacl_base"', {
'sources': [
'nacl_refcount.h',
'nacl_refcount.c',
],
'xcode_settings': {
'WARNING_CFLAGS': [
'-fno-strict-aliasing',
'-Wno-missing-field-initializers'
]
},
},
]],
},
'conditions': [
['OS=="win" and target_arch=="ia32"', {
'targets': [
{
'target_name': 'nacl_base64',
'type': 'static_library',
'variables': {
'target_base': 'nacl_base',
'win_target': 'x64',
},
'dependencies': [
'<(DEPTH)/native_client/src/shared/imc/imc.gyp:imc64',
'<(DEPTH)/native_client/src/shared/platform/platform.gyp:platform64',
],
},
],
}],
],
'targets': [
{
'target_name': 'nacl_base',
'type': 'static_library', # 'dynamic_library', ?!?
'variables': {
'target_base': 'nacl_base',
},
'dependencies': [
'<(DEPTH)/native_client/src/shared/platform/platform.gyp:platform',
],
},
],
}
| [
2,
532,
9,
12,
21015,
17,
532,
9,
12,
198,
2,
198,
2,
15069,
3050,
383,
12547,
20985,
46665,
13,
220,
1439,
2489,
10395,
13,
220,
5765,
198,
2,
286,
428,
2723,
2438,
318,
21825,
416,
257,
347,
10305,
12,
7635,
5964,
326,
460,
30... | 1.930171 | 759 |
__all__ = [
"hsl_to_rgb",
"hue_to_value",
"map",
]
| [
628,
198,
198,
834,
439,
834,
796,
685,
198,
220,
220,
220,
366,
71,
6649,
62,
1462,
62,
81,
22296,
1600,
198,
220,
220,
220,
366,
71,
518,
62,
1462,
62,
8367,
1600,
198,
220,
220,
220,
366,
8899,
1600,
198,
60,
198
] | 1.55814 | 43 |
from one.api import ONE
from ibllib.atlas import atlas
from brainbox.io.one import load_channels_from_insertion
pid = "8413c5c6-b42b-4ec6-b751-881a54413628"
ba = atlas.AllenAtlas()
xyz = load_channels_from_insertion(ONE().alyx.rest('insertions', 'read', id=pid), ba=ba)
| [
6738,
530,
13,
15042,
1330,
16329,
198,
198,
6738,
24283,
297,
571,
13,
265,
21921,
1330,
379,
21921,
198,
6738,
3632,
3524,
13,
952,
13,
505,
1330,
3440,
62,
354,
8961,
62,
6738,
62,
28463,
295,
198,
198,
35317,
796,
366,
5705,
148... | 2.415929 | 113 |
#!/usr/bin/env python2.7
# -*- coding:UTF-8 -*-2
u"""install.py
Copyright (c) 2019 Yukio Kuro
This software is released under BSD license.
Linux用インストーラ。
"""
import os as __os
import sys as __sys
import shutil as __shutil
__shell_script = __os.path.join(__sys.exec_prefix, "games", "starseeker")
__icon = __os.path.join(__sys.exec_prefix, "share", "icons", "starseeker.png")
__source = __os.path.join(__sys.exec_prefix, "share", "games", "StarSeeker")
__desktop_entry = __os.path.join(
__sys.exec_prefix, "share", "applications", "StarSeeker.desktop")
def __install():
u"""ゲームをインストール。
"""
print "Start installation."
print "---- Set shell script ----"
startup_file = __os.path.join("linux", "startup.sh")
__os.chmod(startup_file, 0o755)
__shutil.copy(startup_file, __shell_script)
print "---- Set icon ----"
icon_file = __os.path.join("linux", "icon.png")
__os.chmod(icon_file, 0o644)
__shutil.copy(icon_file, __icon)
print "---- Set source ----"
for root, dirs, files in __os.walk("Source"):
for dir_ in dirs:
__os.chmod(__os.path.join(root, dir_), 0o755)
for file_ in files:
__os.chmod(__os.path.join(root, file_), 0o755)
if __os.path.exists(__source):
__shutil.rmtree(__source)
__shutil.copytree("Source", __source)
print "---- Set desktop entry ----"
entry_file = __os.path.join("linux", "Entry.desktop")
__os.chmod(entry_file, 0o644)
__shutil.copy(entry_file, __desktop_entry)
print "Installation is finished."
def __uninstall():
u"""ゲームをアンインストール。
"""
print "Start uninstallation."
print "---- Remove shell script ----"
try:
__os.remove(__shell_script)
except OSError:
print "Shell script does not exsit."
print "---- Remove icon ----"
try:
__os.remove(__icon)
except OSError:
print "Icon does not exsit."
print "---- Remove source ----"
if __os.path.exists(__source):
__shutil.rmtree(__source)
else:
print "Source does not exsit."
print "---- Remove desktop entry ----"
try:
__os.remove(__desktop_entry)
except OSError:
print "Desktop entry does not exsit."
print "Uninstallation is finished."
if __name__ == '__main__':
if 1 < len(__sys.argv) and __sys.argv[-1] == "-u":
__uninstall()
else:
__install()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
17,
13,
22,
198,
2,
532,
9,
12,
19617,
25,
48504,
12,
23,
532,
9,
12,
17,
198,
84,
37811,
17350,
13,
9078,
198,
198,
15269,
357,
66,
8,
13130,
19760,
952,
25796,
198,
1212,
3788,
318,... | 2.342439 | 1,025 |
# HeraldBot - polling notification bot for Discord
# Written in 2018 by Sam Hanes <sam@maltera.com>
#
# To the extent possible under law, the author(s) have dedicated all
# copyright and related and neighboring rights to this software to the
# public domain worldwide. This software is distributed without any warranty.
#
# You should have received a copy of the CC0 Public Domain Dedication
# along with this software. If not, see
# <http://creativecommons.org/publicdomain/zero/1.0/>.
import argparse
import configparser
import logging
import os
import sys
from .server import BotServer
LOG = logging.getLogger('heraldbot')
| [
2,
18277,
20630,
532,
13985,
14483,
10214,
329,
39462,
198,
2,
22503,
287,
2864,
416,
3409,
367,
7305,
1279,
37687,
31,
7617,
49600,
13,
785,
29,
198,
2,
198,
2,
1675,
262,
6287,
1744,
739,
1099,
11,
262,
1772,
7,
82,
8,
423,
7256... | 3.835366 | 164 |
from typing import Dict, Type
from .common import DataFetcher
from .pypi import PypiDataFetcher
from .crates import CratesDataFetcher
from .cran import CranDataFetcher
KNOWN_FETCHERS: Dict[str, Type[DataFetcher]] = {
"pypi": PypiDataFetcher,
"crates": CratesDataFetcher,
"cran": CranDataFetcher,
}
__all__ = [
"PypiDataFetcher",
"CratesDataFetcher",
"CranDataFetcher",
"KNOWN_FETCHERS",
"DataFetcher",
]
| [
6738,
19720,
1330,
360,
713,
11,
5994,
198,
198,
6738,
764,
11321,
1330,
6060,
37,
316,
2044,
198,
6738,
764,
79,
4464,
72,
1330,
350,
4464,
72,
6601,
37,
316,
2044,
198,
6738,
764,
6098,
689,
1330,
3864,
689,
6601,
37,
316,
2044,
... | 2.268041 | 194 |
# Time: O(m * n)
# Space: O(1)
# 840
# A 3 x 3 magic square is a 3 x 3 grid filled with
# distinct numbers from 1 to 9 such that each row, column,
# and both diagonals all have the same sum.
#
# Given an grid of integers, how many 3 x 3 "magic square" subgrids are there?
# (Each subgrid is contiguous).
#
# Example 1:
#
# Input: [[4,3,8,4],
# [9,5,1,9],
# [2,7,6,2]]
# Output: 1
# Explanation:
# The following subgrid is a 3 x 3 magic square:
# 438
# 951
# 276
#
# while this one is not:
# 384
# 519
# 762
#
# In total, there is only one magic square inside the given grid.
# Note:
# - 1 <= grid.length <= 10
# - 1 <= grid[0].length <= 10
# - 0 <= grid[i][j] <= 15
try:
xrange # Python 2
except NameError:
xrange = range # Python 3
print(Solution().numMagicSquaresInside([[4,3,8,4],[9,5,1,9],[2,7,6,2]])) | [
2,
3862,
25,
220,
440,
7,
76,
1635,
299,
8,
198,
2,
4687,
25,
440,
7,
16,
8,
198,
198,
2,
48777,
198,
2,
317,
513,
2124,
513,
5536,
6616,
318,
257,
513,
2124,
513,
10706,
5901,
351,
198,
2,
7310,
3146,
422,
352,
284,
860,
88... | 2.411429 | 350 |
import setuptools
setuptools.setup(
name="gabri",
version="0.0.2",
author="Jenisha Thankaraj",
packages=setuptools.find_packages(),
python_requires='>=3.6.8',
install_requires=[
"tqdm~=4.48.2",
"matplotlib~=3.2.2",
"tensorboard~=2.2.0",
"albumentations~=0.4.6",
"opencv-python~=4.4.0.42",
"pytorch-lightning~=1.0.0",
"ipympl~=0.5.8"
]
)
| [
11748,
900,
37623,
10141,
198,
198,
2617,
37623,
10141,
13,
40406,
7,
198,
220,
220,
220,
1438,
2625,
70,
397,
380,
1600,
198,
220,
220,
220,
2196,
2625,
15,
13,
15,
13,
17,
1600,
198,
220,
220,
220,
1772,
2625,
44875,
19388,
6952,
... | 1.76569 | 239 |
from dataclasses import dataclass
from typing import Optional, Dict, List
@dataclass(frozen=True)
@dataclass(frozen=True)
@dataclass(frozen=True)
@dataclass(frozen=True)
@dataclass(frozen=True)
| [
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
198,
6738,
19720,
1330,
32233,
11,
360,
713,
11,
7343,
628,
198,
31,
19608,
330,
31172,
7,
69,
42005,
28,
17821,
8,
628,
198,
31,
19608,
330,
31172,
7,
69,
42005,
28,
17821,
8,
628,
... | 2.582278 | 79 |
from nose.tools import timed
@timed(1)
@timed(1)
@timed(1)
| [
6738,
9686,
13,
31391,
1330,
28805,
628,
628,
198,
31,
16514,
276,
7,
16,
8,
628,
198,
31,
16514,
276,
7,
16,
8,
628,
198,
31,
16514,
276,
7,
16,
8,
198
] | 2.09375 | 32 |
# -*- coding: utf-8 -*-
import logging
import pada
TRACE = 7
SIMPLE_LOG_FORMAT = r'%(levelname)s - %(message)s'
DETAIL_LOG_FORMAT = r'[%(asctime)s] {%(name)s: %(filename)s:%(lineno)d} %(levelname)s - %(message)s' # noqa E501
logging.addLevelName(TRACE, 'TRACE')
logger = logging.getLogger(pada.__name__)
_handler = None | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
18931,
198,
198,
11748,
279,
4763,
198,
198,
5446,
11598,
796,
767,
198,
48913,
16437,
62,
25294,
62,
21389,
1404,
796,
374,
6,
4,
7,
5715,
3672,
8,
82,
532,
... | 2.174497 | 149 |
#BINARY TO NUMBER
b=str(input("ENTER THE BINARY :"))
l=len(b)
c=0
for a in b:
x=int(a)
l=l-1
if (x==1):
power=pow(2,(l))
print("2^",(l),"=",power)
c=c+power
print("SUM =",c) | [
2,
33,
1268,
13153,
5390,
36871,
13246,
201,
198,
201,
198,
65,
28,
2536,
7,
15414,
7203,
3525,
1137,
3336,
347,
1268,
13153,
1058,
48774,
201,
198,
75,
28,
11925,
7,
65,
8,
201,
198,
66,
28,
15,
201,
198,
1640,
257,
287,
275,
2... | 1.574468 | 141 |
#!../venv/bin/python
from vsvlandb import dbo
from vsvlandb.models import VLAN, Subnet, Site | [
2,
0,
40720,
574,
85,
14,
8800,
14,
29412,
198,
198,
6738,
3691,
85,
1044,
65,
1330,
288,
2127,
198,
6738,
3691,
85,
1044,
65,
13,
27530,
1330,
569,
25697,
11,
3834,
3262,
11,
14413
] | 2.657143 | 35 |
import re
import sys
import logging
import subprocess
from pathlib import Path
from typing import Optional
from pydantic import BaseModel, SecretStr, BaseSettings
logging.basicConfig(level=logging.INFO)
settings = Settings() # type: ignore
logging.debug(f"Settings: {settings.json()}")
if not settings.input_changelog_file.is_file():
logging.error(f"Input changelog file not found: {settings.input_changelog_file}")
sys.exit(1)
source = settings.input_changelog_file.read_text()
new_content: str
if settings.input_archive_title:
new_content, replaced = re.subn(
settings.input_archive_regex, settings.input_archive_title, source, count=1
)
if not replaced:
logging.error(f"Archive title not found: {replaced}")
sys.exit(0)
elif settings.input_changelog_body:
new_content, replaced = re.subn(
settings.input_replace_regex,
"\n" + settings.input_changelog_body.strip("\n") + "\n",
source,
count=1,
)
if not replaced:
logging.warning(f"Changelog body not found: {replaced}, insert new one")
match = re.search(settings.input_latest_changes_position, source)
if not match:
logging.error(f"Latest changes position not found: {match}")
sys.exit(1)
pre_content = source[: match.end()]
post_content = source[match.end() :]
new_content = (
pre_content
+ settings.input_latest_changes_title
+ "\n\n"
+ settings.input_changelog_body.strip("\n")
+ "\n\n"
+ post_content
)
else:
logging.error("Error input changelog body or archive title")
sys.exit(1)
settings.input_changelog_file.write_text(new_content)
if settings.input_commit_and_push:
logging.info(f"Committing changes to: {settings.input_changelog_file}")
subprocess.run(["git", "config", "user.name", "github-actions[bot]"], check=True)
subprocess.run(
["git", "config", "user.email", "github-actions[bot]@users.noreply.github.com"],
check=True,
)
subprocess.run(["git", "add", str(settings.input_changelog_file)], check=True)
subprocess.run(["git", "commit", "-m", ":memo: Update changelog"], check=True)
logging.info(f"Pushing changes: {settings.input_changelog_file}")
subprocess.run(["git", "push"], check=True)
logging.info("Finished")
| [
11748,
302,
198,
11748,
25064,
198,
11748,
18931,
198,
11748,
850,
14681,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
19720,
1330,
32233,
198,
198,
6738,
279,
5173,
5109,
1330,
7308,
17633,
11,
3943,
13290,
11,
7308,
26232,
628,
198,
... | 2.440367 | 981 |
# idea of count diag element (i,j) which
# sum is 4, 7, 10...
# and note R=2 special case
solution()
| [
2,
2126,
286,
954,
2566,
363,
5002,
357,
72,
11,
73,
8,
543,
198,
2,
2160,
318,
604,
11,
767,
11,
838,
986,
198,
2,
290,
3465,
371,
28,
17,
2041,
1339,
198,
82,
2122,
3419,
198
] | 2.72973 | 37 |
# -*- coding: utf-8 -*-
"""
Exodus Add-on
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import urllib
import urlparse
import json
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import source_utils
from resources.lib.modules import dom_parser
from resources.lib.modules import tvmaze
########################
# Credits to evgen_dev #
########################
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
198,
220,
220,
220,
38557,
3060,
12,
261,
628,
220,
220,
220,
770,
1430,
318,
1479,
3788,
25,
345,
460,
17678,
4163,
340,
290,
14,
273,
13096,
198,
220,
... | 3.518152 | 303 |
# Generated by Django 2.0 on 2019-06-25 16:03
import ckeditor.fields
from django.db import migrations, models
import django.db.models.deletion
import users.validators
| [
2,
2980,
515,
416,
37770,
362,
13,
15,
319,
13130,
12,
3312,
12,
1495,
1467,
25,
3070,
198,
198,
11748,
269,
9091,
2072,
13,
25747,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14208,
13,
9945,
1... | 3.072727 | 55 |
# ref - https://matplotlib.org/gallery/lines_bars_and_markers/psd_demo.html#sphx-glr-gallery-lines-bars-and-markers-psd-demo-py
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.mlab as mlab
import matplotlib.gridspec as gridspec
# Fixing random state for reproducibility
np.random.seed(19680801)
dt = 0.01
t = np.arange(0, 10, dt)
nse = np.random.randn(len(t))
r = np.exp(-t / 0.05)
cnse = np.convolve(nse, r) * dt
cnse = cnse[:len(t)]
s = 0.1 * np.sin(2 * np.pi * t) + cnse
plt.subplot(211)
plt.plot(t, s)
plt.subplot(212)
plt.psd(s, 512, 1 / dt)
plt.show()
| [
2,
1006,
532,
3740,
1378,
6759,
29487,
8019,
13,
2398,
14,
24460,
14,
6615,
62,
34046,
62,
392,
62,
4102,
364,
14,
862,
67,
62,
9536,
78,
13,
6494,
2,
82,
746,
87,
12,
4743,
81,
12,
24460,
12,
6615,
12,
34046,
12,
392,
12,
410... | 2.127273 | 275 |
# -*- coding: utf-8 -*-
"""Top-level package for Elejandria Libros chef."""
__author__ = """Learning Equality"""
__email__ = 'benjamin@learningequality.org'
__version__ = '0.1.0'
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
9126,
12,
5715,
5301,
329,
15987,
73,
392,
7496,
7980,
4951,
21221,
526,
15931,
198,
198,
834,
9800,
834,
796,
37227,
41730,
31428,
37811,
198,
834,
12888,
8... | 2.701493 | 67 |
import zipfile
import os
from os.path import dirname, join
from aequilibrae import Project
def create_example(path: str, from_model='sioux_falls') -> Project:
"""Copies an example model to a new project project and returns the project handle
Args:
*path* (:obj:`str`): Path where to create a new model. must be a non-existing folder/directory.
*from_model path* (:obj:`str`, `Optional`): Example to create from *sioux_falls* or *nauru*. Defaults to
*sioux_falls*
Returns:
*project* (:obj:`Project`): Aequilibrae Project handle (open)
"""
if os.path.isdir(path):
raise FileExistsError('Cannot overwrite an existing directory')
if not os.path.isfile(join(dirname(__file__), f'../reference_files/{from_model}.zip')):
raise FileExistsError('Example not found')
os.mkdir(path)
zipfile.ZipFile(join(dirname(__file__), f'../reference_files/{from_model}.zip')).extractall(path)
proj = Project()
proj.open(path)
return proj
| [
11748,
19974,
7753,
198,
11748,
28686,
198,
6738,
28686,
13,
6978,
1330,
26672,
3672,
11,
4654,
198,
6738,
257,
4853,
22282,
430,
68,
1330,
4935,
628,
198,
4299,
2251,
62,
20688,
7,
6978,
25,
965,
11,
422,
62,
19849,
11639,
13396,
221... | 2.662304 | 382 |
import random
import numpy as np
import bob.pipelines as mario
from bob.pipelines import Sample, SampleSet
from bob.pipelines.utils import flatten_samplesets
| [
11748,
4738,
198,
198,
11748,
299,
32152,
355,
45941,
198,
198,
11748,
29202,
13,
79,
541,
20655,
355,
1667,
952,
198,
198,
6738,
29202,
13,
79,
541,
20655,
1330,
27565,
11,
27565,
7248,
198,
6738,
29202,
13,
79,
541,
20655,
13,
26791... | 3.196078 | 51 |
# Exercise 50 - Text Input
age = int(input("What's your age? "))
age_last_year = age - 1
print("Last year you were %s." % age_last_year) | [
2,
32900,
2026,
532,
8255,
23412,
198,
496,
796,
493,
7,
15414,
7203,
2061,
338,
534,
2479,
30,
366,
4008,
198,
496,
62,
12957,
62,
1941,
796,
2479,
532,
352,
198,
4798,
7203,
5956,
614,
345,
547,
4064,
82,
526,
4064,
2479,
62,
12... | 2.893617 | 47 |
import sys
from cStringIO import StringIO
from tests.testcase import CoverageReporterTestCase
EXPECTED_REPORT = """\
Name Stmts Exec Miss Cover
---------------------------------------------------------------
bar 10 10 0 100.00
foo 80 70 10 87.50
long_name_foo_another_long_name 10 0 10 0.00
---------------------------------------------------------------
TOTAL 100 80 20 80.00
"""
| [
11748,
25064,
198,
6738,
269,
10100,
9399,
1330,
10903,
9399,
198,
198,
6738,
5254,
13,
9288,
7442,
1330,
33998,
6207,
4337,
14402,
20448,
198,
198,
49864,
9782,
1961,
62,
2200,
15490,
796,
37227,
59,
198,
5376,
220,
220,
220,
220,
220,... | 2.024823 | 282 |
from PyQt5 import QtGui
from brown.interface.interface import Interface
class BrushInterface(Interface):
"""Interface for a generic drawing brush controlling fill patterns.
Currently only solid colors are supported.
"""
def __init__(self, brown_object, color, pattern):
"""
Args:
brown_object (Brush): The object this interface belongs to
color (Color): The color of the brush.
pattern (BrushPattern): The fill pattern of the brush.
"""
# Initialize color to bright red to signal this not being
# set correctly by color setter
super().__init__(brown_object)
self.qt_object = QtGui.QBrush(QtGui.QColor('#ff0000'))
self.color = color
self.pattern = pattern
######## PUBLIC PROPERTIES ########
@property
def color(self):
"""Color: The color for the brush.
This setter automatically propagates changes to the
underlying Qt object.
"""
return self._colorterm
@color.setter
@property
def pattern(self):
"""BrushPattern: The fill pattern.
This setter automatically propagates changes to the
underlying Qt object.
"""
return self._pattern
@pattern.setter
| [
6738,
9485,
48,
83,
20,
1330,
33734,
8205,
72,
198,
198,
6738,
7586,
13,
39994,
13,
39994,
1330,
26491,
628,
198,
4871,
39846,
39317,
7,
39317,
2599,
198,
220,
220,
220,
37227,
39317,
329,
257,
14276,
8263,
14093,
12755,
6070,
7572,
1... | 2.634694 | 490 |
from examtool.api.extract_questions import extract_questions
| [
6738,
2814,
25981,
13,
15042,
13,
2302,
974,
62,
6138,
507,
1330,
7925,
62,
6138,
507,
628,
198
] | 3.5 | 18 |
#!/usr/bin/python3
import cgi
import cgitb
import os
from datetime import date
print("Cache-Control: no-cache")
print("Content-type: text/html\n")
print("<html>")
print("<head>")
print("<title>GET Request Echo</title>")
print("</head>")
print("<body>")
print("<h1 align=center>GET Request Echo</h1>")
print("<hr/>")
print("<b>Query String:</b><br/>")
form = cgi.FieldStorage()
for key in form:
print(f"<b>{key}</b>: {form.getvalue(key)}<br/>")
print("</body>")
print("</html>") | [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
11748,
269,
12397,
198,
11748,
269,
18300,
65,
198,
11748,
28686,
198,
198,
6738,
4818,
8079,
1330,
3128,
198,
198,
4798,
7203,
30562,
12,
15988,
25,
645,
12,
23870,
4943,
198,
4798,
7203... | 2.482234 | 197 |
from projections import *
from urllib2 import urlopen
from httplib import HTTPConnection
from threading import Thread
from kivy.logger import Logger
from kivy.loader import Loader
from os.path import join, dirname
import time, os
import hashlib
GMLNS = "http://www.opengis.net/gml"
try:
from pyproj import Proj
from lxml.etree import ElementTree as ET
except:
# try:
from xml.etree import ElementTree as ET
# except:
# pass | [
6738,
19887,
1330,
1635,
198,
6738,
2956,
297,
571,
17,
1330,
19016,
9654,
198,
6738,
1841,
489,
571,
1330,
14626,
32048,
198,
6738,
4704,
278,
1330,
14122,
198,
6738,
479,
452,
88,
13,
6404,
1362,
1330,
5972,
1362,
198,
6738,
479,
45... | 2.780488 | 164 |
from argparse import ArgumentParser
from time import sleep
from src.server import Server
parser = ArgumentParser()
parser.add_argument('-d', '--debug', action="store_true")
parser.add_argument('-w', '--disable-watchdog', action="store_true")
args = parser.parse_args()
while True:
try:
Server(debug=args.debug)
except KeyboardInterrupt:
if args.disable_watchdog:
break
print("To manually kill the server, send another interrupt in the next 10 seconds.")
sleep(10)
| [
6738,
1822,
29572,
1330,
45751,
46677,
198,
6738,
640,
1330,
3993,
198,
198,
6738,
12351,
13,
15388,
1330,
9652,
198,
198,
48610,
796,
45751,
46677,
3419,
198,
48610,
13,
2860,
62,
49140,
10786,
12,
67,
3256,
705,
438,
24442,
3256,
2223... | 2.867403 | 181 |
# Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This manages a Business Type Code master records.
It defines the type of business the account is primarily doing.
"""
from .base_model import BaseCodeModel
class BusinessTypeCode(BaseCodeModel): # pylint: disable=too-few-public-methods
"""Business Type Code to store all the available business types to add to the account.
That is supported by auth system.
"""
__tablename__ = 'business_type_codes'
| [
2,
15069,
10673,
13130,
22783,
286,
3517,
9309,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
... | 3.774074 | 270 |
from coco2customvision.integration import get_category_id_to_tag_id_dictionary
| [
6738,
8954,
78,
17,
23144,
10178,
13,
18908,
1358,
1330,
651,
62,
22872,
62,
312,
62,
1462,
62,
12985,
62,
312,
62,
67,
14188,
628
] | 3.2 | 25 |
import os
import sys
sys.path.append(os.path.dirname(__file__))
sys.path.append(os.path.join(os.path.dirname(__file__),'..'))
sys.path.append(os.path.join(os.path.dirname(__file__),'..', ".."))
from datasets.images import AffectNetDataset
DATASET_ID = 0
MODEL_ID = 1
| [
11748,
28686,
198,
11748,
25064,
198,
17597,
13,
6978,
13,
33295,
7,
418,
13,
6978,
13,
15908,
3672,
7,
834,
7753,
834,
4008,
198,
17597,
13,
6978,
13,
33295,
7,
418,
13,
6978,
13,
22179,
7,
418,
13,
6978,
13,
15908,
3672,
7,
834,... | 2.424779 | 113 |
import time
username = input("Enter your username: ")
password = input("Enter your password: ")
if username == "Nenye" and password == "1010":
print("welcome to CyberSafe " + username)
print(" ")
account_type = input("savings or current? ")
transaction_type = input("Deposit or withdrawal? ")
if account_type == "savings" and transaction_type == "withdrawal" :
print("proceed to enter amount")
amount = int(input("enter here: "))
if amount <= 15000:
print("Please hold on your transaction is processing")
time.sleep(10)
print("Please take your cash")
time.sleep(6)
print("THANK YOU FOR BANKING WITH US, dont get poor!")
elif amount > 15000:
print("sapa nice one!! insufficient balance")
elif account_type == "current" or transaction_type == "Deposit" :
print("oops! seems like an error occured")
else:
print("sorry boss, i dont know you") | [
11748,
640,
198,
29460,
796,
5128,
7203,
17469,
534,
20579,
25,
366,
8,
198,
28712,
796,
5128,
7203,
17469,
534,
9206,
25,
366,
8,
198,
198,
361,
20579,
6624,
366,
45,
268,
5948,
1,
290,
9206,
6624,
366,
8784,
15,
1298,
198,
220,
... | 2.895899 | 317 |
# Copyright 2007 by Tiago Antao <tiagoantao@gmail.com>. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
from __future__ import print_function
from Bio.PopGen.GenePop import FileParser
import Bio.PopGen.FDist
# Quite a few utility functions could be done (like remove pop,
# add locus, etc...). The recommended strategy is convert back
# and forth from/to GenePop and use GenePop Utils
def convert_genepop_to_fdist(gp_rec, report_pops = None):
"""Converts a GenePop record to a FDist one.
Parameters:
gp_rec - Genepop Record (either standard or big)
Returns:
FDist record.
"""
if hasattr(gp_rec, "populations"):
return _convert_genepop_to_fdist(gp_rec)
else:
return _convert_genepop_to_fdist_big(gp_rec, report_pops)
def _convert_genepop_to_fdist(gp_rec):
"""Converts a standard GenePop record to a FDist one.
Parameters:
gp_rec - Genepop Record (Standard)
Returns:
FDist record.
"""
fd_rec = Bio.PopGen.FDist.Record()
fd_rec.data_org = 0
fd_rec.num_loci = len(gp_rec.loci_list)
fd_rec.num_pops = len(gp_rec.populations)
for lc_i in range(len(gp_rec.loci_list)):
alleles = []
pop_data = []
for pop_i in range(len(gp_rec.populations)):
for indiv in gp_rec.populations[pop_i]:
for al in indiv[1][lc_i]:
if al is not None and al not in alleles:
alleles.append(al)
alleles.sort() # Dominance requires this
#here we go again (necessary...)
for pop_i in range(len(gp_rec.populations)):
allele_counts = {}
for indiv in gp_rec.populations[pop_i]:
for al in indiv[1][lc_i]:
if al is not None:
count = allele_counts.get(al, 0)
allele_counts[al] = count + 1
allele_array = [] # We need the same order as in alleles
for allele in alleles:
allele_array.append(allele_counts.get(allele, 0))
pop_data.append(allele_array)
#if lc_i==3:
# print alleles, allele_counts#, pop_data
fd_rec.loci_data.append((len(alleles), pop_data))
return fd_rec
def _convert_genepop_to_fdist_big(gp_rec, report_pops = None):
"""Converts a big GenePop record to a FDist one.
Parameters:
gp_rec - Genepop Record (Big)
Returns:
FDist record.
"""
fd_rec = Bio.PopGen.FDist.Record()
fd_rec.data_org = 1
fd_rec.num_loci = len(gp_rec.loci_list)
num_loci = len(gp_rec.loci_list)
loci = []
for i in range(num_loci):
loci.append(set())
pops = []
work_rec = FileParser.read(gp_rec.fname)
lParser = work_rec.get_individual()
curr_pop = init_pop()
num_pops = 1
if report_pops:
report_pops(num_pops)
while lParser:
if lParser is not True:
for loci_pos in range(num_loci):
for al in lParser[1][loci_pos]:
if al is not None:
loci[loci_pos].add(al)
curr_pop[loci_pos][al]= curr_pop[loci_pos].get(al,0)+1
else:
pops.append(curr_pop)
num_pops += 1
if report_pops:
report_pops(num_pops)
curr_pop = init_pop()
lParser = work_rec.get_individual()
work_rec._handle.close() # TODO - Needs a proper fix
pops.append(curr_pop)
fd_rec.num_pops = num_pops
for loci_pos in range(num_loci):
alleles = sorted(loci[loci_pos])
loci_rec = [len(alleles), []]
for pop in pops:
pop_rec = []
for allele in alleles:
pop_rec.append(pop[loci_pos].get(allele, 0))
loci_rec[1].append(pop_rec)
fd_rec.loci_data.append(tuple(loci_rec))
return fd_rec
def _convert_genepop_to_fdist_big_old(gp_rec, report_loci = None):
"""Converts a big GenePop record to a FDist one.
Parameters:
gp_rec - Genepop Record (Big)
Returns:
FDist record.
"""
fd_rec = Bio.PopGen.FDist.Record()
fd_rec.data_org = 0
fd_rec.num_loci = len(gp_rec.loci_list)
work_rec0 = FileParser.read(gp_rec.fname)
fd_rec.num_pops = countPops(work_rec0)
num_loci = len(gp_rec.loci_list)
for lc_i in range(num_loci):
if report_loci:
report_loci(lc_i, num_loci)
work_rec = FileParser.read(gp_rec.fname)
work_rec2 = FileParser.read(gp_rec.fname)
alleles = []
pop_data = []
lParser = work_rec.get_individual()
while lParser:
if lParser is not True:
for al in lParser[1][lc_i]:
if al is not None and al not in alleles:
alleles.append(al)
lParser = work_rec.get_individual()
#here we go again (necessary...)
alleles.sort()
lParser = work_rec2.get_individual()
allele_counts = {}
for allele in alleles:
allele_counts[allele] = 0
allele_counts[None]=0
while lParser:
if lParser is True:
process_pop(pop_data, alleles, allele_counts)
allele_counts = {}
for allele in alleles:
allele_counts[allele] = 0
allele_counts[None]=0
else:
for al in lParser[1][lc_i]:
allele_counts[al] += 1
lParser = work_rec2.get_individual()
process_pop(pop_data, alleles, allele_counts)
fd_rec.loci_data.append((len(alleles), pop_data))
return fd_rec
def approximate_fst(desired_fst, simulated_fst, parameter_fst,
max_run_fst = 1, min_run_fst = 0, limit = 0.005):
"""Calculates the next Fst attempt in order to approximate a
desired Fst.
"""
if abs(simulated_fst - desired_fst) < limit:
return parameter_fst, max_run_fst, min_run_fst
if simulated_fst > desired_fst:
max_run_fst = parameter_fst
next_parameter_fst = (min_run_fst + parameter_fst)/2
else:
min_run_fst = parameter_fst
next_parameter_fst = (max_run_fst + parameter_fst)/2
return next_parameter_fst, max_run_fst, min_run_fst
| [
2,
15069,
4343,
416,
16953,
3839,
3738,
5488,
1279,
20259,
3839,
4910,
78,
31,
14816,
13,
785,
28401,
220,
1439,
2489,
10395,
13,
198,
2,
770,
2438,
318,
636,
286,
262,
8436,
404,
7535,
6082,
290,
21825,
416,
663,
198,
2,
5964,
13,
... | 2.002475 | 3,232 |
# -*- coding: utf-8 -*-
# @Author: prabhakar
# @Date: 2016-06-25 20:13:13
# @Last Modified by: Prabhakar Gupta
# @Last Modified time: 2016-12-25 20:53:28
from setuptools import setup, find_packages
setup(
name='github_email',
packages=find_packages(),
version='0.0.5',
description='Get email ID of any GitHub user',
long_description='Get a list of email IDs of any valid GitHub user from one function call even if there are no public email for that user',
author='Prabhakar Gupta',
author_email='prabhakargupta267@gmail.com',
url='https://github.com/prabhakar267/github_email',
download_url='https://github.com/prabhakar267/github_email/tarball/0.0.5',
keywords=['github', 'email', 'user', 'public', 'commit', 'get'],
license='MIT',
include_package_data = True,
install_requires=['requests'],
)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2488,
13838,
25,
778,
397,
43573,
283,
198,
2,
2488,
10430,
25,
220,
220,
1584,
12,
3312,
12,
1495,
1160,
25,
1485,
25,
1485,
198,
2,
2488,
5956,
40499,
416,
25,
... | 2.716561 | 314 |
from suds.client import Client
url = "http://localhost:10000/ValidaCNPJ?wsdl"
client = Client(url)
cnpj = input("Digite o CNPJ: ")
response= client.service.isCNPJ(cnpj)
print(response)
| [
6738,
424,
9310,
13,
16366,
1330,
20985,
201,
198,
6371,
796,
366,
4023,
1378,
36750,
25,
49388,
14,
7762,
3755,
34,
22182,
41,
30,
18504,
25404,
1,
220,
220,
201,
198,
16366,
796,
20985,
7,
6371,
8,
201,
198,
31522,
79,
73,
796,
... | 2.017857 | 112 |
import threading
def threadsafe(fn, lock=None):
"""decorator making sure that the decorated function is thread safe"""
lock = lock or threading.Lock()
return new
| [
11748,
4704,
278,
628,
198,
4299,
14390,
8635,
7,
22184,
11,
5793,
28,
14202,
2599,
198,
220,
220,
220,
37227,
12501,
273,
1352,
1642,
1654,
326,
262,
24789,
2163,
318,
4704,
3338,
37811,
198,
220,
220,
220,
5793,
796,
5793,
393,
4704... | 3.339623 | 53 |
from .train import main, produce_dataframe, read_file_data, COLUMNS, TARGET_COLUMN, FEATURE_COLUMNS | [
6738,
764,
27432,
1330,
1388,
11,
4439,
62,
7890,
14535,
11,
1100,
62,
7753,
62,
7890,
11,
20444,
5883,
8035,
11,
309,
46095,
62,
25154,
5883,
45,
11,
18630,
40086,
62,
25154,
5883,
8035
] | 2.911765 | 34 |
from Modules.Project import Project
import random
import string
| [
6738,
3401,
5028,
13,
16775,
1330,
4935,
198,
11748,
4738,
198,
11748,
4731,
628,
628
] | 4.466667 | 15 |
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.dispatch import dispatcher
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.fields import GenericForeignKey
from flier import (methods, managers)
class BaseModel(models.Model, methods.BaseModel):
'''Base Model
'''
created_at = models.DateTimeField(_(u'Created At'), auto_now_add=True, )
updated_at = models.DateTimeField(_(u'Updated At'), auto_now=True, )
class Address(BaseModel, methods.Address):
''' Mail Address
'''
address = models.EmailField(
_('Email Address'),
help_text=_('Email Address Help'), max_length=100,
unique=True, db_index=True)
domain = models.CharField(
_('Email Domain'),
help_text=_('Email Domain Help'), max_length=50,
null=True, blank=True, default='',)
bounced = models.IntegerField(
_('Bounced Count'),
help_text=_('Bounced Count Help'), default=0)
enabled = models.BooleanField(
_('Enabled Address'), help_text=_('Enabled Address Help'),
default=True)
class Recipient(RecipientContext, BaseModel, methods.Recipient):
'''Recipients for a Mail
'''
key = models.CharField(
_('Recipient Key'), help_text=_('Recipient Key Help'),
max_length=100, unique=True, db_index=True)
message_id = models.CharField(
_('Message ID'), help_text=_('Message ID Help'),
max_length=100, unique=True, db_index=True)
sender = models.ForeignKey(
Sender, verbose_name=_('Message Sender'),
help_text=_('Message Sender Help'))
to = models.ForeignKey(
Address, verbose_name=_('Recipient Address'),
help_text=_('Recipient Address Help'))
status = models.ForeignKey(
RecipientStatus, verbose_name=_('Recipient Status'),
help_text=_('Recipient Status Help'),
null=True, default=None, blank=True, on_delete=models.SET_NULL)
sent_at = models.DateTimeField(
_('Sent At to Reipient'), help_text=_('Sent At to Recipient Help'),
null=True, blank=True, default=None)
message = models.TextField(
_('Recipient Message'), null=True, default=None, blank=True)
objects = managers.RecipientQuerySet.as_manager()
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
26791,
13,
41519,
1330,
334,
1136,
5239,
62,
75,
12582,
355,
4808,
198,
6738,
42625,
14208,
13,
6381,
17147,
1330,
49952,
198,
6738,
42625,
14208,
13,
3642,
822,
13,... | 2.646591 | 880 |
# coding: utf-8
# In[ ]:
#reading from hdfs
from pyspark.sql import SparkSession
import pandas as pd
from pyspark.sql import SQLContext
#creating spark session
sparkSession = SparkSession.builder.appName("bastsmen-cluster").getOrCreate()
#if csv has header
#loading bats_details.csv from hdfs
df_load = sparkSession.read.csv('hdfs://localhost:9000/input/bats_details.csv', header=True)
#renaming column names
df_load = df_load.withColumnRenamed("batsman", "player_name")
df_load = df_load.withColumnRenamed("average", "average")
df_load = df_load.withColumnRenamed("strike_rate", "strike_rate")
#use this if header is not present
'''
df_load = sparkSession.read.csv('hdfs://localhost:9000/input/bats_details.csv', header=False)
df_load = df_load.withColumnRenamed("_c0", "player_name")
df_load = df_load.withColumnRenamed("_c1", "average")
df_load = df_load.withColumnRenamed("_c2", "strike_rate")
'''
#to see the dataframe format
df_load.show()
print(type(df_load))
# In[ ]:
from pyspark.ml.feature import VectorAssembler
from pyspark.sql.functions import col , column
from pyspark.sql.functions import *
#fill null values with 0
df_load = df_load.na.fill({'average': '0'})
df_load = df_load.na.fill({'strike_rate': '0'})
#convert all strings to double
df_load = df_load.withColumn("average", col("average").cast("double"))
df_load = df_load.withColumn("strike_rate", col("strike_rate").cast("double"))
#form a feature column by joining average and strike_rate column
vecAssembler = VectorAssembler(inputCols=["strike_rate","average"], outputCol="features")
#to add feature column to the original dataframe
new_df = vecAssembler.transform(df_load)
#to see the new dataframe
new_df.show()
# In[ ]:
#create new dataframe which has only player_name and features as columns
df_kmeans = vecAssembler.transform(df_load).select('player_name', 'features')
#to see the input format that will be given to k-means clustering
df_kmeans.show()
# In[ ]:
#to decide the best k value
import numpy as np
from pyspark.ml.clustering import KMeans
#compute the cost of clustering output by varying k-values from 2 to 19
cost = np.zeros(20)
for k in range(2,20):
kmeans = KMeans().setK(k).setSeed(50).setFeaturesCol("features")
model = kmeans.fit(df_kmeans)
cost[k] = model.computeCost(df_kmeans)
# In[ ]:
#plotting cost vs k-values to know which k value to choose
get_ipython().run_line_magic('matplotlib', 'inline')
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from pyspark import SparkContext
from pyspark.ml.clustering import KMeans
from sklearn.datasets.samples_generator import make_blobs
from mpl_toolkits.mplot3d import Axes3D
fig, ax = plt.subplots(1,1, figsize =(8,6))
ax.plot(range(2,20),cost[2:20])
ax.set_xlabel('k')
ax.set_ylabel('cost')
# In[ ]:
#optimal k value
k = 10
#fit the model with dataset
kmeans = KMeans().setK(k).setSeed(1).setFeaturesCol("features")
model = kmeans.fit(df_kmeans)
#to get batsman cluster's centroids
centers = model.clusterCenters()
print("Cluster Centers: ")
for center in centers:
print(center)
# In[ ]:
#dataframe to see the cluster to which a batsman belongs to
#select player_name and prediction columns from the kmeans output dataframe
transformed = model.transform(df_kmeans).select('player_name', 'prediction')
rows = transformed.collect()
df_pred = sqlContext.createDataFrame(rows)
df_pred.show()
# In[ ]:
#storing output into hdfs in csv format
#bats_clusters.csv : batsman_name, cluster_number
df = df_pred
df = df.withColumn("player_name", col("player_name").cast("string"))
df = df.withColumn("prediction", col("prediction").cast("string"))
df.write.csv('hdfs://localhost:9000/input/bats_clusters.csv')
print("saved")
# In[ ]:
#add prediction(cluster number) column to the original dataframe
#by joining output dataframe(df_pred) and original(new_df) based on player_name
df_pred = df_pred.join(new_df, 'player_name')
df_pred.show()
#converting spark dataframe into pandas dataframe to plot points in each cluster
pddf_pred = df_pred.toPandas().set_index('player_name')
pddf_pred.head()
# In[ ]:
#plot to see how the datapoints belonging to each cluster
threedee = plt.figure(figsize=(15,10)).gca(projection='3d')
threedee.scatter(pddf_pred.average, pddf_pred.strike_rate, c=pddf_pred.prediction)
threedee.set_xlabel('average')
threedee.set_ylabel('strike_rate')
threedee.set_zlabel('prediction')
plt.show()
| [
198,
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
2,
554,
58,
2361,
25,
628,
198,
2,
25782,
422,
289,
7568,
82,
198,
198,
6738,
279,
893,
20928,
13,
25410,
1330,
17732,
36044,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
279,
8... | 2.801887 | 1,590 |
from typing import Any, Callable, Dict
from .bookmarker import Bookmarker
from ..infrastructure import default_dumps, default_loads
| [
6738,
19720,
1330,
4377,
11,
4889,
540,
11,
360,
713,
198,
198,
6738,
764,
2070,
4102,
263,
1330,
4897,
4102,
263,
198,
6738,
11485,
10745,
6410,
1330,
4277,
62,
67,
8142,
11,
4277,
62,
46030,
628
] | 3.722222 | 36 |
# path
source_path = "../../data"
data_path = "../data"
save_path = "./models"
log_path = "./log"
train_source = "train_data.json"
train_name = "train_data.pkl"
dev_source = "dev_data.json"
dev_name = "dev_data.pkl"
# test_source = "test1_data_postag_with_predicate.json"
# test_name = "test1_data_postag_with_predicate.pkl"
word_vocab_name = "word_vocab.pkl"
char_vocab_name = "char_vocab.pkl"
pos_vocab_name = "pos_vocab.pkl"
tag_vocab_name = "tag_vocab.pkl"
weight_name = "weight.pkl"
device = 2
seed = 11
char_embed_dim = 64
word_embed_dim = 64
pos_embed_dim = 64
sentence_length = 320
hidden_dim = 128
encoding_type = 'bieso'
batch_size = 64
epochs = 64
learning_rate = 1e-3
weight_decay = 0
patience = 10
num_layers = 4
inner_size = 256
key_size = 64
value_size = 64
num_head = 4
dropout = 0.1
| [
2,
3108,
198,
10459,
62,
6978,
796,
366,
40720,
40720,
7890,
1,
198,
7890,
62,
6978,
796,
366,
40720,
7890,
1,
198,
21928,
62,
6978,
796,
366,
19571,
27530,
1,
198,
6404,
62,
6978,
796,
366,
19571,
6404,
1,
198,
198,
27432,
62,
10... | 2.336232 | 345 |
# -*- coding: utf-8 -*-
# Copyright (C) 2004-2015 Mag. Christian Tanzer. All rights reserved
# Glasauergasse 32, A--1130 Wien, Austria. tanzer@swing.co.at
# ****************************************************************************
#
# This module is licensed under the terms of the BSD 3-Clause License
# <http://www.c-tanzer.at/license/bsd_3c.html>.
# ****************************************************************************
#
#++
# Name
# PMA.V_Mailbox
#
# Purpose
# A virtual mailbox (combing several real mailboxes into one mailbox)
#
# Revision Dates
# 30-Dec-2005 (MG) Creation
# 2-Jan-2006 (CT) `sync` added
# 23-Jan-2006 (MG) Rewritten using the change counter
# 26-Jan-2006 (MG) `remove_msg` reordered
# 26-Jan-2006 (MG) Use new `changes_for_observer` feature
# 8-Oct-2015 (CT) Change `__getattr__` to *not* handle `__XXX__`
# ««revision-date»»···
#--
from _TFL import TFL
import _TFL.sos as sos
from _PMA import PMA
import _TFL._Meta.Object
import _PMA.Mailbox
import weakref
class _Proxy_ (TFL.Meta.Object) :
"""A proxy around an object which overrides some of the attributes."""
# end def __init__
# end def __getattr__
# end class _Proxy_
class _V_Mailbox_ (PMA._Mailbox_) :
"""Root class for all kind of virtual mailboxes"""
supports_status = True
# end def __init__
# end def add_filter_mailbox
# end def add_messages
# end def add_subbox
# end def delete
# end def sync
# end def remove_msg
# end def _add
# end def _get_messages
# end def _mailbox_changed
# end class _V_Mailbox_
class V_Mailbox (_V_Mailbox_) :
"""Virtual mailbox (combing several real mailboxes into one mailbox)."""
# end def __init__
# end def _eligible
# end class V_Mailbox
"""
from _PMA import PMA
import _PMA.Mailbox
import _PMA.V_Mailbox
import _PMA.Matcher
mbi = PMA.Maildir ("/home/glueck/PMA/D/inbox")
mb1 = PMA.Mailbox ("/home/glueck/PMA/TTTech/planung")
mb2 = PMA.Mailbox ("/home/glueck/PMA/TTTech/BIKA")
mbs = PMA.MH_Mailbox ("/home/glueck/work/MH/Installscript")
vmb = PMA.V_Mailbox ("f1", (mb1, mb2))
vmb.messages
m = mbs.messages [58]
"""
if __name__ != "__main__" :
PMA._Export ("V_Mailbox", "_V_Mailbox_")
### __END__ PMA.V_Mailbox
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
15069,
357,
34,
8,
5472,
12,
4626,
2944,
13,
4302,
11818,
9107,
13,
1439,
2489,
10395,
198,
2,
21931,
559,
6422,
21612,
3933,
11,
317,
438,
1157,
1270,
370,
2013,
... | 2.454545 | 957 |
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
f = open('README.md', 'r')
try:
long_desc = f.read()
finally:
f.close()
requires = ['Sphinx>=0.6']
setup (
name = 'sphinxcontrib-pdfembed',
version = '0.1',
author = 'Super Kogito',
author_email = 'superkogito@gmail.com',
description = 'Sphinx extension to embedd a pdf files webpages',
license = 'MIT',
url = 'https://github.com/SuperKogito/sphinx-pdfembed',
packages = find_packages(),
classifiers = [
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Documentation',
'Topic :: Utilities',
],
platforms = 'any',
include_package_data = True,
install_requires = requires,
namespace_packages = ['sphinxcontrib'],
)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
198,
198,
69,
796,
1280,
10786,
15675,
11682,
13,
9132,
3256,
705,
81,
11537,
198,
28311,
25,
198,
220,
... | 1.918879 | 678 |
import unittest
from ayeaye.ignition import Ignition, EngineUrlCase, EngineUrlStatus
EXAMPLE_ENGINE_URL_0 = "mysql://root:{env_secret_password}@localhost/my_database"
| [
11748,
555,
715,
395,
198,
198,
6738,
257,
5948,
48822,
13,
570,
653,
1330,
16583,
653,
11,
7117,
28165,
20448,
11,
7117,
28165,
19580,
198,
198,
6369,
2390,
16437,
62,
26808,
8881,
62,
21886,
62,
15,
796,
366,
28744,
13976,
1378,
157... | 2.982456 | 57 |
import numpy as np
from scipy.spatial import distance
from scipy.optimize import curve_fit
import csv
import math
# def get_image_lagtime(names): #time that needs to be added to start of acquisition
# fname = "../bax_agg/Image Acquisition Times.csv"
# rows = get_rows(fname, 4)
# lagtime = {}
# for names in rows:
# print type(names[0]), type(names[2])
# lagtime[names[0]] = float(names[2])
# #print lagtime
# return lagtime
| [
11748,
299,
32152,
355,
45941,
198,
6738,
629,
541,
88,
13,
2777,
34961,
1330,
5253,
198,
6738,
629,
541,
88,
13,
40085,
1096,
1330,
12133,
62,
11147,
198,
11748,
269,
21370,
220,
198,
11748,
10688,
628,
628,
198,
2,
825,
651,
62,
9... | 2.628571 | 175 |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Adding task owner AWS account id
"""
from yoyo import step
__depends__ = {
"20211026_01_scyfj-support-dataperf",
"20211103_01_pdUwN-add-placeholder-validation",
"20211203_01_Dkyr3-adding-fields-to-tasks-to-allow-owners-to-run-in-their-own-aws-env",
}
steps = [
step(
"ALTER TABLE tasks ADD COLUMN task_aws_account_id TEXT",
"ALTER TABLE tasks DROP task_aws_account_id",
),
step(
"ALTER TABLE tasks ADD COLUMN task_gateway_predict_prefix TEXT",
"ALTER TABLE tasks DROP task_gateway_predict_prefix",
),
]
| [
2,
15069,
357,
66,
8,
3203,
11,
3457,
13,
290,
663,
29116,
13,
198,
2,
770,
2723,
2438,
318,
11971,
739,
262,
17168,
5964,
1043,
287,
262,
198,
2,
38559,
24290,
2393,
287,
262,
6808,
8619,
286,
428,
2723,
5509,
13,
198,
198,
37811... | 2.523649 | 296 |
"""Parse data files from the International Earth Rotation Service.
See:
https://datacenter.iers.org/eop.php
ftp://cddis.gsfc.nasa.gov/pub/products/iers/readme.finals2000A
"""
import numpy as np
from ..constants import DAY_S
inf = float('inf')
# This regular expression must remain a plain string; attempting to
# compile it triggers a bug in older NumPy versions like 1.14.3:
# https://github.com/skyfielders/python-skyfield/issues/372
_R = (b'(?m)^......(.........) . '
b'(.\d.......)......... '
b'(.\d.......)......... '
b'.(.\d........)')
# Compatibility with older Skyfield versions:
| [
37811,
10044,
325,
1366,
3696,
422,
262,
4037,
3668,
371,
14221,
4809,
13,
198,
198,
6214,
25,
198,
5450,
1378,
19608,
330,
9255,
13,
3183,
13,
2398,
14,
68,
404,
13,
10121,
198,
701,
79,
1378,
66,
1860,
271,
13,
14542,
16072,
13,
... | 2.833333 | 216 |
###############################################
#
# Odds and ends for debugging
#
###############################################
| [
29113,
7804,
4242,
21017,
198,
2,
198,
2,
220,
220,
20664,
82,
290,
5645,
329,
28769,
198,
2,
198,
29113,
7804,
4242,
21017,
198
] | 5.5 | 24 |
from ..mpditem import MPDItem
| [
6738,
11485,
3149,
5266,
368,
1330,
4904,
35,
7449,
198
] | 3 | 10 |
#
# Example file for working with loops
#
if __name__ == "__main__":
main()
| [
2,
198,
2,
17934,
2393,
329,
1762,
351,
23607,
198,
2,
628,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
1388,
3419,
198
] | 2.857143 | 28 |
import numpy as np
import pandas as pd
import datetime
import random as r
donor = pd.read_csv("donors.csv")
receiver = pd.read_csv("receiver.csv")
delivery = pd.read_csv("delivery.csv")
donor = donor[['ID']]
receiver = receiver[['ID', 'Accepts']]
delivery = delivery[['employeeID', 'isWorkingForSocialCause']]
d = delivery.to_numpy()
del_worker = [d[0][0]]
for i in d:
if i[1] == 1:
del_worker = np.append(del_worker, i[0])
don = donor.to_numpy()
rec = receiver.to_numpy()
x = randate()
stat = ['delivered', 'delivered', 'delivered', 'delivered', 'delivered', 'delivered', 'delivered', 'delivered', 'delivered', 'delivered', 'delivered', 'delivered', 'active']
arr = np.array([['donor', 'receiver', 'delivery', 'category', 'status', 0, x]])
for i in range(220):
di = r.randint(0,len(don) - 1)
ri = r.randint(0,len(rec) - 1)
dwi = r.randint(0,len(del_worker) - 1)
dai = randate()
xi = r.randint(0,len(stat) - 1)
if rec[ri][1] == 'Money':
temp = [[don[di][0], rec[ri][0], del_worker[dwi], rec[ri][1], stat[xi], r.randint(100,2000), dai]]
else:
temp = [[don[di][0], rec[ri][0], del_worker[dwi], rec[ri][1], stat[xi], 1, dai]]
arr = np.append(arr, temp, axis = 0)
print(arr)
pd.DataFrame(arr).to_csv("donation.csv")
| [
11748,
299,
32152,
355,
45941,
201,
198,
11748,
19798,
292,
355,
279,
67,
201,
198,
11748,
4818,
8079,
201,
198,
11748,
4738,
355,
374,
201,
198,
201,
198,
9099,
273,
796,
279,
67,
13,
961,
62,
40664,
7203,
9099,
669,
13,
40664,
494... | 2.237607 | 585 |
#-*- coding: utf-8 -*-
from iidxrank import iidx
from iidxrank import models
import time
from datetime import datetime
import json
import copy
"""
get songs in ranktable
"""
"""
search all candidate(level & type) songs in ranktable
"""
"""
generate NOPLAY playrecord from song object
"""
"""
common processor of 'playrecord data'
- add 'rate','rank' to each song
- modify 'diff' to uppercase
"""
"""
get ranktable metadata
"""
"""
only get userdata
"""
"""
get pdata from iidx.me object
- add 'pkid', 'tag' for future processing
"""
"""
get only userdata from player object
"""
"""
generate playrecord from songobjs(merge songinfo into playrecord)
using database info.
"""
"""
get pdata from player object
- if player==None, then return DJ NONAME (empty player)
"""
"""
categorize playdata
"""
"""
update player record
- desc requires: clear, rate(opt), rank(opt), score(opt)
"""
| [
2,
12,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
1312,
312,
87,
43027,
1330,
1312,
312,
87,
198,
6738,
1312,
312,
87,
43027,
1330,
4981,
198,
11748,
640,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
11748,
33918,... | 3.128472 | 288 |
# Flask micro-service exposing CRUD-style endpoints over a CSV of
# "Cards Against Humanity" black cards, loaded once at startup.
# NOTE(review): the handler functions for the @app.route decorators below
# are missing from this excerpt (the decorators are dangling), so the file
# is not runnable as shown -- restore the bodies from the full source.
import pandas as pd
from flask import Flask, request, Response, jsonify
import json
app = Flask(__name__)
app.config.update(DEBUG=True)
print("importing black cards")
# header=None: the CSV has no header row; cards are positional rows.
df = pd.read_csv (r'CAH_Black_Cards.csv', header=None)
print("importing black cards COMPLETE")
#Return TRUE length of dataframe
@app.route('/get_bc_length', methods=['GET', 'POST'])
#read all
#NEED TO CHECK RETURN OF LIST
@app.route('/read_all', methods=['GET', 'POST'])
#retrieve a card
@app.route('/retrieve_bc', methods=['GET', 'POST'])
#delete card
#COULD SET TO POST
@app.route('/delete_bc', methods=['GET', 'POST'])
#add card
#COULD SET TO POST
@app.route('/add_bc', methods=['GET', 'POST'])
if __name__ == '__main__':
    # Listen on all interfaces so the service is reachable from outside.
    app.run(port = 5001, host = '0.0.0.0')
11748,
19798,
292,
355,
279,
67,
198,
6738,
42903,
1330,
46947,
11,
2581,
11,
18261,
11,
33918,
1958,
198,
11748,
33918,
198,
198,
1324,
796,
46947,
7,
834,
3672,
834,
8,
198,
1324,
13,
11250,
13,
19119,
7,
30531,
28,
17821,
8,
628,... | 2.656028 | 282 |
"""
This :mod: `models` module includes different modules used in the prediction.
"""
from .helpers import bestF1
__all__ = [
"bestF1"
]
| [
37811,
198,
1212,
1058,
4666,
25,
4600,
27530,
63,
8265,
3407,
1180,
13103,
973,
287,
262,
17724,
13,
198,
37811,
198,
198,
6738,
764,
16794,
364,
1330,
1266,
37,
16,
198,
198,
834,
439,
834,
796,
685,
198,
197,
197,
197,
1,
13466,
... | 2.788462 | 52 |
# Read one line "N M Y": starting balance N, yearly increment M, and the
# number of years Y to simulate.
N, M, Y = input().split()
N, M, Y = float(N), float(M), int(Y)
current = N
for year in range(Y + 1):
    # Balance grows by year * M each iteration (year 0 adds nothing), i.e.
    # a triangular accumulation.  NOTE(review): confirm this matches the
    # intended accrual rule -- a plain `current + M` per year would differ.
    current = round(current + year * M, 2)
    # `two_decimals` is defined earlier in the file, outside this excerpt.
    print(f"{year} {two_decimals(current)}")
45,
11,
337,
11,
575,
796,
5128,
22446,
35312,
3419,
198,
45,
11,
337,
11,
575,
796,
12178,
7,
45,
828,
12178,
7,
44,
828,
493,
7,
56,
8,
198,
198,
14421,
796,
399,
198,
198,
1640,
614,
287,
2837,
7,
56,
1343,
352,
2599,
198,
... | 2.358974 | 78 |
#!/usr/bin/env python3
import sys
"""
Handle all the error messages in piemmer.
In piemmer, errors always need to be address to ensure the completely excution. Warnings, on
the other hand, are usually non-critical and can be automatically fixed by piemmer.
"""
class Error(Exception):
    """
    Base error type for piemmer.

    Arguments:
        code -- Type: str
            error code number in str

    Attributes:
        code -- Type: str

    NOTE(review): the ``__init__`` that would store ``code`` is not present
    in this excerpt; the argument/attribute documentation above is carried
    over from the original docstring -- confirm against the full source.
    """
def aftermath(fn):
    """
    Decorator controlling how piemmer behaves when an error occurs.

    1. Print the docstring for the ErrorCode __init__ function.
       It is much easier to write a detailed explanation for each error or
       warning in a docstring than in print()
    2. Whether to exit the program.
       Suppress sys.exit() when running unittest
    """
    # NOTE(review): `wapper` is undefined in this excerpt -- the decorator's
    # inner wrapper function (presumably `def wapper(...)`) is missing here,
    # so calling aftermath() would raise NameError as written.  Restore the
    # wrapper body from the full source; likely also a typo for "wrapper".
    return wapper
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
25064,
198,
198,
37811,
198,
37508,
477,
262,
4049,
6218,
287,
279,
26597,
647,
13,
198,
198,
818,
279,
26597,
647,
11,
8563,
1464,
761,
284,
307,
2209,
284,
4155,
26... | 2.87291 | 299 |
"""
"whatrecord parse" is used to parse and interpret a startup script or database
file, dumping the resulting ``ShellState`` or ``Database``.
"""
import argparse
import json
import logging
import pathlib
from typing import Dict, List, Optional, Union
import apischema
from ..common import IocMetadata
from ..db import Database, load_database_file
from ..format import FormatContext
from ..macro import MacroContext
from ..shell import LoadedIoc
logger = logging.getLogger(__name__)
DESCRIPTION = __doc__
def parse(
    filename: Union[str, pathlib.Path],
    dbd: Optional[str] = None,
    standin_directories: Optional[Dict[str, str]] = None,
    macros: Optional[str] = None,
) -> Union[Database, LoadedIoc]:
    """
    Parse a startup script or a database file, keyed off the file extension.

    Parameters
    ----------
    filename : str or pathlib.Path
        The file to parse.
    dbd : str or pathlib.Path, optional
        Database definition file, used when parsing a database file.
    standin_directories : dict, optional
        Stand-in directory remapping, used when parsing a startup script.
    macros : str, optional
        Macro definitions applied while parsing database files.
    """
    path = pathlib.Path(filename)

    # Macros feed the database-parsing branches below.
    ctx = MacroContext()
    ctx.define(**ctx.definitions_to_dict(macros or ""))

    if path.suffix not in (".db", ".template", ".dbd"):
        # Anything that is not a database file is treated as a startup script.
        metadata = IocMetadata.from_filename(
            path,
            standin_directories=standin_directories or {},
        )
        return LoadedIoc.from_metadata(metadata)

    if dbd and path.suffix != ".dbd":
        # A .db/.template with an accompanying definition file.
        return load_database_file(dbd=dbd, db=path, macro_context=ctx)
    # A .dbd itself, or a database with no definition file supplied.
    return Database.from_file(path, macro_context=ctx)
| [
37811,
198,
1,
10919,
22105,
21136,
1,
318,
973,
284,
21136,
290,
6179,
257,
13693,
4226,
393,
6831,
198,
7753,
11,
30231,
262,
7186,
7559,
23248,
9012,
15506,
393,
7559,
38105,
15506,
13,
198,
37811,
198,
198,
11748,
1822,
29572,
198,
... | 2.911184 | 608 |
'''
Created on May 25, 2018

@author: nishant.sethi
'''
# Geometric distribution: probability that the first success occurs on
# trial n, given per-trial success probability p.
n = 5
p = 1 / 3
result = p * (1 - p) ** (n - 1)
print(round(result, 3))
7061,
6,
201,
198,
41972,
319,
1737,
1679,
11,
2864,
201,
198,
201,
198,
31,
9800,
25,
299,
680,
415,
13,
2617,
5303,
201,
198,
7061,
6,
201,
198,
77,
28,
20,
201,
198,
79,
28,
16,
14,
18,
201,
198,
20274,
16193,
16,
12,
79,
... | 1.830769 | 65 |
from auction.utils.generic import get_or_create_bidbasket
| [
6738,
14389,
13,
26791,
13,
41357,
1330,
651,
62,
273,
62,
17953,
62,
14065,
65,
11715,
198
] | 3.411765 | 17 |
# 用于绘制同一种合约的不同参数
from matplotlib import pyplot as plt
from read_data import read_data
from label_data import get_labeled_data
import config
import numpy as np
def to_sec(delta_t):
    """Convert a ``datetime.timedelta`` to fractional seconds.

    Replaces the original ``to_sec = lambda ...`` assignment (PEP 8 E731:
    don't bind a lambda to a name) without changing behavior.

    NOTE(review): like the original, this ignores ``delta_t.days`` and only
    counts the sub-day part; use ``delta_t.total_seconds()`` instead if
    spans of one day or more can occur here -- confirm with callers.
    """
    return delta_t.seconds + delta_t.microseconds / 1e6
if __name__ == '__main__':
    # Load labeled samples for contract index 0 (`get_labeled_data` comes
    # from the local label_data module).
    (data, label) = get_labeled_data([0])
    means = []
    # Collect column 8 of each 'A1' sample for plotting -- presumably a
    # price-like feature; confirm against read_data's column layout.
    for i in range(len(data['A1'])):
        means.append(data['A1'][i][8])
    # Class distribution of the 'A1' labels (counts of 0 / 1 / 2).
    print(label['A1'].count(0))
    print(label['A1'].count(1))
    print(label['A1'].count(2))
    #print(means)
    #print(label['A1'])
    # NOTE(review): `show_data` is neither defined nor imported in this
    # excerpt -- as written here this call would raise NameError.
    show_data('A1', ['price'], means, label['A1'])
| [
2,
13328,
242,
101,
12859,
236,
163,
119,
246,
26344,
114,
28938,
234,
31660,
163,
100,
235,
28938,
230,
163,
118,
99,
21410,
38834,
28938,
234,
20998,
224,
46763,
108,
198,
198,
6738,
2603,
29487,
8019,
1330,
12972,
29487,
355,
458,
... | 2.105839 | 274 |
from lbry.testcase import CommandTestCase
| [
6738,
18360,
563,
13,
9288,
7442,
1330,
9455,
14402,
20448,
628,
628
] | 3.75 | 12 |
import fnmatch
import os | [
11748,
24714,
15699,
198,
11748,
28686
] | 4 | 6 |
import sys
import pandas as pd

# Input CSV path is the first command-line argument.
path = sys.argv[1]

frame = pd.read_csv(path)  # ,delimiter=';')
# Keep only rows whose pulse reading is below the 16-bit sentinel 65535
# (readings at the sentinel indicate a sensor overflow/invalid sample).
frame = frame.loc[frame['Pulse Rate(bpm)'] < 65535]
print(frame)

# Write alongside the input, swapping the ".csv" suffix for the
# "_processado.csv" (processed) suffix.
frame.to_csv(sys.argv[1][:-4] + '_processado.csv', index=False)
print("wrote to :" + sys.argv[1][:-4] + '_processado.csv')
| [
11748,
25064,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
7753,
28,
17597,
13,
853,
85,
58,
16,
60,
198,
198,
7568,
28,
30094,
13,
961,
62,
40664,
7,
7753,
8,
2,
11,
12381,
320,
2676,
11639,
26,
11537,
198,
198,
7568,
28,
75... | 2.04878 | 123 |
import sys
import json
import usrconfig
from tasklib import TaskWarrior

# TaskWarrior hook helper: argv[1] carries the JSON-encoded task that just
# changed state; its 'parent' UUID points at the recurring template task.
task = json.loads(sys.argv[1])

# Only prayer tasks (from the user's configured list) or Jumuah are tracked.
if task['description'] in usrconfig.prayer_list or task['description'] == 'Jumuah':
    print("Accounting for missed ajr...")
    db=TaskWarrior()
    p_task=db.tasks.get(uuid=task['parent'])
    found_missed=False
    if 'annotations' in p_task._data.keys():
        for ann in p_task['annotations']:
            # Annotations of the form "MISSED:<count>" carry the tally.
            ann_split = ann['description'].split(":")
            if len(ann_split) == 2 and ann_split[0] == 'MISSED' and ann_split[1].isnumeric():
                found_missed = True
                # Replace the annotation with an incremented count.
                # NOTE(review): this mutates the task's annotations while
                # iterating over them -- appears to work with tasklib, but
                # confirm, or iterate over a copy of the list.
                p_task.remove_annotation(ann)
                p_task.add_annotation('MISSED:'+str(int(ann_split[1]) + 1))
    if not found_missed:
        # First recorded miss: seed the counter from the configured baseline.
        p_task.add_annotation('MISSED:'+str(usrconfig.missed_start + 1))
    p_task.save()

sys.exit(0)
| [
11748,
25064,
198,
11748,
33918,
198,
11748,
514,
81,
11250,
198,
6738,
4876,
8019,
1330,
15941,
13195,
7701,
198,
198,
35943,
796,
33918,
13,
46030,
7,
17597,
13,
853,
85,
58,
16,
12962,
198,
198,
361,
4876,
17816,
11213,
20520,
287,
... | 2.204188 | 382 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Rafael Corsi @ insper.edu.br
# Dez/2017
# Disciplina Elementos de Sistemas
#
from os.path import join, dirname
import sys
import os
import shutil
import subprocess
import argparse
# Scripts python
# Resolve the repository root via git so the script works from any cwd.
ROOT_PATH = subprocess.Popen(['git', 'rev-parse', '--show-toplevel'], stdout=subprocess.PIPE).communicate()[0].rstrip().decode('utf-8')
PROJ_PATH = os.path.join(ROOT_PATH, 'Projetos', 'src')
TOOL_PATH = os.path.join(ROOT_PATH, 'Projetos', 'Z01-tools')
TOOL_SCRIPT_PATH = os.path.join(TOOL_PATH, 'scripts')
# Make the Z01-tools scripts importable; must precede the imports below.
sys.path.insert(0,TOOL_SCRIPT_PATH)
from report import report
from assemblerReport import assemblerReport

# Check whether the unit tests passed
if __name__ == "__main__":
    # NOTE(review): `genJAR` is not defined or imported in this excerpt --
    # presumably it lives in one of the scripts added to sys.path above.
    genJAR()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
31918,
26978,
72,
2488,
1035,
525,
13,
15532,
13,
1671,
198,
2,
1024,
89,
14,
5539,
198,
2,
3167,
6671,
1437... | 2.585366 | 287 |
from django.conf.urls import url
from plan2dance.views import ActionModel, MusicAnalysis, PlanningGeneration, ScriptGeneration
# Map each endpoint regex to its class-based view.
_routes = (
    (r'^action_model$', ActionModel),
    (r'^music_analysis$', MusicAnalysis),
    (r'^planning_generation$', PlanningGeneration),
    (r'^script_generation$', ScriptGeneration),
)
urlpatterns = [url(pattern, view.as_view()) for pattern, view in _routes]
| [
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
19016,
198,
6738,
1410,
17,
67,
590,
13,
33571,
1330,
7561,
17633,
11,
7849,
32750,
11,
21913,
8645,
341,
11,
12327,
8645,
341,
198,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
2... | 2.842105 | 133 |
#Note the index.py is merely intended for running the Lambda locally
import json | [
2,
6425,
262,
6376,
13,
9078,
318,
6974,
5292,
329,
2491,
262,
21114,
6814,
15726,
198,
11748,
33918
] | 4.444444 | 18 |
import os
import sys
import json
import atexit
from functools import wraps
from flask import redirect, render_template, session, g, request, Blueprint, url_for
from genelist import app, conf, uptime
# Blueprint wiring for the genelist web UI.  NOTE(review): this is
# Python 2 code (`print` statements below), and every decorated view
# function's body is missing from this excerpt -- the route/decorator
# lines are dangling stubs; restore the bodies from the full source.
site = Blueprint('site', __name__, template_folder='templates', static_folder='static')
import support
print dir(site)
# Attach a database-backed log handler; fall back to stderr-only logging
# if the DB connection cannot be set up.
try:
    dblogger = support.dblogger.DBLogger(conf.build_db_conn())
    app.logger.addHandler(dblogger)
    # `close_dblogger` is defined elsewhere in the full source.
    atexit.register(close_dblogger)
# NOTE(review): bare except silently swallows all errors (including
# KeyboardInterrupt) -- consider `except Exception` in the full source.
except:
    dblogger = None
    sys.stderr.write("Unable to setup DB Logger!\n")
import cbplims.users
import cbplims.projects
# Let's just load a DB connection before each request
@site.before_request
# Be sure to close it
@site.teardown_request
@site.route("/")
@requires_project
@site.route("/settings/")
@requires_project
@site.route("/test")
@site.route("/resetdb")
@site.route("/log")
@requires_admin
@site.route("/dbconsole", methods=['GET', 'POST'])
@requires_global_admin
@site.route("/restart")
import auth.view
import users.view
import projects.view
# Dead debugging block, intentionally disabled.
if False:
    print auth
    print projects
| [
11748,
28686,
198,
11748,
25064,
198,
11748,
33918,
198,
11748,
379,
37023,
198,
198,
6738,
1257,
310,
10141,
1330,
27521,
198,
6738,
42903,
1330,
18941,
11,
8543,
62,
28243,
11,
6246,
11,
308,
11,
2581,
11,
39932,
11,
19016,
62,
1640,
... | 2.884211 | 380 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import kopf
import logging
import json, flatdict, os, os.path
import nuvolaris.config as cfg
import nuvolaris.kube as kube
import nuvolaris.couchdb as couchdb
import nuvolaris.mongodb as mongodb
import nuvolaris.bucket as bucket
import nuvolaris.openwhisk as openwhisk
# kopf operator handlers for the nuvolaris `whisks` custom resource.
# NOTE(review): the handler functions these decorators apply to are missing
# from this excerpt (the decorators are dangling) -- restore the bodies
# from the full source before running.
# tested by an integration test
@kopf.on.login()
# tested by an integration test
@kopf.on.create('nuvolaris.org', 'v1', 'whisks')
# tested by an integration test
@kopf.on.delete('nuvolaris.org', 'v1', 'whisks')
# tested by integration test
@kopf.on.field("service", field='status.loadBalancer')
#@kopf.on.field("sts", field='status.availableReplicas')
| [
2,
49962,
284,
262,
24843,
10442,
5693,
357,
1921,
37,
8,
739,
530,
198,
2,
393,
517,
18920,
5964,
11704,
13,
220,
4091,
262,
28536,
2393,
198,
2,
9387,
351,
428,
670,
329,
3224,
1321,
198,
2,
5115,
6634,
9238,
13,
220,
383,
7054,... | 3.340426 | 423 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.constant.ParamConstants import *
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
2829,
17752,
355,
33918,
198,
198,
6738,
435,
541,
323,
13,
64,
404,
13,
15042,
13,
9979,
415,
13,
22973,
3418... | 2.58 | 50 |