content stringlengths 1 1.05M | input_ids listlengths 1 883k | ratio_char_token float64 1 22.9 | token_count int64 1 883k |
|---|---|---|---|
from dialog_api.users_pb2 import RequestLoadFullUsers, ResponseLoadFullUsers, FullUser
| [
6738,
17310,
62,
15042,
13,
18417,
62,
40842,
17,
1330,
19390,
8912,
13295,
14490,
11,
18261,
8912,
13295,
14490,
11,
6462,
12982,
628
] | 3.826087 | 23 |
import json
| [
11748,
33918,
628
] | 4.333333 | 3 |
#Python's random module includes a function choice(data) that returns a
#random element from a non-empty sequence. The random modul includes a
#more basic function randrange,with parameterization similar to the
#built-in range function , that return a random choice from the given
#range.Using only the randrange funciton,implement your own version of
#the choice function.
import random
lottoMax = list()
lottoMax = [random.randrange(1,50,1) for i in range(1,8)]
print(lottoMax) | [
2,
37906,
338,
4738,
8265,
3407,
257,
2163,
3572,
7,
7890,
8,
326,
5860,
257,
220,
198,
2,
25120,
5002,
422,
257,
1729,
12,
28920,
8379,
13,
383,
4738,
953,
377,
3407,
257,
220,
198,
2,
3549,
4096,
2163,
43720,
9521,
11,
4480,
115... | 3.631579 | 133 |
#!/usr/bin/env python3
"""Tables:
data: pk timestamp field_name field_value source_record
We don't know what type each value will have, so have a column for
int, float, str and bool and leave all but the appropriate value type
NULL. Docs claim that NULL values take no space, so...
Still so many ways we could make this more space efficient, most
obviously by partitioning field_name (and even timestamp?) into
foreign keys.
field_name - could store this in a separate table so that it's only
a foreign key in the data table. Something like:
fields: id field_name field_type
source_record - an id indexing a table where raw source records are
stored, so that we can re-parse and recreate whatever data we want
if needed.
Current implementation is simple and inefficient in both computation
and storage.
TODO: Allow wildcarding field selection, so client can specify 'S330*,Knud*'
"""
import logging
import sys
import json
sys.path.append('.')
from logger.utils.formats import Python_Record
from logger.utils.das_record import DASRecord
try:
import pymongo
MONGO_ENABLED = True
except ImportError:
MONGO_ENABLED = False
################################################################################
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
37811,
51,
2977,
25,
628,
220,
1366,
25,
279,
74,
41033,
2214,
62,
3672,
2214,
62,
8367,
2723,
62,
22105,
198,
198,
1135,
836,
470,
760,
644,
2099,
1123,
1988,
481,
423,
11,
52... | 3.634783 | 345 |
# =============================================================================
# Inicializacin de la aplicacin: __init__.py
# =============================================================================
"""
Este archivo tendr la funcin de crear la aplicacin, que iniciar la base de
datos y registrar los molodelos.
Para ejecutar:
1) En un terminal de linux ir a la ruta:
>> cd Documentos/TFM/ElectricRoute/flask_auth_app
>> export FLASK_APP=project
>> export FLASK_DEBUG=1
>> Si se va a lanzar flask_app en local: export DB_HOST=0.0.0.0
>> Si se va a lanzar flask_app en local: export GRAFANA_HOST=0.0.0.0
>> flask run
2) Abrir el navegador e ir a la ruta http://localhost:5000/login
3") Insertar un mail y una contrasea (cualquier)
Ejemplo:
User: blanca@hotmail.com
Password: blanca
"""
# Se cargan las librerias
from flask import Flask
from flask_login import LoginManager
from flask_mysqldb import MySQL
from os import environ
from .BE.Output import BaseDatos
# Se inicializa SQLAlchemy
db = MySQL()
# Se crea la app
| [
2,
38093,
25609,
198,
2,
220,
554,
6652,
528,
330,
259,
390,
8591,
257,
489,
291,
330,
259,
25,
11593,
15003,
834,
13,
9078,
198,
2,
38093,
25609,
198,
198,
37811,
198,
36,
4169,
3934,
23593,
4327,
81,
8591,
1257,
17879,
390,
1126,
... | 2.988604 | 351 |
from store.cifar10 import Cifar10
import torchvision.transforms as transforms
import time
import matplotlib.pyplot as plt
batch_size = 1
rel_sample_size = 10000
ds = Cifar10(input_data_folder="/home/aarati/datasets/cifar-10-batches-py", \
max_batches=2, batch_size=batch_size, rel_sample_size=rel_sample_size, \
max_samples=1, transform=transforms.ToTensor())
ds.count_num_points()
ds.generate_IR()
all_times = []
for i in range(10):
start = time.time()
ds.initialize_samples()
end = time.time()
all_times.append(end-start)
s = ds.samples[0].get()
print(all_times)
# Sample creation time for sample size:
# 1: [0.349, 0.306, 0.431, 0.303, 0.18, 0.69, 0.557, 0.681, 0.424, 0.300]
# 10: [0.742, 0.685, 0.679, 0.676, 0.673, 0.676, 0.551, 0.673, 0.669, 0.670]
# 100: [0.713, 0.672, 0.668, 0.671, 0.668, 0.680, 0.682, 0.675, 0.673, 0.669]
# 1000: [0.738, 0.689, 0.704, 0.693, 0.684, 0.683, 0.678, 0.677, 0.700, 0.687]
# 10000: [0.765, 0.727, 0.717, 0.740, 0.723, 0.774, 0.720, 0.868, 0.724, 0.771]
# Plotting code
# x = [1, 10, 50, 100, 1000, 10000]
# y = [0.45, 0.702, 0.703, 0.708, 0.715, 0.746]
# plt.plot(x, y, color='b', marker='o', markerfacecolor='k', markersize=10, fillstyle='full', linewidth=3, linestyle='solid')
# plt.xscale('log')
# plt.ylim(0.40, 0.78)
# plt.xlabel("Reservoir Sample Size", fontsize=20, fontweight='semibold', fontname='serif')
# plt.ylabel("Creation Time (s)", fontsize=20, fontweight='semibold', fontname='serif')
# plt.xticks(x, [1, 10, '', 100, 1000, 10000])
# _, ticks = plt.xticks()
# for tick in ticks:
# tick.set_fontsize(16)
# tick.set_fontweight('medium')
# tick.set_fontname('serif')
# _, ticks = plt.yticks()
# for tick in ticks:
# tick.set_fontsize(16)
# tick.set_fontweight('medium')
# tick.set_fontname('serif')
# plt.show()
| [
6738,
3650,
13,
66,
361,
283,
940,
1330,
327,
361,
283,
940,
198,
11748,
28034,
10178,
13,
7645,
23914,
355,
31408,
198,
11748,
640,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
198,
43501,
62,
7857,
796,
352,
... | 2.189904 | 832 |
"""Plugin that invokes the built-in template renderer."""
__all__ = [
"RenderOptions",
"render",
]
from typing import Dict, List
from pydantic import BaseModel
from beet import Context, configurable
| [
37811,
37233,
326,
800,
3369,
262,
3170,
12,
259,
11055,
9851,
11882,
526,
15931,
628,
198,
834,
439,
834,
796,
685,
198,
220,
220,
220,
366,
45819,
29046,
1600,
198,
220,
220,
220,
366,
13287,
1600,
198,
60,
628,
198,
6738,
19720,
... | 3.223881 | 67 |
import sys
import tkinter
from .customtkinter_tk import CTk
from .customtkinter_frame import CTkFrame
from .appearance_mode_tracker import AppearanceModeTracker
from .customtkinter_color_manager import CTkColorManager
| [
11748,
25064,
198,
11748,
256,
74,
3849,
198,
198,
6738,
764,
23144,
30488,
3849,
62,
30488,
1330,
16356,
74,
198,
6738,
764,
23144,
30488,
3849,
62,
14535,
1330,
16356,
74,
19778,
198,
6738,
764,
1324,
23435,
62,
14171,
62,
2213,
10735... | 3.606557 | 61 |
import pytest
from backtest.strategy import BuyAndHoldEqualAllocation
| [
11748,
12972,
9288,
198,
6738,
736,
9288,
13,
2536,
4338,
1330,
220,
11763,
1870,
26807,
36,
13255,
3237,
5040,
198
] | 3.55 | 20 |
# SPDX-License-Identifier: Apache-2.0
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import onnx
from ..base import Base
from . import expect
| [
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
24843,
12,
17,
13,
15,
198,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,
11593,
37443,
834,
1330,
7297,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
6738,
... | 3.513158 | 76 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Python-nvd3 is a Python wrapper for NVD3 graph library.
NVD3 is an attempt to build re-usable charts and chart components
for d3.js without taking away the power that d3.js gives you.
Project location : https://github.com/areski/python-nvd3
"""
from .NVD3Chart import NVD3Chart, TemplateMixin
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
198,
37906,
12,
77,
20306,
18,
318,
257,
11361,
29908,
329,
399,
8898,
18,
4823,
5888,
13,
198,
45,
8898,
18,
... | 2.982609 | 115 |
"""
Date: 2021/09/23
Target: config utilities for yml file.
implementation adapted from Slimmable: https://github.com/JiahuiYu/slimmable_networks.git
"""
import os
import yaml
def get_config(config_file):
assert os.path.exists(config_file), 'File {} not exist.'.format(config_file)
return Config(config_file) | [
37811,
198,
10430,
25,
33448,
14,
2931,
14,
1954,
198,
21745,
25,
4566,
20081,
329,
331,
4029,
2393,
13,
198,
198,
320,
32851,
16573,
422,
34199,
44102,
25,
3740,
1378,
12567,
13,
785,
14,
41,
9520,
9019,
40728,
14,
82,
2475,
44102,
... | 2.954128 | 109 |
from marshmallow import Schema, fields, validate
from sqlalchemy import Column, String, Integer, Float, Date, ForeignKey
from sqlalchemy.orm import relationship
from ..funders.entities import Funder, FunderSchema
from src.api import db
from src.shared.entity import Base
| [
6738,
22397,
42725,
1330,
10011,
2611,
11,
7032,
11,
26571,
198,
6738,
44161,
282,
26599,
1330,
29201,
11,
10903,
11,
34142,
11,
48436,
11,
7536,
11,
8708,
9218,
198,
6738,
44161,
282,
26599,
13,
579,
1330,
2776,
198,
6738,
11485,
10990... | 3.84507 | 71 |
###
# Copyright Notice:
# Copyright 2016 Distributed Management Task Force, Inc. All rights reserved.
# License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/python-redfish-utility/blob/master/LICENSE.md
###
""" Commit Command for RDMC """
import sys
from optparse import OptionParser
from rdmc_helper import ReturnCodes, InvalidCommandLineErrorOPTS, \
NoChangesFoundOrMadeError, NoCurrentSessionEstablished
from rdmc_base_classes import RdmcCommandBase
| [
21017,
201,
198,
2,
15069,
17641,
25,
201,
198,
2,
15069,
1584,
4307,
6169,
8549,
15941,
5221,
11,
3457,
13,
1439,
2489,
10395,
13,
201,
198,
2,
13789,
25,
347,
10305,
513,
12,
2601,
682,
13789,
13,
1114,
1336,
2420,
766,
2792,
25,
... | 2.911111 | 180 |
from django.conf import settings
from django.contrib.auth.decorators import user_passes_test
from django.core.paginator import Paginator
from django.core.urlresolvers import reverse
from django.http import Http404, HttpResponseRedirect, HttpResponse
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.utils.encoding import smart_unicode, iri_to_uri
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.cache import never_cache
from rosetta.conf import settings as rosetta_settings
from rosetta.polib import pofile
from rosetta.poutil import find_pos, pagination_range
from rosetta.signals import entry_changed, post_save
import re
import rosetta
import datetime
import unicodedata
import hashlib
import os
def home(request):
"""
Displays a list of messages to be translated
"""
def fix_nls(in_, out_):
"""Fixes submitted translations by filtering carriage returns and pairing
newlines at the begging and end of the translated string with the original
"""
if 0 == len(in_) or 0 == len(out_):
return out_
if "\r" in out_ and "\r" not in in_:
out_ = out_.replace("\r", '')
if "\n" == in_[0] and "\n" != out_[0]:
out_ = "\n" + out_
elif "\n" != in_[0] and "\n" == out_[0]:
out_ = out_.lstrip()
if "\n" == in_[-1] and "\n" != out_[-1]:
out_ = out_ + "\n"
elif "\n" != in_[-1] and "\n" == out_[-1]:
out_ = out_.rstrip()
return out_
version = rosetta.get_version(True)
if 'rosetta_i18n_fn' in request.session:
rosetta_i18n_fn = request.session.get('rosetta_i18n_fn')
rosetta_i18n_app = get_app_name(rosetta_i18n_fn)
rosetta_i18n_lang_code = request.session['rosetta_i18n_lang_code']
rosetta_i18n_lang_bidi = rosetta_i18n_lang_code.split('-')[0] in settings.LANGUAGES_BIDI
rosetta_i18n_write = request.session.get('rosetta_i18n_write', True)
if rosetta_i18n_write:
rosetta_i18n_pofile = pofile(rosetta_i18n_fn)
for entry in rosetta_i18n_pofile:
entry.md5hash = hashlib.md5(entry.msgid.encode("utf8") + entry.msgstr.encode("utf8")).hexdigest()
else:
rosetta_i18n_pofile = request.session.get('rosetta_i18n_pofile')
if 'filter' in request.GET:
if request.GET.get('filter') in ('untranslated', 'translated', 'fuzzy', 'all'):
filter_ = request.GET.get('filter')
request.session['rosetta_i18n_filter'] = filter_
return HttpResponseRedirect(reverse('rosetta-home'))
rosetta_i18n_filter = request.session.get('rosetta_i18n_filter', 'all')
if '_next' in request.POST:
rx = re.compile(r'^m_([0-9a-f]+)')
rx_plural = re.compile(r'^m_([0-9a-f]+)_([0-9]+)')
file_change = False
for key, value in request.POST.items():
md5hash = None
plural_id = None
if rx_plural.match(key):
md5hash = str(rx_plural.match(key).groups()[0])
# polib parses .po files into unicode strings, but
# doesn't bother to convert plural indexes to int,
# so we need unicode here.
plural_id = unicode(rx_plural.match(key).groups()[1])
elif rx.match(key):
md5hash = str(rx.match(key).groups()[0])
if md5hash is not None:
entry = rosetta_i18n_pofile.find(md5hash, 'md5hash')
# If someone did a makemessage, some entries might
# have been removed, so we need to check.
if entry:
old_msgstr = entry.msgstr
if plural_id is not None:
plural_string = fix_nls(entry.msgstr_plural[plural_id], value)
entry.msgstr_plural[plural_id] = plural_string
else:
entry.msgstr = fix_nls(entry.msgid, value)
is_fuzzy = bool(request.POST.get('f_%s' % md5hash, False))
old_fuzzy = 'fuzzy' in entry.flags
if old_fuzzy and not is_fuzzy:
entry.flags.remove('fuzzy')
elif not old_fuzzy and is_fuzzy:
entry.flags.append('fuzzy')
file_change = True
if old_msgstr != value or old_fuzzy != is_fuzzy:
entry_changed.send(sender=entry,
user=request.user,
old_msgstr=old_msgstr,
old_fuzzy=old_fuzzy,
pofile=rosetta_i18n_fn,
language_code=rosetta_i18n_lang_code,
)
else:
request.session['rosetta_last_save_error'] = True
if file_change and rosetta_i18n_write:
try:
# Provide defaults in case authorization is not required.
request.user.first_name = getattr(request.user, 'first_name', 'Anonymous')
request.user.last_name = getattr(request.user, 'last_name', 'User')
request.user.email = getattr(request.user, 'email', 'anonymous@user.tld')
rosetta_i18n_pofile.metadata['Last-Translator'] = unicodedata.normalize('NFKD', u"%s %s <%s>" % (request.user.first_name, request.user.last_name, request.user.email)).encode('ascii', 'ignore')
rosetta_i18n_pofile.metadata['X-Translated-Using'] = u"django-rosetta %s" % rosetta.get_version(False)
rosetta_i18n_pofile.metadata['PO-Revision-Date'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M%z')
except UnicodeDecodeError:
pass
try:
rosetta_i18n_pofile.save()
rosetta_i18n_pofile.save_as_mofile(rosetta_i18n_fn.replace('.po', '.mo'))
post_save.send(sender=None, language_code=rosetta_i18n_lang_code, request=request)
# Try auto-reloading via the WSGI daemon mode reload mechanism
if rosetta_settings.WSGI_AUTO_RELOAD and \
'mod_wsgi.process_group' in request.environ and \
request.environ.get('mod_wsgi.process_group', None) and \
'SCRIPT_FILENAME' in request.environ and \
int(request.environ.get('mod_wsgi.script_reloading', '0')):
try:
os.utime(request.environ.get('SCRIPT_FILENAME'), None)
except OSError:
pass
# Try auto-reloading via uwsgi daemon reload mechanism
if rosetta_settings.UWSGI_AUTO_RELOAD:
try:
import uwsgi
# pretty easy right?
uwsgi.reload()
except:
# we may not be running under uwsgi :P
pass
except:
request.session['rosetta_i18n_write'] = False
request.session['rosetta_i18n_pofile'] = rosetta_i18n_pofile
# Retain query arguments
query_arg = ''
if 'query' in request.REQUEST:
query_arg = '?query=%s' % request.REQUEST.get('query')
if 'page' in request.GET:
if query_arg:
query_arg = query_arg + '&'
else:
query_arg = '?'
query_arg = query_arg + 'page=%d' % int(request.GET.get('page'))
return HttpResponseRedirect(reverse('rosetta-home') + iri_to_uri(query_arg))
rosetta_i18n_lang_name = _(request.session.get('rosetta_i18n_lang_name'))
rosetta_i18n_lang_code = request.session.get('rosetta_i18n_lang_code')
if 'query' in request.REQUEST and request.REQUEST.get('query', '').strip():
query = request.REQUEST.get('query').strip()
rx = re.compile(re.escape(query), re.IGNORECASE)
paginator = Paginator([e for e in rosetta_i18n_pofile if not e.obsolete and rx.search(smart_unicode(e.msgstr) + smart_unicode(e.msgid) + u''.join([o[0] for o in e.occurrences]))], rosetta_settings.MESSAGES_PER_PAGE)
else:
if rosetta_i18n_filter == 'untranslated':
paginator = Paginator(rosetta_i18n_pofile.untranslated_entries(), rosetta_settings.MESSAGES_PER_PAGE)
elif rosetta_i18n_filter == 'translated':
paginator = Paginator(rosetta_i18n_pofile.translated_entries(), rosetta_settings.MESSAGES_PER_PAGE)
elif rosetta_i18n_filter == 'fuzzy':
paginator = Paginator([e for e in rosetta_i18n_pofile.fuzzy_entries() if not e.obsolete], rosetta_settings.MESSAGES_PER_PAGE)
else:
paginator = Paginator([e for e in rosetta_i18n_pofile if not e.obsolete], rosetta_settings.MESSAGES_PER_PAGE)
if 'page' in request.GET and int(request.GET.get('page')) <= paginator.num_pages and int(request.GET.get('page')) > 0:
page = int(request.GET.get('page'))
else:
page = 1
messages = paginator.page(page).object_list
if rosetta_settings.MAIN_LANGUAGE and rosetta_settings.MAIN_LANGUAGE != rosetta_i18n_lang_code:
main_language = None
for language in settings.LANGUAGES:
if language[0] == rosetta_settings.MAIN_LANGUAGE:
main_language = _(language[1])
break
fl = ("/%s/" % rosetta_settings.MAIN_LANGUAGE).join(rosetta_i18n_fn.split("/%s/" % rosetta_i18n_lang_code))
po = pofile(fl)
main_messages = []
for message in messages:
message.main_lang = po.find(message.msgid).msgstr
needs_pagination = paginator.num_pages > 1
if needs_pagination:
if paginator.num_pages >= 10:
page_range = pagination_range(1, paginator.num_pages, page)
else:
page_range = range(1, 1 + paginator.num_pages)
ADMIN_MEDIA_PREFIX = settings.STATIC_URL
ENABLE_TRANSLATION_SUGGESTIONS = rosetta_settings.BING_APP_ID and rosetta_settings.ENABLE_TRANSLATION_SUGGESTIONS
BING_APP_ID = rosetta_settings.BING_APP_ID
MESSAGES_SOURCE_LANGUAGE_NAME = rosetta_settings.MESSAGES_SOURCE_LANGUAGE_NAME
MESSAGES_SOURCE_LANGUAGE_CODE = rosetta_settings.MESSAGES_SOURCE_LANGUAGE_CODE
if 'rosetta_last_save_error' in request.session:
del(request.session['rosetta_last_save_error'])
rosetta_last_save_error = True
return render_to_response('rosetta/pofile.html', locals(), context_instance=RequestContext(request))
else:
return list_languages(request)
home = never_cache(home)
home = user_passes_test(lambda user: can_translate(user), settings.LOGIN_URL)(home)
download_file = never_cache(download_file)
download_file = user_passes_test(lambda user: can_translate(user), settings.LOGIN_URL)(download_file)
def list_languages(request):
"""
Lists the languages for the current project, the gettext catalog files
that can be translated and their translation progress
"""
languages = []
if 'filter' in request.GET:
if request.GET.get('filter') in ('project', 'third-party', 'django', 'all'):
filter_ = request.GET.get('filter')
request.session['rosetta_i18n_catalog_filter'] = filter_
return HttpResponseRedirect(reverse('rosetta-pick-file'))
rosetta_i18n_catalog_filter = request.session.get('rosetta_i18n_catalog_filter', 'project')
third_party_apps = rosetta_i18n_catalog_filter in ('all', 'third-party')
django_apps = rosetta_i18n_catalog_filter in ('all', 'django')
project_apps = rosetta_i18n_catalog_filter in ('all', 'project')
has_pos = False
for language in settings.LANGUAGES:
pos = find_pos(language[0], project_apps=project_apps, django_apps=django_apps, third_party_apps=third_party_apps)
has_pos = has_pos or len(pos)
languages.append(
(language[0],
_(language[1]),
[(get_app_name(l), os.path.realpath(l), pofile(l)) for l in pos],
)
)
ADMIN_MEDIA_PREFIX = settings.STATIC_URL
version = rosetta.get_version(True)
return render_to_response('rosetta/languages.html', locals(), context_instance=RequestContext(request))
list_languages = never_cache(list_languages)
list_languages = user_passes_test(lambda user: can_translate(user), settings.LOGIN_URL)(list_languages)
def lang_sel(request, langid, idx):
"""
Selects a file to be translated
"""
if langid not in [l[0] for l in settings.LANGUAGES]:
raise Http404
else:
rosetta_i18n_catalog_filter = request.session.get('rosetta_i18n_catalog_filter', 'project')
third_party_apps = rosetta_i18n_catalog_filter in ('all', 'third-party')
django_apps = rosetta_i18n_catalog_filter in ('all', 'django')
project_apps = rosetta_i18n_catalog_filter in ('all', 'project')
file_ = find_pos(langid, project_apps=project_apps, django_apps=django_apps, third_party_apps=third_party_apps)[int(idx)]
request.session['rosetta_i18n_lang_code'] = langid
request.session['rosetta_i18n_lang_name'] = unicode([l[1] for l in settings.LANGUAGES if l[0] == langid][0])
request.session['rosetta_i18n_fn'] = file_
po = pofile(file_)
for entry in po:
entry.md5hash = hashlib.md5(entry.msgid.encode("utf8") + entry.msgstr.encode("utf8")).hexdigest()
request.session['rosetta_i18n_pofile'] = po
try:
os.utime(file_, None)
request.session['rosetta_i18n_write'] = True
except OSError:
request.session['rosetta_i18n_write'] = False
return HttpResponseRedirect(reverse('rosetta-home'))
lang_sel = never_cache(lang_sel)
lang_sel = user_passes_test(lambda user: can_translate(user), settings.LOGIN_URL)(lang_sel)
| [
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
12501,
273,
2024,
1330,
2836,
62,
6603,
274,
62,
9288,
198,
6738,
42625,
14208,
13,
7295,
13,
79,
363,
20900,
1330,
31525,
20900,
198,
6... | 1.965794 | 7,484 |
# to run this, add code from experiments_HSCC2021.py
| [
2,
284,
1057,
428,
11,
751,
2438,
422,
10256,
62,
7998,
4093,
1238,
2481,
13,
9078,
628
] | 3.176471 | 17 |
# -*- coding: utf-8 -*-
#
# Copyright 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
DRAC Management Driver
"""
from oslo.utils import excutils
from oslo.utils import importutils
from ironic.common import boot_devices
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common.i18n import _LE
from ironic.drivers import base
from ironic.drivers.modules.drac import common as drac_common
from ironic.drivers.modules.drac import resource_uris
from ironic.openstack.common import log as logging
pywsman = importutils.try_import('pywsman')
LOG = logging.getLogger(__name__)
_BOOT_DEVICES_MAP = {
boot_devices.DISK: 'HardDisk',
boot_devices.PXE: 'NIC',
boot_devices.CDROM: 'Optical',
}
# IsNext constants
PERSISTENT = '1'
""" Is the next boot config the system will use. """
NOT_NEXT = '2'
""" Is not the next boot config the system will use. """
ONE_TIME_BOOT = '3'
""" Is the next boot config the system will use, one time boot only. """
def _get_next_boot_mode(node):
"""Get the next boot mode.
To see a list of supported boot modes see: http://goo.gl/aEsvUH
(Section 7.2)
:param node: an ironic node object.
:raises: DracClientError on an error from pywsman library.
:returns: a dictionary containing:
:instance_id: the instance id of the boot device.
:is_next: whether it's the next device to boot or not. One of
PERSISTENT, NOT_NEXT, ONE_TIME_BOOT constants.
"""
client = drac_common.get_wsman_client(node)
options = pywsman.ClientOptions()
filter_query = ('select * from DCIM_BootConfigSetting where IsNext=%s '
'or IsNext=%s' % (PERSISTENT, ONE_TIME_BOOT))
try:
doc = client.wsman_enumerate(resource_uris.DCIM_BootConfigSetting,
options, filter_query=filter_query)
except exception.DracClientError as exc:
with excutils.save_and_reraise_exception():
LOG.error(_LE('DRAC driver failed to get next boot mode for '
'node %(node_uuid)s. Reason: %(error)s.'),
{'node_uuid': node.uuid, 'error': exc})
items = drac_common.find_xml(doc, 'DCIM_BootConfigSetting',
resource_uris.DCIM_BootConfigSetting,
find_all=True)
# This list will have 2 items maximum, one for the persistent element
# and another one for the OneTime if set
boot_mode = None
for i in items:
instance_id = drac_common.find_xml(i, 'InstanceID',
resource_uris.DCIM_BootConfigSetting).text
is_next = drac_common.find_xml(i, 'IsNext',
resource_uris.DCIM_BootConfigSetting).text
boot_mode = {'instance_id': instance_id, 'is_next': is_next}
# If OneTime is set we should return it, because that's
# where the next boot device is
if is_next == ONE_TIME_BOOT:
break
return boot_mode
def _create_config_job(node):
"""Create a configuration job.
This method is used to apply the pending values created by
set_boot_device().
:param node: an ironic node object.
:raises: DracClientError on an error from pywsman library.
:raises: DracConfigJobCreationError on an error when creating the job.
"""
client = drac_common.get_wsman_client(node)
options = pywsman.ClientOptions()
options.add_selector('CreationClassName', 'DCIM_BIOSService')
options.add_selector('Name', 'DCIM:BIOSService')
options.add_selector('SystemCreationClassName', 'DCIM_ComputerSystem')
options.add_selector('SystemName', 'DCIM:ComputerSystem')
options.add_property('Target', 'BIOS.Setup.1-1')
options.add_property('ScheduledStartTime', 'TIME_NOW')
doc = client.wsman_invoke(resource_uris.DCIM_BIOSService,
options, 'CreateTargetedConfigJob')
return_value = drac_common.find_xml(doc, 'ReturnValue',
resource_uris.DCIM_BIOSService).text
# NOTE(lucasagomes): Possible return values are: RET_ERROR for error
# or RET_CREATED job created (but changes will be
# applied after the reboot)
# Boot Management Documentation: http://goo.gl/aEsvUH (Section 8.4)
if return_value == drac_common.RET_ERROR:
error_message = drac_common.find_xml(doc, 'Message',
resource_uris.DCIM_BIOSService).text
raise exception.DracConfigJobCreationError(error=error_message)
def _check_for_config_job(node):
"""Check if a configuration job is already created.
:param node: an ironic node object.
:raises: DracClientError on an error from pywsman library.
:raises: DracConfigJobCreationError if the job is already created.
"""
client = drac_common.get_wsman_client(node)
options = pywsman.ClientOptions()
try:
doc = client.wsman_enumerate(resource_uris.DCIM_LifecycleJob, options)
except exception.DracClientError as exc:
with excutils.save_and_reraise_exception():
LOG.error(_LE('DRAC driver failed to list the configuration jobs '
'for node %(node_uuid)s. Reason: %(error)s.'),
{'node_uuid': node.uuid, 'error': exc})
items = drac_common.find_xml(doc, 'DCIM_LifecycleJob',
resource_uris.DCIM_LifecycleJob,
find_all=True)
for i in items:
name = drac_common.find_xml(i, 'Name', resource_uris.DCIM_LifecycleJob)
if 'BIOS.Setup.1-1' not in name.text:
continue
job_status = drac_common.find_xml(i, 'JobStatus',
resource_uris.DCIM_LifecycleJob).text
# If job is already completed or failed we can
# create another one.
# Job Control Documentation: http://goo.gl/o1dDD3 (Section 7.2.3.2)
if job_status.lower() not in ('completed', 'failed'):
job_id = drac_common.find_xml(i, 'InstanceID',
resource_uris.DCIM_LifecycleJob).text
reason = (_('Another job with ID "%s" is already created '
'to configure the BIOS. Wait until existing job '
'is completed or is cancelled') % job_id)
raise exception.DracConfigJobCreationError(error=reason)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
15069,
1946,
2297,
10983,
11,
3457,
13,
198,
2,
1439,
6923,
33876,
13,
198,
2,
198,
2,
220,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
... | 2.374497 | 2,980 |
import numpy as np
import matplotlib.pyplot as plt
#grid number on half space (without the origin)
N=150
#total grid number = 2*N + 1 (with origin)
N_g=2*N+1
#finite barrier potential value = 300 (meV)
potential_value=300
#building potential:
# #Hamiltonian matrix:
V=potential(potential_value)
H=Hamiltonian(V)
#sort the eigenvalue and get the corresponding eigenvector
eigenvalue,eigenvector=np.linalg.eig(H)
idx=np.argsort(eigenvalue)
eigenvalue=eigenvalue[idx]
eigenvector=eigenvector[:,idx]
#visualize
fig=plt.figure(figsize=(18,6))
ax1=fig.add_subplot(131)
x=np.linspace(0,10,11)
ax1.plot(x,eigenvalue[0:11],'r.',label='numerical')
ax1.set_xlabel('n')
ax1.set_ylabel('$E_n (meV)$')
ax1.set_title('eigen energies')
ax1.grid(True)
ax1.legend()
ax2=fig.add_subplot(132)
x=np.linspace(-5,5,301)
#x/lamda_0
x=x/(np.sqrt(2)*10**(10-9)/np.pi)
y1=eigenvector[:,0]
y2=eigenvector[:,1]
y3=eigenvector[:,2]
y4=eigenvector[:,3]
y5=eigenvector[:,4]
ax2.plot(x,(y1),label='$_{n=0}(x)$')
ax2.plot(x,(y2),label='$_{n=1}(x)$')
ax2.plot(x,(y3),label='$_{n=2}(x)$')
ax2.set_xlabel('position ($x/_0$) ')
ax2.set_ylabel('wavefunction')
ax2.set_title('wave function in different eigen state')
ax2.legend()
ax2.grid(True)
ax3=fig.add_subplot(133)
ax3.plot(x,(y1**2),label='$^2_{n=0}(x)$')
ax3.plot(x,(y2**2),label='$^2_{n=1}(x)$')
ax3.plot(x,(y3**2),label='$^2_{n=2}(x)$')
ax3.set_xlabel('position ($x/_0$) ')
ax3.set_ylabel('square wavefunction')
ax3.set_title('probability distribution in finite barrier well')
ax3.grid(True)
ax3.legend()
plt.show() | [
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
198,
2,
25928,
1271,
319,
2063,
2272,
357,
19419,
262,
8159,
8,
198,
45,
28,
8628,
198,
2,
23350,
10706,
1271,
796,
362,
9,
45,
1343,
... | 2.097959 | 735 |
from .BaseSVDD import BaseSVDD | [
6738,
764,
14881,
50,
8898,
35,
1330,
7308,
50,
8898,
35
] | 2.727273 | 11 |
import setuptools
import glob
import os
required = [
"requests",
"pandas",
"arrow",
"socketIO-client-nexus"
]
setuptools.setup(name='iex-api-python',
version="0.0.5",
description='Fetch data from the IEX API',
long_description=open('README.md').read().strip(),
author='Daniel E. Cook',
author_email='danielecook@gmail.com',
url='http://www.github.com/danielecook/iex-api-python',
packages=['iex'],
install_requires=required,
keywords=['finance', 'stock', 'market', 'market-data', 'IEX', 'API'],
license='MIT License',
zip_safe=False)
| [
11748,
900,
37623,
10141,
198,
11748,
15095,
198,
11748,
28686,
198,
198,
35827,
796,
685,
198,
220,
220,
220,
366,
8897,
3558,
1600,
198,
220,
220,
220,
366,
79,
392,
292,
1600,
198,
220,
220,
220,
366,
6018,
1600,
198,
220,
220,
2... | 1.929504 | 383 |
import os
import click
import logging
from pathlib import Path
from dotenv import find_dotenv, load_dotenv
from keras.callbacks import ModelCheckpoint, EarlyStopping
import src.utils.utils as ut
import src.utils.model_utils as mu
import src.models.model as md
import src.models.data_generator as dg
import src.data.dataframe as dat
if __name__ == '__main__':
log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=log_fmt)
project_dir = Path(__file__).resolve().parents[2]
load_dotenv(find_dotenv())
main()
| [
11748,
28686,
198,
11748,
3904,
198,
11748,
18931,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
16605,
24330,
1330,
1064,
62,
26518,
24330,
11,
3440,
62,
26518,
24330,
198,
198,
6738,
41927,
292,
13,
13345,
10146,
1330,
9104,
9787,
4122... | 2.788732 | 213 |
"""
MIT License
Copyright (c) 2020 Shantanu Ghosh
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import numpy
from matplotlib import pyplot
import pandas as pd
import os
from Propensity_socre_network import Propensity_socre_network
from Utils import Utils
from dataloader import DataLoader
# Train a neural-net propensity-score model on the IHDP sample and plot
# the treated/control score distributions.
csv_path = "Dataset/ihdp_sample.csv"
# 139 treated
# 747 - 139 = 608 control
# 747 total
split_size = 0.8
device = Utils.get_device()
dL = DataLoader()
np_covariates_X_train, np_covariates_X_test, np_covariates_Y_train, np_covariates_Y_test = \
    dL.preprocess_data_from_csv(csv_path, split_size)
ps_train_set = dL.convert_to_tensor(np_covariates_X_train, np_covariates_Y_train)
# Hyper-parameters for the propensity-score network training run.
train_parameters_NN = {
    "epochs": 75,
    "lr": 0.001,
    "batch_size": 32,
    "shuffle": True,
    "train_set": ps_train_set,
    "model_save_path": "./Propensity_Model/NN_PS_model_iter_id_"
                       + str(1) + "_epoch_{0}_lr_{1}.pth"
}
# ps using NN
ps_net_NN = Propensity_socre_network()
print("############### Propensity Score neural net Training ###############")
ps_net_NN.train(train_parameters_NN, device, phase="train")
# eval: reload the checkpoint written above (iter id 1, epoch 75, lr 0.001)
eval_parameters_NN = {
    "eval_set": ps_train_set,
    "model_path": "./Propensity_Model/NN_PS_model_iter_id_{0}_epoch_75_lr_0.001.pth"
        .format(1)
}
ps_score_list_NN = ps_net_NN.eval_return_complete_list(eval_parameters_NN, device, phase="eval")
# Split predicted propensity scores by treatment assignment.
treated_ps_list = [d["prop_score"] for d in ps_score_list_NN if d['treatment'] == 1]
control_ps_list = [d["prop_score"] for d in ps_score_list_NN if d['treatment'] == 0]
for ps_dict in treated_ps_list:
    print(ps_dict)
print("--------------")
for ps_dict in control_ps_list:
    print(ps_dict)
print("treated: " + str(len(treated_ps_list)))
print("control: " + str(len(control_ps_list)))
print("total: " + str(len(treated_ps_list) + len(control_ps_list)))
# Histogram bin edges: full range plus three zoomed sub-ranges.
bins1 = numpy.linspace(0, 1, 100)
bins2 = numpy.linspace(0, 0.2, 100)
bins3 = numpy.linspace(0.2, 0.5, 100)
bins4 = numpy.linspace(0.5, 1, 100)
# NOTE(review): `draw` is not defined in this chunk -- presumably a
# plotting helper defined elsewhere in the file; confirm.
draw(treated_ps_list, control_ps_list, bins1)
draw(treated_ps_list, control_ps_list, bins2)
draw(treated_ps_list, control_ps_list, bins3)
draw(treated_ps_list, control_ps_list, bins4)
| [
37811,
198,
36393,
13789,
198,
198,
15269,
357,
66,
8,
12131,
49892,
42357,
11972,
3768,
198,
198,
5990,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
16727,
257,
4866,
198,
1659,
428,
3788,
290,
3917,
10314,
3696,
... | 2.733391 | 1,159 |
# Copyright (c) 2016-2019. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division, absolute_import
import logging
from Bio.Seq import reverse_complement
from pyensembl import Transcript
from ..common import groupby_field
from .transcript_helpers import interval_offset_on_transcript
from .effect_helpers import changes_exonic_splice_site
from .effect_collection import EffectCollection
from .effect_prediction_coding import predict_variant_coding_effect_on_transcript
from .effect_classes import (
Failure,
Intergenic,
Intragenic,
NoncodingTranscript,
IncompleteTranscript,
FivePrimeUTR,
ThreePrimeUTR,
Intronic,
IntronicSpliceSite,
SpliceAcceptor,
SpliceDonor,
StartLoss,
ExonLoss,
ExonicSpliceSite,
)
logger = logging.getLogger(__name__)
def predict_variant_effects(variant, raise_on_error=False):
    """Determine the effects of a variant on any transcripts it overlaps.

    Returns an EffectCollection object, except on the error path described
    under ``raise_on_error``.

    Parameters
    ----------
    variant : Variant

    raise_on_error : bool
        Raise an exception if we encounter an error while trying to
        determine the effect of this variant on a transcript, or simply
        log the error and continue. When False and the variant's genes or
        transcripts cannot be determined, an empty list is returned
        (kept as-is for backward compatibility with existing callers).
    """
    # if this variant isn't overlapping any genes, return a
    # Intergenic effect
    # TODO: look for nearby genes and mark those as Upstream and Downstream
    # effects
    try:
        gene_ids = variant.gene_ids
        transcripts = variant.transcripts
    except Exception:
        # Fix: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit; those should always propagate.
        if raise_on_error:
            raise
        else:
            return []
    if len(gene_ids) == 0:
        effects = [Intergenic(variant)]
    else:
        # list of all MutationEffects for all genes & transcripts
        effects = []
        # group transcripts by their gene ID
        transcripts_grouped_by_gene = \
            groupby_field(transcripts, 'gene_id')
        # want effects in the list grouped by the gene they come from
        for gene_id in sorted(gene_ids):
            if gene_id not in transcripts_grouped_by_gene:
                # intragenic variant overlaps a gene but not any transcripts
                gene = variant.genome.gene_by_id(gene_id)
                effects.append(Intragenic(variant, gene))
            else:
                # gene ID has transcripts overlapped by this variant
                for transcript in transcripts_grouped_by_gene[gene_id]:
                    if raise_on_error:
                        effect = predict_variant_effect_on_transcript(
                            variant=variant,
                            transcript=transcript)
                    else:
                        effect = predict_variant_effect_on_transcript_or_failure(
                            variant=variant,
                            transcript=transcript)
                    effects.append(effect)
    return EffectCollection(effects)
def predict_variant_effect_on_transcript_or_failure(variant, transcript):
    """
    Try predicting the effect of a variant on a particular transcript but
    suppress raised exceptions by converting them into `Failure` effect
    values.

    Parameters
    ----------
    variant : Variant
    transcript : pyensembl.Transcript

    Returns
    -------
    The predicted effect, or a `Failure` effect if prediction raised an
    AssertionError or ValueError.
    """
    try:
        return predict_variant_effect_on_transcript(
            variant=variant,
            transcript=transcript)
    except (AssertionError, ValueError) as error:
        # Fix: logger.warn() is a deprecated alias of logger.warning().
        logger.warning(
            "Encountered error annotating %s for %s: %s",
            variant,
            transcript,
            error)
        return Failure(variant, transcript)
def predict_variant_effect_on_transcript(variant, transcript):
    """Return the transcript effect (such as FrameShift) that results from
    applying this genomic variant to a particular transcript.

    Parameters
    ----------
    variant : Variant
    transcript : Transcript
        Transcript we're going to apply mutation to.

    Raises
    ------
    TypeError
        If `transcript` is not exactly a pyensembl Transcript.
    """
    if transcript.__class__ is not Transcript:
        raise TypeError(
            "Expected %s : %s to have type Transcript" % (
                transcript, type(transcript)))
    # check for non-coding transcripts first, since
    # every non-coding transcript is "incomplete".
    if not transcript.is_protein_coding:
        return NoncodingTranscript(variant, transcript)
    if not transcript.complete:
        return IncompleteTranscript(variant, transcript)
    # since we're using inclusive base-1 coordinates,
    # checking for overlap requires special logic for insertions
    is_insertion = variant.is_insertion
    # determine if any exons are deleted, and if not,
    # what is the closest exon and how far is this variant
    # from that exon (overlapping the exon = 0 distance)
    completely_lost_exons = []
    # list of which (exon #, Exon) pairs this mutation overlaps
    overlapping_exon_numbers_and_exons = []
    distance_to_nearest_exon = float("inf")
    start_in_exon = False
    end_in_exon = False
    nearest_exon = None
    variant_start = variant.trimmed_base1_start
    variant_end = variant.trimmed_base1_end
    for i, exon in enumerate(transcript.exons):
        # variant spans this whole exon -> the exon is completely lost
        if variant_start <= exon.start and variant_end >= exon.end:
            completely_lost_exons.append(exon)
        if is_insertion and exon.strand == "+" and variant_end == exon.end:
            # insertions after an exon don't overlap the exon
            distance = 1
        elif is_insertion and exon.strand == "-" and variant_start == exon.start:
            # same rule on the reverse strand: the insertion sits just outside
            distance = 1
        else:
            distance = exon.distance_to_interval(variant_start, variant_end)
        if distance == 0:
            # exon numbers are 1-based
            overlapping_exon_numbers_and_exons.append((i + 1, exon))
            # start is contained in current exon
            if exon.start <= variant_start <= exon.end:
                start_in_exon = True
            # end is contained in current exon
            if exon.end >= variant_end >= exon.start:
                end_in_exon = True
        elif distance < distance_to_nearest_exon:
            distance_to_nearest_exon = distance
            nearest_exon = exon
    if len(overlapping_exon_numbers_and_exons) == 0:
        # purely intronic variant: classify by proximity to the nearest exon
        intronic_effect_class = choose_intronic_effect_class(
            variant=variant,
            nearest_exon=nearest_exon,
            distance_to_exon=distance_to_nearest_exon)
        return intronic_effect_class(
            variant=variant,
            transcript=transcript,
            nearest_exon=nearest_exon,
            distance_to_exon=distance_to_nearest_exon)
    elif len(completely_lost_exons) > 0 or (
            len(overlapping_exon_numbers_and_exons) > 1):
        # if spanning multiple exons, or completely deleted an exon
        # then consider that an ExonLoss mutation
        exons = [exon for (_, exon) in overlapping_exon_numbers_and_exons]
        return ExonLoss(variant, transcript, exons)
    assert len(overlapping_exon_numbers_and_exons) == 1
    exon_number, exon = overlapping_exon_numbers_and_exons[0]
    exonic_effect_annotation = exonic_transcript_effect(
        variant, exon, exon_number, transcript)
    # simple case: both start and end are in the same exon
    if start_in_exon and end_in_exon:
        return exonic_effect_annotation
    elif isinstance(exonic_effect_annotation, ExonicSpliceSite):
        # if mutation bleeds over into the intron but even just
        # the exonic portion got annotated as an exonic splice site
        # then return it
        return exonic_effect_annotation
    # variant extends past the exon boundary: wrap the exonic annotation
    # as the alternate effect of an exonic splice-site effect
    return ExonicSpliceSite(
        variant=variant,
        transcript=transcript,
        exon=exon,
        alternate_effect=exonic_effect_annotation)
def choose_intronic_effect_class(
        variant,
        nearest_exon,
        distance_to_exon):
    """Infer the effect class of a variant that does not overlap any exon
    of the given transcript, based on its distance to the nearest exon and
    on which side of that exon it falls.
    """
    assert distance_to_exon > 0, \
        "Expected intronic effect to have distance_to_exon > 0, got %d" % (
            distance_to_exon,)

    if nearest_exon.strand == "+":
        # forward strand: "before" means upstream of the exon start
        # (insertions landing exactly on the start also count as before)
        before_exon = (
            variant.trimmed_base1_start < nearest_exon.start or
            (variant.is_insertion and
             variant.trimmed_base1_start == nearest_exon.start))
    else:
        # reverse strand: "before" means past the exon end
        before_exon = (
            variant.trimmed_base1_end > nearest_exon.end or
            (variant.is_insertion and
             variant.trimmed_base1_end == nearest_exon.end))

    # distance cutoffs based on consensus splice sequences from
    # http://www.ncbi.nlm.nih.gov/pmc/articles/PMC2947103/
    # 5' splice site: MAG|GURAGU consensus
    #   M is A or C; R is purine; | is the exon-intron boundary
    # 3' splice site: YAG|R
    if distance_to_exon <= 2:
        # last 2 intronic nucleotides before the exon form the splice
        # acceptor site (typically "AG"); the first 2 after it form the
        # donor site (typically "GT")
        return SpliceAcceptor if before_exon else SpliceDonor
    if not before_exon and distance_to_exon <= 6:
        # nucleotides 3-6 at the start of the intron aren't as certain
        # to cause problems as nucleotides 1-2, but are still implicated
        # in alternative splicing
        return IntronicSpliceSite
    if before_exon and distance_to_exon <= 3:
        # nucleotide -3 before the exon is part of the 3' splicing motif,
        # with more degeneracy than the -2/-1 positions
        return IntronicSpliceSite
    # intronic mutation unrelated to splicing
    return Intronic
def exonic_transcript_effect(variant, exon, exon_number, transcript):
    """Effect of this variant on a Transcript, assuming we already know
    that this variant overlaps some exon of the transcript.

    Parameters
    ----------
    variant : Variant
    exon : pyensembl.Exon
        Exon which this variant overlaps
    exon_number : int
        Index (starting from 1) of the given exon in the transcript's
        sequence of exons.
    transcript : pyensembl.Transcript

    Raises
    ------
    ValueError
        If the variant's ref nucleotides disagree with the transcript
        sequence at the computed offset.
    """
    genome_ref = variant.trimmed_ref
    genome_alt = variant.trimmed_alt
    variant_start = variant.trimmed_base1_start
    variant_end = variant.trimmed_base1_end
    # clip mutation to only affect the current exon
    if variant_start < exon.start:
        # if mutation starts before current exon then only look
        # at nucleotides which overlap the exon
        logger.info('Mutation in variant %s starts before exon %s', variant, exon)
        assert len(genome_ref) > 0, "Unexpected insertion into intron"
        n_skip_start = exon.start - variant_start
        genome_ref = genome_ref[n_skip_start:]
        genome_alt = genome_alt[n_skip_start:]
        genome_start = exon.start
    else:
        genome_start = variant_start
    if variant_end > exon.end:
        # if mutation goes past exon end then only look at nucleotides
        # which overlap the exon
        logger.info('Mutation in variant %s ends after exon %s', variant, exon)
        n_skip_end = variant_end - exon.end
        genome_ref = genome_ref[:-n_skip_end]
        genome_alt = genome_alt[:len(genome_ref)]
        genome_end = exon.end
    else:
        genome_end = variant_end
    transcript_offset = interval_offset_on_transcript(
        genome_start, genome_end, transcript)
    # cDNA sequence is the reverse complement of the genome on "-" strand
    if transcript.on_backward_strand:
        cdna_ref = reverse_complement(genome_ref)
        cdna_alt = reverse_complement(genome_alt)
    else:
        cdna_ref = genome_ref
        cdna_alt = genome_alt
    n_ref = len(cdna_ref)
    # sanity-check the variant's ref allele against the transcript sequence
    expected_ref = str(
        transcript.sequence[transcript_offset:transcript_offset + n_ref])
    if cdna_ref != expected_ref:
        raise ValueError(
            ("Found ref nucleotides '%s' in sequence"
             " of %s at offset %d (chromosome positions %d:%d)"
             " but variant %s has '%s'") % (
                expected_ref,
                transcript,
                transcript_offset,
                genome_start,
                genome_end,
                variant,
                cdna_ref))
    utr5_length = min(transcript.start_codon_spliced_offsets)
    # does the variant start inside the 5' UTR?
    if utr5_length > transcript_offset:
        # does the variant end after the 5' UTR, within the coding region?
        if utr5_length < transcript_offset + n_ref:
            # TODO: we *might* lose the Kozak sequence or the start codon
            # but without looking at the modified sequence how can we tell
            # for sure that this is a start-loss variant?
            return StartLoss(variant, transcript)
        else:
            # if variant contained within 5' UTR
            return FivePrimeUTR(variant, transcript)
    utr3_offset = max(transcript.stop_codon_spliced_offsets) + 1
    if transcript_offset >= utr3_offset:
        return ThreePrimeUTR(variant, transcript)
    exon_start_offset = interval_offset_on_transcript(
        exon.start, exon.end, transcript)
    exon_end_offset = exon_start_offset + len(exon) - 1
    # Further below we're going to try to predict exonic splice site
    # modifications, which will take this effect_annotation as their
    # alternative hypothesis for what happens if splicing doesn't change.
    # If the mutation doesn't affect an exonic splice site, then
    # we'll just return this effect.
    coding_effect_annotation = predict_variant_coding_effect_on_transcript(
        variant=variant,
        transcript=transcript,
        trimmed_cdna_ref=cdna_ref,
        trimmed_cdna_alt=cdna_alt,
        transcript_offset=transcript_offset)
    if changes_exonic_splice_site(
            transcript=transcript,
            transcript_ref=cdna_ref,
            transcript_alt=cdna_alt,
            transcript_offset=transcript_offset,
            exon_start_offset=exon_start_offset,
            exon_end_offset=exon_end_offset,
            exon_number=exon_number):
        return ExonicSpliceSite(
            variant=variant,
            transcript=transcript,
            exon=exon,
            alternate_effect=coding_effect_annotation)
    return coding_effect_annotation
| [
2,
15069,
357,
66,
8,
1584,
12,
23344,
13,
5628,
33745,
3961,
286,
11558,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846... | 2.361854 | 6,428 |
# OS-Level Imports
import os
import sys
import multiprocessing
from multiprocessing import cpu_count
# Library Imports
import tensorflow as tf
from tensorflow.keras import mixed_precision
from tensorflow.python.distribute.distribute_lib import Strategy
# Internal Imports
from Utils.enums import Environment, Accelerator
# Global Configuration Variables
# Where the code is running and on what hardware.
environment = Environment.GoogleColab
accelerator = Accelerator.GPU
# Distribution strategy -- presumably assigned later once the accelerator
# is initialized; confirm at the call site.
strategy = None
cpu_no = multiprocessing.cpu_count()
# Training hyper-parameters.
batch_size = 64
latent_dim = 100
epochs = 10
# Fraction of samples treated as labeled (supervised) data.
supervised_samples_ratio = 0.05
save_interval = 17
# Number of supervised / unsupervised batches per step -- TODO confirm
# against the training loop.
super_batches = 1
unsuper_batches = 1
# Let tf.data pick the prefetch buffer size automatically.
prefetch_no = tf.data.AUTOTUNE
eager_execution = True
model_summary = False
resume_training = False
# Output locations for results and the input dataset.
result_path = './results/'
dataset_path = './dataset/'
| [
2,
7294,
12,
4971,
1846,
3742,
220,
201,
198,
11748,
28686,
201,
198,
11748,
25064,
201,
198,
11748,
18540,
305,
919,
278,
201,
198,
6738,
18540,
305,
919,
278,
1330,
42804,
62,
9127,
201,
198,
201,
198,
2,
10074,
1846,
3742,
201,
1... | 2.790378 | 291 |
from power_audit import PowerAudit
if __name__ == '__main__':
    # NOTE(review): `main` is not defined in this chunk -- presumably
    # defined elsewhere in the file; confirm.
    main()
| [
6738,
1176,
62,
3885,
270,
1330,
4333,
16353,
270,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1388,
3419,
198
] | 2.678571 | 28 |
from turbo.flux import Mutation, register, dispatch, register_dispatch
import mutation_types
| [
6738,
29292,
13,
69,
22564,
1330,
337,
7094,
11,
7881,
11,
27965,
11,
7881,
62,
6381,
17147,
198,
198,
11748,
15148,
62,
19199,
628,
198
] | 3.84 | 25 |
import logging
import os
import io
from fastapi import APIRouter, Depends, Header
from fastapi.responses import FileResponse, StreamingResponse
from fastapi import HTTPException, status
import pyarrow as pa
import pyarrow.parquet as pq
from data_service.api.query_models import (
InputTimePeriodQuery, InputTimeQuery, InputFixedQuery
)
from data_service.config import config
from data_service.api.response_models import ErrorMessage
from data_service.config.config import get_settings
from data_service.config.dependencies import get_processor
from data_service.core.processor import Processor
from data_service.api.auth import authorize_user
# Router for the data-service endpoints; presumably mounted by the main
# FastAPI app elsewhere -- confirm.
data_router = APIRouter()
# Module-level logger named after this module.
log = logging.getLogger(__name__)
| [
11748,
18931,
198,
11748,
28686,
198,
11748,
33245,
198,
6738,
3049,
15042,
1330,
3486,
4663,
39605,
11,
2129,
2412,
11,
48900,
198,
6738,
3049,
15042,
13,
16733,
274,
1330,
9220,
31077,
11,
43124,
31077,
198,
6738,
3049,
15042,
1330,
146... | 3.676923 | 195 |
from utils import sanitize
| [
6738,
3384,
4487,
1330,
5336,
270,
1096,
628
] | 3.5 | 8 |
import json
import argparse
from argus.callbacks import MonitorCheckpoint, \
EarlyStopping, LoggingToFile, ReduceLROnPlateau
from torch.utils.data import DataLoader
from src.datasets import FreesoundDataset, FreesoundNoisyDataset, RandomDataset
from src.datasets import get_corrected_noisy_data, FreesoundCorrectedNoisyDataset
from src.mixers import RandomMixer, AddMixer, SigmoidConcatMixer, UseMixerWithProb
from src.transforms import get_transforms
from src.argus_models import FreesoundModel
from src.utils import load_noisy_data, load_folds_data
from src import config
# Command line: which experiment directory to train into.
parser = argparse.ArgumentParser()
parser.add_argument('--experiment', required=True, type=str)
args = parser.parse_args()

# Training hyper-parameters.
BATCH_SIZE = 128
CROP_SIZE = 256
DATASET_SIZE = 128 * 256
NOISY_PROB = 0.01
CORR_NOISY_PROB = 0.42
MIXER_PROB = 0.8
WRAP_PAD_PROB = 0.5
CORRECTIONS = True

# Fewer data-loader workers when running inside a restricted kernel env.
if config.kernel:
    NUM_WORKERS = 2
else:
    NUM_WORKERS = 8
SAVE_DIR = config.experiments_dir / args.experiment
# Model/optimizer/loss configuration passed to the argus model.
PARAMS = {
    'nn_module': ('AuxSkipAttention', {
        'num_classes': len(config.classes),
        'base_size': 64,
        'dropout': 0.4,
        'ratio': 16,
        'kernel_size': 7,
        'last_filters': 8,
        'last_fc': 4
    }),
    'loss': ('OnlyNoisyLSoftLoss', {
        'beta': 0.7,
        'noisy_weight': 0.5,
        'curated_weight': 0.5
    }),
    'optimizer': ('Adam', {'lr': 0.0009}),
    'device': 'cuda',
    'aux': {
        'weights': [1.0, 0.4, 0.2, 0.1]
    },
    'amp': {
        'opt_level': 'O2',
        'keep_batchnorm_fp32': True,
        'loss_scale': "dynamic"
    }
}

if __name__ == "__main__":
    if not SAVE_DIR.exists():
        SAVE_DIR.mkdir(parents=True, exist_ok=True)
    else:
        print(f"Folder {SAVE_DIR} already exists.")
    # Snapshot this script into the experiment folder for reproducibility.
    # Fix: the source file is now opened via a context manager instead of
    # a bare open() whose handle was never closed.
    with open(SAVE_DIR / 'source.py', 'w') as outfile, \
            open(__file__) as source_file:
        outfile.write(source_file.read())
    print("Model params", PARAMS)
    with open(SAVE_DIR / 'params.json', 'w') as outfile:
        json.dump(PARAMS, outfile)
    folds_data = load_folds_data(use_corrections=CORRECTIONS)
    noisy_data = load_noisy_data()
    corrected_noisy_data = get_corrected_noisy_data()
    # Leave-one-fold-out cross-validation over the configured folds.
    # NOTE(review): `train_fold` is not defined in this chunk -- presumably
    # defined earlier in the file; confirm.
    for fold in config.folds:
        val_folds = [fold]
        train_folds = list(set(config.folds) - set(val_folds))
        save_fold_dir = SAVE_DIR / f'fold_{fold}'
        print(f"Val folds: {val_folds}, Train folds: {train_folds}")
        print(f"Fold save dir {save_fold_dir}")
        train_fold(save_fold_dir, train_folds, val_folds,
                   folds_data, noisy_data, corrected_noisy_data)
| [
11748,
33918,
198,
11748,
1822,
29572,
198,
198,
6738,
1822,
385,
13,
13345,
10146,
1330,
18289,
9787,
4122,
11,
3467,
198,
220,
220,
220,
12556,
1273,
33307,
11,
5972,
2667,
2514,
8979,
11,
44048,
35972,
2202,
3646,
378,
559,
198,
198,... | 2.237095 | 1,143 |
# Day 1.3 Exercise: print the number of characters in the user's name.
# First way I thought to do it without help: store the input, then measure.
name = input("What is your name? ")
print(len(name))
# Way I found from searching Google: nest the input() call directly in
# len(). Note this prompts the user a second time.
print(len(input("What is your name? ")))
2,
12393,
352,
13,
18,
32900,
3228,
198,
198,
2,
5962,
835,
314,
1807,
284,
466,
340,
1231,
1037,
198,
3672,
796,
5128,
7203,
2061,
318,
534,
1438,
30,
366,
8,
198,
4798,
7,
11925,
7,
3672,
4008,
198,
198,
2,
25309,
314,
1043,
2... | 3.15625 | 64 |
from typing import TypedDict
from .utils.Classes.String import String
from .utils.assert_string import assert_string
from .utils.merge import merge
# Default options for the strong-password validator.
# NOTE(review): `_IsStrongPasswordOptions` is not defined in this chunk --
# presumably a TypedDict declared elsewhere in the module; confirm the
# key set matches it.
default_options: _IsStrongPasswordOptions = {
    # Minimum structural requirements.
    "min_length": 8,
    "min_uppercase": 1,
    "min_lowercase": 1,
    "min_numbers": 1,
    "min_symbols": 1,
    # When False the validator returns a boolean -- TODO confirm against
    # the validator implementation.
    "return_score": False,
    # Scoring weights (used when a score is computed).
    "points_per_unique": 1,
    "points_per_repeat": 0.5,
    "points_for_containing_lower": 10,
    "points_for_containing_upper": 10,
    "points_for_containing_number": 10,
    "points_for_containing_symbol": 10,
}
| [
6738,
19720,
1330,
17134,
276,
35,
713,
198,
198,
6738,
764,
26791,
13,
9487,
274,
13,
10100,
1330,
10903,
198,
6738,
764,
26791,
13,
30493,
62,
8841,
1330,
6818,
62,
8841,
198,
6738,
764,
26791,
13,
647,
469,
1330,
20121,
628,
628,
... | 2.55 | 220 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
# Python imports.
import logging
import datetime
import calendar
# Django imports.
from django.db import transaction
# Rest Framework imports.
from rest_framework import serializers
# Third Party Library imports
# local imports.
from boilerplate_app.models import User, Projects
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
2,
11361,
17944,
13,
198,
11748,
18931,
198,... | 3.518868 | 106 |
from __future__ import absolute_import, unicode_literals
import warnings
from wagtail.wagtailcore.blocks import * # noqa
# Deprecation shim: importing this module re-exports the new location's
# names (star import above) and warns callers to update their import path.
warnings.warn("wagtail.wagtailadmin.blocks has moved to wagtail.wagtailcore.blocks", UserWarning, stacklevel=2)
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
11,
28000,
1098,
62,
17201,
874,
198,
198,
11748,
14601,
198,
198,
6738,
266,
363,
13199,
13,
86,
363,
13199,
7295,
13,
27372,
1330,
1635,
220,
1303,
645,
20402,
198,
198,
40539,
654,
13... | 3.077922 | 77 |
import codecs
from datetime import datetime, timedelta
from optparse import make_option
from os import path, unlink
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
import commonware.log
from olympia import amo
from olympia.addons.models import Addon, Persona
from olympia.stats.models import ThemeUpdateCount
from . import get_date_from_file, save_stats_to_file
# Module-level logger for the theme-update-count management command.
log = commonware.log.getLogger('adi.themeupdatecount')
| [
11748,
40481,
82,
198,
6738,
4818,
8079,
1330,
4818,
8079,
11,
28805,
12514,
198,
6738,
2172,
29572,
1330,
787,
62,
18076,
198,
6738,
28686,
1330,
3108,
11,
555,
8726,
198,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
4262... | 3.467153 | 137 |
import numpy as np
import pickle
import matplotlib.pyplot as plt
import os
import fnmatch
# Plot validation curves for every pickled "*_val_*" result under GP/lin/.
folder = "GP/"
ktype = "lin/"

matrices = os.listdir(folder + ktype)
for matrix in matrices:
    if fnmatch.fnmatch(matrix, '*_val_*'):
        with open(folder + ktype + matrix, "rb") as pickleFile:
            # results is a pickled tuple: index 1 holds the x values,
            # index 2 the y values -- TODO confirm against the writer.
            results = pickle.load(pickleFile)
            # Fix: removed unused local `arrray = results[2]` (never read).
            # Enable interactive mode
            plt.ion()
            # Draw the grid lines
            plt.grid(True)
            plt.plot(results[1], results[2], label=matrix)

# NOTE(review): `linthreshx` was renamed `linthresh` in matplotlib 3.3;
# kept as-is for the matplotlib version this project pins -- update on
# upgrade.
plt.xscale('symlog', linthreshx=20)
plt.legend(loc='upper left')
plt.show()
| [
11748,
299,
32152,
355,
45941,
198,
11748,
2298,
293,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
28686,
198,
11748,
24714,
15699,
198,
43551,
796,
366,
16960,
30487,
198,
74,
4906,
796,
366,
2815,
30487,
1... | 1.979104 | 335 |
#!/usr/bin/env python2.7
# coding=utf-8
import logging
import traceback
import time
from FATERUI.common.camera.camera import Camera
from . import CameraMindVision
from FATERUI.common.camera.common_tools import *
import cv2
# from aoi.common.infraredcontrol import infraredcontrol
from time import sleep
import datetime
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
17,
13,
22,
198,
2,
19617,
28,
40477,
12,
23,
198,
198,
11748,
18931,
198,
11748,
12854,
1891,
198,
11748,
640,
198,
6738,
376,
23261,
10080,
13,
11321,
13,
25695,
13,
25695,
1330,
20432,
... | 3.375 | 96 |
import pytest
| [
11748,
12972,
9288,
628,
628,
628,
628,
628,
198
] | 2.666667 | 9 |
# This script allows to download a single file from a remote ZIP archive
# without downloading the whole ZIP file itself.
# The hosting server needs to support the HTTP range header for it to work
import zipfile
import requests
import argparse
if __name__ == '__main__':
    # Command-line interface: remote ZIP URL, member path inside the
    # archive, and local destination path.
    parser = argparse.ArgumentParser()
    parser.add_argument('URL', type=str, help='URL to zip file, e.g. https://example.com/myfile.zip')
    parser.add_argument('FILE_PATH', type=str, help='Path of the desired file in the ZIP file, e.g. myfolder/mydocument.docx')
    parser.add_argument('OUTPUT_FILE', type=str, help='Local path to write the file to, e.g. /home/user/mydocument.docx')
    args = parser.parse_args()
    # NOTE(review): `download_file` is not defined in this chunk --
    # presumably defined earlier in the file; confirm.
    download_file(args.URL, args.FILE_PATH, args.OUTPUT_FILE)
| [
2,
770,
4226,
3578,
284,
4321,
257,
2060,
2393,
422,
257,
6569,
42977,
15424,
198,
2,
1231,
22023,
262,
2187,
42977,
2393,
2346,
13,
198,
2,
383,
13662,
4382,
2476,
284,
1104,
262,
14626,
2837,
13639,
329,
340,
284,
670,
198,
198,
1... | 3.119342 | 243 |
import Image
import readMDA
import h5py
import os
import numpy
from mmpad_image import open_mmpad_tif
import numpy as np
import scipy as sp
import sys
#import libtiff
from cxparams import CXParams as CXP
| [
11748,
7412,
198,
11748,
1100,
44,
5631,
198,
11748,
289,
20,
9078,
198,
11748,
28686,
198,
11748,
299,
32152,
198,
6738,
285,
3149,
324,
62,
9060,
1330,
1280,
62,
3020,
15636,
62,
49929,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
... | 2.985507 | 69 |
"""
Support for file notification.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/notify.file/
"""
import logging
import os
import homeassistant.util.dt as dt_util
from homeassistant.components.notify import (
ATTR_TITLE, DOMAIN, BaseNotificationService)
from homeassistant.helpers import validate_config
_LOGGER = logging.getLogger(__name__)
def get_service(hass, config):
    """Return the file notification service, or None when the platform
    config is missing the required 'filename'/'timestamp' keys."""
    required_keys = ['filename', 'timestamp']
    if not validate_config({DOMAIN: config}, {DOMAIN: required_keys}, _LOGGER):
        return None
    return FileNotificationService(
        hass, config['filename'], config['timestamp'])
# pylint: disable=too-few-public-methods
| [
37811,
198,
15514,
329,
2393,
14483,
13,
198,
198,
1890,
517,
3307,
546,
428,
3859,
11,
3387,
3522,
284,
262,
10314,
379,
198,
5450,
1378,
11195,
12,
562,
10167,
13,
952,
14,
5589,
3906,
14,
1662,
1958,
13,
7753,
14,
198,
37811,
198... | 2.613772 | 334 |
""" Example 035: Scheduled sending and delayed routing """
from os import path
from docusign_esign.client.api_exception import ApiException
from flask import render_template, session, Blueprint, request
from ..examples.eg035_scheduled_sending import Eg035ScheduledSendingController
from ...docusign import authenticate
from ...ds_config import DS_CONFIG
from ...error_handlers import process_error
from ...consts import pattern
eg = "eg035" # reference (and url) for this example
eg035 = Blueprint("eg035", __name__)
def get_args():
    """Collect the request form fields and DocuSign session values needed
    to build the scheduled-sending envelope request."""
    # More data validation would be a good idea here.
    # Strip anything other than the characters allowed by `pattern`.
    form = request.form
    envelope_args = {
        "signer_email": pattern.sub("", form.get("signer_email")),
        "signer_name": pattern.sub("", form.get("signer_name")),
        "resume_date": form.get("resume_date"),
        "status": "sent",
    }
    return {
        "account_id": session["ds_account_id"],
        "base_path": session["ds_base_path"],
        "access_token": session["ds_access_token"],
        "envelope_args": envelope_args,
    }
| [
198,
37811,
17934,
657,
2327,
25,
27774,
6309,
7216,
290,
11038,
28166,
37227,
198,
198,
6738,
28686,
1330,
3108,
198,
198,
6738,
2205,
385,
570,
62,
274,
570,
13,
16366,
13,
15042,
62,
1069,
4516,
1330,
5949,
72,
16922,
198,
6738,
42... | 2.848416 | 442 |
#-- THIS LINE SHOULD BE THE FIRST LINE OF YOUR SUBMISSION! --#
# NOTE(review): the graded `tally` function is expected to be written
# between these two marker lines; it is not present in this chunk.
#-- THIS LINE SHOULD BE THE LAST LINE OF YOUR SUBMISSION! ---#
### DO NOT SUBMIT THE FOLLOWING LINES!!! THESE ARE FOR LOCAL TESTING ONLY!
# ((10+24) - (3+4+3)) * 0.3
# NOTE(review): this compares floats with == (expects exactly 7.20), so
# tally presumably rounds its result -- confirm.
assert(tally([10,24], [3,4,3], 0.30) == 7.20)
# if the result would be negative, 0 is returned instead
assert(tally([10], [20], 0.1) == 0)
2,
438,
12680,
48920,
40312,
9348,
3336,
31328,
48920,
3963,
16592,
28932,
44,
40373,
0,
1377,
2,
198,
198,
2,
438,
12680,
48920,
40312,
9348,
3336,
41894,
48920,
3963,
16592,
28932,
44,
40373,
0,
11420,
2,
198,
198,
21017,
8410,
5626,
... | 2.733333 | 135 |
#!/usr/bin/env python3
"""
Given a set of dominos, construct a linear sequence
For example, if the set of dominos is
[ (0,0) (1,0), (1,1)]
then a valid linear sequence of length four would be
(0,0),(0,1),(1,1),(1,0)
In this script we first create a set of dominos to sample from.
Then every permutation of that set is tested to see whether
the sequence is a valid linear sequence.
If the sequence is invalid, a counter is incremented to
record how long the sequence was.
"""
# http://www.domino-games.com/domino-rules/double-six.html
import itertools
'''
list_of_dominos = [ (0,0), (1,0), (2, 0), (3, 0), (4, 0), (5, 0), (6, 0),
                           (1,1), (2, 1), (3, 1), (4, 1), (5, 1), (6, 1),
                                  (2, 2), (3, 2), (4, 2), (5, 2), (6, 2),
                                          (3, 3), (4, 3), (5, 3), (6, 3),
                                                  (4, 4), (5, 4), (6, 4),
                                                          (5, 5), (6, 5),
                                                                  (6, 6)]
'''

# Double-three domino set: every pairing of 0..3, including doubles.
list_of_dominos = [(0, 0), (1, 0), (2, 0), (3, 0),
                   (1, 1), (2, 1), (3, 1),
                   (2, 2), (3, 2),
                   (3, 3)]

print("number of unique dominos in this set is", len(list_of_dominos))
# 28! = 3*10^29
# print(list_of_dominos)

# broke_on[k] counts how many permutations first break the chain between
# dominos k and k+1 (1-based pair index).
broke_on = {}
for indx in range(11):
    broke_on[indx + 1] = 0
print("initialized data structure (nothing up my sleeve):", broke_on)

for this_perm in itertools.permutations(list_of_dominos):
    # A valid chain needs the right half of each domino to equal the left
    # half of the next. Fix: the original hard-coded elif chain tested
    # this_perm[10] on a 10-element permutation, so a fully valid chain
    # would have raised IndexError instead of being reported; this loop
    # also generalizes to any set size.
    for pair_index in range(len(this_perm) - 1):
        if this_perm[pair_index][1] != this_perm[pair_index + 1][0]:
            broke_on[pair_index + 1] += 1
            break
    else:
        # no adjacent pair mismatched: a valid linear sequence was found
        print("made it to another pair")
        print(this_perm)
        break
print(broke_on)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
37811,
198,
15056,
257,
900,
286,
7462,
418,
11,
5678,
257,
14174,
8379,
198,
1890,
1672,
11,
611,
262,
900,
286,
7462,
418,
318,
198,
58,
357,
15,
11,
15,
8,
357,
16,
11,
15... | 1.788321 | 1,370 |
## Unit 4 Project - Two Player Game
## Gavin Weiss - Computer Programming II
## The Elder Scrolls X
# A fan made 2 player game successor the The Elder Scrolls Series
# Two players start off in an arena
# Can choose starting items
# Can choose classes
## Libraries
import time # Self explanatory
import random # Self explanatory
import os # Used for Linux commands
import os, platform # For Linux intro
## Functions
## Code
def intro1(): # This is an intro for Linux
    # NOTE(review): `sleep()` is called with no arguments, so it is not
    # time.sleep -- presumably a pause helper defined elsewhere in the
    # file; confirm.
    sleep()
    # Render the title banner with the external `figlet` tool (Linux only).
    os.system("figlet Elder Scrolls X")
    sleep()
    return
def intro2(): # Intro for anything else
    # Plain-text title banner for non-Linux platforms (no figlet needed).
    sleep()
    print("\n\t Elder Scrolls X")
    sleep()
    return
# Use the figlet banner on Linux, the plain-text banner everywhere else.
if platform.system() == "Linux":
    intro1()
else:
    intro2()
def CharCreation(): # Function to ask questions for class choosing
    # Prints the class menu only; the caller reads the player's answer.
    sleep()
    print("=> What kind of class do you want?")
    sleep()
    print("> 1 - Knight")
    #sleep()
    print("> 2 - Thief")
    #sleep()
    print("> 3 - Lancer")
    sleep()
    return
sleep()
print("=> Player 1 : What is your name?")
name1 = input("> ") # "name1" is Player 1's name
sleep()
print("=> Player 1,")
CharCreation()
CharCreationChoice1 = input("> ")
if CharCreationChoice1 == ("1"): # Knight
player1 = Player1(name1, 200, 150, 50, 200)
if CharCreationChoice1 == ("2"): # Thief
player1 = Player1(name1, 100, 200, 100, 50)
if CharCreationChoice1 == ("3"): # Lancer
player1 = Player1(name1, 100, 100, 100, 100)
sleep()
player1.Stats() # Prints the stats for Player 1
sleep()
print("=> Player 2 : What is your name?")
name2 = input("> ") # "name2" is Player 2's name
CharCreation()
CharCreationChoice2 = input("> ")
if CharCreationChoice2 == ("1"): # Knight
player2 = Player2(name2, 200, 150, 50, 200)
if CharCreationChoice2 == ("2"): # Thief
player2 = Player2(name2, 100, 200, 100, 50)
if CharCreationChoice2 == ("3"): # Lancer
player2 = Player2(name2, 100, 100, 100, 100)
player2.Stats() # Prints Player 2's stats
| [
2235,
11801,
604,
4935,
532,
4930,
7853,
3776,
198,
2235,
30857,
16152,
532,
13851,
30297,
2873,
198,
198,
2235,
383,
15624,
28859,
1395,
198,
2,
317,
4336,
925,
362,
2137,
983,
17270,
262,
383,
15624,
28859,
7171,
198,
220,
220,
220,
... | 2.85592 | 701 |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import subprocess
from catapult_base import cloud_storage
from telemetry.core import platform
from telemetry.util import image_util
from telemetry.util import rgba_color
HIGHLIGHT_ORANGE_FRAME = rgba_color.WEB_PAGE_TEST_ORANGE
| [
2,
15069,
1946,
383,
18255,
1505,
46665,
13,
1439,
2489,
10395,
13,
198,
2,
5765,
286,
428,
2723,
2438,
318,
21825,
416,
257,
347,
10305,
12,
7635,
5964,
326,
460,
307,
198,
2,
1043,
287,
262,
38559,
24290,
2393,
13,
198,
198,
11748... | 3.447368 | 114 |
"""Proxy to handle account communication with Renault servers."""
from __future__ import annotations
from collections.abc import Awaitable
from datetime import timedelta
import logging
from typing import Callable, TypeVar
from renault_api.kamereon.exceptions import (
AccessDeniedException,
KamereonResponseException,
NotSupportedException,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
T = TypeVar("T")
| [
37811,
44148,
284,
5412,
1848,
6946,
351,
44083,
9597,
526,
15931,
198,
6738,
11593,
37443,
834,
1330,
37647,
198,
198,
6738,
17268,
13,
39305,
1330,
5851,
4548,
540,
198,
6738,
4818,
8079,
1330,
28805,
12514,
198,
11748,
18931,
198,
6738... | 3.757353 | 136 |
#Programa Principal
soma(4,5)
| [
198,
198,
2,
15167,
64,
32641,
198,
82,
6086,
7,
19,
11,
20,
8,
198
] | 2.133333 | 15 |
#!/usr/bin/env python3
# ===================================================================================
# Copyright (C) 2019 Fraunhofer Gesellschaft. All rights reserved.
# ===================================================================================
# This Acumos software file is distributed by Fraunhofer Gesellschaft
# under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============LICENSE_END==========================================================
"""
Provides an example of Docker URI cli on-boarding
"""
import requests
import os
import json
# properties of the model
model_name = "my-model-1"
dockerImageURI = "cicd.ai4eu-dev.eu:7444/myimages/onboardingtest:v3" #Docker image URI looks like: example.com:port/image-tag:version
license_file = "./license-1.0.0.json"
protobuf_file = "./model.proto"
# setup parameters
host = os.environ['ACUMOS_HOST'] # FQHN like aiexp-preprod.ai4europe.eu
token = os.environ['ACUMOS_TOKEN'] # format is 'acumos_username:API_TOKEN'
advanced_api = "https://" + host + ":443/onboarding-app/v2/advancedModel"
files= {'license': ('license.json', open(license_file, 'rb'), 'application.json'),
'protobuf': ('model.proto', open(protobuf_file, 'rb'), 'text/plain')}
headers = {"Accept": "application/json",
"modelname": model_name,
"Authorization": token,
"dockerFileURL": dockerImageURI,
'isCreateMicroservice': 'false'}
#send request
response = requests.post(advanced_api, files=files, headers=headers)
#check response
if response.status_code == 201:
body = json.loads(response.text)
solution_id = body['result']['solutionId']
print("Docker uri is pushed successfully on {" + host + "}, response is: ", response.status_code, " - solutionId: ", solution_id)
else:
print("Docker uri is not pushed on {" + host + "}, response is: ", response.status_code)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
38093,
4770,
855,
198,
2,
15069,
357,
34,
8,
13130,
39313,
403,
71,
30288,
45371,
19187,
11693,
701,
13,
1439,
2489,
10395,
13,
198,
2,
38093,
4770,
855,
198,
2,
770,
4013,
... | 3.165969 | 717 |
# -*- coding: utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""ResNetVariant for Detection."""
from zeus.common import ClassType, ClassFactory
from zeus.modules.connections.connections import MultiOutputGetter
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
15069,
357,
34,
8,
12131,
13,
43208,
21852,
1766,
1539,
12052,
13,
1439,
2489,
10395,
13,
198,
2,
770,
1430,
318,
1479,
3788,
26,
345,
460,
17678,
4163,
340,
... | 3.721519 | 158 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 20 08:40:22 2017
@author: fabio
"""
import ee
import ee.mapclient
ee.Initialize()
collection = ee.ImageCollection('MODIS/MCD43A4_NDVI')
lista = collection.toList(10)
#print lista.getInfo()
image = ee.Image('LC8_L1T/LC81910312016217LGN00')
#print image.getInfo()
bandNames = image.bandNames()
print('Band Names: ', bandNames.getInfo())
b1scale = image.select('B1').projection().nominalScale()
print('Band 1 scale: ', b1scale.getInfo())
b8scale = image.select('B8').projection().nominalScale()
print('Band 8 scale: ', b8scale.getInfo())
ndvi = image.normalizedDifference(['B5', 'B4'])
ee.mapclient.addToMap(ndvi,
{'min' : -1,
"max": 1},
"NDVI")
ee.mapclient.centerMap(12.3536,41.7686,9)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
17,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
19480,
2365,
1160,
8487,
25,
1821,
25,
1828,
2177,
198,
198,
31,
9800,
25,
7843,
952,
198... | 2.217507 | 377 |
# Copyright 2019 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import unittest
from private import helpers
if __name__ == '__main__':
unittest.main()
| [
2,
15069,
13130,
383,
347,
41319,
46665,
13,
1439,
2489,
10395,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
... | 3.721649 | 194 |
from django.contrib import admin
from .models import Contato, Venda, FormaPagamento
admin.site.register(Contato)
admin.site.register(Venda)
admin.site.register(FormaPagamento)
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
764,
27530,
1330,
2345,
5549,
11,
569,
7438,
11,
5178,
64,
47,
363,
3263,
78,
198,
198,
28482,
13,
15654,
13,
30238,
7,
4264,
5549,
8,
198,
28482,
13,
15654,
13,
30238,
7,
... | 2.901639 | 61 |
import os
import re
import urllib
from datetime import datetime
from pathlib import Path
from typing import Dict, List, Tuple, Union
from money.currency import Currency
from money.money import Money
from werkzeug.utils import secure_filename
from pome import g
from pome.models.encoder import PomeEncodable
RECORDED_TX_FOLDER_NAME = os.path.join("transactions", "recorded")
def total_amount(self, formatted=False) -> Union[Money, str]:
to_return = Money("0", Currency(g.company.accounts_currency_code))
for line in self.lines:
to_return += line.amount.amount()
if not formatted:
return to_return
return to_return.format(g.company.locale)
regex_date = re.compile("^\d{4}\-(0[1-9]|1[012])\-(0[1-9]|[12][0-9]|3[01])$")
regex_ISO8601 = re.compile(
"^(-?(?:[1-9][0-9]*)?[0-9]{4})-(1[0-2]|0[1-9])-(3[01]|0[1-9]|[12][0-9])T(2[0-3]|[01][0-9]):([0-5][0-9]):([0-5][0-9])(\.[0-9]+)?(Z|[+-](?:2[0-3]|[01][0-9]):[0-5][0-9])?$"
)
| [
11748,
28686,
198,
11748,
302,
198,
11748,
2956,
297,
571,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
19720,
1330,
360,
713,
11,
7343,
11,
309,
29291,
11,
4479,
198,
198,
6738,
1637,
13,
34... | 2.124473 | 474 |
from __future__ import unicode_literals
from __future__ import print_function
import moya
from moya.compat import text_type
from requests_oauthlib import OAuth1Session
| [
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
198,
11748,
285,
23790,
198,
6738,
285,
23790,
13,
5589,
265,
1330,
2420,
62,
4906,
198,
198,
6738,
7007,
62,
12162,
... | 3.431373 | 51 |
"""
Cartesian genetic programming
"""
import operator as op
import random
import copy
import math
from settings import VERBOSE, N_COLS, LEVEL_BACK
# function set
def protected_div(a, b):
if abs(b) < 1e-6:
return a
return a / b
fs = [Function(op.add, 2), Function(op.sub, 2), Function(op.mul, 2), Function(protected_div, 2), Function(op.neg, 1), Function(math.cos, 1), Function(math.sin, 1), Function(math.tan, 1), Function(math.atan2, 2)]
Individual.function_set = fs
Individual.max_arity = max(f.arity for f in fs)
def evolve(pop, mut_rate, mu, lambda_):
"""
Evolve the population *pop* using the mu + lambda evolutionary strategy
:param pop: a list of individuals, whose size is mu + lambda. The first mu ones are previous parents.
:param mut_rate: mutation rate
:return: a new generation of individuals of the same size
"""
pop = sorted(pop, key=lambda ind: ind.fitness) # stable sorting
parents = pop[-mu:]
# generate lambda new children via mutation
offspring = []
for _ in range(lambda_):
parent = random.choice(parents)
offspring.append(parent.mutate(mut_rate))
return parents + offspring
def create_population(n):
"""
Create a random population composed of n individuals.
"""
return [Individual() for _ in range(n)]
| [
37811,
198,
43476,
35610,
8513,
8300,
198,
37811,
198,
11748,
10088,
355,
1034,
198,
11748,
4738,
198,
11748,
4866,
198,
11748,
10688,
628,
198,
6738,
6460,
1330,
33310,
33,
14058,
11,
399,
62,
25154,
50,
11,
49277,
62,
31098,
628,
628,... | 2.911447 | 463 |
from historia.pops.logic.logic_base import LogicBase
from historia.economy.enums.resource import Good
| [
6738,
3752,
544,
13,
79,
2840,
13,
6404,
291,
13,
6404,
291,
62,
8692,
1330,
30146,
14881,
198,
6738,
3752,
544,
13,
13926,
88,
13,
268,
5700,
13,
31092,
1330,
4599,
198
] | 3.1875 | 32 |
import argparse
import cv2
import numpy as np
import os
import _pickle as pickle
from descriptors import HOG
#from skimage.morphology import skeletonize
# run image filtering and HOG feature extraction
if __name__ == '__main__':
# require image directory and name of descriptor to use
parser = argparse.ArgumentParser(description='Extract image feature vectors using feature descriptors')
parser.add_argument('-p', '--path', required=True,
nargs='?', action='store', const='./images/',
type=str, dest='im_path',
help='The filepath of the image directory')
parser.add_argument('-d', '--descriptor', required=True,
choices=['hog'],
nargs='?', action='store', const='hog',
type=str, dest='desc_name',
help='The name of the descriptor to use')
args = vars(parser.parse_args())
im_path = args['im_path']
desc_name = args['desc_name']
main(im_path, desc_name)
| [
11748,
1822,
29572,
198,
11748,
269,
85,
17,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28686,
198,
11748,
4808,
27729,
293,
355,
2298,
293,
198,
198,
6738,
12145,
669,
1330,
367,
7730,
198,
2,
6738,
1341,
9060,
13,
24503,
1435,
... | 2.350333 | 451 |
import abc
| [
11748,
450,
66,
628,
198
] | 2.6 | 5 |
from django.contrib import admin
from .models import Placement_Company_Detail,Profile,StudentBlogModel,ResorcesModel
admin.site.register(Placement_Company_Detail)
admin.site.register(Profile)
admin.site.register(StudentBlogModel)
admin.site.register(ResorcesModel) | [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
764,
27530,
1330,
1345,
5592,
62,
39154,
62,
11242,
603,
11,
37046,
11,
38778,
42383,
17633,
11,
4965,
273,
728,
17633,
198,
198,
28482,
13,
15654,
13,
30238,
7,
3646,
5592,
6... | 3.35443 | 79 |
import torch as t
import torch_geometric.utils as utils
def qw_score(graph):
"""
qw_score,
:param graph:
"""
score = utils.degree(graph.edge_index[0])
return score.sort()
def construct_node_tree(graph, node, trees, opt):
"""
K_level, m_ary
:param graph:
:param node:
:param opt:
"""
m = opt.m
K = opt.K
tree = [node]
now = 0
for i in range(K - 1):
for j in range(m ** i):
root = tree[now]
tree += trees[root]
now += 1
zero = t.zeros(graph.x[-1].shape)
x = graph.x
graph.x = t.cat([graph.x, zero[None, :]], dim=0)
tree = graph.x[tree]
graph.x = x
return tree
| [
11748,
28034,
355,
256,
198,
11748,
28034,
62,
469,
16996,
13,
26791,
355,
3384,
4487,
628,
198,
4299,
10662,
86,
62,
26675,
7,
34960,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
10662,
86,
62,
26675,
11,
198,
220,
220,
220... | 2.037791 | 344 |
from typing import Optional, Any
def test(actions: list[str], val: list[list[int]]):
cache: Optional[LFUCache] = None
result: list[Optional[Any]] = []
for i, v in zip(actions, val):
if i == 'LFUCache':
cache = LFUCache(v[0])
result.append(None)
elif i == 'put':
assert cache
result.append((i, v, cache.put(v[0], v[1])))
elif i == 'get':
assert cache
result.append((i, v, cache.get(v[0])))
print(result)
if __name__ == '__main__':
actions = ["LFUCache","put","put","get","put","get","get","put","get","get","get"]
values = [[2],[1,1],[2,2],[1],[3,3],[2],[3],[4,4],[1],[3],[4]]
test(actions, values)
# actions2 = ["LFUCache","put","put","get","get","get","put","put","get","get","get","get"]
# values2 = [[3],[2,2],[1,1],[2],[1],[2],[3,3],[4,4],[3],[2],[1],[4]]
# test(actions2, values2)
# actions3 = ["LFUCache","put","put","put","put","put","get","put","get","get","put","get","put","put","put","get","put","get","get","get","get","put","put","get","get","get","put","put","get","put","get","put","get","get","get","put","put","put","get","put","get","get","put","put","get","put","put","put","put","get","put","put","get","put","put","get","put","put","put","put","put","get","put","put","get","put","get","get","get","put","get","get","put","put","put","put","get","put","put","put","put","get","get","get","put","put","put","get","put","put","put","get","put","put","put","get","get","get","put","put","put","put","get","put","put","put","put","put","put","put"]
# values3 = [[10],[10,13],[3,17],[6,11],[10,5],[9,10],[13],[2,19],[2],[3],[5,25],[8],[9,22],[5,5],[1,30],[11],[9,12],[7],[5],[8],[9],[4,30],[9,3],[9],[10],[10],[6,14],[3,1],[3],[10,11],[8],[2,14],[1],[5],[4],[11,4],[12,24],[5,18],[13],[7,23],[8],[12],[3,27],[2,12],[5],[2,9],[13,4],[8,18],[1,7],[6],[9,29],[8,21],[5],[6,30],[1,12],[10],[4,15],[7,22],[11,26],[8,17],[9,29],[5],[3,4],[11,30],[12],[4,29],[3],[9],[6],[3,4],[1],[10],[3,29],[10,28],[1,20],[11,13],[3],[3,12],[3,8],[10,9],[3,26],[8],[7],[5],[13,17],[2,27],[11,15],[12],[9,19],[2,15],[3,16],[1],[12,17],[9,1],[6,19],[4],[5],[5],[8,1],[11,7],[5,2],[9,28],[1],[2,2],[7,4],[4,22],[7,24],[9,26],[13,28],[11,26]]
# test(actions3, values3) | [
6738,
19720,
1330,
32233,
11,
4377,
628,
198,
4299,
1332,
7,
4658,
25,
1351,
58,
2536,
4357,
1188,
25,
1351,
58,
4868,
58,
600,
11907,
2599,
198,
220,
220,
220,
12940,
25,
32233,
58,
43,
37,
9598,
4891,
60,
796,
6045,
198,
220,
22... | 2.210325 | 1,046 |
#/*
# * Copyright (C) 2017 - This file is part of libecc project
# *
# * Authors:
# * Ryad BENADJILA <ryadbenadjila@gmail.com>
# * Arnaud EBALARD <arnaud.ebalard@ssi.gouv.fr>
# * Jean-Pierre FLORI <jean-pierre.flori@ssi.gouv.fr>
# *
# * Contributors:
# * Nicolas VIVET <nicolas.vivet@ssi.gouv.fr>
# * Karim KHALFALLAH <karim.khalfallah@ssi.gouv.fr>
# *
# * This software is licensed under a dual BSD and GPL v2 license.
# * See LICENSE file at the root folder of the project.
# */
import struct
keccak_rc = [
0x0000000000000001, 0x0000000000008082, 0x800000000000808A, 0x8000000080008000,
0x000000000000808B, 0x0000000080000001, 0x8000000080008081, 0x8000000000008009,
0x000000000000008A, 0x0000000000000088, 0x0000000080008009, 0x000000008000000A,
0x000000008000808B, 0x800000000000008B, 0x8000000000008089, 0x8000000000008003,
0x8000000000008002, 0x8000000000000080, 0x000000000000800A, 0x800000008000000A,
0x8000000080008081, 0x8000000000008080, 0x0000000080000001, 0x8000000080008008
]
keccak_rot = [
[ 0, 36, 3, 41, 18 ],
[ 1, 44, 10, 45, 2 ],
[ 62, 6, 43, 15, 61 ],
[ 28, 55, 25, 21, 56 ],
[ 27, 20, 39, 8, 14 ],
]
# Keccak function
# SHA-3 context class
| [
2,
15211,
198,
2,
1635,
220,
15069,
357,
34,
8,
2177,
532,
770,
2393,
318,
636,
286,
9195,
68,
535,
1628,
198,
2,
1635,
198,
2,
1635,
220,
46665,
25,
198,
2,
1635,
220,
220,
220,
220,
220,
11089,
324,
44849,
2885,
41,
47164,
127... | 2.160804 | 597 |
import csv
from pathlib import Path
from datetime import datetime
from lps.models import *
from lps.schemas import *
SEED_FOLDER_PATH = Path("db/seeds/")
| [
11748,
269,
21370,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
198,
6738,
300,
862,
13,
27530,
1330,
1635,
198,
6738,
300,
862,
13,
1416,
4411,
292,
1330,
1635,
628,
198,
5188,
1961,
62,
37,
3535,... | 2.824561 | 57 |
a = int(input())
b = int(input())
print(a*b//(GCD(a,b)))
| [
198,
64,
796,
493,
7,
15414,
28955,
198,
65,
796,
493,
7,
15414,
28955,
198,
4798,
7,
64,
9,
65,
1003,
7,
38,
8610,
7,
64,
11,
65,
22305,
198
] | 1.933333 | 30 |
"""
Build a derived collection with the maximum
value from each 'group' defined in the source
collection.
"""
__author__ = 'Dan Gunter <dkgunter@lbl.gov>'
__date__ = '5/21/14'
from matgendb.builders import core
from matgendb.builders import util
from matgendb.query_engine import QueryEngine
_log = util.get_builder_log("incr") | [
37811,
198,
15580,
257,
10944,
4947,
351,
262,
5415,
198,
8367,
422,
1123,
705,
8094,
6,
5447,
287,
262,
2723,
198,
43681,
13,
198,
37811,
198,
834,
9800,
834,
796,
705,
21174,
6748,
353,
1279,
34388,
7145,
353,
31,
75,
2436,
13,
95... | 3.074766 | 107 |
from __future__ import unicode_literals
from dvc.output.ssh import OutputSSH
from dvc.dependency.base import DependencyBase
| [
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
288,
28435,
13,
22915,
13,
45824,
1330,
25235,
5432,
39,
198,
6738,
288,
28435,
13,
45841,
1387,
13,
8692,
1330,
37947,
1387,
14881,
628
] | 3.405405 | 37 |
import torch
import torch.nn as nn
import optflow.compute_tvl1_energy as compute_tvl1_energy
| [
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
2172,
11125,
13,
5589,
1133,
62,
14981,
75,
16,
62,
22554,
355,
24061,
62,
14981,
75,
16,
62,
22554,
198
] | 2.90625 | 32 |
import numpy as np
import time
import cv2
import argparse
import sys
import os
import glob
import json
from pathlib import Path
if __name__ == "__main__":
main()
| [
11748,
299,
32152,
355,
45941,
198,
11748,
640,
198,
11748,
269,
85,
17,
198,
11748,
1822,
29572,
198,
11748,
25064,
198,
11748,
28686,
198,
11748,
15095,
198,
11748,
33918,
198,
6738,
3108,
8019,
1330,
10644,
220,
628,
198,
361,
11593,
... | 2.461538 | 78 |
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making PaaS (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import sys
from django.test import TestCase
from django.utils.module_loading import import_string
from pipeline.tests.mock import * # noqa
from pipeline.tests.mock_settings import * # noqa
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
24893,
1087,
318,
10607,
284,
1104,
262,
1280,
2723,
2055,
416,
1642,
350,
7252,
50,
357,
14573,
15708,
350,
7252,
50,
8108,
198,
7407,
653,
8,
1695,
13,
1... | 3.841004 | 239 |
import numpy as np
def _check_mne(name):
"""Helper to check if h5py is installed"""
try:
import mne
except ImportError:
raise ImportError('Please install MNE-python to use %s.' % name)
return mne
def raw_to_mask(raw, ixs, events=None, tmin=None, tmax=None):
"""
A function to transform MNE data into pactools input signals.
It select the one channel on which you to estimate PAC, or two channels
for cross-channel PAC. It also returns a mask generator, that mask the
data outside a given window around an event. The mask generator returns
a number of masks equal to the number of events times the number of
windows (i.e. the number of pairs (tmin, tmax)).
Warning: events is stored in indices, tmin and tmax are stored in seconds.
Parameters
----------
raw : an instance of Raw, containing data of shape (n_channels, n_times)
The data used to calculate PAC
ixs : int or couple of int
The indices for the low/high frequency channels. If only one is given,
the same channel is used for both low_sig and high_sig.
events : array, shape (n_events, 3) | array, shape (n_events,) | None
MNE events array. To be supplied if data is 2D and output should be
split by events. In this case, `tmin` and `tmax` must be provided. If
`ndim == 1`, it is assumed to be event indices, and all events will be
grouped together. Otherwise, events will be grouped along the third
dimension.
tmin : float | list of floats, shape (n_windows, ) | None
If `events` is not provided, it is the start time to use in `raw`.
If `events` is provided, it is the time (in seconds) to include before
each event index. If a list of floats is given, then PAC is calculated
for each pair of `tmin` and `tmax`. Defaults to `min(raw.times)`.
tmax : float | list of floats, shape (n_windows, ) | None
If `events` is not provided, it is the stop time to use in `raw`.
If `events` is provided, it is the time (in seconds) to include after
each event index. If a list of floats is given, then PAC is calculated
for each pair of `tmin` and `tmax`. Defaults to `max(raw.times)`.
Attributes
----------
low_sig : array, shape (1, n_points)
Input data for the phase signal
high_sig : array or None, shape (1, n_points)
Input data for the amplitude signal.
If None, we use low_sig for both signals.
mask : MaskIterator instance
Object that behaves like a list of mask, without storing them all.
The PAC will only be evaluated where the mask is False.
Examples
--------
>>> from pactools import raw_to_mask
>>> low_sig, high_sig, mask = raw_to_mask(raw, ixs, events, tmin, tmax)
>>> n_masks = len(mask)
>>> for one_mask in mask:
... pass
"""
mne = _check_mne('raw_to_mask')
if not isinstance(raw, mne.io.BaseRaw):
raise ValueError('Must supply Raw as input')
ixs = np.atleast_1d(ixs)
fs = raw.info['sfreq']
data = raw[:][0]
n_channels, n_points = data.shape
low_sig = data[ixs[0]][None, :]
if ixs.shape[0] > 1:
high_sig = data[ixs[1]][None, :]
else:
high_sig = None
mask = MaskIterator(events, tmin, tmax, n_points, fs)
return low_sig, high_sig, mask
| [
11748,
299,
32152,
355,
45941,
628,
198,
4299,
4808,
9122,
62,
76,
710,
7,
3672,
2599,
198,
220,
220,
220,
37227,
47429,
284,
2198,
611,
289,
20,
9078,
318,
6589,
37811,
198,
220,
220,
220,
1949,
25,
198,
220,
220,
220,
220,
220,
... | 2.693038 | 1,264 |
import collections
from itertools import combinations
from collections import Counter
| [
11748,
17268,
198,
6738,
340,
861,
10141,
1330,
17790,
198,
6738,
17268,
1330,
15034,
198
] | 5.733333 | 15 |
import random
| [
11748,
4738,
628
] | 5 | 3 |
import pymysql.cursors
from ..mod_check import app
| [
11748,
279,
4948,
893,
13976,
13,
66,
1834,
669,
198,
6738,
11485,
4666,
62,
9122,
1330,
598,
628
] | 2.888889 | 18 |
import time
import board
import busio
from digitalio import DigitalInOut
from adafruit_esp32spi import adafruit_esp32spi
from adafruit_esp32spi import adafruit_esp32spi_wifimanager
import neopixel
# Import Philips Hue Bridge
from adafruit_hue import Bridge
# Get wifi details and more from a secrets.py file
try:
from secrets import secrets
except ImportError:
print("WiFi and API secrets are kept in secrets.py, please add them there!")
raise
# ESP32 SPI
esp32_cs = DigitalInOut(board.ESP_CS)
esp32_ready = DigitalInOut(board.ESP_BUSY)
esp32_reset = DigitalInOut(board.ESP_RESET)
spi = busio.SPI(board.SCK, board.MOSI, board.MISO)
esp = adafruit_esp32spi.ESP_SPIcontrol(spi, esp32_cs, esp32_ready, esp32_reset)
status_light = neopixel.NeoPixel(board.NEOPIXEL, 1, brightness=0.2)
wifi = adafruit_esp32spi_wifimanager.ESPSPI_WiFiManager(esp, secrets, status_light)
# Attempt to load bridge username and IP address from secrets.py
try:
username = secrets['hue_username']
bridge_ip = secrets['bridge_ip']
my_bridge = Bridge(wifi, bridge_ip, username)
except:
# Perform first-time bridge setup
my_bridge = Bridge(wifi)
ip = my_bridge.discover_bridge()
username = my_bridge.register_username()
print('ADD THESE VALUES TO SECRETS.PY: \
\n\t"bridge_ip":"{0}", \
\n\t"hue_username":"{1}"'.format(ip, username))
raise
# Enumerate all lights on the bridge
my_bridge.get_lights()
# Turn on the light
my_bridge.set_light(1, on=True)
# RGB colors to Hue-Compatible HSL colors
hsl_y = my_bridge.rgb_to_hsb([255, 255, 0])
hsl_b = my_bridge.rgb_to_hsb([0, 0, 255])
hsl_w = my_bridge.rgb_to_hsb([255, 255, 255])
hsl_colors = [hsl_y, hsl_b, hsl_w]
# Set the light to Python colors!
for color in hsl_colors:
my_bridge.set_light(1, hue=int(color[0]), sat=int(color[1]), bri=int(color[2]))
time.sleep(5)
# Set a predefinedscene
# my_bridge.set_group(1, scene='AB34EF5')
# Turn off the light
my_bridge.set_light(1, on=False)
| [
11748,
640,
201,
198,
11748,
3096,
201,
198,
11748,
1323,
952,
201,
198,
6738,
4875,
952,
1330,
10231,
818,
7975,
201,
198,
6738,
512,
1878,
4872,
62,
9774,
2624,
2777,
72,
1330,
512,
1878,
4872,
62,
9774,
2624,
2777,
72,
201,
198,
... | 2.344944 | 890 |
#
# Copyright (c) 2013-2018 Joris Vink <joris@coders.se>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
import kore
import socket
# Setup the server object.
server = EchoServer()
# Create a task that will execute inside of Kore as a co-routine.
kore.task_create(server.run())
| [
2,
198,
2,
15069,
357,
66,
8,
2211,
12,
7908,
449,
37279,
569,
676,
1279,
73,
37279,
31,
19815,
364,
13,
325,
29,
198,
2,
198,
2,
2448,
3411,
284,
779,
11,
4866,
11,
13096,
11,
290,
14983,
428,
3788,
329,
597,
198,
2,
4007,
35... | 3.408602 | 279 |
# -*- coding:utf-8 -*-
"""
"""
__date__ = "14/12/2017"
__author__ = "zhaojm"
| [
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
37811,
198,
37811,
198,
834,
4475,
834,
796,
366,
1415,
14,
1065,
14,
5539,
1,
198,
834,
9800,
834,
796,
366,
89,
3099,
13210,
76,
1,
198
] | 1.925 | 40 |
import asyncio
import logging
import traceback
import uuid
from typing import Optional, Tuple, Any, Callable
from pesto.ws.core.payload_parser import PayloadParser, PestoConfig
from pesto.ws.core.pesto_feature import PestoFeatures
from pesto.ws.core.utils import load_class, async_exec
from pesto.ws.features.algorithm_wrapper import AlgorithmWrapper
from pesto.ws.features.converter.image.image_roi import ImageROI, DummyImageROI
from pesto.ws.features.payload_converter import PayloadConverter
from pesto.ws.features.payload_debug import PayloadDebug
from pesto.ws.features.response_serializer import ResponseSerializer
from pesto.ws.features.schema_validation import SchemaValidation
from pesto.ws.features.stateful_response import StatefulResponse
from pesto.ws.features.stateless_response import StatelessResponse
from pesto.ws.service.describe import DescribeService
from pesto.ws.service.job_result import ResultType
log = logging.getLogger(__name__)
| [
11748,
30351,
952,
198,
11748,
18931,
198,
11748,
12854,
1891,
198,
11748,
334,
27112,
198,
6738,
19720,
1330,
32233,
11,
309,
29291,
11,
4377,
11,
4889,
540,
198,
198,
6738,
28064,
78,
13,
18504,
13,
7295,
13,
15577,
2220,
62,
48610,
... | 3.348432 | 287 |
# -*- coding: utf-8 -*-
import logging
import os
import re
import sys
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
18931,
198,
11748,
28686,
198,
11748,
302,
198,
11748,
25064,
628
] | 2.730769 | 26 |
import configparser
import os
import typing
from sitri.providers.base import ConfigProvider
| [
11748,
4566,
48610,
198,
11748,
28686,
198,
11748,
19720,
198,
198,
6738,
1650,
380,
13,
15234,
4157,
13,
8692,
1330,
17056,
29495,
628
] | 4.086957 | 23 |
from typing import Any, Dict, List
from xpanse.const import V2_PREFIX
from xpanse.endpoint import ExEndpoint
from xpanse.iterator import ExResultIterator
| [
6738,
19720,
1330,
4377,
11,
360,
713,
11,
7343,
198,
198,
6738,
2124,
6839,
325,
13,
9979,
1330,
569,
17,
62,
47,
31688,
10426,
198,
6738,
2124,
6839,
325,
13,
437,
4122,
1330,
1475,
12915,
4122,
198,
6738,
2124,
6839,
325,
13,
487... | 3.25 | 48 |
from bfio import BioReader
import argparse, logging
import numpy as np
from pathlib import Path
import filepattern, multiprocessing, utils
from concurrent.futures import ThreadPoolExecutor
COLORS = ['red',
'green',
'blue',
'yellow',
'magenta',
'cyan',
'gray']
def get_number(s):
""" Check that s is number
If s is a number, first attempt to convert it to an int.
If integer conversion fails, attempt to convert it to a float.
If float conversion fails, return None.
Inputs:
s - An input string or number
Outputs:
value - Either float, int or None
"""
try:
return [int(si) for si in s.split('-')]
except ValueError:
try:
return [float(si) for si in s.split('-')]
except ValueError:
return None
def get_bounds(br,lower_bound,upper_bound):
""" Calculate upper and lower pixel values for image rescaling
This method calculates the upper and lower percentiles
for a given image. The lower_bound and upper_bound must
be floats in the range 0-1, where 0.01 indicates 1%. The
values returned are pixel intensity values.
Images are read in tiles to prevent images from being
completely read into memory. This permits calculation
of percentiles on images that are larger than memory.
Args:
br (bfio.BioReader): A BioReader object to access a tiled tiff
lower_bound (float): Lower bound percentile, must be in 0.00-1.00
upper_bound (float): Upper bound percentile, must be in 0.00-1.00
Returns:
[list]: List of upper and lower bound values in pixel intensity units.
"""
# TODO: Replace pixel buffer with histogram/fingerprint to handle
# larger images and/or larger percentile values
# Make sure the inputs are properly formatted
assert isinstance(lower_bound,float) and isinstance(upper_bound,float)
assert lower_bound >= 0 and lower_bound <= 1.0
assert upper_bound >= 0 and upper_bound <= 1.0
# Get the image size in pixels
image_size = br.num_x() * br.num_y()
# Get number of pixels needed to get percentile information
upper_bound_size = int(image_size * (1-upper_bound))
lower_bound_size = int(image_size * lower_bound)
# Create the pixel buffer
dtype = br.read_metadata().image().Pixels.get_PixelType()
upper_bound_vals = np.zeros((2*upper_bound_size,),dtype=dtype)
lower_bound_vals = np.full((2*lower_bound_size,),np.iinfo(dtype).max,dtype=dtype)
# Load image tiles and sort pixels
for x in range(0,br.num_x(),1024):
# Load the first tile
tile = br.read_image(X=[x,min([x+1024,br.num_x()])],Z=[0,1])
# Sort the non-zero values
tile_sorted = np.sort(tile[tile.nonzero()],axis=None)
# Store the upper and lower bound pixel values
temp = tile_sorted[-upper_bound_size:]
upper_bound_vals[:temp.size] = temp
temp = tile_sorted[:lower_bound_size]
lower_bound_vals[-temp.size:] = temp
# Resort the pixels
upper_bound_vals = np.sort(upper_bound_vals,axis=None)
lower_bound_vals = np.sort(lower_bound_vals,axis=None)
return [lower_bound_vals[lower_bound_size],upper_bound_vals[-upper_bound_size]]
if __name__=="__main__":
# Initialize the logger
logging.basicConfig(format='%(asctime)s - %(name)-8s - %(levelname)-8s - %(message)s',
datefmt='%d-%b-%y %H:%M:%S')
logger = logging.getLogger("main")
logger.setLevel(logging.INFO)
''' Argument parsing '''
logger.info("Parsing arguments...")
parser = argparse.ArgumentParser(prog='main', description='Builds a DeepZoom color pyramid.')
# Input arguments
parser.add_argument('--filePattern', dest='filePattern', type=str,
help='Filename pattern used to separate data', required=True)
parser.add_argument('--inpDir', dest='inpDir', type=str,
help='Input image collection to be processed by this plugin', required=True)
parser.add_argument('--layout', dest='layout', type=str,
help='Color ordering (e.g. 1,11,,,,5,6)', required=True)
parser.add_argument('--bounds', dest='bounds', type=str,
help='Set bounds (should be float-float, int-int, or blank, e.g. 0.01-0.99,0-16000,,,,,)', required=False)
# Output arguments
parser.add_argument('--outDir', dest='outDir', type=str,
help='Output pyramid path.', required=True)
# Parse the arguments
args = parser.parse_args()
filePattern = args.filePattern
logger.info('filePattern = {}'.format(filePattern))
inpDir = args.inpDir
if (Path.is_dir(Path(args.inpDir).joinpath('images'))):
# switch to images folder if present
fpath = str(Path(args.inpDir).joinpath('images').absolute())
logger.info('inpDir = {}'.format(inpDir))
layout = args.layout
logger.info('layout = {}'.format(layout))
bounds = args.bounds
logger.info('bounds = {}'.format(bounds))
outDir = args.outDir
logger.info('outDir = {}'.format(outDir))
outDir = Path(outDir)
# Parse the layout
layout = [None if l=='' else int(l) for l in layout.split(',')]
if len(layout)>7:
layout = layout[:7]
# Parse the bounds
if bounds != None:
bounds = [[None] if l=='' else get_number(l) for l in bounds.split(',')]
bounds = bounds[:len(layout)]
else:
bounds = [[None] for _ in layout]
# Parse files
fp = filepattern.FilePattern(inpDir,filePattern)
count = 0
for files in fp.iterate(group_by='c'):
outDirFrame = outDir.joinpath('{}_files'.format(count))
outDirFrame.mkdir()
count += 1
bioreaders = []
threads = []
with ThreadPoolExecutor(max([multiprocessing.cpu_count()//2,2])) as executor:
for i,l in enumerate(layout):
if l == None:
bioreaders.append(None)
continue
f_path = [f for f in files if f['c']==l]
if len(f_path)==0:
continue
f_path = f_path[0]['file']
bioreaders.append(BioReader(f_path,max_workers=multiprocessing.cpu_count()))
if layout[i] != None:
if isinstance(bounds[i][0],float):
logger.info('{}: Getting percentile bounds {}...'.format(Path(bioreaders[-1]._file_path).name,
bounds[i]))
threads.append(executor.submit(get_bounds,bioreaders[-1],bounds[i][0],bounds[i][1]))
elif isinstance(bounds[i][0],int):
bioreaders[-1].bounds = bounds[i]
else:
bioreaders[-1].bounds = [0,np.iinfo(bioreaders[-1].read_metadata().image().Pixels.get_PixelType()).max]
for i in reversed(range(len(layout))):
if isinstance(bounds[i][0],int):
logger.info('Color {}: {} (rescaling to {})'.format(COLORS[i],
Path(Path(bioreaders[i]._file_path).name).name,
bioreaders[i].bounds))
continue
if layout[i] == None:
continue
bioreaders[i].bounds = threads.pop().result()
logger.info('Color {}: {} (rescaling to {})'.format(COLORS[i],
Path(Path(bioreaders[i]._file_path).name).name,
bioreaders[i].bounds))
for br in bioreaders:
if br != None:
br_meta = br
file_info = utils.dzi_file(br_meta,outDirFrame,0)
encoder = utils.DeepZoomChunkEncoder(file_info)
file_writer = utils.DeepZoomWriter(outDirFrame)
utils._get_higher_res(0,bioreaders,file_writer,encoder)
| [
6738,
275,
69,
952,
1330,
16024,
33634,
198,
11748,
1822,
29572,
11,
18931,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
3108,
8019,
1330,
10644,
198,
11748,
2393,
33279,
11,
18540,
305,
919,
278,
11,
3384,
4487,
198,
6738,
24580,
13,... | 2.127328 | 3,919 |
from kinetics.reaction_classes.reaction_base_class import Reaction | [
6738,
18967,
14596,
13,
260,
2673,
62,
37724,
13,
260,
2673,
62,
8692,
62,
4871,
1330,
39912
] | 3.882353 | 17 |
from ctypes import *
from migi.decorators import stdcall
_native_message_box_w.intercept()
| [
6738,
269,
19199,
1330,
1635,
198,
198,
6738,
285,
25754,
13,
12501,
273,
2024,
1330,
14367,
13345,
628,
628,
198,
198,
62,
30191,
62,
20500,
62,
3524,
62,
86,
13,
3849,
984,
3419,
198
] | 2.852941 | 34 |
from adapters.adapter_with_battery import AdapterWithBattery
from devices.switch.selector_switch import SelectorSwitch
| [
6738,
46363,
13,
324,
3429,
62,
4480,
62,
65,
16296,
1330,
43721,
3152,
47006,
198,
6738,
4410,
13,
31943,
13,
19738,
273,
62,
31943,
1330,
9683,
273,
38978,
628,
198
] | 4.033333 | 30 |
# -*- coding: utf-8 -*-
"""
"""
import scrapy
from tennis_model.tennis_model_scraper.tennis_model_scraper import items
if __name__ == '__main__':
pass
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
198,
198,
37811,
198,
198,
11748,
15881,
88,
198,
198,
6738,
20790,
62,
19849,
13,
1452,
21361,
62,
19849,
62,
1416,
38545,
13,
1452,
21361,
62,
19849,
62,
... | 2.439394 | 66 |
from aoc20211219a import *
| [
6738,
257,
420,
1238,
2481,
1065,
1129,
64,
1330,
1635,
628
] | 2.545455 | 11 |
import json
import requests
from .exceptions import (
RequestsError,
RequestsTimeoutError,
RPCError
)
_default_endpoint = 'http://localhost:9500'
_default_timeout = 30
def base_request(method, params=None, endpoint=_default_endpoint, timeout=_default_timeout) -> str:
"""
Basic RPC request
Parameters
---------
method: str
RPC Method to call
params: :obj:`list`, optional
Parameters for the RPC method
endpoint: :obj:`str`, optional
Endpoint to send request to
timeout: :obj:`int`, optional
Timeout in seconds
Returns
-------
str
Raw output from the request
Raises
------
TypeError
If params is not a list or None
RequestsTimeoutError
If request timed out
RequestsError
If other request error occured
"""
if params is None:
params = []
elif not isinstance(params, list):
raise TypeError(f'invalid type {params.__class__}')
try:
payload = {
"id": "1",
"jsonrpc": "2.0",
"method": method,
"params": params
}
headers = {
'Content-Type': 'application/json'
}
resp = requests.request('POST', endpoint, headers=headers, data=json.dumps(payload),
timeout=timeout, allow_redirects=True)
return resp.content
except requests.exceptions.Timeout as err:
raise RequestsTimeoutError(endpoint) from err
except requests.exceptions.RequestException as err:
raise RequestsError(endpoint) from err
def rpc_request(method, params=None, endpoint=_default_endpoint, timeout=_default_timeout) -> dict:
"""
RPC request
Parameters
---------
method: str
RPC Method to call
params: :obj:`list`, optional
Parameters for the RPC method
endpoint: :obj:`str`, optional
Endpoint to send request to
timeout: :obj:`int`, optional
Timeout in seconds
Returns
-------
dict
Returns dictionary representation of RPC response
Example format:
{
"jsonrpc": "2.0",
"id": 1,
"result": ...
}
Raises
------
RPCError
If RPC response returned a blockchain error
See Also
--------
base_request
"""
raw_resp = base_request(method, params, endpoint, timeout)
try:
resp = json.loads(raw_resp)
if 'error' in resp:
raise RPCError(method, endpoint, str(resp['error']))
return resp
except json.decoder.JSONDecodeError as err:
raise RPCError(method, endpoint, raw_resp) from err
# TODO: Add GET requests
| [
11748,
33918,
198,
198,
11748,
7007,
198,
198,
6738,
764,
1069,
11755,
1330,
357,
198,
220,
220,
220,
9394,
3558,
12331,
11,
198,
220,
220,
220,
9394,
3558,
48031,
12331,
11,
198,
220,
220,
220,
25812,
5222,
81,
1472,
198,
8,
628,
1... | 2.394231 | 1,144 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
import socket
import threading
import rospy
from publisher import *
import cv2
import imagezmq
import numpy as np
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
# setup socket to python3 video streamer
# Helper class implementing an IO daemon thread for imgzmq recv
# Receive from broadcast
# There are 2 hostname styles; comment out the one you don't need
hostname = "127.0.0.1" # Use to receive from localhost
# hostname = "192.168.86.38" # Use to receive from other computer
if __name__ == '__main__':
try:
# parser = argparse.ArgumentParser()
# parser.add_argument('--gp', action="store_true", help="Option to publish gaze position (2D) data")
# args = parser.parse_args(rospy.myargv()[1:])
'''
Initiate the Video Stream Subscription over Image ZMQ
'''
imgzmq_port = 5555
hostname = "/tmp/tobiiVid"; imgzmq_port = 0
receiver = VideoStreamSubscriber(hostname, imgzmq_port)
'''
Create publisher
'''
# Default publish the 3D gaze position data
vidpub = rospy.Publisher("tobii_video", Image, queue_size=10)
bridge = CvBridge()
rospy.init_node('tobii_image_sender', anonymous=True)
while not rospy.is_shutdown():
# get from py3
sent_msg_string, frame = receiver.receive()
image = cv2.imdecode(np.frombuffer(frame, dtype='uint8'), -1)
image = np.frombuffer(frame, dtype='uint8')
image = image.reshape(1080, 1920, 3)
print(image.shape, sent_msg_string)
# Parse sent message to convert to ros formats
frametime, counter = parse_sent_msg(sent_msg_string)
# publish to ROS
im_ros = bridge.cv2_to_imgmsg(image, "bgr8")
im_ros.header.stamp = rospy.Time.from_sec(frametime)
im_ros.header.frame_id = str(counter)
vidpub.publish(im_ros)
except (rospy.ROSInterruptException, KeyboardInterrupt, SystemExit):
sys.exit(0)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
640,
198,
11748,
17802,
198,
11748,
4704,
278,
198,
11748,
686,
2777,
88,
198,
6738,
9991,
1330,
1635,
198,... | 2.324561 | 912 |
import sys
import numpy
import numpy as np
from snappy import Product
from snappy import ProductData
from snappy import ProductIO
from snappy import ProductUtils
from snappy import FlagCoding
##############
import csv
###############MSVR
from sklearn.svm import SVR
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline
########################
if len(sys.argv) != 2:
print("usage: %s <file>" % sys.argv[0])
sys.exit(1)
file = sys.argv[1]
print("Reading...")
product = ProductIO.readProduct(file)
width = product.getSceneRasterWidth()
height = product.getSceneRasterHeight()
name = product.getName()
description = product.getDescription()
band_names = product.getBandNames()
print("Product: %s, %s" % (name, description))
print("Raster size: %d x %d pixels" % (width, height))
print("Start time: " + str(product.getStartTime()))
print("End time: " + str(product.getEndTime()))
print("Bands: %s" % (list(band_names)))
##---------------------------------------------------------------------------------
with open('rice_LUT.csv','r') as dest_f:
data_iter = csv.reader(dest_f,
delimiter = ',',
quotechar = '"')
data = [data for data in data_iter]
data_array = np.asarray(data, dtype = np.float32)
VV = data_array[:,1]
VH = data_array[:,2]
PAI = data_array[:,0]
X=np.column_stack((VV,VH))
Y = PAI
#SVR training
pipeline = make_pipeline(StandardScaler(),
SVR(kernel='rbf', epsilon=0.105, C=250, gamma = 2.8),
)
SVRmodel=pipeline.fit(X,Y)
# Predictfor validation data
valX = X;
y_out = pipeline.predict(valX);
##---------------------------------------------------------------------------------
bandc11 = product.getBand('C11')
bandc22 = product.getBand('C22')
laiProduct = Product('LAI', 'LAI', width, height)
laiBand = laiProduct.addBand('lai', ProductData.TYPE_FLOAT32)
laiFlagsBand = laiProduct.addBand('lai_flags', ProductData.TYPE_UINT8)
writer = ProductIO.getProductWriter('BEAM-DIMAP')
ProductUtils.copyGeoCoding(product, laiProduct)
ProductUtils.copyMetadata(product, laiProduct)
ProductUtils.copyTiePointGrids(product, laiProduct)
laiFlagCoding = FlagCoding('lai_flags')
laiFlagCoding.addFlag("LAI_LOW", 1, "LAI below 0")
laiFlagCoding.addFlag("LAI_HIGH", 2, "LAI above 5")
group = laiProduct.getFlagCodingGroup()
#print(dir(group))
group.add(laiFlagCoding)
laiFlagsBand.setSampleCoding(laiFlagCoding)
laiProduct.setProductWriter(writer)
laiProduct.writeHeader('LAImap_output.dim')
c11 = numpy.zeros(width, dtype=numpy.float32)
c22 = numpy.zeros(width, dtype=numpy.float32)
print("Writing...")
for y in range(height):
print("processing line ", y, " of ", height)
c11 = bandc11.readPixels(0, y, width, 1, c11)
c22 = bandc22.readPixels(0, y, width, 1, c22)
Z=np.column_stack((c11,c22))
#ndvi = (r10 - r7) / (r10 + r7)
lai = pipeline.predict(Z);
laiBand.writePixels(0, y, width, 1, lai)
laiLow = lai < 0.0
laiHigh = lai > 5.0
laiFlags = numpy.array(laiLow + 2 * laiHigh, dtype=numpy.int32)
laiFlagsBand.writePixels(0, y, width, 1, laiFlags)
laiProduct.closeIO()
print("Done.") | [
11748,
25064,
198,
198,
11748,
299,
32152,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
3013,
7774,
1330,
8721,
198,
6738,
3013,
7774,
1330,
8721,
6601,
198,
6738,
3013,
7774,
1330,
8721,
9399,
198,
6738,
3013,
7774,
1330,
8721,
18274,
... | 2.551364 | 1,246 |
import fiona
d = {
"type": "Feature",
"id": "0",
"properties": {
"ADMINFORES": "99081600010343",
"REGION": "08",
"FORESTNUMB": "16",
"FORESTORGC": "0816",
"FORESTNAME": "El Yunque National Forest",
"GIS_ACRES": 55829.81,
"SHAPE_AREA": 0.0193062316937,
"SHAPE_LEN": 0.754287568301,
},
"geometry": {
"type": "MultiPolygon",
"coordinates": [
[
[
[-65.73293016000002, 18.33284838999998],
[-65.73293445000002, 18.331367639999996],
[-65.73189660000003, 18.331369719999998],
[-65.73040952000002, 18.33137273],
[-65.72620770999998, 18.33138113000001],
[-65.72303074000001, 18.331387389999975],
[-65.71763471000003, 18.331393549999973],
[-65.71717587, 18.331394069999988],
[-65.71297922999997, 18.331403290000026],
[-65.71248787000002, 18.33140437999998],
[-65.70898332000002, 18.33141236],
[-65.70846269999998, 18.331413540000028],
[-65.70470655999998, 18.331422009999983],
[-65.70340513999997, 18.33142491000001],
[-65.70268779000003, 18.331419400000016],
[-65.70098910000002, 18.33140635000001],
[-65.69978839999999, 18.33139711000001],
[-65.69977925, 18.32948927000001],
[-65.69976860000003, 18.32723274],
[-65.69976336000002, 18.326155840000013],
[-65.69975882, 18.32519180999998],
[-65.69975420999998, 18.324281380000002],
[-65.69975116, 18.323670390000018],
[-65.69974878, 18.323214399999983],
[-65.69972460999998, 18.317907339999977],
[-65.69972661000003, 18.31559458999999],
[-65.69972832000002, 18.314692869999988],
[-65.69972934999998, 18.312400700000012],
[-65.69973214999999, 18.309193600000015],
[-65.69973189000001, 18.308128119999992],
[-65.69971594999998, 18.304170699999986],
[-65.69971009, 18.302713270000027],
[-65.69969680999998, 18.29942688],
[-65.69968705999997, 18.297028839999996],
[-65.69968439000002, 18.294420890000026],
[-65.69968401, 18.294158770000024],
[-65.69968397000002, 18.29406161000003],
[-65.69968146999997, 18.29031968999999],
[-65.69967542, 18.286261500000023],
[-65.6996757, 18.286123120000013],
[-65.69967338999999, 18.284205750000012],
[-65.69967251000003, 18.283497660000023],
[-65.69967014000002, 18.281735219999973],
[-65.69967000000003, 18.28134633000002],
[-65.69994827, 18.28134559],
[-65.70099542999998, 18.28134276999998],
[-65.70358926, 18.28133575999999],
[-65.70616948000003, 18.281328770000016],
[-65.70911901, 18.28132070999999],
[-65.70971071999998, 18.28131909000001],
[-65.71624101999998, 18.28131652000002],
[-65.71624542, 18.276418089999993],
[-65.71624548, 18.27636744],
[-65.71624578000001, 18.275968209999974],
[-65.71624845000002, 18.27300660999998],
[-65.71624307000002, 18.271180739999977],
[-65.71623899999997, 18.26979332000002],
[-65.71623254999997, 18.267581380000024],
[-65.71623254999997, 18.267578500000013],
[-65.71623402, 18.267040029999976],
[-65.71623762000002, 18.265657929999975],
[-65.71623955000001, 18.26496930000002],
[-65.71624981999997, 18.260115170000006],
[-65.71625891999997, 18.257678180000028],
[-65.71625689000001, 18.25766888999999],
[-65.71628033000002, 18.252014929999973],
[-65.71628700000002, 18.250603020000028],
[-65.71629617000002, 18.248364939999988],
[-65.71629643, 18.248011659999975],
[-65.71974196999997, 18.248007089999987],
[-65.72038055000002, 18.24800706000002],
[-65.72076942000001, 18.24800829999998],
[-65.72464429000001, 18.248011910000002],
[-65.72465315, 18.248011519999977],
[-65.72509256000001, 18.24801222000002],
[-65.72707300000002, 18.24801083],
[-65.73231042999998, 18.2480104],
[-65.73397174000002, 18.248009190000005],
[-65.73705114, 18.248008589999984],
[-65.73750502000001, 18.248008190000007],
[-65.73889711999999, 18.24800842000002],
[-65.73978022, 18.248008830000003],
[-65.74408667, 18.248010669999985],
[-65.74502591999999, 18.248009980000006],
[-65.74623288999999, 18.248009120000006],
[-65.74772324000003, 18.248009149999973],
[-65.74924592000002, 18.248014580000017],
[-65.74961603999998, 18.248013990000004],
[-65.74961524000003, 18.244120570000007],
[-65.74961268999999, 18.243257019999987],
[-65.74961502999997, 18.235669789999974],
[-65.74961267999998, 18.235211540000023],
[-65.74961048, 18.234789499999977],
[-65.74961128000001, 18.231243000000006],
[-65.75090724, 18.231235679999998],
[-65.75247086000002, 18.231236500000023],
[-65.75309636999998, 18.231236850000016],
[-65.75896512000003, 18.231239829999993],
[-65.76053288000003, 18.231240590000027],
[-65.76145975999998, 18.231241049999994],
[-65.76266423999999, 18.23124161999999],
[-65.76402088999998, 18.231242259999988],
[-65.76422652999997, 18.231242339999994],
[-65.76459129, 18.231242520000023],
[-65.76506522, 18.231243529999972],
[-65.76575971, 18.231245],
[-65.77265518000002, 18.231259480000006],
[-65.77609515, 18.23126751000001],
[-65.77853763000002, 18.231273129999977],
[-65.78301661, 18.231283440000027],
[-65.78536026, 18.231288749999976],
[-65.78565572000002, 18.231289430000004],
[-65.78587555000001, 18.23129019999999],
[-65.78745778000001, 18.23129352000001],
[-65.79147775000001, 18.231303949999983],
[-65.80175496999999, 18.23133021000001],
[-65.80328739999999, 18.23133408000001],
[-65.80925552999997, 18.23135074999999],
[-65.81185003000002, 18.231357919999994],
[-65.81302187, 18.231352949999973],
[-65.81574820999998, 18.23134140000002],
[-65.81705820000002, 18.231335829999978],
[-65.81733358000002, 18.231334670000024],
[-65.82028713, 18.231322050000017],
[-65.82052381, 18.23132104000001],
[-65.82337763999999, 18.23130882999999],
[-65.82649563000001, 18.231295439999997],
[-65.82811142999998, 18.231288459999973],
[-65.83293057999998, 18.23127384999998],
[-65.83292964999998, 18.231761140000003],
[-65.83293025, 18.234220730000004],
[-65.83292996, 18.23624890000002],
[-65.83292955000002, 18.239821380000024],
[-65.83292905000002, 18.244286690000024],
[-65.83292845, 18.244807849999972],
[-65.83292886999999, 18.245117160000007],
[-65.83292883000001, 18.24573097000001],
[-65.83292870999998, 18.247063589999982],
[-65.83292857999999, 18.248008060000018],
[-65.83315374, 18.248008760000005],
[-65.83325909000001, 18.248009089999982],
[-65.83590992, 18.248030509999978],
[-65.84442614, 18.248036909999996],
[-65.84617400000002, 18.248038199999996],
[-65.84807433999998, 18.24803958000001],
[-65.84813063000001, 18.248039609999978],
[-65.84903366999998, 18.248040240000023],
[-65.85197088000001, 18.24804229],
[-65.85535651999999, 18.24804193],
[-65.85613706999999, 18.248041839999985],
[-65.85719701, 18.248041699999987],
[-65.8638446, 18.24804075999998],
[-65.86544515000003, 18.24804051000001],
[-65.87069150999997, 18.248039570000003],
[-65.87385301, 18.248038310000027],
[-65.87461352999998, 18.248020329999974],
[-65.87817146999998, 18.248007959999995],
[-65.88441703000001, 18.24800984000001],
[-65.89088908999997, 18.248012580000022],
[-65.89899125, 18.248013500000013],
[-65.89925985999997, 18.24801395999998],
[-65.90513017, 18.248014790000013],
[-65.90874113000001, 18.248012710000012],
[-65.91595359000002, 18.248011819999988],
[-65.91629429, 18.248011819999988],
[-65.9162887, 18.250010359999976],
[-65.9162852, 18.25164811000002],
[-65.91628292000001, 18.25191947000002],
[-65.91627997, 18.253774229999976],
[-65.91627848000002, 18.25477933000002],
[-65.91627578999999, 18.255991100000017],
[-65.91626445999998, 18.261137089999977],
[-65.91625448000002, 18.26512563],
[-65.91625524, 18.26536785000002],
[-65.91625922999998, 18.266019389999997],
[-65.91632637999999, 18.266198929999973],
[-65.91632625, 18.266542049999998],
[-65.91631202000002, 18.267959780000012],
[-65.91631167000003, 18.267977850000022],
[-65.91630744000003, 18.268755800000008],
[-65.91630715999997, 18.268808560000025],
[-65.91625932, 18.270663520000028],
[-65.91625911, 18.270671989999983],
[-65.91625876, 18.270887870000024],
[-65.91625875, 18.27455298000001],
[-65.91625871999997, 18.274613149999993],
[-65.91625811, 18.279979179999998],
[-65.91626000000002, 18.280340190000004],
[-65.91625800000003, 18.281121770000027],
[-65.91625804, 18.281356930000015],
[-65.91618933000001, 18.281356570000014],
[-65.91500064000002, 18.281350369999984],
[-65.91296770999998, 18.281339800000012],
[-65.91253340999998, 18.281337529999973],
[-65.91229578999997, 18.281336280000005],
[-65.90998387000002, 18.281324219999988],
[-65.90871597, 18.281318759999976],
[-65.90216367, 18.28129032999999],
[-65.90111256, 18.281285760000003],
[-65.89913740999998, 18.28127711000002],
[-65.89885119000002, 18.28127286],
[-65.89237293000002, 18.281247450000023],
[-65.89048616000002, 18.281239140000025],
[-65.88711766, 18.28122424999998],
[-65.88599235999999, 18.281219249999992],
[-65.88291291000002, 18.28120555999999],
[-65.88291178999998, 18.28584490999998],
[-65.88291048999997, 18.291010749999998],
[-65.88290905000002, 18.29165870999998],
[-65.88291565999998, 18.302684020000015],
[-65.88291612, 18.303763930000002],
[-65.88291874999999, 18.31314200999998],
[-65.88292098, 18.314737100000002],
[-65.88292178, 18.316319510000028],
[-65.88292336, 18.320099939999977],
[-65.88292583999998, 18.325711160000026],
[-65.88292658, 18.32707603],
[-65.88292819999998, 18.330798640000012],
[-65.88292837, 18.331260059999977],
[-65.88087401000001, 18.331255440000007],
[-65.87894735999998, 18.331251090000023],
[-65.87603802000001, 18.33124448000001],
[-65.87461601000001, 18.33124122999999],
[-65.86804993999999, 18.331420340000022],
[-65.86763531000003, 18.331420009999988],
[-65.86672666999999, 18.33141931],
[-65.86648867999997, 18.331419100000005],
[-65.86635653000002, 18.331419170000004],
[-65.86273363999999, 18.331421009999985],
[-65.85793086000001, 18.331423389999998],
[-65.85789242999999, 18.33142171999998],
[-65.85542400000003, 18.331424019999986],
[-65.85350249999999, 18.331425749999994],
[-65.84982063000001, 18.33142908000002],
[-65.84969439000002, 18.331429189999994],
[-65.84969428, 18.331550279999988],
[-65.84969804000002, 18.33796344000001],
[-65.84969840999997, 18.338737999999978],
[-65.8497021, 18.345083629999976],
[-65.84970268000001, 18.346151969999994],
[-65.84970370000002, 18.34806388999999],
[-65.84281220000003, 18.348051429999998],
[-65.83631126, 18.348039400000005],
[-65.83572038, 18.348038309999993],
[-65.82972193, 18.348027020000018],
[-65.82915395999999, 18.348025940000014],
[-65.82799924, 18.34802375999999],
[-65.82479099, 18.34801637999999],
[-65.82399432, 18.34801453],
[-65.82321229000001, 18.348012719999986],
[-65.82141923, 18.348008540000023],
[-65.82131368, 18.34800831000001],
[-65.81955477000002, 18.348004189999983],
[-65.81593006999998, 18.347995690000005],
[-65.81524768000003, 18.347994099999994],
[-65.81430688, 18.347991850000028],
[-65.81409592, 18.34799134000002],
[-65.81219464999998, 18.347986839999976],
[-65.81037927, 18.347982520000016],
[-65.80875237999999, 18.347978650000016],
[-65.80848982999998, 18.34797801000002],
[-65.80829098999999, 18.347977609999987],
[-65.80772302000003, 18.347976930000016],
[-65.80733909999998, 18.34797567999999],
[-65.80353065000003, 18.347967859999983],
[-65.80071562, 18.347962040000027],
[-65.79902959999998, 18.34795853999998],
[-65.79798546, 18.34795637000002],
[-65.79009180999998, 18.347941110000022],
[-65.78932427000001, 18.347939639999993],
[-65.78840032, 18.347937820000027],
[-65.78753816, 18.347936129999994],
[-65.78601164000003, 18.347933119999993],
[-65.78038322999998, 18.347921919999976],
[-65.77934201, 18.347919479999973],
[-65.77871169000002, 18.347918520000007],
[-65.77776547000002, 18.347916520000012],
[-65.77676473999998, 18.347914670000023],
[-65.77662666999998, 18.347914370000012],
[-65.77532722000001, 18.347911739999972],
[-65.77499889, 18.347911039999985],
[-65.77385053, 18.347908700000005],
[-65.77354066999999, 18.34790806000001],
[-65.76955748, 18.347899840000025],
[-65.76888499, 18.347898439999994],
[-65.76835487, 18.347897349999982],
[-65.76683013000002, 18.34789416000001],
[-65.76222604999998, 18.347884490000013],
[-65.75909141, 18.347877840000024],
[-65.75869390000003, 18.347874339999976],
[-65.75078702000002, 18.34780397999998],
[-65.74961532999998, 18.347793539999998],
[-65.74804139999998, 18.347743690000016],
[-65.74783091, 18.347737010000003],
[-65.74728348000002, 18.347736259999976],
[-65.74297489999998, 18.347730169999977],
[-65.74044021999998, 18.347710549999988],
[-65.73974084000002, 18.347705140000016],
[-65.73561567000002, 18.34767314999999],
[-65.73484725999998, 18.347665380000024],
[-65.73302854000002, 18.347646950000012],
[-65.73294028999999, 18.347646069999996],
[-65.73293561999998, 18.346632310000018],
[-65.73292482, 18.344269059999988],
[-65.73292071999998, 18.343373789999987],
[-65.73291719000002, 18.34259155000001],
[-65.73290365999998, 18.339655180000022],
[-65.73291784000003, 18.337885169999993],
[-65.73292518, 18.334980180000002],
[-65.73292579000002, 18.334753429999978],
[-65.73293016000002, 18.33284838999998],
]
],
[
[
[-66.16262245000001, 18.051031109999997],
[-66.16184043999999, 18.049737929999992],
[-66.1619091, 18.04731941],
[-66.16514587, 18.04502678],
[-66.16511536000002, 18.044198989999984],
[-66.16511725999999, 18.043462750000003],
[-66.16511725999999, 18.043279649999988],
[-66.16594887000002, 18.04355812],
[-66.16832161000002, 18.041448590000016],
[-66.16813087000003, 18.040346150000005],
[-66.16640091, 18.04031180999999],
[-66.16698073999999, 18.03862952999998],
[-66.16720580999998, 18.037527080000018],
[-66.16765975999999, 18.033853529999988],
[-66.16861915999999, 18.034097669999994],
[-66.16942024000002, 18.033731460000013],
[-66.16954613000001, 18.03507804999998],
[-66.16970443999998, 18.036489490000008],
[-66.16989517000002, 18.037008290000017],
[-66.17005347999998, 18.038480760000027],
[-66.17072487000002, 18.03927802999999],
[-66.17091750999998, 18.039522169999998],
[-66.17117309999998, 18.039552689999994],
[-66.17162131999999, 18.039552689999994],
[-66.17216492, 18.039308549999987],
[-66.17245293000002, 18.039155960000016],
[-66.17293358, 18.039094920000025],
[-66.17320251000001, 18.039094920000025],
[-66.17344666000002, 18.039094920000025],
[-66.17376709000001, 18.03928185000001],
[-66.17305756000002, 18.042036059999987],
[-66.17280005999999, 18.04304695000002],
[-66.17234993, 18.044912339999996],
[-66.17170142999998, 18.050027849999992],
[-66.17182922, 18.050394059999974],
[-66.17035484000002, 18.051618580000024],
[-66.16718483, 18.05198096999999],
[-66.16692733999997, 18.051458360000026],
[-66.16661072, 18.050817489999986],
[-66.16660117999999, 18.050874710000016],
[-66.16659355000002, 18.05092811999998],
[-66.16641808000003, 18.052057269999978],
[-66.16641426000001, 18.052072529999975],
[-66.16576958000002, 18.05623436000002],
[-66.16262245000001, 18.051031109999997],
]
],
[
[
[-66.53508758999999, 18.392507550000005],
[-66.53519820999998, 18.391786579999973],
[-66.53970336999998, 18.392427440000006],
[-66.53828812, 18.397306440000023],
[-66.53822708000001, 18.39755821],
[-66.53777313, 18.398542399999997],
[-66.53761481999999, 18.400304790000007],
[-66.53463554000001, 18.40027046],
[-66.53440475000002, 18.399271010000007],
[-66.53497124, 18.39718819000001],
[-66.53505897999997, 18.396612170000026],
[-66.53450774999999, 18.395158770000023],
[-66.53466796999999, 18.394887919999974],
[-66.53466796999999, 18.39454841999998],
[-66.53477286999998, 18.394208909999975],
[-66.53480911000003, 18.393922809999992],
[-66.53482628, 18.39348030000002],
[-66.5349865, 18.393175129999975],
[-66.53508758999999, 18.392507550000005],
]
],
],
},
}
from shapely.geometry import shape
print(shape(d["geometry"]))
| [
11748,
277,
32792,
198,
198,
67,
796,
1391,
198,
220,
220,
220,
366,
4906,
1298,
366,
38816,
1600,
198,
220,
220,
220,
366,
312,
1298,
366,
15,
1600,
198,
220,
220,
220,
366,
48310,
1298,
1391,
198,
220,
220,
220,
220,
220,
220,
2... | 1.58383 | 14,583 |
from __future__ import print_function
import argparse, sys
from .utils import is_textfile
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
11748,
1822,
29572,
11,
25064,
198,
6738,
764,
26791,
1330,
318,
62,
5239,
7753,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
25064,
13,
37... | 2.796296 | 54 |
if __name__ == "__main__":
print("Nothing yet...")
| [
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
3601,
7203,
18465,
1865,
9313,
8,
198
] | 2.619048 | 21 |
import base64
from sqlalchemy import Column, String, LargeBinary
from sqlalchemy.orm import relationship
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm.exc import NoResultFound
from vantage6.common.globals import STRING_ENCODING
from .base import Base, Database
| [
11748,
2779,
2414,
198,
198,
6738,
44161,
282,
26599,
1330,
29201,
11,
10903,
11,
13601,
33,
3219,
198,
6738,
44161,
282,
26599,
13,
579,
1330,
2776,
198,
6738,
44161,
282,
26599,
13,
2302,
13,
12114,
10236,
1330,
14554,
62,
26745,
198,... | 3.567901 | 81 |
from checkov.common.models.enums import CheckResult, CheckCategories
from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
check = EKSSecretsEncryption()
| [
6738,
2198,
709,
13,
11321,
13,
27530,
13,
268,
5700,
1330,
6822,
23004,
11,
6822,
34,
26129,
198,
6738,
2198,
709,
13,
353,
430,
687,
13,
42116,
13,
31092,
13,
8692,
62,
31092,
62,
9122,
1330,
7308,
26198,
9787,
628,
198,
198,
9122... | 3.528302 | 53 |
""" AnalysisNode is the base class that all analysis nodes inherit from. """
import logging
import operator
from functools import reduce
from random import random
from time import time
from typing import Tuple, Sequence, List, Dict, Optional
from celery.canvas import Signature
from django.conf import settings
from django.core.cache import cache
from django.db import connection, models
from django.db.models import Value, IntegerField
from django.db.models.aggregates import Count
from django.db.models.deletion import CASCADE, SET_NULL
from django.db.models.query_utils import Q
from django.dispatch import receiver
from django.utils import timezone
from django_dag.models import node_factory, edge_factory
from django_extensions.db.models import TimeStampedModel
from lazy import lazy
from model_utils.managers import InheritanceManager
from analysis.exceptions import NonFatalNodeError, NodeParentErrorsException, NodeConfigurationException, \
NodeParentNotReadyException, NodeNotFoundException, NodeOutOfDateException
from analysis.models.enums import GroupOperation, NodeStatus, NodeColors, NodeErrorSource, AnalysisTemplateType
from analysis.models.models_analysis import Analysis
from analysis.models.nodes.node_counts import get_extra_filters_q, get_node_counts_and_labels_dict
from annotation.annotation_version_querysets import get_variant_queryset_for_annotation_version
from classification.models import Classification, post_delete
from library.database_utils import queryset_to_sql
from library.django_utils import thread_safe_unique_together_get_or_create
from library.log_utils import report_event
from library.utils import format_percent
from snpdb.models import BuiltInFilters, Sample, Variant, VCFFilter, Wiki, Cohort, VariantCollection, \
ProcessingStatus, GenomeBuild, AlleleSource
from snpdb.variant_collection import write_sql_to_variant_collection
from variantgrid.celery import app
    def get_q(self, disable_cache=False):
        """ Return the Django Q object representing the Variant filters for this node.

            This is the method to override in subclasses - not get_queryset() as:
            Chains of filters to a reverse foreign key relationship causes
            multiple joins, so use Q objects which are combined at the end
            qs = qs.filter(table_1__val=1)
            qs = qs.filter(table_2__val=2)
            This is not necessarily equal to:
            qs.filter(table_1__val=1, table_2__val=2)
            @see https://docs.djangoproject.com/en/2/topics/db/queries/#spanning-multi-valued-relationships
        """
        # Building the Q can take a few seconds for some nodes (Comp HET / pheno) and is
        # needed both for node counts and for each grid page query - so cache per node version.
        cache_key = self._get_cache_key() + f"q_cache={disable_cache}"
        q: Optional[Q] = None
        if settings.ANALYSIS_NODE_CACHE_Q:  # Disable for unit tests
            q = cache.get(cache_key)
        if q is None:
            if disable_cache is False:
                # A materialized node cache (variant collection) short-circuits rebuilding the Q
                if cache_q := self._get_node_cache_q():
                    return cache_q
            if self.has_input():
                q = self.get_parent_q()
                if self.modifies_parents():
                    # AND this node's own restriction onto the combined parent Q
                    if node_q := self._get_node_q():
                        q &= node_q
            else:
                # Source node: start from everything, then replace with the node's own Q if any
                q = self.q_all()
                if node_q := self._get_node_q():
                    q = node_q
            cache.set(cache_key, q)
        return q
def _get_node_cache_q(self) -> Optional[Q]:
q = None
if self.node_cache:
q = self.node_cache.variant_collection.get_q()
return q
def _get_node_q(self) -> Optional[Q]:
raise NotImplementedError()
    def _get_unfiltered_queryset(self, **extra_annotation_kwargs):
        """ Unfiltered means before the get_q() is applied.

            extra_annotation_kwargs is applied AFTER the node's own annotation kwargs,
            so callers can override individual annotations.
        """
        qs = self._get_model_queryset()
        a_kwargs = self.get_annotation_kwargs()
        a_kwargs.update(extra_annotation_kwargs)
        if a_kwargs:
            # Clear ordering, @see
            # https://docs.djangoproject.com/en/3.0/topics/db/aggregation/#interaction-with-default-ordering-or-order-by
            qs = qs.annotate(**a_kwargs).order_by()
        return qs
    def get_queryset(self, extra_filters_q=None, extra_annotation_kwargs=None,
                     inner_query_distinct=False, disable_cache=False):
        """Return the filtered Variant queryset for this node.

        extra_filters_q: optional additional Q ANDed onto the node's own Q.
        inner_query_distinct: when distinct is required, filter pk__in on the
        filtered query instead of calling .distinct() on it.
        """
        if extra_annotation_kwargs is None:
            extra_annotation_kwargs = {}
        qs = self._get_unfiltered_queryset(**extra_annotation_kwargs)
        q = self.get_q(disable_cache=disable_cache)
        if extra_filters_q:
            q &= extra_filters_q
        filtered_qs = qs.filter(q)
        if self.queryset_requires_distinct:
            if inner_query_distinct:
                # Push the distinct work into an inner pk__in subquery
                qs = qs.filter(pk__in=filtered_qs.values_list("pk", flat=True))
            else:
                qs = filtered_qs.distinct()
        else:
            qs = filtered_qs
        return qs
def get_extra_grid_config(self):
return {}
def get_class_name(self):
return self.__class__.__name__
def get_identifier(self):
return f"{self.get_class_name()}-{self.pk}"
def get_css_classes(self):
""" returns list of css classes - set on "node > .node-overlay" on node appearance update """
css_classes = []
if self.output_node:
css_classes.append("output-node")
if self.analysis.template_type == AnalysisTemplateType.TEMPLATE and self.analysisvariable_set.exists():
css_classes.append("variable-node")
return css_classes
def get_input_count(self):
parents = self.get_non_empty_parents()
return sum([p.get_output_count() for p in parents])
    def get_output_count(self):
        """Return this node's variant count, computing and persisting it on first use."""
        # TODO: Move the if not modify parents code in here.
        if self.count is not None:
            return self.count
        # Cache the count on the model and persist so subsequent calls skip the query
        count = self.get_queryset().count()
        self.count = count
        self.save()
        return count
def _get_method_summary(self):
raise NotImplementedError()
def get_method_summary(self):
errors = self.get_errors(flat=True)
if not errors:
html_summary = self._get_method_summary()
else:
html_summary = "<b>incorrectly configured</b><ul>"
for error in errors:
html_summary += f"<li>{error}</li>"
html_summary += "</ul>"
return html_summary
def get_node_name(self):
""" Automatic node name """
raise NotImplementedError(f"Node Class: {self.get_class_name()}")
def _get_genome_build_errors(self, field_name, field_genome_build: GenomeBuild) -> List:
""" Used to quickly add errors about genome build mismatches
This only happens in templates (ran template on sample with different build than hardcoded data)
In normal analyses, autocomplete restrictions should not allow you to configure data from other builds """
errors = []
if field_genome_build != self.analysis.genome_build:
msg = f"{field_name} genome build: {field_genome_build} different from analysis build: {self.analysis.genome_build}"
errors.append(msg)
return errors
def _get_configuration_errors(self) -> List:
return []
def get_parents_and_errors(self):
""" Returns error array, includes any min/max parent error and node config error """
if self.has_input():
return self.get_parent_subclasses_and_errors()
return [], []
    def get_errors(self, include_parent_errors=True, flat=False):
        """ Returns a list of (NodeErrorSource, str) tuples, unless flat=True where it's only strings.

            Sources combined, in order: analysis-level errors, parent errors (optional),
            this node's stored internal errors, then configuration errors.
        """
        errors = []
        for analysis_error in self.analysis.get_errors():
            errors.append((NodeErrorSource.ANALYSIS, analysis_error))
        _, parent_errors = self.get_parents_and_errors()
        if include_parent_errors:
            errors.extend(parent_errors)
        if self.errors:
            errors.append((NodeErrorSource.INTERNAL_ERROR, self.errors))
        errors.extend((NodeErrorSource.CONFIGURATION, ce) for ce in self._get_configuration_errors())
        if flat:
            errors = AnalysisNode.flatten_errors(errors)
        return errors
def inherits_parent_columns(self):
return self.min_inputs == 1 and self.max_inputs == 1
def _get_node_extra_columns(self):
return []
def _get_inherited_columns(self):
extra_columns = []
if self.inherits_parent_columns():
parent = self.get_single_parent()
extra_columns.extend(parent.get_extra_columns())
return extra_columns
    def get_extra_columns(self):
        """Extra grid columns for this node (inherited + node-specific), cached per node version."""
        cache_key = self._get_cache_key() + "_extra_columns"
        extra_columns = cache.get(cache_key)
        if extra_columns is None:
            extra_columns = []
            if self.is_valid():
                extra_columns.extend(self._get_inherited_columns())
                # Only add columns that are unique, as otherwise filters get added twice.
                node_extra_columns = self._get_node_extra_columns()
                for col in node_extra_columns:
                    if col not in extra_columns:
                        extra_columns.append(col)
            cache.set(cache_key, extra_columns)
        return extra_columns
def _get_node_extra_colmodel_overrides(self):
""" Subclasses should override to add colmodel overrides for JQGrid """
return {}
def _get_inherited_colmodel_overrides(self):
extra_overrides = {}
if self.inherits_parent_columns():
parent = self.get_single_parent()
extra_overrides.update(parent.get_extra_colmodel_overrides())
return extra_overrides
def get_extra_colmodel_overrides(self):
""" For JQGrid - subclasses should override _get_node_extra_colmodel_overrides """
extra_overrides = {}
if self.is_valid() and self.uses_parent_queryset:
extra_overrides.update(self._get_inherited_colmodel_overrides())
extra_overrides.update(self._get_node_extra_colmodel_overrides())
return extra_overrides
def get_node_classification(self):
if self.is_source():
classification = "source"
else:
classification = "filter"
return classification
def has_input(self):
return self.max_inputs != 0
def is_source(self):
return self.has_input() is False
def is_valid(self):
return not self.get_errors()
def is_ready(self):
return NodeStatus.is_ready(self.status)
    def bump_version(self):
        """Invalidate this node: bump version, mark dirty and clear cached state.

        Also schedules async deletion of the previous version's cached data.
        """
        if self.version > 0:
            # Clean up the superseded version's caches in the background
            DELETE_CACHE_TASK = "analysis.tasks.node_update_tasks.delete_old_node_versions"
            app.send_task(DELETE_CACHE_TASK, args=(self.pk, self.version))
        self.version += 1
        self.status = NodeStatus.DIRTY
        self.count = None
        self.errors = None
        # A clone's counts can no longer be trusted once we diverge from the original
        self.cloned_from = None
def modifies_parents(self):
""" Can overwrite and set to False to use parent counts """
return True
def get_unmodified_single_parent_node(self) -> Optional['AnalysisNode']:
""" If a node doesn't modify single parent - can use that in some places to re-use cache """
if self.is_valid() and self.has_input() and not self.modifies_parents():
try:
return self.get_single_parent()
except ValueError:
pass
return None
    def _get_cached_label_count(self, label) -> Optional[int]:
        """ Override for optimisation.

            Returning None means we need to run the SQL to get the count.
            Cheap sources tried in order: the clone-origin's NodeCount, then
            parent NodeCounts (zero parents => zero; single unmodified parent
            => reuse parent's count).
        """
        try:
            if self.cloned_from:
                # If cloned (and we or original haven't changed) - use those counts
                try:
                    node_count = NodeCount.load_for_node_version(self.cloned_from, label)
                    return node_count.count
                except NodeCount.DoesNotExist:
                    # Should only ever happen if original bumped version since we were loaded
                    # otherwise should have cascade set cloned_from to NULL
                    pass
            if self.has_input():
                parent_non_zero_label_counts = []
                for parent in self.get_non_empty_parents():
                    if parent.count != 0:  # count=0 has 0 for all labels
                        parent_node_count = NodeCount.load_for_node(parent, label)
                        if parent_node_count.count != 0:
                            parent_non_zero_label_counts.append(parent_node_count.count)
                if not parent_non_zero_label_counts:
                    # All parents empty for this label, so this node must be too
                    return 0
                if not self.modifies_parents():
                    if len(parent_non_zero_label_counts) == 1:
                        # Single parent, no modification - reuse its count directly
                        return parent_non_zero_label_counts[0]
        except NodeCount.DoesNotExist:
            pass
        except Exception as e:
            # Best-effort optimisation: fall back to running the SQL on any failure
            logging.warning("Trouble getting cached %s count: %s", label, e)
        return None
def get_grid_node_id_and_version(self):
""" Uses parent node_id/version if possible to re-use cache """
node_id = self.pk
version = self.version
if self.cloned_from:
node_id = self.cloned_from.node_id
version = self.cloned_from.version
if parent := self.get_unmodified_single_parent_node():
node_id, version = parent.get_grid_node_id_and_version()
return node_id, version
    def node_counts(self):
        """Compute (status, total_count) for this node; runs inside a Celery task.

        Tries cached per-label counts first, hits the DB only for the rest, then
        persists every label count as a NodeCount row for this node version.
        """
        self.count = None
        counts_to_get = {BuiltInFilters.TOTAL}
        counts_to_get.update([i[0] for i in self.analysis.get_node_count_types()])
        label_counts = {}
        for label in counts_to_get:
            label_count = self._get_cached_label_count(label)
            if label_count is not None:
                label_counts[label] = label_count
        # Only the labels we could not satisfy from cache need a DB query
        counts_to_get -= set(label_counts)
        logging.debug("%s cached counts: %s", self, label_counts)
        if counts_to_get:
            logging.debug("%s needs DB request for %s", self, counts_to_get)
            retrieved_label_counts = get_node_counts_and_labels_dict(self)
            label_counts.update(retrieved_label_counts)
        node_version = NodeVersion.get(self)
        for label, count in label_counts.items():
            NodeCount.objects.create(node_version=node_version, label=label, count=count)
        return NodeStatus.READY, label_counts[BuiltInFilters.TOTAL]
def _load(self):
""" Override to do anything interesting """
pass
    def load(self):
        """ load is called after parents are run; records status, counts and timing """
        start = time()
        self._load()  # Do before counts in case it affects anything
        status, count = self.node_counts()
        load_seconds = time() - start
        # update() (not save()) so a concurrent version bump raises NodeOutOfDateException
        self.update(status=status, count=count, load_seconds=load_seconds)
    def add_parent(self, parent, *args, **kwargs):
        """Connect this node as a child of parent, refusing invisible parents and duplicate edges."""
        if not parent.visible:
            raise NonFatalNodeError("Not connecting children to invisible nodes!")
        existing_connect = parent.children.through.objects.filter(parent=parent, child=self)
        if not existing_connect.exists():
            super().add_parent(parent)
            # Flag so save() knows to bump version / propagate to children
            self.parents_changed = True
        else:
            logging.error("Node(pk=%d).add_parent(pk=%d) already exists!", self.pk, parent.pk)
def remove_parent(self, parent):
""" disconnects parent by deleting edge """
# Ok to have multiple, just delete first
edge = parent.children.through.objects.filter(parent=parent, child=self).first()
if edge: # could be some kind of race condition?
edge.delete()
self.parents_changed = True
def handle_ancestor_input_samples_changed(self):
pass
def update(self, **kwargs):
""" Updates Node if self.version matches DB - otherwise throws NodeOutOfDateException """
self_qs = AnalysisNode.objects.filter(pk=self.pk, version=self.version)
updated = self_qs.update(**kwargs)
if not updated:
raise NodeOutOfDateException()
    def save(self, **kwargs):
        """ To avoid race conditions, don't use save() in a celery task (unless running in scheduling_single_worker)
            instead use update() method above.

            Handles derived state (auto name, validity, appearance/shadow color),
            bumps the version when inputs changed, and propagates dirtiness to children.
        """
        super_save = super().save
        if self.parents_changed or self.ancestor_input_samples_changed:
            self.handle_ancestor_input_samples_changed()
        if self.auto_node_name:
            self.name = self.get_node_name()
        # TODO: This causes lots of DB queries... should we change this?
        self.valid = self.is_valid()
        if not self.valid:
            self.shadow_color = NodeColors.ERROR
            self.appearance_dirty = True
        elif self.shadow_color == NodeColors.ERROR:  # Need to allow nodes to set to warning
            self.shadow_color = NodeColors.VALID
            self.appearance_dirty = True
        if self.appearance_dirty:
            self.appearance_version += 1
        if self.parents_changed or self.queryset_dirty:
            self.bump_version()
            super_save(**kwargs)
            if self.update_children:
                # We also need to bump if node has it's own sample - as in templates, we set fields in toposort order
                # So we could go from having multiple proband samples to only one later (thus can set descendants)
                for kid in self.children.select_subclasses():
                    kid.ancestor_input_samples_changed = self.is_source() or self.ancestor_input_samples_changed or \
                                                         self.get_samples_from_node_only_not_ancestors()
                    kid.appearance_dirty = False
                    kid.queryset_dirty = True
                    kid.save()  # Will bump versions
        else:
            super_save(**kwargs)
        # Make sure this always exists
        NodeVersion.objects.get_or_create(node=self, version=self.version)
        # Modify our analyses last updated time
        Analysis.objects.filter(pk=self.analysis.pk).update(modified=timezone.now())
def adjust_cloned_parents(self, old_new_map):
""" If you need to do something with old/new parents """
pass
class NodeColumnSummaryCacheCollection(models.Model):
node_version = models.ForeignKey(NodeVersion, on_delete=CASCADE)
variant_column = models.TextField(null=False)
extra_filters = models.TextField(null=False)
| [
37811,
14691,
19667,
318,
262,
2779,
1398,
326,
477,
3781,
13760,
16955,
422,
13,
37227,
198,
11748,
18931,
198,
11748,
10088,
198,
6738,
1257,
310,
10141,
1330,
4646,
198,
6738,
4738,
1330,
4738,
198,
6738,
640,
1330,
640,
198,
6738,
1... | 2.336428 | 8,186 |
import os
import traceback
from flask import render_template, send_from_directory, current_app, g
from .emailing import email
| [
11748,
28686,
201,
198,
11748,
12854,
1891,
201,
198,
6738,
42903,
1330,
8543,
62,
28243,
11,
3758,
62,
6738,
62,
34945,
11,
1459,
62,
1324,
11,
308,
201,
198,
6738,
764,
368,
11608,
1330,
3053,
201,
198,
201,
198
] | 3.384615 | 39 |
# 'from ... import ...' statement
from sys import exit
main() | [
2,
705,
6738,
2644,
1330,
2644,
6,
2643,
198,
6738,
25064,
1330,
8420,
198,
198,
12417,
3419
] | 3.647059 | 17 |
from dataclasses import dataclass
from typing import List
from typing import Union
from postmanparser.description import Description
from postmanparser.exceptions import InvalidObjectException
from postmanparser.exceptions import MissingRequiredFieldException
| [
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
198,
6738,
19720,
1330,
7343,
198,
6738,
19720,
1330,
4479,
198,
198,
6738,
1281,
805,
48610,
13,
11213,
1330,
12489,
198,
6738,
1281,
805,
48610,
13,
1069,
11755,
1330,
17665,
10267,
16922,... | 4.851852 | 54 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import pip
import os
import sys
if __name__ == "__main__":
    # Script entry point; main() is presumably defined earlier in this file - confirm.
    main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
33918,
198,
11748,
7347,
198,
11748,
28686,
198,
11748,
25064,
198,
198,
361,
11593,
3672,
834,
6624,
366,
... | 2.407407 | 54 |
"""
Console scripts
David Megginson
April 2015
This is a big, ugly module to support the libhxl
console scripts, including (mainly) argument parsing.
License: Public Domain
Documentation: https://github.com/HXLStandard/libhxl-python/wiki
"""
from __future__ import print_function
import argparse, json, logging, os, re, requests, sys
# Do not import hxl, to avoid circular imports
import hxl.converters, hxl.filters, hxl.io
# Module-level logger, named after this module
logger = logging.getLogger(__name__)
# In Python2, sys.stdin is a byte stream; in Python3, it's a text stream
STDIN = sys.stdin.buffer
# Posix exit codes
EXIT_OK = 0
EXIT_ERROR = 1
EXIT_SYNTAX = 2
#
# Console script entry points
#
def hxladd():
    """Command-line entry point for the hxladd tool."""
    run_script(hxladd_main)
def hxlappend():
    """Command-line entry point for the hxlappend tool."""
    run_script(hxlappend_main)
def hxlclean():
    """Command-line entry point for the hxlclean tool."""
    run_script(hxlclean_main)
def hxlcount():
    """Command-line entry point for the hxlcount tool."""
    run_script(hxlcount_main)
def hxlcut():
    """Command-line entry point for the hxlcut tool."""
    run_script(hxlcut_main)
def hxldedup():
    """Command-line entry point for the hxldedup tool."""
    run_script(hxldedup_main)
def hxlhash():
    """Command-line entry point for the hxlhash tool."""
    run_script(hxlhash_main)
def hxlmerge():
    """Command-line entry point for the hxlmerge tool."""
    run_script(hxlmerge_main)
def hxlrename():
    """Command-line entry point for the hxlrename tool."""
    run_script(hxlrename_main)
def hxlreplace():
    """Command-line entry point for the hxlreplace tool."""
    run_script(hxlreplace_main)
def hxlfill():
    """Console script for hxlfill."""
    # Fixed docstring: previously said "hxlreplace" (copy-paste error)
    run_script(hxlfill_main)
def hxlexpand():
    """Command-line entry point for the hxlexpand tool."""
    run_script(hxlexpand_main)
def hxlexplode():
    """Command-line entry point for the hxlexplode tool."""
    run_script(hxlexplode_main)
def hxlimplode():
    """Command-line entry point for the hxlimplode tool."""
    run_script(hxlimplode_main)
def hxlselect():
    """Command-line entry point for the hxlselect tool."""
    run_script(hxlselect_main)
def hxlsort():
    """Command-line entry point for the hxlsort tool."""
    run_script(hxlsort_main)
def hxlspec():
    """Command-line entry point for the hxlspec tool."""
    run_script(hxlspec_main)
def hxltag():
    """Command-line entry point for the hxltag tool."""
    run_script(hxltag_main)
def hxlvalidate():
    """Command-line entry point for the hxlvalidate tool."""
    run_script(hxlvalidate_main)
#
# Main scripts for command-line tools.
#
def hxladd_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr):
    """
    Run hxladd with command-line arguments.
    @param args A list of arguments, excluding the script name
    @param stdin Standard input for the script
    @param stdout Standard output for the script
    @param stderr Standard error for the script
    """
    parser = make_args('Add new columns with constant values to a HXL dataset.')
    parser.add_argument(
        '-s',
        '--spec',
        help='Constant value to add to each row (may repeat option)',
        metavar='header#<tag>=<value>',
        action='append',
        required=True
    )
    parser.add_argument(
        '-b',
        '--before',
        help='Add new columns before existing ones rather than after them.',
        action='store_const',
        const=True,
        default=False
    )
    args = parser.parse_args(args)
    do_common_args(args)
    with make_source(args, stdin) as source, make_output(args, stdout) as output:
        # Renamed local from "filter" to avoid shadowing the builtin
        add_filter = hxl.filters.AddColumnsFilter(source, specs=args.spec, before=args.before)
        hxl.io.write_hxl(output.output, add_filter, show_tags=not args.strip_tags)
    return EXIT_OK
def hxlappend_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr):
    """
    Run hxlappend with command-line arguments.
    @param args A list of arguments, excluding the script name
    @param stdin Standard input for the script
    @param stdout Standard output for the script
    @param stderr Standard error for the script
    """
    parser = make_args('Concatenate two HXL datasets')
    # repeatable argument
    parser.add_argument(
        '-a',
        '--append',
        help='HXL file to append (may repeat option).',
        metavar='file_or_url',
        action='append',
        default=[]
    )
    parser.add_argument(
        '-l',
        '--list',
        help='URL or filename of list of URLs (may repeat option). Will appear after sources in -a options.',
        action='append',
        default=[]
    )
    parser.add_argument(
        '-x',
        '--exclude-extra-columns',
        # Fixed help-text typo: was "Don not add ..."
        help='Do not add extra columns not in the original dataset.',
        action='store_const',
        const=True,
        default=False
    )
    add_queries_arg(parser, 'From --append datasets, include only rows matching at least one query.')
    args = parser.parse_args(args)
    do_common_args(args)
    # Collect explicit -a sources first, then any sources named in -l list files
    append_sources = []
    for append_source in args.append:
        append_sources.append(hxl.data(append_source, True))
    for list_source in args.list:
        for append_source in hxl.filters.AppendFilter.parse_external_source_list(hxl.data(list_source, True)):
            append_sources.append(hxl.data(append_source, True))
    with make_source(args, stdin) as source, make_output(args, stdout) as output:
        # Renamed local from "filter" to avoid shadowing the builtin
        append_filter = hxl.filters.AppendFilter(
            source,
            append_sources=append_sources,
            add_columns=(not args.exclude_extra_columns),
            queries=args.query
        )
        hxl.io.write_hxl(output.output, append_filter, show_headers=not args.remove_headers, show_tags=not args.strip_tags)
    return EXIT_OK
def hxlclean_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr):
    """
    Run hxlclean with command-line arguments.
    @param args A list of arguments, excluding the script name
    @param stdin Standard input for the script
    @param stdout Standard output for the script
    @param stderr Standard error for the script
    """
    parser = make_args('Clean data in a HXL file.')
    parser.add_argument(
        '-w',
        '--whitespace',
        help='Comma-separated list of tag patterns for whitespace normalisation.',
        metavar='tag,tag...',
        type=hxl.model.TagPattern.parse_list
    )
    parser.add_argument(
        '-u',
        '--upper',
        help='Comma-separated list of tag patterns for uppercase conversion.',
        metavar='tag,tag...',
        type=hxl.model.TagPattern.parse_list
    )
    parser.add_argument(
        '-l',
        '--lower',
        help='Comma-separated list of tag patterns for lowercase conversion.',
        metavar='tag,tag...',
        type=hxl.model.TagPattern.parse_list
    )
    parser.add_argument(
        '-d',
        '--date',
        help='Comma-separated list of tag patterns for date normalisation.',
        metavar='tag,tag...',
        type=hxl.model.TagPattern.parse_list
    )
    parser.add_argument(
        '--date-format',
        help='Date formatting string in strftime format (defaults to %%Y-%%m-%%d).',
        default=None,
        metavar='format',
    )
    parser.add_argument(
        '-n',
        '--number',
        # Fixed help-text typo: was "patternss"
        help='Comma-separated list of tag patterns for number normalisation.',
        metavar='tag,tag...',
        type=hxl.model.TagPattern.parse_list
    )
    parser.add_argument(
        '--number-format',
        help='Number formatting string in printf format (without leading %%).',
        default=None,
        metavar='format',
    )
    parser.add_argument(
        '--latlon',
        help='Comma-separated list of tag patterns for lat/lon normalisation.',
        metavar='tag,tag...',
        type=hxl.model.TagPattern.parse_list
    )
    parser.add_argument(
        '-p',
        '--purge',
        help='Purge unparseable dates, numbers, and lat/lon during cleaning.',
        action='store_const',
        const=True,
        default=False
    )
    add_queries_arg(parser, 'Clean only rows matching at least one query.')
    args = parser.parse_args(args)
    do_common_args(args)
    with make_source(args, stdin) as source, make_output(args, stdout) as output:
        # Renamed local from "filter" to avoid shadowing the builtin
        clean_filter = hxl.filters.CleanDataFilter(
            source, whitespace=args.whitespace, upper=args.upper, lower=args.lower,
            date=args.date, date_format=args.date_format, number=args.number, number_format=args.number_format,
            latlon=args.latlon, purge=args.purge, queries=args.query
        )
        hxl.io.write_hxl(output.output, clean_filter, show_headers=not args.remove_headers, show_tags=not args.strip_tags)
    return EXIT_OK
def hxlcount_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr):
    """
    Run hxlcount with command-line arguments.
    @param args A list of arguments, excluding the script name
    @param stdin Standard input for the script
    @param stdout Standard output for the script
    @param stderr Standard error for the script
    """
    # Command-line arguments
    parser = make_args('Generate aggregate counts for a HXL dataset')
    parser.add_argument(
        '-t',
        '--tags',
        help='Comma-separated list of column tags to count.',
        metavar='tag,tag...',
        type=hxl.model.TagPattern.parse_list,
        default='loc,org,sector,adm1,adm2,adm3'
    )
    parser.add_argument(
        '-a',
        '--aggregator',
        help='Aggregator statement',
        metavar='statement',
        action='append',
        type=hxl.filters.Aggregator.parse,
        default=[]
    )
    add_queries_arg(parser, 'Count only rows that match at least one query.')
    args = parser.parse_args(args)
    do_common_args(args)
    with make_source(args, stdin) as source, make_output(args, stdout) as output:
        # Renamed local from "filter" to avoid shadowing the builtin
        count_filter = hxl.filters.CountFilter(source, patterns=args.tags, aggregators=args.aggregator, queries=args.query)
        hxl.io.write_hxl(output.output, count_filter, show_tags=not args.strip_tags)
    return EXIT_OK
def hxlmerge_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr):
    """
    Run hxlmerge with command-line arguments.
    @param args A list of arguments, excluding the script name
    @param stdin Standard input for the script
    @param stdout Standard output for the script
    @param stderr Standard error for the script
    """
    import contextlib  # local import: only needed for the nullcontext fallback below
    parser = make_args('Merge part of one HXL dataset into another.')
    parser.add_argument(
        '-m',
        '--merge',
        help='HXL file to write (if omitted, use standard output).',
        metavar='filename',
        required=True
    )
    parser.add_argument(
        '-k',
        '--keys',
        help='HXL tag(s) to use as a shared key.',
        metavar='tag,tag...',
        required=True,
        type=hxl.model.TagPattern.parse_list
    )
    parser.add_argument(
        '-t',
        '--tags',
        help='Comma-separated list of column tags to include from the merge dataset.',
        metavar='tag,tag...',
        required=True,
        type=hxl.model.TagPattern.parse_list
    )
    parser.add_argument(
        '-r',
        '--replace',
        help='Replace empty values in existing columns (when available) instead of adding new ones.',
        action='store_const',
        const=True,
        default=False
    )
    parser.add_argument(
        '-O',
        '--overwrite',
        help='Used with --replace, overwrite existing values.',
        action='store_const',
        const=True,
        default=False
    )
    add_queries_arg(parser, 'Merged data only from rows that match at least one query.')
    args = parser.parse_args(args)
    do_common_args(args)
    # Fix: the old code put "hxl.io.data(...) if args.merge else None" directly in the
    # with-statement; entering None as a context manager would raise AttributeError.
    # nullcontext() cleanly yields None for merge_source instead (--merge is required,
    # so the fallback only matters for programmatic callers).
    merge_cm = hxl.io.data(args.merge, True) if args.merge else contextlib.nullcontext()
    with make_source(args, stdin) as source, make_output(args, stdout) as output, merge_cm as merge_source:
        # Renamed local from "filter" to avoid shadowing the builtin
        merge_filter = hxl.filters.MergeDataFilter(
            source, merge_source=merge_source,
            keys=args.keys, tags=args.tags, replace=args.replace, overwrite=args.overwrite,
            queries=args.query
        )
        hxl.io.write_hxl(output.output, merge_filter, show_tags=not args.strip_tags)
    return EXIT_OK
def hxlrename_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr):
    """
    Run hxlrename with command-line arguments.
    @param args A list of arguments, excluding the script name
    @param stdin Standard input for the script
    @param stdout Standard output for the script
    @param stderr Standard error for the script
    """
    parser = make_args('Rename and retag columns in a HXL dataset')
    parser.add_argument(
        '-r',
        '--rename',
        help='Rename an old tag to a new one, with an optional new text header (may repeat option).',
        action='append',
        metavar='#?<original_tag>:<Text header>?#?<new_tag>',
        default=[],
        type=hxl.filters.RenameFilter.parse_rename
    )
    args = parser.parse_args(args)
    do_common_args(args)
    with make_source(args, stdin) as source, make_output(args, stdout) as output:
        # Renamed local from "filter" to avoid shadowing the builtin
        rename_filter = hxl.filters.RenameFilter(source, args.rename)
        hxl.io.write_hxl(output.output, rename_filter, show_tags=not args.strip_tags)
    return EXIT_OK
def hxlreplace_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr):
    """
    Run hxlreplace with command-line arguments.
    @param args A list of arguments, excluding the script name
    @param stdin Standard input for the script
    @param stdout Standard output for the script
    @param stderr Standard error for the script
    """
    parser = make_args('Replace strings in a HXL dataset')
    inline_group = parser.add_argument_group('Inline replacement')
    map_group = parser.add_argument_group('External substitution map')
    inline_group.add_argument(
        '-p',
        '--pattern',
        help='String or regular expression to search for',
        nargs='?'
    )
    inline_group.add_argument(
        '-s',
        '--substitution',
        help='Replacement string',
        nargs='?'
    )
    inline_group.add_argument(
        '-t',
        '--tags',
        help='Tag patterns to match',
        metavar='tag,tag...',
        type=hxl.model.TagPattern.parse_list
    )
    inline_group.add_argument(
        '-r',
        '--regex',
        help='Use a regular expression instead of a string',
        action='store_const',
        const=True,
        default=False
    )
    map_group.add_argument(
        '-m',
        '--map',
        help='Filename or URL of a mapping table using the tags #x_pattern (required), #x_substitution (required), #x_tag (optional), and #x_regex (optional), corresponding to the inline options above, for multiple substitutions.',
        metavar='PATH',
        nargs='?'
    )
    add_queries_arg(parser, 'Replace only in rows that match at least one query.')
    args = parser.parse_args(args)
    do_common_args(args)
    with make_source(args, stdin) as source, make_output(args, stdout) as output:
        # External map first, then any inline pattern (one replacement per tag pattern)
        if args.map:
            replacements = hxl.filters.ReplaceDataFilter.Replacement.parse_map(hxl.io.data(args.map, True))
        else:
            replacements = []
        if args.pattern:
            for tag in args.tags:
                replacements.append(hxl.filters.ReplaceDataFilter.Replacement(args.pattern, args.substitution, tag, args.regex))
        # Renamed local from "filter" to avoid shadowing the builtin
        replace_filter = hxl.filters.ReplaceDataFilter(source, replacements, queries=args.query)
        hxl.io.write_hxl(output.output, replace_filter, show_tags=not args.strip_tags)
    return EXIT_OK
def hxlfill_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr):
    """
    Run hxlfill with command-line arguments.
    @param args A list of arguments, excluding the script name
    @param stdin Standard input for the script
    @param stdout Standard output for the script
    @param stderr Standard error for the script
    """
    parser = make_args('Fill empty cells in a HXL dataset')
    parser.add_argument(
        '-t',
        '--tag',
        help='Fill empty cells only in matching columns (default: fill in all)',
        metavar='tagpattern,...',
        type=hxl.model.TagPattern.parse,
    )
    add_queries_arg(parser, 'Fill only in rows that match at least one query.')
    args = parser.parse_args(args)
    do_common_args(args)
    with make_source(args, stdin) as source, make_output(args, stdout) as output:
        # Renamed local from "filter" to avoid shadowing the builtin
        fill_filter = hxl.filters.FillDataFilter(source, pattern=args.tag, queries=args.query)
        hxl.io.write_hxl(output.output, fill_filter, show_tags=not args.strip_tags)
    return EXIT_OK
def hxlexpand_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr):
    """
    Run hxlexpand with command-line arguments.
    @param args A list of arguments, excluding the script name
    @param stdin Standard input for the script
    @param stdout Standard output for the script
    @param stderr Standard error for the script
    """
    parser = make_args('Expand lists in cells by repeating rows')
    parser.add_argument(
        '-t',
        '--tags',
        help='Comma-separated list of tag patterns for columns with lists to expand',
        metavar='tag,tag...',
        type=hxl.model.TagPattern.parse_list,
        nargs="?"
    )
    parser.add_argument(
        "-s",
        '--separator',
        help='string separating list items (defaults to "|")',
        metavar='string',
        default="|"
    )
    parser.add_argument(
        "-c",
        '--correlate',
        help='correlate list values instead of producing a cartesian product',
        action='store_const',
        const=True,
        default=False
    )
    add_queries_arg(parser, 'Limit list expansion to rows matching at least one query.')
    args = parser.parse_args(args)
    do_common_args(args)
    with make_source(args, stdin) as source, make_output(args, stdout) as output:
        # Renamed local from "filter" to avoid shadowing the builtin
        expand_filter = hxl.filters.ExpandListsFilter(source, patterns=args.tags, separator=args.separator, correlate=args.correlate, queries=args.query)
        hxl.io.write_hxl(output.output, expand_filter, show_tags=not args.strip_tags)
    return EXIT_OK
def hxlexplode_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr):
    """Command-line entry point for the hxlexplode script.
    @param args: argument list, without the script name
    @param stdin: standard input stream for the script
    @param stdout: standard output stream for the script
    @param stderr: standard error stream for the script
    """
    arg_parser = make_args('Explode a wide dataset into a long dataset')
    arg_parser.add_argument(
        '-H',
        '--header-att',
        help='attribute to add to the label column (defaults to "label")',
        metavar='att',
        default="label"
    )
    arg_parser.add_argument(
        '-V',
        '--value-att',
        help='attribute to add to the value column (defaults to "value")',
        metavar='tagpattern',
        default="value"
    )
    parsed = arg_parser.parse_args(args)
    do_common_args(parsed)
    with make_source(parsed, stdin) as source, make_output(parsed, stdout) as output:
        explode_filter = hxl.filters.ExplodeFilter(
            source, header_attribute=parsed.header_att, value_attribute=parsed.value_att
        )
        hxl.io.write_hxl(output.output, explode_filter, show_tags=not parsed.strip_tags)
    return EXIT_OK
def hxlimplode_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr):
    """
    Run hxlimplode with command-line arguments.
    (Fixed: the docstring previously said "hxlexplode", a copy-paste error.)
    @param args A list of arguments, excluding the script name
    @param stdin Standard input for the script
    @param stdout Standard output for the script
    @param stderr Standard error for the script
    @returns EXIT_OK on success
    """
    parser = make_args('Implode a long dataset into a wide dataset.')
    # Both the label and value patterns are mandatory: an implode needs to
    # know which column names the new wide columns and which supplies values.
    parser.add_argument(
        '-L',
        '--label',
        help='HXL tag pattern for the label column',
        metavar='tagpattern',
        required=True,
        type=hxl.model.TagPattern.parse,
    )
    parser.add_argument(
        '-V',
        '--value',
        help='HXL tag pattern for the value column',
        metavar='tagpattern',
        required=True,
        type=hxl.model.TagPattern.parse,
    )
    args = parser.parse_args(args)
    do_common_args(args)
    with make_source(args, stdin) as source, make_output(args, stdout) as output:
        filter = hxl.filters.ImplodeFilter(source, label_pattern=args.label, value_pattern=args.value)
        hxl.io.write_hxl(output.output, filter, show_tags=not args.strip_tags)
    return EXIT_OK
def hxlselect_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr):
    """Command-line entry point for the hxlselect script.
    @param args: argument list, without the script name
    @param stdin: standard input stream for the script
    @param stdout: standard output stream for the script
    @param stderr: standard error stream for the script
    """
    # Command-line arguments
    arg_parser = make_args('Filter rows in a HXL dataset.')
    arg_parser.add_argument(
        '-q',
        '--query',
        help='Query expression for selecting rows (may repeat option for logical OR). <op> may be =, !=, <, <=, >, >=, ~, or !~',
        action='append',
        metavar='<tagspec><op><value>',
        required=True
    )
    arg_parser.add_argument(
        '-r',
        '--reverse',
        help='Show only lines *not* matching criteria',
        action='store_const',
        const=True,
        default=False
    )
    parsed = arg_parser.parse_args(args)
    do_common_args(parsed)
    with make_source(parsed, stdin) as source, make_output(parsed, stdout) as output:
        row_filter = hxl.filters.RowFilter(source, queries=parsed.query, reverse=parsed.reverse)
        hxl.io.write_hxl(output.output, row_filter, show_tags=not parsed.strip_tags)
    return EXIT_OK
def hxlsort_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr):
    """
    Run hxlsort with command-line arguments.
    (Fixed: the docstring previously said "hxlcut", a copy-paste error.)
    @param args A list of arguments, excluding the script name
    @param stdin Standard input for the script
    @param stdout Standard output for the script
    @param stderr Standard error for the script
    @returns EXIT_OK on success
    """
    parser = make_args('Sort a HXL dataset.')
    parser.add_argument(
        '-t',
        '--tags',
        help='Comma-separated list of tags to for columns to use as sort keys.',
        metavar='tag,tag...',
        type=hxl.model.TagPattern.parse_list
    )
    parser.add_argument(
        '-r',
        '--reverse',
        help='Flag to reverse sort order.',
        action='store_const',
        const=True,
        default=False
    )
    args = parser.parse_args(args)
    do_common_args(args)
    with make_source(args, stdin) as source, make_output(args, stdout) as output:
        filter = hxl.filters.SortFilter(source, args.tags, args.reverse)
        hxl.io.write_hxl(output.output, filter, show_tags=not args.strip_tags)
    return EXIT_OK
def hxlspec_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr):
    """ Run hxlspec with command-line arguments.
    Args:
        args (list): a list of command-line arguments
        stdin (io.IOBase): alternative standard input (mainly for testing)
        stdout (io.IOBase): alternative standard output (mainly for testing)
        stderr (io.IOBase): alternative standard error (mainly for testing)
    Returns:
        int: EXIT_OK on success, for consistency with the other *_main
        entry points whose result run_script() passes to sys.exit().
    """
    parser = make_args('Process a HXL JSON spec')
    args = parser.parse_args(args)
    do_common_args(args)
    spec = get_json(args.infile)
    # allow_local_ok=True: the spec comes from the trusted command line,
    # so local-file access inside it is permitted.
    source = hxl.io.from_spec(spec, allow_local_ok=True)
    with make_output(args, stdout) as output:
        hxl.io.write_hxl(output.output, source, show_tags=not args.strip_tags)
    # Fixed: previously fell off the end (implicitly returning None) while
    # every sibling entry point returns an explicit exit status.
    return EXIT_OK
def hxltag_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr):
    """Command-line entry point for the hxltag script.
    @param args: argument list, without the script name
    @param stdin: standard input stream for the script
    @param stdout: standard output stream for the script
    @param stderr: standard error stream for the script
    """
    arg_parser = make_args('Add HXL tags to a raw CSV file.')
    arg_parser.add_argument(
        '-a',
        '--match-all',
        help='Match the entire header text (not just a substring)',
        action='store_const',
        const=True,
        default=False
    )
    arg_parser.add_argument(
        '-m',
        '--map',
        help='Mapping expression',
        required=True,
        action='append',
        metavar='Header Text#tag',
        type=hxl.converters.Tagger.parse_spec
    )
    arg_parser.add_argument(
        '-d',
        '--default-tag',
        help='Default tag for non-matching columns',
        metavar='#tag',
        type=hxl.model.Column.parse
    )
    parsed = arg_parser.parse_args(args)
    do_common_args(parsed)
    with make_input(parsed, stdin) as input, make_output(parsed, stdout) as output:
        tagger = hxl.converters.Tagger(
            input, parsed.map, default_tag=parsed.default_tag, match_all=parsed.match_all
        )
        hxl.io.write_hxl(output.output, hxl.io.data(tagger), show_tags=not parsed.strip_tags)
    return EXIT_OK
def hxlvalidate_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr):
    """
    Run hxlvalidate with command-line arguments.

    Validates a HXL dataset against a schema (the default core schema when
    none is given), writing one line per validation issue plus a summary.
    @param args A list of arguments, excluding the script name
    @param stdin Standard input for the script
    @param stdout Standard output for the script
    @param stderr Standard error for the script
    @returns EXIT_OK if no errors were found, EXIT_ERROR otherwise
    """
    parser = make_args('Validate a HXL dataset.')
    parser.add_argument(
        '-s',
        '--schema',
        help='Schema file for validating the HXL dataset (if omitted, use the default core schema).',
        metavar='schema',
        default=None
    )
    parser.add_argument(
        '-a',
        '--all',
        help='Include all rows in the output, including those without errors',
        action='store_const',
        const=True,
        default=False
    )
    parser.add_argument(
        '-e',
        '--error-level',
        help='Minimum error level to show (defaults to "info") ',
        choices=['info', 'warning', 'error'],
        metavar='info|warning|error',
        default='info'
    )
    args = parser.parse_args(args)
    do_common_args(args)
    with make_input(args, stdin) as input, make_output(args, stdout) as output:
        def callback(e):
            """Show a validation error message.

            Invoked once per validation issue. Tallies the issue in the
            module-level Counter (infos/warnings/errors) and, unless it is
            below the requested --error-level, writes a one-line report of
            the form "[severity] row,tag: "value" message".
            """
            # Count by severity; skip output (but not counting of errors)
            # when below the requested reporting threshold.
            if e.rule.severity == 'info':
                if args.error_level != 'info':
                    return
                Counter.infos += 1
            elif e.rule.severity == 'warning':
                if args.error_level == 'error':
                    return
                Counter.warnings += 1
            else:
                Counter.errors += 1
            message = '[{}] '.format(e.rule.severity)
            # Location prefix: row number (1-based for humans) and/or the
            # tag pattern of the rule that fired; "<dataset>" when the
            # issue is not tied to a specific row.
            if e.row:
                if e.rule:
                    message += "{},{}: ".format(e.row.row_number + 1, e.rule.tag_pattern)
                else:
                    message += "{}: ".format(e.row.row_number + 1)
            elif e.rule:
                message += "<dataset>,{}: ".format(e.rule.tag_pattern)
            else:
                message += "<dataset>: "
            if e.value:
                message += '"{}" '.format(e.value)
            if e.message:
                message += e.message
            message += "\n"
            output.write(message)
        output.write("Validating {} with schema {} ...\n".format(args.infile or "<standard input>", args.schema or "<default>"))
        source = hxl.io.data(input)
        # Build the schema with our callback attached, so every issue found
        # during validate() below is reported/counted as it occurs.
        if args.schema:
            with make_input(args, None, args.schema) as schema_input:
                schema = hxl.schema(schema_input, callback=callback)
        else:
            # No schema given: fall back to the default core schema.
            schema = hxl.schema(callback=callback)
        schema.validate(source)
        # Summary line: include only the severity tallies at or above the
        # requested reporting level.
        if args.error_level == 'info':
            output.write("{:,} error(s), {:,} warnings, {:,} suggestions\n".format(Counter.errors, Counter.warnings, Counter.infos))
        elif args.error_level == 'warning':
            output.write("{:,} error(s), {:,} warnings\n".format(Counter.errors, Counter.warnings))
        else:
            output.write("{:,} error(s)\n".format(Counter.errors))
        # Exit status reflects only hard errors, not warnings/infos.
        if Counter.errors > 0:
            output.write("Validation failed.\n")
            return EXIT_ERROR
        else:
            output.write("Validation succeeded.\n")
            return EXIT_OK
#
# Utility functions
#
def run_script(func):
    """Invoke a command-line entry point and exit with its status code."""
    try:
        status = func(sys.argv[1:], STDIN, sys.stdout)
    except KeyboardInterrupt:
        # Ctrl-C: log and exit with the error status instead of a traceback.
        logger.error("Interrupted")
        status = EXIT_ERROR
    sys.exit(status)
def make_args(description, hxl_output=True):
    """Build an argument parser pre-populated with the options shared by all scripts.
    @param description: usage description to show
    @param hxl_output: if True (default), include options for HXL output.
    @returns: an argument parser, partly set up.
    """
    arg_parser = argparse.ArgumentParser(description=description)
    arg_parser.add_argument(
        'infile',
        help='HXL file to read (if omitted, use standard input).',
        nargs='?'
    )
    if hxl_output:
        arg_parser.add_argument(
            'outfile',
            help='HXL file to write (if omitted, use standard output).',
            nargs='?'
        )
    arg_parser.add_argument(
        '--sheet',
        help='Select sheet from a workbook (1 is first sheet)',
        metavar='number',
        type=int,
        nargs='?'
    )
    arg_parser.add_argument(
        '--selector',
        help='JSONPath expression for starting point in JSON input',
        metavar='path',
        nargs='?'
    )
    arg_parser.add_argument(
        '--http-header',
        help='Custom HTTP header to send with request',
        metavar='header',
        action='append'
    )
    if hxl_output:
        arg_parser.add_argument(
            '--remove-headers',
            help='Strip text headers from the CSV output',
            action='store_const',
            const=True,
            default=False
        )
        arg_parser.add_argument(
            '--strip-tags',
            help='Strip HXL tags from the CSV output',
            action='store_const',
            const=True,
            default=False
        )
    arg_parser.add_argument(
        "--ignore-certs",
        help="Don't verify SSL connections (useful for self-signed)",
        action='store_const',
        const=True,
        default=False
    )
    arg_parser.add_argument(
        '--log',
        help='Set minimum logging level',
        metavar='debug|info|warning|error|critical|none',
        choices=['debug', 'info', 'warning', 'error', 'critical'],
        default='error'
    )
    return arg_parser
def do_common_args(args):
    """Apply the options shared by all scripts (currently just logging setup)."""
    log_level = args.log.upper()
    logging.basicConfig(
        format='%(levelname)s (%(name)s): %(message)s',
        level=log_level,
    )
def make_source(args, stdin=STDIN):
    """Wrap the command-line input in a HXL data source."""
    return hxl.io.data(make_input(args, stdin))
def make_input(args, stdin=sys.stdin, url_or_filename=None):
    """Build a raw input object from the parsed command-line options."""
    source_spec = url_or_filename if url_or_filename is not None else args.infile
    # Convert the human-friendly 1-based sheet number to a 0-based index.
    sheet_index = None if args.sheet is None else args.sheet - 1
    return hxl.io.make_input(
        source_spec or stdin,
        sheet_index=sheet_index,
        selector=args.selector,
        allow_local=True,
        http_headers=make_headers(args),
        verify_ssl=(not args.ignore_certs)
    )
def make_output(args, stdout=sys.stdout):
    """Choose an output destination: a named file if given, else the stream."""
    return FileOutput(args.outfile) if args.outfile else StreamOutput(stdout)
| [
37811,
198,
47581,
14750,
198,
11006,
2185,
1130,
7899,
198,
16784,
1853,
198,
198,
1212,
318,
257,
1263,
11,
13400,
8265,
284,
1104,
262,
9195,
71,
87,
75,
198,
41947,
14750,
11,
1390,
357,
12417,
306,
8,
4578,
32096,
13,
198,
198,
... | 2.330842 | 13,650 |