content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
from setuptools import setup, find_packages

# splitlines() (unlike readlines()) strips the trailing newline from each
# requirement specifier, so install_requires gets clean requirement strings.
with open('requirements.txt') as f:
    requirements = f.read().splitlines()

# Long description shown on PyPI comes straight from the README.
with open('README.md') as f:
    long_description = f.read()

setup(
    name='corgi-cli',
    version='1.0.0',
    author='Raleigh Wise',
    author_email='vibhu4agarwal@gmail.com',
    url='https://github.com/l3gacyb3ta/corgi',
    description='A modern doggo.ninja cli.',
    long_description=long_description,
    long_description_content_type="text/markdown",
    license='UNLICENSE',
    packages=find_packages(),
    # `corgi` console command dispatches to corgi_cli.corgi:parser.
    entry_points={
        'console_scripts': [
            'corgi = corgi_cli.corgi:parser'
        ]
    },
    classifiers=(
        "Programming Language :: Python :: 3",
        "Operating System :: OS Independent",
    ),
    keywords='doggo.ninja doggo ninja cli bone',
    install_requires=requirements,
    zip_safe=False
)
| [
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
198,
198,
4480,
1280,
10786,
8897,
18883,
13,
14116,
11537,
355,
277,
25,
198,
197,
8897,
18883,
796,
277,
13,
961,
6615,
3419,
198,
198,
4480,
1280,
10786,
15675,
11682,
13,
9... | 2.507645 | 327 |
from django.conf import settings
from django.conf.urls import url
from django.conf.urls.static import static
from django.contrib import admin
from django.contrib.auth.views import login, logout
from chat.views import index

# URL routes for the chat project; static() appends routes for serving
# STATIC_URL from STATIC_ROOT.
# NOTE(review): the function-based `login`/`logout` views were removed in
# Django 2.1 -- confirm the project's Django version.
urlpatterns = [
    url(r'^$', index),
    url(r'^accounts/login/$', login),
    url(r'^accounts/logout/$', logout),
    url(r'^admin/', admin.site.urls),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| [
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
19016,
198,
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
13,
12708,
1330,
9037,
198,
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,... | 2.748466 | 163 |
# Read two integers and print the sum of the odd numbers in [min, max).
print("Digite dois numeros: ")
x = int(input())
y = int(input())

# Normalize the bounds so that x <= y.
if x > y:
    x, y = y, x

# Sum every odd value in the half-open interval [x, y).
soma = sum(i for i in range(x, y) if i % 2 != 0)
print(f"Soma dos impares = {soma}")
4798,
7203,
19511,
578,
466,
271,
5470,
418,
25,
366,
8,
198,
87,
796,
493,
7,
15414,
28955,
198,
88,
796,
493,
7,
15414,
28955,
198,
198,
361,
2124,
1875,
331,
25,
198,
220,
220,
220,
4161,
6888,
796,
2124,
198,
220,
220,
220,
... | 1.973684 | 114 |
import logging
| [
11748,
18931,
628
] | 5.333333 | 3 |
from reportlab.lib.pagesizes import letter
from reportlab.lib.units import inch
from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer, Table, TableStyle
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.lib.enums import TA_RIGHT, TA_CENTER
from reportlab.lib import colors
from energyusage.RAPLFile import RAPLFile
import energyusage.convert as convert
# Year of the energy-mix data cited in the generated report.
year = "2016"

styles = getSampleStyleSheet()
# Paragraph styles for the report's heading levels (title > header > subheader).
TitleStyle = ParagraphStyle(name='Normal', fontSize=16, alignment= TA_CENTER)
HeaderStyle = ParagraphStyle(name='Normal',fontSize=14)
SubheaderStyle = ParagraphStyle(name='Normal',fontSize=12)
DescriptorStyle = styles["BodyText"]

# Flowables accumulated by title()/header()/subheader()/descriptor() and
# finally consumed by generate() via doc.build(Elements).
Elements = []
def title(text, style=TitleStyle, klass=Paragraph, sep=0.3):
    """Append the report title (rendered bold) to the module-level Elements list."""
    # `sep` is accepted for signature symmetry with the other section helpers
    # but is not used here.
    Elements.append(klass(bold(text), style))
def header(text, style=HeaderStyle, klass=Paragraph, sep=0.3, spaceAfter=False):
    """Append a section header: a spacer, the bold heading, and optionally a
    smaller trailing spacer when spaceAfter is True."""
    Elements.append(Spacer(0, sep * inch))
    Elements.append(klass(bold(text), style))
    if spaceAfter:
        Elements.append(Spacer(0, sep / 1.5 * inch))
def subheader(text, style=SubheaderStyle, klass=Paragraph, sep=0.2):
    """Append a subsection header (spacer followed by the bold heading)."""
    Elements.append(Spacer(0, sep * inch))
    Elements.append(klass(bold(text), style))
def descriptor(text, style=DescriptorStyle, klass=Paragraph, sep=0.05, spaceBefore=True, spaceAfter = True):
    """Append descriptor text for a (sub)section, optionally padded with the
    same spacer before and/or after the paragraph."""
    spacer = Spacer(0, 1.5 * sep * inch)
    if spaceBefore:
        Elements.append(spacer)
    Elements.append(klass(text, style))
    if spaceAfter:
        Elements.append(spacer)
def generate(location, watt_averages, breakdown, emission, state_emission):
    """ Generates pdf report

    Parameters:
        location (str): user's location
        watt_averages (list): list of baseline, total, process wattage averages
        breakdown (list): [% coal, % oil/petroleum, % natural gas, % low carbon]
        emission (float): kgs of CO2 emitted
        state_emission: state-level carbon equivalency in lbs CO2/MWh (eGRID);
            falsy when no state-level figure applies
    """
    # Initializing document
    doc = SimpleDocTemplate("energy-usage-report.pdf",pagesize=letter,
                            rightMargin=1*inch,leftMargin=1*inch,
                            topMargin=1*inch,bottomMargin=1*inch)

    title("Energy Usage Report")

    header("Final Readings")
    descriptor("Readings shown are averages of wattage over the time period", spaceAfter=True)
    baseline_average, process_average, difference_average = watt_averages
    readings = [['Measurement', 'Wattage'],
                ['Baseline', "{:.2f} watts".format(baseline_average)],
                ['Total', "{:.2f} watts".format(process_average)],
                ['Process', "{:.2f} watts".format(difference_average)]]
    # Disabled per-RAPL-file readings table, kept for reference.
    '''
    readings = [['Component', 'Baseline', 'Total', 'Process']]
    for file in raplfiles:
        line = ["{}".format(file.name), "{:.2f} watts".format(file.baseline_average),
                "{:.2f} watts".format(file.process_average),
                "{:.2f} watts".format(file.process_average-file.baseline_average)]
        if "Package" in file.name:
            readings.insert(1, line)
        else:
            readings.append(line)
    '''
    if state_emission:
        # State-level (eGRID) data: breakdown values arrive pre-formatted.
        coal, oil, natural_gas, low_carbon = breakdown
        energy_mix = [['Energy Source', 'Percentage'],
                      ['Coal', "{}%".format(coal)],
                      ['Oil', "{}%".format(oil)],
                      ['Natural gas', "{}%".format(natural_gas)],
                      ['Low carbon', "{}%".format(low_carbon)]]
        source = "eGRID"
        equivs = [['Carbon Equivalency', str(state_emission) + ' lbs/MWh']]
    else:
        # National (US EIA) data: percentages formatted to two decimals.
        coal, petroleum, natural_gas, low_carbon = breakdown
        energy_mix = [['Energy Source', 'Percentage'],
                      ['Coal', "{:.2f}%".format(coal)],
                      ['Petroleum', "{:.2f}%".format(petroleum)],
                      ['Natural gas', "{:.2f}%".format(natural_gas)],
                      ['Low carbon', "{:.2f}%".format(low_carbon)]]
        source = "US EIA"
        equivs = [['Coal', '995.725971 kg CO2/MWh'],
                  ['Petroleum', '816.6885263 kg CO2/MWh'],
                  ['Natural gas', '743.8415916 kg CO2/MWh']]
    # NOTE(review): table() is not defined in this chunk -- presumably a
    # sibling helper in this module; confirm its signature (header kwarg).
    table(readings)

    header("Energy Data")
    descriptor("Energy mix in {} based on {} {} data".format(location, year, source))
    table(energy_mix)

    emissions = [['Emission', 'Amount'],
                 ['Effective emission', "{:.2e} kg CO2".format(emission)],
                 ['Equivalent miles driven', "{:.2e} miles".format(convert.carbon_to_miles(emission))],
                 ['Equivalent minutes of 32-inch LCD TV watched', "{:.2e} minutes".format(convert.carbon_to_tv(emission))],
                 ['Percentage of CO2 used in a US household/day', \
                  "{:.2e}%".format(convert.carbon_to_home(emission))]]
    header("Emissions", spaceAfter=True)
    table(emissions)

    header("Assumed Carbon Equivalencies", spaceAfter=True)
    # descriptor("Formulas used for determining amount of carbon emissions")
    table(equivs, header=False)

    doc.build(Elements)
| [
6738,
989,
23912,
13,
8019,
13,
31126,
4340,
1330,
3850,
198,
6738,
989,
23912,
13,
8019,
13,
41667,
1330,
11111,
198,
6738,
989,
23912,
13,
489,
265,
4464,
385,
1330,
17427,
23579,
30800,
11,
2547,
6111,
11,
1338,
11736,
11,
8655,
11... | 2.358974 | 2,223 |
import pytest
import numpy as np
import reciprocalspaceship as rs
import gemmi
# Parametrized over two lattices (orthorhombic P212121, rhombohedral R32),
# anomalous flag, and full_asu flag: 2 x 2 x 2 = 8 cases.
@pytest.mark.parametrize("cell_and_spacegroup", [
    (gemmi.UnitCell(10., 20., 30., 90., 90., 90.), gemmi.SpaceGroup('P 21 21 21')),
    (gemmi.UnitCell(30., 30., 30., 90., 90., 120.), gemmi.SpaceGroup('R 32')),
])
@pytest.mark.parametrize("anomalous", [False, True])
@pytest.mark.parametrize("full_asu", [False, True])
def test_compute_redundancy(cell_and_spacegroup, anomalous, full_asu):
    """
    Test reciprocalspaceship.utils.compute_redundancy.
    """
    dmin = 5.  # resolution cutoff passed to generate_reciprocal_asu
    cell,spacegroup = cell_and_spacegroup
    hkl = rs.utils.generate_reciprocal_asu(cell, spacegroup, dmin, anomalous=anomalous)
    # Random multiplicity in [0, 10) per unique reflection.
    # NOTE(review): unseeded RNG -- data differ between runs, though the
    # assertions below hold regardless.
    mult = np.random.choice(10, size=len(hkl))
    # Observed index list: each unique hkl repeated `mult` times.
    hobs = np.repeat(hkl, mult, axis=0)
    hunique, counts = rs.utils.compute_redundancy(hobs, cell, spacegroup, full_asu=full_asu, anomalous=anomalous)
    assert hunique.dtype == np.int32
    assert counts.dtype == np.int32
    assert len(hkl) == len(mult)
    # Only reflections that were actually observed get a positive count,
    # and each positive count equals the multiplicity it was generated with.
    assert len(np.unique(hobs, axis=0)) == np.sum(counts > 0)
    assert np.all(counts[counts>0] == mult[mult > 0])
| [
11748,
12972,
9288,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
36564,
874,
43076,
1056,
355,
44608,
198,
11748,
16840,
11632,
198,
198,
31,
9078,
9288,
13,
4102,
13,
17143,
316,
380,
2736,
7203,
3846,
62,
392,
62,
13200,
8094,
1600... | 2.476615 | 449 |
from django.shortcuts import render
# Create your views here.
from rest_framework.views import APIView
from rest_framework.response import Response
from celery import shared_task
@shared_task
| [
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
198,
198,
2,
13610,
534,
5009,
994,
13,
198,
6738,
1334,
62,
30604,
13,
33571,
1330,
3486,
3824,
769,
198,
6738,
1334,
62,
30604,
13,
26209,
1330,
18261,
628,
198,
6738,
18725,
1924,
1... | 3.823529 | 51 |
from django.shortcuts import render
from django.views import View
from django.views.generic.list import ListView
from django.http import JsonResponse
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.db.models import (Q, F, Case, When, PositiveIntegerField)
from django.http import Http404
from django.db import models
from ordered_set import OrderedSet
from .models import ArrayPosition
from .decorators import login_require_or_401
from .models import (Category,
SubCategory,
Favourite,
Card,
TypesOfCard)
class IndexPageView(View):
    """View for the main (landing) page of the webapp."""
class CategoryListView(ListView):
    """
    List of all categories
    """
    # Paginated list (6 per page) of Category objects, rendered with
    # 'core/category_list.html'; exposed to the template as `categories`.
    model = Category
    paginate_by = 6
    template_name = 'core/category_list.html'
    context_object_name = 'categories'
class SubCategoryListView(ListView):
    """
    List of all subcategories for a given category
    """
    # Paginated list (6 per page) of SubCategory objects, rendered with
    # 'core/subcategory_list.html'; exposed to the template as `subcategories`.
    model = SubCategory
    paginate_by = 6
    template_name = 'core/subcategory_list.html'
    context_object_name = 'subcategories'
"""
Options for user to sort the cards on the page with cards, possible options:
1) 'DD' - cards that are showed by the ID of the cards in decreasing order
2) 'W'- firstly show card which have type `word`, all remaining cards sort using 'DD'
3) 'D'- firstly show card which have type `dialogue`, all remaining cards sort using 'DD'
4) 'S'- firstly show card which have type `sentence`, all remaining cards sort using 'DD'
"""
# (code, label) pairs; the first entry ('DD', 'Default') is the default order.
SORTING_CARD_OPTIONS = [('DD', 'Default')] + TypesOfCard.choices
class CardListView(ListView):
    """
    List of all cards for a given subcategory
    """
    # Paginated list (10 per page) of Card objects, rendered with
    # 'core/card_list.html'; exposed to the template as `cards`.
    model = Card
    paginate_by = 10
    template_name = 'core/card_list.html'
    context_object_name = 'cards'
def find_sorting_method(sort_param, *, options) -> tuple:
    """Return the (code, label) sorting option matching *sort_param*.

    Parameters:
        sort_param: value of the GET parameter selecting the sort order.
        options: sequence of (code, label) pairs; the first entry is the default.

    Returns:
        The (code, label) pair whose code equals *sort_param*, or options[0]
        when no code matches.
    """
    # The original annotation `-> (str, str)` evaluated to a tuple of classes,
    # which is not a valid type annotation; `-> tuple` keeps the same intent.
    # next() with a default replaces the original for/else scan.
    return next((method for method in options if method[0] == sort_param),
                options[0])
def sort_cards_by_param(queryset, sort_param) -> tuple:
    """Sort a queryset of cards according to *sort_param*.

    Parameters:
        queryset: Card queryset to sort.
        sort_param: sorting code from the GET parameter (see SORTING_CARD_OPTIONS).

    Returns:
        (sorted queryset, (code, label) pair actually used for sorting).
    """
    # The original annotation `-> (list, (str, str))` evaluated to a tuple of
    # classes, not a valid type annotation; replaced with `-> tuple`.
    sorted_by = find_sorting_method(sort_param, options=SORTING_CARD_OPTIONS)
    if sorted_by == SORTING_CARD_OPTIONS[0]:
        # Default order requested: the queryset is returned untouched.
        return queryset, sorted_by
    # Cards of the requested type get relevancy 1, all others 2; within each
    # relevancy bucket, newest (highest id) first.
    result = queryset.annotate(
        relevancy=(Case(When(Q(type=sort_param), then=1),
                        When(~Q(type=sort_param), then=2),
                        output_field=PositiveIntegerField())
                   )).order_by('relevancy', '-id')
    return result, sorted_by
def filter_cards_by_q(queryset, q):
    """Keep only cards whose content or pronunciation transliteration
    contains *q* (case-insensitive)."""
    matches = (Q(content__icontains=q)
               | Q(translit_of_pronunciation__icontains=q))
    return queryset.filter(matches)
def generate_tmpcontext_sorting(sorted_by, sort_options, **kwargs):
""" Function to generate a temporary context for sorting of cards """
tmpcontext = {
'sorted_by': sorted_by,
'sort_options': sort_options
}
for key, val in kwargs.items():
tmpcontext[key] = val
return tmpcontext
def union_favourites_and_cards(favourites, cards):
    """Annotate each card in *cards* with a boolean `favourite` attribute.

    Parameters:
        favourites: iterable of favourite objects, each with a `.card.id`.
        cards: object exposing `.all()` (e.g. a queryset) yielding cards.

    Returns:
        The result of `cards.all()` with `card.favourite` set on every card.
    """
    # Build the id set once: set membership tests are O(1), so marking the
    # cards is linear overall.
    favourite_ids = {favour.card.id for favour in favourites}
    resulting_cards = cards.all()
    for card in resulting_cards:
        # Flag telling the template whether this card is a user favourite.
        card.favourite = card.id in favourite_ids
    return resulting_cards
class FavouriteView(ListView):
"""
List with all user's favourite cards
"""
model = Favourite
paginate_by = 10
template_name = 'core/favourite_list.html'
context_object_name = 'cards'
@method_decorator(login_required)
def search_among_queryset(queryset, q, *, filter_dict):
    """Apply *filter_dict* to *queryset* when a search term *q* was given.

    With a falsy *q* there is nothing to search for, so the queryset is
    returned untouched.
    """
    if not q:
        return queryset
    return queryset.filter(**filter_dict)
def generate_tmpcontext_for_search(q, placeholder):
    """Return the template-context fragment for the search box
    (placeholder text plus the current query)."""
    return {
        'searchPlaceholder': placeholder,
        'q': q,
    }
class FavouritesControlView(View):
    """
    View to add to / remove from user's favourite collection of cards.

    POST adds the card identified by kwargs['card_id']; DELETE removes it.
    Both return an empty JSON body on success and a JSON error otherwise.
    """

    def post(self, request, **kwargs):
        """Add a card to the requesting user's favourite collection."""
        payload = {}
        card = self._validate_card_id(request, **kwargs)
        if not card:
            # Unknown card id -> 409 error response.
            return self._return_error(request, payload)
        if Favourite.objects.filter(card=card,
                                    owner=request.user).exists():
            # Already in the collection -> 409 error response.
            return self._return_error(request, payload)
        # Persist the new favourite and acknowledge with an empty body.
        Favourite(card=card, owner=request.user).save()
        return JsonResponse(payload, status=200)

    def delete(self, request, **kwargs):
        """Remove a card from the requesting user's favourite collection."""
        payload = {}
        card = self._validate_card_id(request, **kwargs)
        if not card:
            # Unknown card id -> 409 error response.
            return self._return_error(request, payload)
        favourite = Favourite.objects.filter(card=card,
                                             owner=request.user).first()
        if not favourite:
            # Never favourited or already removed -> 410 Gone.
            return self._return_error(request, payload, status=410)
        favourite.delete()
        return JsonResponse(payload, status=200)

    def _validate_card_id(self, request, **kwargs):
        """Return the Card matching kwargs['card_id'], or None if absent."""
        candidates = Card.objects.filter(pk=kwargs.get('card_id'))
        if not candidates.exists():
            return None
        return candidates.first()

    def _return_error(self, request, response, status=409):
        """Return a JSON response carrying a generic error message."""
        response['error'] = 'Sorry, an unknown error occurred.'
        return JsonResponse(response, status=status)
@method_decorator(login_require_or_401)
class TypesOfSearchSortingOptions(models.TextChoices):
    """
    Sorting options for the content of the global search view.
    Meaning of each one:
    1) 'DF' - default sorting, initially show all blocks for which we found something,
    all remaining ones show at the end
    2) 'CR' - firstly show all cards, then show remaining ones using 'DF'
    3) 'CT' - firstly show all categories, then show remaining ones using 'DF'
    4) 'SB' - firstly show all subcategories, then show remaining ones using 'DF'
    """
    # Each member is (stored value, human-readable label).
    DEFAULT = 'DF', 'Default'
    CARDS = 'CR', 'Cards'
    CATEGORIES = 'CT', 'Categories'
    SUBCATEGORIES = 'SB', 'Subcategories'
class SearchResultView(View):
    """
    Global search view
    """
    # NOTE(review): no handler methods are visible in this chunk -- the
    # view's get/post implementation appears to be defined elsewhere or
    # truncated; confirm against the full source.
def extract_and_trip_question(requestData, paramName='q', defaultVal=None):
    """Fetch a query parameter and strip surrounding whitespace.

    Falsy results (None, '') are returned unchanged; any truthy string --
    including a truthy *defaultVal* -- is stripped.
    """
    value = requestData.get(paramName, defaultVal)
    return value.strip() if value else value
| [
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
198,
6738,
42625,
14208,
13,
33571,
1330,
3582,
198,
6738,
42625,
14208,
13,
33571,
13,
41357,
13,
4868,
1330,
7343,
7680,
198,
6738,
42625,
14208,
13,
4023,
1330,
449,
1559,
31077,
198,
... | 2.592983 | 3,506 |
from django.contrib import admin
from django.urls import path,include
from . import views

# URL routes: landing page, signup/login/logout flows, and object deletion.
urlpatterns = [
    path('', views.index, name='index'),
    path('signup_page/',views.signuppage,name='signup_page'),  # signup form
    path('login_page/',views.loginpage,name='login_page'),     # login form
    path('login/',views.handleLogin,name='login'),             # login submit
    path('logout/',views.handleLogout,name='logout'),
    path('signup/',views.handleSignup,name='signup'),          # signup submit
    # NOTE(review): no trailing slash after <pk>, unlike the other routes.
    path('delete/<str:pk>', views.delete, name='delete')
]
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
11,
17256,
198,
6738,
764,
1330,
5009,
198,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
3108,
10786,
3256,
5009,
13,
9630,
11,
... | 2.72067 | 179 |
"""
Make use of a corpus.
The advanced-API provides extra functionality of top of the core of TF.
The most notable things are downloading corpus data and methods for (pretty)
display corpus material.
The real power of the advanced API is unleashed when there are well-tuned configuration
settings for a corpus, and possibly some supporting application code and CSS styling.
This power can be invoked by a very simple command: `use("org/repo")`.
For a detailed description, see `tf.about.usefunc`.
"""
from .parameters import backendRep
from .advanced.app import findApp
# START AN APP
def use(appName, *args, backend=None, **kwargs):
    """Make use of a corpus.

    For a detailed description, see `tf.about.usefunc`.

    Parameters
    ----------
    appName: string
        Corpus app spec. May be prefixed with `data:` (the remainder is a
        bare data location) or `app:`; otherwise an optional checkout
        specifier may follow the first `:`.
    backend: string, optional None
        If present, it is `github` or `gitlab`
        or a GitLab instance such as `gitlab.huc.knaw.nl`.
        If absent, `None` or empty, it is `github`.
    args:
        Do not pass any other positional argument!
    kwargs:
        Used to initialize the TF-app that we use.
        That is either an uncustomized `tf.advanced.app.App` or
        a customization of it.

    Returns
    -------
    A: object
        The object whose attributes and methods constitute the advanced API.

    See Also
    --------
    tf.advanced.app.App
    """
    dataLoc = None
    checkoutApp = None
    if appName.startswith("data:"):
        # "data:<location>": a pure data location, no app name.
        dataLoc = appName[5:]
        appName = None
    elif not appName.startswith("app:"):
        # Plain "org/repo[:checkout]": split off the optional checkout
        # specifier after the first colon (partition == split(":", 1) padded
        # with "" when there is no colon).
        (appName, _, checkoutApp) = appName.partition(":")
    # An "app:" spec keeps appName as-is with dataLoc/checkoutApp both None.
    backend = backendRep(backend, 'norm')
    return findApp(appName, checkoutApp, dataLoc, backend, False, *args, **kwargs)
| [
37811,
198,
12050,
779,
286,
257,
35789,
13,
198,
198,
464,
6190,
12,
17614,
3769,
3131,
11244,
286,
1353,
286,
262,
4755,
286,
24958,
13,
198,
464,
749,
12411,
1243,
389,
22023,
35789,
1366,
290,
5050,
329,
357,
37784,
8,
198,
13812,... | 2.786033 | 673 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import logging
import functools
from termcolor import colored
from collections import OrderedDict
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch._utils
import torch.nn.functional as F
import torch.autograd as autograd
sys.path.append("lib/")
from utils.utils import get_world_size, get_rank
sys.path.append("../")
from lib.optimizations import VariationalHidDropout2d, weight_norm
from lib.solvers import anderson, broyden
from lib.jacobian import jac_loss_estimate, power_method
from lib.layer_utils import list2vec, vec2list, norm_diff, conv3x3, conv5x5
# NOTE(review): by its name, momentum for BatchNorm layers -- its use is not
# visible in this chunk; confirm before changing.
BN_MOMENTUM = 0.1
BLOCK_GN_AFFINE = True # Don't change the value here. The value is controlled by the yaml files.
FUSE_GN_AFFINE = True # Don't change the value here. The value is controlled by the yaml files.
POST_GN_AFFINE = True # Don't change the value here. The value is controlled by the yaml files.
DEQ_EXPAND = 5 # Don't change the value here. The value is controlled by the yaml files.
NUM_GROUPS = 4 # Don't change the value here. The value is controlled by the yaml files.

# Module-level logger for this file.
logger = logging.getLogger(__name__)

# Residual-block registry keyed by config string.
# NOTE(review): BasicBlock is not defined in this chunk -- presumably defined
# elsewhere in the module; confirm before use.
blocks_dict = { 'BASIC': BasicBlock }
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,
11593,
37443,
834,
1330,
7297,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
18931,
198,
11748,
1257,
310,
10141,
198,... | 3.033632 | 446 |
import base64
import numpy as np
import urllib.request
import json
import subprocess as sp
from playsound import playsound
# copy from https://qiita.com/to_obara/items/d8d5c92c2ea85a197e2d
def get_token() -> str:
    """Return a Google Cloud access token for the Text-To-Speech API.

    Runs `gcloud auth application-default print-access-token` in a shell
    (gcloud must already be authenticated and set up) and returns its
    stdout with surrounding whitespace stripped.
    """
    completed = sp.run('gcloud auth application-default print-access-token',
                       shell=True,
                       stdout=sp.PIPE,
                       stderr=sp.PIPE,
                       encoding='utf-8')
    # Surface any gcloud diagnostics for debugging.
    print("[[TTS]]", completed.stderr)
    return completed.stdout.strip()
def makeRequestDict(txt: str) -> dict:
    """Build the request body for Google Text-To-Speech synthesis.

    SSML is not supported.

    Args:
        txt: text to synthesize.

    Returns:
        dict holding the audio config, the (Japanese) voice selection,
        and the input text.
    """
    return {
        "audioConfig": {
            "audioEncoding": "LINEAR16",
            "pitch": 0,
            "speakingRate": 1
        },
        "voice": {
            "languageCode": "ja-JP",
            "name": "ja-JP-Standard-B"
        },
        "input": {"text": txt},
    }
def output_mp3(dat: dict, ofile: str) -> None:
    """Decode the TTS response and write the audio bytes to *ofile*.

    Args:
        dat: parsed JSON response; its "audioContent" field holds the
            base64-encoded audio.
        ofile: path of the file to write the raw audio to.
    """
    binary = base64.b64decode(dat["audioContent"])
    # Write the decoded bytes directly: the original routed them through a
    # numpy uint8 array (np.frombuffer) before writing, which produced
    # byte-identical output with an unnecessary dependency and copy.
    with open(ofile, "wb") as f:
        f.write(binary)
if __name__ == "__main__":
    # Read the text to synthesize from a fixed input file.
    with open("text.txt", "r") as fin:
        txt = fin.read()
    # Drops the final character (assumed to be the trailing newline) --
    # TODO confirm; this loses a real character when the file has no newline.
    txt = txt[:-1]
    # NOTE(review): gtts() is not defined in this chunk -- presumably a
    # wrapper around get_token/makeRequestDict/output_mp3 defined elsewhere;
    # confirm before running.
    gtts(txt, "result2.mp3")
    playsound("result2.mp3")
| [
11748,
2779,
2414,
198,
11748,
299,
32152,
355,
45941,
198,
198,
11748,
2956,
297,
571,
13,
25927,
198,
11748,
33918,
198,
11748,
850,
14681,
355,
599,
198,
6738,
5341,
633,
1330,
5341,
633,
198,
198,
2,
4866,
422,
3740,
1378,
40603,
... | 1.750277 | 901 |
from rest_framework import status
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import permissions
from addressapp.serializers.address import DistrictSerializer,MunicipalitySerializer,\
WardSerializer,WardSerializerUpdate
from addressapp.models import Address, District, Municipality ,Ward
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import filters
# class MunicipalityList(APIView):
# permission_classes = (IsPostOrIsAuthenticated,)
# serializer_class = AddressSerializer
# def get(self, request, district,format=None):
# address_obj = Address.objects.filter(district=district)
# serializer = AddressSerializer(address_obj, many=True, \
# context={'request': request})
# return Response(serializer.data)
| [
6738,
1334,
62,
30604,
1330,
3722,
198,
6738,
1334,
62,
30604,
13,
33571,
1330,
3486,
3824,
769,
198,
6738,
1334,
62,
30604,
13,
26209,
1330,
18261,
198,
6738,
1334,
62,
30604,
1330,
21627,
198,
198,
6738,
2209,
1324,
13,
46911,
11341,
... | 3.305019 | 259 |
import swapper
from cities.models import (
Continent, Country, Region, Subregion, City, District, PostalCode,
AlternativeName)
| [
11748,
1509,
11463,
198,
198,
6738,
4736,
13,
27530,
1330,
357,
198,
220,
220,
220,
45778,
11,
12946,
11,
17718,
11,
3834,
36996,
11,
2254,
11,
5665,
11,
32881,
10669,
11,
198,
220,
220,
220,
27182,
5376,
8,
628
] | 3.512821 | 39 |
import csv
from nltk.stem import PorterStemmer
import numpy as np
# Per-class lists of raw training-document texts (11 classes downstream).
train_docs_class_list = []
# Recipe ids 1..1009; ids referenced by trainingdata.txt are removed below,
# leaving the unreferenced ones as test documents.
test_docs_index_list = list(range(1, 1010))
# Flat list of every training document's raw text (all classes).
train_docs_list = []

with open('trainingdata.txt') as csvfile:
    rows = csv.reader(csvfile, delimiter=' ')
    for row in rows:
        file_list = []
        # Columns 1..-2 of each row are document ids; each id names a file
        # recipe_dataset/<id>.txt. (First/last columns are skipped --
        # presumably a label and a trailing field; confirm file format.)
        for col in row[1:-1]:
            f = open('recipe_dataset/' + col + '.txt', mode='r')
            file_text = f.read()
            file_list.append(file_text)
            train_docs_list.append(file_text)
            f.close()
            # An id used for training is no longer a test document.
            test_docs_index_list.remove(int(col))
        train_docs_class_list.append(file_list)
# Preprocess every training document: tokenize, lowercase, stem, drop stopwords.
# NOTE(review): tokenization/lowercasing/stemming/remove_stopwords are not
# defined in this chunk -- presumably defined elsewhere in the file.
text_all_class_list = []
for class_index in range(11):
    text_all_list = []
    for doc in train_docs_class_list[class_index]:
        token_list = tokenization(doc)
        lower_token_list = lowercasing(token_list)
        stemmed_token_list = stemming(lower_token_list)
        remove_stopword_list = remove_stopwords(stemmed_token_list)
        text_all_list.append(remove_stopword_list)
    text_all_class_list.append(text_all_list)
# Chi-square feature selection: score every (term, class) pair, then keep the
# 500 highest-scoring terms as the working vocabulary V.
chi_square_list = []
# NOTE(review): extract_vocabulary is not defined in this chunk.
V = extract_vocabulary(train_docs_list)
for class_index in range(11):
    for term in V:
        # 2x2 contingency counts: term presence vs membership in this class.
        on_topic_present = 0
        off_topic_present = 0
        on_topic_absent = 0
        off_topic_absent = 0
        for docs_class_index in range(11):
            for texts in text_all_class_list[docs_class_index]:
                if (term in texts) and (docs_class_index == class_index):
                    on_topic_present += 1
                elif (term in texts) and (docs_class_index != class_index):
                    off_topic_present += 1
                elif (term not in texts) and (docs_class_index == class_index):
                    on_topic_absent += 1
                elif (term not in texts) and (docs_class_index != class_index):
                    off_topic_absent += 1
        # Marginals of the contingency table.
        N = on_topic_present + off_topic_present + on_topic_absent + off_topic_absent
        on_topic = on_topic_present + on_topic_absent
        off_topic = off_topic_present + off_topic_absent
        present = on_topic_present + off_topic_present
        absent = on_topic_absent + off_topic_absent
        # Expected cell counts under independence: N * P(row) * P(column).
        expected_on_topic_present = N * (on_topic/N) * (present/N)
        expected_off_topic_present = N * (off_topic/N) * (present/N)
        expected_on_topic_absent = N * (on_topic/N) * (absent/N)
        expected_off_topic_absent = N * (off_topic/N) * (absent/N)
        # Chi-square statistic: sum over cells of (observed-expected)^2/expected.
        chi_square = ((
            (on_topic_present-expected_on_topic_present)**2)/expected_on_topic_present) + ((
            (off_topic_present-expected_off_topic_present)**2)/expected_off_topic_present) + ((
            (on_topic_absent-expected_on_topic_absent)**2)/expected_on_topic_absent) + ((
            (off_topic_absent-expected_off_topic_absent)**2)/expected_off_topic_absent)
        chi_square_list.append(
            {'term': term, 'class_index': class_index, 'chi-square': chi_square})

# Keep the 500 best-scoring (term, class) records and collect their terms
# (duplicates possible when a term scores highly for several classes).
term_list = sorted(
    chi_square_list, key=lambda i: i['chi-square'], reverse=True)[:500]
new_train_docs_list = []
for term in term_list:
    new_train_docs_list.append(term['term'])
V = new_train_docs_list
# Multinomial Naive Bayes training with Laplace (add-one) smoothing.
# NOTE(review): count_docs / count_docs_in_class /
# concatenate_text_of_all_docs_in_class / count_tokens_of_term are not
# defined in this chunk -- presumably defined elsewhere in the file.
N = count_docs(train_docs_list)
prior = []     # prior[c]: fraction of training docs in class c
condprob = []  # condprob[c][t]: smoothed P(term t | class c)
for class_index in range(11):
    Nc = count_docs_in_class(train_docs_class_list, class_index)
    prior.append(Nc / N)
    text_c = concatenate_text_of_all_docs_in_class(
        train_docs_class_list, class_index, V)
    # Total token count over the selected vocabulary for this class.
    total_number_of_terms_in_d = 0
    for t in V:
        total_number_of_terms_in_d += count_tokens_of_term(text_c, t)
    condprob_term_dict = {}
    for t in V:
        Tct = count_tokens_of_term(text_c, t)
        # Add-one smoothing over the |V| selected terms.
        condprob_term = (Tct + 1) / (total_number_of_terms_in_d + len(V))
        condprob_term_dict[t] = condprob_term
    condprob.append(condprob_term_dict)
| [
11748,
269,
21370,
198,
6738,
299,
2528,
74,
13,
927,
1330,
20890,
1273,
368,
647,
198,
11748,
299,
32152,
355,
45941,
628,
628,
628,
628,
628,
628,
198,
27432,
62,
31628,
62,
4871,
62,
4868,
796,
17635,
198,
9288,
62,
31628,
62,
96... | 2.15187 | 1,791 |
#!/usr/bin/env python
"""
* Copyright (C) 2021 LEIDOS.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
"""
import rospy
import rosnode
import guidance_plugin_components
from cav_msgs.msg import Plugin
from cav_msgs.msg import SystemAlert
class GuidancePluginValidator:
"""
Primary class for the guidance_plugin_validator node. Conducts the validation of each of the
Guidance Plugins (of type 'Strategic', 'Tactical', or 'Control-Wrapper') as provided by this node's
configuration parameters.
"""
def __init__(self):
    """Default constructor for GuidancePluginValidator"""
    # Create plugin_discovery subscriber
    # NOTE(review): plugin_discovery_cb is not visible in this chunk, and
    # system_alert_cb is truncated -- both presumably defined on this class.
    self.plugin_discovery_sub = rospy.Subscriber("plugin_discovery", Plugin, self.plugin_discovery_cb)
    self.system_alert_sub = rospy.Subscriber("system_alert", SystemAlert, self.system_alert_cb)

    # Read in config params
    self.validation_duration = rospy.get_param('~validation_duration', 300) # Maximum time (sec) that node will spend conducting validation before results are considered final
    self.strategic_plugin_names = rospy.get_param('~strategic_plugins_to_validate', [])
    self.tactical_plugin_names = rospy.get_param('~tactical_plugins_to_validate', [])
    self.control_plugin_names = rospy.get_param('~control_plugins_to_validate', [])

    # Write config params to log file
    rospy.loginfo("Config params for guidance_plugin_validator:")
    rospy.loginfo("Validation Duration: " + str(self.validation_duration) + " seconds")
    rospy.loginfo("Strategic Guidance Plugins: " + str(self.strategic_plugin_names))
    rospy.loginfo("Tactical Guidance Plugins: " + str(self.tactical_plugin_names))
    rospy.loginfo("Control Guidance Plugins: " + str(self.control_plugin_names))

    # Boolean flag to indicate whether drivers are ready (this indicates that plugin node validation checks can begin)
    self.has_startup_completed = False

    # Boolean flag to indicate whether each guidance plugin's node has been validated
    self.has_node_validation_completed = False

    # Boolean flag to indicate whether final results have been written to log file
    self.has_logged_final_results = False

    # Set spin rate
    self.spin_rate = rospy.Rate(10) # 10 Hz

    # Initialize empty dicts that will be populated with a <plugin-type>PluginResults object for each Guidance Plugin
    self.strategic_plugin_validation_results = {} # Key is plugin's name; Value is plugin's StrategicPluginResults object
    self.tactical_plugin_validation_results = {} # Key is plugin's name; Value is plugin's TacticalPluginResults object
    self.control_plugin_validation_results = {} # Key is plugin's name; Value is plugin's ControlPluginResults object

    # Call member function to populate the 'validation results' dicts
    self.populate_results_dicts(self.strategic_plugin_names, self.tactical_plugin_names, self.control_plugin_names)
def populate_results_dicts(self, strategic_plugin_names, tactical_plugin_names, control_plugin_names):
    """Fill the three 'validation results' dicts with one results object per
    configured plugin, keyed by plugin name."""
    # Mutate the dicts created in __init__ in place rather than rebinding them.
    self.strategic_plugin_validation_results.update(
        (name, guidance_plugin_components.StrategicPluginResults(name))
        for name in strategic_plugin_names)
    self.tactical_plugin_validation_results.update(
        (name, guidance_plugin_components.TacticalPluginResults(name))
        for name in tactical_plugin_names)
    self.control_plugin_validation_results.update(
        (name, guidance_plugin_components.ControlPluginResults(name))
        for name in control_plugin_names)
    return
def spin(self):
    """
    Function to ensure node spins at configured spin rate.
    """
    while not rospy.is_shutdown():
        if self.has_startup_completed:
            # Conduct node validation if it has not yet occurred
            # NOTE(review): conduct_node_validation is not visible in this
            # chunk -- presumably defined elsewhere on this class.
            if not self.has_node_validation_completed:
                self.conduct_node_validation()
                self.has_node_validation_completed = True

            # If time has surpassed the configured validation duration, the current results are considered final. Write to log file.
            # NOTE(review): self.start_time_seconds is not set in __init__;
            # presumably assigned by system_alert_cb when startup completes --
            # confirm, otherwise this raises AttributeError.
            seconds_since_startup_completed = rospy.get_time() - self.start_time_seconds
            if (seconds_since_startup_completed >= self.validation_duration):
                if not self.has_logged_final_results:
                    self.log_final_results_for_each_plugin()
                    self.has_logged_final_results = True

        self.spin_rate.sleep()

    return
def log_final_results_for_each_plugin(self):
    """
    Calls the appropriate function on each plugin's 'results' object in order to
    write all final validation results to the log file for this node.

    Results are grouped by plugin type (Strategic, Tactical, Control), each
    section framed by a banner for readability in the log.
    """
    rospy.loginfo("**********************************************************")
    rospy.loginfo("******Final Validation Results for Strategic Plugins******")
    rospy.loginfo("**********************************************************")
    # Write final validation results to log file for Guidance Strategic Plugins.
    # Only the results objects are needed; the dict keys (plugin names) are
    # already stored on each object, so iterate .values() rather than .items().
    for plugin_results_object in self.strategic_plugin_validation_results.values():
        plugin_results_object.write_strategic_final_results_to_logs()
    rospy.loginfo("**********************************************************")
    rospy.loginfo("******Final Validation Results for Tactical Plugins*******")
    rospy.loginfo("**********************************************************")
    # Write final validation results to log file for Guidance Tactical Plugins
    for plugin_results_object in self.tactical_plugin_validation_results.values():
        plugin_results_object.write_tactical_final_results_to_logs()
    rospy.loginfo("**********************************************************")
    rospy.loginfo("*******Final Validation Results for Control Plugins*******")
    rospy.loginfo("**********************************************************")
    # Write final validation results to log file for Guidance Control Plugins
    for plugin_results_object in self.control_plugin_validation_results.values():
        plugin_results_object.write_control_final_results_to_logs()
    rospy.loginfo("**********************************************************")
    rospy.loginfo("*******End of Final Validation Results for Plugins********")
    rospy.loginfo("**********************************************************")
    return
def system_alert_cb(self, msg):
    """
    Callback for the system_alert topic.

    Validation begins only after a 'DRIVERS_READY' alert has been received;
    any other alert type is ignored.
    """
    # Guard clause: only DRIVERS_READY marks the end of startup.
    if msg.type != SystemAlert.DRIVERS_READY:
        return
    rospy.loginfo("DRIVERS_READY message received. Beginning node validation.")
    self.start_time_seconds = rospy.get_time()
    self.has_startup_completed = True
    return
def plugin_discovery_cb(self, msg):
    """
    Callback function for the plugin_discovery topic.

    Processes the first received message for each configured guidance plugin
    (Strategic, Tactical, or Control) and updates that plugin's 'results'
    object accordingly. Messages for unknown plugin names are ignored.
    """
    # Route to the results object matching this plugin's configured type.
    plugin_name = msg.name
    if plugin_name in self.strategic_plugin_names:
        self._validate_plugin_discovery_msg(
            msg, self.strategic_plugin_validation_results[plugin_name], "Strategic Plugin")
    elif plugin_name in self.tactical_plugin_names:
        self._validate_plugin_discovery_msg(
            msg, self.tactical_plugin_validation_results[plugin_name], "Tactical Plugin")
    elif plugin_name in self.control_plugin_names:
        self._validate_plugin_discovery_msg(
            msg, self.control_plugin_validation_results[plugin_name], "Control Plugin")
    return

def _validate_plugin_discovery_msg(self, msg, results_object, plugin_type_label):
    """
    Validate a single plugin_discovery message against a plugin's expected values.

    Checks the required 'capability' and 'type' fields against the plugin's
    requirement results, and the optional 'available' and 'activated' flags
    against its optional results, logging a success/error/warning line per
    check. Only the first message per plugin is processed (subsequent messages
    are assumed identical).

    :param msg: the received plugin_discovery message
    :param results_object: the plugin's Strategic/Tactical/ControlPluginResults
    :param plugin_type_label: human-readable plugin type used in log lines
    """
    plugin_name = msg.name
    # Do not process the message if this plugin's plugin_discovery message has
    # already been validated (first message wins).
    if results_object.has_had_plugin_discovery_message_validated:
        return
    rospy.loginfo("Processing plugin_discovery message for " + str(plugin_name) + " (" + plugin_type_label + ")")
    results_object.has_had_plugin_discovery_message_validated = True
    # Required: capability string must match the configured expectation.
    expected_capability = results_object.requirement_results.correct_plugin_discovery_capability
    if msg.capability == expected_capability:
        results_object.requirement_results.has_correct_plugin_discovery_capability = True
        rospy.loginfo("Success: " + str(plugin_name) + " has plugin_discovery capability " + str(expected_capability))
    else:
        rospy.logerr("ERROR: " + str(plugin_name) + " plugin_discovery capability == " + str(msg.capability) + " (expected " + str(expected_capability) + ")")
    # Required: plugin type must match the configured expectation.
    expected_type = results_object.requirement_results.correct_plugin_discovery_type
    if msg.type == expected_type:
        results_object.requirement_results.has_correct_plugin_discovery_type = True
        rospy.loginfo("Success: " + str(plugin_name) + " has plugin_discovery type " + str(expected_type))
    else:
        rospy.logerr("ERROR: " + str(plugin_name) + " plugin_discovery type == " + str(msg.type) + " (expected " + str(expected_type) + ")")
    # Optional: 'available' flag should be True (warning only if not).
    if msg.available == True:
        results_object.optional_results.has_correct_plugin_discovery_available = True
        rospy.loginfo("Success: " + str(plugin_name) + " has plugin_discovery available == True")
    else:
        rospy.logwarn("WARNING: " + str(plugin_name) + " plugin_discovery available == " + str(msg.available) + " (expected True)")
    # Optional: 'activated' flag should be True (warning only if not).
    if msg.activated == True:
        results_object.optional_results.has_correct_plugin_discovery_activated = True
        rospy.loginfo("Success: " + str(plugin_name) + " has plugin_discovery activated == True")
    else:
        rospy.logwarn("WARNING: " + str(plugin_name) + " plugin_discovery activated == " + str(msg.activated) + " (expected True)")
    return
def conduct_node_validation(self):
    """
    Run the communication-interface validation for every configured plugin type.
    """
    rospy.loginfo("Beginning validation checks for node subscriptions, publications, and advertised services")
    # Validate each plugin category in turn.
    for validate in (self.validate_strategic_plugins,
                     self.validate_tactical_plugins,
                     self.validate_control_plugins):
        validate()
    rospy.loginfo("Completed validation checks for node subscriptions, publications, and advertised services")
    return
def validate_strategic_plugins(self):
    """
    Conduct validation checks for each strategic plugin's node (as specified by this node's
    configuration parameters) for proper publications, subscriptions, and advertised services. Based on the
    results, this function updates each strategic plugin's StrategicPluginResults object accordingly.

    Required interface: the plugin_discovery publication and the plan_maneuvers
    service. Optional interface: current_pose and current_speed subscriptions.
    """
    for plugin_name, plugin_results_object in self.strategic_plugin_validation_results.items():
        plugin_node_name = plugin_results_object.node_name
        rospy.loginfo("Processing publishers, subscribers, and services for " + str(plugin_name) + " (Strategic Plugin)")
        # Check whether the node has been created (ping with a bounded retry count)
        if rosnode.rosnode_ping(plugin_node_name, max_count = 5):
            plugin_results_object.requirement_results.has_node = True
            rospy.loginfo("Success: Node " + str(plugin_node_name) + " exists.")
        else:
            rospy.logerr("ERROR: No node response for " + str(plugin_node_name) + ". Node does not exist.")
        # Obtain string that includes information regarding a node's publications, subscriptions, and services.
        # NOTE(review): this runs even when the ping above failed -- confirm
        # rosnode.get_node_info_description tolerates a missing node.
        rosnode_info_string = (rosnode.get_node_info_description(plugin_node_name))
        # Get substring from rosnode info that contains 'Subscriptions' information.
        # The parse relies on rosnode's fixed section order
        # (Publications -> Subscriptions -> Services); str.index() raises
        # ValueError if a heading is ever absent.
        sub_index_start = rosnode_info_string.index("Subscriptions:")
        sub_index_end = rosnode_info_string.index("Services:")
        subscriptions_string = rosnode_info_string[sub_index_start:sub_index_end]
        # Check for required and optional subscriptions (both optional here; missing ones only warn)
        if plugin_results_object.optional_results.current_pose_topic in subscriptions_string:
            plugin_results_object.optional_results.has_current_pose_sub = True
            rospy.loginfo("Success: " + str(plugin_node_name) + " subscribes to " + str(plugin_results_object.optional_results.current_pose_topic))
        else:
            rospy.logwarn("WARNING: " + str(plugin_node_name) + " does not subscribe to " + str(plugin_results_object.optional_results.current_pose_topic))
        if plugin_results_object.optional_results.current_speed_topic in subscriptions_string:
            plugin_results_object.optional_results.has_current_speed_sub = True
            rospy.loginfo("Success: " + str(plugin_node_name) + " subscribes to " + str(plugin_results_object.optional_results.current_speed_topic))
        else:
            rospy.logwarn("WARNING: " + str(plugin_node_name) + " does not subscribe to " + str(plugin_results_object.optional_results.current_speed_topic))
        # Get substring from rosnode info that contains 'Publications' information
        pub_index_start = rosnode_info_string.index("Publications:")
        pub_index_end = rosnode_info_string.index("Subscriptions:")
        publications_string = rosnode_info_string[pub_index_start:pub_index_end]
        # Check for required publications (plugin_discovery is required; missing -> error)
        if plugin_results_object.requirement_results.plugin_discovery_topic in publications_string:
            plugin_results_object.requirement_results.has_plugin_discovery_pub = True
            rospy.loginfo("Success: " + str(plugin_node_name) + " publishes to " + str(plugin_results_object.requirement_results.plugin_discovery_topic))
        else:
            rospy.logerr("ERROR: " + str(plugin_node_name) + " does not publish to " + str(plugin_results_object.requirement_results.plugin_discovery_topic))
        # Get substring from rosnode info that contains 'Services' information
        serv_index_start = rosnode_info_string.index("Services:")
        services_string = rosnode_info_string[serv_index_start:]
        # Check for the required plan_maneuvers service (missing -> error)
        if plugin_results_object.requirement_results.plan_maneuvers_service in services_string:
            plugin_results_object.requirement_results.has_plan_maneuvers_service = True
            rospy.loginfo("Success: " + str(plugin_node_name) + " advertises service " + str(plugin_results_object.requirement_results.plan_maneuvers_service))
        else:
            rospy.logerr("ERROR: " + str(plugin_node_name) + " does not advertise service " + str(plugin_results_object.requirement_results.plan_maneuvers_service))
    return
def validate_tactical_plugins(self):
    """
    Conduct validation checks for each tactical plugin's node (as specified by this node's
    configuration parameters) for proper publications, subscriptions, and advertised services. Based on the
    results, this function updates each tactical plugin's TacticalPluginResults object accordingly.

    Required interface: the plugin_discovery publication and the plan_trajectory
    service. Optional interface: current_pose and current_speed subscriptions.
    """
    for plugin_name, plugin_results_object in self.tactical_plugin_validation_results.items():
        plugin_node_name = plugin_results_object.node_name
        rospy.loginfo("Processing publishers, subscribers, and services for " + str(plugin_name) + " (Tactical Plugin)")
        # Check whether the node has been created (ping with a bounded retry count)
        if rosnode.rosnode_ping(plugin_node_name, max_count = 5):
            plugin_results_object.requirement_results.has_node = True
            rospy.loginfo("Success: Node " + str(plugin_node_name) + " exists.")
        else:
            rospy.logerr("ERROR: No node response for " + str(plugin_node_name) + ". Node does not exist.")
        # Obtain string that includes information regarding a node's publications, subscriptions, and services.
        # NOTE(review): this runs even when the ping above failed -- confirm
        # rosnode.get_node_info_description tolerates a missing node.
        rosnode_info_string = (rosnode.get_node_info_description(plugin_node_name))
        # Get substring from rosnode info that contains 'Subscriptions' information.
        # Relies on rosnode's fixed section order; str.index() raises ValueError
        # if a heading is absent.
        sub_index_start = rosnode_info_string.index("Subscriptions:")
        sub_index_end = rosnode_info_string.index("Services:")
        subscriptions_string = rosnode_info_string[sub_index_start:sub_index_end]
        # Check for optional subscriptions (missing ones only warn)
        if plugin_results_object.optional_results.current_pose_topic in subscriptions_string:
            plugin_results_object.optional_results.has_current_pose_sub = True
            rospy.loginfo("Success: " + str(plugin_node_name) + " subscribes to " + str(plugin_results_object.optional_results.current_pose_topic))
        else:
            rospy.logwarn("WARNING: " + str(plugin_node_name) + " does not subscribe to " + str(plugin_results_object.optional_results.current_pose_topic))
        if plugin_results_object.optional_results.current_speed_topic in subscriptions_string:
            plugin_results_object.optional_results.has_current_speed_sub = True
            rospy.loginfo("Success: " + str(plugin_node_name) + " subscribes to " + str(plugin_results_object.optional_results.current_speed_topic))
        else:
            rospy.logwarn("WARNING: " + str(plugin_node_name) + " does not subscribe to " + str(plugin_results_object.optional_results.current_speed_topic))
        # Get substring from rosnode info that contains 'Publications' information
        pub_index_start = rosnode_info_string.index("Publications:")
        pub_index_end = rosnode_info_string.index("Subscriptions:")
        publications_string = rosnode_info_string[pub_index_start:pub_index_end]
        # Check for the required plugin_discovery publication (missing -> error)
        if plugin_results_object.requirement_results.plugin_discovery_topic in publications_string:
            plugin_results_object.requirement_results.has_plugin_discovery_pub = True
            rospy.loginfo("Success: " + str(plugin_node_name) + " publishes to " + str(plugin_results_object.requirement_results.plugin_discovery_topic))
        else:
            rospy.logerr("ERROR: " + str(plugin_node_name) + " does not publish to " + str(plugin_results_object.requirement_results.plugin_discovery_topic))
        # Get substring from rosnode info that contains 'Services' information
        serv_index_start = rosnode_info_string.index("Services:")
        services_string = rosnode_info_string[serv_index_start:]
        # Check for the required plan_trajectory service (missing -> error)
        if plugin_results_object.requirement_results.plan_trajectory_service in services_string:
            plugin_results_object.requirement_results.has_plan_trajectory_service = True
            rospy.loginfo("Success: " + str(plugin_node_name) + " advertises service " + str(plugin_results_object.requirement_results.plan_trajectory_service))
        else:
            rospy.logerr("ERROR: " + str(plugin_node_name) + " does not advertise service " + str(plugin_results_object.requirement_results.plan_trajectory_service))
    return
def validate_control_plugins(self):
    """
    Conduct validation checks for each control plugin's node (as specified by this node's
    configuration parameters) for proper publications, subscriptions, and advertised services. Based on the
    results, this function updates each control plugin's ControlPluginResults object accordingly.

    Required interface: a plan_trajectory subscription plus plugin_discovery and
    final_waypoints publications. Control plugins advertise no required services.
    """
    for plugin_name, plugin_results_object in self.control_plugin_validation_results.items():
        plugin_node_name = plugin_results_object.node_name
        rospy.loginfo("Processing publishers, subscribers, and services for " + str(plugin_name) + " (Control Plugin)")
        # Check whether the node has been created (ping with a bounded retry count)
        if rosnode.rosnode_ping(plugin_node_name, max_count = 5):
            plugin_results_object.requirement_results.has_node = True
            rospy.loginfo("Success: Node " + str(plugin_node_name) + " exists.")
        else:
            rospy.logerr("ERROR: No node response for " + str(plugin_node_name) + ". Node does not exist.")
        # Obtain string that includes information regarding a node's publications, subscriptions, and services.
        # NOTE(review): this runs even when the ping above failed -- confirm
        # rosnode.get_node_info_description tolerates a missing node.
        rosnode_info_string = (rosnode.get_node_info_description(plugin_node_name))
        # Get substring from rosnode info that contains 'Subscriptions' information.
        # Relies on rosnode's fixed section order; str.index() raises ValueError
        # if a heading is absent.
        sub_index_start = rosnode_info_string.index("Subscriptions:")
        sub_index_end = rosnode_info_string.index("Services:")
        subscriptions_string = rosnode_info_string[sub_index_start:sub_index_end]
        # Check for the required plan_trajectory subscription (missing -> error)
        if plugin_results_object.requirement_results.plan_trajectory_topic in subscriptions_string:
            plugin_results_object.requirement_results.has_plan_trajectory_sub = True
            rospy.loginfo("Success: " + str(plugin_node_name) + " subscribes to " + str(plugin_results_object.requirement_results.plan_trajectory_topic))
        else:
            rospy.logerr("ERROR: " + str(plugin_node_name) + " does not subscribe to " + str(plugin_results_object.requirement_results.plan_trajectory_topic))
        # Get substring from rosnode info that contains 'Publications' information
        pub_index_start = rosnode_info_string.index("Publications:")
        pub_index_end = rosnode_info_string.index("Subscriptions:")
        publications_string = rosnode_info_string[pub_index_start:pub_index_end]
        # Check for the required plugin_discovery publication (missing -> error)
        if plugin_results_object.requirement_results.plugin_discovery_topic in publications_string:
            plugin_results_object.requirement_results.has_plugin_discovery_pub = True
            rospy.loginfo("Success: " + str(plugin_node_name) + " publishes to " + str(plugin_results_object.requirement_results.plugin_discovery_topic))
        else:
            rospy.logerr("ERROR: " + str(plugin_node_name) + " does not publish to " + str(plugin_results_object.requirement_results.plugin_discovery_topic))
        # Check for the required final_waypoints publication (missing -> error)
        if plugin_results_object.requirement_results.final_waypoints_topic in publications_string:
            plugin_results_object.requirement_results.has_final_waypoints_pub = True
            rospy.loginfo("Success: " + str(plugin_node_name) + " publishes to " + str(plugin_results_object.requirement_results.final_waypoints_topic))
        else:
            rospy.logerr("ERROR: " + str(plugin_node_name) + " does not publish to " + str(plugin_results_object.requirement_results.final_waypoints_topic))
    return
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
37811,
198,
1635,
15069,
357,
34,
8,
33448,
12509,
2389,
2640,
13,
198,
1635,
198,
1635,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
... | 2.650324 | 11,422 |
import struct


def parse_flux_records(bin_data, num_channels=1):
    """Unpack packed float32 flux records from raw bytes.

    Each record is 7 consecutive float32 values, in order:
    RA, DEC, AST, and the Stokes parameters I, Q, U, V.

    :param bin_data: raw bytes whose length is a whole number of float32s.
    :param num_channels: channels per pointing, used only to derive
        ``num_points``. The original script referenced an undefined
        ``num_channels`` -- TODO confirm the intended value (the default of 1
        keeps ``num_points == len(RA)``).
    :return: dict with keys "RA", "DEC", "AST", "I", "Q", "U", "V" (tuples of
        floats, one element per record) and "num_points" (int).
    :raises ValueError: if ``bin_data`` is not a multiple of 4 bytes.
    """
    if len(bin_data) % 4 != 0:
        raise ValueError("bin_data length must be a multiple of 4 bytes")
    # Bug fixes vs. the original script: the unpack count used an undefined
    # name 'num' (should be the float count), the result was bound to
    # 'data_unpak' but sliced as 'data_unpack' (NameError), and '/' yields a
    # float under Python 3 -- use integer division.
    num_floats = len(bin_data) // 4
    values = struct.unpack('{0}f'.format(num_floats), bin_data)
    record = {
        "RA": values[0::7],
        "DEC": values[1::7],
        "AST": values[2::7],
        "I": values[3::7],
        "Q": values[4::7],
        "U": values[5::7],
        "V": values[6::7],
    }
    record["num_points"] = len(record["RA"]) // num_channels
    return record


if __name__ == "__main__":
    with open("fluxtime.dat", "rb") as f:
        bin_data = f.read()
    records = parse_flux_records(bin_data)
    RA_set = records["RA"]
    DEC_set = records["DEC"]
    AST_set = records["AST"]
    I_set = records["I"]
    Q_set = records["Q"]
    U_set = records["U"]
    V_set = records["V"]
    num_point = records["num_points"]
| [
11748,
2878,
198,
198,
4480,
1280,
7203,
35522,
742,
524,
13,
19608,
2430,
26145,
4943,
355,
277,
25,
198,
220,
220,
220,
9874,
62,
7890,
796,
277,
13,
961,
3419,
198,
198,
22510,
62,
298,
1678,
796,
18896,
7,
8800,
62,
7890,
20679,... | 2.113514 | 185 |
# Generated by Django 3.0.3 on 2020-02-12 00:37
from django.db import migrations, models
import django.db.models.deletion
| [
2,
2980,
515,
416,
37770,
513,
13,
15,
13,
18,
319,
12131,
12,
2999,
12,
1065,
3571,
25,
2718,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14208,
13,
9945,
13,
27530,
13,
2934,
1616,
295,
... | 2.818182 | 44 |
"""
ukcp18_ls1.py
=============
Helper functions for working with UKCP18 Land Strand 1.
"""
# Standard library imports
import os
# Third-party imports
import numpy as np
import numpy.random as npr
import numpy.ma
from numpy.ma.core import MaskedArray
import cPickle
# Root directory holding the pickled source datasets used by this module.
BASEDIR = 'ukcp-test-inputs/inputs'

# Maps the public coordinate-variable name to the key prefix used inside the
# pickled data dicts ('samp...' for samples, 'prob...' for percentiles).
prob_data_map = {'sample': 'samp',
                 'percentile': 'prob'}
def load_coord_var(prob_data_type):
    """
    Load a coordinate variable from the pickled source data and return it.

    :param prob_data_type: either 'sample' or 'percentile' (a key of
        ``prob_data_map``); any other value raises KeyError.
    :return: numpy array -- float percentages for 'percentile' (the stored
        fractions are scaled by 100), int32 values for 'sample'.
    """
    fpath = "{}/source_others/a1b_tas_jja_EAW_1961-1990.dat".format(BASEDIR)
    with open(fpath, 'rb') as reader:
        data = cPickle.load(reader)
    key = prob_data_map[prob_data_type]
    if key == 'prob':
        # Percentiles are stored as fractions; convert to percent.
        # np.float was a deprecated alias of the builtin float and is removed
        # in numpy >= 1.24; the builtin is the documented equivalent.
        return np.array((data[key] * 100), float)
    else:
        return np.array(data[key], np.int32)
def load_samples():
    """
    Return the values of the 'sample' coordinate variable.

    :return: numpy array (a sliced copy of the loaded variable)
    """
    samples = load_coord_var('sample')
    return samples[:]
def load_percentiles():
    """
    Return the values of the 'percentile' coordinate variable.

    :return: numpy array
    """
    percentiles = load_coord_var('percentile')
    return percentiles
def _get_ls1_prob_site_data(var_id, year, scenario="a1b",
                            prob_data_type="sample",
                            temp_avg_type="mon"):
    """
    Extract and return example probabilistic data as a numpy array.

    Loads the pickled 'djf' and 'jja' seasonal datasets for the (de-anomalised)
    variable, selects the column for the requested year, and blends the two
    seasons per time step using fixed weights for the chosen temporal average.

    :param var_id: variable id; any "Anom" suffix is stripped [string]
    :param year: year; values before 1975 are clamped to 1975, the first year
        available in the example data [int]
    :param scenario: scenario [string]
    :param prob_data_type: probability data type, 'sample' or 'percentile' [string]
    :param temp_avg_type: temporal average type (frequency), one of
        'mon' (12 steps), 'seas' (4) or 'ann' (1) [string]
    :return: numpy array of shape (time steps, probability dimension).
    :raises Exception: if temp_avg_type is not one of mon/seas/ann.
    """
    # Structure the output data based on the temporal average type: each entry
    # is the djf weight for one time step (jja gets 1 - weight).
    if temp_avg_type == "mon":
        mults = [1, 0.95, 0.8, 0.6, 0.5, 0.3, 0.1, 0, 0.1, 0.3, 0.7, 0.9]
    elif temp_avg_type == "seas":
        mults = [1, 0.5, 0, 0.6]
    elif temp_avg_type == "ann":
        mults = [0.5]
    else:
        raise Exception("Temporal average type must be one of: mon, seas, ann.")
    data = {}
    # Remove "Anom" from var_id - might not be there
    var_id = var_id.replace("Anom", "")
    for temporal_average in ("djf", "jja"):
        # NOTE(review): the template hardcodes "a1b", so the scenario keyword
        # passed to format() is unused -- confirm whether other scenarios were
        # ever intended here.
        fname = "a1b_{var_id}_{temporal_average}_EAW_1961-1990.dat".format(
            scenario=scenario, var_id=var_id, temporal_average=temporal_average)
        fpath = os.path.join(BASEDIR, "source_others", fname)
        print "Reading data from: {0}".format(fpath)
        with open(fpath, 'rb') as reader:
            data[temporal_average] = cPickle.load(reader)
    if year < 1975:
        # Set year to start year available in example data
        year = 1975
    # Locate the requested year's column; .index() raises ValueError if absent.
    year_index = [int(y) for y in data['jja']['time']].index(year)
    prob_data_key = prob_data_map[prob_data_type] + "data"
    prob_data_djf = data['djf'][prob_data_key][:, year_index]
    prob_data_jja = data['jja'][prob_data_key][:, year_index]
    # Blend the two seasonal profiles per time step using the weights above.
    prob_data_over_times = [(prob_data_djf * mult) + (prob_data_jja * (1 - mult)) for mult in mults]
    return np.array(prob_data_over_times)
def modify_ls1_array(variable, date_times, **facets):
    """
    Modify the array provided based on example input data.

    Broadcasts the variable's array along a new trailing probability dimension
    sized from the example data, fills it with randomly perturbed example
    values, and (for masked inputs) re-applies the original mask broadcast
    along the new dimension.

    :param variable: source variable; assumed to expose ``[:]`` (array data)
        and ``.dimensions`` -- presumably a netCDF-like variable, TODO confirm
    :param date_times: sequence of datetimes; only the first element's year is used
    :param facets: must contain "var_id", "scenario", "prob_data_type" and
        "frequency"
    :return: Tuple of: (new_array, dimensions_list)
    """
    var_id = facets["var_id"]
    scenario = facets["scenario"]
    prob_data_type = facets["prob_data_type"]
    temp_avg_type = facets["frequency"]
    year = date_times[0].year
    eg_data = _get_ls1_prob_site_data(var_id, year, scenario=scenario,
                                      prob_data_type=prob_data_type,
                                      temp_avg_type=temp_avg_type)
    array = variable[:]
    spatial_dims = list(array.shape[1:])
    # Now broadcast the array to new fourth dimension
    len_prob_dim = eg_data.shape[1]
    new_shape = list(array.shape) + [len_prob_dim]
    if isinstance(array, MaskedArray):
        print("ARRAY IS MASKED")
        # Remember the original mask; ma.resize drops it, so it is re-applied below.
        mask = array.mask
        new_array = numpy.ma.resize(array, new_shape)
    else:
        new_array = numpy.resize(array, new_shape)
        mask = None
    # New dimension names: the original dims plus the probability dimension.
    dims_list = tuple(list(variable.dimensions) + [prob_data_type])
    if 0: # For DEBUGGING ONLY
        new_array = np.zeros(new_shape)
        return new_array, dims_list
    print "Building the new array..."
    for t_index, values in enumerate(eg_data):
        for y_index in range(spatial_dims[0]):
            mult = (spatial_dims[0] + 0.5) / spatial_dims[0]
            # Work out shape of random array to be broadcasted
            # Will either be (len_x, len_prob_dim) or just (len_prob_dim)
            sub_shape = [len_prob_dim]
            if len(spatial_dims) > 1:
                sub_shape = spatial_dims[1:] + sub_shape
            random_array = _get_broadcasted_random_array(sub_shape)
            incremented_values = mult * random_array * values
            # NOTE(review): for non-'tas' variables the loop variable 'values'
            # is rebound so perturbations compound across y; 'tas' keeps the
            # same base values each row. Confirm this asymmetry is intended.
            if not var_id.startswith('tas'):
                values = incremented_values
            new_array[t_index][y_index] = incremented_values
    print
    # Do some checking of the mask
    if isinstance(new_array, MaskedArray):
        print("Broadcasting and re-applying MASK")
        print(mask.shape)
        # Broadcast `mask` along the 3rd dimension to make it the same shape as `new_array`
        _, big_mask = np.broadcast_arrays(new_array, mask[..., None])
        new_array.mask = big_mask
        # Sanity-check: the mask must be identical across the new probability
        # dimension. NOTE(review): 'values' here is whatever the loop above
        # left bound -- presumably its length equals the probability dimension;
        # confirm.
        nam = new_array.mask
        for i in range(len(values)):
            assert numpy.array_equal(nam[0,:,:,0], nam[0,:,:,i])
    return new_array, dims_list
def _get_broadcasted_random_array(shape):
"""
Broadcast array randomly to new shape.
:param shape:
:return:
"""
arr = npr.random(shape) / 10. + 1
return arr
if __name__ == "__main__":
    # Smoke test: run the extraction for both variables and report the result
    # shape plus any masked cells. (Python 2 print statements -- this module
    # predates Python 3, as the cPickle import above also indicates.)
    for var_id in ('tas', 'pr'):
        array = _get_ls1_prob_site_data(var_id, 2000, scenario="a1b",
                                        prob_data_type="percentile",
                                        temp_avg_type="mon")
        print array.shape
        for y in range(array.shape[0]):
            for x in range(array.shape[1]):
                if numpy.ma.is_masked(array[y, x]): print array[y, x]
37811,
201,
198,
2724,
13155,
1507,
62,
7278,
16,
13,
9078,
201,
198,
25609,
28,
201,
198,
201,
198,
47429,
5499,
329,
1762,
351,
3482,
8697,
1507,
6379,
4285,
392,
352,
13,
201,
198,
37811,
201,
198,
201,
198,
2,
8997,
5888,
17944,... | 2.106129 | 3,100 |
import abjad
from abjadext import rmakers
# Rhythm-maker stacks for section I, keyed by material name.
# "matA": eighth-note talea 1+1+1+1 with alternating extra counts (0, 1);
# "rests": note maker with selected positions forced to rests.
I_rhythms = {
    "matA": rmakers.stack(
        rmakers.talea([1, 1, 1, 1], 8, extra_counts=[0, 1]),
        rmakers.extract_trivial()
    ),
    "rests": rmakers.stack(
        rmakers.note(),
        # NOTE(review): `selector` is not defined anywhere in this chunk --
        # evaluating this dict raises NameError unless it is defined earlier
        # in the file. Confirm the intended selector.
        rmakers.force_rest(selector)
    )
}

# Section II: same talea as section I but with the extra counts reversed (1, 0).
II_rhythms = {
    "matA": rmakers.stack(
        rmakers.talea([1, 1, 1, 1], 8, extra_counts=[1, 0]),
        rmakers.extract_trivial()
    ),
    "rests": rmakers.stack(
        rmakers.note(),
        # NOTE(review): same undefined `selector` as above.
        rmakers.force_rest(selector)
    )
}

# Section III: two-note talea with a single extra count.
III_rhythms = {
    "matA": rmakers.stack(
        rmakers.talea([1, 1], 8, extra_counts=[1]),
        rmakers.extract_trivial()
    ),
    "rests": rmakers.stack(
        rmakers.note(),
        # NOTE(review): same undefined `selector` as above.
        rmakers.force_rest(selector)
    )
}

# gtrI_rhythm_01 = gtrI_stack_01(gtrI_durations_01)
11748,
450,
38442,
198,
6738,
450,
73,
671,
742,
1330,
374,
6620,
198,
198,
40,
62,
17179,
5272,
907,
796,
1391,
198,
220,
220,
220,
366,
6759,
32,
1298,
374,
6620,
13,
25558,
7,
198,
220,
220,
220,
374,
6620,
13,
29429,
64,
26933... | 1.982323 | 396 |
# File: kafka_parser.py
#
# Copyright (c) 2017-2022 Splunk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
from datetime import datetime
# strftime/strptime layout, e.g. "2021-06-30 23:59:59". Presumably shared by
# the Kafka parser routines in this app -- confirm against the callers, which
# are outside this chunk.
time_format = '%Y-%m-%d %H:%M:%S'
| [
2,
9220,
25,
479,
1878,
4914,
62,
48610,
13,
9078,
198,
2,
198,
2,
15069,
357,
66,
8,
2177,
12,
1238,
1828,
13341,
2954,
3457,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
153... | 3.40404 | 198 |
# Single-source package version string (consumed by packaging tooling).
__version__ = "0.1.0"
from .exceptions import ActyMathError
from .calc import Calc
| [
834,
9641,
834,
796,
366,
15,
13,
16,
13,
15,
1,
198,
198,
6738,
764,
1069,
11755,
1330,
2191,
88,
37372,
12331,
198,
6738,
764,
9948,
66,
1330,
2199,
66,
198
] | 2.709677 | 31 |
# Generated by Django 2.0.4 on 2018-07-20 02:04
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
362,
13,
15,
13,
19,
319,
2864,
12,
2998,
12,
1238,
7816,
25,
3023,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
import numpy as np
from .. import ArrayStringReader
def test_arraystringreader():
    """Round-trip a comma-separated float32 string through ArrayStringReader.

    https://docs.pytest.org/en/stable/getting-started.html#create-your-first-test
    """
    n_values = 8
    expected = np.random.rand(n_values).astype('float32')
    csv_text = ','.join(str(v) for v in expected)
    crafted_doc = ArrayStringReader().craft(csv_text, 0)
    blob = crafted_doc['blob']
    assert blob.shape[0] == n_values
    np.testing.assert_array_equal(blob, expected)
| [
11748,
299,
32152,
355,
45941,
198,
198,
6738,
11485,
1330,
15690,
10100,
33634,
628,
198,
4299,
1332,
62,
18747,
8841,
46862,
33529,
198,
220,
220,
220,
37227,
1456,
318,
616,
1332,
2438,
628,
220,
220,
220,
3740,
1378,
31628,
13,
9078... | 2.731579 | 190 |
import base64
from typing import Optional, Tuple
import boto3
import click
from botocore.config import Config
from docker import from_env
from opta.amplitude import amplitude_client
from opta.core.gcp import GCP
from opta.core.generator import gen_all
from opta.core.terraform import get_terraform_outputs
from opta.exceptions import UserErrors
from opta.layer import Layer
from opta.nice_subprocess import nice_run
from opta.utils import check_opta_file_exists, fmt_msg, is_tool, yaml
# Check if the config file is for a service or environment opta layer.
# Some commands (like push/deploy) are meant only for service layers.
#
# If the config file has the "environments" field, then it is a child/service layer.
@click.command(hidden=True)
@click.argument("image")
@click.option("-c", "--config", default="opta.yml", help="Opta config file.")
@click.option(
"-e", "--env", default=None, help="The env to use when loading the config file."
)
@click.option(
"--tag",
default=None,
help="The image tag associated with your docker container. Defaults to your local image tag.",
)
| [
11748,
2779,
2414,
198,
6738,
19720,
1330,
32233,
11,
309,
29291,
198,
198,
11748,
275,
2069,
18,
198,
11748,
3904,
198,
6738,
10214,
420,
382,
13,
11250,
1330,
17056,
198,
6738,
36253,
1330,
422,
62,
24330,
198,
198,
6738,
2172,
64,
... | 3.193084 | 347 |
# Preprocess images
from scipy import misc
from scipy.ndimage.measurements import center_of_mass
import numpy as np
import os
from os.path import isfile, isdir
if __name__ == '__main__':
PARENT_DIR = 'resources/New'
progbar = ProgBar()
recurse(progbar, PARENT_DIR) | [
2,
3771,
14681,
4263,
198,
198,
6738,
629,
541,
88,
1330,
12747,
198,
6738,
629,
541,
88,
13,
358,
9060,
13,
1326,
5015,
902,
1330,
3641,
62,
1659,
62,
22208,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28686,
198,
6738,
28686,
... | 2.699029 | 103 |
# -*- coding: utf-8 -*-
x = jQuery(".item")
x.on("click", lambda e: e.preventDefault())
fooController["$inject"] = ["$scope", "$rootScope", "config"]
tttt = xxx = 222
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
87,
796,
37420,
7,
1911,
9186,
4943,
198,
87,
13,
261,
7203,
12976,
1600,
37456,
304,
25,
304,
13,
3866,
1151,
19463,
28955,
628,
198,
198,
21943,
22130,
14692,
3... | 2.478261 | 69 |
from typing import Callable, Mapping, TypeVar
from unittest.mock import Mock
from lxml.etree import _Element as Element
from pytest import raises
from preacher.core.extraction.analysis import Analyzer
T = TypeVar('T')
| [
6738,
19720,
1330,
4889,
540,
11,
337,
5912,
11,
5994,
19852,
198,
6738,
555,
715,
395,
13,
76,
735,
1330,
44123,
198,
198,
6738,
300,
19875,
13,
316,
631,
1330,
4808,
20180,
355,
11703,
198,
6738,
12972,
9288,
1330,
12073,
198,
198,
... | 3.46875 | 64 |
from faktotum.research.vendor import ner, lm, utils_ner
| [
6738,
277,
461,
83,
313,
388,
13,
34033,
13,
85,
18738,
1330,
17156,
11,
300,
76,
11,
3384,
4487,
62,
1008,
198
] | 2.545455 | 22 |
import requests
from projects.sachinl0har.chatbots.helper import say, run
if __name__ == "__main__":
run("Acro", acro)
| [
11748,
7007,
198,
198,
6738,
4493,
13,
82,
620,
259,
75,
15,
9869,
13,
17006,
42478,
13,
2978,
525,
1330,
910,
11,
1057,
628,
628,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
1057,
7203,
12832,
... | 2.612245 | 49 |
import numpy as np
def sog_func(x, p, exp_const, store_x0, matrix_test, store_c):
"""
Compute Sum of Gaussians function at a given point with given arguments.
Parameters
----------
point : 1-D array with shape (d, )
A point used to evaluate the function.
p : integer
Number of local minima.
sigma_sq: float or integer
Value of sigma squared.
store_x0 : 2-D array with shape (p, d).
matrix_test : 3-D array with shape (p, d, d).
store_c : 1-D array with shape (p, ).
Returns
-------
float(-function_val) : float
Function value.
"""
d = x.shape[0]
f_val = (store_c.reshape(p, 1, 1) @
(np.exp((-1 / (2 * exp_const)) *
np.transpose((x - store_x0).reshape(p, d, 1), (0, 2, 1)) @
matrix_test @ (x-store_x0).reshape(p, d, 1))))
sum_f_val = np.sum(f_val, axis=0)
return float(-sum_f_val)
def sog_grad(x, p, exp_const, store_x0, matrix_test, store_c):
"""
Compute Sum of Gaussians gradient at a given point with given arguments.
Parameters
----------
point : 1-D array with shape (d, )
A point used to evaluate the gradient.
p : integer
Number of local minima.
sigma_sq: float or integer
Value of sigma squared.
store_x0 : 2-D array with shape (p, d).
matrix_test : 3-D array with shape (p, d, d).
store_c : 1-D array with shape (p, ).
Returns
-------
total_gradient : 1-D array with shape (d, )
Gradient at point.
"""
d = x.shape[0]
grad_val_1 = ((store_c.reshape(p, 1, 1) * (1/exp_const))
@ np.exp((-1 / (2 * exp_const)) *
np.transpose((x - store_x0).reshape(p, d, 1), (0, 2, 1)) @
matrix_test @ (x-store_x0).reshape(p, d, 1)))
grad_val_2 = (matrix_test @ (x-store_x0).reshape(p, d, 1))
gradient = grad_val_1 * grad_val_2
sum_g_val = np.sum(gradient, axis=0)
return sum_g_val.reshape(d,) | [
11748,
299,
32152,
355,
45941,
628,
198,
4299,
264,
519,
62,
20786,
7,
87,
11,
279,
11,
1033,
62,
9979,
11,
3650,
62,
87,
15,
11,
17593,
62,
9288,
11,
3650,
62,
66,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
3082,
1133... | 2.105749 | 974 |
import tensorflow as tf
import tensorflow.keras.backend as K
from skatingAI.nets.keypoint.KPDetectorBase import KPDetectorBase
layers = tf.keras.layers
BN_MOMENTUM = 0.01
| [
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
11192,
273,
11125,
13,
6122,
292,
13,
1891,
437,
355,
509,
198,
198,
6738,
33051,
20185,
13,
45938,
13,
2539,
4122,
13,
42,
5760,
316,
9250,
14881,
1330,
509,
5760,
316,
9250,
14881,
1... | 2.651515 | 66 |
__author__ = 'Matt Wilson'
__copyright__ = 'Copyright 2019-2020, Synesis Information Systems, Copyright 2019, Synesis Software'
__credits__ = [
'Garth Lancaster',
'Matt Wilson',
]
__email__ = 'matthew@synesis.com.au'
__license__ = 'BSD-3-Clause'
__maintainer__ = 'Matt Wilson'
__status__ = 'Beta'
__version__ = '0.6.1'
from .conrep import abort, report
from .log import enable_logging, is_logging_enabled, log
from .program_name import *
from .severity import *
from .trace import enable_tracing, is_tracing_enabled, trace
| [
198,
834,
9800,
834,
220,
220,
220,
220,
220,
796,
220,
220,
705,
13448,
8127,
6,
198,
834,
22163,
4766,
834,
220,
220,
796,
220,
220,
705,
15269,
13130,
12,
42334,
11,
1632,
2516,
271,
6188,
11998,
11,
15069,
13130,
11,
1632,
2516,... | 2.458333 | 240 |
import numpy as np
from collections import Counter
| [
11748,
299,
32152,
355,
45941,
198,
6738,
17268,
1330,
15034,
628,
628,
628
] | 4.307692 | 13 |
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from . import models, serializers
from v2x_solution.road import models as road_models
# find event
# select event
# update event
# delete event
| [
6738,
1334,
62,
30604,
13,
33571,
1330,
3486,
3824,
769,
198,
6738,
1334,
62,
30604,
13,
26209,
1330,
18261,
198,
6738,
1334,
62,
30604,
1330,
3722,
198,
6738,
764,
1330,
4981,
11,
11389,
11341,
198,
6738,
410,
17,
87,
62,
82,
2122,
... | 3.411765 | 85 |
# -*- coding: utf-8 -*-
###############################################################################
# Author: Gérald Fenoy, gerald.fenoy@cartoworks.com
# Copyright (c) 2010-2014, Cartoworks Inc.
###############################################################################
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
################################################################################
import sys
import warnings
warnings.simplefilter("ignore",DeprecationWarning)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
29113,
29113,
7804,
4242,
21017,
198,
2,
220,
6434,
25,
220,
220,
402,
2634,
1373,
67,
19426,
726,
11,
308,
12573,
13,
41037,
726,
31,
26674,
322,
3647,
13,
785,
198,
... | 3.984252 | 381 |
import pytest
from pyannote.core import Segment
| [
11748,
12972,
9288,
198,
198,
6738,
12972,
1236,
1258,
13,
7295,
1330,
1001,
5154,
198
] | 3.266667 | 15 |
# -*- coding: utf-8 -*-
from django import forms
from django.utils.translation import ugettext_lazy as _
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
42625,
14208,
1330,
5107,
198,
6738,
42625,
14208,
13,
26791,
13,
41519,
1330,
334,
1136,
5239,
62,
75,
12582,
355,
4808,
628
] | 2.864865 | 37 |
from import_export.admin import ImportExportModelAdmin
from django.contrib import admin
from django.contrib.auth import get_user_model
from django.contrib.auth.admin import UserAdmin
from .forms import CustomUserCreationForm, CustomUserChangeForm
from .models import *
admin.site.register(CustomUser, CustomUserAdmin)
admin.site.register(PDFForm)
admin.site.register(Field,FieldAdmin)
admin.site.register(PDFFormField,PDFFormFieldAdmin)
admin.site.register(UserProfile,UserProfileAdmin)
admin.site.register(GeneratedPDF)
| [
6738,
1330,
62,
39344,
13,
28482,
1330,
17267,
43834,
17633,
46787,
198,
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
1330,
651,
62,
7220,
62,
19849,
198,
6738,
42625,
14208,
13,
364... | 3.283951 | 162 |
import argparse
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
import math
import operator
import re
def make_windows(length, window, slide):
"""
For a given length, return an iterator for intervals of length `window` with
a slide of `slide`.
>>> list(make_windows(8, 4, 0))
[(0, 4), (4, 8)]
>>> list(make_windows(8, 5, 0))
[(0, 5), (5, 8)]
>>> list(make_windows(8, 8, 0))
[(0, 8)]
>>> list(make_windows(8, 4, 2))
[(0, 4), (2, 6), (4, 8)]
>>> list(make_windows(8, 5, 2))
[(0, 5), (2, 7), (4, 8)]
>>> list(make_windows(7, 8, 0))
[(0, 7)]
"""
if slide == 0:
windows = xrange(0, length, window)
else:
windows = xrange(0, length, slide)
for start in windows:
yield (start, min(start + window, length))
# At most, only output one window at the end of the sequence.
if length <= start + window:
break
def fragment_sequence(sequence, window, slide=0):
"""Fragment a given sequence to the requested window length without a slide.
>>> fragment_sequence("ACTGACTG", 4, 0)
['ACTG', 'ACTG']
>>> fragment_sequence("ACTGACTG", 5, 0)
['ACTGA', 'CTG']
>>> fragment_sequence("ACTGACTG", 8, 0)
['ACTGACTG']
Fragment a given sequence to the requested window length with a slide.
>>> fragment_sequence("ACTGACTG", 4, 2)
['ACTG', 'TGAC', 'ACTG']
>>> fragment_sequence("ACTGACTG", 5, 2)
['ACTGA', 'TGACT', 'ACTG']
Remove gap bases from input sequence and return the longest non-gap
fragment. Don't return any sequence if the entire input is gap bases.
>>> fragment_sequence("NNNNNNNN", 4, 2)
[]
>>> fragment_sequence("ACTGNNNN", 4, 2)
['ACTG']
>>> fragment_sequence("ACTGNNTA", 4, 2)
['ACTG']
>>> fragment_sequence("ACNNACTA", 4, 2)
['ACTA']
"""
# Check sequence for gap bases and keep the longest of the non-gap pieces in
# the sequence.
sequences = []
sequence_pieces = [(piece, len(piece)) for piece in re.split("N+", sequence) if len(piece) > 0]
if len(sequence_pieces) > 0:
sorted_sequence_pieces = sorted(sequence_pieces, key=operator.itemgetter(1), reverse=True)
sequence = sorted_sequence_pieces[0][0]
sequence_length = sorted_sequence_pieces[0][1]
if sequence_length > window:
# Split sequence into two or more reads of the given length.
window_ranges = make_windows(sequence_length, window, slide)
sequences = [sequence[start:end] for start, end in window_ranges]
else:
# Output the sequence as is.
sequences = [sequence]
return sequences
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("input", help="FASTA sequences to fragment")
parser.add_argument("output", help="fragmented FASTA sequences")
parser.add_argument("window", type=int, help="length to fragment each FASTA sequence to")
parser.add_argument("--slide", type=int, default=0, help="length to fragment each FASTA sequence to")
args = parser.parse_args()
with open(args.output, "w") as oh:
for seq_record in SeqIO.parse(args.input, "fasta"):
sequences = fragment_sequence(str(seq_record.seq), args.window, args.slide)
records = []
for i in xrange(len(sequences)):
records.append(SeqRecord(Seq(sequences[i]), id="%s_%i" % (seq_record.id, i), description=""))
SeqIO.write(records, oh, "fasta")
| [
11748,
1822,
29572,
198,
6738,
16024,
1330,
1001,
80,
9399,
198,
6738,
16024,
13,
4653,
80,
1330,
1001,
80,
198,
6738,
16024,
13,
4653,
80,
23739,
1330,
1001,
80,
23739,
198,
11748,
10688,
198,
11748,
10088,
198,
11748,
302,
628,
198,
... | 2.521524 | 1,417 |
"""
This part communicates with the database
Author: Max Marshall
Project: Fridge Tracker
"""
import datetime
import math
import firebase_admin
from firebase_admin import credentials
from firebase_admin import firestore
import readings as read
import light_sensor as ls
import mario
cred = None
try:
cred = credentials.Certificate("/home/pi/Smart_Fridge/smartfridge-28fdd-firebase-adminsdk-cn2d2-a24a5cb16c.json")
except FileNotFoundError:
pass
try:
cred = credentials.Certificate("D:\\Python Scripts\\GitHub\\Smart_Fridge\\"
"smartfridge-28fdd-firebase-adminsdk-cn2d2-a24a5cb16c.json")
except FileNotFoundError:
pass
try:
cred = credentials.Certificate("C:\\Users\\maxtm\\Desktop\\Python Projects\\GitHub\\Smart_Fridge\\"
"smartfridge-28fdd-firebase-adminsdk-cn2d2-a24a5cb16c.json")
except FileNotFoundError:
pass
firebase_admin.initialize_app(cred)
db = firestore.client()
# The main chunk of the program
door_alarm = 0
power_alarm = 0
time_since_alarm = 1000
time_since_feedback = 9600
# Guard
if __name__ == '__main__':
print("Starting...\n")
while True:
door_alarm, power_alarm, temps, time1 = tick_forward(door_alarm, power_alarm)
update_firebase("inventory", temps, time1)
if time_since_feedback >= 9600:
time_since_feedback = 0
grave_items = []
data_ref = db.collection(u'{}'.format("graveyard"))
docs = data_ref.stream()
for doc in docs:
grave_items.append((doc.id, doc.to_dict()))
for item in grave_items:
exp_date = float(item[1]['expDate'])
add_date = float(item[1]['addDate'])
if (exp_date - add_date) < 100000:
info = dict()
info['name'] = item[1]['name']
info['feedback'] = "You seem to be wasting food. Try buying fewer/less {}(s)".format(item[1]['name'])
f_data_ref = db.collection(u'{}'.format('feedback')).document(u'{}'.format(item[0]))
f_data_ref.set(info, merge=True)
| [
37811,
201,
198,
1212,
636,
48556,
351,
262,
6831,
201,
198,
201,
198,
13838,
25,
5436,
13606,
201,
198,
16775,
25,
1305,
3130,
26885,
201,
198,
37811,
201,
198,
11748,
4818,
8079,
201,
198,
11748,
10688,
201,
198,
11748,
2046,
8692,
... | 2.092697 | 1,068 |
import os,json,sys,time,shutil
from datetime import datetime
if len(sys.argv)<2:
print("nombre d'argument incorrect")
print("utiliser: ")
print(" python globalPipelineISIS.py obsId1 obsId2 ...")
print(" python globalPipelineISIS.py range obsIdStart obsIdStop ")
exit(1)
print("load configuration")
json_text=open("../../config/config.json").read()
config=json.loads(json_text)
path=config['path']
racineArchive=path['archive']
#source Calib path
PathPipelineSrcCalib=path['eShelPipeFastWork']+'/calib'
#destination path for processed
eShelPipeProcessedRoot=path['eShelPipeFastWork']+'/processed'
#work path
eShelPipeFastWork=path['eShelPipeFastWork']+'/work'
PathObservationJson=eShelPipeFastWork+'/observation.json'
PathObservationINI=eShelPipeFastWork+"/observation.ini"
loopSleepTime=1
print("process command line argument")
if len(sys.argv)==2:
obsIds=[int(sys.argv[1])]
else:
if sys.argv[1]=='range':
obsIds=range(int(sys.argv[2]),int(sys.argv[3])+1)
else:
obsIds=[]
for arg in sys.argv[1:]:
obsIds.append(int(arg))
print("Selected ObsId=",obsIds)
print("************")
print("start loop process observations")
for obsId in obsIds:
print("***observation ID = ",obsId)
#dst path
now = datetime.now()
strDate= now.strftime("%Y-%m-%d-%H-%M-%S")
eShelPipeProcessed=eShelPipeProcessedRoot+'/'+strDate
print("create target processed directory",eShelPipeProcessed)
os.mkdir(eShelPipeProcessed)
print("clean directory eShelPipeFastWork=",eShelPipeFastWork)
for f in os.listdir(eShelPipeFastWork):
p=eShelPipeFastWork+'/'+f
if os.path.isfile(p):
#print("file", f)
os.remove(p)
if os.path.isdir(p):
#print("dir",f)
shutil.rmtree(p)
orgPath=os.getcwd()
os.chdir("..")
print("Fill observation RAW Calib files")
os.system("python fillObservationRawCalib.py "+str(obsId))
print("Get Raw spectrum")
os.chdir("../base")
os.system("python get-raw-obs.py"+" "+str(obsId)+" "+eShelPipeFastWork)
os.chdir(orgPath)
print("copy calibration files directory from",PathPipelineSrcCalib,"to",eShelPipeFastWork)
shutil.copytree(PathPipelineSrcCalib,eShelPipeFastWork+'/calib')
if not os.path.isfile(PathObservationJson):
print("Error cannot found Json "+PathObservationJson)
continue
convertJSONtoINI(PathObservationJson,PathObservationINI)
print("rename calibration files")
renameCalib(eShelPipeFastWork,"TUNGSTEN","tung","calib")
renameCalib(eShelPipeFastWork,"CALIB","thor","calib")
renameCalib(eShelPipeFastWork,"LED","led","calib")
print("Start ISIS pipeline")
os.system("actionna.bat")
archiveProcessedFiles(eShelPipeFastWork,eShelPipeProcessed)
print("END of ISIS pipeline")
print("**** Integrate processed spectrum in data base")
now = datetime.now()
strDate = now.strftime("%Y-%m-%d-%H-%M-%S")
logFile = racineArchive+"/log/in.processed."+strDate+".log"
errFile = racineArchive+"/log/in.processed."+strDate+".err"
cmdProcess="python in-processed.py "+eShelPipeProcessedRoot+" >"+logFile+" 2> "+errFile
orgPath=os.getcwd()
os.chdir('../../base')
print(cmdProcess)
os.system(cmdProcess)
os.chdir(orgPath)
print("delete iSIS work files")
shutil.rmtree(eShelPipeProcessed)
| [
198,
198,
11748,
28686,
11,
17752,
11,
17597,
11,
2435,
11,
1477,
22602,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
198,
361,
18896,
7,
17597,
13,
853,
85,
8,
27,
17,
25,
198,
197,
4798,
7203,
77,
2381,
260,
288,
6,
49140,
1149... | 2.363317 | 1,423 |
import json
import pathlib
import re
from xml.etree import ElementTree
from carim.configuration import decorators
from carim.global_resources import deploydir, mission, resourcesdir
from carim.util import file_writing
@decorators.register
@decorators.mission
| [
11748,
33918,
198,
11748,
3108,
8019,
198,
11748,
302,
198,
6738,
35555,
13,
316,
631,
1330,
11703,
27660,
198,
198,
6738,
1097,
320,
13,
11250,
3924,
1330,
11705,
2024,
198,
6738,
1097,
320,
13,
20541,
62,
37540,
1330,
6061,
15908,
11,... | 3.690141 | 71 |
from scipy.optimize import minimize, basinhopping
import numpy as np
| [
6738,
629,
541,
88,
13,
40085,
1096,
1330,
17775,
11,
1615,
20327,
2105,
198,
11748,
299,
32152,
355,
45941,
628
] | 3.5 | 20 |
from bokeh.plotting import figure, output_file, show
import numpy as np
if __name__ == '__main__':
x = np.linspace(0, 2*np.pi, 100)
y_lin = x
y_sqrt = np.sqrt(x)
y_log = np.log(x)
y_sin = np.sin(x)
output_file("plot.html")
fig = figure(title="Simple Plots")
fig.line(x=x, y=y_lin, color='red', legend="Linear")
fig.line(x=x, y=y_sqrt, color='blue', legend="Square Root")
fig.line(x=x, y=y_log, color='black', legend="Log")
fig.line(x=x, y=y_sin, color='green', legend="Sin")
fig.legend.location = "top_left"
fig.legend.click_policy="hide"
show(fig) | [
6738,
1489,
365,
71,
13,
29487,
889,
1330,
3785,
11,
5072,
62,
7753,
11,
905,
198,
11748,
299,
32152,
355,
45941,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
197,
87,
796,
45941,
13,
21602,
10223,
7,
15,... | 2.258964 | 251 |
import os
from datetime import datetime
from flask import flash
from flask_user import current_user
from pygate_grpc.client import PowerGateClient
from pygate_grpc.ffs import get_file_bytes, bytes_to_chunks, chunks_to_bytes
from google.protobuf.json_format import MessageToDict
from deplatformr import app, db
from deplatformr.models.filecoin_models import Ffs, Logs, Files, Wallets
def create_ffs():
"""
Create a new Powergate Filecoin Filesystem (FFS)
"""
powergate = PowerGateClient(app.config["POWERGATE_ADDRESS"])
ffs = powergate.ffs.create()
creation_date = datetime.now().replace(microsecond=0)
# TODO salt token id
filecoin_file_system = Ffs(
ffs_id=ffs.id, token=ffs.token, creation_date=creation_date, user_id=current_user.id,
)
db.session.add(filecoin_file_system)
# Create new FFS wallet and add entry in log table
address = powergate.ffs.addrs_list(ffs.token)
obj = MessageToDict(address)
wallet = obj["addrs"][0]["addr"]
wallet = Wallets(created=creation_date,
address=wallet,
ffs=ffs.id,
user_id=current_user.id,)
db.session.add(wallet)
db.session.commit()
new_ffs = Ffs.query.filter_by(ffs_id=ffs.id).first()
return new_ffs
| [
11748,
28686,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
42903,
1330,
7644,
198,
6738,
42903,
62,
7220,
1330,
1459,
62,
7220,
198,
6738,
12972,
10494,
62,
2164,
14751,
13,
16366,
1330,
4333,
22628,
11792,
198,
6738,
12972,
10494,... | 2.457306 | 527 |
from PySide import QtGui, QtCore
from PySide.QtCore import QThread, QDataStream
from PySide.QtNetwork import QTcpServer, QHostAddress
import sys
from guerilla_importer import MSToGuerillaWorker
| [
6738,
9485,
24819,
1330,
33734,
8205,
72,
11,
33734,
14055,
198,
6738,
9485,
24819,
13,
48,
83,
14055,
1330,
1195,
16818,
11,
1195,
6601,
12124,
198,
6738,
9485,
24819,
13,
48,
83,
26245,
1330,
1195,
51,
13155,
10697,
11,
1195,
17932,
... | 2.786667 | 75 |
import pathlib as pl
import os
def prepend_object_name(obj: str):
"""Rename by prepending to file names, assuming files are in sheep/ and coke/
Args:
obj (str): Text of string to prepend at start of image name
"""
for path in pl.Path(obj).iterdir():
if path.is_file():
file_name = path.stem
file_ext = path.suffix
if obj not in file_name:
if file_ext == ".png":
directory = path.parent
new_name = obj + file_name + file_ext
path.rename(pl.Path(directory, new_name))
if __name__ == "__main__":
data_path = pl.Path("data")
objects_to_label = []
# Load class names
with open(data_path / "obj.names", "r") as f_obj:
objects_to_label = [line.strip() for line in f_obj.readlines()]
# Prepend class names to file names to avoid clashes
for obj in objects_to_label:
if pl.Path(obj).is_dir():
prepend_object_name(obj)
# Append everything to train.txt
with open(data_path / "train.txt", "w") as f_train:
for obj in objects_to_label:
item_num = []
if pl.Path(data_path/"obj").is_dir():
for path in pl.Path(data_path/"obj").iterdir():
if obj in path.stem and path.suffix == ".png":
num = str(path.stem).strip(obj)
item_num.append(int(num))
item_num.sort()
for i in item_num:
line = "build/darknet/x64/data/obj/" + obj + str(i) + ".png\n"
f_train.write(line)
# for path in pl.Path(data_path / "obj").iterdir():
# if path.suffix == ".txt":
# if "sheep" not in str(path):
# if "coke" not in str(path):
# file_name = path.stem
# png_sheep = pl.Path(data_path/"obj"/("sheep" + file_name + ".png"))
# png_coke = pl.Path(data_path/"obj"/("sheep" + file_name + ".png"))
# if png_coke.is_file():
# new_name = "coke" + file_name + path.suffix
# path.rename(pl.Path(path.parent, new_name))
# print(new_name)
# elif png_sheep.is_file():
# new_name = "sheep" + file_name + path.suffix
# path.rename(pl.Path(path.parent, new_name))
# print(new_name)
| [
11748,
3108,
8019,
355,
458,
198,
11748,
28686,
628,
198,
4299,
3143,
437,
62,
15252,
62,
3672,
7,
26801,
25,
965,
2599,
198,
220,
220,
220,
37227,
49,
12453,
416,
3143,
1571,
284,
2393,
3891,
11,
13148,
3696,
389,
287,
15900,
14,
2... | 1.886057 | 1,334 |
# -----------------------------------
# import
# -----------------------------------
import os
import sys
import argparse
from PySide2.QtWidgets import QApplication, QMainWindow, QAction, QWidget, QStyle
from PySide2.QtWidgets import QStackedWidget, QSplitter, QTextEdit, QTreeWidget, QTreeWidgetItem
from PySide2.QtWidgets import QHBoxLayout, qApp, QFileDialog
from PySide2.QtGui import QTextCursor, QFont, QColor, QIcon
from PySide2.QtCore import QSize, Qt, Slot
from common.exif_reader import ExifReader
print("start exif_viewer")
# -----------------------------------
# define
# -----------------------------------
# -----------------------------------
# function
# -----------------------------------
# -----------------------------------
# class
# -----------------------------------
# -----------------------------------
# main
# -----------------------------------
if __name__ == "__main__":
main(get_args())
| [
2,
20368,
6329,
198,
2,
1330,
198,
2,
20368,
6329,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
1822,
29572,
198,
198,
6738,
9485,
24819,
17,
13,
48,
83,
54,
312,
11407,
1330,
1195,
23416,
11,
1195,
13383,
27703,
11,
1195,
12502,... | 3.883817 | 241 |
HEARTBEAT_INTERVAL = 20
DISCONNECTED_THRESHOLD = HEARTBEAT_INTERVAL * 2
HSTUDIO_HOME = "~/.hstudio" | [
13909,
7227,
12473,
1404,
62,
41358,
23428,
796,
1160,
198,
26288,
10943,
48842,
1961,
62,
4221,
19535,
39,
15173,
796,
11179,
7227,
12473,
1404,
62,
41358,
23428,
1635,
362,
198,
198,
39,
2257,
8322,
9399,
62,
39069,
796,
366,
93,
1175... | 2.222222 | 45 |
import sys
sys.setrecursionlimit(10**7)
N = int(sys.stdin.readline().rstrip())
MOD = 10**4 + 7
ans = tribonacci(N)[2]
print(ans)
| [
11748,
25064,
201,
198,
201,
198,
17597,
13,
2617,
8344,
24197,
32374,
7,
940,
1174,
22,
8,
201,
198,
201,
198,
45,
796,
493,
7,
17597,
13,
19282,
259,
13,
961,
1370,
22446,
81,
36311,
28955,
201,
198,
201,
198,
33365,
796,
838,
1... | 2.013699 | 73 |
# -*- coding: utf-8 -*-
"""
@file
@brief Various functions about conversions.
"""
import datetime
def str2datetime(dt, format=None):
"""
convert a string into a datetime object, it can be:
- 2013-05-24 18:49:46
- 2013-05-24 18:49:46.568
@param dt string
@param format format for the conversion, the most complete one is
``%Y-%m-%d %H:%M:%S.%f``
which you get by default
@rtype datetime
@return datetime
"""
if "+" in dt:
dt = dt.split("+")[0].strip()
elif " -" in dt:
dt = dt.split(" -")[0].strip()
if format is None:
if " " in dt:
if "." in dt:
return datetime.datetime.strptime(dt, "%Y-%m-%d %H:%M:%S.%f")
else:
return datetime.datetime.strptime(dt, "%Y-%m-%d %H:%M:%S")
elif "T" in dt:
if "." in dt:
return datetime.datetime.strptime(dt, "%Y-%m-%dT%H:%M:%S.%f")
else:
return datetime.datetime.strptime(dt, "%Y-%m-%dT%H:%M:%S")
else:
return datetime.datetime.strptime(dt, "%Y-%m-%d")
else:
return datetime.datetime.strptime(dt, format)
def datetime2str(dt):
"""
Converts a datetime into a string.
@param dt datetime
@return string
"""
return dt.strftime("%Y-%m-%d %H:%M:%S")
def timestamp_to_datetime(timestamp):
"""
convert a timestamp into a datetime
@param timestamp timestamp
@rtype datetime
"""
return datetime.datetime.utcfromtimestamp(timestamp)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
31,
7753,
198,
31,
65,
3796,
26386,
5499,
546,
32626,
13,
198,
37811,
198,
11748,
4818,
8079,
628,
198,
4299,
965,
17,
19608,
8079,
7,
28664,
11,
5794,
28,... | 1.843956 | 910 |
n=int(input("enter a number: "))
for i in range(n,0,-1):
for j in range(n,0,-1):
if(j>i):
print(j,' ',sep='',end="")
else:
print(i,' ',sep='',end="")
print()
'''
output:
enter a number: 6
6 6 6 6 6 6
6 5 5 5 5 5
6 5 4 4 4 4
6 5 4 3 3 3
6 5 4 3 2 2
6 5 4 3 2 1
'''
| [
77,
28,
600,
7,
15414,
7203,
9255,
257,
1271,
25,
366,
4008,
198,
1640,
1312,
287,
2837,
7,
77,
11,
15,
12095,
16,
2599,
198,
220,
329,
474,
287,
2837,
7,
77,
11,
15,
12095,
16,
2599,
198,
220,
220,
611,
7,
73,
29,
72,
2599,
... | 1.913333 | 150 |
import lvgl as lv
import styles
if __name__ == '__main__':
lv.init()
scr = lv.obj()
lv.scr_load(scr)
lv_arc(scr)
| [
11748,
300,
85,
4743,
355,
300,
85,
198,
11748,
12186,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
300,
85,
13,
15003,
3419,
198,
220,
220,
220,
6040,
796,
300,
85,
13,
26801,
3419,
198,
... | 1.857143 | 70 |
#!/usr/bin/python
import argparse
import sys
import glob
import os
import re
from subprocess import call
import itertools
main() | [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
11748,
1822,
29572,
198,
11748,
25064,
198,
11748,
15095,
198,
11748,
28686,
198,
11748,
302,
198,
6738,
850,
14681,
1330,
869,
198,
11748,
340,
861,
10141,
198,
12417,
3419
] | 3.459459 | 37 |
#
# Copyright (c) 2017 by QA Cafe.
# All Rights Reserved.
#
"""Module for processing CDRouter datetime strings."""
from datetime import datetime
from marshmallow import fields
from marshmallow.exceptions import ValidationError
| [
2,
198,
2,
15069,
357,
66,
8,
2177,
416,
1195,
32,
26965,
13,
198,
2,
1439,
6923,
33876,
13,
198,
2,
198,
198,
37811,
26796,
329,
7587,
6458,
49,
39605,
4818,
8079,
13042,
526,
15931,
198,
198,
6738,
4818,
8079,
1330,
4818,
8079,
... | 3.709677 | 62 |
from BeautifulSoup import BeautifulSoup
import urllib2, urllib, threading, sys
url = "http://apk.hiapk.com/apps"
down_url = "http://apk.hiapk.com/appdown/"
info_url = "http://apk.hiapk.com/appinfo/"
num = 11
page = 5
threads = []
for mon in xrange(1, num):
print '[+] Thread %d is start' %mon
t = threading.Thread(target=makelist, args=(mon,))
threads.append(t)
t.start()
for t in threads:
t.join()
| [
6738,
23762,
50,
10486,
1330,
23762,
50,
10486,
198,
11748,
2956,
297,
571,
17,
11,
2956,
297,
571,
11,
4704,
278,
11,
25064,
198,
198,
6371,
796,
366,
4023,
1378,
499,
74,
13,
5303,
499,
74,
13,
785,
14,
18211,
1,
198,
2902,
62,
... | 2.39548 | 177 |
"""
Tests for Master thesis
"""
import cfg
from TestSuite import runTests, SummaryType, TestUtils as TU
import re
import operator as op
if __name__ == "__main__":
print("Execute test file " + __file__)
main()
| [
37811,
198,
51,
3558,
329,
5599,
21554,
198,
37811,
198,
198,
11748,
30218,
70,
198,
6738,
6208,
5606,
578,
1330,
1057,
51,
3558,
11,
21293,
6030,
11,
6208,
18274,
4487,
355,
309,
52,
198,
11748,
302,
198,
11748,
10088,
355,
1034,
198... | 2.894737 | 76 |
import os
from xml.etree import ElementTree as ET
import pytest
from nexsan_exporter import nexsan
@pytest.fixture(params=['opstats1.xml', 'opstats2.xml'])
def opstats_xml(request):
'''
Returns a file-like object for the Collector to consume.
'''
test_dir, _ = os.path.splitext(request.module.__file__)
filename = request.param
with open(os.path.join(test_dir, filename)) as f:
return ET.parse(f)
def test_opstats(opstats_xml):
'''
Tests that real-world data parses.
'''
metrics = list(nexsan.Collector(opstats_xml).collect())
assert 0 < len(metrics)
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.fixture
| [
11748,
28686,
198,
6738,
35555,
13,
316,
631,
1330,
11703,
27660,
355,
12152,
198,
198,
11748,
12972,
9288,
198,
198,
6738,
497,
87,
12807,
62,
1069,
26634,
1330,
497,
87,
12807,
198,
198,
31,
9078,
9288,
13,
69,
9602,
7,
37266,
28,
... | 2.454839 | 310 |
# Echo server program
import socket
import cPickle as pickle
import sys
import os
HOST = '' # Symbolic name meaning all available interfaces
PORT = 6000 # Arbitrary non-privileged port
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((HOST, PORT))
s.listen(1)
SESSION_BEGIN = 0
SESSION_IN_PROGRESS = 1
SESSION_COMPLETE = 2
#messages for detecting acks or nacks
ACK_CHUNK = 1
NACK_CHUNK = 2
ACK_FILE = 3
NACK_FILE = 4
TIMES = 0
FILE_SIZE = None
CHUNK_SIZE = None
FILENAME = "my_file_server.txt"
file_data = ""
#deleting the file if it does exist
try:
os.remove(FILENAME)
except OSError:
pass
#the file variable
f = None
conn, addr = s.accept()
print 'Connected by', addr
while 1:
data = conn.recv(1024)
try:
u_data = pickle.loads(data)
if(u_data['message_type'] == SESSION_BEGIN):
if(u_data['file_size']<u_data['chunk_size']):
u_data["chunk_ack"] = NACK_CHUNK
u_data["file_ack"] = NACK_FILE
else:
FILE_SIZE = u_data['file_size']
CHUNK_SIZE = u_data['chunk_size']
u_data["chunk_ack"] = ACK_CHUNK
u_data["file_ack"] = NACK_FILE
TIMES = u_data["times"]
f= open(FILENAME,"w+")
f.write(u_data["data"])
TIMES = TIMES + 1
print 'Session begins, wrote data of size{0} and acked chunk'.format(CHUNK_SIZE)
elif(u_data['message_type'] == SESSION_IN_PROGRESS):
if u_data['file_size']<u_data['chunk_size'] or u_data['file_size']!=FILE_SIZE or u_data['chunk_size']!=CHUNK_SIZE or u_data['times']!=TIMES:
u_data["chunk_ack"] = NACK_CHUNK
u_data["file_ack"] = NACK_FILE
else:
u_data["chunk_ack"] = ACK_CHUNK
u_data["file_ack"] = NACK_FILE
TIMES = TIMES + 1
f.write(u_data["data"]);
print 'Received and wrote data of size{0} and acked chunk'.format(CHUNK_SIZE)
elif(u_data['message_type'] == SESSION_COMPLETE):
if u_data['file_size']<u_data['chunk_size'] or u_data['file_size']!=FILE_SIZE or u_data['chunk_size']!=CHUNK_SIZE or u_data['times']!=TIMES:
u_data["chunk_ack"] = NACK_CHUNK
u_data["file_ack"] = NACK_FILE
else:
f.close()
statinfo = os.stat(FILENAME)
if(statinfo.st_size != FILE_SIZE):
u_data["file_ack"] = NACK_FILE
else:
u_data["file_ack"] = ACK_FILE
print 'Session complete, wrote file of size {0} bytes with the name {1}'.format(FILE_SIZE, FILENAME)
else:
u_data["chunk_ack"] = NACK_CHUNK
u_data["file_ack"] = NACK_FILE
#deleting these 3 values from dict in all messages
del u_data['times']
del u_data['chunk_size']
del u_data['data']
except EOFError:
a = 1
#print "Ignore this error, it does come in unpickling, but does not affect the logic of the program"
#print 'Received'
if not data: break
conn.sendall(pickle.dumps(u_data))
conn.close() | [
2,
21455,
4382,
1430,
198,
11748,
17802,
198,
11748,
220,
269,
31686,
293,
355,
2298,
293,
198,
11748,
25064,
198,
198,
11748,
28686,
198,
198,
39,
10892,
796,
10148,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
... | 1.933214 | 1,677 |
import os
from nipype.interfaces.base import (CommandLine, CommandLineInputSpec,
TraitedSpec)
from nipype.interfaces.base import traits, File
# T1xT2BiasFieldCorrection
class T1xT2BiasFieldCorrection(CommandLine):
"""
Description: Bias field correction using T1w & T2w images.
Provides an attempt of brain extration if wanted.
Inputs:
Mandatory:
t1_file
Whole-head T1w image
t2_file
Whole-head T2w image (use -aT2 if T2w image is not in the T1w
space)
Optional:
os
Suffix for the bias field corrected images (default is
"_debiased")
aT2
Will coregrister T2w to T1w using flirt. Output will have the\
suffix provided. Will only work for spatially close images.
opt_as
Suffix for T2w to T1w registration ("-in-T1w" if not specified)
s
size of gauss kernel in mm when performing mean filtering \
(default=4)argstr="-s %d", mandatory=False)
b
Brain mask file. Will also output bias corrected brain files \
with the format "output_prefix_brain.nii.gz"
bet
Will try to "smart" BET the anat files to get a brain mask:
n = the number of iterations BET will be run to find center of
gravity (default=0, will not BET if option -b has been
specified).
bs
Suffix for the BET masked images (default is "_BET")
f
-f options of BET: fractional intensity threshold (0->1); \
default=0.5; smaller values give larger brain outline
estimates
g
-g options of BET:\
vertical gradient in fractional intensity threshold
(-1->1); default=0; positive values give larger brain
outline at bottom, smaller at top
k
Will keep temporary files
p
Prefix for running FSL functions\
(can be a path or just a prefix)
Outputs:
t1_debiased_file
debiased T1
t2_debiased_file
debiased T2
t2_coreg_file
T2 on T1
t1_debiased_brain_file
debiased bet T1
t2_debiased_brain_file
debiased bet T2
debiased_mask_file
debiased bet mask
"""
input_spec = T1xT2BiasFieldCorrectionInputSpec
output_spec = T1xT2BiasFieldCorrectionOutputSpec
package_directory = os.path.dirname(os.path.abspath(__file__))
_cmd = 'bash {}/../bash/T1xT2BiasFieldCorrection.sh'.format(
package_directory)
| [
11748,
28686,
198,
6738,
299,
541,
2981,
13,
3849,
32186,
13,
8692,
1330,
357,
21575,
13949,
11,
9455,
13949,
20560,
22882,
11,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,... | 1.98632 | 1,462 |
"""
WSGI config for callblocker project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.conf import settings
from django.core.wsgi import get_wsgi_application
from callblocker import blocker
from callblocker.blocker import BootstrapMode
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'conf.settings.production')
blocker.bootstrap_mode(
BootstrapMode.FAKE_SERVER
if settings.MODEM_USE_FAKE
else BootstrapMode.SERVER
)
application = get_wsgi_application()
| [
37811,
198,
19416,
18878,
4566,
329,
869,
9967,
263,
1628,
13,
198,
198,
1026,
32142,
262,
25290,
18878,
869,
540,
355,
257,
8265,
12,
5715,
7885,
3706,
7559,
31438,
15506,
13,
198,
198,
1890,
517,
1321,
319,
428,
2393,
11,
766,
198,
... | 3 | 211 |
import os
from unittest import TestCase
from unittest.async_case import IsolatedAsyncioTestCase
from aiohttp import ClientSession
from polygonscan.core.async_client import AsyncClient
from polygonscan.core.sync_client import SyncClient
from requests import Session
CONFIG_PATH = "polygon/configs/stable.json"
API_KEY = os.environ["API_KEY"]
| [
11748,
28686,
198,
6738,
555,
715,
395,
1330,
6208,
20448,
198,
6738,
555,
715,
395,
13,
292,
13361,
62,
7442,
1330,
1148,
50027,
42367,
952,
14402,
20448,
198,
198,
6738,
257,
952,
4023,
1330,
20985,
36044,
198,
6738,
25052,
684,
5171,... | 3.285714 | 105 |
import time
import random
from selenium import webdriver
""" NOT USED
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
"""
#-----------------------------------------------------------------------------------# | [
11748,
640,
198,
11748,
4738,
198,
6738,
384,
11925,
1505,
1330,
3992,
26230,
198,
37811,
5626,
1294,
1961,
198,
6738,
384,
11925,
1505,
13,
12384,
26230,
13,
11321,
13,
13083,
1330,
26363,
198,
6738,
384,
11925,
1505,
13,
12384,
26230,
... | 4.238636 | 88 |
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mock responses to GCP API calls, for testing.
When updating this file, also update the model test database by running
tests/services/model/importer/update_test_dbs.py.
"""
import json
# The single mock organization and the GSuite customer that owns it; all
# other fixtures in this module hang off these two identifiers.
ORGANIZATION_ID = "organizations/111222333"
GSUITE_CUSTOMER_ID = "ABC123DEF"
# Every resource needs a unique ID prefix so that every generated resource
# has a unique ID.
# A concrete resource id is the prefix concatenated with a small per-resource
# suffix (e.g. GROUP_ID_PREFIX + "1" -> "1011").
USER_ID_PREFIX = "100"
GROUP_ID_PREFIX = "101"
GROUP_MEMBER_ID_PREFIX = "102"
FOLDER_ID_PREFIX = "103"
PROJECT_ID_PREFIX = "104"
GCE_PROJECT_ID_PREFIX = "105"
GCE_INSTANCE_ID_PREFIX = "106"
FIREWALL_ID_PREFIX = "107"
INSTANCE_GROUP_ID_PREFIX = "108"
BACKEND_SERVICE_ID_PREFIX = "109"
SERVICEACCOUNT_ID_PREFIX = "110"
FORWARDING_RULE_ID_PREFIX = "111"
INSTANCE_GROUP_MANAGER_ID_PREFIX = "112"
INSTANCE_TEMPLATE_ID_PREFIX = "113"
NETWORK_ID_PREFIX = "114"
SUBNETWORK_ID_PREFIX = "115"
SERVICEACCOUNT_KEY_ID_PREFIX = "116"
GCE_IMAGE_ID_PREFIX = "117"
GCE_DISK_ID_PREFIX = "118"
# Fields: id, email, name
AD_USER_TEMPLATE = """
{{
 "kind": "admin#directory#user",
 "id": "100{id}",
 "primaryEmail": "{email}",
 "name": {{
  "fullName": "{name}"
 }},
 "emails": [
  {{
   "address": "{email}",
   "primary": true
  }}
 ]
}}
"""
# (id, primary email, full name) rows used to render AD_USER_TEMPLATE.
# The final row duplicates user 1 with a different e-mail case.
# Note: SQLite's varchar is case sensitive and MySQL is not, so that case
# is not useful while running SQLite; it is kept here for future reference.
_AD_USER_ROWS = (
    (1, "a_user@forseti.test", "A User"),
    (2, "b_user@forseti.test", "B User"),
    (3, "c_user@forseti.test", "C User"),
    (4, "a_USER@forseti.test", "A User"),
)
# GSuite users returned for the test customer, keyed by customer id.
AD_GET_USERS = {
    GSUITE_CUSTOMER_ID: [
        json.loads(AD_USER_TEMPLATE.format(id=uid, email=email, name=name))
        for uid, email, name in _AD_USER_ROWS
    ]
}
# Fields: email, name, members, id
AD_GROUP_TEMPLATE = """
{{
 "nonEditableAliases": ["{email}"],
 "kind": "admin#directory#group",
 "name": "{name}",
 "adminCreated": true,
 "directMembersCount": "{members}",
 "email": "{email}",
 "id": "101{id}",
 "description": ""
}}
"""
# (id, email, display name, direct member count) rows used to render
# AD_GROUP_TEMPLATE. The last two rows deliberately duplicate group 1:
# one exact repeat, and one that differs only by e-mail case.
_AD_GROUP_ROWS = (
    (1, "a_grp@forseti.test", "A Group", 1),
    (2, "b_grp@forseti.test", "B Group", 1),
    (3, "c_grp@forseti.test", "C Group", 2),
    (1, "a_grp@forseti.test", "A Group", 1),
    (4, "a_GRP@forseti.test", "A Group", 1),
)
# GSuite groups returned for the test customer, keyed by customer id.
AD_GET_GROUPS = {
    GSUITE_CUSTOMER_ID: [
        json.loads(AD_GROUP_TEMPLATE.format(
            id=gid, email=email, name=name, members=members))
        for gid, email, name, members in _AD_GROUP_ROWS
    ]
}
# Fields: id, email, type
AD_GROUP_MEMBER_TEMPLATE = """
{{
 "kind": "admin#directory#member",
 "id": "102{id}",
 "email": "{email}",
 "role": "MEMBER",
 "type": "{type}",
 "status": "ACTIVE"
}}
"""
# Member rows per group, keyed by the suffix appended to GROUP_ID_PREFIX.
# Group 1 deliberately lists the same member twice; group 3 contains a
# nested group (member type "GROUP") in addition to a user.
_AD_MEMBER_ROWS = (
    ("1", ((1, "a_user@forseti.test", "USER"),
           (1, "a_user@forseti.test", "USER"))),
    ("2", ((2, "b_user@forseti.test", "USER"),)),
    ("3", ((3, "c_user@forseti.test", "USER"),
           (5, "b_grp@forseti.test", "GROUP"))),
)
# GSuite group members keyed by group id.
AD_GET_GROUP_MEMBERS = {
    GROUP_ID_PREFIX + suffix: [
        json.loads(AD_GROUP_MEMBER_TEMPLATE.format(
            id=mid, email=email, type=mtype))
        for mid, email, mtype in rows
    ]
    for suffix, rows in _AD_MEMBER_ROWS
}
# Fields: project
APPENGINE_APP_TEMPLATE = """
{{
 "name": "apps/{project}",
 "id": "{project}",
 "authDomain": "forseti.test",
 "locationId": "us-central",
 "codeBucket": "staging.{project}.a.b.c",
 "servingStatus": "SERVING",
 "defaultHostname": "{project}.a.b.c",
 "defaultBucket": "{project}.a.b.c",
 "gcrDomain": "us.gcr.io"
}}
"""
# App Engine applications keyed by project id; only these two test
# projects have an App Engine app.
GAE_GET_APP = {
    project_id: json.loads(APPENGINE_APP_TEMPLATE.format(project=project_id))
    for project_id in ("project3", "project4")
}
# Fields: project, service, version
APPENGINE_SERVICE_TEMPLATE = """
{{
 "name": "apps/{project}/services/{service}",
 "id": "{service}",
 "split": {{
  "allocations": {{
   "{version}": 1
  }}
 }}
}}
"""
# App Engine services keyed by project id: project4 has a single "default"
# service with all traffic routed to version "1".
_GAE_SERVICE_JSON = APPENGINE_SERVICE_TEMPLATE.format(
    project="project4", service="default", version="1")
GAE_GET_SERVICES = {
    "project4": [json.loads(_GAE_SERVICE_JSON)],
}
# Fields: project, service, version
APPENGINE_VERSION_TEMPLATE = """
{{
 "name": "apps/{project}/services/{service}/versions/{version}",
 "id": "{version}",
 "instanceClass": "F1",
 "runtime": "python27",
 "threadsafe": true,
 "env": "standard",
 "servingStatus": "SERVING",
 "createdBy": "a_user@forseti.test",
 "createTime": "2017-09-11T22:48:32Z",
 "diskUsageBytes": "2036",
 "versionUrl": "https://{version}-dot-{project}.a.b.c"
}}
"""
# App Engine versions keyed by project id, then service id.
_GAE_VERSION_JSON = APPENGINE_VERSION_TEMPLATE.format(
    project="project4", service="default", version="1")
GAE_GET_VERSIONS = {
    "project4": {"default": [json.loads(_GAE_VERSION_JSON)]},
}
# Fields: project, service, version, instance
APPENGINE_INSTANCE_TEMPLATE = """
{{
 "name": "apps/{project}/services/{service}/versions/{version}/instances/{instance}",
 "id": "{instance}",
 "appEngineRelease": "1.9.54",
 "availability": "DYNAMIC",
 "startTime": "2017-09-11T22:49:03.485539Z",
 "requests": 3,
 "memoryUsage": "22802432"
}}
"""
# App Engine instances keyed by project id, service id, then version id:
# three running instances of version "1" of the "default" service.
GAE_GET_INSTANCES = {
    "project4": {"default": {"1": [
        json.loads(APPENGINE_INSTANCE_TEMPLATE.format(
            project="project4", service="default", version="1",
            instance=instance_id))
        for instance_id in ("1", "2", "3")
    ]}},
}
# BigQuery datasets keyed by project number; only project3 owns a dataset.
BQ_GET_DATASETS_FOR_PROJECTID = {
    PROJECT_ID_PREFIX + "3": [{
        "datasetReference": {
            "datasetId": "dataset1",
            "projectId": "project3"
        },
        "id": "project3:dataset1",
        "kind": "bigquery#dataset"
    }]
}
# Dataset access control entries keyed by project id, then dataset id.
BQ_GET_DATASET_ACCESS = {
    "project3": {
        "dataset1": [{
            "role": "WRITER",
            "specialGroup": "projectWriters"
        }, {
            "role": "OWNER",
            "specialGroup": "projectOwners"
        }, {
            "role": "OWNER",
            "userByEmail": "a_user@forseti.test"
        }, {
            "role": "READER",
            "specialGroup": "projectReaders"
        }]
    }
}
# The CRM organizations.get response for the mock organization, keyed by
# the organization resource name.
CRM_GET_ORGANIZATION = {
    ORGANIZATION_ID: {
        "displayName": "forseti.test",
        "owner": {
            "directoryCustomerId": GSUITE_CUSTOMER_ID
        },
        "creationTime": "2015-09-09T19:34:18.591Z",
        "lifecycleState": "ACTIVE",
        "name": ORGANIZATION_ID
    }
}
# Fields: id, parent, name
CRM_FOLDER_TEMPLATE = """
{{
"name": "folders/103{id}",
"parent": "{parent}",
"displayName": "{name}",
"lifecycleState": "ACTIVE",
"createTime": "2017-02-09T22:02:07.769Z"
}}
"""
CRM_GET_FOLDER = {
"folders/" + FOLDER_ID_PREFIX + "1":
json.loads(
CRM_FOLDER_TEMPLATE.format(
id=1, parent=ORGANIZATION_ID, name="Folder 1")),
"folders/" + FOLDER_ID_PREFIX + "2":
json.loads(
CRM_FOLDER_TEMPLATE.format(
id=2, parent=ORGANIZATION_ID, name="Folder 2")),
"folders/" + FOLDER_ID_PREFIX + "3":
json.loads(
CRM_FOLDER_TEMPLATE.format(
id=3, parent="folders/2", name="Folder 3")),
}
CRM_GET_FOLDERS = {
ORGANIZATION_ID: [
CRM_GET_FOLDER["folders/" + FOLDER_ID_PREFIX + "1"],
CRM_GET_FOLDER["folders/" + FOLDER_ID_PREFIX + "2"]
],
"folders/" + FOLDER_ID_PREFIX + "1": [],
"folders/" + FOLDER_ID_PREFIX + "2": [
CRM_GET_FOLDER["folders/" + FOLDER_ID_PREFIX + "3"]
],
"folders/" + FOLDER_ID_PREFIX + "3": [],
}
# Fields: num, id, name, parent_type, parent_id
CRM_PROJECT_TEMPLATE = """
{{
 "projectNumber": "104{num}",
 "projectId": "{id}",
 "lifecycleState": "ACTIVE",
 "name": "{name}",
 "createTime": "2017-07-12T17:50:40.895Z",
 "parent": {{
  "type": "{parent_type}",
  "id": "{parent_id}"
 }}
}}
"""
# (num, project id, display name, parent type, parent id) rows used to
# render CRM_PROJECT_TEMPLATE. Projects 1 and 2 sit directly under the
# organization; projects 3 and 4 sit under folders 1 and 3 respectively.
_CRM_PROJECT_ROWS = (
    (1, "project1", "Project 1", "organization", "111222333"),
    (2, "project2", "Project 2", "organization", "111222333"),
    (3, "project3", "Project 3", "folder", "1"),
    (4, "project4", "Project 4", "folder", "3"),
)
# CRM projects keyed by project number (PROJECT_ID_PREFIX + num).
CRM_GET_PROJECT = {
    PROJECT_ID_PREFIX + str(num): json.loads(CRM_PROJECT_TEMPLATE.format(
        num=num, id=project_id, name=name, parent_type=parent_type,
        parent_id=parent_id))
    for num, project_id, name, parent_type, parent_id in _CRM_PROJECT_ROWS
}
# projects.list responses keyed by parent type ("organization"/"folder"),
# then parent id; each page wraps its projects in a {"projects": [...]}
# envelope, mirroring the real API pagination format.
CRM_GET_PROJECTS = {
    "organization": {
        "111222333": [{
            "projects": [
                CRM_GET_PROJECT[PROJECT_ID_PREFIX + "1"],
                CRM_GET_PROJECT[PROJECT_ID_PREFIX + "2"]
            ]
        }]
    },
    "folder": {
        FOLDER_ID_PREFIX + "1": [{
            "projects": [CRM_GET_PROJECT[PROJECT_ID_PREFIX + "3"]]
        }],
        FOLDER_ID_PREFIX + "2": [],
        FOLDER_ID_PREFIX + "3": [{
            # Make sure duplicate api response doesn't block data model from building.
            "projects": [CRM_GET_PROJECT[PROJECT_ID_PREFIX + "4"],
                         CRM_GET_PROJECT[PROJECT_ID_PREFIX + "4"]]
        }]
    }
}
# Fields: id
CRM_PROJECT_IAM_POLICY_TEMPLATE = """
{{
"version": 1,
"bindings": [
{{
"role": "roles/editor",
"members": [
"serviceAccount:{id}@cloudservices.gserviceaccount.com",
"serviceAccount:{id}-compute@developer.gserviceaccount.com"
]
}},
{{
"role": "roles/owner",
"members": [
"group:c_grp@forseti.test",
"user:a_user@forseti.test"
]
}}
],
"auditConfigs": [
{{
"auditLogConfigs": [
{{
"logType": "ADMIN_READ"
}},
{{
"logType": "DATA_WRITE"
}},
{{
"logType": "DATA_READ"
}}
],
"service": "allServices"
}},
{{
"auditLogConfigs": [
{{
"exemptedMembers": [
"user:gcp-reader-12345@p1234.iam.gserviceaccount.com"
],
"logType": "ADMIN_READ"
}}
],
"service": "cloudsql.googleapis.com"
}}
]
}}
"""
CRM_PROJECT_IAM_POLICY_MEMBER_MULTI_ROLES = """
{{
"version": 1,
"bindings": [
{{
"role": "roles/editor",
"members": [
"serviceAccount:{id}@cloudservices.gserviceaccount.com",
"serviceAccount:{id}-compute@developer.gserviceaccount.com"
]
}},
{{
"role": "roles/owner",
"members": [
"group:c_grp@forseti.test",
"user:a_user@forseti.test"
]
}},
{{
"role": "roles/appengine.codeViewer",
"members": [
"user:abc_user@forseti.test"
]
}},
{{
"role": "roles/appengine.appViewer",
"members": [
"user:abc_user@forseti.test"
]
}}
]
}}
"""
CRM_PROJECT_IAM_POLICY_DUP_MEMBER = """
{{
"version": 1,
"bindings": [
{{
"role": "roles/editor",
"members": [
"serviceAccount:{id}@cloudservices.gserviceaccount.com",
"serviceAccount:{id}-compute@developer.gserviceaccount.com",
"serviceAccount:{id}@cloudservices.gserviceaccount.com"
]
}},
{{
"role": "roles/owner",
"members": [
"group:c_grp@forseti.test",
"user:a_user@forseti.test"
]
}}
]
}}
"""
CRM_FOLDER_IAM_POLICY = """
{
"version": 1,
"bindings": [
{
"role": "roles/resourcemanager.folderAdmin",
"members": [
"user:a_user@forseti.test"
]
}
]
}
"""
CRM_ORG_IAM_POLICY = """
{
"version": 1,
"bindings": [
{
"role": "roles/resourcemanager.organizationAdmin",
"members": [
"user:a_user@forseti.test"
]
}
],
"auditConfigs": [
{
"auditLogConfigs": [
{
"logType": "ADMIN_READ"
}
],
"service": "allServices"
}
]
}
"""
CRM_GET_IAM_POLICIES = {
ORGANIZATION_ID: json.loads(CRM_ORG_IAM_POLICY),
"folders/" + FOLDER_ID_PREFIX + "1": json.loads(CRM_FOLDER_IAM_POLICY),
"folders/" + FOLDER_ID_PREFIX + "2": json.loads(CRM_FOLDER_IAM_POLICY),
"folders/" + FOLDER_ID_PREFIX + "3": json.loads(CRM_FOLDER_IAM_POLICY),
"project1": json.loads(CRM_PROJECT_IAM_POLICY_TEMPLATE.format(id=1)),
"project2": json.loads(CRM_PROJECT_IAM_POLICY_TEMPLATE.format(id=2)),
"project3": json.loads(CRM_PROJECT_IAM_POLICY_MEMBER_MULTI_ROLES.format(id=3)),
"project4": json.loads(CRM_PROJECT_IAM_POLICY_DUP_MEMBER.format(id=4)),
}
# Canned GCP "permission denied" error body.
# Fields: id (the resource the caller was denied access to)
# Fix: the inner "message" entry previously ended with a trailing comma,
# which made the rendered document invalid JSON (json.loads rejects
# trailing commas), so any test parsing this template would fail.
GCP_PERMISSION_DENIED_TEMPLATE = """
{{
 "error": {{
  "errors": [
   {{
    "domain": "global",
    "reason": "forbidden",
    "message": "The caller does not have permission on {id}."
   }}
  ],
  "code": 403,
  "message": "The caller does not have permission on {id}."
 }}
}}
"""
# Fields: name, project, ip
SQL_INSTANCE_TEMPLATE = """
{{
 "kind": "sql#instance",
 "name": "{name}",
 "connectionName": "{project}:us-west1:{name}",
 "project": "{project}",
 "state": "RUNNABLE",
 "backendType": "SECOND_GEN",
 "databaseVersion": "MYSQL_5_7",
 "region": "us-west1",
 "settings": {{
  "kind": "sql#settings",
  "settingsVersion": "13",
  "authorizedGaeApplications": [
  ],
  "tier": "db-n1-standard-1",
  "backupConfiguration": {{
   "kind": "sql#backupConfiguration",
   "startTime": "09:00",
   "enabled": true,
   "binaryLogEnabled": true
  }},
  "pricingPlan": "PER_USE",
  "replicationType": "SYNCHRONOUS",
  "activationPolicy": "ALWAYS",
  "ipConfiguration": {{
   "ipv4Enabled": true,
   "authorizedNetworks": [
   ]
  }},
  "locationPreference": {{
   "kind": "sql#locationPreference",
   "zone": "us-west1-a"
  }},
  "dataDiskSizeGb": "10",
  "dataDiskType": "PD_SSD",
  "maintenanceWindow": {{
   "kind": "sql#maintenanceWindow",
   "hour": 0,
   "day": 0
  }},
  "storageAutoResize": true,
  "storageAutoResizeLimit": "0"
 }},
 "serverCaCert": {{
  "kind": "sql#sslCert",
  "instance": "{name}",
  "sha1Fingerprint": "1234567890",
  "commonName": "C=US,O=Test",
  "certSerialNumber": "0",
  "cert": "-----BEGIN CERTIFICATE----------END CERTIFICATE-----",
  "createTime": "2017-11-22T17:59:22.085Z",
  "expirationTime": "2019-11-22T18:00:22.085Z"
 }},
 "ipAddresses": [
  {{
   "ipAddress": "{ip}",
   "type": "PRIMARY"
  }}
 ],
 "instanceType": "CLOUD_SQL_INSTANCE",
 "gceZone": "us-west1-a"
}}
"""
# Cloud SQL instances keyed by project id: a single second-generation
# MySQL instance named "forseti" in project2.
_SQL_FORSETI_INSTANCE = SQL_INSTANCE_TEMPLATE.format(
    name="forseti", project="project2", ip="192.168.2.2")
SQL_GET_INSTANCES = {
    "project2": [json.loads(_SQL_FORSETI_INSTANCE)]
}
# Canned error body returned when the Compute Engine API is disabled for
# a project.
# Fields: id (the project id)
GCE_API_NOT_ENABLED_TEMPLATE = """
{{
 "error": {{
  "errors": [
   {{
    "domain": "usageLimits",
    "reason": "accessNotConfigured",
    "message": "Access Not Configured. Compute Engine API has not been used in project {id} before or it is disabled. Enable it by visiting https://console.developers.google.com/apis/api/compute.googleapis.com/overview?project={id} then retry. If you enabled this API recently, wait a few minutes for the action to propagate to our systems and retry.",
    "extendedHelp": "https://console.developers.google.com/apis/api/compute.googleapis.com/overview?project={id}"
   }}
  ],
  "code": 403,
  "message": "Access Not Configured. Compute Engine API has not been used in project {id} before or it is disabled. Enable it by visiting https://console.developers.google.com/apis/api/compute.googleapis.com/overview?project={id} then retry. If you enabled this API recently, wait a few minutes for the action to propagate to our systems and retry."
 }}
}}
"""
# Fields: num, id, projnum
GCE_PROJECT_TEMPLATE = """
{{
"kind": "compute#project",
"id": "105{num}",
"creationTimestamp": "2016-02-25T14:01:23.140-08:00",
"name": "{id}",
"commonInstanceMetadata": {{
"kind": "compute#metadata",
"fingerprint": "ABC",
"items": [
{{
"key": "some-key",
"value": "some-value"
}}
]
}},
"quotas": [
{{
"metric": "SNAPSHOTS",
"limit": 1000.0,
"usage": 0.0
}},
{{
"metric": "NETWORKS",
"limit": 5.0,
"usage": 1.0
}}
],
"selfLink": "https://www.googleapis.com/compute/v1/projects/{id}",
"defaultServiceAccount": "{projnum}-compute@developer.gserviceaccount.com",
"xpnProjectStatus": "UNSPECIFIED_XPN_PROJECT_STATUS"
}}
"""
GCE_GET_PROJECT = {
"project1":
json.loads(
GCE_PROJECT_TEMPLATE.format(
num=1, id="project1", projnum=PROJECT_ID_PREFIX + "1")),
"project2":
json.loads(
GCE_PROJECT_TEMPLATE.format(
num=2, id="project2", projnum=PROJECT_ID_PREFIX + "1")),
"project3":
json.loads(
GCE_PROJECT_TEMPLATE.format(
num=3, id="project3", projnum=PROJECT_ID_PREFIX + "3")),
}
# Fields: id, name, project, num, ip, external_ip, network, template,
# groupmanager
GCE_INSTANCE_TEMPLATE_IAP = """
{{
"kind": "compute#instance",
"id": "106{id}",
"creationTimestamp": "2017-05-26T22:08:11.094-07:00",
"name": "{name}",
"tags": {{
"items": [
"iap-tag"
],
"fingerprint": "gilEhx3hEXk="
}},
"machineType": "https://www.googleapis.com/compute/v1/projects/{project}/zones/us-central1-c/machineTypes/f1-micro",
"status": "RUNNING",
"zone": "https://www.googleapis.com/compute/v1/projects/{project}/zones/us-central1-c",
"canIpForward": false,
"networkInterfaces": [
{{
"kind": "compute#networkInterface",
"network": "https://www.googleapis.com/compute/v1/projects/{project}/global/networks/{network}",
"subnetwork": "https://www.googleapis.com/compute/v1/projects/{project}/regions/us-central1/subnetworks/{network}",
"networkIP": "{ip}",
"name": "nic0",
"accessConfigs": [
{{
"kind": "compute#accessConfig",
"type": "ONE_TO_ONE_NAT",
"name": "External NAT",
"natIP": "{external_ip}"
}}
]
}}
],
"disks": [
{{
"kind": "compute#attachedDisk",
"type": "PERSISTENT",
"mode": "READ_WRITE",
"source": "https://www.googleapis.com/compute/v1/projects/{project}/zones/us-central1-c/disks/{name}",
"deviceName": "{template}",
"index": 0,
"boot": true,
"autoDelete": true,
"licenses": [
"https://www.googleapis.com/compute/v1/projects/debian-cloud/global/licenses/debian-8-jessie"
],
"interface": "SCSI"
}}
],
"metadata": {{
"kind": "compute#metadata",
"fingerprint": "3MpZMMvDTyo=",
"items": [
{{
"key": "instance-template",
"value": "projects/{num}/global/instanceTemplates/{template}"
}},
{{
"key": "created-by",
"value": "projects/{num}/zones/us-central1-c/instanceGroupManagers/{groupmanager}"
}}
]
}},
"serviceAccounts": [
{{
"email": "{num}-compute@developer.gserviceaccount.com",
"scopes": [
"https://www.googleapis.com/auth/devstorage.read_only",
"https://www.googleapis.com/auth/logging.write",
"https://www.googleapis.com/auth/monitoring.write",
"https://www.googleapis.com/auth/servicecontrol",
"https://www.googleapis.com/auth/service.management.readonly",
"https://www.googleapis.com/auth/trace.append"
]
}}
],
"selfLink": "https://www.googleapis.com/compute/v1/projects/{project}/zones/us-central1-c/instances/{name}",
"scheduling": {{
"onHostMaintenance": "MIGRATE",
"automaticRestart": true,
"preemptible": false
}},
"cpuPlatform": "Intel Haswell"
}}
"""
# Fields: id, name, project, num, ip, external_ip, network
GCE_INSTANCE_TEMPLATE_STANDARD = """
{{
"kind": "compute#instance",
"id": "106{id}",
"creationTimestamp": "2017-11-22T09:47:37.688-08:00",
"name": "{name}",
"description": "",
"tags": {{
"fingerprint": "42WmSpB8rSM="
}},
"machineType": "https://www.googleapis.com/compute/v1/projects/{project}/zones/us-west1-a/machineTypes/n1-standard-2",
"status": "RUNNING",
"zone": "https://www.googleapis.com/compute/v1/projects/{project}/zones/us-west1-a",
"canIpForward": false,
"networkInterfaces": [
{{
"kind": "compute#networkInterface",
"network": "https://www.googleapis.com/compute/v1/projects/{project}/global/networks/{network}",
"subnetwork": "https://www.googleapis.com/compute/v1/projects/{project}/regions/us-west1/subnetworks/{network}",
"networkIP": "{ip}",
"name": "nic0",
"accessConfigs": [
{{
"kind": "compute#accessConfig",
"type": "ONE_TO_ONE_NAT",
"name": "External NAT",
"natIP": "{external_ip}"
}}
],
"fingerprint": "Z9b15gLF1tc="
}}
],
"disks": [
{{
"kind": "compute#attachedDisk",
"type": "PERSISTENT",
"mode": "READ_WRITE",
"source": "https://www.googleapis.com/compute/v1/projects/{project}/zones/us-west1-a/disks/{name}",
"deviceName": "{name}",
"index": 0,
"boot": true,
"autoDelete": true,
"licenses": [
"https://www.googleapis.com/compute/v1/projects/ubuntu-os-cloud/global/licenses/ubuntu-1604-xenial"
],
"interface": "SCSI"
}}
],
"metadata": {{
"kind": "compute#metadata",
"fingerprint": "n9X2Zj3rDe0="
}},
"serviceAccounts": [
{{
"email": "{num}-compute@developer.gserviceaccount.com",
"scopes": [
"https://www.googleapis.com/auth/cloud-platform"
]
}}
],
"selfLink": "https://www.googleapis.com/compute/v1/projects/{project}/zones/us-west1-a/instances/{name}",
"scheduling": {{
"onHostMaintenance": "MIGRATE",
"automaticRestart": true,
"preemptible": false
}},
"cpuPlatform": "Intel Broadwell",
"labelFingerprint": "42WmSpB8rSM=",
"startRestricted": false,
"deletionProtection": false
}}
"""
# GCE instances keyed by project id. project1 runs three IAP-tagged
# managed instances that differ only in id, name and addresses; project2
# runs a single standalone instance.
_GCE_IAP_INSTANCE_ROWS = (
    # (id, name, internal ip, external ip)
    (1, "iap_instance1", "10.138.0.2", "192.168.1.2"),
    (2, "iap_instance2", "10.138.0.3", "192.168.1.3"),
    (3, "iap_instance3", "10.138.0.4", "192.168.1.4"),
)
GCE_GET_INSTANCES = {
    "project1": [
        json.loads(GCE_INSTANCE_TEMPLATE_IAP.format(
            id=instance_id,
            name=instance_name,
            project="project1",
            num=PROJECT_ID_PREFIX + "1",
            ip=internal_ip,
            external_ip=external_ip,
            network="default",
            template="instance_template1",
            groupmanager="group_manager1"))
        for instance_id, instance_name, internal_ip, external_ip
        in _GCE_IAP_INSTANCE_ROWS
    ],
    "project2": [
        json.loads(GCE_INSTANCE_TEMPLATE_STANDARD.format(
            id=4,
            name="instance3",
            project="project2",
            num=PROJECT_ID_PREFIX + "2",
            ip="10.138.0.2",
            external_ip="192.168.1.5",
            network="default")),
    ]
}
# Fields: network, id, project
# Default-network firewall set: allow ICMP/RDP/SSH from anywhere and all
# internal traffic from 10.0.0.0/8.
GCE_FIREWALL_TEMPLATE_DEFAULT = """
[
 {{
  "kind": "compute#firewall",
  "id": "107{id}1",
  "creationTimestamp": "2017-05-04T16:23:00.568-07:00",
  "network": "https://www.googleapis.com/compute/beta/projects/{project}/global/networks/{network}",
  "priority": 1000,
  "sourceRanges": ["0.0.0.0/0"],
  "description": "Allow ICMP from anywhere",
  "allowed": [
   {{
    "IPProtocol": "icmp"
   }}
  ],
  "name": "{network}-allow-icmp",
  "direction": "INGRESS",
  "selfLink": "https://www.googleapis.com/compute/beta/projects/{project}/global/firewalls/{network}-allow-icmp"
 }},
 {{
  "kind": "compute#firewall",
  "id": "107{id}2",
  "creationTimestamp": "2017-05-04T16:23:00.568-07:00",
  "network": "https://www.googleapis.com/compute/beta/projects/{project}/global/networks/{network}",
  "priority": 1000,
  "sourceRanges": ["0.0.0.0/0"],
  "description": "Allow RDP from anywhere",
  "allowed": [
   {{
    "IPProtocol": "tcp",
    "ports": ["3389"]
   }}
  ],
  "name": "{network}-allow-rdp",
  "direction": "INGRESS",
  "selfLink": "https://www.googleapis.com/compute/beta/projects/{project}/global/firewalls/{network}-allow-rdp"
 }},
 {{
  "kind": "compute#firewall",
  "id": "107{id}3",
  "creationTimestamp": "2017-05-04T16:23:00.568-07:00",
  "network": "https://www.googleapis.com/compute/beta/projects/{project}/global/networks/{network}",
  "priority": 1000,
  "sourceRanges": ["0.0.0.0/0"],
  "description": "Allow SSH from anywhere",
  "allowed": [
   {{
    "IPProtocol": "tcp",
    "ports": ["22"]
   }}
  ],
  "name": "{network}-allow-ssh",
  "direction": "INGRESS",
  "selfLink": "https://www.googleapis.com/compute/beta/projects/{project}/global/firewalls/{network}-allow-ssh"
 }},
 {{
  "kind": "compute#firewall",
  "id": "107{id}4",
  "creationTimestamp": "2017-05-04T16:23:00.568-07:00",
  "network": "https://www.googleapis.com/compute/beta/projects/{project}/global/networks/{network}",
  "priority": 1000,
  "sourceRanges": ["10.0.0.0/8"],
  "description": "Allow internal traffic on the {network} network.",
  "allowed": [
   {{
    "IPProtocol": "udp",
    "ports": ["1-65535"]
   }},
   {{
    "IPProtocol": "tcp",
    "ports": ["1-65535"]
   }},
   {{
    "IPProtocol": "icmp"
   }}
  ],
  "name": "{network}-allow-internal",
  "direction": "INGRESS",
  "selfLink": "https://www.googleapis.com/compute/beta/projects/{project}/global/firewalls/{network}-allow-internal"
 }}
]
"""
# Fields: network, id, project
# IAP-style firewall set: HTTP/HTTPS only from Google LB ranges to
# instances tagged "iap-tag", plus SSH and restricted internal traffic.
GCE_FIREWALL_TEMPLATE_IAP = """
[
 {{
  "kind": "compute#firewall",
  "id": "107{id}1",
  "creationTimestamp": "2017-05-04T16:23:00.568-07:00",
  "network": "https://www.googleapis.com/compute/beta/projects/{project}/global/networks/{network}",
  "priority": 1000,
  "sourceRanges": ["130.211.0.0/22", "35.191.0.0/16"],
  "description": "Allow HTTP and HTTPS from LB",
  "allowed": [
   {{
    "IPProtocol": "tcp",
    "ports": ["80", "443"]
   }}
  ],
  "targetTags": ["iap-tag"],
  "name": "{network}-allow-https-lb",
  "direction": "INGRESS",
  "selfLink": "https://www.googleapis.com/compute/beta/projects/{project}/global/firewalls/{network}-allow-https-lb"
 }},
 {{
  "kind": "compute#firewall",
  "id": "107{id}2",
  "creationTimestamp": "2017-05-04T16:23:00.568-07:00",
  "network": "https://www.googleapis.com/compute/beta/projects/{project}/global/networks/{network}",
  "priority": 1000,
  "sourceRanges": ["0.0.0.0/0"],
  "description": "Allow SSH from anywhere",
  "allowed": [
   {{
    "IPProtocol": "tcp",
    "ports": ["22"]
   }}
  ],
  "name": "{network}-allow-ssh",
  "direction": "INGRESS",
  "selfLink": "https://www.googleapis.com/compute/beta/projects/{project}/global/firewalls/{network}-allow-ssh"
 }},
 {{
  "kind": "compute#firewall",
  "id": "107{id}3",
  "creationTimestamp": "2017-05-04T16:23:00.568-07:00",
  "network": "https://www.googleapis.com/compute/beta/projects/{project}/global/networks/{network}",
  "priority": 1000,
  "sourceRanges": ["10.0.0.0/8"],
  "description": "Allow SSH and ICMP between instances on {network} network.",
  "allowed": [
   {{
    "IPProtocol": "tcp",
    "ports": ["22"]
   }},
   {{
    "IPProtocol": "icmp"
   }}
  ],
  "name": "{network}-allow-internal",
  "direction": "INGRESS",
  "selfLink": "https://www.googleapis.com/compute/beta/projects/{project}/global/firewalls/{network}-allow-internal"
 }}
]
"""
# Firewall rule lists keyed by project id, rendered on the "default"
# network: project1 uses the IAP rule set, project2 the open default set.
_GCE_FIREWALL_ARGS = (
    ("project1", GCE_FIREWALL_TEMPLATE_IAP, 1),
    ("project2", GCE_FIREWALL_TEMPLATE_DEFAULT, 2),
)
GCE_GET_FIREWALLS = {
    project: json.loads(template.format(id=rule_set_id, project=project,
                                        network="default"))
    for project, template, rule_set_id in _GCE_FIREWALL_ARGS
}
# Fields: id, project, zone, name
# NOTE(review): the top-level "name" field is hard-coded to "instance-1"
# while selfLink/users use the {name} parameter — possibly intentional;
# verify before changing, since the model test DB is generated from this.
GCE_DISKS_TEMPLATE = """
{{
 "kind": "compute#disk",
 "id": "118{id}",
 "creationTimestamp": "2017-08-07T10:18:45.802-07:00",
 "name": "instance-1",
 "sizeGb": "10",
 "zone": "https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}",
 "status": "READY",
 "selfLink": "https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/disks/{name}",
 "sourceImage": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-9-stretch-v20170717",
 "sourceImageId": "4214972497302618486",
 "type": "https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/diskTypes/pd-standard",
 "licenses": [
  "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/licenses/debian-9-stretch"
 ],
 "lastAttachTimestamp": "2017-08-07T10:18:45.806-07:00",
 "users": [
  "https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/instances/{name}"
 ],
 "labelFingerprint": "42WmSpB8rSM="
}}
"""
# (id, instance name, project, zone) rows used to render
# GCE_DISKS_TEMPLATE: one boot disk per instance in GCE_GET_INSTANCES.
_GCE_DISK_ROWS = (
    (1, "iap_instance1", "project1", "us-central1-c"),
    (2, "iap_instance2", "project1", "us-central1-c"),
    (3, "iap_instance3", "project1", "us-central1-c"),
    (4, "instance3", "project2", "us-west1-a"),
)
# Disks grouped by owning project id, preserving the row order above.
GCE_GET_DISKS = {}
for _disk_id, _disk_name, _disk_project, _disk_zone in _GCE_DISK_ROWS:
    GCE_GET_DISKS.setdefault(_disk_project, []).append(
        json.loads(GCE_DISKS_TEMPLATE.format(
            id=_disk_id, name=_disk_name, project=_disk_project,
            zone=_disk_zone)))
# Fields: id, project
# Two custom CentOS 6 images: the first is marked DEPRECATED and points at
# the second as its replacement.
GCE_IMAGES_TEMPLATE = """
[
 {{
  "kind": "compute#image",
  "id": "117{id}1",
  "creationTimestamp": "2017-11-15T21:59:58.627-08:00",
  "name": "centos-6-custom-v20171116",
  "description": "Custom CentOS 6 built on 20171116",
  "sourceType": "RAW",
  "deprecated": {{
   "state": "DEPRECATED",
   "replacement": "https://www.googleapis.com/compute/v1/projects/{project}/global/images/centos-6-custom-v20171208"
  }},
  "status": "READY",
  "archiveSizeBytes": "688350464",
  "diskSizeGb": "10",
  "sourceDisk": "https://www.googleapis.com/compute/v1/projects/{project}/zones/us-central1-b/disks/disk-install-centos-6-custom-dz0wt",
  "sourceDiskId": "2345",
  "licenses": [
   "https://www.googleapis.com/compute/v1/projects/centos-cloud/global/licenses/centos-6"
  ],
  "family": "centos-6-custom",
  "selfLink": "https://www.googleapis.com/compute/v1/projects/{project}/global/images/centos-6-custom-v20171116",
  "labelFingerprint": "42WmSpB8rSM=",
  "guestOsFeatures": [
   {{
    "type": "VIRTIO_SCSI_MULTIQUEUE"
   }}
  ]
 }},
 {{
  "kind": "compute#image",
  "id": "117{id}2",
  "creationTimestamp": "2017-12-07T16:19:13.482-08:00",
  "name": "centos-6-custom-v20171208",
  "description": "Custom CentOS 6 built on 20171208",
  "sourceType": "RAW",
  "status": "READY",
  "archiveSizeBytes": "788880064",
  "diskSizeGb": "10",
  "sourceDisk": "https://www.googleapis.com/compute/v1/projects/{project}/zones/us-central1-b/disks/disk-install-centos-6-custom-62bzs",
  "sourceDiskId": "5678",
  "licenses": [
   "https://www.googleapis.com/compute/v1/projects/centos-cloud/global/licenses/centos-6"
  ],
  "family": "centos-6-custom",
  "selfLink": "https://www.googleapis.com/compute/v1/projects/{project}/global/images/centos-6-custom-v20171208",
  "labelFingerprint": "42WmSpB8rSM=",
  "guestOsFeatures": [
   {{
    "type": "VIRTIO_SCSI_MULTIQUEUE"
   }}
  ]
 }}
]
"""
# Custom images keyed by project id; only project2 has any.
_GCE_IMAGES_JSON = GCE_IMAGES_TEMPLATE.format(id=1, project="project2")
GCE_GET_IMAGES = {
    "project2": json.loads(_GCE_IMAGES_JSON),
}
# Fields: id, name, project, network, instance1, instance2, instance3
GCE_INSTANCE_GROUPS_TEMPLATE = """
{{
"kind": "compute#instanceGroup",
"id": "108{id}",
"creationTimestamp": "2017-08-24T11:10:06.771-07:00",
"name": "{name}",
"description": "This instance group is controlled by Regional Instance Group Manager '{name}'. To modify instances in this group, use the Regional Instance Group Manager API: https://cloud.google.com/compute/docs/reference/latest/instanceGroupManagers",
"network": "https://www.googleapis.com/compute/v1/projects/{project}/global/networks/{network}",
"fingerprint": "42WmSpB8rSM=",
"selfLink": "https://www.googleapis.com/compute/v1/projects/{project}/regions/us-central1/instanceGroups/{name}",
"size": 3,
"region": "https://www.googleapis.com/compute/v1/projects/{project}/regions/us-central1",
"subnetwork": "https://www.googleapis.com/compute/v1/projects/{project}/regions/us-central1/subnetworks/{network}",
"instance_urls": [
"https://www.googleapis.com/compute/v1/projects/{project}/zones/us-central1-c/instances/{instance1}",
"https://www.googleapis.com/compute/v1/projects/{project}/zones/us-central1-c/instances/{instance2}",
"https://www.googleapis.com/compute/v1/projects/{project}/zones/us-central1-c/instances/{instance3}"
]
}}
"""
GCE_GET_INSTANCE_GROUPS = {
"project1": [
json.loads(
GCE_INSTANCE_GROUPS_TEMPLATE.format(
id=1,
name="bs-1-ig-1",
project="project1",
network="default",
instance1="iap_instance1",
instance2="iap_instance2",
instance3="iap_instance3")),
json.loads(
GCE_INSTANCE_GROUPS_TEMPLATE.format(
id=2,
name="gke-cluster-1-default-pool-12345678-grp",
project="project1",
network="default",
instance1="ke_instance1",
instance2="ke_instance2",
instance3="ke_instance3")),
]
}
# Fields: id, name, project, ig_name, hc_name
GCE_BACKEND_SERVICES_TEMPLATE_IAP = """
{{
"kind": "compute#backendService",
"id": "109{id}",
"creationTimestamp": "2017-05-12T11:14:18.559-07:00",
"name": "{name}",
"description": "",
"selfLink": "https://www.googleapis.com/compute/beta/projects/{project}/global/backendServices/{name}",
"backends": [
{{
"description": "",
"group": "https://www.googleapis.com/compute/beta/projects/{project}/zones/us-central1-c/instanceGroups/{ig_name}",
"balancingMode": "UTILIZATION",
"maxUtilization": 0.8,
"capacityScaler": 1.0
}}
],
"healthChecks": [
"https://www.googleapis.com/compute/beta/projects/{project}/global/healthChecks/{hc_name}"
],
"timeoutSec": 30,
"port": 80,
"protocol": "HTTP",
"portName": "http",
"enableCDN": false,
"sessionAffinity": "NONE",
"affinityCookieTtlSec": 0,
"loadBalancingScheme": "EXTERNAL",
"connectionDraining": {{
"drainingTimeoutSec": 300
}},
"iap": {{
"enabled": true,
"oauth2ClientId": "foo",
"oauth2ClientSecretSha256": "bar"
}}
}}
"""
GCE_GET_BACKEND_SERVICES = {
"project1": [
json.loads(
GCE_BACKEND_SERVICES_TEMPLATE_IAP.format(
id=1,
name="bs-1",
project="project1",
ig_name="bs-1-ig-1",
hc_name="bs-1-hc")),
]
}
# Fields: id, name, project, ip, target
FORWARDING_RULES_TEMPLATE = """
{{
"kind": "compute#forwardingRule",
"description": "",
"IPAddress": "{ip}",
"region": "https://www.googleapis.com/compute/v1/projects/{project}/regions/us-central1",
"loadBalancingScheme": "EXTERNAL",
"target": "https://www.googleapis.com/compute/v1/projects/{project}/regions/us-central1/{target}",
"portRange": "80-80",
"IPProtocol": "TCP",
"creationTimestamp": "2017-05-05T12:00:01.000-07:00",
"id": "111{id}",
"selfLink": "https://www.googleapis.com/compute/v1/projects/{project}/regions/us-central1/forwardingRules/{name}",
"name": "{name}"
}}
"""
GCE_GET_FORWARDING_RULES = {
"project1": [
json.loads(
FORWARDING_RULES_TEMPLATE.format(
id=1,
name="lb-1",
project="project1",
ip="172.16.1.2",
target="targetHttpProxies/lb-1-target-proxy")),
]
}
# Fields: id, name, project, template
INSTANCE_GROUP_MANAGER_TEMPLATE = """
{{
"kind": "compute#instanceGroupManager",
"id": "112{id}",
"creationTimestamp": "2017-08-24T11:10:06.770-07:00",
"name": "{name}",
"region": "https://www.googleapis.com/compute/v1/projects/{project}/regions/us-central1",
"instanceTemplate": "https://www.googleapis.com/compute/v1/projects/{project}/global/instanceTemplates/{template}",
"instanceGroup": "https://www.googleapis.com/compute/v1/projects/{project}/regions/us-central1/instanceGroups/{name}",
"baseInstanceName": "{name}",
"currentActions": {{
"none": 3,
"creating": 0,
"creatingWithoutRetries": 0,
"recreating": 0,
"deleting": 0,
"abandoning": 0,
"restarting": 0,
"refreshing": 0
}},
"targetSize": 3,
"selfLink": "https://www.googleapis.com/compute/v1/projects/{project}/regions/us-central1/instanceGroupManagers/{name}"
}}
"""
# Fields: id, name, project, template, zone
KE_INSTANCE_GROUP_MANAGER_TEMPLATE = """
{{
"kind": "compute#instanceGroupManager",
"id": "112{id}",
"creationTimestamp": "2017-10-24T12:36:42.373-07:00",
"name": "{name}",
"zone": "https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}",
"instanceTemplate": "https://www.googleapis.com/compute/v1/projects/{project}/global/instanceTemplates/{template}",
"instanceGroup": "https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/instanceGroups/{name}",
"baseInstanceName": "{template}",
"fingerprint": "Y81gWm4KRRY=",
"currentActions": {{
"none": 3,
"creating": 0,
"creatingWithoutRetries": 0,
"recreating": 0,
"deleting": 0,
"abandoning": 0,
"restarting": 0,
"refreshing": 0
}},
"targetSize": 3,
"selfLink": "https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/instanceGroupManagers/{name}"
}}
"""
GCE_GET_INSTANCE_GROUP_MANAGERS = {
"project1": [
json.loads(
INSTANCE_GROUP_MANAGER_TEMPLATE.format(
id=1, name="igm-1", project="project1", template="it-1")),
json.loads(
KE_INSTANCE_GROUP_MANAGER_TEMPLATE.format(
id=2, name="gke-cluster-1-default-pool-12345678-grp",
project="project1",
template="gke-cluster-1-default-pool-12345678",
zone="us-central1-a")),
]
}
# Fields: id, name, project, network, num
INSTANCE_TEMPLATES_TEMPLATE = """
{{
"kind": "compute#instanceTemplate",
"id": "113{id}",
"creationTimestamp": "2017-05-26T22:07:36.275-07:00",
"name": "{name}",
"description": "",
"properties": {{
"tags": {{
"items": [
"iap-tag"
]
}},
"machineType": "f1-micro",
"canIpForward": false,
"networkInterfaces": [
{{
"kind": "compute#networkInterface",
"network": "https://www.googleapis.com/compute/v1/projects/{project}/global/networks/{network}",
"accessConfigs": [
{{
"kind": "compute#accessConfig",
"type": "ONE_TO_ONE_NAT",
"name": "External NAT"
}}
]
}}
],
"disks": [
{{
"kind": "compute#attachedDisk",
"type": "PERSISTENT",
"mode": "READ_WRITE",
"deviceName": "{name}",
"boot": true,
"initializeParams": {{
"sourceImage": "projects/debian-cloud/global/images/debian-8-jessie-v20170523",
"diskSizeGb": "10",
"diskType": "pd-standard"
}},
"autoDelete": true
}}
],
"metadata": {{
"kind": "compute#metadata",
"fingerprint": "Ab2_F_dLE3A="
}},
"serviceAccounts": [
{{
"email": "{num}-compute@developer.gserviceaccount.com",
"scopes": [
"https://www.googleapis.com/auth/devstorage.read_only",
"https://www.googleapis.com/auth/logging.write",
"https://www.googleapis.com/auth/monitoring.write",
"https://www.googleapis.com/auth/servicecontrol",
"https://www.googleapis.com/auth/service.management.readonly",
"https://www.googleapis.com/auth/trace.append"
]
}}
],
"scheduling": {{
"onHostMaintenance": "MIGRATE",
"automaticRestart": true,
"preemptible": false
}}
}},
"selfLink": "https://www.googleapis.com/compute/v1/projects/{project}/global/instanceTemplates/{name}"
}}
"""
GCE_GET_INSTANCE_TEMPLATES = {
"project1": [
json.loads(
INSTANCE_TEMPLATES_TEMPLATE.format(
id=1,
name="it-1",
project="project1",
network="default",
num=PROJECT_ID_PREFIX + "1")),
json.loads(
INSTANCE_TEMPLATES_TEMPLATE.format(
id=2,
name="gke-cluster-1-default-pool-12345678",
project="project1",
network="default",
num=PROJECT_ID_PREFIX + "1")),
]
}
# Fields: id, name, project
NETWORK_TEMPLATE = """
{{
"kind": "compute#network",
"id": "114{id}",
"creationTimestamp": "2017-09-25T12:33:24.312-07:00",
"name": "{name}",
"description": "",
"selfLink": "https://www.googleapis.com/compute/v1/projects/{project}/global/networks/{name}",
"autoCreateSubnetworks": true,
"subnetworks": [
"https://www.googleapis.com/compute/v1/projects/{project}/regions/europe-west1/subnetworks/{name}",
"https://www.googleapis.com/compute/v1/projects/{project}/regions/asia-east1/subnetworks/{name}",
"https://www.googleapis.com/compute/v1/projects/{project}/regions/us-west1/subnetworks/{name}",
"https://www.googleapis.com/compute/v1/projects/{project}/regions/asia-northeast1/subnetworks/{name}",
"https://www.googleapis.com/compute/v1/projects/{project}/regions/us-central1/subnetworks/{name}",
"https://www.googleapis.com/compute/v1/projects/{project}/regions/southamerica-east1/subnetworks/{name}",
"https://www.googleapis.com/compute/v1/projects/{project}/regions/europe-west3/subnetworks/{name}",
"https://www.googleapis.com/compute/v1/projects/{project}/regions/us-east1/subnetworks/{name}",
"https://www.googleapis.com/compute/v1/projects/{project}/regions/us-east4/subnetworks/{name}",
"https://www.googleapis.com/compute/v1/projects/{project}/regions/europe-west2/subnetworks/{name}",
"https://www.googleapis.com/compute/v1/projects/{project}/regions/asia-southeast1/subnetworks/{name}",
"https://www.googleapis.com/compute/v1/projects/{project}/regions/australia-southeast1/subnetworks/{name}"
]
}}
"""
GCE_GET_NETWORKS = {
"project1": [
json.loads(
NETWORK_TEMPLATE.format(id=1, name="default", project="project1")),
],
"project2": [
json.loads(
NETWORK_TEMPLATE.format(id=2, name="default", project="project2")),
]
}
# Fields: id, name, project, ippart, region
SUBNETWORK_TEMPLATE = """
{{
"kind": "compute#subnetwork",
"id": "115{id}",
"creationTimestamp": "2017-03-27T15:45:47.874-07:00",
"name": "{name}",
"network": "https://www.googleapis.com/compute/v1/projects/{project}/global/networks/{name}",
"ipCidrRange": "10.{ippart}.0.0/20",
"gatewayAddress": "10.{ippart}.0.1",
"region": "https://www.googleapis.com/compute/v1/projects/{project}/regions/{region}",
"selfLink": "https://www.googleapis.com/compute/v1/projects/{project}/regions/{region}/subnetworks/{name}",
"privateIpGoogleAccess": false
}}
"""
def _generate_subnetworks(project, startid):
"""Generate one subnetwork resource per region."""
subnetworks = []
ippart = 128
id = startid
for region in ["asia-east1", "asia-northeast1", "asia-southeast1",
"australia-southeast1", "europe-west1", "europe-west2",
"europe-west3", "southamerica-east1", "us-central1",
"us-east1", "us-east4", "us-west1"]:
subnetworks.append(
json.loads(
SUBNETWORK_TEMPLATE.format(
id=id, name="default", project=project, ippart=ippart,
region=region)))
ippart += 4
id += 1
return subnetworks
GCE_GET_SUBNETWORKS = {
"project1": _generate_subnetworks("project1", 10),
"project2": _generate_subnetworks("project2", 30),
}
# Fields: name, num
GCS_BUCKET_TEMPLATE = """
{{
"kind": "storage#bucket",
"id": "{name}",
"selfLink": "https://www.googleapis.com/storage/v1/b/{name}",
"projectNumber": "{num}",
"name": "{name}",
"timeCreated": "2017-01-18T18:57:23.536Z",
"updated": "2017-01-18T18:57:23.536Z",
"metageneration": "1",
"acl": [
{{
"kind": "storage#bucketAccessControl",
"id": "{name}/project-owners-{num}",
"selfLink": "https://www.googleapis.com/storage/v1/b/{name}/acl/project-owners-{num}",
"bucket": "{name}",
"entity": "project-owners-{num}",
"role": "OWNER",
"projectTeam": {{
"projectNumber": "{num}",
"team": "owners"
}},
"etag": "CAE="
}},
{{
"kind": "storage#bucketAccessControl",
"id": "{name}/project-editors-{num}",
"selfLink": "https://www.googleapis.com/storage/v1/b/{name}/acl/project-editors-{num}",
"bucket": "{name}",
"entity": "project-editors-{num}",
"role": "OWNER",
"projectTeam": {{
"projectNumber": "{num}",
"team": "editors"
}},
"etag": "CAE="
}},
{{
"kind": "storage#bucketAccessControl",
"id": "{name}/project-viewers-{num}",
"selfLink": "https://www.googleapis.com/storage/v1/b/{name}/acl/project-viewers-{num}",
"bucket": "{name}",
"entity": "project-viewers-{num}",
"role": "READER",
"projectTeam": {{
"projectNumber": "{num}",
"team": "viewers"
}},
"etag": "CAE="
}}
],
"defaultObjectAcl": [
{{
"kind": "storage#objectAccessControl",
"entity": "project-owners-{num}",
"role": "OWNER",
"projectTeam": {{
"projectNumber": "{num}",
"team": "owners"
}},
"etag": "CAE="
}},
{{
"kind": "storage#objectAccessControl",
"entity": "project-editors-{num}",
"role": "OWNER",
"projectTeam": {{
"projectNumber": "{num}",
"team": "editors"
}},
"etag": "CAE="
}},
{{
"kind": "storage#objectAccessControl",
"entity": "project-viewers-{num}",
"role": "READER",
"projectTeam": {{
"projectNumber": "{num}",
"team": "viewers"
}},
"etag": "CAE="
}}
],
"owner": {{
"entity": "project-owners-{num}"
}},
"location": "US",
"storageClass": "STANDARD",
"etag": "CAE="
}}
"""
GCS_GET_BUCKETS = {
PROJECT_ID_PREFIX + "3": [
json.loads(
GCS_BUCKET_TEMPLATE.format(
name="bucket1", num=PROJECT_ID_PREFIX + "3")),
],
PROJECT_ID_PREFIX + "4": [
json.loads(
GCS_BUCKET_TEMPLATE.format(
name="bucket2", num=PROJECT_ID_PREFIX + "4")),
]
}
GCS_GET_OBJECTS = {}
BUCKET_IAM_TEMPLATE = """
{{
"kind": "storage#policy",
"resourceId": "projects/_/buckets/{name}",
"bindings": [
{{
"role": "roles/storage.legacyBucketOwner",
"members": [
"projectEditor:{project}",
"projectOwner:{project}"
]
}},
{{
"role": "roles/storage.legacyBucketReader",
"members": [
"projectViewer:{project}"
]
}}
],
"etag": "CAE="
}}
"""
GCS_GET_BUCKET_IAM = {
"bucket1":
json.loads(
BUCKET_IAM_TEMPLATE.format(name="bucket1", project="project3")),
"bucket2":
json.loads(
BUCKET_IAM_TEMPLATE.format(name="bucket2", project="project4"))
}
GCS_GET_OBJECT_IAM = {}
# Fields: project, num, id
SERVICEACCOUNT_TEMPLATE = """
{{
"name": "projects/{project}/serviceAccounts/{num}-compute@developer.gserviceaccount.com",
"projectId": "{project}",
"uniqueId": "110{id}",
"email": "{num}-compute@developer.gserviceaccount.com",
"displayName": "Compute Engine default service account",
"etag": "etag",
"oauth2ClientId": "110{id}"
}}
"""
IAM_GET_SERVICEACCOUNTS = {
"project1": [
json.loads(
SERVICEACCOUNT_TEMPLATE.format(
project="project1", num=PROJECT_ID_PREFIX + "1", id=1)),
],
"project2": [
json.loads(
SERVICEACCOUNT_TEMPLATE.format(
project="project2", num=PROJECT_ID_PREFIX + "2", id=2)),
],
}
SERVICEACCOUNT_IAM_POLICY = """
{
"bindings": [
{
"role": "roles/iam.serviceAccountKeyAdmin",
"members": [
"user:c_user@forseti.test"
]
}
]
}
"""
SERVICEACCOUNT_EMPTY_IAM_POLICY = """
{
"etag": "ACAB"
}
"""
SERVICEACCOUNT1 = IAM_GET_SERVICEACCOUNTS["project1"][0]["name"]
SERVICEACCOUNT2 = IAM_GET_SERVICEACCOUNTS["project2"][0]["name"]
IAM_GET_SERVICEACCOUNT_IAM_POLICY = {
SERVICEACCOUNT1: json.loads(SERVICEACCOUNT_IAM_POLICY),
SERVICEACCOUNT2: json.loads(SERVICEACCOUNT_EMPTY_IAM_POLICY),
}
# Fields: sa_name, id
SERVICEACCOUNT_EXPORT_KEY_TEMPLATE = """
{{
"name": "{sa_name}/keys/116{id}",
"validAfterTime": "2017-11-22T17:49:56Z",
"validBeforeTime": "2027-11-20T17:49:56Z",
"keyAlgorithm": "KEY_ALG_RSA_2048"
}}
"""
IAM_GET_SERVICEACCOUNT_KEYS = {
SERVICEACCOUNT1: [
json.loads(
SERVICEACCOUNT_EXPORT_KEY_TEMPLATE.format(
sa_name=SERVICEACCOUNT1, id=1)),
],
}
CONTAINER_SERVERCONFIG = """
{
"defaultClusterVersion": "1.7.11-gke.1",
"validNodeVersions": [
"1.8.6-gke.0",
"1.8.5-gke.0",
"1.8.4-gke.1",
"1.8.3-gke.0",
"1.8.2-gke.0",
"1.8.1-gke.1",
"1.7.12-gke.0",
"1.7.11-gke.1",
"1.7.11",
"1.7.10-gke.0",
"1.7.8-gke.0",
"1.6.13-gke.1",
"1.6.11-gke.0",
"1.5.7"
],
"defaultImageType": "COS",
"validImageTypes": [
"UBUNTU",
"COS"
],
"validMasterVersions": [
"1.8.6-gke.0",
"1.8.5-gke.0",
"1.7.12-gke.0",
"1.7.11-gke.1"
]
}
"""
# Fields: project, cl_name, np_name, zone
CONTAINER_CLUSTERS_TEMPLATE = """
{{
"name": "{cl_name}",
"nodeConfig": {{
"machineType": "n1-standard-1",
"diskSizeGb": 100,
"oauthScopes": [
"https://www.googleapis.com/auth/compute",
"https://www.googleapis.com/auth/devstorage.read_only",
"https://www.googleapis.com/auth/logging.write",
"https://www.googleapis.com/auth/monitoring",
"https://www.googleapis.com/auth/servicecontrol",
"https://www.googleapis.com/auth/service.management.readonly",
"https://www.googleapis.com/auth/trace.append"
],
"imageType": "COS",
"serviceAccount": "default"
}},
"masterAuth": {{
"username": "user",
"password": "pass",
"clusterCaCertificate": "AB",
"clientCertificate": "AB",
"clientKey": "AB"
}},
"loggingService": "logging.googleapis.com",
"monitoringService": "none",
"network": "default",
"clusterIpv4Cidr": "10.8.0.0/14",
"addonsConfig": {{
"httpLoadBalancing": {{}},
"kubernetesDashboard": {{}},
"networkPolicyConfig": {{
"disabled": true
}}
}},
"subnetwork": "default",
"nodePools": [
{{
"name": "{np_name}",
"config": {{
"machineType": "n1-standard-1",
"diskSizeGb": 100,
"oauthScopes": [
"https://www.googleapis.com/auth/compute",
"https://www.googleapis.com/auth/devstorage.read_only",
"https://www.googleapis.com/auth/logging.write",
"https://www.googleapis.com/auth/monitoring",
"https://www.googleapis.com/auth/servicecontrol",
"https://www.googleapis.com/auth/service.management.readonly",
"https://www.googleapis.com/auth/trace.append"
],
"imageType": "COS",
"serviceAccount": "default"
}},
"initialNodeCount": 3,
"autoscaling": {{}},
"management": {{
"autoUpgrade": true
}},
"selfLink": "https://container.googleapis.com/v1/projects/{project}/zones/{zone}/clusters/{cl_name}/nodePools/{np_name}",
"version": "1.7.11-gke.1",
"instanceGroupUrls": [
"https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/instanceGroupManagers/gke-{cl_name}-{np_name}-12345678-grp"
],
"status": "RUNNING"
}}
],
"locations": [
"us-central1-a"
],
"labelFingerprint": "abcdef12",
"legacyAbac": {{}},
"networkPolicy": {{
"provider": "CALICO"
}},
"ipAllocationPolicy": {{}},
"masterAuthorizedNetworksConfig": {{}},
"selfLink": "https://container.googleapis.com/v1/projects/{project}/zones/{zone}/clusters/{cl_name}",
"zone": "{zone}",
"endpoint": "10.0.0.1",
"initialClusterVersion": "1.7.6-gke.1",
"currentMasterVersion": "1.7.11-gke.1",
"currentNodeVersion": "1.7.11-gke.1",
"createTime": "2017-10-24T19:36:21+00:00",
"status": "RUNNING",
"nodeIpv4CidrSize": 24,
"servicesIpv4Cidr": "10.11.240.0/20",
"instanceGroupUrls": [
"https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/instanceGroupManagers/gke-{cl_name}-{np_name}-12345678-grp"
],
"currentNodeCount": 3
}}
"""
KE_GET_CLUSTERS = {
"project1": [
json.loads(
CONTAINER_CLUSTERS_TEMPLATE.format(
project="project1", cl_name="cluster-1", np_name="default-pool",
zone="us-central1-a")),
],
}
KE_GET_SERVICECONFIG = {
"us-central1-a": json.loads(CONTAINER_SERVERCONFIG),
}
# Fields: project, role
PROJECT_ROLES_TEMPLATE = """
{{
"name": "projects/{project}/roles/{role}",
"title": "{role}",
"description": "Created on: 2017-11-08",
"includedPermissions": [
"compute.firewalls.create",
"compute.firewalls.delete",
"compute.firewalls.get",
"compute.firewalls.list",
"compute.firewalls.update",
"compute.globalOperations.list",
"compute.networks.updatePolicy",
"compute.projects.get"
],
"etag": "BwVdgFmZ7Dg="
}}
"""
IAM_GET_PROJECT_ROLES = {
"project4": [
json.loads(
PROJECT_ROLES_TEMPLATE.format(project="project4", role="role1")),
]
}
# Fields: orgid, role
ORG_ROLES_TEMPLATE = """
{{
"name": "{orgid}/roles/{role}",
"title": "{role}",
"description": "Created on: 2017-11-08",
"includedPermissions": [
"compute.firewalls.create",
"compute.firewalls.delete",
"compute.firewalls.get",
"compute.firewalls.list",
"compute.firewalls.update",
"compute.globalOperations.list",
"compute.networks.updatePolicy",
"compute.projects.get"
],
"etag": "BwVdgFmZ7Dg="
}}
"""
IAM_GET_ORG_ROLES = {
ORGANIZATION_ID: [
json.loads(
ORG_ROLES_TEMPLATE.format(orgid=ORGANIZATION_ID, role="role2")),
]
}
IAM_GET_CURATED_ROLES = [{
"name":
"roles/appengine.appAdmin",
"title":
"App Engine Admin",
"description":
"Full management of App Engine apps (but not storage).",
"includedPermissions": [
"appengine.applications.disable", "appengine.applications.get",
"appengine.applications.update", "appengine.instances.delete",
"appengine.instances.get", "appengine.instances.list",
"appengine.instances.update", "appengine.operations.cancel",
"appengine.operations.delete", "appengine.operations.get",
"appengine.operations.list", "appengine.runtimes.actAsAdmin",
"appengine.services.delete", "appengine.services.get",
"appengine.services.list", "appengine.services.update",
"appengine.versions.create", "appengine.versions.delete",
"appengine.versions.get", "appengine.versions.list",
"appengine.versions.update", "resourcemanager.projects.get",
"resourcemanager.projects.list"
],
"stage":
"GA",
"etag":
"AA=="
}, {
"name":
"roles/appengine.appViewer",
"title":
"App Engine Viewer",
"description":
"Ability to view App Engine app status.",
"includedPermissions": [
"appengine.applications.get", "appengine.instances.get",
"appengine.instances.list", "appengine.operations.get",
"appengine.operations.list", "appengine.services.get",
"appengine.services.list", "appengine.versions.get",
"appengine.versions.list", "resourcemanager.projects.get",
"resourcemanager.projects.list"
],
"stage":
"GA",
"etag":
"AA=="
}, {
"name":
"roles/appengine.codeViewer",
"title":
"App Engine Code Viewer",
"description":
"Ability to view App Engine app status and deployed source code.",
"includedPermissions": [
"appengine.applications.get", "appengine.instances.get",
"appengine.instances.list", "appengine.operations.get",
"appengine.operations.list", "appengine.services.get",
"appengine.services.list", "appengine.versions.get",
"appengine.versions.getFileContents", "appengine.versions.list",
"resourcemanager.projects.get", "resourcemanager.projects.list"
],
"stage":
"GA",
"etag":
"AA=="
}]
# Fields: project
BILLING_ENABLED_TEMPLATE = """
{{
"name": "projects/{project}/billingInfo",
"projectId": "{project}",
"billingAccountName": "billingAccounts/000000-111111-222222",
"billingEnabled": true
}}
"""
# Fields: project
BILLING_DISABLED_TEMPLATE = """
{{
"name": "projects/{project}/billingInfo",
"projectId": "{project}"
}}
"""
BILLING_GET_INFO = {
"project1":
json.loads(
BILLING_ENABLED_TEMPLATE.format(project="project1")),
"project2":
json.loads(
BILLING_ENABLED_TEMPLATE.format(project="project2")),
"project3":
json.loads(
BILLING_ENABLED_TEMPLATE.format(project="project3")),
"project4":
json.loads(
BILLING_DISABLED_TEMPLATE.format(project="project4")),
}
APPENGINE_API_ENABLED = """
{
"serviceName": "appengine.googleapis.com",
"producerProjectId": "google.com:elegant-theorem-93918"
}
"""
BIGQUERY_API_ENABLED = """
{
"serviceName": "bigquery-json.googleapis.com",
"producerProjectId": "google.com:ultra-current-88221"
}
"""
CLOUDSQL_API_ENABLED = """
{
"serviceName": "sql-component.googleapis.com",
"producerProjectId": "google.com:prod-default-producer-project"
}
"""
COMPUTE_API_ENABLED = """
{
"serviceName": "compute.googleapis.com",
"producerProjectId": "google.com:api-project-539346026206"
}
"""
CONTAINER_API_ENABLED = """
{
"serviceName": "container.googleapis.com",
"producerProjectId": "google.com:cloud-kubernetes-devrel"
}
"""
STORAGE_API_ENABLED = """
{
"serviceName": "storage-component.googleapis.com",
"producerProjectId": "google.com:prod-default-producer-project"
}
"""
SERVICEMANAGEMENT_ENABLED_APIS = {
"project1": [
json.loads(STORAGE_API_ENABLED),
json.loads(COMPUTE_API_ENABLED),
json.loads(CONTAINER_API_ENABLED),
],
"project2": [
json.loads(STORAGE_API_ENABLED),
json.loads(COMPUTE_API_ENABLED),
json.loads(CLOUDSQL_API_ENABLED),
],
"project3": [
json.loads(STORAGE_API_ENABLED),
json.loads(BIGQUERY_API_ENABLED),
],
"project4": [
json.loads(STORAGE_API_ENABLED),
json.loads(APPENGINE_API_ENABLED),
],
}
# Fields: name, destination
LOG_SINK_TEMPLATE = """
{{
"name": "{name}",
"destination": "{destination}",
"filter": "logName:\\\"logs/cloudaudit.googleapis.com\\\"",
"outputVersionFormat": "V2",
"writerIdentity": "serviceAccount:{name}@logging-1234.iam.gserviceaccount.com"
}}
"""
# Fields: name, destination
LOG_SINK_TEMPLATE_NO_FILTER = """
{{
"name": "{name}",
"destination": "{destination}",
"outputVersionFormat": "V2",
"writerIdentity": "serviceAccount:{name}@logging-1234.iam.gserviceaccount.com"
}}
"""
# Fields: name, destination
LOG_SINK_TEMPLATE_INCL_CHILDREN = """
{{
"name": "{name}",
"destination": "{destination}",
"outputVersionFormat": "V2",
"filter": "logName:\\\"logs/cloudaudit.googleapis.com\\\"",
"includeChildren": true,
"writerIdentity": "serviceAccount:cloud-logs@system.gserviceaccount.com"
}}
"""
LOGGING_GET_ORG_SINKS = {
ORGANIZATION_ID: [
json.loads(
LOG_SINK_TEMPLATE.format(
name="org-audit-logs",
destination="storage.googleapis.com/my_org_logs")),
]
}
LOGGING_GET_FOLDER_SINKS = {
"folders/" + FOLDER_ID_PREFIX + "1": [
json.loads(
LOG_SINK_TEMPLATE.format(
name="folder-logs", destination=(
"pubsub.googleapis.com/projects/project1/topics/f1-logs"))),
],
"folders/" + FOLDER_ID_PREFIX + "2": [
json.loads(
LOG_SINK_TEMPLATE_INCL_CHILDREN.format(
name="folder-logs",
destination="storage.googleapis.com/my_folder_logs")),
]
}
LOGGING_GET_BILLING_ACCOUNT_SINKS = {
"000000-111111-222222": [
json.loads(
LOG_SINK_TEMPLATE.format(
name="billing-audit-logs",
destination="storage.googleapis.com/b001122_logs")),
]
}
LOGGING_GET_PROJECT_SINKS = {
"project1": [
json.loads(
LOG_SINK_TEMPLATE.format(
name="logs-to-bigquery", destination=(
"bigquery.googleapis.com/projects/project1/"
"datasets/audit_logs"))),
json.loads(
LOG_SINK_TEMPLATE_NO_FILTER.format(
name="logs-to-gcs",
destination="storage.googleapis.com/project1_logs")),
],
"project2": [
json.loads(
LOG_SINK_TEMPLATE.format(
name="logs-to-gcs",
destination="storage.googleapis.com/project2_logs")),
]
}
| [
2,
15069,
2177,
383,
27325,
316,
72,
4765,
46665,
13,
1439,
2489,
10395,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11... | 2.137805 | 29,433 |
from django.contrib import admin
from django.db.models import F
from django.utils.translation import ugettext_lazy as _
from django_todopago import models
@admin.register(models.Merchant)
@admin.register(models.Operation)
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
42625,
14208,
13,
9945,
13,
27530,
1330,
376,
198,
6738,
42625,
14208,
13,
26791,
13,
41519,
1330,
334,
1136,
5239,
62,
75,
12582,
355,
4808,
198,
198,
6738,
42625,
14208,
62,
... | 3.180556 | 72 |
from __future__ import absolute_import
from redis_cache import get_redis_connection
import pickle
import redis
from django.conf import settings
from surround.django.utils import CacheKey
from surround.django import execution
import datetime
from redis import WatchError
from surround.django.logging import setupModuleLogger
setupModuleLogger(globals())
redis.StrictRedis.set_pickled = set_pickled
redis.StrictRedis.get_pickled = get_pickled
# make_key cannot be used here, since the key is already in its final shape
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,
2266,
271,
62,
23870,
1330,
651,
62,
445,
271,
62,
38659,
198,
11748,
2298,
293,
198,
11748,
2266,
271,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
4573,
13,
28... | 3.407643 | 157 |
"""
Lightweight objects
"""
import collections
from caspy import str
| [
37811,
198,
15047,
6551,
5563,
198,
37811,
198,
11748,
17268,
198,
6738,
269,
5126,
88,
1330,
965,
628,
628
] | 3.789474 | 19 |
"""
Module containing helpers for small calculation involing nonlinear optics
"""
# %%
from skultrafast.unit_conversions import c
from scipy.optimize import minimize_scalar
import numpy as np
def tl_pulse_from_nm(center_wl: float, fhwm: float, shape: str = 'gauss') -> float:
"""
Calculates the transformlimted pulselength in fs from given center
wavelength and fwhm in nanometers.
Parameters
----------
center_wl : float
fhwm : float
shape : str,
optional, by default 'gauss'
Returns
-------
float
"""
if shape == 'gauss':
tbw = 0.44
elif shape == 'sech':
tbw = 0.315
return tbw / (c*1e9 * fhwm / center_wl**2)*1e15
def dispersion(t_in, t_out):
"""
Estimates the amount of dispersion assuming form the pulse length a
transform limited input pulse
Parameters
----------
t_in : float
[description]
t_out : float
[description]
Returns
-------
[type]
[description]
"""
f = minimize_scalar(lambda x: (pulse_length(t_in, x) - t_out)**2)
return f.x
tl = tl_pulse_from_nm(765, 20)
dispersion(tl, 120)/54
25/dist(5)
import matplotlib.pyplot as plt
plt.figure(dpi=200)
d = np.linspace(4, 20, 100)
plt.plot(d, 2*np.floor(25.4/dist(d)))
plt.setp(plt.gca(), xlabel='distance mirrors (mm)', ylabel='max. bounces')
plt.annotate('Bounces at 10° AOI', (10, 25), fontsize='large')
dist(280)
# %%
a = np.arange(1,7)
for i in range(4):
a = np.dstack((a, a))
a.shape
# %%
6**4
# %%
import itertools
# %%
a = itertools.product(range(1, 7), repeat=3)
ar = np.array(list(a))
ar = ar.sum(1)
#plt.hist(ar.sum(1), bins=np.arange(3, 20), histtype='step', density=True)
plt.step(np.arange(ar.max()+1), np.bincount(ar)/len(ar))
a = itertools.product(range(1, 7), repeat=4)
ar = np.array(list(a))
ar = ar.sum(1)-ar.min()
#plt.hist(ar.sum(1), bins=np.arange(3, 25), histtype='step')
plt.step(np.arange(ar.max()+1), np.bincount(ar)/len(ar))
# %%
itertools.co | [
37811,
198,
26796,
7268,
49385,
329,
1402,
17952,
1876,
278,
1729,
29127,
36237,
198,
37811,
198,
2,
43313,
198,
6738,
1341,
586,
430,
7217,
13,
20850,
62,
1102,
47178,
1330,
269,
198,
6738,
629,
541,
88,
13,
40085,
1096,
1330,
17775,
... | 2.256983 | 895 |
from .undirected_graph import *
from .ggm import *
| [
6738,
764,
917,
1060,
276,
62,
34960,
1330,
1635,
198,
6738,
764,
1130,
76,
1330,
1635,
198
] | 3 | 17 |
#------------------------------------------------------------------------------
# Copyright 2013 Esri
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#------------------------------------------------------------------------------
# TestBuildElevationMosaic.py
# Description: Test Build Elevation Mosaic Toolbox
# Requirements: ArcGIS Desktop Standard
# ----------------------------------------------------------------------------
import arcpy
import sys
import traceback
import TestUtilities
import os
# End-to-end test: run the Build Elevation Mosaics geoprocessing tool against
# DTED source data, then verify every output mosaic dataset received at least
# one footprint row.  (Python 2 script - note the statement-form prints.)
try:
    arcpy.ImportToolbox(TestUtilities.toolbox)
    arcpy.env.overwriteOutput = True
    #Set tool param variables
    inputElevationFolderPath = os.path.join(TestUtilities.elevSourcePath)
    inputRasterType = "DTED"
    #inputAspectFunctionTemplateFile = os.path.join(TestUtilities.toolboxesPath,"Raster Functions","Aspect.rft.xml")
    #inputPercentSlopeFunctionTemplateFile = os.path.join(TestUtilities.toolboxesPath,"Raster Functions","PercentSlope.rft.xml")
    #inputHillshadeFunctionTemplateFile = os.path.join(TestUtilities.toolboxesPath,"Raster Functions","Hillshade.rft.xml")
    # Raster function templates applied to the derived mosaics.
    inputAspectFunctionTemplateFile = os.path.join(TestUtilities.toolboxesPath,"Raster Functions","AspectNumericValues.rft.xml")
    inputPercentSlopeFunctionTemplateFile = os.path.join(TestUtilities.toolboxesPath,"Raster Functions","SlopePercentRise.rft.xml")
    inputHillshadeFunctionTemplateFile = os.path.join(TestUtilities.toolboxesPath,"Raster Functions","GreyScaleHillshade.rft.xml")
    outputDTMMosaic = "DigitalTerrainModel"
    outputHillshadeMosaic = os.path.join(TestUtilities.outputGDB, "Hillshade")
    outputAspectMosaic = os.path.join(TestUtilities.outputGDB,"Aspect")
    outputPercentSlopeMosaic = os.path.join(TestUtilities.outputGDB,"PercentSlope")
    #Testing Build Elevation Mosaics - DTED input
    arcpy.AddMessage("Starting Test: Build Elevation Mosaic Tools")
    arcpy.BuildElevationMosaics_elevationmosaics(TestUtilities.outputGDB,inputElevationFolderPath,inputRasterType,
                                                 inputAspectFunctionTemplateFile,inputPercentSlopeFunctionTemplateFile,
                                                 inputHillshadeFunctionTemplateFile,outputDTMMosaic,outputHillshadeMosaic,
                                                 outputAspectMosaic,outputPercentSlopeMosaic)
    #Verify Results
    # Each output mosaic must contain at least one footprint, else the run failed.
    countDTMFootprints = int(arcpy.GetCount_management(os.path.join(TestUtilities.outputGDB,outputDTMMosaic)).getOutput(0))
    print "DTM Footprint count: " + str(countDTMFootprints)
    countSlopeFootprints = int(arcpy.GetCount_management(outputPercentSlopeMosaic).getOutput(0))
    print "PercentSlope Footprint count: " + str(countSlopeFootprints)
    countHillshadeFootprints = int(arcpy.GetCount_management(outputHillshadeMosaic).getOutput(0))
    print "Hillshade Footprint count: " + str(countHillshadeFootprints)
    countAspectFootprints = int(arcpy.GetCount_management(outputAspectMosaic).getOutput(0))
    print "Aspect Footprint count: " + str(countAspectFootprints)
    if (countDTMFootprints < 1) or (countSlopeFootprints < 1) or (countHillshadeFootprints < 1) or (countAspectFootprints < 1):
        print "Invalid output footprint count!"
        raise Exception("Test Failed")
    print("Test Passed")
# NOTE(review): LicenseError is never defined (nor raised) anywhere in this
# script, so if this handler were ever matched Python would raise a NameError
# instead.  Esri samples normally define `class LicenseError(Exception)` and
# raise it when CheckExtension() is not "Available" - confirm and restore.
except LicenseError:
    print "Spatial Analyst license is unavailable"
except arcpy.ExecuteError:
    # Get the arcpy error messages
    msgs = arcpy.GetMessages()
    arcpy.AddError(msgs)
    print(msgs)
    # return a system error code
    sys.exit(-1)
except:
    # Get the traceback object
    tb = sys.exc_info()[2]
    tbinfo = traceback.format_tb(tb)[0]
    # Concatenate information together concerning the error into a message string
    pymsg = "PYTHON ERRORS:\nTraceback info:\n" + tbinfo + "\nError Info:\n" + str(sys.exc_info()[1])
    msgs = "ArcPy ERRORS:\n" + arcpy.GetMessages() + "\n"
    # Return python error messages for use in script tool or Python Window
    arcpy.AddError(pymsg)
    arcpy.AddError(msgs)
    # Print Python error messages for use in Python / Python Window
    print(pymsg + "\n")
    print(msgs)
    # return a system error code
    sys.exit(-1)
finally:
    # Release the Spatial Analyst extension if this process checked it out.
    if arcpy.CheckExtension("Spatial") == "Available":
        arcpy.CheckInExtension("Spatial")
2,
10097,
26171,
198,
2,
15069,
2211,
8678,
380,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
92... | 2.872695 | 1,681 |
from click.testing import CliRunner
from demisto_sdk.__main__ import main
from TestSuite.test_tools import ChangeCWD
DOC_REVIEW = 'doc-review'
def test_spell_integration_dir_valid(repo):
    """
    Given
    - an integration directory.
    When
    - Running doc-review on the whole directory.
    Then
    - Spell checking covers only the yml/md documentation files.
    - No misspelled words are reported.
    """
    pack = repo.create_pack('my_pack')
    integration = pack.create_integration('myint')
    integration.create_default_integration()
    with ChangeCWD(repo.path):
        cli = CliRunner(mix_stderr=False)
        run_result = cli.invoke(main, [DOC_REVIEW, '-i', integration.path], catch_exceptions=False)
    output = run_result.stdout
    assert 'No misspelled words found ' in output
    assert 'Words that might be misspelled were found in' not in output
    # Documentation-bearing files were visited...
    assert integration.yml.path in output
    assert integration.readme.path in output
    assert integration.description.path in output
    # ...while the code file was skipped.
    assert integration.code.path not in output
def test_spell_integration_invalid(repo):
    """
    Given
    - an integration yml whose display/description contain misspelled words.
    When
    - Running doc-review on that yml.
    Then
    - The misspelled words ('kfawh', 'ggghghgh') are reported.
    """
    pack = repo.create_pack('my_pack')
    integration = pack.create_integration('myint')
    integration.create_default_integration()
    content = integration.yml.read_dict()
    content.update({
        'display': 'legal words kfawh and some are not',
        'description': 'ggghghgh',
    })
    integration.yml.write_dict(content)
    with ChangeCWD(repo.path):
        cli = CliRunner(mix_stderr=False)
        run_result = cli.invoke(main, [DOC_REVIEW, '-i', integration.yml.path], catch_exceptions=False)
    output = run_result.stdout
    assert 'No misspelled words found ' not in output
    assert 'Words that might be misspelled were found in' in output
    assert 'kfawh' in output
    assert 'ggghghgh' in output
def test_spell_script_invalid(repo):
    """
    Given
    - a script yml whose comment and first argument description contain
      misspelled words.
    When
    - Running doc-review on that yml.
    Then
    - The misspelled words ('kfawh', 'ddddddd') are reported.
    """
    pack = repo.create_pack('my_pack')
    script = pack.create_script('myscr')
    script.create_default_script()
    content = script.yml.read_dict()
    content['comment'] = 'legal words kfawh and some are not'
    first_arg = content['args'][0]
    first_arg['description'] = first_arg.get('description') + ' some more ddddddd words '
    script.yml.write_dict(content)
    with ChangeCWD(repo.path):
        cli = CliRunner(mix_stderr=False)
        run_result = cli.invoke(main, [DOC_REVIEW, '-i', script.yml.path], catch_exceptions=False)
    output = run_result.stdout
    assert 'No misspelled words found ' not in output
    assert 'Words that might be misspelled were found in' in output
    assert 'kfawh' in output
    assert 'ddddddd' in output
def test_spell_playbook_invalid(repo):
    """
    Given
    - a playbook yml whose description and first task description contain
      misspelled words.
    When
    - Running doc-review on that yml.
    Then
    - The misspelled words ('kfawh', 'ddddddd') are reported.
    """
    pack = repo.create_pack('my_pack')
    playbook = pack.create_playbook('myplaybook')
    playbook.create_default_playbook()
    content = playbook.yml.read_dict()
    content['description'] = 'legal words kfawh and some are not'
    first_task = content['tasks']['0']['task']
    first_task['description'] = first_task.get('description') + ' some more ddddddd words '
    playbook.yml.write_dict(content)
    with ChangeCWD(repo.path):
        cli = CliRunner(mix_stderr=False)
        run_result = cli.invoke(main, [DOC_REVIEW, '-i', playbook.yml.path], catch_exceptions=False)
    output = run_result.stdout
    assert 'No misspelled words found ' not in output
    assert 'Words that might be misspelled were found in' in output
    assert 'kfawh' in output
    assert 'ddddddd' in output
def test_spell_readme_invalid(repo):
    """
    Given
    - a readme containing misspelled words plus valid and invalid camelCase
      words.
    When
    - Running doc-review on it.
    Then
    - The misspelled words are reported.
    - Legal camelCase words are not flagged.
    """
    pack = repo.create_pack('my_pack')
    integration = pack.create_integration('myint')
    integration.create_default_integration()
    readme_text = (
        "some weird readme which is not really a word. "
        "and should be noted bellow - also hghghghgh\n"
        "GoodCase stillGoodCase notGidCase"
    )
    integration.readme.write(readme_text)
    with ChangeCWD(repo.path):
        cli = CliRunner(mix_stderr=False)
        run_result = cli.invoke(main, [DOC_REVIEW, '-i', integration.readme.path], catch_exceptions=False)
    output = run_result.stdout
    assert 'No misspelled words found ' not in output
    assert 'Words that might be misspelled were found in' in output
    assert 'readme' in output
    assert 'hghghghgh' in output
    assert 'notGidCase' in output
    # Well-formed camelCase must not be reported as misspelled.
    assert 'GoodCase' not in output
    assert 'stillGoodCase' not in output
def test_review_release_notes_valid(repo):
    """
    Given
    - a valid release-notes file: the line starts with a capital letter, ends
      with a period, avoids the word 'bug', has no misspelled words, and
      matches a known template.
    When
    - Running doc-review on it.
    Then
    - No errors are reported.
    """
    pack = repo.create_pack('my_pack')
    rn_text = (
        '\n'
        '#### Integrations\n'
        '##### Demisto\n'
        ' - Fixed an issue where the ***ip*** command failed when unknown categories were returned.\n'
    )
    rn = pack.create_release_notes(version='1.1.0', content=rn_text)
    with ChangeCWD(repo.path):
        cli = CliRunner(mix_stderr=False)
        run_result = cli.invoke(main, [DOC_REVIEW, '-i', rn.path], catch_exceptions=False)
    output = run_result.stdout
    assert 'No misspelled words found' in output
    assert f' - Release notes {rn.path} match a known template.' in output
def test_review_release_notes_invalid(repo):
    """
    Given
    - an invalid release-notes file: lower-case start, no trailing period,
      uses the word 'bug', contains a misspelled word, and fits no template.
    When
    - Running doc-review on it.
    Then
    - Every violation is reported and the correct spelling is suggested.
    """
    pack = repo.create_pack('my_pack')
    rn_text = (
        '\n'
        '#### Integrations\n'
        '##### Demisto\n'
        ' - fixed a bug where the ***ip*** commanda failed when unknown categories were returned\n'
    )
    rn = pack.create_release_notes(version='1.1.0', content=rn_text)
    with ChangeCWD(repo.path):
        cli = CliRunner(mix_stderr=False)
        run_result = cli.invoke(main, [DOC_REVIEW, '-i', rn.path], catch_exceptions=False)
    output = run_result.stdout
    assert ('Notes for the line: "fixed a bug where the ***ip*** commanda '
            'failed when unknown categories were returned"') in output
    assert 'Line is not using one of our templates,' in output
    assert 'Refrain from using the word "bug", use "issue" instead.' in output
    assert 'Line should end with a period (.)' in output
    assert 'Line should start with capital letter.' in output
    assert "commanda - did you mean:" in output
    assert 'command' in output
def test_templates_print(repo):
    """
    Given
    - the --templates flag.
    When
    - Running doc-review with it.
    Then
    - The known templates are printed and no other checks run.
    """
    with ChangeCWD(repo.path):
        cli = CliRunner(mix_stderr=False)
        run_result = cli.invoke(main, [DOC_REVIEW, '--templates'], catch_exceptions=False)
    output = run_result.stdout
    assert 'General Pointers About Release Notes:' in output
    # Printing templates must short-circuit the spell check entirely.
    assert 'Checking spelling on' not in output
| [
6738,
3904,
13,
33407,
1330,
1012,
72,
49493,
198,
198,
6738,
1357,
396,
78,
62,
21282,
74,
13,
834,
12417,
834,
1330,
1388,
198,
6738,
6208,
5606,
578,
13,
9288,
62,
31391,
1330,
9794,
34,
22332,
198,
198,
38715,
62,
2200,
28206,
7... | 2.584131 | 3,239 |
import vformer.decoder as decoder
| [
11748,
410,
16354,
13,
12501,
12342,
355,
875,
12342,
198
] | 3.4 | 10 |
import numpy as onp
from .utility.utils import broadcast_tile, block_matrix, sum_over_each_neighborhood_on_blocked_matrix, expand
from ._second_algorithm import run_second_algorithm
from ct_charachterization.utility.utils import expand, contract
| [
11748,
299,
32152,
355,
319,
79,
198,
6738,
764,
315,
879,
13,
26791,
1330,
7025,
62,
40927,
11,
2512,
62,
6759,
8609,
11,
2160,
62,
2502,
62,
27379,
62,
710,
394,
2865,
2894,
62,
261,
62,
2436,
3543,
62,
6759,
8609,
11,
4292,
198... | 3.205128 | 78 |
#! /usr/bin/env python
import argparse
import base64
import codecs
import collections
import StringIO
import logging
import json
import os
import stat
import pipes
import re
import signal
import sys
import thread
import time
import pexpect
import psutil
import six
logger = logging.getLogger('awx.main.utils.expect')
def open_fifo_write(path, data):
    '''open_fifo_write opens the fifo named pipe in a new thread.

    This blocks the thread until an external process (such as ssh-agent)
    reads data from the pipe.

    :param path: filesystem path at which the FIFO is created (must not
                 already exist - os.mkfifo raises OSError otherwise)
    :param data: text written into the pipe once a reader attaches
    '''
    # Owner read/write only: the pipe may carry secrets (e.g. SSH key data).
    os.mkfifo(path, 0o600)
    # open() on a FIFO blocks until a reader connects, so do the write from a
    # background thread to avoid stalling the caller.
    thread.start_new_thread(lambda p, d: open(p, 'w').write(d), (path, data))
def run_pexpect(args, cwd, env, logfile,
                cancelled_callback=None, expect_passwords=None,
                extra_update_fields=None, idle_timeout=None, job_timeout=0,
                pexpect_timeout=5, proot_cmd='bwrap'):
    '''
    Run the given command using pexpect to capture output and provide
    passwords when requested.

    :param args:                a list of `subprocess.call`-style arguments
                                representing a subprocess e.g., ['ls', '-la']
    :param cwd:                 the directory in which the subprocess should
                                run
    :param env:                 a dict containing environment variables for the
                                subprocess, ala `os.environ`
    :param logfile:             a file-like object for capturing stdout
    :param cancelled_callback:  a callable - which returns `True` or `False`
                                - signifying if the job has been prematurely
                                cancelled
    :param expect_passwords:    a dict of regular expression password prompts
                                to input values, i.e., {r'Password:\s*?$':
                                'some_password'}; defaults to an empty mapping
    :param extra_update_fields: a dict used to specify DB fields which should
                                be updated on the underlying model
                                object after execution completes
    :param idle_timeout         a timeout (in seconds); if new output is not
                                sent to stdout in this interval, the process
                                will be terminated
    :param job_timeout          a timeout (in seconds); if the total job runtime
                                exceeds this, the process will be killed
    :param pexpect_timeout      a timeout (in seconds) to wait on
                                `pexpect.spawn().expect()` calls
    :param proot_cmd            the command used to isolate processes, `bwrap`

    Returns a tuple (status, return_code) i.e., `('successful', 0)`
    '''
    # Fix: the signature previously used a mutable default (`expect_passwords={}`)
    # and then mutated it below, so the TIMEOUT/EOF sentinel keys leaked into
    # the shared default dict across calls.  Normalize into a fresh OrderedDict
    # instead (also avoids mutating a caller-supplied mapping).  We iterate
    # over `keys()` and `values()` separately to map matched prompts to the
    # strings we send, so the ordering of the two views must agree - an
    # OrderedDict guarantees that and preserves any ordering the caller set.
    expect_passwords = collections.OrderedDict(expect_passwords or {})
    # TIMEOUT/EOF matches mean "no password prompt seen"; None -> send nothing.
    expect_passwords[pexpect.TIMEOUT] = None
    expect_passwords[pexpect.EOF] = None
    # Materialize as lists so positional indexing below is valid regardless of
    # whether dict views are lists (py2) or view objects (py3).
    password_patterns = list(expect_passwords.keys())
    password_values = list(expect_passwords.values())

    # pexpect needs all env vars to be utf-8 encoded strings
    # https://github.com/pexpect/pexpect/issues/512
    for k, v in env.items():
        if isinstance(v, six.text_type):
            env[k] = v.encode('utf-8')

    child = pexpect.spawn(
        args[0], args[1:], cwd=cwd, env=env, ignore_sighup=True,
        encoding='utf-8', echo=False, use_poll=True
    )
    child.logfile_read = logfile
    canceled = False
    timed_out = False
    errored = False
    last_stdout_update = time.time()
    job_start = time.time()
    while child.isalive():
        # Wait for a password prompt, fresh output (TIMEOUT), or EOF.
        result_id = child.expect(password_patterns, timeout=pexpect_timeout, searchwindowsize=100)
        password = password_values[result_id]
        if password is not None:
            child.sendline(password)
            last_stdout_update = time.time()
        if cancelled_callback:
            try:
                canceled = cancelled_callback()
            except Exception:
                # A broken cancel callback is treated as a system error, not a
                # cancellation - surface it on the model and stop the job.
                logger.exception('Could not check cancel callback - canceling immediately')
                if isinstance(extra_update_fields, dict):
                    extra_update_fields['job_explanation'] = "System error during job execution, check system logs"
                errored = True
        else:
            canceled = False
        if not canceled and job_timeout != 0 and (time.time() - job_start) > job_timeout:
            timed_out = True
            if isinstance(extra_update_fields, dict):
                extra_update_fields['job_explanation'] = "Job terminated due to timeout"
        if canceled or timed_out or errored:
            handle_termination(child.pid, child.args, proot_cmd, is_cancel=canceled)
        if idle_timeout and (time.time() - last_stdout_update) > idle_timeout:
            child.close(True)
            canceled = True
    if errored:
        return 'error', child.exitstatus
    elif canceled:
        return 'canceled', child.exitstatus
    elif child.exitstatus == 0 and not timed_out:
        return 'successful', child.exitstatus
    else:
        return 'failed', child.exitstatus
def run_isolated_job(private_data_dir, secrets, logfile=sys.stdout):
    '''
    Launch `ansible-playbook`, executing a job packaged by
    `build_isolated_job_data`.

    :param private_data_dir: an absolute path on the local file system where
                             job metadata exists (i.e.,
                             `/tmp/ansible_awx_xyz/`)
    :param secrets: a dict containing sensitive job metadata, {
                        'env': { ... } # environment variables,
                        'passwords': { ... } # pexpect password prompts
                        'ssh_key_data': 'RSA KEY DATA',
                    }
    :param logfile: a file-like object for capturing stdout

    Returns a tuple (status, return_code) i.e., `('successful', 0)`
    '''
    # The serialized argv was written to `<private_data_dir>/args` by the
    # controller; rebind the name from file handle to parsed list.
    with open(os.path.join(private_data_dir, 'args'), 'r') as args:
        args = json.load(args)
    env = secrets.get('env', {})
    # Compile each password prompt into a multiline regex keyed to its answer.
    expect_passwords = {
        re.compile(pattern, re.M): password
        for pattern, password in secrets.get('passwords', {}).items()
    }
    # Ad hoc commands run from the private data dir itself; playbook jobs run
    # from the checked-out project directory.
    if 'AD_HOC_COMMAND_ID' in env:
        cwd = private_data_dir
    else:
        cwd = os.path.join(private_data_dir, 'project')
    # write the SSH key data into a fifo read by ssh-agent
    ssh_key_data = secrets.get('ssh_key_data')
    if ssh_key_data:
        ssh_key_path = os.path.join(private_data_dir, 'ssh_key_data')
        ssh_auth_sock = os.path.join(private_data_dir, 'ssh_auth.sock')
        open_fifo_write(ssh_key_path, ssh_key_data)
        # NOTE(review): wrap_args_with_ssh_agent is not defined in the visible
        # portion of this module - presumably provided elsewhere; confirm.
        args = wrap_args_with_ssh_agent(args, ssh_key_path, ssh_auth_sock)
    idle_timeout = secrets.get('idle_timeout', 10)
    job_timeout = secrets.get('job_timeout', 10)
    pexpect_timeout = secrets.get('pexpect_timeout', 5)
    # Use local callback directory
    callback_dir = os.getenv('AWX_LIB_DIRECTORY')
    if callback_dir is None:
        raise RuntimeError('Location for callbacks must be specified '
                           'by environment variable AWX_LIB_DIRECTORY.')
    env['ANSIBLE_CALLBACK_PLUGINS'] = os.path.join(callback_dir, 'isolated_callbacks')
    if 'AD_HOC_COMMAND_ID' in env:
        env['ANSIBLE_STDOUT_CALLBACK'] = 'minimal'
    else:
        env['ANSIBLE_STDOUT_CALLBACK'] = 'awx_display'
    env['AWX_ISOLATED_DATA_DIR'] = private_data_dir
    env['PYTHONPATH'] = env.get('PYTHONPATH', '') + callback_dir + ':'
    # Fail fast if the job references a virtualenv that does not exist here.
    venv_path = env.get('VIRTUAL_ENV')
    if venv_path and not os.path.exists(venv_path):
        raise RuntimeError(
            'a valid Python virtualenv does not exist at {}'.format(venv_path)
        )
    return run_pexpect(args, cwd, env, logfile,
                       expect_passwords=expect_passwords,
                       idle_timeout=idle_timeout,
                       job_timeout=job_timeout,
                       pexpect_timeout=pexpect_timeout)
def handle_termination(pid, args, proot_cmd, is_cancel=True):
    '''
    Terminate a subprocess spawned by `pexpect`.

    :param pid: the process id of the running the job.
    :param args: the args for the job, i.e., ['ansible-playbook', 'abc.yml']
    :param proot_cmd the command used to isolate processes i.e., `bwrap`
    :param is_cancel: flag showing whether this termination is caused by
                      instance's cancel_flag.
    '''
    try:
        # Jobs wrapped by the isolation command (bwrap) may have spawned a
        # whole process tree; SIGKILL every descendant so none are leaked.
        if proot_cmd in ' '.join(args):
            # NOTE(review): psutil is imported unconditionally at the top of
            # this module, and an imported module object is always truthy, so
            # this fallback looks unreachable - presumably a leftover from an
            # optional try/except import; confirm.
            if not psutil:
                os.kill(pid, signal.SIGKILL)
            else:
                try:
                    main_proc = psutil.Process(pid=pid)
                    child_procs = main_proc.children(recursive=True)
                    for child_proc in child_procs:
                        os.kill(child_proc.pid, signal.SIGKILL)
                    os.kill(main_proc.pid, signal.SIGKILL)
                except (TypeError, psutil.Error):
                    # Fall back to killing just the main pid.
                    os.kill(pid, signal.SIGKILL)
        else:
            # Unwrapped jobs get a polite SIGTERM instead.
            os.kill(pid, signal.SIGTERM)
        time.sleep(3)
    except OSError:
        # Process already exited - nothing to do but log it.
        keyword = 'cancel' if is_cancel else 'timeout'
        logger.warn("Attempted to %s already finished job, ignoring" % keyword)
if __name__ == '__main__':
    # CLI driver: `start` daemonizes and runs the job, `stop` terminates it,
    # `is-alive` probes the pid recorded in <private_data_dir>/pid.
    import awx
    __version__ = awx.__version__
    parser = argparse.ArgumentParser(description='manage a daemonized, isolated ansible playbook')
    parser.add_argument('--version', action='version', version=__version__ + '-isolated')
    parser.add_argument('command', choices=['start', 'stop', 'is-alive'])
    parser.add_argument('private_data_dir')
    args = parser.parse_args()
    private_data_dir = args.private_data_dir
    pidfile = os.path.join(private_data_dir, 'pid')
    if args.command == 'start':
        # create a file to log stderr in case the daemonized process throws
        # an exception before it gets to `pexpect.spawn`
        stderr_path = os.path.join(private_data_dir, 'artifacts', 'daemon.log')
        if not os.path.exists(stderr_path):
            # Regular file, owner read/write only (it may capture secrets).
            os.mknod(stderr_path, stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR)
        stderr = open(stderr_path, 'w+')
        import daemon
        from daemon.pidfile import TimeoutPIDLockFile
        context = daemon.DaemonContext(
            pidfile=TimeoutPIDLockFile(pidfile),
            stderr=stderr
        )
        with context:
            # NOTE(review): __run__ is not defined in the visible portion of
            # this module - confirm it exists elsewhere (or whether this was
            # meant to call run_isolated_job).
            __run__(private_data_dir)
        sys.exit(0)
    # Both `stop` and `is-alive` need the previously recorded pid.
    try:
        with open(pidfile, 'r') as f:
            pid = int(f.readline())
    except IOError:
        sys.exit(1)
    if args.command == 'stop':
        try:
            with open(os.path.join(private_data_dir, 'args'), 'r') as args:
                handle_termination(pid, json.load(args), 'bwrap')
        except IOError:
            # No args file - still attempt termination without them.
            handle_termination(pid, [], 'bwrap')
    elif args.command == 'is-alive':
        try:
            # os.kill raises OSError if the pid no longer exists.
            os.kill(pid, signal.SIG_DFL)
            sys.exit(0)
        except OSError:
            sys.exit(1)
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
1822,
29572,
198,
11748,
2779,
2414,
198,
11748,
40481,
82,
198,
11748,
17268,
198,
11748,
10903,
9399,
198,
11748,
18931,
198,
11748,
33918,
198,
11748,
28686,
198,
11748,
... | 2.200737 | 5,156 |
def add(x, y):
    """Return the sum of *x* and *y*."""
    total = x + y
    return total
def subtract(x, y):
    """Return the difference of *x* and *y* (i.e. x - y)."""
    difference = x - y
    return difference
4299,
751,
7,
87,
11,
331,
2599,
198,
220,
220,
220,
37227,
3060,
734,
3146,
290,
1441,
511,
2160,
37811,
198,
220,
220,
220,
1441,
2124,
10,
88,
198,
198,
4299,
34128,
7,
87,
11,
331,
2599,
198,
220,
220,
220,
37227,
3834,
83,
... | 2.816667 | 60 |
#
# Python script for generating new documentation
#
# _author_ = Grant Curell <grant_curell@dell.com>
#
#
# Copyright (c) 2020 Dell EMC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import ast
import logging
import re
import subprocess
import sys
from os import scandir
from os.path import abspath, basename, join
from collections import OrderedDict
import jinja2
import yaml
def _get_powershell_example(script_to_process: str):
    """Return the 'EXAMPLE 1' section of a PowerShell script's Get-Help output.

    :param script_to_process: file name of a .ps1 script, resolved relative to
        the module-level ``categories_dictionary['powershell_path']`` (defined
        further down in this module; this works because the helper is only
        called after the YML has been loaded). Windows-only: shells out to
        powershell.exe.
    """
    logging.info("Retrieving PowerShell example for " + script_to_process)
    pipe = subprocess.Popen(["powershell.exe",
                             "Get-Help",
                             abspath(join(categories_dictionary['powershell_path'], script_to_process)),
                             "-Examples"],
                            stdout=subprocess.PIPE)
    try:
        # Everything after the first EXAMPLE divider is the examples block; an
        # IndexError means Get-Help printed no such divider.
        output = pipe.stdout.read().decode('utf-8').split("-------------------------- EXAMPLE 1 "
                                                          "--------------------------")[1].strip()
    except IndexError:
        print("Received an index error while processing " + script_to_process + ". This typically means the help "
              "section of the PowerShell is not formatted correctly. Try running 'Get-Help " + script_to_process +
              " -Examples' and verify that the examples output correctly.")
        # NOTE(review): exiting with status 0 reports success to the calling
        # shell even though this is an error path - sys.exit(1) seems
        # intended; confirm before changing.
        sys.exit(0)
    output = output.splitlines()
    # Remove blank lines - PowerShell otherwise prints with several unnecessary blank lines
    example = ""
    for line_to_clean in filter(lambda x: not re.match(r'^\s*$', x), output):
        example = example + line_to_clean + '\n'
    # Drop the "EXAMPLE n" divider headers printed between further examples.
    example = re.sub(r"-------------------------- EXAMPLE \d --------------------------", "\n", example)
    return example
# --- Module-level driver: read categories.yml, harvest docstrings/examples
# --- from the Python and PowerShell scripts, then render API.md via jinja2.
logging.info("Reading in YML...")
with open('categories.yml') as category_file:
    categories_dictionary = yaml.load(category_file, Loader=yaml.FullLoader)
python_file_list = []
# module_data maps category -> script key -> metadata consumed by the template.
module_data = {'deploy': {}, 'update': {}, 'monitor': {}, 'maintain': {}, 'other': {}}
for entry in scandir(categories_dictionary['python_code_path']):
    if entry.path.endswith(".py"):
        python_file_list.append(entry.path)
logging.info("Scanning Python files for docstrings and extracting them...")
script_tracker = {}  # Used to track if a key has Python scripts, PowerShell scripts, or both
for module_path in python_file_list:
    with open(module_path) as fd:
        module_contents = fd.read()
    # Parse each module with ast to pull its top-level docstring without
    # importing (and thereby executing) it.
    module = ast.parse(module_contents)
    docstring = ast.get_docstring(module)
    if docstring is None:
        docstring = ""
    # Key is the name without py- ex: get_group_details
    key = basename(module_path).replace('.py', '')
    if key in categories_dictionary['deploy']:
        category = 'deploy'
    elif key in categories_dictionary['update']:
        category = 'update'
    elif key in categories_dictionary['monitor']:
        category = 'monitor'
    elif key in categories_dictionary['maintain']:
        category = 'maintain'
    else:
        category = 'other'
        # NOTE(review): the error paths in this script call sys.exit(0), which
        # reports success to the calling shell - sys.exit(1) seems intended;
        # confirm before changing.
        logging.error(key + " is not in categories! It will not be displayed in the documentation. "
                            "You should add it to categories before continuing.")
        sys.exit(0)
    # Call PowerShell's help and then extract examples from the help page
    powershell_example = None
    script_tracker[key] = {}
    script_tracker[key]['has_powershell'] = False
    script_tracker[key]['has_python'] = False
    for script in categories_dictionary[category][key]:
        if script.endswith('.ps1'):
            if script_tracker[key]['has_powershell']:
                logging.error("It looks like a PowerShell script for " + key + " may have been listed twice. Fix and"
                              " then rerun this script.")
                sys.exit(0)
            logging.info("Retrieving PowerShell example for " + script)
            powershell_example = _get_powershell_example(script)
            script_tracker[key]['has_powershell'] = True
        elif script.endswith('py'):
            if script_tracker[key]['has_python']:
                logging.error("It looks like a Python script for " + key + " may have been listed twice. Fix and"
                              " then rerun this script.")
                sys.exit(0)
            script_tracker[key]['has_python'] = True
        else:
            logging.error(key + " has a script listed that does not end with either"
                          " ps1 or .py. This is probably an error. Fix and then rerun this script.")
            sys.exit(0)
    if not powershell_example:
        logging.warning("No PowerShell script found for " + key)
    module_data[category][key] = {
        'path': abspath(module_path),
        'docstring': docstring,
        'readable_name': key.replace('_', ' ').title(),
        'available_scripts': categories_dictionary[category][key],
        'anchor_link': '#' + key.replace('_', '-').lower(),
        'powershell_example': powershell_example
    }
# Handle cases where a PowerShell script exists, but a Python script does not
# TODO - This doesn't check to see if there are PowerShell files on the file system that aren't in categories
for category, scripts in categories_dictionary.items():
    if category == 'deploy' or category == 'update' or category == 'monitor' or category == 'maintain':
        for key in scripts:
            if key not in script_tracker:
                logging.warning("No Python script found for " + key)
                for script in categories_dictionary[category][key]:
                    script_tracker[key] = {}
                    script_tracker[key]['has_powershell'] = False
                    script_tracker[key]['has_python'] = False
                    if script.endswith('.ps1'):
                        if script_tracker[key]['has_powershell']:
                            logging.error("It looks like a PowerShell script for " + key + " may have been listed "
                                          "twice. Fix and then rerun this script.")
                            sys.exit(0)
                        # Get synopsis from PowerShell
                        logging.info("Retrieving synopsis for " + script)
                        p = subprocess.Popen(["powershell.exe",
                                              "Get-Help",
                                              abspath(join(categories_dictionary['powershell_path'], script))],
                                             stdout=subprocess.PIPE)
                        powershell = p.stdout.read().decode('utf-8')
                        powershell_no_blanklines = ""
                        for line in filter(lambda x: not re.match(r'^\s*$', x), powershell.splitlines()):
                            powershell_no_blanklines = powershell_no_blanklines + line + '\n'
                        # Synopsis sits between the SYNOPSIS and SYNTAX headers.
                        results = re.search('SYNOPSIS(.*)SYNTAX', powershell_no_blanklines, re.DOTALL)
                        docstring = "#### Synopsis" + results.group(1)
                        # Get description from PowerShell
                        results = re.search('DESCRIPTION(.*)RELATED LINKS', powershell_no_blanklines, re.DOTALL)
                        docstring = docstring + "#### Description" + results.group(1)
                        # Get rid of the leading whitespaces on each line which would convert the line to code
                        # in markdown
                        docstring = re.sub(r"\n(\s){2,}", "\n", docstring)
                        module_data[category][key] = {
                            'path': abspath(join(categories_dictionary['powershell_path'], script)),
                            'docstring': docstring,
                            'readable_name': key.replace('_', ' ').title(),
                            'available_scripts': categories_dictionary[category][key],
                            'anchor_link': '#' + key.replace('_', '-').lower(),
                            'powershell_example': _get_powershell_example(script)
                        }
                        script_tracker[key]['has_powershell'] = True
                    elif script.endswith('py'):
                        logging.error("We shouldn't be here. Is there something strange about this script?")
                        sys.exit(0)
                    else:
                        logging.error(key + " has a script listed that does not end with either"
                                      " ps1 or .py. This is probably an error. Fix and then rerun this script.")
                        sys.exit(0)
# Alphabetize all dictionaries by key
for category, scripts in module_data.items():
    if category == 'deploy' or category == 'update' or category == 'monitor' or category == 'maintain':
        module_data[category] = OrderedDict(sorted(module_data[category].items()))
logging.info("Creating API doc from jinja2 template...")
templateLoader = jinja2.FileSystemLoader(searchpath="./")
templateEnv = jinja2.Environment(loader=templateLoader)
TEMPLATE_FILE = "API.md.j2"
template = templateEnv.get_template(TEMPLATE_FILE)
outputText = template.render(module_data=module_data)  # this is where to put args to the template renderer
with open("API.md", "w") as f:
    f.write(outputText)
logging.info("API.md generated!")
| [
2,
198,
2,
11361,
4226,
329,
15453,
649,
10314,
198,
2,
198,
2,
4808,
9800,
62,
796,
12181,
36947,
297,
1279,
2164,
415,
62,
66,
495,
297,
31,
67,
695,
13,
785,
29,
198,
2,
198,
2,
198,
2,
15069,
357,
66,
8,
12131,
23617,
412,... | 2.309629 | 4,289 |
"""" Unit test file. """
import unittest
import math
import pandas as pd
from ..LineageInputOutput import import_exp_data, tryRecursion
from ..CellVar import CellVar as c
class TestModel(unittest.TestCase):
"""
Unit test class for importing data.
"""
def setUp(self):
"""
Gets the path to synthetic data.
This data is formatted similarly to Heiser lab data,
but contains known mistakes/exceptions that the functions
should be able to handle.
"""
self.path_to_synthetic_data = r"lineage/data/heiser_data/Synth_data.xlsx"
# manually setting up trees from Synth_data
# 1
self.parent1 = c(parent=None, gen=1)
self.parent1.obs = [1, 1, 10, 10, 0, 1]
self.left1 = c(parent=self.parent1, gen=2)
self.left1.obs = [1, 0, 10, 10, 1, 1]
self.right1 = c(parent=self.parent1, gen=2)
self.right1.obs = [1, float('nan'), 20, 105, 1, 0]
self.parent1.left = self.left1
self.parent1.right = self.right1
self.lin1 = [self.left1, self.right1, self.parent1]
# 2
self.parent2 = c(parent=None, gen=1)
self.parent2.obs = [1, 1, 10, 10, 0, 1]
self.left2 = c(parent=self.parent2, gen=2)
self.left2.obs = [float('nan'), float(
'nan'), 125, float('nan'), 0, float('nan')]
self.right2 = c(parent=self.parent2, gen=2)
self.right2.obs = [1, 0, 10, 10, 1, 1]
self.parent2.left = self.left2
self.parent2.right = self.right2
self.lin2 = [self.left2, self.right2, self.parent2]
# 3
self.parent3 = c(parent=None, gen=1)
self.parent3.obs = [1, 1, float('nan'), 30, float('nan'), 0]
self.left3_1 = c(parent=self.parent3, gen=2)
self.left3_1.obs = [1, 1, 30, 30, 1, 1]
self.right3_1 = c(parent=self.parent3, gen=2)
self.right3_1.obs = [1, 0, 10, 80, 1, 1]
self.parent3.left = self.left3_1
self.parent3.right = self.right3_1
self.left3_2 = c(parent=self.left3_1, gen=3)
self.left3_2.obs = [1, float('nan'), 30, 25, 1, 0]
self.right3_2 = c(parent=self.left3_1, gen=3)
self.right3_2.obs = [1, float('nan'), 25, 30, 1, 0]
self.lin3 = [self.left3_2, self.right3_2,
self.left3_1, self.right3_1, self.parent3]
self.lin = [self.lin1, self.lin2, self.lin3]
def test_import_Heiser(self):
"""
Tests the main import function for Heiser lab data.
"""
path2use = self.path_to_synthetic_data
lineages = import_exp_data(path2use)
self.assertTrue(len(lineages) == 3)
self.assertTrue(len(lineages[0]) == 3)
self.assertTrue(len(lineages[1]) == 3)
self.assertTrue(len(lineages[2]) == 5)
# This won't work if the order the cells are stored is changed
for i, lineage in enumerate(lineages):
# soft check that the order is probably the same
assert lineage[len(lineage) - 1].gen == 1
    def test_tryRecursion(self):
        """
        Tests the recursion function used to recurse across Excel cells
        in Heiser lab data.
        """
        path2use = self.path_to_synthetic_data
        # Load the raw synthetic spreadsheet as a plain 2-D numpy array.
        excel_file = pd.read_excel(path2use, header=None, engine="openpyxl")
        data = excel_file.to_numpy()
        cLin = []
        # Recurse over the third tree's region; discovered cells accumulate
        # into cLin (the return value itself is not needed here).
        # NOTE(review): the numeric arguments (1, 45, 37, 30, 145, [145]) are
        # spreadsheet coordinates specific to this fixture — confirm against
        # tryRecursion's signature in the full source.
        _ = tryRecursion(1, 45, 37, self.parent3, cLin, data, 30, 145, [145])
        self.assertTrue(len(cLin) == 3)
        # Locate a generation-2 cell among the recovered cells.
        i = 0
        while i < len(cLin) and cLin[i].gen != 2:
            i += 1
        assert i < len(cLin)
        # Compare all six observations of the found cell, its children and its
        # parent against the hand-built reference tree; NaN != NaN, so NaNs
        # are matched explicitly via math.isnan.
        for j in range(6):
            self.assertTrue(cLin[i].obs[j] == self.left3_1.obs[j] or (
                math.isnan(self.left3_1.obs[j]) and math.isnan(cLin[i].obs[j])))
            self.assertTrue(cLin[i].right.obs[j] == self.right3_2.obs[j] or (
                math.isnan(self.right3_2.obs[j]) and math.isnan(cLin[i].right.obs[j])))
            self.assertTrue(cLin[i].left.obs[j] == self.left3_2.obs[j] or (
                math.isnan(self.left3_2.obs[j]) and math.isnan(cLin[i].left.obs[j])))
            self.assertTrue(cLin[i].parent.obs[j] == self.parent3.obs[j] or (
                math.isnan(self.parent3.obs[j]) and math.isnan(cLin[i].parent.obs[j])))
| [
15931,
15931,
11801,
1332,
2393,
13,
37227,
198,
11748,
555,
715,
395,
198,
11748,
10688,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
6738,
11485,
13949,
496,
20560,
26410,
1330,
1330,
62,
11201,
62,
7890,
11,
1949,
6690,
24197,
198... | 2.03085 | 2,107 |
from . import preprocessing_step
| [
6738,
764,
1330,
662,
36948,
62,
9662,
628
] | 4.25 | 8 |
"""
https://www.codechef.com/problems/JAIN
Type: Dynamic Programming
"""
if __name__ == '__main__':
for t in range(int(input())):
solve() | [
37811,
198,
5450,
1378,
2503,
13,
19815,
721,
258,
69,
13,
785,
14,
1676,
22143,
14,
41,
29833,
198,
6030,
25,
26977,
30297,
198,
37811,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
329,
25... | 2.47541 | 61 |
from flask_restful import Resource, fields, marshal_with, reqparse
from flask_jwt_extended import jwt_required, get_jwt_identity
from mini_gplus.daos.user import find_user
from mini_gplus.daos.circle import create_circle, get_circles, find_circle, toggle_member, delete_circle, rename_circle
from mini_gplus.daos.exceptions import NotFound
from .users import user_fields
# Marshalling schema for Circle responses; the model's ``eid`` attribute is
# exposed to API clients as ``id``.
circle_fields = {
    'id': fields.String(attribute='eid'),
    'owner': fields.Nested(user_fields),
    'name': fields.String,
    'members': fields.List(fields.Nested(user_fields))
}

# Request parser for circle payloads; only a ``name`` argument is accepted.
circle_parser = reqparse.RequestParser()
circle_parser.add_argument('name')
| [
6738,
42903,
62,
2118,
913,
1330,
20857,
11,
7032,
11,
22397,
282,
62,
4480,
11,
43089,
29572,
198,
6738,
42903,
62,
73,
46569,
62,
2302,
1631,
1330,
474,
46569,
62,
35827,
11,
651,
62,
73,
46569,
62,
738,
414,
198,
6738,
9927,
62,
... | 3.009434 | 212 |
from django.contrib import admin
# Register your models here.
from .models import Relationship,Person
# Expose both models in the default Django admin with stock ModelAdmin
# options.
admin.site.register(Relationship)
admin.site.register(Person)
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
198,
2,
17296,
534,
4981,
994,
13,
198,
198,
6738,
764,
27530,
1330,
39771,
11,
15439,
198,
198,
28482,
13,
15654,
13,
30238,
7,
47117,
1056,
8,
198,
28482,
13,
15654,
13,
30238,
... | 3.630435 | 46 |
__copyright__ = "Copyright (C) 2019 Zachary J Weiner"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import numpy as np
import h5py
class OutputFile:
    """
    A wrapper to :class:`h5py:File` which collects and saves useful run
    information and provides functionality to append to datasets.

    No arguments are required, but the following keyword arguments are
    recognized:

    :arg context: A :class:`pyopencl.Context`. If not *None*, information
        about the device, driver, and platform is saved to the
        :attr:`attrs` dictionary.
        Defaults to *None*.

    :arg name: The name of the ``.h5`` (sans the extension) file to create.
        If *None*, a unique filename is chosen based on the current date and
        time.
        Defaults to *None*.

    :arg runfile: A file whose content will be saved as a string to
        ``attrs["runfile"]``, if not *None*. Useful for attaching the run file
        of a simulation to its output.
        Defaults to *None*.

    Any remaining keyword arguments are saved to the :attr:`attrs` dictionary.
    If any value ``val`` is not of valid type to be saved, the ``val.__name__``
    attribute is saved if the value is a :class:`type` instance, or else
    ``str(val)`` is saved.

    Versions and git revisions (when available) of :mod:`pystella` and its
    dependencies are saved as ``"versions"`` and ``"git_revs"``
    :class:`h5py:Dataset`\\ s. The hostname is recorded in the ``"hostname"``
    key of the :attr:`attrs` dictionary.

    .. automethod:: output
    """

    def output(self, group, **kwargs):
        """
        Appends values to datasets within a :class:`h5py:Group` named ``group``.

        ``group`` is created if it does not exist, and the :class:`h5py:Dataset`'s
        of this :class:`h5py:Group` are determined by the keys of keyword arguments.
        If ``group`` already exists, iterates over each :class:`h5py:Dataset` and
        appends values from keyword arguments (matching :class:`h5py:Dataset`
        names to keys).

        :arg group: The :class:`h5py:Group` to append :class:`h5py:Dataset`
            values to.
            If ``group`` already exists, a keyword argument for each
            :class:`h5py:Dataset` in ``group`` must be provided.
        """

        # NOTE(review): ``self.open``, ``self.create_from_kwargs`` and the
        # module-level ``append`` helper are defined elsewhere in the full
        # source (not visible in this chunk).
        # create group and datasets if they don't exist
        with self.open() as f:
            if group not in f:
                self.create_from_kwargs(f, group, **kwargs)

            # ensure that all fields are provided
            # (kwargs.pop raises KeyError if a dataset's value is missing)
            for key in f[group]:
                val = kwargs.pop(key)
                append(f[group][key], val)
| [
834,
22163,
4766,
834,
796,
366,
15269,
357,
34,
8,
13130,
18825,
560,
449,
35055,
1,
201,
198,
201,
198,
834,
43085,
834,
796,
37227,
201,
198,
5990,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
16727,
257,
4866... | 2.722141 | 1,364 |
"Output stream buffering: A first look"
'''
By default, standard output is fully buffered when connected to a pipe like this; it’s only
line-buffered when connected to a terminal:
>>> pipe = os.popen('python testexit_os.py')
>>> pipe.read() # streams not flushed on exit
''
>>> pipe = os.popen('python -u testexit_os.py') # force unbuffered streams
>>> pipe.read()
'Bye os world\n'
Confusingly, you can pass mode and buffering argument to specify line buffering in
both os.popen and subprocess.Popen, but this won’t help here—arguments passed to
these tools pertain to the calling process’s input end of the pipe, not to the spawned
program’s output stream:
>>> pipe = os.popen('python testexit_os.py', 'r', 1) # line buffered only
>>> pipe.read() # but my pipe, not program's!
''
>>> from subprocess import Popen, PIPE
>>> pipe = Popen('python testexit_os.py', bufsize=1, stdout=PIPE) # for my pipe
>>> pipe.stdout.read() # doesn't help
b''
Really, buffering mode arguments in these tools pertain to output the caller writes to
a command’s standard input stream, not to output read from that command.
'''
| [
1,
26410,
4269,
6940,
1586,
25,
317,
717,
804,
1,
198,
7061,
6,
198,
3886,
4277,
11,
3210,
5072,
318,
3938,
6940,
1068,
618,
5884,
284,
257,
12656,
588,
428,
26,
340,
447,
247,
82,
691,
198,
1370,
12,
36873,
1068,
618,
5884,
284,
... | 3.278107 | 338 |
from supersql import Query
from .schemas.actor import Actor
from .schemas.staff import Staff
# Connection/query builder for the sample database.
query = Query(
    user='postgres',
    password='postgres',
    # NOTE(review): 'postrgres' looks like a typo for 'postgres' (or
    # 'postgresql') — confirm the vendor identifier supersql expects.
    vendor='postrgres',
    host='localhost:5432/mydatabase'
)

# Schema object whose attributes are used as column references below.
actor = Actor()

# Build (but do not execute) a SELECT over the actor table; the chained
# method calls mirror SQL clause order.
prep = query.SELECT(
    actor.actor_id,
    actor.first_name
).FROM(
    actor
).WHERE(
    actor.last_name == 'Connery'
).OFFSET(
    5
).FETCH(
    10
);
fetch_select = """
WITH
cohort_data AS (
SELECT sk_customer,
iso_country_code country,
FIRST(sk_order) OVER (PARTITION BY sk_customer ORDER BY order_date) sk_first_order,
FIRST(order_date) OVER (PARTITION BY sk_customer ORDER BY order_date) first_order_date
FROM f_salesorder_position sale
LEFT JOIN d_shop shop
ON sale.sk_shop = shop.sk_shop
WHERE (sale.sk_order_date BETWEEN {cohort_start} AND {cohort_end}) AND
sales_channel = 'SHOP'
),
cohort_data2 AS (
SELECT sk_customer,
sk_first_order,
first_order_date,
country
FROM cohort_data
GROUP BY 1,2,3,4
ORDER BY COUNT(country) DESC
),
cohort AS (
SELECT sk_customer,
sk_first_order,
first_order_date,
FIRST(country) country
FROM cohort_data2
GROUP BY 1,2,3
),
shop_training AS (
SELECT cohort.sk_customer,
cohort.country,
sk_order,
first_order_date,
(YEAR(order_date)-YEAR(first_order_date))*52 + WEEKOFYEAR(order_date) - WEEKOFYEAR(first_order_date) tt,
(YEAR('{training_end_date}')-YEAR(first_order_date))*52 + WEEKOFYEAR('{training_end_date}') - WEEKOFYEAR(first_order_date) T,
COALESCE(SUM(pcii),0) pcii
FROM cohort
LEFT JOIN f_salesorder_position sale
ON cohort.sk_customer = sale.sk_customer
LEFT JOIN d_shop shop
ON sale.sk_shop = shop.sk_shop
WHERE (sk_order_date BETWEEN {training_start} AND {training_end}) AND
sales_channel = 'SHOP'
GROUP BY 1,2,3,4,5,6
),
customer_stat AS (
SELECT country,
CAST(sk_customer AS int) sk_customer,
CAST(first_order_date AS date) first_order_date,
CAST(COUNT(*)-1 AS int) x,
MAX(tt) tx,
LEAST(MAX(T), {training_weeks_max}) T,
SUM(pcii) pcii
FROM shop_training
WHERE tt <= {training_weeks_max}
GROUP BY 1,2,3
),
result AS (
SELECT *,
row_number() OVER (PARTITION BY country ORDER BY RAND()) index
FROM customer_stat
WHERE x <= {training_n_basket_max} AND T >= {training_weeks_min}
)
SELECT country, sk_customer, x, tx, T, pcii
FROM result
WHERE index <= {training_n_customer}
"""
| [
6738,
22754,
13976,
1330,
43301,
198,
198,
6738,
764,
1416,
4411,
292,
13,
11218,
1330,
27274,
198,
6738,
764,
1416,
4411,
292,
13,
28120,
1330,
9983,
628,
198,
22766,
796,
43301,
7,
198,
220,
220,
220,
2836,
11639,
7353,
34239,
3256,
... | 2.286871 | 1,112 |
import base64
import hmac
import hashlib
import logging
import bcrypt
from django.conf import settings
from django.contrib.auth.hashers import (BCryptPasswordHasher,
BasePasswordHasher, mask_hash)
from django.utils.crypto import constant_time_compare
from django.utils.encoding import smart_str
from django.utils.datastructures import SortedDict
log = logging.getLogger('common.hashers')
algo_name = lambda hmac_id: 'bcrypt{0}'.format(hmac_id.replace('-', '_'))
def get_hasher(hmac_id):
    """
    Dynamically create password hashers based on hmac_id.

    This class takes the hmac_id corresponding to an HMAC_KEY and creates a
    password hasher class based off of it. This allows us to use djangos
    built-in updating mechanisms to automatically update the HMAC KEYS.
    """
    # NOTE(review): ``dash_hmac_id`` is unused in the visible body; in the
    # full source it is presumably captured by the dynamically created hasher
    # class returned below — confirm before removing.
    dash_hmac_id = hmac_id.replace('_', '-')

    return BcryptHMACPasswordHasher
# We must have HMAC_KEYS. If not, let's raise an import error.
if not settings.HMAC_KEYS:
    raise ImportError('settings.HMAC_KEYS must not be empty.')

# For each HMAC_KEY, dynamically create a hasher to be imported.
# The generated classes are injected into this module's namespace under
# names produced by ``algo_name`` (e.g. ``bcrypt<hmac_id>``) so Django can
# reference them by dotted path in PASSWORD_HASHERS.
for hmac_key in settings.HMAC_KEYS.keys():
    hmac_id = hmac_key.replace('-', '_')
    globals()[algo_name(hmac_id)] = get_hasher(hmac_id)
class BcryptHMACCombinedPasswordVerifier(BCryptPasswordHasher):
    """
    Read-only verifier for any stored hash whose algorithm field is
    'bcrypt'. It accepts bcrypt values (with or without an HMAC) so they
    can be re-encoded into another scheme; it intentionally cannot encode.
    """

    algorithm = 'bcrypt'
    rounds = getattr(settings, 'BCRYPT_ROUNDS', 12)

    def encode(self, password, salt):
        """Encoding is unsupported — this hasher only verifies/re-reads."""
        raise NotImplementedError()

    def _hmac_create(self, password, shared_key):
        """Return the base64-encoded HMAC-SHA512 of ``password``."""
        digest = hmac.new(
            smart_str(shared_key),
            smart_str(password),
            hashlib.sha512).digest()
        return base64.b64encode(digest)
class SHA256PasswordHasher(BasePasswordHasher):
    """The SHA256 password hashing algorithm."""
    # NOTE(review): only ``algorithm`` is visible in this chunk; the
    # encode/verify implementations are presumably elsewhere — confirm.
    algorithm = 'sha256'
class SHA1PasswordHasher(SHA256PasswordHasher):
    """The SHA1 password hashing algorithm."""
    # Only the ``algorithm`` identifier differs from the parent class.
    algorithm = 'sha1'
class SHA512PasswordHasher(SHA256PasswordHasher):
    """The SHA512 password hashing algorithm."""
    # Only the ``algorithm`` identifier differs from the parent class.
    algorithm = 'sha512'
class SHA512b64PasswordHasher(SHA512PasswordHasher):
    """The SHA512 password hashing algorithm with base64 encoding."""
    # Only the ``algorithm`` identifier differs from the parent class.
    algorithm = 'sha512b64'
| [
11748,
2779,
2414,
198,
11748,
289,
20285,
198,
11748,
12234,
8019,
198,
11748,
18931,
198,
198,
11748,
275,
29609,
198,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
10134,
7084,
... | 2.711702 | 940 |
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
from sets import Set
import warnings
from twisted.internet import interfaces, defer, main
from twisted.persisted import styles
from twisted.python import log, failure
from ops import ReadFileOp, WriteFileOp
from util import StateEventMachineType
from zope.interface import implements
from socket import error as socket_error
| [
2,
15069,
357,
66,
8,
5878,
12,
15724,
40006,
24936,
46779,
13,
198,
2,
4091,
38559,
24290,
329,
3307,
13,
628,
198,
6738,
5621,
1330,
5345,
198,
11748,
14601,
198,
198,
6738,
19074,
13,
37675,
1330,
20314,
11,
29135,
11,
1388,
198,
... | 4.257732 | 97 |
# vim: expandtab tabstop=4 shiftwidth=4
from dspftw import *
from .constellation_plot import conplot, constellation_plot
from .plot_3d_complex import plot_3d_complex, plot3c
from .plot_complex import plotc, plot_complex
from .plot_slider import plot_slider, plots
from .plot_func_slider import plot_func_slider, plotfs
| [
2,
43907,
25,
4292,
8658,
7400,
11338,
28,
19,
6482,
10394,
28,
19,
198,
198,
6738,
288,
2777,
701,
86,
1330,
1635,
198,
198,
6738,
764,
9979,
28828,
62,
29487,
1330,
369,
29487,
11,
38712,
62,
29487,
198,
6738,
764,
29487,
62,
18,
... | 3.096154 | 104 |
# -*- coding: utf-8 -*-
import logging
import os
from datetime import datetime
import boto3
from boto3.dynamodb.conditions import Key
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
"""
if __name__ == "__main__":
ddb = DdbChat()
table = ddb.createConnection('chat')
name = 'oranie'
comment = 'チャットシステムです'
chat_room = 'chat'
ddb.putComment(table, name, comment, chat_room)
result = ddb.getLatestComments(table, chat_room)
list = result['Items']
for index, item in enumerate(list):
logging.info(f"id: {str(index)} name: {item['name']} time: {str(item['time'])} comment: {item['comment']}")
result = ddb.getAllComments(table, chat_room)
for index, item in enumerate(result):
logging.info(
f"ALL Result id: {str(index)} name: {item['name']} time: {str(item['time'])} comment: {item['comment']}")
logging.info(result)
result = ddb.getRangeComments(table, chat_room, 0)
for index, item in enumerate(result):
logging.info(
f"RANGE Result id: {str(index)}name: {item['name']} time: {str(item['time'])} comment: {item['comment']}")
logging.info(result)
"""
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
18931,
198,
11748,
28686,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
198,
11748,
275,
2069,
18,
198,
6738,
275,
2069,
18,
13,
67,
4989,
375,
65,
13,
17561,
1... | 2.470103 | 485 |
if __name__ == "__main__":
main()
| [
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
1388,
3419,
198
] | 2.105263 | 19 |
import logging
import logging.handlers
# NOTE(review): this class is Python 2 only — it uses ``httplib``,
# ``urllib.urlencode`` and ``base64.encodestring``, all of which were
# renamed or removed in Python 3.
class HTTPHandler(logging.Handler):
    """
    A class Based on Vinay Sajip HTTPHandler-class which sends records to a Web server,
    using either GET or POST semantics. It supports HTTP, HTTPS and basic authorization.
    """
    def __init__(self, host, url, method="GET", secure=False, authorization=None):
        """
        Initialize the instance with the host, the request URL, the method ("GET" or "POST"), the secure-flag
        (to use HTTPS), and HTTP basic auth credentials as a tuple of username and password.
        """
        logging.Handler.__init__(self)
        method = method.upper()
        if method not in ["GET", "POST"]:
            raise ValueError("method must be GET or POST")
        self.host = host
        self.url = url
        self.method = method
        self.secure = secure
        self.authorization = authorization

    def mapLogRecord(self, record):
        """
        Default implementation of mapping the log record into a dict
        that is sent as the CGI data. Overwrite in your class.
        Contributed by Franz Glasner.
        """
        # The record's attribute dict is sent verbatim, augmented with the
        # local hostname.
        import socket
        record.__dict__.update(hostname=socket.gethostname())
        return record.__dict__

    def emit(self, record):
        """
        Emit a record.

        Send the record to the Web server as a percent-encoded dictionary
        """
        try:
            import httplib, urllib
            host = self.host
            if self.secure:
                h = httplib.HTTPS(host)
            else:
                h = httplib.HTTP(host)
            url = self.url
            data = urllib.urlencode(self.mapLogRecord(record))
            # For GET, append the encoded record to the query string; for
            # POST it is sent as the request body further below.
            if self.method == "GET":
                if (url.find('?') >= 0):
                    sep = '&'
                else:
                    sep = '?'
                url = url + "%c%s" % (sep, data)
            h.putrequest(self.method, url)
            # support multiple hosts on one IP address...
            # need to strip optional :port from host, if present
            i = host.find(":")
            if i >= 0:
                host = host[:i]
            h.putheader("Host", host)
            if self.authorization:
                # Basic auth: ``authorization`` is a (user, password) tuple.
                import base64
                auth = base64.encodestring("%s:%s" % self.authorization).replace('\n', '')
                h.putheader("Authorization",
                            "Basic %s" % auth)
            if self.method == "POST":
                h.putheader("Content-type",
                            "application/x-www-form-urlencoded")
            h.putheader("Content-length", str(len(data)))
            if self.method == "POST":
                h.endheaders()
                h.send(data)
            else:
                h.endheaders()
            h.getreply()    #can't do anything with the result
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            # Delegate all other failures to logging's standard error hook
            # so a broken HTTP endpoint cannot crash the application.
            self.handleError(record)
| [
11748,
18931,
198,
11748,
18931,
13,
4993,
8116,
628,
198,
4871,
14626,
25060,
7,
6404,
2667,
13,
25060,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
317,
1398,
13403,
319,
11820,
323,
311,
1228,
541,
14626,
25060,
12,
4871,
5... | 2.070981 | 1,437 |
#!/usr/bin/python3
from requests import get
import base64
# Gets the splash for the game
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
198,
6738,
7007,
1330,
651,
198,
11748,
2779,
2414,
198,
198,
2,
29620,
262,
22870,
329,
262,
983,
198
] | 3.25 | 28 |
# Copyright 2018 The Feast Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from feast.core.DatasetService_pb2 import FeatureSet as FeatureSet_pb
class FeatureSet:
    """
    Represent a collection of features having same entity.
    """

    @property
    def features(self):
        """
        Return list of feature ID of this feature set

        Returns: list of feature ID in this feature set
        """
        # ``_features`` is populated elsewhere (e.g. __init__, not visible
        # in this chunk).
        return self._features

    # NOTE(review): the two bare ``@property`` decorators below have no
    # function bodies — the decorated methods appear to have been lost
    # (this chunk is not valid Python as-is); restore from the full source.
    @property

    @property
class FileType(object):
    """
    File type for downloading training dataset as file
    """
    # Values are the string identifiers passed to the export backend; the
    # bare strings after each constant are informal attribute docstrings.

    CSV = "CSV"
    """CSV file format"""

    JSON = "NEWLINE_DELIMITED_JSON"
    """Newline delimited JSON file format"""

    AVRO = "AVRO"
    """Avro file format"""
| [
2,
15069,
2864,
383,
42936,
46665,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
743... | 3.172589 | 394 |
from .core import run

# Public API of this package: re-export ``run`` only.
__all__ = ['run']
| [
6738,
764,
7295,
1330,
1057,
198,
198,
834,
439,
834,
796,
37250,
5143,
20520,
198
] | 2.733333 | 15 |
# -----------------------------------------------------
# Copyright (c) Shanghai Jiao Tong University. All rights reserved.
# Written by Jiefeng Li (jeff.lee.sjtu@gmail.com)
# -----------------------------------------------------
import torch
import torch.utils.data
from utils.dataset import coco
from opt import opt
from tqdm import tqdm
from models.FastPose import createModel
from utils.eval import DataLogger, accuracy
from utils.img import flip, shuffleLR
from evaluation import prediction
from tensorboardX import SummaryWriter
import os
# Script entry point; ``main`` is defined elsewhere in the original file
# (not visible in this chunk).
if __name__ == '__main__':
    main()
| [
2,
20368,
19351,
12,
198,
2,
15069,
357,
66,
8,
21865,
449,
13481,
26565,
2059,
13,
1439,
2489,
10395,
13,
198,
2,
22503,
416,
449,
2086,
1516,
7455,
357,
73,
14822,
13,
7197,
13,
82,
73,
28047,
31,
14816,
13,
785,
8,
198,
2,
20... | 3.806452 | 155 |
from itertools import groupby
import logging
from django.conf import settings
from django.db import models
from zconnect._models.base import ModelBase
from zconnect.models import Product
from zconnect.zc_billing.util import BillingPeriod, next_bill_period
logger = logging.getLogger(__name__)
# Supported billing currencies as (value, label) pairs for a CharField's
# ``choices``.
CURRENCIES = [
    ("USD", "USD"),
    ("GBP", "GBP"),
    ("EUR", "EUR"),
]

# Billing cadence choices: display value mapped onto the corresponding
# BillingPeriod member.
BILLING_PERIODS = [
    ("WEEKLY", BillingPeriod.weekly),
    ("MONTHLY", BillingPeriod.monthly),
    ("YEARLY", BillingPeriod.yearly),
]
class BillGenerator(ModelBase):
    """Specified how bills are created

    Taking into account things like the type of device and how often bills
    should be created

    Attributes:
        active_from_date (datetime): When billing was first activated
        cancelled_at (datetime): When billing was cancelled (if it was
            cancelled)
        currency (str): Billing currency
        enabled (bool): Whether this billing is active
        period (str): billing period - not in days, this is 'monthly', 'weekly',
            and 'yearly'
        rate_per_device (str): Amount to charge per device per 'period'
    """

    # Billing can be switched off without deleting the generator.
    enabled = models.BooleanField(default=True)
    rate_per_device = models.IntegerField()
    currency = models.CharField(max_length=3, choices=CURRENCIES)
    # Set when billing is cancelled; stays NULL while active.
    cancelled_at = models.DateTimeField(null=True)
    period = models.CharField(max_length=20, choices=BILLING_PERIODS)
    active_from_date = models.DateTimeField()

    # One generator per organization; PROTECT blocks deleting an organization
    # that still has a generator attached.
    organization = models.OneToOneField(
        "organizations.Organization",
        models.PROTECT,
        # null=True,
        related_name="billed_by",
    )
class Bill(ModelBase):
    """Represents a single 'invoice'/bill that has been created using the
    BillGenerator

    Because the number of devices that an org is being billed for might change
    from one period to another, store a reference between each device that was
    being billed for this bill via a m2m field

    Attributes:
        paid (bool): Whether this has been paid or not
        period_end (datetime): End of period this bill applies to
        period_start (datetime): Start of period this bill applies to
        devices (list(Device)): devices active for this bill period
        generated_by (BillGenerator): Generator for this bill
    """

    paid = models.BooleanField(default=False)
    period_start = models.DateTimeField()
    period_end = models.DateTimeField()
    # (fields.W340) null has no effect on ManyToManyField
    devices = models.ManyToManyField(settings.ZCONNECT_DEVICE_MODEL) #, null=False)
    # PROTECT: a generator with bills attached cannot be deleted.
    generated_by = models.ForeignKey(BillGenerator, models.PROTECT,
                                     related_name="bills")

    # NOTE(review): the four bare ``@property`` decorators below have no
    # function bodies — the decorated methods appear to have been lost
    # (this chunk is not valid Python as-is); restore from the full source.
    @property

    @property

    @property

    @property

    @property
    def devices_by_product(self):
        """ Get devices listed on this bill, grouped by product. """
        # Order by product first...
        by_product = self.devices.all().order_by("product_id")
        # Already grouped by product, so we can just do a groupby() immediately
        # NOTE(review): ``pid`` is not defined in this chunk — presumably a
        # module-level key function (e.g. extracting ``product_id``); confirm
        # against the full source.
        return [{
            "product": Product.objects.get(pk=product_id),
            "devices": devices
        } for product_id, devices in groupby(by_product, pid)]
| [
6738,
340,
861,
10141,
1330,
1448,
1525,
198,
11748,
18931,
198,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
198,
6738,
1976,
8443,
13557,
27530,
13,
8692,
1330,
9104,
14881,
198,
... | 2.758298 | 1,175 |
# -*- coding: utf-8 -*-
import logging
import stripe
from django.utils.translation import gettext as _
from rest_framework import permissions, status, viewsets
from rest_framework.response import Response
from stripe.error import InvalidRequestError, StripeError
from apps.common.rest_framework.mixins import MultiSerializerMixin
from apps.payment import status as payment_status
from apps.payment.models import (
PaymentDelay,
PaymentPrice,
PaymentRelation,
PaymentTransaction,
)
from apps.payment.serializers import (
PaymentDelayReadOnlySerializer,
PaymentPriceReadOnlySerializer,
PaymentRelationCreateSerializer,
PaymentRelationReadOnlySerializer,
PaymentRelationUpdateSerializer,
PaymentTransactionCreateSerializer,
PaymentTransactionReadOnlySerializer,
PaymentTransactionUpdateSerializer,
)
logger = logging.getLogger(__name__)
class PaymentDelayReadOnlyViewSet(viewsets.ReadOnlyModelViewSet):
    """
    Allow users to view their own payment delays.
    """

    # NOTE(review): no get_queryset override is visible in this chunk, so the
    # "own delays" restriction presumably lives elsewhere — confirm.
    permission_classes = (permissions.IsAuthenticated,)
    serializer_class = PaymentDelayReadOnlySerializer
class PaymentTransactionViewSet(MultiSerializerMixin, viewsets.ModelViewSet):
    """
    A user should be allowed to view their transactions.
    Transactions are created with Stripe payment intents.
    Transactions are only updated to confirm pending payment intents.
    A user should not be able to delete a transaction.
    """

    permission_classes = (permissions.IsAuthenticated,)
    # MultiSerializerMixin selects a serializer from this mapping based on
    # the current action.
    serializer_classes = {
        "read": PaymentTransactionReadOnlySerializer,
        "create": PaymentTransactionCreateSerializer,
        "update": PaymentTransactionUpdateSerializer,
    }
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
18931,
198,
198,
11748,
39858,
198,
6738,
42625,
14208,
13,
26791,
13,
41519,
1330,
651,
5239,
355,
4808,
198,
6738,
1334,
62,
30604,
1330,
21627,
11,
3722,
... | 3.419483 | 503 |
from typing import Union
from pandas import DataFrame
| [
6738,
19720,
1330,
4479,
198,
198,
6738,
19798,
292,
1330,
6060,
19778,
628
] | 4.307692 | 13 |
"""Test the module SMOTE ENN."""
from __future__ import print_function
from unbalanced_dataset.combine import SMOTEENN
def test_smote_enn():
    """Smoke-test entry point for the SMOTE ENN combination method."""
    banner = 'Test SMOTE ENN'
    print(banner)
| [
37811,
14402,
262,
8265,
9447,
23051,
412,
6144,
526,
15931,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
198,
6738,
555,
27753,
62,
19608,
292,
316,
13,
24011,
500,
1330,
9447,
23051,
34571,
628,
198,
4299,
1332,
62,
5796,
... | 2.944444 | 72 |
"""
Usage example:
```sh
# To train model on all words:
python model.py "${HOME}/ml-data/speech-command-browser" 232
# To train model on a subset of the words (in addition to _background_noise
# and _unknown_):
python model.py \
--include_words=left,right,up,down \
"${HOME}/ml-data/speech-command-browser" 232
```
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import json
import os
import keras
import tensorflow as tf
from tensorflow.python import debug as tf_debug
import data
# Command-line entry point: parse arguments, validate the optional word
# subset, then delegate to ``train_model`` (defined elsewhere in the
# original file — not visible in this chunk).
if __name__ == '__main__':
    parser = argparse.ArgumentParser('Train model for browser speech commands.')
    parser.add_argument(
        'data_root', type=str, help='Root directory for data.')
    parser.add_argument(
        'n_fft', type=int,
        help='Number of FFT points (after possible truncation). This is the '
        'number of frequency points per column of spectrogram.')
    parser.add_argument(
        '--epochs', type=int, default=300,
        help='Number of epochs to call Model.fit() with.')
    parser.add_argument(
        '--include_words', type=str, default=None,
        help='Optional list of words to include (in addition to _unknown_ '
        'and _background_noise_, which are always included). The words should '
        'be separated with commas (e.g., "up,down").')
    # NOTE(review): 'TensroFlow' in the help text below is a typo for
    # 'TensorFlow' (runtime string — left untouched here).
    parser.add_argument(
        '--tf_debug', action='store_true',
        help='Use TensroFlow Debugger CLI.')
    parsed = parser.parse_args()

    # NOTE(review): if --include_words is omitted, ``include_words`` is never
    # bound before the train_model call below (NameError); an
    # ``else: include_words = None`` branch appears to be missing from this
    # chunk — confirm against the full source.
    if parsed.include_words:
        include_words = parsed.include_words.split(',')
        # Drop empty entries produced by stray commas, then sort.
        include_words = sorted([w for w in include_words if w])
        print('Including only words (in addition to ' +
              '_unknown_ and _background_noise_: %s' % include_words)
        if len(set(include_words)) != len(include_words):
            raise ValueError(
                'Found duplicate words in include_words: %s' % include_words)
        if '_background_noise_' in include_words:
            raise ValueError(
                'You don\'t need to include _background_noise_ in '
                '--include_words. It is always included.')
        if '_unknown_' in include_words:
            raise ValueError(
                'You don\'t need to include _unknown_ in '
                '--include_words. It is always included.')

    train_model(parsed.data_root,
                parsed.n_fft,
                parsed.epochs,
                include_words=include_words,
                debug=parsed.tf_debug)
| [
37811,
198,
28350,
1672,
25,
198,
198,
15506,
63,
1477,
198,
2,
1675,
4512,
2746,
319,
477,
2456,
25,
198,
29412,
2746,
13,
9078,
17971,
90,
39069,
92,
14,
4029,
12,
7890,
14,
45862,
12,
21812,
12,
40259,
1,
31773,
198,
198,
2,
16... | 2.675497 | 906 |