text
stringlengths 3
1.05M
|
|---|
/*
american fuzzy lop++ - high-performance binary-only instrumentation
-------------------------------------------------------------------
Originally written by Andrew Griffiths <agriffiths@google.com> and
Michal Zalewski
TCG instrumentation and block chaining support by Andrea Biondo
<andrea.biondo965@gmail.com>
QEMU 3.1.1 port, TCG thread-safety, CompareCoverage and NeverZero
counters by Andrea Fioraldi <andreafioraldi@gmail.com>
Copyright 2015, 2016, 2017 Google Inc. All rights reserved.
Copyright 2019-2020 AFLplusplus Project. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at:
http://www.apache.org/licenses/LICENSE-2.0
This code is a shim patched into the separately-distributed source
code of QEMU 3.1.0. It leverages the built-in QEMU tracing functionality
to implement AFL-style instrumentation and to take care of the remaining
parts of the AFL fork server logic.
The resulting QEMU binary is essentially a standalone instrumentation
tool; for an example of how to leverage it for other purposes, you can
have a look at afl-showmap.c.
*/
#include "afl-qemu-common.h"
#include "tcg.h"
/* TCG helper: hands control to the AFL fork-server logic for this CPU.
   NOTE(review): presumably emitted once at the target's entry point —
   confirm at the translation-time call site. */
void HELPER(afl_entry_routine)(CPUArchState *env) {

  afl_forkserver(ENV_GET_CPU(env));

}
/* CompareCoverage for a 16-bit compare: bump the map entry when the
   upper bytes of the two operands already match. */
void HELPER(afl_compcov_16)(target_ulong cur_loc, target_ulong arg1,
                            target_ulong arg2) {

  register uintptr_t idx = cur_loc;

  /* Bytes are equal iff their XOR is zero. */
  if (((arg1 ^ arg2) & 0xff00) == 0) { INC_AFL_AREA(idx); }

}
/* CompareCoverage for a 32-bit compare: one counter per additional
   matching byte, walking down from the most significant byte. */
void HELPER(afl_compcov_32)(target_ulong cur_loc, target_ulong arg1,
                            target_ulong arg2) {

  register uintptr_t idx = cur_loc;
  target_ulong diff = arg1 ^ arg2;

  if ((diff & 0xff000000) == 0) {

    INC_AFL_AREA(idx + 2);
    if ((diff & 0xff0000) == 0) {

      INC_AFL_AREA(idx + 1);
      if ((diff & 0xff00) == 0) { INC_AFL_AREA(idx); }

    }

  }

}
/* CompareCoverage for a 64-bit compare: walk bytes 7..1 from the top;
   stop at the first differing byte, bump one counter per matching one. */
void HELPER(afl_compcov_64)(target_ulong cur_loc, target_ulong arg1,
                            target_ulong arg2) {

  register uintptr_t idx = cur_loc;
  unsigned long long diff =
      (unsigned long long)arg1 ^ (unsigned long long)arg2;
  int i;

  for (i = 7; i >= 1; --i) {

    if ((diff >> (i * 8)) & 0xff) break; /* first mismatching byte */
    INC_AFL_AREA(idx + i - 1);

  }

}
/* CmpLog: record a 1-byte compare operand pair at map slot cur_loc. */
void HELPER(afl_cmplog_8)(target_ulong cur_loc, target_ulong arg1,
                          target_ulong arg2) {

  register uintptr_t k = (uintptr_t)cur_loc;

  u32 hits = __afl_cmp_map->headers[k].hits++;
  __afl_cmp_map->headers[k].type = CMP_TYPE_INS;
  __afl_cmp_map->headers[k].shape = 0; /* shape = operand bytes - 1 */

  hits &= CMP_MAP_H - 1;
  __afl_cmp_map->log[k][hits].v0 = arg1;
  __afl_cmp_map->log[k][hits].v1 = arg2;

}
/* CmpLog: record a 2-byte compare operand pair at map slot cur_loc. */
void HELPER(afl_cmplog_16)(target_ulong cur_loc, target_ulong arg1,
                           target_ulong arg2) {

  register uintptr_t k = (uintptr_t)cur_loc;

  u32 hits = __afl_cmp_map->headers[k].hits++;
  __afl_cmp_map->headers[k].type = CMP_TYPE_INS;
  __afl_cmp_map->headers[k].shape = 1; /* shape = operand bytes - 1 */

  hits &= CMP_MAP_H - 1;
  __afl_cmp_map->log[k][hits].v0 = arg1;
  __afl_cmp_map->log[k][hits].v1 = arg2;

}
/* CmpLog: record a 4-byte compare operand pair at map slot cur_loc. */
void HELPER(afl_cmplog_32)(target_ulong cur_loc, target_ulong arg1,
                           target_ulong arg2) {

  register uintptr_t k = (uintptr_t)cur_loc;

  u32 hits = __afl_cmp_map->headers[k].hits++;
  __afl_cmp_map->headers[k].type = CMP_TYPE_INS;
  __afl_cmp_map->headers[k].shape = 3; /* shape = operand bytes - 1 */

  hits &= CMP_MAP_H - 1;
  __afl_cmp_map->log[k][hits].v0 = arg1;
  __afl_cmp_map->log[k][hits].v1 = arg2;

}
/* CmpLog: record an 8-byte compare operand pair at map slot cur_loc. */
void HELPER(afl_cmplog_64)(target_ulong cur_loc, target_ulong arg1,
                           target_ulong arg2) {

  register uintptr_t k = (uintptr_t)cur_loc;

  u32 hits = __afl_cmp_map->headers[k].hits++;
  __afl_cmp_map->headers[k].type = CMP_TYPE_INS;
  __afl_cmp_map->headers[k].shape = 7; /* shape = operand bytes - 1 */

  hits &= CMP_MAP_H - 1;
  __afl_cmp_map->log[k][hits].v0 = arg1;
  __afl_cmp_map->log[k][hits].v1 = arg2;

}
#include <sys/mman.h>

/* Returns non-zero if [ptr, ptr + len) lies entirely in mapped memory.
 * Probes with msync(MS_ASYNC): the call fails with ENOMEM when any page
 * in the range is unmapped; any other failure is treated as "mapped".
 * The page size is queried once and cached — sysconf(_SC_PAGE_SIZE)
 * cannot change during the lifetime of the process. */
static int area_is_mapped(void *ptr, size_t len) {

  static uintptr_t page_size; /* cached on first call */
  if (!page_size) page_size = (uintptr_t)sysconf(_SC_PAGE_SIZE);

  char *p = ptr;
  char *page = (char *)((uintptr_t)p & ~(page_size - 1));

  int r = msync(page, (p - page) + len, MS_ASYNC);
  if (r < 0) return errno != ENOMEM;
  return 1;

}
/* CmpLog routine hook: copies the first 32 bytes of a called routine's
   first two pointer arguments into the CmpLog map (candidates for
   memcmp/strcmp-style functions). Only x86/x86-64 extract real
   arguments; other targets bail out immediately. */
void HELPER(afl_cmplog_rtn)(CPUArchState *env) {

#if defined(TARGET_X86_64)

  /* x86-64 SysV calling convention: first two args in RDI / RSI. */
  void *ptr1 = g2h(env->regs[R_EDI]);
  void *ptr2 = g2h(env->regs[R_ESI]);

#elif defined(TARGET_I386)

  /* i386: arguments live on the guest stack. */
  target_ulong *stack = g2h(env->regs[R_ESP]);

  if (!area_is_mapped(stack, sizeof(target_ulong) * 2)) return;

  // when this hook is executed, the retaddr is not on stack yet
  void * ptr1 = g2h(stack[0]);
  void * ptr2 = g2h(stack[1]);

#else

  // stupid code to make it compile
  void *ptr1 = NULL;
  void *ptr2 = NULL;
  return;

#endif

  /* Both operand buffers must be readable before we memcpy from them. */
  if (!area_is_mapped(ptr1, 32) || !area_is_mapped(ptr2, 32)) return;

#if defined(TARGET_X86_64) || defined(TARGET_I386)
  uintptr_t k = (uintptr_t)env->eip;
#else
  uintptr_t k = 0;
#endif

  /* Mix the PC and reduce it to a CmpLog map index. */
  k = (k >> 4) ^ (k << 8);
  k &= CMP_MAP_W - 1;

  __afl_cmp_map->headers[k].type = CMP_TYPE_RTN;

  u32 hits = __afl_cmp_map->headers[k].hits;
  __afl_cmp_map->headers[k].hits = hits + 1;

  __afl_cmp_map->headers[k].shape = 31; /* 32-byte operands */

  hits &= CMP_MAP_RTN_H - 1;
  __builtin_memcpy(((struct cmpfn_operands *)__afl_cmp_map->log[k])[hits].v0,
                   ptr1, 32);
  __builtin_memcpy(((struct cmpfn_operands *)__afl_cmp_map->log[k])[hits].v1,
                   ptr2, 32);

}
/* Function-entry coverage: hash the callee address into the COV_FN page
   of the shared map. Skipped when instrumentation is disabled via the
   control byte, or when the address is outside the traced code range. */
void HELPER(afl_fncov)(target_ulong fn_loc) {

  if (afl_area_ctrl[0]) return;
  if (fn_loc < afl_start_code || fn_loc > afl_end_code) return;

  register uintptr_t idx =
      (((fn_loc >> 4) ^ (fn_loc << 8)) & (MAP_SIZE - 1)) +
      (COV_FN - 1) * MAP_SIZE;

  INC_AFL_AREA(idx);

}
/* Maps popcount(arg1 ^ arg2) of an 8-bit compare (0..8 differing bits)
   to a one-hot "distance class" bit; 0 differing bits -> bit 0 (equal). */
static const u8 distance_class_lookup8[9] = {

    [0] = 1,
    [1] = 2,
    [2] = 4,
    [3] = 8,
    [4] = 16,
    [5] = 32,
    [6] = 64,
    [7 ... 8] = 128

};
/* Hamming-distance coverage for an 8-bit compare: OR one distance-class
   bit into the COV_LAST page of the map. NOTE(review): idx is not masked
   with MAP_SIZE - 1 here (unlike afl_fncov) — presumably cur_loc is
   already reduced at translation time; confirm at the call site. */
void HELPER(afl_cmpcov_8)(target_ulong cur_loc, target_ulong arg1,
                          target_ulong arg2) {

  if (afl_area_ctrl[0]) return;

  u8 distance = distance_class_lookup8[__builtin_popcountll(arg1 ^ arg2)];
  register uintptr_t idx =
      ((afl_path_cksum >> 1) ^ cur_loc) + (COV_LAST - 1) * MAP_SIZE;

  afl_area_ptr[idx] |= distance;

}
/* Maps popcount(arg1 ^ arg2) of a 16-bit compare (0..16 differing bits)
   to a one-hot "distance class" bit; 0 differing bits -> bit 0 (equal). */
static const u8 distance_class_lookup16[17] = {

    [0] = 1,
    [1 ... 2] = 2,
    [3 ... 4] = 4,
    [5 ... 6] = 8,
    [7 ... 8] = 16,
    [9 ... 10] = 32,
    [11 ... 12] = 64,
    [13 ... 16] = 128

};
/* Hamming-distance coverage for a 16-bit compare: OR one distance-class
   bit into the COV_LAST page of the map. NOTE(review): idx is not masked
   with MAP_SIZE - 1 here — presumably cur_loc is already reduced at
   translation time; confirm at the call site. */
void HELPER(afl_cmpcov_16)(target_ulong cur_loc, target_ulong arg1,
                           target_ulong arg2) {

  if (afl_area_ctrl[0]) return;

  u8 distance = distance_class_lookup16[__builtin_popcountll(arg1 ^ arg2)];
  register uintptr_t idx =
      ((afl_path_cksum >> 1) ^ cur_loc) + (COV_LAST - 1) * MAP_SIZE;

  afl_area_ptr[idx] |= distance;

}
/* Maps popcount(arg1 ^ arg2) of a 32-bit compare (0..32 differing bits)
   to a one-hot "distance class" bit; 0 differing bits -> bit 0 (equal). */
static const u8 distance_class_lookup32[33] = {

    [0] = 1,
    [1 ... 2] = 2,
    [3 ... 4] = 4,
    [5 ... 8] = 8,
    [9 ... 12] = 16,
    [13 ... 16] = 32,
    [17 ... 24] = 64,
    [25 ... 32] = 128

};
/* Hamming-distance coverage for a 32-bit compare: OR one distance-class
   bit into the COV_LAST page of the map. NOTE(review): idx is not masked
   with MAP_SIZE - 1 here — presumably cur_loc is already reduced at
   translation time; confirm at the call site. */
void HELPER(afl_cmpcov_32)(target_ulong cur_loc, target_ulong arg1,
                           target_ulong arg2) {

  if (afl_area_ctrl[0]) return;

  u8 distance = distance_class_lookup32[__builtin_popcountll(arg1 ^ arg2)];
  register uintptr_t idx =
      ((afl_path_cksum >> 1) ^ cur_loc) + (COV_LAST - 1) * MAP_SIZE;

  afl_area_ptr[idx] |= distance;

}
/* Maps popcount(arg1 ^ arg2) of a 64-bit compare (0..64 differing bits)
   to a one-hot "distance class" bit; 0 differing bits -> bit 0 (equal). */
static const u8 distance_class_lookup64[65] = {

    [0] = 1,
    [1 ... 4] = 2,
    [5 ... 8] = 4,
    [9 ... 16] = 8,
    [17 ... 24] = 16,
    [25 ... 36] = 32,
    [37 ... 48] = 64,
    [49 ... 64] = 128

};
/* Hamming-distance coverage for a 64-bit compare: OR one distance-class
   bit into the COV_LAST page of the map. NOTE(review): idx is not masked
   with MAP_SIZE - 1 here — presumably cur_loc is already reduced at
   translation time; confirm at the call site. */
void HELPER(afl_cmpcov_64)(target_ulong cur_loc, target_ulong arg1,
                           target_ulong arg2) {

  if (afl_area_ctrl[0]) return;

  u8 distance = distance_class_lookup64[__builtin_popcountll(arg1 ^ arg2)];
  register uintptr_t idx =
      ((afl_path_cksum >> 1) ^ cur_loc) + (COV_LAST - 1) * MAP_SIZE;

  afl_area_ptr[idx] |= distance;

}
// void HELPER(afl_memcov)(target_ulong cur_pc, target_ulong mem_loc) {
// if(afl_area_ctrl[0]) return;
// cur_pc = (cur_pc >> 4) ^ (cur_pc << 8);
// cur_pc &= MAP_SIZE - 1;
// u8 offset = (mem_loc >> 7) & 0x7;
// mem_loc = ((mem_loc >> 3) & 0xffffff80) | (mem_loc & 0x7f);
// mem_loc &= MAP_SIZE - 1;
// register uintptr_t idx = (cur_pc >> 1) ^ mem_loc;
// idx &= (MAP_SIZE - 1);
// idx += MAP_SIZE * (COV_LAST - 1);
// afl_area_ptr[idx] |= (1 << offset);
// }
|
import catwigopy.twitter_manager as tm
from catwigopy.User import User
from catwigopy.auxiliar import *
from catwigopy.analysis import *
import pandas as pd
import pickle
import pkg_resources
name = "catwigopy"
class Catwigopy:
    """
    The main class of the package. It offers methods to manage all the
    functionality.

    :param user_name: User's Twitter name (@name).
    :param consumer_key: consumer key generated by creating Twitter application.
    :param consumer_secret: consumer secret key generated by creating Twitter application.
    :param access_token: access token key generated by creating Twitter application.
    :param access_token_secret: access token secret key generated by creating Twitter application.
    :param number_of_tweets: Number of tweets to retrieve.
    """

    # Class attributes configured in __init__ / lazily by the accessors below.
    api = None
    name = None
    user_name = None
    image = None
    description = None
    tweets = None
    analysis_results = None
    tweets_terms = None
    hashtags_terms = None
    topics_top_terms = None
    nmf = None                # trained NMF topic model (lazily unpickled)
    tfidf = None              # tf-idf matrix produced during training
    tfidf_vectorizer = None   # fitted vectorizer matching the model vocabulary

    def __init__(self, user_name, consumer_key, consumer_secret, access_token,
                 access_token_secret, number_of_tweets=1200):
        self.api = tm.do_authentication(consumer_key, consumer_secret,
                                        access_token, access_token_secret)
        result = tm.search_user_tweets(self.api, user_name, number_of_tweets)
        self.user_name = user_name
        self.image = result[1]
        self.name = result[2]
        self.description = result[3]
        self.tweets = pd.DataFrame(result[0])
        self.analysis_results = {'nmf': None, 'kmeans': None}

    def _load_nmf_models(self):
        """
        Lazily load the pickled NMF model, tf-idf matrix and vectorizer
        bundled with the package. A no-op when the model is already loaded
        or when any of the three resources is missing.
        """
        if self.nmf is not None:
            return
        resource_package = __name__
        resource_paths = ['/'.join(('data', 'models', 'nmf', fname))
                          for fname in ('nmf.pickle', 'tfidf.pickle',
                                        'tfidf_vectorizer.pickle')]
        # Only load when all three pickles are present in the package data.
        if not all(pkg_resources.resource_exists(resource_package, path)
                   for path in resource_paths):
            return
        loaded = []
        for path in resource_paths:
            with open(pkg_resources.resource_filename(resource_package, path),
                      'rb') as f:
                loaded.append(pickle.load(f))
        self.nmf, self.tfidf, self.tfidf_vectorizer = loaded

    # Classify using NMF with the best hyperparameter configuration acquired
    # in the training phase.
    def get_user_classification(self):
        """
        This function launches the classification. First of all, it gets the
        model variables.

        :return: analysis dictionary.
        """
        self._load_nmf_models()
        if self.analysis_results['nmf'] is None:
            doc = " ".join(self.tweets['preprocessed_tweet'])
            self.analysis_results['nmf'] = apply_nmf(
                self.nmf, self.tfidf, self.tfidf_vectorizer, doc)
        return self.analysis_results['nmf']

    def get_user_name(self):
        """
        This function returns the user's display name.

        :return: name of the user.
        """
        return self.name

    def get_user_username(self):
        """
        This function returns the user's @username.

        :return: username of the user.
        """
        return self.user_name

    def get_user_description(self):
        """
        This function returns the user's profile description.

        :return: user description.
        """
        return self.description

    def get_user_image(self):
        """
        This function returns the user's profile image path.

        :return: user image path.
        """
        return self.image

    # Returns a dict with shape
    # {name_of_category: [{text: term_i, count: 21}, {text: term_j, count: 15} ...], ...}
    def get_topics_top_terms(self, nterms=30):
        """
        This function gets the model variables and launches the construction
        of topics top terms.

        :param nterms: Number of top terms per topic.
        :return: dict containing top terms per topic.
        """
        self._load_nmf_models()
        if self.topics_top_terms is None:
            self.topics_top_terms = generate_top_terms_dictionary(
                self.nmf, self.tfidf_vectorizer, nterms)
        return self.topics_top_terms

    # Returns a list of dictionaries with shape {text: #hashtag, count: 12}
    def get_hashtags_terms_count(self):
        """
        This function launches the construction of a term count array for
        hashtag terms.

        :return: list containing hashtags terms and its occurrences values.
        """
        if self.tweets is None:
            return "error, user tweets have not been searched yet."
        if self.hashtags_terms is None:
            self.hashtags_terms = generate_occurences_dictionay(
                [l for l in self.tweets['hashtags'] if l])
        return self.hashtags_terms

    # Returns a list of dictionaries with shape {text: term, count: 12}
    def get_tweets_terms_count(self):
        """
        This function launches the construction of a term count array for
        tweet terms.

        :return: list containing tweets terms and its occurrences values.
        """
        if self.tweets is None:
            return "error, user tweets have not been searched yet."
        if self.tweets_terms is None:
            self.tweets_terms = generate_occurences_dictionay(
                [l for l in self.tweets['preprocessed_tokens'] if l])
        return self.tweets_terms
|
import api from './api';
export default {
  resource: 'deviceid',

  /**
   * Release a device by id via the REST API.
   * @param {string|number} deviceid - device identifier
   * @returns {Promise} promise for the PATCH /devices/:id/release request
   */
  patchRelease(deviceid) {
    // Removed leftover debug console.log (shipped noise on every call).
    return api().patch(`/devices/${deviceid}/release`);
  },
};
|
import { serialize, parse } from 'cookie';
const TOKEN_NAME = 'token';
export const MAX_AGE = 60 * 60 * 8; // 8 hours
// Issues the auth token cookie on the response: httpOnly, SameSite=Lax,
// site-wide path, 8-hour lifetime, Secure only in production.
export function setTokenCookie(res, token) {
  const options = {
    maxAge: MAX_AGE,
    expires: new Date(Date.now() + MAX_AGE * 1000),
    httpOnly: true,
    secure: process.env.NODE_ENV === 'production',
    path: '/',
    sameSite: 'lax',
  };
  res.setHeader('Set-Cookie', serialize(TOKEN_NAME, token, options));
}
// Clears the auth token cookie by re-issuing it empty with a negative
// maxAge, which makes the browser expire it immediately.
export function removeTokenCookie(res) {
  res.setHeader(
    'Set-Cookie',
    serialize(TOKEN_NAME, '', { maxAge: -1, path: '/' }),
  );
}
// Returns the request's cookies as a plain object.
export function parseCookies(req) {
  // For API Routes we don't need to parse the cookies.
  if (req.cookies) return req.cookies;

  // For pages we do need to parse the cookies.
  const cookie = req.headers?.cookie;
  return parse(cookie || '');
}
// Convenience accessor: pulls the auth token out of the parsed cookies.
export function getTokenCookie(req) {
  return parseCookies(req)[TOKEN_NAME];
}
|
import {Utils, Support} from '@wiajs/core';
// Adds drag-to-resize behavior to a side panel via a '.panel-resize-handler'
// child element. Idempotent per panel: a second call is a no-op. The chosen
// width is persisted as the CSS custom property --f7-panel-<side>-width.
function resizablePanel(panel) {
  const app = panel.app;
  if (panel.resizableInitialized) return;
  Utils.extend(panel, {
    resizable: true,
    resizableWidth: null,
    resizableInitialized: true,
  });
  const $htmlEl = $('html');
  const { $el, $backdropEl, side, effect } = panel;
  if (!$el) return;

  // Gesture state shared between the touch handlers below.
  let isTouched;
  let isMoved;
  const touchesStart = {};
  let touchesDiff;
  let panelWidth;
  let $viewEl;
  let panelMinWidth;
  let panelMaxWidth;
  let visibleByBreakpoint;

  // Converts a CSS width value ('50%', '20vw', '240px') to pixels relative
  // to app.width; returns null for empty or non-numeric values.
  function transformCSSWidth(v) {
    if (!v) return null;
    if (v.indexOf('%') >= 0 || v.indexOf('vw') >= 0) {
      return (parseInt(v, 10) / 100) * app.width;
    }
    const newV = parseInt(v, 10);
    if (Number.isNaN(newV)) return null;
    return newV;
  }

  function isResizable() {
    return panel.resizable && $el.hasClass('panel-resizable');
  }

  // Captures the starting touch point and the panel's current min/max
  // width constraints for this drag.
  function handleTouchStart(e) {
    if (!isResizable()) return;
    touchesStart.x = e.type === 'touchstart' ? e.targetTouches[0].pageX : e.pageX;
    touchesStart.y = e.type === 'touchstart' ? e.targetTouches[0].pageY : e.pageY;
    isMoved = false;
    isTouched = true;
    panelMinWidth = transformCSSWidth($el.css('min-width'));
    panelMaxWidth = transformCSSWidth($el.css('max-width'));
    visibleByBreakpoint = $el.hasClass('panel-in-breakpoint');
  }

  // Applies the dragged width to the panel (and, for 'reveal' panels or
  // breakpoint-visible panels, shifts the view/backdrop to match).
  function handleTouchMove(e) {
    if (!isTouched) return;
    const pageX = e.type === 'touchmove' ? e.targetTouches[0].pageX : e.pageX;
    if (!isMoved) {
      // First move of the drag: freeze transitions and cache elements.
      panelWidth = $el[0].offsetWidth;
      $el.transition(0);
      $el.addClass('panel-resizing');
      $htmlEl.css('cursor', 'col-resize');
      if (effect === 'reveal' || visibleByBreakpoint) {
        $viewEl = $(panel.getViewEl());
        if (panel.$containerEl && panel.$containerEl.hasClass('page')) {
          $viewEl.add(panel.$containerEl.children('.page-content, .tabs, .fab'));
        }
      }
      if (effect === 'reveal' && !visibleByBreakpoint) {
        $backdropEl.transition(0);
        $viewEl.transition(0);
      }
    }
    isMoved = true;

    e.preventDefault();

    touchesDiff = pageX - touchesStart.x;
    // Dragging right grows a left panel and shrinks a right panel.
    let newPanelWidth = side === 'left' ? panelWidth + touchesDiff : panelWidth - touchesDiff;
    if (panelMinWidth && !Number.isNaN(panelMinWidth)) {
      newPanelWidth = Math.max(newPanelWidth, panelMinWidth);
    }
    if (panelMaxWidth && !Number.isNaN(panelMaxWidth)) {
      newPanelWidth = Math.min(newPanelWidth, panelMaxWidth);
    }
    // Never below 0 or beyond the app width.
    newPanelWidth = Math.min(Math.max(newPanelWidth, 0), app.width);

    panel.resizableWidth = newPanelWidth;
    $el[0].style.width = `${newPanelWidth}px`;
    if (effect === 'reveal' && !visibleByBreakpoint) {
      if ($viewEl) {
        $viewEl.transform(
          `translate3d(${side === 'left' ? newPanelWidth : -newPanelWidth}px, 0, 0)`,
        );
      }
      if ($backdropEl) {
        $backdropEl.transform(
          `translate3d(${side === 'left' ? newPanelWidth : -newPanelWidth}px, 0, 0)`
        );
      }
    } else if (visibleByBreakpoint && $viewEl) {
      $viewEl.css(`margin-${side}`, `${newPanelWidth}px`);
    }

    $el.trigger('panel:resize', newPanelWidth);
    panel.emit('local::resize panelResize', panel, newPanelWidth);
  }

  // Commits the final width to the CSS variable and restores transitions.
  function handleTouchEnd() {
    $('html').css('cursor', '');
    if (!isTouched || !isMoved) {
      isTouched = false;
      isMoved = false;
      return;
    }
    isTouched = false;
    isMoved = false;

    $htmlEl[0].style.setProperty(`--f7-panel-${side}-width`, `${panel.resizableWidth}px`);
    $el[0].style.width = '';
    if (effect === 'reveal' && !visibleByBreakpoint) {
      $viewEl.transform('');
      $backdropEl.transform('');
    }
    $el.removeClass('panel-resizing');
    Utils.nextFrame(() => {
      $el.transition('');
      if (effect === 'reveal') {
        $backdropEl.transition('');
        if ($viewEl) $viewEl.transition('');
      }
    });
  }

  // Re-clamps the stored width when the viewport or constraints change.
  function handleResize() {
    if (!panel.opened || !panel.resizableWidth) return;
    panelMinWidth = transformCSSWidth($el.css('min-width'));
    panelMaxWidth = transformCSSWidth($el.css('max-width'));

    if (panelMinWidth && !Number.isNaN(panelMinWidth) && panel.resizableWidth < panelMinWidth) {
      panel.resizableWidth = Math.max(panel.resizableWidth, panelMinWidth);
    }
    if (panelMaxWidth && !Number.isNaN(panelMaxWidth) && panel.resizableWidth > panelMaxWidth) {
      panel.resizableWidth = Math.min(panel.resizableWidth, panelMaxWidth);
    }
    panel.resizableWidth = Math.min(Math.max(panel.resizableWidth, 0), app.width);

    $htmlEl[0].style.setProperty(`--f7-panel-${side}-width`, `${panel.resizableWidth}px`);
  }

  // Ensure the drag handle exists exactly once.
  if (panel.$el.find('.panel-resize-handler').length === 0) {
    panel.$el.append('<div class="panel-resize-handler"></div>');
  }
  panel.$resizeHandlerEl = panel.$el.children('.panel-resize-handler');
  $el.addClass('panel-resizable');

  // Add Events
  const passive = Support.passiveListener ? { passive: true } : false;

  panel.$el.on(app.touchEvents.start, '.panel-resize-handler', handleTouchStart, passive);
  app.on('touchmove:active', handleTouchMove);
  app.on('touchend:passive', handleTouchEnd);
  app.on('resize', handleResize);
  panel.on('beforeOpen', handleResize);
  // Tear everything down when the panel is destroyed.
  panel.once('panelDestroy', () => {
    $el.removeClass('panel-resizable');
    panel.$resizeHandlerEl.remove();
    panel.$el.off(app.touchEvents.start, '.panel-resize-handler', handleTouchStart, passive);
    app.off('touchmove:active', handleTouchMove);
    app.off('touchend:passive', handleTouchEnd);
    app.off('resize', handleResize);
    panel.off('beforeOpen', handleResize);
  });
}
export default resizablePanel;
|
from decimal import Decimal
from django import forms
from django.conf import settings
from django.core.exceptions import ValidationError
from django.db.models import Q
from django.forms.fields import Field
from django.forms.formsets import formset_factory, BaseFormSet
import commonware
import happyforms
from slumber.exceptions import HttpClientError
from tower import ugettext as _, ugettext_lazy as _lazy
import mkt
from lib.pay_server import client
from mkt.api.forms import SluggableModelChoiceField
from mkt.constants import (BANGO_COUNTRIES, BANGO_OUTPAYMENT_CURRENCIES,
FREE_PLATFORMS, PAID_PLATFORMS)
from mkt.constants.payments import (PAYMENT_METHOD_ALL, PAYMENT_METHOD_CARD,
PAYMENT_METHOD_OPERATOR)
from mkt.developers.models import AddonPaymentAccount, PaymentAccount
from mkt.prices.models import AddonPremium, Price
from mkt.reviewers.models import RereviewQueue
from mkt.site.forms import AddonChoiceField
from mkt.submit.forms import DeviceTypeForm
from mkt.webapps.models import AddonUpsell, Webapp
log = commonware.log.getLogger('z.devhub')
def _restore_app_status(app, save=True):
    """
    Restore an incomplete app to its former status. The app will be marked
    as its previous status or PENDING if it was never reviewed.
    """
    log.info('Changing app from incomplete to previous status: %d' % app.pk)
    previous = app.highest_status
    if previous == mkt.STATUS_NULL:
        # Never reviewed: fall back to the pending queue.
        previous = mkt.STATUS_PENDING
    app.status = previous
    if save:
        app.save()
class PremiumForm(DeviceTypeForm, happyforms.Form):
    """
    The premium details for an addon, which is unfortunately
    distributed across a few models.
    """

    # This does a nice Yes/No field like the mockup calls for.
    allow_inapp = forms.ChoiceField(
        choices=((True, _lazy(u'Yes')), (False, _lazy(u'No'))),
        widget=forms.RadioSelect, required=False)

    # Choices are provided at init by group_tier_choices.
    price = forms.ChoiceField(choices=(), label=_lazy(u'App Price'),
                              required=False)

    def __init__(self, *args, **kw):
        # Pulled out of kwargs so the regular Form machinery never sees them.
        self.request = kw.pop('request')
        self.addon = kw.pop('addon')
        self.user = kw.pop('user')

        kw['initial'] = {
            'allow_inapp': self.addon.premium_type in mkt.ADDON_INAPPS
        }

        if self.addon.premium_type == mkt.ADDON_FREE_INAPP:
            kw['initial']['price'] = 'free'
        elif self.addon.premium and self.addon.premium.price:
            # If the app has a premium object, set the initial price.
            kw['initial']['price'] = self.addon.premium.price.pk

        super(PremiumForm, self).__init__(*args, **kw)

        self.fields['paid_platforms'].choices = PAID_PLATFORMS(self.request)
        self.fields['free_platforms'].choices = FREE_PLATFORMS()

        if (self.is_paid() and not self.is_toggling()):
            # Require the price field if the app is premium and
            # we're not toggling from free <-> paid.
            self.fields['price'].required = True

        # Get the list of supported devices and put them in the data.
        self.device_data = {}
        supported_devices = [mkt.REVERSE_DEVICE_LOOKUP[dev.id] for dev in
                             self.addon.device_types]
        self.initial.setdefault('free_platforms', [])
        self.initial.setdefault('paid_platforms', [])

        # Mirror current device support onto both the free-* and paid-*
        # checkbox namespaces.
        for platform in set(x[0].split('-', 1)[1] for x in
                            (FREE_PLATFORMS() + PAID_PLATFORMS(self.request))):
            supported = platform in supported_devices
            self.device_data['free-%s' % platform] = supported
            self.device_data['paid-%s' % platform] = supported
            if supported:
                self.initial['free_platforms'].append('free-%s' % platform)
                self.initial['paid_platforms'].append('paid-%s' % platform)

        if not self.initial.get('price'):
            self.initial['price'] = self._initial_price_id()

        self.fields['price'].choices = self.group_tier_choices()

    def group_tier_choices(self):
        """Creates tier choices with optgroups based on payment methods"""
        price_choices = [
            ('free', _('Free (with in-app payments)')),
        ]
        card_billed = []
        operator_billed = []
        card_and_operator_billed = []

        for price in Price.objects.active():
            choice = (price.pk, unicode(price))
            # Special case price tier 0.
            if price.price == Decimal('0.00'):
                price_choices.append((price.pk, '%s (%s)' %
                                      (unicode(price),
                                       _('Promotional Pricing'))))
            # Tiers that can only be operator billed.
            elif price.method == PAYMENT_METHOD_OPERATOR:
                operator_billed.append(choice)
            # Tiers that can only be card billed.
            elif price.method == PAYMENT_METHOD_CARD:
                card_billed.append(choice)
            # Tiers that are can generally be billed by either
            # operator or card.
            elif price.method == PAYMENT_METHOD_ALL:
                card_and_operator_billed.append(choice)

        if operator_billed:
            price_choices.append((_lazy('Only supports carrier billing'),
                                  operator_billed))
        if card_billed:
            price_choices.append((_lazy('Only supports credit-card billing'),
                                  card_billed))
        if card_and_operator_billed:
            price_choices.append(
                (_lazy('Supports all billing methods'),
                 card_and_operator_billed))

        return price_choices

    def _initial_price_id(self):
        """Sets the inital price tier if available (0.99, else None)."""
        try:
            return Price.objects.active().get(price='0.99').id
        except Price.DoesNotExist:
            log.warning('Could not find a price tier 0.99 to set as default.')
            return None

    def _make_premium(self):
        # Return the existing AddonPremium, or attach a fresh unsaved one
        # (cached on the addon as _premium).
        if self.addon.premium:
            return self.addon.premium

        log.info('New AddonPremium object for addon %s' % self.addon.pk)
        self.addon._premium = AddonPremium(addon=self.addon,
                                           price_id=self._initial_price_id())
        return self.addon._premium

    def is_paid(self):
        # True for premium apps AND free apps with in-app payments.
        is_paid = (self.addon.premium_type in mkt.ADDON_PREMIUMS or
                   self.is_free_inapp())
        return is_paid

    def is_free_inapp(self):
        return self.addon.premium_type == mkt.ADDON_FREE_INAPP

    def is_toggling(self):
        # Returns 'free'/'paid' when the POST requests a free<->paid switch,
        # otherwise False.
        value = self.request.POST.get('toggle-paid')
        return value if value in ('free', 'paid') else False

    def clean(self):
        is_toggling = self.is_toggling()

        if not is_toggling:
            # If a platform wasn't selected, raise an error.
            if not self.cleaned_data[
                    '%s_platforms' % ('paid' if self.is_paid() else 'free')]:
                self._add_error('none')

                # We want to throw out the user's selections in this case and
                # not update the <select> element that goes along with this.
                # I.e.: we don't want to re-populate these big chunky
                # checkboxes with bad data.
                # Also, I'm so, so sorry.
                self.data = dict(self.data)
                platforms = dict(
                    free_platforms=self.initial.get('free_platforms', []),
                    paid_platforms=self.initial.get('paid_platforms', []))
                self.data.update(**platforms)

        return self.cleaned_data

    def clean_price(self):
        # Validates the tier selection; returns None, the literal 'free',
        # or a Price instance.
        price_value = self.cleaned_data.get('price')
        premium_type = self.cleaned_data.get('premium_type')
        if ((premium_type in mkt.ADDON_PREMIUMS or
             premium_type == mkt.ADDON_FREE_INAPP) and
                not price_value and not self.is_toggling()):
            raise ValidationError(Field.default_error_messages['required'])

        if not price_value and self.fields['price'].required is False:
            return None

        # Special case for a free app - in-app payments must be enabled.
        # Note: this isn't enforced for tier zero apps.
        if price_value == 'free':
            if self.cleaned_data.get('allow_inapp') != 'True':
                raise ValidationError(_('If app is Free, '
                                        'in-app payments must be enabled'))
            return price_value

        try:
            price = Price.objects.get(pk=price_value, active=True)
        except (ValueError, Price.DoesNotExist):
            raise ValidationError(_('Not a valid choice'))

        return price

    def save(self):
        # Applies the free/paid toggle or the price/in-app changes to the
        # addon and its related payment objects.
        toggle = self.is_toggling()
        upsell = self.addon.upsold

        # is_paid is true for both premium apps and free apps with
        # in-app payments.
        is_paid = self.is_paid()

        if toggle == 'paid' and self.addon.premium_type == mkt.ADDON_FREE:
            # Toggle free apps to paid by giving them a premium object.
            premium = self._make_premium()
            premium.price_id = self._initial_price_id()
            premium.save()

            self.addon.premium_type = mkt.ADDON_PREMIUM
            self.addon.status = mkt.STATUS_NULL

            is_paid = True

        elif toggle == 'free' and is_paid:
            # If the app is paid and we're making it free, remove it as an
            # upsell (if an upsell exists).
            upsell = self.addon.upsold
            if upsell:
                log.debug('[1@%s] Removing upsell; switching to free' %
                          self.addon.pk)
                upsell.delete()

            log.debug('[1@%s] Removing app payment account' % self.addon.pk)
            AddonPaymentAccount.objects.filter(addon=self.addon).delete()

            log.debug('[1@%s] Setting app premium_type to FREE' %
                      self.addon.pk)
            self.addon.premium_type = mkt.ADDON_FREE

            # Remove addonpremium
            try:
                log.debug('[1@%s] Removing addon premium' % self.addon.pk)
                self.addon.addonpremium.delete()
            except AddonPremium.DoesNotExist:
                pass

            if (self.addon.has_incomplete_status() and
                    self.addon.is_fully_complete()):
                _restore_app_status(self.addon, save=False)

            is_paid = False

        # Right is_paid is both paid apps and free with in-app payments.
        elif is_paid:
            price = self.cleaned_data.get('price')

            # If price is free then we want to make this an app that's
            # free with in-app payments.
            if price == 'free':
                self.addon.premium_type = mkt.ADDON_FREE_INAPP
                log.debug('[1@%s] Changing to free with in_app'
                          % self.addon.pk)

                # Remove upsell
                upsell = self.addon.upsold
                if upsell:
                    log.debug('[1@%s] Removing upsell; switching to free '
                              'with in_app' % self.addon.pk)
                    upsell.delete()

                # Remove addonpremium
                try:
                    log.debug('[1@%s] Removing addon premium' % self.addon.pk)
                    self.addon.addonpremium.delete()
                except AddonPremium.DoesNotExist:
                    pass
            else:
                # The dev is submitting updates for payment data about a paid
                # app. This might also happen if he/she is associating a new
                # paid app with an existing bank account.
                premium = self._make_premium()
                self.addon.premium_type = (
                    mkt.ADDON_PREMIUM_INAPP if
                    self.cleaned_data.get('allow_inapp') == 'True' else
                    mkt.ADDON_PREMIUM)

                if price and price != 'free':
                    log.debug('[1@%s] Updating app price (%s)' %
                              (self.addon.pk, self.cleaned_data['price']))
                    premium.price = self.cleaned_data['price']

                premium.save()

        if not toggle:
            # Save the device compatibility information when we're not
            # toggling.
            super(PremiumForm, self).save(self.addon, is_paid)

        log.info('Saving app payment changes for addon %s.' % self.addon.pk)
        self.addon.save()
class UpsellForm(happyforms.Form):
    """
    Lets a developer mark this paid app as the upgrade of one of their
    own free apps (an "upsell" relationship).
    """

    upsell_of = AddonChoiceField(
        queryset=Webapp.objects.none(), required=False,
        label=_lazy(u'This is a paid upgrade of'),
        empty_label=_lazy(u'Not an upgrade'))

    def __init__(self, *args, **kw):
        # addon/user are popped so the Form machinery never sees them.
        self.addon = kw.pop('addon')
        self.user = kw.pop('user')

        kw.setdefault('initial', {})
        if self.addon.upsold:
            kw['initial']['upsell_of'] = self.addon.upsold.free

        super(UpsellForm, self).__init__(*args, **kw)

        # Candidates: the user's own free apps, excluding this one when it
        # is deleted.
        self.fields['upsell_of'].queryset = (
            self.user.addons.exclude(pk=self.addon.pk,
                                     status=mkt.STATUS_DELETED)
            .filter(premium_type__in=mkt.ADDON_FREES))

    def save(self):
        # Creates, repoints, or deletes the AddonUpsell row depending on the
        # selected free app.
        current_upsell = self.addon.upsold
        new_upsell_app = self.cleaned_data.get('upsell_of')

        if new_upsell_app:
            # We're changing the upsell or creating a new one.
            if not current_upsell:
                # If the upsell is new or we just deleted the old upsell,
                # create a new upsell.
                log.debug('[1@%s] Creating app upsell' % self.addon.pk)
                current_upsell = AddonUpsell(premium=self.addon)

            # Set the upsell object to point to the app that we're upselling.
            current_upsell.free = new_upsell_app
            current_upsell.save()

        elif current_upsell:
            # We're deleting the upsell.
            log.debug('[1@%s] Deleting the app upsell' % self.addon.pk)
            current_upsell.delete()
class BangoPaymentAccountForm(happyforms.Form):
    """Collects the company, address and bank details Bango needs for a
    payment account. Field names are camelCase because they mirror the
    Bango API's parameter names."""
    bankAccountPayeeName = forms.CharField(
        max_length=50, label=_lazy(u'Bank Account Holder Name'))
    companyName = forms.CharField(
        max_length=255, label=_lazy(u'Company Name'))
    vendorName = forms.CharField(
        max_length=255, label=_lazy(u'Vendor Name'))
    financeEmailAddress = forms.EmailField(
        required=True, label=_lazy(u'Financial Email'),
        max_length=100)
    adminEmailAddress = forms.EmailField(
        required=True, label=_lazy(u'Administrative Email'),
        max_length=100)
    supportEmailAddress = forms.EmailField(
        required=True, label=_lazy(u'Support Email'),
        max_length=100)
    address1 = forms.CharField(
        max_length=255, label=_lazy(u'Address'))
    address2 = forms.CharField(
        max_length=255, required=False, label=_lazy(u'Address 2'))
    addressCity = forms.CharField(
        max_length=128, label=_lazy(u'City/Municipality'))
    addressState = forms.CharField(
        max_length=64, label=_lazy(u'State/Province/Region'))
    addressZipCode = forms.CharField(
        max_length=10, label=_lazy(u'Zip/Postal Code'))
    addressPhone = forms.CharField(
        max_length=20, label=_lazy(u'Phone'))
    countryIso = forms.ChoiceField(
        choices=BANGO_COUNTRIES, label=_lazy(u'Country'))
    currencyIso = forms.ChoiceField(
        choices=BANGO_OUTPAYMENT_CURRENCIES,
        label=_lazy(u'I prefer to be paid in'))
    vatNumber = forms.CharField(
        max_length=17, required=False, label=_lazy(u'VAT Number'))
    bankAccountNumber = forms.CharField(
        max_length=20, label=_lazy(u'Bank Account Number'))
    bankAccountCode = forms.CharField(
        # l10n: SWIFT is http://bit.ly/15e7RJx and might not need translating.
        max_length=20, label=_lazy(u'SWIFT code'))
    bankName = forms.CharField(
        max_length=50, label=_lazy(u'Bank Name'))
    bankAddress1 = forms.CharField(
        max_length=50, label=_lazy(u'Bank Address'))
    bankAddress2 = forms.CharField(
        max_length=50, required=False, label=_lazy(u'Bank Address 2'))
    bankAddressCity = forms.CharField(
        max_length=50, required=False, label=_lazy(u'Bank City/Municipality'))
    bankAddressState = forms.CharField(
        max_length=50, required=False,
        label=_lazy(u'Bank State/Province/Region'))
    bankAddressZipCode = forms.CharField(
        max_length=10, label=_lazy(u'Bank Zip/Postal Code'))
    bankAddressIso = forms.ChoiceField(
        choices=BANGO_COUNTRIES, label=_lazy(u'Bank Country'))
    account_name = forms.CharField(max_length=64, label=_lazy(u'Account Name'))

    # These are the fields that Bango uses for bank details. They're read-only
    # once written.
    read_only_fields = set(['bankAccountPayeeName', 'bankAccountNumber',
                            'bankAccountCode', 'bankName', 'bankAddress1',
                            'bankAddressZipCode', 'bankAddressIso',
                            'adminEmailAddress', 'currencyIso',
                            'companyName'])

    def __init__(self, *args, **kwargs):
        # `account` is set when editing an existing account; None on creation.
        self.account = kwargs.pop('account', None)
        super(BangoPaymentAccountForm, self).__init__(*args, **kwargs)

        if self.account:
            # We don't need the bank account fields if we're getting
            # modifications.
            for field in self.fields:
                if field in self.read_only_fields:
                    self.fields[field].required = False

    def save(self):
        # Save the account name, if it was updated.
        # NOTE(review): assumes `self.account` was supplied — save() appears
        # to be used only on the modification path; confirm with callers.
        self.account.get_provider().account_update(self.account,
                                                   self.cleaned_data)
class AccountListForm(happyforms.Form):
    """Lets an app owner pick which of their payment accounts (for one
    provider) is attached to the app, and performs the attach on save."""
    accounts = forms.ModelChoiceField(
        queryset=PaymentAccount.objects.none(),
        label=_lazy(u'Payment Account'), required=False)

    def __init__(self, *args, **kwargs):
        self.addon = kwargs.pop('addon')
        self.provider = kwargs.pop('provider')
        self.user = kwargs.pop('user')
        super(AccountListForm, self).__init__(*args, **kwargs)

        # None when there is no addon yet; True/False once ownership is known.
        self.is_owner = None
        if self.addon:
            self.is_owner = self.addon.authors.filter(
                pk=self.user.pk,
                addonuser__role=mkt.AUTHOR_ROLE_OWNER).exists()

        # Only accounts whose ToS were agreed to are selectable.
        self.fields['accounts'].queryset = self.agreed_payment_accounts

        if self.is_owner is False:
            # Non-owners can see but not change the selection.
            self.fields['accounts'].widget.attrs['disabled'] = ''

        self.current_payment_account = None
        try:
            current_acct = AddonPaymentAccount.objects.get(
                addon=self.addon,
                payment_account__provider=self.provider.provider)
            payment_account = PaymentAccount.objects.get(
                uri=current_acct.account_uri)
            # If this user owns this account then set initial otherwise
            # we'll stash it on the form so we can display the non-owned
            # current account separately.
            if payment_account.user.pk == self.user.pk:
                self.initial['accounts'] = payment_account
                self.fields['accounts'].empty_label = None
            else:
                self.current_payment_account = payment_account
        except (AddonPaymentAccount.DoesNotExist, PaymentAccount.DoesNotExist):
            pass

    @property
    def payment_accounts(self):
        """Active accounts visible to this user (their own plus shared),
        optionally narrowed to this form's provider."""
        queryset = (PaymentAccount.objects
                    .filter(inactive=False)
                    .filter(Q(user=self.user) | Q(shared=True))
                    .order_by('name', 'shared'))
        if self.provider is not None:
            queryset = queryset.filter(provider=self.provider.provider)
        return queryset

    @property
    def agreed_payment_accounts(self):
        """Visible accounts whose terms of service have been accepted."""
        return self.payment_accounts.filter(agreed_tos=True)

    def has_accounts(self):
        """True if the user has any visible account for this provider."""
        return self.payment_accounts.exists()

    def has_completed_accounts(self):
        """True if the user has any fully set-up (ToS-agreed) account."""
        return self.agreed_payment_accounts.exists()

    def clean_accounts(self):
        accounts = self.cleaned_data.get('accounts')
        # When cleaned if the accounts field wasn't submitted or it's an empty
        # string the cleaned value will be None for a ModelChoiceField.
        # Therefore to tell the difference between the non-submission and the
        # empty string we need to check the raw data.
        accounts_submitted = 'accounts' in self.data
        if (AddonPaymentAccount.objects.filter(addon=self.addon).exists() and
                accounts_submitted and not accounts):
            raise forms.ValidationError(
                _('You cannot remove a payment account from an app.'))
        if accounts and not self.is_owner:
            raise forms.ValidationError(
                _('You are not permitted to change payment accounts.'))
        return accounts

    def save(self):
        """Replace the app's payment account for this provider and run the
        status bookkeeping that may follow from the change."""
        if self.cleaned_data.get('accounts'):
            try:
                log.info('[1@%s] Attempting to delete app payment account'
                         % self.addon.pk)
                AddonPaymentAccount.objects.get(
                    addon=self.addon,
                    payment_account__provider=self.provider.provider
                ).delete()
            except AddonPaymentAccount.DoesNotExist:
                # Nothing to replace — first account for this provider.
                log.info('[1@%s] Deleting failed, this is usually fine'
                         % self.addon.pk)
            log.info('[1@%s] Creating new app payment account' % self.addon.pk)
            account = self.cleaned_data['accounts']
            # Register the app as a product with the provider, then link it.
            uri = self.provider.product_create(account, self.addon)
            AddonPaymentAccount.objects.create(
                addon=self.addon, account_uri=account.uri,
                payment_account=account, product_uri=uri)
            # If the app is marked as paid and the information is complete
            # and the app is currently marked as incomplete, put it into the
            # re-review queue.
            if (self.addon.status == mkt.STATUS_NULL and
                    self.addon.highest_status
                    in mkt.WEBAPPS_APPROVED_STATUSES):
                # FIXME: This might cause noise in the future if bank accounts
                # get manually closed by Bango and we mark apps as STATUS_NULL
                # until a new account is selected. That will trigger a
                # re-review.
                log.info(u'[Webapp:%s] (Re-review) Public app, premium type '
                         u'upgraded.' % self.addon)
                RereviewQueue.flag(
                    self.addon, mkt.LOG.REREVIEW_PREMIUM_TYPE_UPGRADE)
            if (self.addon.has_incomplete_status() and
                    self.addon.is_fully_complete()):
                _restore_app_status(self.addon)
class AccountListBaseFormSet(BaseFormSet):
    """Base FormSet for AccountListForm. Provide the extra data for the
    AccountListForm as a list in `provider_data`.

    Example:

        formset = AccountListFormSet(provider_data=[
            {'provider': Bango()}, {'provider': Boku()}])
    """

    def __init__(self, **kwargs):
        self.provider_data = kwargs.pop('provider_data', [])
        super(AccountListBaseFormSet, self).__init__(**kwargs)

    def _construct_form(self, i, **kwargs):
        # Merge this form's provider data with the formset's own kwargs.
        # Copy the provider_data entry before updating: the original code
        # called .update() on the stored dict itself, so per-form kwargs
        # leaked into (and accumulated on) the shared provider_data list
        # across constructions.
        if i < len(self.provider_data):
            _kwargs = dict(self.provider_data[i])
        else:
            _kwargs = {}
        _kwargs.update(kwargs)
        return (super(AccountListBaseFormSet, self)
                ._construct_form(i, **_kwargs))

    def save(self):
        """Save every member form in order."""
        for form in self.forms:
            form.save()
# Wrap the formset_factory call in a function so that extra/max_num works with
# different values of settings.PAYMENT_PROVIDERS in the tests.
def AccountListFormSet(*args, **kwargs):
    """Build and instantiate an AccountListForm formset with one form per
    configured payment provider."""
    num_providers = len(settings.PAYMENT_PROVIDERS)
    formset_cls = formset_factory(AccountListForm,
                                  formset=AccountListBaseFormSet,
                                  extra=num_providers,
                                  max_num=num_providers)
    return formset_cls(*args, **kwargs)
class ReferenceAccountForm(happyforms.Form):
    """Minimal account form for the reference payment provider."""
    uuid = forms.CharField(max_length=36, required=False,
                           widget=forms.HiddenInput())
    account_name = forms.CharField(max_length=50, label=_lazy(u'Account name'))
    name = forms.CharField(max_length=50, label=_lazy(u'Name'))
    email = forms.EmailField(max_length=100, label=_lazy(u'Email'))

    def __init__(self, *args, **kwargs):
        # `account` is present when editing an existing account.
        self.account = kwargs.pop('account', None)
        super(ReferenceAccountForm, self).__init__(*args, **kwargs)

    def save(self):
        # Push the (possibly updated) account details to the provider.
        self.account.get_provider().account_update(self.account,
                                                   self.cleaned_data)
class BokuAccountForm(happyforms.Form):
    """Collects and verifies the Boku service ID for a new Boku account."""
    signup_url = settings.BOKU_SIGNUP_URL
    account_name = forms.CharField(max_length=50, label=_lazy(u'Account name'))

    # The lengths of these are not specified in the Boku documentation, so
    # making a guess here about max lengths.
    service_id = forms.CharField(max_length=50, label=_lazy(u'Service ID'))

    def clean_service_id(self):
        service_id = self.cleaned_data['service_id']
        try:
            # Ask the payments backend to confirm the service ID exists.
            client.api.boku.verify_service.post({'service_id': service_id})
        except HttpClientError:
            raise ValidationError(_('Service ID is not valid'))
        return service_id
class PaymentCheckForm(happyforms.Form):
    """Resolves an app by slug and requires it to have a payment account."""
    app = SluggableModelChoiceField(
        queryset=Webapp.objects.filter(
            premium_type__in=mkt.ADDON_HAS_PAYMENTS),
        sluggable_to_field_name='app_slug')

    def clean_app(self):
        app = self.cleaned_data['app']
        if app.has_payment_account():
            return app
        raise ValidationError(_('No payment account set up for that app'))
|
import gym
from baselines import deepq
def main():
    """Replay a pre-trained CartPole policy, rendering each step and
    printing the total reward per episode, until interrupted."""
    env = gym.make("CartPole-v0")
    act = deepq.load("cartpole_model.pkl")
    while True:
        done = False
        obs = env.reset()
        episode_total = 0
        while not done:
            env.render()
            # The policy expects a batch dimension, hence obs[None].
            obs, rew, done, _ = env.step(act(obs[None])[0])
            episode_total += rew
        print("Episode reward", episode_total)


if __name__ == '__main__':
    main()
|
# @Author : Wang Xiaoqiang
# @GitHub : https://github.com/rzjing
# @File : scheduler.py
# @Time : 2020/1/5 15:34
from apscheduler.jobstores.redis import RedisJobStore
from apscheduler.schedulers.background import BackgroundScheduler
from app.config import Config
class Scheduler(object):
    """Thin wrapper around APScheduler's BackgroundScheduler that registers a
    Redis-backed job store (connection settings come from app.config.Config)."""

    def __init__(self):
        # Redis connection parameters are read from the application config.
        self.conf = Config()
        self.scheduler = BackgroundScheduler(jobstores={'redis': RedisJobStore(**self.conf.get_redis())})

    def add_job(self, func, trigger: str, params=None, next_run_time=None, jobstore='default', **trigger_args):
        """Register a job with the scheduler.

        :param func: the callable to execute
        :param trigger: job type, Options: ['once', 'interval', 'cron']
        :param params: positional-argument list passed to ``func``
        :param next_run_time: when to run the job; required (a
            datetime.datetime) when ``trigger == 'once'``
        :param jobstore: job store to use, Options: ['default', 'redis']
        :param trigger_args: e.g. if trigger == 'interval': Options: ['days', 'hours', 'minutes', 'seconds', ...]
                             elif trigger == 'cron': Options: ['year', 'month', 'day', 'week', 'day_of_week', ...]
        :return: Job
        """
        if trigger == 'once':
            # 'once' is not an APScheduler trigger name: omit the trigger and
            # pass next_run_time — presumably yielding a one-shot job at that
            # time (NOTE: confirm against APScheduler's add_job semantics).
            job = self.scheduler.add_job(func, next_run_time=next_run_time, args=params, jobstore=jobstore)
        else:
            job = self.scheduler.add_job(func, trigger, args=params, **trigger_args, jobstore=jobstore)
        return job

    def get_job(self, job_id: str):
        """Look up a single job.

        :param job_id: job ID
        :return: Job
        """
        return self.scheduler.get_job(job_id)

    def get_jobs(self):
        """Return all scheduled jobs.

        :return: list[Job]
        """
        return self.scheduler.get_jobs()

    def del_job(self, job_id: str):
        """Remove one job by ID.

        :param job_id: job ID
        """
        self.scheduler.remove_job(job_id)

    def del_jobs(self):
        # Remove every job from all configured job stores.
        self.scheduler.remove_all_jobs()

    def start(self):
        # Start the scheduler's background thread.
        self.scheduler.start()
if __name__ == '__main__':
pass
|
"""Helpers for sanitising HTML input to the bot."""
import bleach
__all__ = ["clean"]
"""
Take the list of allowed tags and attributes from Element for consistency:
https://github.com/matrix-org/matrix-react-sdk/blob/master/src/HtmlUtils.js#L180-L195
"""
# Tag whitelist mirrored from Element's HtmlUtils (see link above); anything
# not listed here is stripped by clean().
ALLOWED_TAGS = [
    "font",  # custom to matrix for IRC-style font coloring
    "del",  # for markdown
    "h1",
    "h2",
    "h3",
    "h4",
    "h5",
    "h6",
    "blockquote",
    "p",
    "a",
    "ul",
    "ol",
    "sup",
    "sub",
    "nl",
    "li",
    "b",
    "i",
    "u",
    "strong",
    "em",
    "strike",
    "code",
    "hr",
    "br",
    "div",
    "table",
    "thead",
    "caption",
    "tbody",
    "tr",
    "th",
    "td",
    "pre",
    "span",
    "img",
]

# Per-tag attribute whitelist; attributes on other tags are dropped.
ALLOWED_ATTRIBUTES = {
    "font": ["color", "data-mx-bg-color", "data-mx-color", "style"],
    "span": ["data-mx-bg-color", "data-mx-color", "style"],
    "a": ["href", "name", "target", "rel"],
    "img": ["src", "width", "height", "alt", "title"],
    "ol": ["start"],
}
def clean(html, **kwargs):
    """
    Sanitise HTML fragments.

    A version of `bleach.clean` but with Element's allowed tags and ``strip=True``
    by default. Keyword arguments override the defaults.
    """
    options = dict(
        strip=True,
        tags=ALLOWED_TAGS,
        attributes=ALLOWED_ATTRIBUTES,
        protocols=["https", "http", "mxc"],
    )
    options.update(kwargs)
    return bleach.clean(html, **options)
|
// Package metadata published to Atmosphere (Meteor's package registry).
Package.describe({
  name: 'nobutakaoshiro:framework7-ios-material',
  version: '1.0.4',
  summary: 'Framework7 (v1.4.2) with Meteor. You can choose iOS or Android theme.',
  git: 'https://github.com/nobutakaoshiro/meteor-framework7-ios-material.git',
  documentation: 'README.md'
});
// Register the bundled Framework7 distribution with Meteor's build system.
Package.onUse(function(api) {
  // Core Framework7 script, loaded eagerly on the client.
  api.addFiles('framework7/dist/js/framework7.min.js', 'client');
  // Theme stylesheets (iOS and Material; plain, color and RTL variants,
  // minified and unminified) shipped as client assets so apps can pick one.
  api.addAssets([
    'framework7/dist/css/framework7.ios.colors.css',
    'framework7/dist/css/framework7.ios.colors.min.css',
    'framework7/dist/css/framework7.ios.css',
    'framework7/dist/css/framework7.ios.min.css',
    'framework7/dist/css/framework7.ios.rtl.css',
    'framework7/dist/css/framework7.ios.rtl.min.css',
    'framework7/dist/css/framework7.material.colors.css',
    'framework7/dist/css/framework7.material.colors.min.css',
    'framework7/dist/css/framework7.material.css',
    'framework7/dist/css/framework7.material.min.css',
    'framework7/dist/css/framework7.material.rtl.css',
    'framework7/dist/css/framework7.material.rtl.min.css'
  ], 'client');
  // Icons and form graphics referenced by the stylesheets above.
  api.addAssets([
    'framework7/dist/img/i-f7-ios.png',
    'framework7/dist/img/i-f7-material.png',
    'framework7/dist/img/i-form-calendar-ios.svg',
    'framework7/dist/img/i-form-calendar-material.svg',
    'framework7/dist/img/i-form-comment-ios.svg',
    'framework7/dist/img/i-form-comment-material.svg',
    'framework7/dist/img/i-form-email-ios.svg',
    'framework7/dist/img/i-form-email-material.svg',
    'framework7/dist/img/i-form-gender-ios.svg',
    'framework7/dist/img/i-form-gender-material.svg',
    'framework7/dist/img/i-form-name-ios.svg',
    'framework7/dist/img/i-form-name-material.svg',
    'framework7/dist/img/i-form-password-ios.svg',
    'framework7/dist/img/i-form-password-material.svg',
    'framework7/dist/img/i-form-settings-ios.svg',
    'framework7/dist/img/i-form-settings-material.svg',
    'framework7/dist/img/i-form-tel-ios.svg',
    'framework7/dist/img/i-form-tel-material.svg',
    'framework7/dist/img/i-form-toggle-ios.svg',
    'framework7/dist/img/i-form-toggle-material.svg',
    'framework7/dist/img/i-form-url-ios.svg',
    'framework7/dist/img/i-form-url-material.svg'
  ], 'client');
});
|
// hashtag/[tag].js
import React, { useEffect } from "react";
import { useDispatch, useSelector } from "react-redux";
import { useRouter } from "next/router";
import { END } from "redux-saga";
import axios from "axios";
import { LOAD_HASHTAG_POSTS_REQUEST } from "../../reducers/post";
import PostCard from "../../components/PostCard";
import wrapper from "../../store/configureStore";
import { LOAD_MY_INFO_REQUEST } from "../../reducers/user";
import AppLayout from "../../components/AppLayout";
const Hashtag = () => {
const dispatch = useDispatch();
const router = useRouter();
const { tag } = router.query;
const { mainPosts, hasMorePosts, loadPostsLoading } = useSelector((state) => state.post);
useEffect(() => {
const onScroll = () => {
if (
window.pageYOffset + document.documentElement.clientHeight >
document.documentElement.scrollHeight - 300
) {
if (hasMorePosts && !loadPostsLoading) {
dispatch({
type: LOAD_HASHTAG_POSTS_REQUEST,
lastId: mainPosts[mainPosts.length - 1] && mainPosts[mainPosts.length - 1].id,
data: tag,
});
}
}
};
window.addEventListener("scroll", onScroll);
return () => {
window.removeEventListener("scroll", onScroll);
};
}, [mainPosts.length, hasMorePosts, tag, loadPostsLoading]);
return (
<AppLayout>
{mainPosts.map((c) => (
<PostCard key={c.id} post={c} />
))}
</AppLayout>
);
};
// Server-side: pre-load the viewer's info and the first page of hashtag
// posts before rendering. (Removed two stray `console.log(context)` debug
// calls — they dumped the whole SSR context, including the forwarded
// cookie header, into the server logs on every request.)
export const getServerSideProps = wrapper.getServerSideProps(async (context) => {
  const cookie = context.req ? context.req.headers.cookie : "";
  // Always reset the shared axios default first so one user's cookie can
  // never leak into another user's server-side request.
  axios.defaults.headers.Cookie = "";
  if (context.req && cookie) {
    axios.defaults.headers.Cookie = cookie;
  }
  context.store.dispatch({
    type: LOAD_MY_INFO_REQUEST,
  });
  context.store.dispatch({
    type: LOAD_HASHTAG_POSTS_REQUEST,
    data: context.params.tag,
  });
  // END tells redux-saga to finish the dispatched tasks so we can await them.
  context.store.dispatch(END);
  await context.store.sagaTask.toPromise();
});
export default Hashtag;
|
# Copyright (c) 2013 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2006-2008 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Lisa Hsu
import sys

import m5
from m5.defines import buildEnv
from m5.objects import *

from common.Benchmarks import *
from common import CpuConfig
from common import MemConfig
from common import PlatformConfig
def _listCpuTypes(option, opt, value, parser):
    """optparse callback: print the available CPU models and exit."""
    CpuConfig.print_cpu_list()
    sys.exit(0)
def _listMemTypes(option, opt, value, parser):
    """optparse callback: print the available memory controller types and exit."""
    MemConfig.print_mem_list()
    sys.exit(0)
def _listPlatformTypes(option, opt, value, parser):
    """optparse callback: print the available platform types and exit."""
    PlatformConfig.print_platform_list()
    sys.exit(0)
# Add the very basic options that work also in the case of the no ISA
# being used, and consequently no CPUs, but rather various types of
# testers and traffic generators.
def addNoISAOptions(parser):
    """Register the CPU-agnostic options: core count, system clocks/voltage,
    memory, cache geometry, Ruby, and run-duration limits."""
    parser.add_option("-n", "--num-cpus", type="int", default=1)
    parser.add_option("--sys-voltage", action="store", type="string",
                      default='1.0V',
                      help = """Top-level voltage for blocks running at system
                      power supply""")
    parser.add_option("--sys-clock", action="store", type="string",
                      default='1GHz',
                      help = """Top-level clock for blocks running at system
                      speed""")

    # Memory Options
    parser.add_option("--list-mem-types",
                      action="callback", callback=_listMemTypes,
                      help="List available memory types")
    parser.add_option("--mem-type", type="choice", default="DDR3_1600_8x8",
                      choices=MemConfig.mem_names(),
                      help = "type of memory to use")
    parser.add_option("--mem-channels", type="int", default=1,
                      help = "number of memory channels")
    parser.add_option("--mem-ranks", type="int", default=None,
                      help = "number of memory ranks per channel")
    parser.add_option("--mem-size", action="store", type="string",
                      default="512MB",
                      help="Specify the physical memory size (single memory)")

    parser.add_option("--memchecker", action="store_true")

    # Cache Options
    parser.add_option("--external-memory-system", type="string",
                      help="use external ports of this port_type for caches")
    parser.add_option("--tlm-memory", type="string",
                      help="use external port for SystemC TLM cosimulation")
    parser.add_option("--caches", action="store_true")
    parser.add_option("--l2cache", action="store_true")
    parser.add_option("--prefetchers", type="string", default="")
    parser.add_option("--num-dirs", type="int", default=1)
    parser.add_option("--num-l2caches", type="int", default=1)
    parser.add_option("--num-l3caches", type="int", default=1)
    parser.add_option("--l1d_size", type="string", default="16kB")
    parser.add_option("--l1i_size", type="string", default="32kB")
    parser.add_option("--l2_size", type="string", default="128kB")
    parser.add_option("--l3_size", type="string", default="16MB")
    parser.add_option("--l1d_assoc", type="int", default=2)
    parser.add_option("--l1i_assoc", type="int", default=2)
    parser.add_option("--l2_assoc", type="int", default=8)
    parser.add_option("--l3_assoc", type="int", default=16)
    parser.add_option("--cacheline_size", type="int", default=64)

    # Enable Ruby
    parser.add_option("--ruby", action="store_true")

    # Run duration options
    parser.add_option("-m", "--abs-max-tick", type="int", default=m5.MaxTick,
                      metavar="TICKS", help="Run to absolute simulated tick "
                      "specified including ticks from a restored checkpoint")
    parser.add_option("--rel-max-tick", type="int", default=None,
                      metavar="TICKS", help="Simulate for specified number of"
                      " ticks relative to the simulation start tick (e.g. if "
                      "restoring a checkpoint)")
    parser.add_option("--maxtime", type="float", default=None,
                      help="Run to the specified absolute simulated time in "
                      "seconds")
# Add common options that assume a non-NULL ISA.
def addCommonOptions(parser):
    """Register the options shared by all ISA-bearing simulations: CPU model,
    elastic tracing, dist-gem5, run-duration limits, SimPoints, checkpointing
    and CPU-switching controls. Also pulls in addNoISAOptions()."""
    # start by adding the base options that do not assume an ISA
    addNoISAOptions(parser)

    # system options
    parser.add_option("--list-cpu-types",
                      action="callback", callback=_listCpuTypes,
                      help="List available CPU types")
    parser.add_option("--cpu-type", type="choice", default="AtomicSimpleCPU",
                      choices=CpuConfig.cpu_names(),
                      help = "type of cpu to run with")
    parser.add_option("--checker", action="store_true");
    parser.add_option("--cpu-clock", action="store", type="string",
                      default='2GHz',
                      help="Clock for blocks running at CPU speed")
    parser.add_option("--smt", action="store_true", default=False,
                      help = """
                      Only used if multiple programs are specified. If true,
                      then the number of threads per cpu is same as the
                      number of programs.""")
    parser.add_option("--elastic-trace-en", action="store_true",
                      help="""Enable capture of data dependency and instruction
                      fetch traces using elastic trace probe.""")
    # Trace file paths input to trace probe in a capture simulation and input
    # to Trace CPU in a replay simulation
    parser.add_option("--inst-trace-file", action="store", type="string",
                      help="""Instruction fetch trace file input to
                      Elastic Trace probe in a capture simulation and
                      Trace CPU in a replay simulation""", default="")
    parser.add_option("--data-trace-file", action="store", type="string",
                      help="""Data dependency trace file input to
                      Elastic Trace probe in a capture simulation and
                      Trace CPU in a replay simulation""", default="")

    parser.add_option("-l", "--lpae", action="store_true")
    parser.add_option("-V", "--virtualisation", action="store_true")

    parser.add_option("--fastmem", action="store_true")

    # dist-gem5 options
    parser.add_option("--dist", action="store_true",
                      help="Parallel distributed gem5 simulation.")
    parser.add_option("--dist-sync-on-pseudo-op", action="store_true",
                      help="Use a pseudo-op to start dist-gem5 synchronization.")
    parser.add_option("--is-switch", action="store_true",
                      help="Select the network switch simulator process for a"\
                      "distributed gem5 run")
    parser.add_option("--dist-rank", default=0, action="store", type="int",
                      help="Rank of this system within the dist gem5 run.")
    parser.add_option("--dist-size", default=0, action="store", type="int",
                      help="Number of gem5 processes within the dist gem5 run.")
    parser.add_option("--dist-server-name",
                      default="127.0.0.1",
                      action="store", type="string",
                      help="Name of the message server host\nDEFAULT: localhost")
    parser.add_option("--dist-server-port",
                      default=2200,
                      action="store", type="int",
                      help="Message server listen port\nDEFAULT: 2200")
    parser.add_option("--dist-sync-repeat",
                      default="0us",
                      action="store", type="string",
                      help="Repeat interval for synchronisation barriers among dist-gem5 processes\nDEFAULT: --ethernet-linkdelay")
    parser.add_option("--dist-sync-start",
                      default="5200000000000t",
                      action="store", type="string",
                      help="Time to schedule the first dist synchronisation barrier\nDEFAULT:5200000000000t")
    parser.add_option("--ethernet-linkspeed", default="10Gbps",
                      action="store", type="string",
                      help="Link speed in bps\nDEFAULT: 10Gbps")
    parser.add_option("--ethernet-linkdelay", default="10us",
                      action="store", type="string",
                      help="Link delay in seconds\nDEFAULT: 10us")

    # Run duration options
    parser.add_option("-I", "--maxinsts", action="store", type="int",
                      default=None, help="""Total number of instructions to
                      simulate (default: run forever)""")
    parser.add_option("--work-item-id", action="store", type="int",
                      help="the specific work id for exit & checkpointing")
    parser.add_option("--num-work-ids", action="store", type="int",
                      help="Number of distinct work item types")
    parser.add_option("--work-begin-cpu-id-exit", action="store", type="int",
                      help="exit when work starts on the specified cpu")
    parser.add_option("--work-end-exit-count", action="store", type="int",
                      help="exit at specified work end count")
    parser.add_option("--work-begin-exit-count", action="store", type="int",
                      help="exit at specified work begin count")
    parser.add_option("--init-param", action="store", type="int", default=0,
                      help="""Parameter available in simulation with m5
                      initparam""")
    parser.add_option("--initialize-only", action="store_true", default=False,
                      help="""Exit after initialization. Do not simulate time.
                      Useful when gem5 is run as a library.""")

    # Simpoint options
    parser.add_option("--simpoint-profile", action="store_true",
                      help="Enable basic block profiling for SimPoints")
    parser.add_option("--simpoint-interval", type="int", default=10000000,
                      help="SimPoint interval in num of instructions")
    parser.add_option("--take-simpoint-checkpoints", action="store", type="string",
                      help="<simpoint file,weight file,interval-length,warmup-length>")
    parser.add_option("--restore-simpoint-checkpoint", action="store_true",
                      help="restore from a simpoint checkpoint taken with " +
                      "--take-simpoint-checkpoints")

    # Checkpointing options
    ###Note that performing checkpointing via python script files will override
    ###checkpoint instructions built into binaries.
    parser.add_option("--take-checkpoints", action="store", type="string",
                      help="<M,N> take checkpoints at tick M and every N ticks thereafter")
    parser.add_option("--max-checkpoints", action="store", type="int",
                      help="the maximum number of checkpoints to drop", default=5)
    parser.add_option("--checkpoint-dir", action="store", type="string",
                      help="Place all checkpoints in this absolute directory")
    parser.add_option("-r", "--checkpoint-restore", action="store", type="int",
                      help="restore from checkpoint <N>")
    parser.add_option("--checkpoint-at-end", action="store_true",
                      help="take a checkpoint at end of run")
    parser.add_option("--work-begin-checkpoint-count", action="store", type="int",
                      help="checkpoint at specified work begin count")
    parser.add_option("--work-end-checkpoint-count", action="store", type="int",
                      help="checkpoint at specified work end count")
    parser.add_option("--work-cpus-checkpoint-count", action="store", type="int",
                      help="checkpoint and exit when active cpu count is reached")
    parser.add_option("--restore-with-cpu", action="store", type="choice",
                      default="AtomicSimpleCPU", choices=CpuConfig.cpu_names(),
                      help = "cpu type for restoring from a checkpoint")

    # CPU Switching - default switch model goes from a checkpoint
    # to a timing simple CPU with caches to warm up, then to detailed CPU for
    # data measurement
    parser.add_option("--repeat-switch", action="store", type="int",
                      default=None,
                      help="switch back and forth between CPUs with period <N>")
    parser.add_option("-s", "--standard-switch", action="store", type="int",
                      default=None,
                      help="switch from timing to Detailed CPU after warmup period of <N>")
    parser.add_option("-p", "--prog-interval", type="str",
                      help="CPU Progress Interval")

    # Fastforwarding and simpoint related materials
    parser.add_option("-W", "--warmup-insts", action="store", type="int",
                      default=None,
                      help="Warmup period in total instructions (requires --standard-switch)")
    parser.add_option("--bench", action="store", type="string", default=None,
                      help="base names for --take-checkpoint and --checkpoint-restore")
    parser.add_option("-F", "--fast-forward", action="store", type="string",
                      default=None,
                      help="Number of instructions to fast forward before switching")
    parser.add_option("-S", "--simpoint", action="store_true", default=False,
                      help="""Use workload simpoints as an instruction offset for
                      --checkpoint-restore or --take-checkpoint.""")
    parser.add_option("--at-instruction", action="store_true", default=False,
                      help="""Treat value of --checkpoint-restore or --take-checkpoint as a
                      number of instructions.""")
    parser.add_option("--spec-input", default="ref", type="choice",
                      choices=["ref", "test", "train", "smred", "mdred",
                               "lgred"],
                      help="Input set size for SPEC CPU2000 benchmarks.")
    parser.add_option("--arm-iset", default="arm", type="choice",
                      choices=["arm", "thumb", "aarch64"],
                      help="ARM instruction set.")
def addSEOptions(parser):
    """Register syscall-emulation (SE) mode options: the workload binary,
    its arguments/environment, and stdio redirection."""
    # Benchmark options
    parser.add_option("-c", "--cmd", default="",
                      help="The binary to run in syscall emulation mode.")
    parser.add_option("-o", "--options", default="",
                      help="""The options to pass to the binary, use " "
                      around the entire string""")
    parser.add_option("-e", "--env", default="",
                      help="Initialize workload environment from text file.")
    parser.add_option("-i", "--input", default="",
                      help="Read stdin from a file.")
    parser.add_option("--output", default="",
                      help="Redirect stdout to a file.")
    parser.add_option("--errout", default="",
                      help="Redirect stderr to a file.")
def addFSOptions(parser):
    """Register the full-system (FS) mode simulation options on parser.

    Covers kernel/OS selection, frame capture, ARM-specific platform options,
    dual-system benchmarks, ethernet tracing, disk images and kernel command
    line templates.
    """
    from FSConfig import os_types

    # Simulation options
    parser.add_option("--timesync", action="store_true",
            help="Prevent simulated time from getting ahead of real time")

    # System options
    parser.add_option("--kernel", action="store", type="string")
    parser.add_option("--os-type", action="store", type="choice",
            choices=os_types[buildEnv['TARGET_ISA']], default="linux",
            help="Specifies type of OS to boot")
    parser.add_option("--script", action="store", type="string")
    parser.add_option("--frame-capture", action="store_true",
            help="Stores changed frame buffers from the VNC server to compressed "\
            "files in the gem5 output directory")

    # ARM-only options: platform selection, bare-metal boot, TrustZone and
    # Streamline support are meaningless on other ISAs.
    if buildEnv['TARGET_ISA'] == "arm":
        parser.add_option("--bare-metal", action="store_true",
                help="Provide the raw system without the linux specific bits")
        parser.add_option("--list-machine-types",
                action="callback", callback=_listPlatformTypes,
                help="List available platform types")
        parser.add_option("--machine-type", action="store", type="choice",
                choices=PlatformConfig.platform_names(),
                default="VExpress_EMM")
        parser.add_option("--dtb-filename", action="store", type="string",
                help="Specifies device tree blob file to use with device-tree-"\
                "enabled kernels")
        parser.add_option("--enable-security-extensions", action="store_true",
                help="Turn on the ARM Security Extensions")
        parser.add_option("--enable-context-switch-stats-dump", \
                action="store_true", help="Enable stats dump at context "\
                "switches and dump tasks file (required for Streamline)")

    # Benchmark options
    parser.add_option("--dual", action="store_true",
            help="Simulate two systems attached with an ethernet link")
    parser.add_option("-b", "--benchmark", action="store", type="string",
            dest="benchmark",
            help="Specify the benchmark to run. Available benchmarks: %s"\
            % DefinedBenchmarks)

    # Metafile options
    # Fixed: the two help-string fragments were concatenated without a space,
    # producing "...capture of theethernet traffic".
    parser.add_option("--etherdump", action="store", type="string", dest="etherdump",
            help="Specify the filename to dump a pcap capture of the "\
            "ethernet traffic")

    # Disk Image Options
    parser.add_option("--disk-image", action="store", type="string", default=None,
            help="Path to the disk image to use.")
    parser.add_option("--root-device", action="store", type="string", default=None,
            help="OS device name for root partition")

    # Command line options
    parser.add_option("--command-line", action="store", type="string",
            default=None,
            help="Template for the kernel command line.")
    parser.add_option("--command-line-file", action="store",
            default=None, type="string",
            help="File with a template for the kernel command line")
|
import React from "react";
import Router from './router'
function App() {
return (
<Router />
);
}
export default App;
|
import React, { Component } from "react";
import axios from "axios";
// banner_image_url: null
// category: "Computer programming"
// column_names_merged_with_images: (9) ["id", "name", "description", "url", "category", "position", "thumb_image", "banner_image", "logo"]
// description: "A tech school that teaches the languages of the computing world."
// id: 19268
// logo_url: "https://devcamp-space.s3.amazonaws.com/pcvnHx5PJP2vXgnnWBzure1B?response-content-disposition=inline%3B%20filename%3D%22SidneysLogo.png%22%3B%20filename%2A%3DUTF-8%27%27SidneysLogo.png&response-content-type=image%2Fpng&X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAJEHZJNHM5JFESRRQ%2F20200820%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20200820T164442Z&X-Amz-Expires=300&X-Amz-SignedHeaders=host&X-Amz-Signature=62b779c7325c9c3467ba150213164ba42b586cda92db5b454f90816a3197822a"
// name: "Bottega Tech"
// position: 0
// thumb_image_url: "https://devcamp-space.s3.amazonaws.com/p6nVcmHuKHswbsNky6pB2eBi?response-content-disposition=inline%3B%20filename%3D%22styleshot.jpg%22%3B%20filename%2A%3DUTF-8%27%27styleshot.jpg&response-content-type=image%2Fjpeg&X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAJEHZJNHM5JFESRRQ%2F20200820%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20200820T164442Z&X-Amz-Expires=300&X-Amz-SignedHeaders=host&X-Amz-Signature=241910feaac80f9ffcb420f3f0ed1b3a2f97621a62cfac5e5615303b772ec814"
// url: "www.bottega.com"
export default class PortfolioDetail extends Component {
constructor(props) {
super(props);
this.state = {
portfolioItem: {}
}
}
UNSAFE_componentWillMount() {
this.getPortfolioItem()
}
getPortfolioItem() {
axios.get(`https://davidmondragon.devcamp.space/portfolio/portfolio_items/${this.props.match.params.slug}`, {
withCredentials: true
}).then(response => {
console.log("response", response)
this.setState({
portfolioItem: response.data.portfolio_item
})
}).catch(error => {
console.log('getportfolioItem error', error)
})
}
render() {
const {
banner_image_url,
category,
description,
logo_url,
name,
thumb_image_url,
url
} = this.state.portfolioItem;
const bannerStyles = {
background: "url(" + thumb_image_url + ") no-repeat",
}
const logoStyles = {
height: "275px",
}
return (
<div className="portfolio-detail-wrapper">
<div className="banner" style={bannerStyles}>
<img src={logo_url} style={logoStyles}/>
</div>
<div className="portfolio-detail-description-wrapper">
<div className="description">
{description}
</div>
</div>
<div className="bottom-content-wrapper">
<a href={url} className="site-link" target="_blank">Visit {name}</a>
</div>
</div>
)
}
}
|
## Copyright (C) 2019, Huan Zhang <huan@huan-zhang.com>
## Hongge Chen <chenhg@mit.edu>
## Chaowei Xiao <xiaocw@umich.edu>
##
## This program is licenced under the BSD 2-Clause License,
## contained in the LICENCE file in this directory.
##
import sys
import copy
import torch
import numpy as np
from bound_layers import BoundSequential
# from gpu_profile import gpu_profile
import time
from datetime import datetime
from eps_scheduler import EpsilonScheduler
from config import load_config, get_path, config_modelloader, config_dataloader
from argparser import argparser
from train import Train, Logger
# sys.settrace(gpu_profile)
def main(args):
    """Evaluate every configured pre-trained model and report robust/clean errors.

    For each model in the config: wraps it for bound propagation, runs one
    evaluation pass on the test set, logs per-model results, then prints
    summary statistics across all models.
    """
    config = load_config(args)
    global_eval_config = config["eval_params"]
    models, model_names = config_modelloader(config, load_pretrain = True)

    robust_errs = []
    errs = []

    for model, model_id, model_config in zip(models, model_names, config["models"]):
        # make a copy of global training config, and update per-model config
        eval_config = copy.deepcopy(global_eval_config)
        if "eval_params" in model_config:
            eval_config.update(model_config["eval_params"])

        # Wrap the model so interval bounds can be propagated, then move to GPU.
        model = BoundSequential.convert(model, eval_config["method_params"]["bound_opts"])
        model = model.cuda()

        # read training parameters from config file
        method = eval_config["method"]
        verbose = eval_config["verbose"]
        eps = eval_config["epsilon"]
        # parameters specific to a training method
        method_param = eval_config["method_params"]
        norm = float(eval_config["norm"])
        train_data, test_data = config_dataloader(config, **eval_config["loader_params"])

        model_name = get_path(config, model_id, "model", load = False)
        print(model_name)
        # Per-model evaluation log file (overwritten on each run).
        model_log = get_path(config, model_id, "eval_log")
        logger = Logger(open(model_log, "w"))
        logger.log("evaluation configurations:", eval_config)
        logger.log("Evaluating...")
        with torch.no_grad():
            # evaluate
            # A degenerate (constant) epsilon scheduler is used so every batch
            # is evaluated at the final epsilon.
            robust_err, err = Train(model, 0, test_data, EpsilonScheduler("linear", 0, 0, eps, eps, 1), eps, norm, logger, verbose, False, None, method, **method_param)
        robust_errs.append(robust_err)
        errs.append(err)

    # Summary statistics across all evaluated models.
    print('model robust errors (for robustly trained models, not valid for naturally trained models):')
    print(robust_errs)
    robust_errs = np.array(robust_errs)
    print('min: {:.4f}, max: {:.4f}, median: {:.4f}, mean: {:.4f}'.format(np.min(robust_errs), np.max(robust_errs), np.median(robust_errs), np.mean(robust_errs)))
    print('clean errors for models with min, max and median robust errors')
    i_min = np.argmin(robust_errs)
    i_max = np.argmax(robust_errs)
    i_median = np.argsort(robust_errs)[len(robust_errs) // 2]
    print('for min: {:.4f}, for max: {:.4f}, for median: {:.4f}'.format(errs[i_min], errs[i_max], errs[i_median]))
    print('model clean errors:')
    print(errs)
    print('min: {:.4f}, max: {:.4f}, median: {:.4f}, mean: {:.4f}'.format(np.min(errs), np.max(errs), np.median(errs), np.mean(errs)))
if __name__ == "__main__":
    # Entry point: parse command-line/config arguments and run the evaluation.
    args = argparser()
    main(args)
|
from expects import expect, be, equal
from mamba import describe, it
from genyrator.entities.Column import ForeignKey
from genyrator.entities.entity.create_entity_from_type_dict import create_entity_from_type_dict
# BDD specs (mamba runner) for create_entity_from_type_dict.
with describe('create from type dict'):
    with it('converts identifier column'):
        # A camelCase key in the type dict should be normalised to the
        # snake_case identifier column name.
        result = create_entity_from_type_dict(
            class_name='Book',
            identifier_column_name='book_identifier',
            type_dict={
                'bookIdentifier': 'str',
            }
        )
        expect(result.identifier_column.python_name).to(equal('book_identifier'))

    with it('converts type? to a nullable column'):
        # A trailing '?' on the type string marks the column nullable;
        # plain types stay non-nullable.
        result = create_entity_from_type_dict(
            class_name='Book',
            identifier_column_name='book_identifier',
            type_dict={
                'bookIdentifier': 'str',
                'rating': 'float?',
                'name': 'str',
            }
        )
        expect(result.columns[1].nullable).to(be(True))
        expect(result.columns[1].python_type.value).to(be('float'))
        expect(result.columns[2].nullable).to(be(False))
        expect(result.columns[2].python_type.value).to(be('str'))

    with it('figures out foreign key columns'):
        # Columns named in the foreign_keys set should come back as
        # ForeignKey instances rather than plain columns.
        result = create_entity_from_type_dict(
            class_name='Book',
            identifier_column_name='book_identifier',
            type_dict={
                'bookIdentifier': 'str',
                'author_identifier': 'str',
            },
            foreign_keys={(
                'author_identifier',
                'author_external_identifier',
                'str',
            )}
        )
        expect(isinstance(result.columns[1], ForeignKey)).to(be(True))
|
// @flow
import type { Node } from 'react';
import React, { Component } from 'react';
import { observer } from 'mobx-react';
import { handleExternalLinkClick } from '../../../utils/routing';
import PickCurrencyOptionDialog from '../../../components/wallet/add/option-dialog/PickCurrencyOptionDialog';
type Props = {|
+onClose: void => void,
+onCardano: void => void,
+onCardanoTestnet: void => void,
+onErgo: void | (void => void),
|};
@observer
export default class PickCurrencyDialogContainer extends Component<Props> {
render(): Node {
return (
<PickCurrencyOptionDialog
onExternalLinkClick={handleExternalLinkClick}
onCancel={this.props.onClose}
onCardano={this.props.onCardano}
onCardanoTestnet={this.props.onCardanoTestnet}
onErgo={this.props.onErgo}
/>
);
}
}
|
import ItemSerializer from '@apps/drive/serializers/item_serializer'
import Folder from '@apps/drive/models/folder'
import Item from '@apps/drive/models/item'
// Resolve a folder code to its id string. The virtual root folder ('drive')
// maps to the literal string 'null' so it can be used directly in a
// folder_id equality filter.
const getFolder = async (req, { code }) => {
  if(code === 'drive') return 'null'
  const folder = await Folder.where({
    code
  }).fetch({
    transacting: req.trx
  })
  // NOTE(review): `req.status(...)` looks wrong — responses are sent via
  // `res` elsewhere (see listRoute's `res.status(200)`); confirm whether
  // this should be `res.status(404)` and whether `res` needs passing in.
  if(!folder) return req.status(404).respond({
    code: 404,
    message: 'folder not found'
  })
  return folder.get('id').toString()
}
// List drive items visible to the current user, honoring optional
// parent-folder, filter, sort and pagination query parameters.
const listRoute = async (req, res) => {
  req.query.$filter = req.query.$filter || {}
  req.query.$filter.$and = req.query.$filter.$and || []
  const parent = req.query.parent || req.params.parent
  if(parent) {
    // Constrain results to the requested folder; 'drive' resolves to the
    // string 'null' (root) via getFolder.
    req.query.$filter.$and.push({
      folder_id: {
        $eq: await getFolder(req, {
          code: parent
        })
      }
    })
  }
  const items = await Item.filterFetch({
    scope: (qb) => {
      // Join the per-user access rows so only visible items are returned,
      // and expose the access level text as `access_type`.
      qb.select('drive_items.*','drive_access_types.text as access_type')
      qb.joinRaw('inner join drive_items_access on drive_items_access.code=drive_items.code and drive_items_access.user_id=?', req.user.get('id'))
      qb.innerJoin('drive_access_types', 'drive_access_types.id', 'drive_items_access.access_type_id')
      qb.where('drive_items.team_id', req.team.get('id'))
      qb.whereRaw('drive_items.type != ?', ['metafile'])
      qb.whereNull('drive_items.deleted_at')
    },
    aliases: {
      access_type: 'drive_access_types.text'
    },
    filter: {
      params: req.query.$filter,
      allowed: ['code','folder_id','type','access_type'],
      search: ['label']
    },
    sort: {
      params: req.query.$sort,
      defaults: 'label'
    },
    page: req.query.$page,
    withRelated: ['asset','accesses.access_type','accesses.user.photo','accesses.group','accesses.grouping','folder'],
    transacting: req.trx
  })
  // Codes of the items this user has starred; presumably consumed by
  // ItemSerializer when rendering the response.
  req.starred = await req.trx('drive_starred').where({
    starrer_id: req.user.get('id')
  }).then(stars => stars.map(star => {
    return star.code
  }))
  await res.status(200).respond(items, ItemSerializer)
}
export default listRoute
|
(function (global, factory) {
typeof exports === 'object' && typeof module !== 'undefined' ? module.exports = factory(require('vue')) :
typeof define === 'function' && define.amd ? define(['vue'], factory) :
(global = global || self, global['vue-trend-chart'] = factory(global.vue));
}(this, (function (vue) { 'use strict';
function validatePadding (padding) {
  // A padding string is valid when it holds 1-4 whitespace-separated,
  // non-negative numeric entries (CSS shorthand style).
  var entries = padding.split(" ");
  var parsed = [];
  for (var i = 0; i < entries.length; i++) {
    if (entries[i] === "") { continue; }
    parsed.push(parseInt(entries[i]));
  }
  if (parsed.length < 1 || parsed.length > 4) { return false; }
  for (var j = 0; j < parsed.length; j++) {
    // NaN fails the >= 0 comparison, matching the original semantics.
    if (!(typeof parsed[j] == "number" && parsed[j] >= 0)) { return false; }
  }
  return true;
}
function getPadding (padding) {
  // Expand a CSS-like padding shorthand ("t", "t r", "t r b", "t r b l")
  // into an explicit { top, right, bottom, left } object.
  var values = padding
    .split(" ")
    .filter(function (chunk) { return chunk !== ""; })
    .map(function (chunk) { return parseInt(chunk); });
  if (values.length === 4) {
    return { top: values[0], right: values[1], bottom: values[2], left: values[3] };
  }
  if (values.length === 3) {
    return { top: values[0], right: values[1], bottom: values[2], left: values[1] };
  }
  if (values.length === 2) {
    return { top: values[0], right: values[1], bottom: values[0], left: values[1] };
  }
  return { top: values[0], right: values[0], bottom: values[0], left: values[0] };
}
// Renders optional vertical/horizontal grid lines inside the chart boundary.
var TrendChartGrid = {
  name: "TrendChartGrid",
  // Opt out of the Vue 2 render-function compat shim.
  compatConfig: { RENDER_FUNCTION: false },
  props: {
    // Drawing-area limits: { minX, minY, maxX, maxY } in SVG coordinates.
    boundary: {
      required: true,
      type: Object
    },
    verticalLines: {
      default: false,
      type: Boolean
    },
    verticalLinesNumber: {
      default: 0,
      type: Number
    },
    horizontalLines: {
      default: false,
      type: Boolean
    },
    horizontalLinesNumber: {
      default: 0,
      type: Number
    }
  },
  methods: {
    // SVG attributes for the n-th (1-based) vertical grid line, spaced
    // evenly across the boundary width.
    setVerticalLinesParams: function setVerticalLinesParams(n) {
      var ref = this;
      var boundary = ref.boundary;
      var verticalLinesNumber = ref.verticalLinesNumber;
      var step =
        verticalLinesNumber > 1
          ? (boundary.maxX - boundary.minX) / (verticalLinesNumber - 1)
          : 0;
      var x = boundary.minX + step * (n - 1);
      var y1 = boundary.minY;
      var y2 = boundary.maxY;
      return {
        x1: x,
        x2: x,
        y1: y1,
        y2: y2,
        stroke: "rgba(0,0,0,0.1)"
      };
    },
    // SVG attributes for the n-th (1-based) horizontal grid line, spaced
    // evenly up from the bottom of the boundary.
    setHorizontalLinesParams: function setHorizontalLinesParams(n) {
      var ref = this;
      var boundary = ref.boundary;
      var horizontalLinesNumber = ref.horizontalLinesNumber;
      var step =
        horizontalLinesNumber > 1
          ? (boundary.maxY - boundary.minY) / (horizontalLinesNumber - 1)
          : 0;
      var y = boundary.maxY - step * (n - 1);
      var x1 = boundary.minX;
      var x2 = boundary.maxX;
      return {
        x1: x1,
        x2: x2,
        y1: y,
        y2: y,
        stroke: "rgba(0,0,0,0.1)"
      };
    }
  },
  render: function render() {
    // Nothing to draw when both line groups are disabled.
    if (!this.verticalLines && !this.horizontalLines) { return; }
    var children = [];
    // Vertical Lines
    if (this.verticalLines && this.verticalLinesNumber > 0) {
      var lines = [];
      for (var i = 1; i <= this.verticalLinesNumber; i++) {
        lines.push(
          vue.h("line", {
            class: "line",
            attrs: Object.assign({}, this.setVerticalLinesParams(i))
          })
        );
      }
      children.push(
        vue.h(
          "g",
          {
            class: "vertical"
          },
          lines
        )
      );
    }
    // Horizontal Lines
    if (this.horizontalLines && this.horizontalLinesNumber > 0) {
      var lines$1 = [];
      for (var i$1 = 1; i$1 <= this.horizontalLinesNumber; i$1++) {
        lines$1.push(
          vue.h("line", {
            class: "line",
            attrs: Object.assign({}, this.setHorizontalLinesParams(i$1))
          })
        );
      }
      children.push(
        vue.h(
          "g",
          {
            class: "horizontal"
          },
          lines$1
        )
      );
    }
    // Render component
    return vue.h("g", children);
  }
};
// Renders x-axis labels (from an array) and y-axis labels (interpolated
// between minValue and maxValue) around the chart boundary.
var TrendChartLabels = {
  name: "TrendChartLabels",
  compatConfig: { RENDER_FUNCTION: false },
  props: {
    // Drawing-area limits: { minX, minY, maxX, maxY } in SVG coordinates.
    boundary: {
      required: true,
      type: Object
    },
    minValue: {
      type: Number
    },
    maxValue: {
      type: Number
    },
    // One label per data point along the x axis.
    xLabels: {
      type: Array
    },
    // Number of evenly-spaced labels along the y axis.
    yLabels: {
      type: Number
    },
    yLabelsTextFormatter: {
      default: function (value) { return value; },
      type: Function
    }
  },
  data: function data() {
    return {
      // Measured text heights, filled in after first mount.
      xLabelHeight: null,
      yLabelHeight: null
    };
  },
  methods: {
    // Translate transform for the n-th (0-based) x label.
    setXLabelsParams: function setXLabelsParams(n) {
      var ref = this;
      var boundary = ref.boundary;
      var xLabels = ref.xLabels;
      var step = (boundary.maxX - boundary.minX) / (xLabels.length - 1);
      var x = boundary.minX + step * n;
      var y = boundary.maxY;
      return { transform: ("translate(" + x + ", " + y + ")") };
    },
    // Translate transform for the n-th (0-based) y label, counted upward
    // from the bottom of the boundary.
    setYLabelsParams: function setYLabelsParams(n) {
      var ref = this;
      var boundary = ref.boundary;
      var yLabels = ref.yLabels;
      var step = (boundary.maxY - boundary.minY) / (yLabels - 1);
      var x = boundary.minX;
      var y = boundary.maxY - step * n;
      return { transform: ("translate(" + x + ", " + y + ")") };
    }
  },
  mounted: function mounted() {
    // Measure rendered label text so offsets can account for font size.
    if (this.xLabels && this.xLabels.length) {
      this.xLabelHeight = this.$refs.xLabels
        .querySelector("text")
        .getBoundingClientRect().height;
    }
    if (this.yLabels && this.yLabels > 0) {
      this.yLabelHeight = this.$refs.yLabels
        .querySelector("text")
        .getBoundingClientRect().height;
    }
  },
  render: function render() {
    var this$1 = this;
    // Nothing to draw when neither axis has labels configured.
    if (
      !(this.xLabels && this.xLabels.length) &&
      !(this.yLabels && this.yLabels > 0)
    )
    { return; }
    var children = [];
    // x labels
    if (this.xLabels && this.xLabels.length) {
      children.push(
        vue.h(
          "g",
          {
            class: "x-labels",
            ref: "xLabels"
          },
          this.xLabels.map(function (label, i) {
            return vue.h(
              "g",
              {
                class: "label",
                attrs: Object.assign({}, this$1.setXLabelsParams(i))
              },
              [
                vue.h(
                  "text",
                  {
                    attrs: {
                      dy: this$1.xLabelHeight + 5,
                      "text-anchor": "middle"
                    }
                  },
                  label
                ),
                vue.h("line", { attrs: { stroke: "rgba(0,0,0,0.1)", y2: 5 } })
              ]
            );
          })
        )
      );
    }
    // y labels
    if (this.yLabels && this.yLabels > 0) {
      var labels = [];
      for (var i = 0; i < this.yLabels; i++) {
        labels.push(
          vue.h(
            "g",
            {
              class: "label",
              attrs: Object.assign({}, this.setYLabelsParams(i))
            },
            [
              vue.h(
                "text",
                {
                  attrs: {
                    dx: -10,
                    dy: this.yLabelHeight / 4,
                    "text-anchor": "end"
                  }
                },
                // Interpolate the label value between minValue and maxValue.
                this.yLabelsTextFormatter(
                  this.minValue +
                    ((this.maxValue - this.minValue) / (this.yLabels - 1)) * i
                )
              ),
              vue.h("line", { attrs: { stroke: "rgba(0,0,0,0.1)", x1: 0, x2: -5 } })
            ]
          )
        );
      }
      children.push(
        vue.h(
          "g",
          {
            class: "y-labels",
            ref: "yLabels"
          },
          labels
        )
      );
    }
    // Render component
    return vue.h("g", children);
  }
};
// Map a series of raw values (numbers or { value } objects) onto SVG
// coordinates inside the drawing boundary `ref`.
function genPoints (arr, ref, max, min, maxAmount) {
  var minX = ref.minX;
  var minY = ref.minY;
  var maxX = ref.maxX;
  var maxY = ref.maxY;

  var values = arr.map(function (item) { return (typeof item === "number" ? item : item.value); });
  // Pad the value range by 0.001 on each side so a flat series still yields
  // a finite scale instead of dividing by zero.
  var minValue = min - 0.001;
  var stepX = (maxX - minX) / (maxAmount - 1);
  var scaleY = (maxY - minY) / (max + 0.001 - minValue);

  return values.map(function (value, index) {
    // Tiny nudge on the first/last point keeps path endpoints distinct.
    var nudge = +(index === maxAmount - 1) * 0.00001 - +(index === 0) * 0.00001;
    return {
      x: index * stepX + minX,
      y: maxY - (value - minValue) * scaleY + nudge
    };
  });
}
// Build the SVG path strings for a data line (linePath) and for its filled
// area closed down to the bottom of the boundary (fillPath).
function genPath (pnts, smooth, ref) {
  var maxY = ref.maxY;
  var points = [].concat( pnts );
  var start = points.shift();
  var end = points[points.length - 1];
  var distance = points[0].x - start.x;
  var bezierX = distance / 2;

  // Create Line Path
  var linePath = "M " + (start.x) + "," + (start.y);
  points.forEach(function (point, index) {
    if (!smooth) { linePath += " L" + (point.x) + "," + (point.y); }
    else {
      // Cubic bezier with horizontal control points halfway between samples.
      var prev = points[index - 1] || start;
      linePath += " C " + (bezierX + prev.x) + "," + (prev.y) + " " + (bezierX + prev.x) + "," + (point.y) + " " + (point.x) + "," + (point.y);
    }
  });

  // Create Fill Path
  // Bug fix: the original checked `end.Y`/`start.Y` (capital Y, always
  // undefined), so the closing segments were appended unconditionally.
  // The coordinate property is lowercase `y`; redundant segments are now
  // skipped when an endpoint already sits on the bottom edge.
  var fillPath = linePath;
  if (end.y !== maxY) { fillPath += " L" + (end.x) + "," + maxY; }
  if (start.y !== maxY) { fillPath += " L" + (start.x) + "," + maxY; }
  fillPath += " Z";
  return { linePath: linePath, fillPath: fillPath };
}
// Renders one dataset as an SVG curve: optional fill area, stroke line and
// per-point circles (with the hovered point highlighted).
var TrendChartCurve = {
  name: "TrendChartCurve",
  compatConfig: { RENDER_FUNCTION: false },
  props: {
    // Drawing-area limits: { minX, minY, maxX, maxY } in SVG coordinates.
    boundary: {
      required: true,
      type: Object
    },
    minValue: {
      required: true,
      type: Number
    },
    maxValue: {
      required: true,
      type: Number
    },
    // Largest dataset length across the chart; fixes the x spacing.
    maxAmount: {
      required: true,
      type: Number
    },
    // { index } of the currently hovered x position, if any.
    activeLineParams: {
      type: Object
    },
    data: {
      required: true,
      type: Array
    },
    className: {
      type: String
    },
    smooth: {
      default: false,
      type: Boolean
    },
    stroke: {
      default: true,
      type: Boolean
    },
    fill: {
      default: false,
      type: Boolean
    },
    showPoints: {
      default: false,
      type: Boolean
    }
  },
  computed: {
    // Data values projected onto SVG coordinates (see genPoints).
    points: function points() {
      return genPoints(
        this.data,
        this.boundary,
        this.maxValue,
        this.minValue,
        this.maxAmount
      );
    },
    // { linePath, fillPath } SVG path strings (see genPath).
    paths: function paths() {
      return genPath(this.points, this.smooth, this.boundary);
    }
  },
  render: function render() {
    var this$1 = this;
    var children = [];
    // Fill path
    if (this.fill && this.paths && this.paths.fillPath) {
      children.push(
        vue.h("path", {
          class: "fill",
          attrs: {
            d: this.paths.fillPath,
            fill: "rgba(0,0,0,0.15)"
          }
        })
      );
    }
    // Stroke path
    if (this.stroke && this.paths && this.paths.linePath) {
      children.push(
        vue.h("path", {
          class: "stroke",
          attrs: {
            d: this.paths.linePath,
            fill: "none",
            stroke: "black"
          }
        })
      );
    }
    // Points
    if (this.showPoints && this.points) {
      children.push(
        vue.h(
          "g",
          {
            class: "points"
          },
          this.points.map(function (point, i) { return vue.h("circle", {
            class: {
              point: true,
              // Highlight the point under the interactive hover line.
              "is-active":
                this$1.activeLineParams && this$1.activeLineParams.index === i
            },
            attrs: {
              cx: point.x,
              cy: point.y,
              r: 2,
              stroke: "#000000",
              "stroke-width": 1
            }
          }); }
          )
        )
      );
    }
    // Render component
    return vue.h(
      "g",
      {
        class: this.className
      },
      children
    );
  }
};
// Top-level chart component: owns sizing, padding/label-overflow layout,
// hover interaction, and composes grid, labels and one curve per dataset
// into a single <svg>.
var TrendChart = {
  name: "TrendChart",
  components: { TrendChartGrid: TrendChartGrid, TrendChartLabels: TrendChartLabels, TrendChartCurve: TrendChartCurve },
  compatConfig: { RENDER_FUNCTION: false },
  props: {
    // Array of dataset objects; each is spread onto a TrendChartCurve.
    datasets: {
      required: true,
      type: Array
    },
    grid: {
      default: null,
      type: Object
    },
    labels: {
      default: null,
      type: Object
    },
    // Optional forced value range; only widens the computed range.
    max: {
      type: Number
    },
    min: {
      type: Number
    },
    // CSS-shorthand padding string, validated by validatePadding.
    padding: {
      default: "5",
      type: String,
      validator: function validator(val) {
        return validatePadding(val);
      }
    },
    // Enables the hover line and mouse events.
    interactive: {
      default: false,
      type: Boolean
    }
  },
  data: function data() {
    return {
      width: null,
      height: null,
      // Extra space reserved so axis labels are not clipped.
      labelsOverflowObject: { top: 0, right: 0, bottom: 0, left: 0 },
      // x coordinate of the hover line, or null when not hovering.
      activeLine: null,
      activeLineParams: null
    };
  },
  computed: {
    paddingObject: function paddingObject() {
      if (!this.padding) { return getPadding("0"); }
      return getPadding(this.padding);
    },
    // Usable drawing area after padding and label overflow are subtracted.
    boundary: function boundary() {
      var ref = this;
      var width = ref.width;
      var height = ref.height;
      var paddingObject = ref.paddingObject;
      var labelsOverflowObject = ref.labelsOverflowObject;
      var boundary = {
        minX: paddingObject.left + labelsOverflowObject.left,
        minY: paddingObject.top + labelsOverflowObject.top,
        maxX: width - paddingObject.right - labelsOverflowObject.right,
        maxY: height - paddingObject.bottom - labelsOverflowObject.bottom
      };
      return boundary;
    },
    // Global min/max value and longest dataset length across all datasets.
    params: function params() {
      var maxValue = -Infinity;
      var minValue = Infinity;
      var maxAmount = 0;
      this.datasets.forEach(function (dataset) {
        var dataArr = dataset.data.map(function (item) { return typeof item === "number" ? item : item.value; }
        );
        var max = Math.max.apply(Math, dataArr);
        if (max > maxValue) { maxValue = max; }
        var min = Math.min.apply(Math, dataArr);
        if (min < minValue) { minValue = min; }
        if (dataArr.length > maxAmount) { maxAmount = dataArr.length; }
      });
      if (this.max !== undefined && this.max > maxValue) { maxValue = this.max; }
      if (this.min !== undefined && this.min < minValue) { minValue = this.min; }
      return { maxValue: maxValue, minValue: minValue, maxAmount: maxAmount };
    },
    // Invisible rect covering the boundary that captures mouse events.
    chartOverlayParams: function chartOverlayParams() {
      var ref = this;
      var boundary = ref.boundary;
      var width = boundary.maxX - boundary.minX;
      var height = boundary.maxY - boundary.minY;
      return {
        x: boundary.minX,
        y: boundary.minY,
        width: width > 0 ? width : 0,
        height: height > 0 ? height : 0,
        opacity: 0
      };
    },
    // x coordinate of each data column; used to snap the hover line.
    chartAxesXCoords: function chartAxesXCoords() {
      var axes = [];
      var step =
        (this.boundary.maxX - this.boundary.minX) / (this.params.maxAmount - 1);
      for (var i = 0; i < this.params.maxAmount; i++) {
        axes.push(step * i + this.boundary.minX);
      }
      return axes;
    }
  },
  methods: {
    // Read the rendered SVG size into component state.
    setSize: function setSize() {
      var params = this.$refs["chart"].getBoundingClientRect();
      this.width = params.width;
      this.height = params.height;
    },
    // Measure how far labels overflow the chart rect and reserve space.
    fitLabels: function fitLabels() {
      var chart = this.$refs["chart"];
      var chartLabels = this.$refs["labels"];
      if (
        chartLabels &&
        ((chartLabels.xLabels && chartLabels.xLabels.length) ||
          chartLabels.yLabels > 0)
      ) {
        var chartClientRect = chart.getBoundingClientRect();
        var chartLabelsClientRect = chartLabels.$el.getBoundingClientRect();
        var top =
          chartClientRect.top -
          chartLabelsClientRect.top +
          this.paddingObject.top;
        var right =
          chartLabelsClientRect.right -
          chartClientRect.right +
          this.paddingObject.right;
        var bottom =
          chartLabelsClientRect.bottom -
          chartClientRect.bottom +
          this.paddingObject.bottom;
        var left =
          this.paddingObject.left -
          chartLabelsClientRect.left +
          chartClientRect.left;
        this.labelsOverflowObject = {
          top: top > 0 ? top : 0,
          right: right > 0 ? right : 0,
          bottom: bottom > 0 ? bottom : 0,
          left: left > 0 ? left : 0
        };
      } else {
        this.labelsOverflowObject = { top: 0, right: 0, bottom: 0, left: 0 };
      }
    },
    // Initial layout: size first, then fit labels once they have rendered.
    init: function init() {
      var this$1 = this;
      this.setSize();
      this.$nextTick(function () {
        this$1.fitLabels();
      });
    },
    // Snap an x position to the nearest data-column coordinate.
    getNearestCoordinate: function getNearestCoordinate(val) {
      return (
        this.chartAxesXCoords.reduce(
          function (p, n) { return (Math.abs(p) > Math.abs(n - val) ? n - val : p); },
          Infinity
        ) + val
      );
    },
    mouseMove: function mouseMove(e) {
      if (this.$refs.chart !== undefined) {
        var rect = this.$refs.chart.getBoundingClientRect();
        this.activeLine = this.getNearestCoordinate(e.clientX - rect.left);
      }
    },
    mouseOut: function mouseOut() {
      this.activeLine = null;
      this.activeLineParams = null;
    },
    onWindowResize: function onWindowResize() {
      this.setSize();
    }
  },
  watch: {
    // Emit the hovered column's data (or null) whenever the hover line moves.
    activeLine: function activeLine(val) {
      var this$1 = this;
      var data = [];
      if (val) {
        this.activeLineParams = {
          index: this.chartAxesXCoords.indexOf(this.activeLine)
        };
        this.datasets.forEach(function (dataset) {
          data.push(dataset.data[this$1.activeLineParams.index]);
        });
      }
      this.$emit(
        "mouse-move",
        this.activeLineParams ? Object.assign({}, this.activeLineParams, {data: data}) : null
      );
    },
    labels: {
      handler: function handler() {
        var this$1 = this;
        // Reset labels rect overflow
        this.labelsOverflowObject = { top: 0, right: 0, bottom: 0, left: 0 };
        // Calculate new labels rect overflow
        this.$nextTick(function () {
          this$1.fitLabels();
        });
      },
      deep: true
    }
  },
  mounted: function mounted() {
    this.init();
    window.addEventListener("resize", this.onWindowResize);
  },
  unmounted: function unmounted() {
    window.removeEventListener("resize", this.onWindowResize);
  },
  render: function render() {
    var this$1 = this;
    var children = [];
    // Grid
    if (this.grid) {
      children.push(
        vue.h(TrendChartGrid, {
          class: "grid",
          attrs: {
            verticalLines: this.grid.verticalLines,
            verticalLinesNumber:
              this.grid.verticalLinesNumber || this.params.maxAmount,
            horizontalLines: this.grid.horizontalLines,
            horizontalLinesNumber:
              this.grid.horizontalLinesNumber ||
              (this.labels && this.labels.yLabels) ||
              0,
            boundary: this.boundary
          }
        })
      );
    }
    // Chart active line
    if (this.interactive && this.chartOverlayParams) {
      children.push(
        vue.h("line", {
          class: "active-line",
          ref: "active-line",
          attrs: {
            x1: this.activeLine || this.boundary.minX,
            x2: this.activeLine || this.boundary.minX,
            y1: this.boundary.minY,
            y2: this.boundary.maxY,
            stroke: "black",
            visibility: this.activeLine ? "visible" : "hidden"
          }
        })
      );
    }
    // Labels
    if (this.labels) {
      children.push(
        vue.h(TrendChartLabels, {
          class: "labels",
          ref: "labels",
          attrs: Object.assign({}, this.labels,
            {boundary: this.boundary,
            minValue: this.params.minValue,
            maxValue: this.params.maxValue})
        })
      );
    }
    // Curves
    this.datasets.map(function (dataset) {
      children.push(
        vue.h(TrendChartCurve, {
          class: "curve",
          attrs: Object.assign({}, dataset,
            {boundary: this$1.boundary,
            minValue: this$1.params.minValue,
            maxValue: this$1.params.maxValue,
            maxAmount: this$1.params.maxAmount,
            activeLineParams: this$1.activeLineParams})
        })
      );
    });
    // Chart overlay
    if (this.interactive && this.chartOverlayParams) {
      children.push(
        vue.h("rect", {
          ref: "interactive-area",
          attrs: Object.assign({}, this.chartOverlayParams),
          on: {
            mousemove: function (e) { return this$1.mouseMove(e); },
            mouseout: function () { return this$1.mouseOut(); }
          }
        })
      );
    }
    // Render component
    return vue.h(
      "svg",
      {
        class: "vtc",
        ref: "chart",
        attrs: {
          xmlns: "http://www.w3.org/2000/svg",
          width: "100%",
          height: "100%"
        }
      },
      children
    );
  }
};
// Vue plugin hook: registers the component globally under its own name.
TrendChart.install = function(Vue) {
  Vue.component(TrendChart.name, TrendChart);
};

// Auto-install when the bundle is loaded on a page with a global Vue.
if (typeof window !== "undefined" && window.Vue) {
  window.Vue.use(TrendChart);
}
return TrendChart;
})));
|
//_ => ~ => -
{
/* <script src="/jspsych-6.3.0/plugins/jspsych-html-keyboard-response.js"></script> */
// <div id="clock"></div>
// <div id="jspsych-experiment"></div>
// let J = new L(true, "clock");
}
// jsPsych-based lateralised-stimulus task: builds a shuffled trial timeline,
// runs it through jsPsych, and accumulates accuracy / reaction-time / score
// summaries into encoded result strings.
class J {
  constructor(isExercise, clockId) {
    this._one = '';          // per-trial encoded results (pos_press_acc_rt), '~'-separated
    this._all = '';          // summary string built by process()
    this._tmpAll = {
      Acc: 0,                // number of correct responses
      RT: 0,                 // summed reaction time of correct responses
      Score: 0,              // live score shown during the run
    };
    this._clockId = clockId || 'clock';
    this._mode = isExercise; // exercise mode flag
    this._questionsNum = 100;
    this._imgPath = '/image/L/';
    this._bounder = {
      // Pixel bounds for random stimulus placement.
      x: 155,
      y: 70,
    };
    this._questionType = {
      // Trial phases: fixation cross, empty table, table with a ball on the
      // right (expects 'j') or left (expects 'f'), and a blank screen.
      TEN: 0,
      TABLE: 1,
      TABLE_J: 2,
      TABLE_F: 3,
      WHITE: 4,
    };
  }

  // Build the shuffled question sequence: each trial is the 4-phase run
  // [fixation, table, left-or-right stimulus, blank].
  _start() {
    const { _questionType: TYPE, _questionsNum: questionsNum } = this;
    const getPlaceList = () => {
      // Equal numbers of left/right stimuli, shuffled.
      const place_basis = [TYPE.TABLE_J, TYPE.TABLE_F];
      let placeList = [];
      for (let i = 0; i < questionsNum / 2; i++)
        placeList = placeList.concat(place_basis);
      return placeList.sort(() => 0.5 - Math.random());
    };
    let questions = [];
    const placeList = getPlaceList();
    placeList.map((place) => {
      const trail = [TYPE.TEN, TYPE.TABLE, place, TYPE.WHITE];
      questions = questions.concat(trail);
    });
    return questions;
  }

  // Translate the question sequence into a jsPsych timeline of
  // html-keyboard-response trials.
  _level(questions) {
    const {
      _questionsNum: num,
      _questionType: TYPE,
      _imgPath: path,
      _bounder: { x: bounderX, y: bounderY },
    } = this;
    const generateColorList = () => {
      // Half orange / half white ball colors, shuffled.
      let list = [];
      for (let i = 0; i < num; i++) list.push(i % 2 == 0 ? 'orange' : 'white');
      return list.sort(() => 0.5 - Math.random());
    };
    const generateRandomInt = (min, max) =>
      Math.floor(Math.random() * (max + 1 - min) + min);
    const getStimulus = (isLeft, colorList) => {
      // Table image plus a randomly-placed ball on the requested side.
      const getPlace = (isLeft) => {
        const x = generateRandomInt(20, bounderX);
        const y =
          Math.random() > 0.5
            ? generateRandomInt(0, bounderY)
            : -generateRandomInt(0, bounderY);
        return isLeft ? { x: -x, y } : { x: +x, y };
      };
      const color = colorList.shift();
      const { x, y } = getPlace(isLeft);
      return `<img src='${path}table.jpg' class="table"><img src='${path}${color}.png' style='margin-left:${x}px;margin-top:${y}px;' class="ball">`;
    };
    let timeline = [];
    let colorList = generateColorList();
    questions.map((value, index) => {
      switch (value) {
        case TYPE.TEN:
          // Fixation cross, no response allowed.
          timeline.push({
            type: 'html-keyboard-response',
            stimulus:
              "<p style='font-size: 30px; font-weight: bold; color: black'>+</p>",
            choices: jsPsych.NO_KEYS,
            trial_duration: generateRandomInt(200, 800),
          });
          break;
        case TYPE.TABLE:
          // Empty table, 'j'/'f' accepted.
          timeline.push({
            type: 'html-keyboard-response',
            stimulus: `<img src='${path}table.jpg' class="table">`,
            choices: ['j', 'f'],
            trial_duration: 500,
          });
          break;
        case TYPE.TABLE_J:
          // Ball on the right: correct key is 'j'.
          timeline.push({
            type: 'html-keyboard-response',
            stimulus: getStimulus(false, colorList),
            choices: ['j', 'f'],
            trial_duration: 500,
          });
          break;
        case TYPE.TABLE_F:
          // Ball on the left: correct key is 'f'.
          timeline.push({
            type: 'html-keyboard-response',
            stimulus: getStimulus(true, colorList),
            choices: ['j', 'f'],
            trial_duration: 500,
          });
          break;
        case TYPE.WHITE:
          // Blank inter-trial screen.
          // NOTE(review): `<label id="score"><label>` looks like a typo for a
          // closing `</label>` — confirm before changing the markup.
          timeline.push({
            type: 'html-keyboard-response',
            stimulus: `<label id="score"><label>`,
            choices: jsPsych.NO_KEYS,
            trial_duration: generateRandomInt(100, 300),
          });
          break;
      }
    });
    return timeline;
  }

  // Run one full round through jsPsych; resolves when the timeline finishes.
  _round() {
    const questions = this._start();
    const timeline = this._level(questions);
    console.log(timeline);
    let questionsIndex = 0;
    const score = document.getElementById(this._clockId);
    score.innerHTML = '<br>';
    return new Promise((resolve) => {
      jsPsych.init({
        timeline: timeline,
        display_element: 'jspsych-experiment',
        on_trial_start: () => {
          // Keep the on-screen score display current.
          score.innerHTML = this._tmpAll.Score;
        },
        on_trial_finish: () => {
          // Award a point for each correct left/right response.
          if (
            questions[questionsIndex] == this._questionType.TABLE_J ||
            questions[questionsIndex] == this._questionType.TABLE_F
          ) {
            const lastData = JSON.parse(jsPsych.data.getLastTrialData().json());
            const localType =
              questions[questionsIndex] == this._questionType.TABLE_J
                ? 'j'
                : 'f';
            this._tmpAll.Score += localType == lastData[0].response ? 1 : 0;
          }
          questionsIndex++;
        },
        on_finish: () => {
          // Encode every stimulus trial as pos_press_acc_rt and accumulate
          // accuracy / reaction-time totals.
          const { _questionType: TYPE } = this;
          const data = JSON.parse(jsPsych.data.get().json());
          console.log(data);
          let typeJ = null;
          data.map((value, index) => {
            switch (questions[index]) {
              case TYPE.TEN:
                if (index > 0) this._one += '~';
                break;
              case TYPE.TABLE_J:
                typeJ = 'j';
              // intentional fallthrough: TABLE_J shares the encoding below
              case TYPE.TABLE_F:
                const type = typeJ || 'f';
                const pos = type == 'j' ? 1 : 2;
                // 'NS' encodes "no response" for both keypress and RT.
                const press =
                  value.response == null ? 'NS' : value.response == 'j' ? 1 : 2;
                const acc = pos == press ? 1 : 0;
                const rt = value.rt == null ? 'NS' : Math.floor(value.rt);
                this._one += `${pos}_${press}_${acc}_${rt}`;
                if (acc == 1) {
                  this._tmpAll.Acc++;
                  this._tmpAll.RT += rt;
                }
                typeJ = null;
                break;
              case TYPE.WHITE:
                break;
            }
          });
          resolve('end');
        },
      });
    });
  }

  // Run the task and return the encoded per-trial and summary result strings.
  async process() {
    await this._round();
    const { _questionsNum: num } = this;
    const { Acc, RT, Score } = this._tmpAll;
    // Summary: accuracy percent, mean RT over correct trials, final score.
    this._all = `${Math.floor((Acc * 100) / num)}_${Math.floor(
      RT / Acc
    )}_${Score}`;
    // NOTE(review): both branches return the same object, so the _mode check
    // below is currently redundant — confirm whether exercise mode was meant
    // to return something different.
    if (this._mode) return { one: this._one, all: this._all };
    return { one: this._one, all: this._all };
  }
}
|
# -*- coding: utf-8 -*-
# Copyright (c) 2010-2016, MIT Probabilistic Computing Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import namedtuple
from bayeslite.exception import BQLParseError
from bayeslite.util import casefold
import grammar
'''
grep -o 'K_[A-Z][A-Z0-9_]*' < grammar.y | sort -u | awk '
{
sub("^K_", "", $1);
printf(" '\''%s'\'': grammar.K_%s,\n", tolower($1), $1);
}'
'''
# Mapping from lower-cased keyword text to the terminal codes generated from
# grammar.y; built with the grep/awk recipe in the module docstring above.
# Keep this table in sync with the grammar.
KEYWORDS = {
    'clustering': grammar.K_CLUSTERING,
    'concentration': grammar.K_CONCENTRATION,
    'hyperparameters': grammar.K_HYPERPARAMETERS,
    'loom': grammar.K_LOOM,
    'optimized': grammar.K_OPTIMIZED,
    'quiet': grammar.K_QUIET,
    'row': grammar.K_ROW,
    'rows': grammar.K_ROWS,
    'skip': grammar.K_SKIP,
    'subproblem': grammar.K_SUBPROBLEM,
    'subproblems': grammar.K_SUBPROBLEMS,
    'variable': grammar.K_VARIABLE,
    'variables': grammar.K_VARIABLES,
}
# Single-character punctuation tokens recognized by tokenize().
PUNCTUATION = {
    ',': grammar.T_COMMA,
    '(': grammar.T_LROUND,
    ')': grammar.T_RROUND,
    ';': grammar.T_SEMI,
}
def parse(tokens):
    """Parse a token iterable into a list of ANALYZE phrases.

    Drives the generated LALR parser over the token stream while keeping a
    sliding window of the ten most recent tokens for error reporting.
    Raises BQLParseError when the semantics recorded errors or failed.
    """
    sem = CGpmAnalyzeSemantics()
    machine = grammar.Parser(sem)
    for tok in tokenize(tokens):
        sem.context.append(tok)
        while len(sem.context) > 10:
            sem.context.pop(0)
        machine.feed(tok)
    if sem.errors:
        raise BQLParseError(sem.errors)
    if sem.failed:
        raise BQLParseError(['parse failed mysteriously'])
    assert sem.phrases is not None
    return sem.phrases
def tokenize(tokens):
    """Translate a mixed list of strings and numbers into (terminal, text)
    pairs for the generated parser, ending with the EOF sentinel (0, '')."""
    for tok in tokens:
        if isinstance(tok, str):
            folded = casefold(tok)
            if folded in KEYWORDS:
                yield KEYWORDS[folded], tok
            elif tok in PUNCTUATION:
                yield PUNCTUATION[tok], tok
            else:               # XXX check for alphanumeric/_
                yield grammar.L_NAME, tok
        elif isinstance(tok, (int, float)):
            yield grammar.L_NUMBER, tok
        else:
            raise IOError('Invalid token: %r' % (tok,))
    yield 0, ''                 # EOF
class CGpmAnalyzeSemantics(object):
    """Semantic actions for the generated ANALYZE grammar parser.

    The grammar.Parser calls the p_* methods as productions are reduced;
    each returns the AST fragment for its rule.  Errors are accumulated in
    `errors` rather than raised, and `phrases` holds the final parse result.
    NOTE: written for Python 2 — syntax_error uses a tuple parameter.
    """
    def __init__(self):
        # Sliding window of recent (terminal, text) tokens for error messages.
        self.context = []
        self.errors = []
        self.failed = False
        self.phrases = None
    def accept(self):
        pass
    def parse_failed(self):
        self.failed = True
    def syntax_error(self, (token, text)):
        if token == -1: # error
            self.errors.append('Bad token: %r' % (text,))
        else:
            self.errors.append("Syntax error near [%s] after [%s]" % (
                text, ' '.join([str(t) for (_t, t) in self.context[:-1]])))
    # Grammar start symbol: store the finished phrase list.
    def p_analysis_start(self, ps): self.phrases = ps
    def p_phrases_one(self, p): return [p] if p else []
    def p_phrases_many(self, ps, p):
        if p: ps.append(p)
        return ps
    # Individual ANALYZE phrases map to the namedtuple AST nodes below.
    def p_phrase_none(self,): return None
    def p_phrase_variables(self, cols): return Variables(cols)
    def p_phrase_skip(self, cols): return Skip(cols)
    def p_phrase_rows(self, rows): return Rows(rows)
    def p_phrase_loom(self): return Optimized('loom')
    def p_phrase_optimized(self): return Optimized('lovecat')
    def p_phrase_quiet(self): return Quiet(True)
    def p_phrase_subproblems(self, s): return Subproblem(s)
    # Subproblem lists collapse into flat Python lists of subproblem names.
    def p_subproblems_list_one(self, s): return [s]
    def p_subproblems_list_many(self, s): return s
    def p_subproblems_one(self, s): return [s]
    def p_subproblems_many(self, ss, s): ss.append(s); return ss
    def p_subproblem_variable_hyperparameters(self):
        return 'variable_hyperparameters'
    def p_subproblem_variable_clustering(self):
        return 'variable_clustering'
    def p_subproblem_variable_clustering_concentration(self):
        return 'variable_clustering_concentration'
    def p_subproblem_row_clustering(self):
        return 'row_clustering'
    def p_subproblem_row_clustering_concentration(self):
        return 'row_clustering_concentration'
    # Column and row lists.
    def p_column_list_one(self, col): return [col]
    def p_column_list_many(self, cols, col): cols.append(col); return cols
    def p_column_name_n(self, name): return name
    def p_row_list_one(self, row): return [row]
    def p_row_list_many(self, rows, row): rows.append(row); return rows
    def p_row_index_n(self, n): return n
# AST node types produced by CGpmAnalyzeSemantics, one per ANALYZE phrase.
Optimized = namedtuple('Optimized', ['backend'])
Quiet = namedtuple('Quiet', ['flag'])
Rows = namedtuple('Rows', ['rows'])
Skip = namedtuple('Skip', ['vars'])
Subproblem = namedtuple('Subproblem', ['subproblems'])
Variables = namedtuple('Variables', ['vars'])
|
'use strict';
/**
 * @class dE00
 * @classdesc
 * The CIE2000 color difference algorithm.
 * http://en.wikipedia.org/wiki/Color_difference#CIEDE2000
 * @constructs dE00
 * @memberOf DeltaE
 * @property {object} x1 The first LAB color configuration object.
 * @property {number} x1.L The L value, on a scale of 0-100.
 * @property {number} x1.A The A value, on a scale of -128 to 128.
 * @property {number} x1.B The B value, on a scale of -128 to 128.
 * @property {object} x2 The second LAB color configuration object.
 * @property {number} x2.L The L value, on a scale of 0-100.
 * @property {number} x2.A The A value, on a scale of -128 to 128.
 * @property {number} x2.B The B value, on a scale of -128 to 128.
 * @property {object} weights The weights configuration object.
 * @property {number} weights.lightness A weight factor to apply to lightness.
 * @property {number} weights.chroma A weight factor to apply to chroma.
 * @property {number} weights.hue A weight factor to apply to hue.
 * @example
 * var deltaE = new dE00(
 *     {L:50, A:50, B:50},
 *     {L:100, A:50, B:50}
 * );
 * console.log(deltaE.getDeltaE());
 */
function dE00(x1, x2, weights) {
    var sqrt = Math.sqrt;
    var pow = Math.pow;

    // Coerce all channels to numbers so string input behaves like numeric input.
    x1.L = parseFloat(x1.L);
    x1.A = parseFloat(x1.A);
    x1.B = parseFloat(x1.B);
    x2.L = parseFloat(x2.L);
    x2.A = parseFloat(x2.A);
    x2.B = parseFloat(x2.B);
    this.x1 = x1;
    this.x2 = x2;

    // Weight (k) factors default to 1.
    this.weights = weights || {};
    this.ksubL = this.weights.lightness || 1;
    this.ksubC = this.weights.chroma || 1;
    this.ksubH = this.weights.hue || 1;

    // Delta L Prime and mean lightness.
    this.deltaLPrime = x2.L - x1.L;
    this.LBar = (x1.L + x2.L) / 2;

    // Chroma of each color and their mean.
    this.C1 = sqrt(pow(x1.A, 2) + pow(x1.B, 2));
    this.C2 = sqrt(pow(x2.A, 2) + pow(x2.B, 2));
    this.CBar = (this.C1 + this.C2) / 2;

    // a' uses the same correction factor for both colors; compute it once.
    var aPrimeFactor =
        1 - sqrt(pow(this.CBar, 7) / (pow(this.CBar, 7) + pow(25, 7)));
    this.aPrime1 = x1.A + (x1.A / 2) * aPrimeFactor;
    this.aPrime2 = x2.A + (x2.A / 2) * aPrimeFactor;

    // C' for each color, their mean, and Delta C Prime.
    this.CPrime1 = sqrt(pow(this.aPrime1, 2) + pow(x1.B, 2));
    this.CPrime2 = sqrt(pow(this.aPrime2, 2) + pow(x2.B, 2));
    this.CBarPrime = (this.CPrime1 + this.CPrime2) / 2;
    this.deltaCPrime = this.CPrime2 - this.CPrime1;

    // Compensation terms for lightness (S_L) and chroma (S_C).
    this.SsubL = 1 +
        (0.015 * pow(this.LBar - 50, 2)) / sqrt(20 + pow(this.LBar - 50, 2));
    this.SsubC = 1 + 0.045 * this.CBarPrime;

    // Hue-related terms are computed in getDeltaE(); initialize them so the
    // convenience getters are usable before that call.
    this.hPrime1 = 0;
    this.hPrime2 = 0;
    this.deltahPrime = 0;
    this.deltaHPrime = 0;
    this.HBarPrime = 0;
    this.T = 0;
    this.SsubH = 0;
    this.RsubT = 0;
}
/**
 * Computes the CIE2000 colour difference between the two LAB colors,
 * caching every hue-related intermediate on the instance as a side effect.
 * @method
 * @returns {number}
 */
dE00.prototype.getDeltaE = function() {
    var sqrt = Math.sqrt;
    var sin = Math.sin;
    var pow = Math.pow;

    // Hue angles (degrees) and their differences.
    this.hPrime1 = this.gethPrime1();
    this.hPrime2 = this.gethPrime2();
    this.deltahPrime = this.getDeltahPrime();
    this.deltaHPrime = 2 * sqrt(this.CPrime1 * this.CPrime2) *
        sin(this.degreesToRadians(this.deltahPrime) / 2);
    this.HBarPrime = this.getHBarPrime();

    // Hue rotation / compensation terms.
    this.T = this.getT();
    this.SsubH = 1 + 0.015 * this.CBarPrime * this.T;
    this.RsubT = this.getRsubT();

    // Combine the weighted lightness, chroma and hue contributions.
    var lightness = this.deltaLPrime / (this.ksubL * this.SsubL);
    var chroma = this.deltaCPrime / (this.ksubC * this.SsubC);
    var hue = this.deltaHPrime / (this.ksubH * this.SsubH);
    return sqrt(
        pow(lightness, 2) +
        pow(chroma, 2) +
        pow(hue, 2) +
        this.RsubT * chroma * hue
    );
};
/**
 * Returns the hue-rotation term R_T.
 * @method
 * @returns {number}
 */
dE00.prototype.getRsubT = function() {
    var sin = Math.sin;
    var sqrt = Math.sqrt;
    var pow = Math.pow;
    var exp = Math.exp;
    // Rotation angle in degrees, peaking around a mean hue of 275.
    var rotation = 60 * exp(-pow((this.HBarPrime - 275) / 25, 2));
    // Magnitude term, driven by the mean chroma.
    var magnitude = sqrt(
        pow(this.CBarPrime, 7) /
        (pow(this.CBarPrime, 7) + pow(25, 7))
    );
    return -2 * magnitude * sin(this.degreesToRadians(rotation));
};
/**
 * Returns the hue-dependent T term.
 * @method
 * @returns {number}
 */
dE00.prototype.getT = function() {
    var cos = Math.cos;
    var h = this.HBarPrime;
    return 1 -
        0.17 * cos(this.degreesToRadians(h - 30)) +
        0.24 * cos(this.degreesToRadians(2 * h)) +
        0.32 * cos(this.degreesToRadians(3 * h + 6)) -
        0.20 * cos(this.degreesToRadians(4 * h - 63));
};
/**
 * Returns the mean hue H Bar Prime.
 * @method
 * @returns {number}
 */
dE00.prototype.getHBarPrime = function() {
    var sum = this.hPrime1 + this.hPrime2;
    // When the hues straddle the 0/360 boundary, average around the circle.
    if (Math.abs(this.hPrime1 - this.hPrime2) > 180) {
        return (sum + 360) / 2;
    }
    return sum / 2;
};
/**
 * Returns the Delta h Prime variable calculation.
 * @method
 * @returns {number}
 */
dE00.prototype.getDeltahPrime = function() {
    // When either C'1 or C'2 is zero, delta-h' is irrelevant and may be set
    // to zero.
    if (0 === this.C1 || 0 === this.C2) {
        return 0;
    }
    var diff = this.hPrime2 - this.hPrime1;
    if (Math.abs(diff) <= 180) {
        return diff;
    }
    // Wrap around the hue circle toward the shorter arc.
    return this.hPrime2 <= this.hPrime1 ? diff + 360 : diff - 360;
};
/**
 * Returns the h Prime 1 variable calculation (hue angle of color 1, degrees).
 * @method
 * @returns {number}
 */
dE00.prototype.gethPrime1 = function() {
    // Helper argument order is (B, a'): it computes atan2(B, a').
    return this._gethPrimeFn(this.x1.B, this.aPrime1);
};
/**
 * Returns the h Prime 2 variable calculation (hue angle of color 2, degrees).
 * @method
 * @returns {number}
 */
dE00.prototype.gethPrime2 = function() {
    // Helper argument order is (B, a'): it computes atan2(B, a').
    return this._gethPrimeFn(this.x2.B, this.aPrime2);
};
/**
 * A helper function to calculate the h Prime 1 and h Prime 2 values:
 * the hue angle atan2(x, y) normalized to [0, 360) degrees.
 * @method
 * @private
 * @returns {number}
 */
dE00.prototype._gethPrimeFn = function(x, y) {
    // Degenerate point at the origin: hue is defined as 0.
    if (x === 0 && y === 0) {
        return 0;
    }
    var angle = this.radiansToDegrees(Math.atan2(x, y));
    // Map negative angles into the [0, 360) range.
    return angle >= 0 ? angle : angle + 360;
};
/**
 * Gives the degree equivalent of a specified radian angle.
 * (The previous doc described the opposite conversion.)
 * @method
 * @param {number} radians The angle in radians.
 * @returns {number} The angle in degrees.
 */
dE00.prototype.radiansToDegrees = function(radians) {
    return radians * (180 / Math.PI);
};
/**
 * Gives the radian equivalent of a specified degree angle.
 * (The previous doc described the opposite conversion.)
 * @method
 * @param {number} degrees The angle in degrees.
 * @returns {number} The angle in radians.
 */
dE00.prototype.degreesToRadians = function(degrees) {
    return degrees * (Math.PI / 180);
};
module.exports = dE00;
|
"use strict";
/*
* Copyright The OpenTelemetry Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Compiled (TypeScript-emitted) module: marks itself as an ES-module interop
// namespace, then exports a single `_globalThis` binding.
Object.defineProperty(exports, "__esModule", { value: true });
exports._globalThis = void 0;
/** only globals that common to node and browsers are allowed */
// eslint-disable-next-line node/no-unsupported-features/es-builtins
// Prefer the standard `globalThis` when present; fall back to Node's `global`.
exports._globalThis = typeof globalThis === 'object' ? globalThis : global;
//# sourceMappingURL=globalThis.js.map
|
/**
******************************************************************************
* @file Audio/Audio_playback_and_record/Src/waveplayer.c
* @author MCD Application Team
* @brief This file provides the Audio Out (playback) interface API
******************************************************************************
* @attention
*
* <h2><center>© Copyright (c) 2017 STMicroelectronics International N.V.
* All rights reserved.</center></h2>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted, provided that the following conditions are met:
*
* 1. Redistribution of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of other
* contributors to this software may be used to endorse or promote products
* derived from this software without specific written permission.
* 4. This software, including modifications and/or derivative works of this
* software, must execute solely and exclusively on microcontroller or
* microprocessor devices manufactured by or for STMicroelectronics.
* 5. Redistribution and use of this software other than as permitted under
* this license is void and will automatically terminate your rights under
* this license.
*
* THIS SOFTWARE IS PROVIDED BY STMICROELECTRONICS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS, IMPLIED OR STATUTORY WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
* PARTICULAR PURPOSE AND NON-INFRINGEMENT OF THIRD PARTY INTELLECTUAL PROPERTY
* RIGHTS ARE DISCLAIMED TO THE FULLEST EXTENT PERMITTED BY LAW. IN NO EVENT
* SHALL STMICROELECTRONICS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
* OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
/* Includes ------------------------------------------------------------------*/
#include "waveplayer.h"
/* Private define ------------------------------------------------------------*/
/* Private macro -------------------------------------------------------------*/
/* Private typedef -----------------------------------------------------------*/
/* Private variables ---------------------------------------------------------*/
/* Double-buffer control block refilled from the file and consumed by DMA. */
static AUDIO_OUT_BufferTypeDef BufferCtl;
/* Index of the currently selected wave file; signed so the PREVIOUS-state
   decrement can go below zero before wrapping. */
static int16_t FilePos = 0;
/* Output volume in percent; stepped by 10 in the VOLUME_UP/DOWN states. */
static __IO uint32_t uwVolume = 70;
/* Header of the wave file currently playing. */
WAVE_FormatTypeDef WaveFormat;
/* FatFs handle of the open wave file (left open while streaming). */
FIL WavFile;
/* Wave file list; populated outside this file. */
extern FILELIST_FileTypeDef FileList;
/* Private function prototypes -----------------------------------------------*/
static AUDIO_ErrorTypeDef GetFileInfo(uint16_t file_idx, WAVE_FormatTypeDef *info);
static uint8_t PlayerInit(uint32_t AudioFreq);
/* Private functions ---------------------------------------------------------*/
/**
* @brief Initializes Audio Interface.
* @param None
* @retval Audio error
*/
AUDIO_ErrorTypeDef AUDIO_PLAYER_Init(void)
{
if(BSP_AUDIO_OUT_Init(OUTPUT_DEVICE_AUTO, uwVolume, I2S_AUDIOFREQ_48K) == 0)
{
return AUDIO_ERROR_NONE;
}
else
{
return AUDIO_ERROR_IO;
}
}
/**
  * @brief  Starts streaming the wave file selected by index.
  * @param  idx: File index in the wave file list
  * @retval AUDIO_ERROR_NONE when playback starts, AUDIO_ERROR_IO otherwise
  */
AUDIO_ErrorTypeDef AUDIO_PLAYER_Start(uint8_t idx)
{
  uint32_t bytesread;

  /* Release any previously opened file before selecting a new one. */
  f_close(&WavFile);

  if(AUDIO_GetWavObjectNumber() > idx)
  {
    /* Read the WAV header (this also opens WavFile for streaming). */
    GetFileInfo(idx, &WaveFormat);

    /* Adjust the audio frequency to the file's sample rate. */
    PlayerInit(WaveFormat.SampleRate);

    BufferCtl.state = BUFFER_OFFSET_NONE;

    /* Rewind and pre-fill the whole DMA buffer before starting playback. */
    f_lseek(&WavFile, 0);
    if(f_read(&WavFile,
              &BufferCtl.buff[0],
              AUDIO_OUT_BUFFER_SIZE,
              (void *)&bytesread) == FR_OK)
    {
      AudioState = AUDIO_STATE_PLAY;
      BSP_LCD_DisplayStringAt(250, LINE(14), (uint8_t *)" [PLAY ]", LEFT_MODE);
      if(bytesread != 0)
      {
        BSP_AUDIO_OUT_Play((uint16_t*)&BufferCtl.buff[0], AUDIO_OUT_BUFFER_SIZE);
        BufferCtl.fptr = bytesread;
        return AUDIO_ERROR_NONE;
      }
    }
  }
  return AUDIO_ERROR_IO;
}
/**
  * @brief  Manages Audio process.
  *         Runs the playback state machine: refills the DMA double buffer,
  *         refreshes the elapsed-time display, and services user commands
  *         (stop / next / previous / pause / resume / volume).
  * @param  None
  * @retval Audio error
  */
AUDIO_ErrorTypeDef AUDIO_PLAYER_Process(void)
{
  uint32_t bytesread, elapsed_time;
  AUDIO_ErrorTypeDef audio_error = AUDIO_ERROR_NONE;
  /* Sentinel forces the time display to refresh on the first pass. */
  static uint32_t prev_elapsed_time = 0xFFFFFFFF;
  uint8_t str[16];
  switch(AudioState)
  {
  case AUDIO_STATE_PLAY:
    /* End of file reached: stop the codec and advance to the next track on
       the following pass through this function. */
    if(BufferCtl.fptr >= WaveFormat.FileSize)
    {
      BSP_AUDIO_OUT_Stop(CODEC_PDWN_SW);
      AudioState = AUDIO_STATE_NEXT;
    }
    /* First buffer half consumed by DMA: refill it from the file. */
    if(BufferCtl.state == BUFFER_OFFSET_HALF)
    {
      if(f_read(&WavFile,
                &BufferCtl.buff[0],
                AUDIO_OUT_BUFFER_SIZE/2,
                (void *)&bytesread) != FR_OK)
      {
        BSP_AUDIO_OUT_Stop(CODEC_PDWN_SW);
        return AUDIO_ERROR_IO;
      }
      BufferCtl.state = BUFFER_OFFSET_NONE;
      BufferCtl.fptr += bytesread;
    }
    /* Second buffer half consumed: refill it from the file. */
    if(BufferCtl.state == BUFFER_OFFSET_FULL)
    {
      if(f_read(&WavFile,
                &BufferCtl.buff[AUDIO_OUT_BUFFER_SIZE /2],
                AUDIO_OUT_BUFFER_SIZE/2,
                (void *)&bytesread) != FR_OK)
      {
        BSP_AUDIO_OUT_Stop(CODEC_PDWN_SW);
        return AUDIO_ERROR_IO;
      }
      BufferCtl.state = BUFFER_OFFSET_NONE;
      BufferCtl.fptr += bytesread;
    }
    /* Display elapsed time (only redrawn when the second changes). */
    elapsed_time = BufferCtl.fptr / WaveFormat.ByteRate;
    if(prev_elapsed_time != elapsed_time)
    {
      prev_elapsed_time = elapsed_time;
      sprintf((char *)str, "[%02d:%02d]", (int)(elapsed_time /60), (int)(elapsed_time%60));
      BSP_LCD_SetTextColor(LCD_COLOR_CYAN);
      BSP_LCD_DisplayStringAt(263, LINE(8), str, LEFT_MODE);
      BSP_LCD_SetTextColor(LCD_COLOR_WHITE);
    }
    break;
  case AUDIO_STATE_STOP:
    BSP_AUDIO_OUT_Stop(CODEC_PDWN_SW);
    AudioState = AUDIO_STATE_IDLE;
    audio_error = AUDIO_ERROR_IO;
    break;
  case AUDIO_STATE_NEXT:
    /* Wrap to the first track past the end of the list. */
    if(++FilePos >= AUDIO_GetWavObjectNumber())
    {
      FilePos = 0;
    }
    BSP_AUDIO_OUT_Stop(CODEC_PDWN_SW);
    AUDIO_PLAYER_Start(FilePos);
    break;
  case AUDIO_STATE_PREVIOUS:
    /* Wrap to the last track before the start of the list. */
    if(--FilePos < 0)
    {
      FilePos = AUDIO_GetWavObjectNumber() - 1;
    }
    BSP_AUDIO_OUT_Stop(CODEC_PDWN_SW);
    AUDIO_PLAYER_Start(FilePos);
    break;
  case AUDIO_STATE_PAUSE:
    BSP_LCD_ClearStringLine(LINE(14));
    BSP_LCD_DisplayStringAt(250, LINE(14), (uint8_t *)" [PAUSE]", LEFT_MODE);
    BSP_AUDIO_OUT_Pause();
    /* WAIT is the paused state; JOY_SEL toggles back to RESUME. */
    AudioState = AUDIO_STATE_WAIT;
    break;
  case AUDIO_STATE_RESUME:
    BSP_LCD_ClearStringLine(LINE(14));
    BSP_LCD_DisplayStringAt(250, LINE(14), (uint8_t *)" [PLAY ]", LEFT_MODE);
    BSP_AUDIO_OUT_Resume();
    AudioState = AUDIO_STATE_PLAY;
    break;
  case AUDIO_STATE_VOLUME_UP:
    /* Volume is stepped in units of 10, clamped to 100. */
    if( uwVolume <= 90)
    {
      uwVolume += 10;
    }
    BSP_AUDIO_OUT_SetVolume(uwVolume);
    AudioState = AUDIO_STATE_PLAY;
    break;
  case AUDIO_STATE_VOLUME_DOWN:
    /* Volume is stepped in units of 10, clamped to 0. */
    if( uwVolume >= 10)
    {
      uwVolume -= 10;
    }
    BSP_AUDIO_OUT_SetVolume(uwVolume);
    AudioState = AUDIO_STATE_PLAY;
    break;
  case AUDIO_STATE_WAIT:
  case AUDIO_STATE_IDLE:
  case AUDIO_STATE_INIT:
  default:
    /* Do Nothing */
    break;
  }
  return audio_error;
}
/**
  * @brief  Stops Audio streaming.
  * @param  None
  * @retval Audio error
  */
AUDIO_ErrorTypeDef AUDIO_PLAYER_Stop(void)
{
  /* Reset the state machine and the track index. */
  AudioState = AUDIO_STATE_STOP;
  FilePos = 0;
  /* Power the codec down via software control, then release the file handle. */
  BSP_AUDIO_OUT_Stop(CODEC_PDWN_SW);
  f_close(&WavFile);
  return AUDIO_ERROR_NONE;
}
/**
  * @brief  Probes the play back joystick state and maps it to a player command.
  * @param  state: Joystick state
  * @retval None
  */
void AUDIO_PlaybackProbeKey(JOYState_TypeDef state)
{
  switch(state)
  {
  case JOY_UP:
    /* Volume changes are only honoured while actively playing. */
    if(AudioState == AUDIO_STATE_PLAY)
    {
      AudioState = AUDIO_STATE_VOLUME_UP;
    }
    break;
  case JOY_DOWN:
    if(AudioState == AUDIO_STATE_PLAY)
    {
      AudioState = AUDIO_STATE_VOLUME_DOWN;
    }
    break;
  case JOY_RIGHT:
    AudioState = AUDIO_STATE_NEXT;
    break;
  case JOY_LEFT:
    AudioState = AUDIO_STATE_PREVIOUS;
    break;
  case JOY_SEL:
    /* SEL toggles between pause (WAIT) and resume. */
    AudioState = (AudioState == AUDIO_STATE_WAIT) ? AUDIO_STATE_RESUME : AUDIO_STATE_PAUSE;
    break;
  default:
    break;
  }
}
/**
  * @brief  Calculates the remaining file size and new position of the pointer.
  *         DMA full-transfer-complete callback: the second buffer half has
  *         just been consumed.
  * @param  None
  * @retval None
  */
void BSP_AUDIO_OUT_TransferComplete_CallBack(void)
{
  if(AudioState == AUDIO_STATE_PLAY)
  {
    /* Flag the first half for refill; AUDIO_PLAYER_Process() polls this flag
       and performs the actual f_read outside interrupt context. */
    BufferCtl.state = BUFFER_OFFSET_FULL;
    BSP_AUDIO_OUT_ChangeBuffer((uint16_t*)&BufferCtl.buff[0], AUDIO_OUT_BUFFER_SIZE /2);
  }
}
/**
  * @brief  Manages the DMA Half Transfer complete interrupt: the first buffer
  *         half has just been consumed.
  * @param  None
  * @retval None
  */
void BSP_AUDIO_OUT_HalfTransfer_CallBack(void)
{
  if(AudioState == AUDIO_STATE_PLAY)
  {
    /* Flag the first half for refill; AUDIO_PLAYER_Process() polls this flag
       and performs the actual f_read outside interrupt context. */
    BufferCtl.state = BUFFER_OFFSET_HALF;
    BSP_AUDIO_OUT_ChangeBuffer((uint16_t*)&BufferCtl.buff[AUDIO_OUT_BUFFER_SIZE /2], AUDIO_OUT_BUFFER_SIZE /2);
  }
}
/*******************************************************************************
Static Functions
*******************************************************************************/
/**
  * @brief  Gets the file info.
  * @param  file_idx: File index
  * @param  info: Pointer to WAV file info
  * @retval Audio error
  * @note   On success the file is intentionally left open: WavFile is reused
  *         by AUDIO_PLAYER_Start() to stream the audio data.
  */
static AUDIO_ErrorTypeDef GetFileInfo(uint16_t file_idx, WAVE_FormatTypeDef *info)
{
  uint32_t bytesread;
  uint32_t duration;
  uint8_t str[FILEMGR_FILE_NAME_SIZE + 20];
  if(f_open(&WavFile, (char *)FileList.file[file_idx].name, FA_OPEN_EXISTING | FA_READ) == FR_OK)
  {
    /* Read the WAV header straight into the caller's structure. */
    if(f_read(&WavFile, info, sizeof(WaveFormat), (void *)&bytesread) == FR_OK)
    {
      BSP_LCD_SetTextColor(LCD_COLOR_WHITE);
      /* NOTE(review): sprintf assumes the banner prefix and the two counters
         fit inside the extra 20 bytes beyond the file name - verify budget. */
      sprintf((char *)str, "Playing file (%d/%d): %s",
              file_idx + 1, FileList.ptr,
              (char *)FileList.file[file_idx].name);
      BSP_LCD_ClearStringLine(4);
      BSP_LCD_DisplayStringAtLine(4, str);
      BSP_LCD_SetTextColor(LCD_COLOR_CYAN);
      sprintf((char *)str, "Sample rate : %d Hz", (int)(info->SampleRate));
      BSP_LCD_ClearStringLine(6);
      BSP_LCD_DisplayStringAtLine(6, str);
      sprintf((char *)str, "Channels number : %d", info->NbrChannels);
      BSP_LCD_ClearStringLine(7);
      BSP_LCD_DisplayStringAtLine(7, str);
      /* Rough duration from total size and byte rate (header not validated). */
      duration = info->FileSize / info->ByteRate;
      sprintf((char *)str, "File Size : %d KB [%02d:%02d]", (int)(info->FileSize/1024), (int)(duration/60), (int)(duration%60));
      BSP_LCD_ClearStringLine(8);
      BSP_LCD_DisplayStringAtLine(8, str);
      BSP_LCD_DisplayStringAt(263, LINE(8), (uint8_t *)"[00:00]", LEFT_MODE);
      BSP_LCD_SetTextColor(LCD_COLOR_WHITE);
      return AUDIO_ERROR_NONE;
    }
    /* Header read failed: close the file before reporting the error. */
    f_close(&WavFile);
  }
  return AUDIO_ERROR_IO;
}
/**
  * @brief  Initializes the Wave player for a new sampling frequency.
  * @param  AudioFreq: Audio sampling frequency
  * @retval 0 on success, 1 when the BSP initialization fails
  */
static uint8_t PlayerInit(uint32_t AudioFreq)
{
  /* Initialize the Audio codec and all related peripherals (I2S, I2C, IOExpander, IOs...) */
  if(BSP_AUDIO_OUT_Init(OUTPUT_DEVICE_BOTH, uwVolume, AudioFreq) != 0)
  {
    return 1;
  }
  /* Select the codec audio frame slot configuration used by this board. */
  BSP_AUDIO_OUT_SetAudioFrameSlot(CODEC_AUDIOFRAME_SLOT_02);
  return 0;
}
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|
from setuptools import setup
requires = [
'click==6.7',
'bucketstore==0.1.1'
]
setup(
name="s3env",
version="0.0.4",
author="Cameron Maske",
description="Manipulate a key/value JSON object file in an S3 bucket through the CLI",
author_email="cameronmaske@gmail.com",
url='https://github.com/cameronmaske/s3env',
py_modules=['s3env'],
license='MIT',
install_requires=requires,
entry_points='''
[console_scripts]
s3env=s3env:cli
''',
)
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import threading
import time
import ast
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse # pylint: disable=import-error
from binascii import hexlify
from os import urandom
import datetime
import json
import ssl
import sys
import uuid
from functools import reduce
from nacl import encoding, public
from six.moves.urllib.request import urlopen # pylint: disable=import-error, ungrouped-imports
import OpenSSL.crypto
from fabric import Connection
from knack.prompting import prompt_pass, NoTTYException, prompt_y_n
from knack.util import CLIError
from knack.log import get_logger
from msrestazure.azure_exceptions import CloudError
from msrestazure.tools import is_valid_resource_id, parse_resource_id, resource_id
from azure.mgmt.storage import StorageManagementClient
from azure.mgmt.applicationinsights import ApplicationInsightsManagementClient
from azure.mgmt.relay.models import AccessRights
from azure.mgmt.web.models import KeyInfo
from azure.cli.command_modules.relay._client_factory import hycos_mgmt_client_factory, namespaces_mgmt_client_factory
from azure.cli.command_modules.network._client_factory import network_client_factory
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.cli.core.commands import LongRunningOperation
from azure.cli.core.util import in_cloud_console, shell_safe_json_parse, open_page_in_browser, get_json_object, \
ConfiguredDefaultSetter, sdk_no_wait, get_file_json
from azure.cli.core.util import get_az_user_agent, send_raw_request
from azure.cli.core.profiles import ResourceType, get_sdk
from azure.cli.core.azclierror import (ResourceNotFoundError, RequiredArgumentMissingError, ValidationError,
CLIInternalError, UnclassifiedUserFault, AzureResponseError,
ArgumentUsageError, MutuallyExclusiveArgumentError)
from .tunnel import TunnelServer
from ._params import AUTH_TYPES, MULTI_CONTAINER_TYPES
from ._client_factory import web_client_factory, ex_handler_factory, providers_client_factory
from ._appservice_utils import _generic_site_operation, _generic_settings_operation
from .utils import _normalize_sku, get_sku_name, retryable_method, raise_missing_token_suggestion
from ._create_util import (zip_contents_from_dir, get_runtime_version_details, create_resource_group, get_app_details,
should_create_new_rg, set_location, get_site_availability, get_profile_username,
get_plan_to_use, get_lang_from_content, get_rg_to_use, get_sku_to_use,
detect_os_form_src, get_current_stack_from_runtime, generate_default_app_name)
from ._constants import (FUNCTIONS_STACKS_API_JSON_PATHS, FUNCTIONS_STACKS_API_KEYS,
FUNCTIONS_LINUX_RUNTIME_VERSION_REGEX, FUNCTIONS_WINDOWS_RUNTIME_VERSION_REGEX,
NODE_EXACT_VERSION_DEFAULT, RUNTIME_STACKS, FUNCTIONS_NO_V2_REGIONS, PUBLIC_CLOUD,
LINUX_GITHUB_ACTIONS_WORKFLOW_TEMPLATE_PATH, WINDOWS_GITHUB_ACTIONS_WORKFLOW_TEMPLATE_PATH)
from ._github_oauth import (get_github_access_token)
logger = get_logger(__name__)
# pylint:disable=no-member,too-many-lines,too-many-locals
# region "Common routines shared with quick-start extensions."
# Please maintain compatibility in both interfaces and functionalities"
def create_webapp(cmd, resource_group_name, name, plan, runtime=None, startup_file=None, # pylint: disable=too-many-statements,too-many-branches
                  deployment_container_image_name=None, deployment_source_url=None, deployment_source_branch='master',
                  deployment_local_git=None, docker_registry_server_password=None, docker_registry_server_user=None,
                  multicontainer_config_type=None, multicontainer_config_file=None, tags=None,
                  using_webapp_up=False, language=None, assign_identities=None,
                  role='Contributor', scope=None):
    """Create (or update) an App Service web app on an existing plan.

    Validates the mutually exclusive container/runtime options, builds the
    SiteConfig for Linux, Windows-container or Windows-runtime apps, issues
    the create-or-update, then applies post-create wiring (git deployment,
    FTP URL, container settings, managed identity).  Raises CLIError for
    invalid usage or when the plan/app lookups fail.
    """
    SiteConfig, SkuDescription, Site, NameValuePair = cmd.get_models(
        'SiteConfig', 'SkuDescription', 'Site', 'NameValuePair')
    if deployment_source_url and deployment_local_git:
        raise CLIError('usage error: --deployment-source-url <url> | --deployment-local-git')
    docker_registry_server_url = parse_docker_image_name(deployment_container_image_name)
    client = web_client_factory(cmd.cli_ctx)
    # The plan may be given as a full resource id or as a bare name in the
    # target resource group.
    if is_valid_resource_id(plan):
        parse_result = parse_resource_id(plan)
        plan_info = client.app_service_plans.get(parse_result['resource_group'], parse_result['name'])
    else:
        plan_info = client.app_service_plans.get(resource_group_name, plan)
    if not plan_info:
        raise CLIError("The plan '{}' doesn't exist in the resource group '{}".format(plan, resource_group_name))
    is_linux = plan_info.reserved
    node_default_version = NODE_EXACT_VERSION_DEFAULT
    location = plan_info.location
    # This is to keep the existing appsettings for a newly created webapp on existing webapp name.
    name_validation = get_site_availability(cmd, name)
    if not name_validation.name_available:
        if name_validation.reason == 'Invalid':
            raise CLIError(name_validation.message)
        logger.warning("Webapp '%s' already exists. The command will use the existing app's settings.", name)
        app_details = get_app_details(cmd, name)
        if app_details is None:
            raise CLIError("Unable to retrieve details of the existing app '{}'. Please check that "
                           "the app is a part of the current subscription".format(name))
        current_rg = app_details.resource_group
        if resource_group_name is not None and (resource_group_name.lower() != current_rg.lower()):
            raise CLIError("The webapp '{}' exists in resource group '{}' and does not "
                           "match the value entered '{}'. Please re-run command with the "
                           "correct parameters.". format(name, current_rg, resource_group_name))
        existing_app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name,
                                                        name, 'list_application_settings')
        settings = []
        for k, v in existing_app_settings.properties.items():
            settings.append(NameValuePair(name=k, value=v))
        site_config = SiteConfig(app_settings=settings)
    else:
        site_config = SiteConfig(app_settings=[])
    # always_on is not available on the free/shared/basic tiers.
    if isinstance(plan_info.sku, SkuDescription) and plan_info.sku.name.upper() not in ['F1', 'FREE', 'SHARED', 'D1',
                                                                                       'B1', 'B2', 'B3', 'BASIC']:
        site_config.always_on = True
    webapp_def = Site(location=location, site_config=site_config, server_farm_id=plan_info.id, tags=tags,
                      https_only=using_webapp_up)
    helper = _StackRuntimeHelper(cmd, client, linux=is_linux)
    if runtime:
        runtime = helper.remove_delimiters(runtime)
    current_stack = None
    # Three OS/packaging flavors: Linux, Windows container (xenon), and
    # classic Windows (with or without an explicit runtime).
    if is_linux:
        if not validate_container_app_create_options(runtime, deployment_container_image_name,
                                                     multicontainer_config_type, multicontainer_config_file):
            raise CLIError("usage error: --runtime | --deployment-container-image-name |"
                           " --multicontainer-config-type TYPE --multicontainer-config-file FILE")
        if startup_file:
            site_config.app_command_line = startup_file
        if runtime:
            match = helper.resolve(runtime)
            if not match:
                raise CLIError("Linux Runtime '{}' is not supported."
                               " Please invoke 'az webapp list-runtimes --linux' to cross check".format(runtime))
            match['setter'](cmd=cmd, stack=match, site_config=site_config)
        elif deployment_container_image_name:
            site_config.linux_fx_version = _format_fx_version(deployment_container_image_name)
            if name_validation.name_available:
                site_config.app_settings.append(NameValuePair(name="WEBSITES_ENABLE_APP_SERVICE_STORAGE",
                                                              value="false"))
        elif multicontainer_config_type and multicontainer_config_file:
            encoded_config_file = _get_linux_multicontainer_encoded_config_from_file(multicontainer_config_file)
            site_config.linux_fx_version = _format_fx_version(encoded_config_file, multicontainer_config_type)
    elif plan_info.is_xenon: # windows container webapp
        if deployment_container_image_name:
            site_config.windows_fx_version = _format_fx_version(deployment_container_image_name)
        # set the needed app settings for container image validation
        if name_validation.name_available:
            site_config.app_settings.append(NameValuePair(name="DOCKER_REGISTRY_SERVER_USERNAME",
                                                          value=docker_registry_server_user))
            site_config.app_settings.append(NameValuePair(name="DOCKER_REGISTRY_SERVER_PASSWORD",
                                                          value=docker_registry_server_password))
            site_config.app_settings.append(NameValuePair(name="DOCKER_REGISTRY_SERVER_URL",
                                                          value=docker_registry_server_url))
    elif runtime: # windows webapp with runtime specified
        if any([startup_file, deployment_container_image_name, multicontainer_config_file, multicontainer_config_type]):
            raise CLIError("usage error: --startup-file or --deployment-container-image-name or "
                           "--multicontainer-config-type and --multicontainer-config-file is "
                           "only appliable on linux webapp")
        match = helper.resolve(runtime)
        if not match:
            raise CLIError("Windows runtime '{}' is not supported. "
                           "Please invoke 'az webapp list-runtimes' to cross check".format(runtime))
        match['setter'](cmd=cmd, stack=match, site_config=site_config)
        # TODO: Ask Calvin the purpose of this - seems like unneeded set of calls
        # portal uses the current_stack propety in metadata to display stack for windows apps
        current_stack = get_current_stack_from_runtime(runtime)
    else: # windows webapp without runtime specified
        if name_validation.name_available: # If creating new webapp
            site_config.app_settings.append(NameValuePair(name="WEBSITE_NODE_DEFAULT_VERSION",
                                                          value=node_default_version))
    if site_config.app_settings:
        for setting in site_config.app_settings:
            logger.info('Will set appsetting %s', setting)
    if using_webapp_up: # when the routine is invoked as a help method for webapp up
        if name_validation.name_available:
            logger.info("will set appsetting for enabling build")
            site_config.app_settings.append(NameValuePair(name="SCM_DO_BUILD_DURING_DEPLOYMENT", value=True))
    if language is not None and language.lower() == 'dotnetcore':
        if name_validation.name_available:
            site_config.app_settings.append(NameValuePair(name='ANCM_ADDITIONAL_ERROR_PAGE_LINK',
                                                          value='https://{}.scm.azurewebsites.net/detectors'
                                                          .format(name)))
    poller = client.web_apps.begin_create_or_update(resource_group_name, name, webapp_def)
    webapp = LongRunningOperation(cmd.cli_ctx)(poller)
    # TO DO: (Check with Calvin) This seems to be something specific to portal client use only & should be removed
    if current_stack:
        _update_webapp_current_stack_property_if_needed(cmd, resource_group_name, name, current_stack)
    # Ensure SCC operations follow right after the 'create', no precedent appsetting update commands
    _set_remote_or_local_git(cmd, webapp, resource_group_name, name, deployment_source_url,
                             deployment_source_branch, deployment_local_git)
    _fill_ftp_publishing_url(cmd, webapp, resource_group_name, name)
    if deployment_container_image_name:
        logger.info("Updating container settings")
        update_container_settings(cmd, resource_group_name, name, docker_registry_server_url,
                                  deployment_container_image_name, docker_registry_server_user,
                                  docker_registry_server_password=docker_registry_server_password)
    if assign_identities is not None:
        identity = assign_identity(cmd, resource_group_name, name, assign_identities,
                                   role, None, scope)
        webapp.identity = identity
    return webapp
def validate_container_app_create_options(runtime=None, deployment_container_image_name=None,
                                          multicontainer_config_type=None, multicontainer_config_file=None):
    """Return True when exactly one container-app creation mode is specified.

    The three mutually exclusive modes are: a runtime stack, a single container
    image, or a multicontainer config. A multicontainer config requires both its
    type and its file to be given together.
    """
    # Config type and config file must be supplied as a pair.
    if bool(multicontainer_config_type) != bool(multicontainer_config_file):
        return False
    chosen = sum(1 for option in (runtime, deployment_container_image_name, multicontainer_config_type) if option)
    return chosen == 1  # you can only specify one out the combinations
def parse_docker_image_name(deployment_container_image_name):
    """Extract the registry server URL from a container image name.

    Returns the host part (everything before the last '/') when it looks like a
    registry address (contains '.' or ':'); otherwise returns None, which means
    the image is resolved against the default registry (Docker Hub).
    """
    if not deployment_container_image_name:
        return None
    registry, separator, _ = deployment_container_image_name.rpartition('/')
    if not separator:
        # No '/' at all: plain image name like "nginx".
        return None
    if '.' not in registry and ':' not in registry:
        # A prefix like "user/image" is a namespace, not a registry host.
        return None
    return registry
def update_app_settings(cmd, resource_group_name, name, settings=None, slot=None, slot_settings=None):
    """Add or update app settings; entries in slot_settings are additionally
    marked as slot-sticky so they do not move on slot swap.

    Each entry is either "KEY=VALUE" or JSON (including the output of the
    corresponding "list" command).
    """
    if not settings and not slot_settings:
        raise CLIError('Usage Error: --settings |--slot-settings')
    settings = settings or []
    slot_settings = slot_settings or []
    app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                           'list_application_settings', slot)
    result, slot_result = {}, {}
    # pylint: disable=too-many-nested-blocks
    for src, dest, setting_type in [(settings, result, "Settings"), (slot_settings, slot_result, "SlotSettings")]:
        for s in src:
            try:
                # JSON input: either a dict or the list-of-dicts shape produced
                # by the "list" command.
                temp = shell_safe_json_parse(s)
                if isinstance(temp, list):  # a bit messy, but we'd like accept the output of the "list" command
                    for t in temp:
                        if 'slotSetting' in t.keys():
                            slot_result[t['name']] = t['slotSetting']
                        if setting_type == "SlotSettings":
                            slot_result[t['name']] = True
                        result[t['name']] = t['value']
                else:
                    dest.update(temp)
            except CLIError:
                # Not JSON: treat as KEY=VALUE (split only on the first '=').
                setting_name, value = s.split('=', 1)
                dest[setting_name] = value
    # NOTE(review): 'dest' here is the loop variable left over from the final
    # iteration (the slot_settings dict) — merges slot settings into result.
    result.update(dest)
    for setting_name, value in result.items():
        app_settings.properties[setting_name] = value
    client = web_client_factory(cmd.cli_ctx)
    result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
                                         'update_application_settings',
                                         app_settings, slot, client)
    app_settings_slot_cfg_names = []
    if slot_result:
        slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
        slot_cfg_names.app_setting_names = slot_cfg_names.app_setting_names or []
        # Slot settings logic to add a new setting(s) or remove an existing setting(s)
        for slot_setting_name, value in slot_result.items():
            if value and slot_setting_name not in slot_cfg_names.app_setting_names:
                slot_cfg_names.app_setting_names.append(slot_setting_name)
            elif not value and slot_setting_name in slot_cfg_names.app_setting_names:
                slot_cfg_names.app_setting_names.remove(slot_setting_name)
        app_settings_slot_cfg_names = slot_cfg_names.app_setting_names
        client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
    return _build_app_settings_output(result.properties, app_settings_slot_cfg_names)
def add_azure_storage_account(cmd, resource_group_name, name, custom_id, storage_type, account_name,
                              share_name, access_key, mount_path=None, slot=None, slot_setting=False):
    """Attach a new Azure storage mount (identified by custom_id) to a web app.

    Raises CLIError when a configuration with the same custom_id already exists;
    the 'update' command must be used in that case.
    """
    AzureStorageInfoValue = cmd.get_models('AzureStorageInfoValue')
    azure_storage_accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                                     'list_azure_storage_accounts', slot)
    if custom_id in azure_storage_accounts.properties:
        raise CLIError("Site already configured with an Azure storage account with the id '{}'. "
                       "Use 'az webapp config storage-account update' to update an existing "
                       "Azure storage account configuration.".format(custom_id))
    azure_storage_accounts.properties[custom_id] = AzureStorageInfoValue(type=storage_type, account_name=account_name,
                                                                         share_name=share_name, access_key=access_key,
                                                                         mount_path=mount_path)
    client = web_client_factory(cmd.cli_ctx)
    result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
                                         'update_azure_storage_accounts', azure_storage_accounts,
                                         slot, client)
    if slot_setting:
        # Mark this storage config as slot-sticky so a swap does not move it.
        slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
        slot_cfg_names.azure_storage_config_names = slot_cfg_names.azure_storage_config_names or []
        if custom_id not in slot_cfg_names.azure_storage_config_names:
            slot_cfg_names.azure_storage_config_names.append(custom_id)
        client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
    return result.properties
def update_azure_storage_account(cmd, resource_group_name, name, custom_id, storage_type=None, account_name=None,
                                 share_name=None, access_key=None, mount_path=None, slot=None, slot_setting=False):
    """Update an existing Azure storage mount; unspecified fields keep their
    current values. Raises CLIError when custom_id is not configured yet."""
    AzureStorageInfoValue = cmd.get_models('AzureStorageInfoValue')
    azure_storage_accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                                     'list_azure_storage_accounts', slot)
    # pop() removes the old entry; it is re-added below under the same id.
    existing_account_config = azure_storage_accounts.properties.pop(custom_id, None)
    if not existing_account_config:
        raise CLIError("No Azure storage account configuration found with the id '{}'. "
                       "Use 'az webapp config storage-account add' to add a new "
                       "Azure storage account configuration.".format(custom_id))
    # Merge: explicit arguments win, otherwise fall back to the existing value.
    new_account_config = AzureStorageInfoValue(
        type=storage_type or existing_account_config.type,
        account_name=account_name or existing_account_config.account_name,
        share_name=share_name or existing_account_config.share_name,
        access_key=access_key or existing_account_config.access_key,
        mount_path=mount_path or existing_account_config.mount_path
    )
    azure_storage_accounts.properties[custom_id] = new_account_config
    client = web_client_factory(cmd.cli_ctx)
    result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
                                         'update_azure_storage_accounts', azure_storage_accounts,
                                         slot, client)
    if slot_setting:
        # Mark this storage config as slot-sticky so a swap does not move it.
        slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
        slot_cfg_names.azure_storage_config_names = slot_cfg_names.azure_storage_config_names or []
        if custom_id not in slot_cfg_names.azure_storage_config_names:
            slot_cfg_names.azure_storage_config_names.append(custom_id)
        client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
    return result.properties
def enable_zip_deploy_functionapp(cmd, resource_group_name, name, src, build_remote=False, timeout=None, slot=None):
    """Zip-deploy a function app, routing to the right mechanism.

    Linux consumption apps without remote build upload the package to storage
    (run-from-package); everything else goes through the Kudu zipdeploy
    endpoint, with remote-build app settings toggled accordingly.
    """
    client = web_client_factory(cmd.cli_ctx)
    app = client.web_apps.get(resource_group_name, name)
    if app is None:
        raise CLIError('The function app \'{}\' was not found in resource group \'{}\'. '
                       'Please make sure these values are correct.'.format(name, resource_group_name))
    parse_plan_id = parse_resource_id(app.server_farm_id)
    plan_info = None
    retry_delay = 10  # seconds
    # We need to retry getting the plan because sometimes if the plan is created as part of function app,
    # it can take a couple of tries before it gets the plan
    for _ in range(5):
        plan_info = client.app_service_plans.get(parse_plan_id['resource_group'],
                                                 parse_plan_id['name'])
        if plan_info is not None:
            break
        time.sleep(retry_delay)
    if plan_info is None:
        # Fail with a clear message instead of an AttributeError inside
        # is_plan_consumption() further down.
        raise CLIError('Could not determine the app service plan of the function app \'{}\'. '
                       'Please try again later.'.format(name))
    if build_remote and not app.reserved:
        raise CLIError('Remote build is only available on Linux function apps')
    is_consumption = is_plan_consumption(cmd, plan_info)
    if (not build_remote) and is_consumption and app.reserved:
        # Linux consumption without remote build: run-from-package via storage.
        return upload_zip_to_storage(cmd, resource_group_name, name, src, slot)
    if build_remote:
        add_remote_build_app_settings(cmd, resource_group_name, name, slot)
    else:
        remove_remote_build_app_settings(cmd, resource_group_name, name, slot)
    return enable_zip_deploy(cmd, resource_group_name, name, src, timeout, slot)
def enable_zip_deploy_webapp(cmd, resource_group_name, name, src, timeout=None, slot=None):
    """Thin wrapper: zip-deploy a web app through the shared enable_zip_deploy."""
    return enable_zip_deploy(cmd, resource_group_name, name, src, timeout=timeout, slot=slot)
def enable_zip_deploy(cmd, resource_group_name, name, src, timeout=None, slot=None):
    """POST the local zip at 'src' to the app's Kudu zipdeploy endpoint
    (async mode) and poll the deployment status endpoint until done.

    Raises CLIError on a 409 (another deployment in progress, or the app is
    configured with WEBSITE_RUN_FROM_PACKAGE).
    """
    logger.warning("Getting scm site credentials for zip deployment")
    user_name, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
    try:
        scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
    except ValueError:
        raise CLIError('Failed to fetch scm url for function app')
    zip_url = scm_url + '/api/zipdeploy?isAsync=true'
    deployment_status_url = scm_url + '/api/deployments/latest'
    import urllib3
    authorization = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
    # NOTE: 'headers' aliases 'authorization'; the extra header assignments
    # below therefore also end up in the dict passed to the status poller.
    headers = authorization
    headers['Content-Type'] = 'application/octet-stream'
    headers['Cache-Control'] = 'no-cache'
    headers['User-Agent'] = get_az_user_agent()
    import requests
    import os
    from azure.cli.core.util import should_disable_connection_verify
    # Read file content
    with open(os.path.realpath(os.path.expanduser(src)), 'rb') as fs:
        zip_content = fs.read()
        logger.warning("Starting zip deployment. This operation can take a while to complete ...")
        res = requests.post(zip_url, data=zip_content, headers=headers, verify=not should_disable_connection_verify())
        logger.warning("Deployment endpoint responded with status code %d", res.status_code)
    # check if there's an ongoing process
    if res.status_code == 409:
        raise CLIError("There may be an ongoing deployment or your app setting has WEBSITE_RUN_FROM_PACKAGE. "
                       "Please track your deployment in {} and ensure the WEBSITE_RUN_FROM_PACKAGE app setting "
                       "is removed.".format(deployment_status_url))
    # check the status of async deployment
    response = _check_zip_deployment_status(cmd, resource_group_name, name, deployment_status_url,
                                            authorization, timeout)
    return response
def add_remote_build_app_settings(cmd, resource_group_name, name, slot):
    """Prepare app settings for a remote (Oryx) build: force
    SCM_DO_BUILD_DURING_DEPLOYMENT=true and remove WEBSITE_RUN_FROM_PACKAGE and
    ENABLE_ORYX_BUILD, then wait for the SCM site to pick up the changes.
    """
    settings = get_app_settings(cmd, resource_group_name, name, slot)
    scm_do_build_during_deployment = None
    website_run_from_package = None
    enable_oryx_build = None
    app_settings_should_not_have = []
    app_settings_should_contain = {}
    # Snapshot the current values of the three settings we care about.
    for keyval in settings:
        value = keyval['value'].lower()
        if keyval['name'] == 'SCM_DO_BUILD_DURING_DEPLOYMENT':
            scm_do_build_during_deployment = value in ('true', '1')
        if keyval['name'] == 'WEBSITE_RUN_FROM_PACKAGE':
            website_run_from_package = value
        if keyval['name'] == 'ENABLE_ORYX_BUILD':
            enable_oryx_build = value
    if scm_do_build_during_deployment is not True:
        logger.warning("Setting SCM_DO_BUILD_DURING_DEPLOYMENT to true")
        update_app_settings(cmd, resource_group_name, name, [
            "SCM_DO_BUILD_DURING_DEPLOYMENT=true"
        ], slot)
        app_settings_should_contain['SCM_DO_BUILD_DURING_DEPLOYMENT'] = 'true'
    if website_run_from_package:
        logger.warning("Removing WEBSITE_RUN_FROM_PACKAGE app setting")
        delete_app_settings(cmd, resource_group_name, name, [
            "WEBSITE_RUN_FROM_PACKAGE"
        ], slot)
        app_settings_should_not_have.append('WEBSITE_RUN_FROM_PACKAGE')
    if enable_oryx_build:
        logger.warning("Removing ENABLE_ORYX_BUILD app setting")
        delete_app_settings(cmd, resource_group_name, name, [
            "ENABLE_ORYX_BUILD"
        ], slot)
        app_settings_should_not_have.append('ENABLE_ORYX_BUILD')
    # Wait for scm site to get the latest app settings
    if app_settings_should_not_have or app_settings_should_contain:
        logger.warning("Waiting SCM site to be updated with the latest app settings")
        scm_is_up_to_date = False
        retries = 10
        # Poll the SCM settings endpoint (5s apart) until it reflects our edits.
        while not scm_is_up_to_date and retries >= 0:
            scm_is_up_to_date = validate_app_settings_in_scm(
                cmd, resource_group_name, name, slot,
                should_contain=app_settings_should_contain,
                should_not_have=app_settings_should_not_have)
            retries -= 1
            time.sleep(5)
        if retries < 0:
            logger.warning("App settings may not be propagated to the SCM site.")
def remove_remote_build_app_settings(cmd, resource_group_name, name, slot):
    """Disable remote build: force SCM_DO_BUILD_DURING_DEPLOYMENT=false and wait
    for the SCM site to pick up the change."""
    settings = get_app_settings(cmd, resource_group_name, name, slot)
    scm_do_build_during_deployment = None
    app_settings_should_contain = {}
    for keyval in settings:
        value = keyval['value'].lower()
        if keyval['name'] == 'SCM_DO_BUILD_DURING_DEPLOYMENT':
            scm_do_build_during_deployment = value in ('true', '1')
    if scm_do_build_during_deployment is not False:
        logger.warning("Setting SCM_DO_BUILD_DURING_DEPLOYMENT to false")
        update_app_settings(cmd, resource_group_name, name, [
            "SCM_DO_BUILD_DURING_DEPLOYMENT=false"
        ], slot)
        app_settings_should_contain['SCM_DO_BUILD_DURING_DEPLOYMENT'] = 'false'
    # Wait for scm site to get the latest app settings
    if app_settings_should_contain:
        logger.warning("Waiting SCM site to be updated with the latest app settings")
        scm_is_up_to_date = False
        retries = 10
        # Poll the SCM settings endpoint (5s apart) until it reflects our edit.
        while not scm_is_up_to_date and retries >= 0:
            scm_is_up_to_date = validate_app_settings_in_scm(
                cmd, resource_group_name, name, slot,
                should_contain=app_settings_should_contain)
            retries -= 1
            time.sleep(5)
        if retries < 0:
            logger.warning("App settings may not be propagated to the SCM site")
def upload_zip_to_storage(cmd, resource_group_name, name, src, slot=None):
    """Upload the zip at 'src' to the function app's storage account and point
    WEBSITE_RUN_FROM_PACKAGE at a long-lived read-only SAS URL, then sync
    function triggers.
    """
    settings = get_app_settings(cmd, resource_group_name, name, slot)
    # AzureWebJobsStorage holds the connection string of the app's storage account.
    storage_connection = None
    for keyval in settings:
        if keyval['name'] == 'AzureWebJobsStorage':
            storage_connection = str(keyval['value'])
    if storage_connection is None:
        raise CLIError('Could not find a \'AzureWebJobsStorage\' application setting')
    container_name = "function-releases"
    blob_name = "{}-{}.zip".format(datetime.datetime.today().strftime('%Y%m%d%H%M%S'), str(uuid.uuid4()))
    BlockBlobService = get_sdk(cmd.cli_ctx, ResourceType.DATA_STORAGE, 'blob#BlockBlobService')
    block_blob_service = BlockBlobService(connection_string=storage_connection)
    if not block_blob_service.exists(container_name):
        block_blob_service.create_container(container_name)

    # https://gist.github.com/vladignatyev/06860ec2040cb497f0f3
    def progress_callback(current, total):
        # Render a 30-char text progress bar through the CLI progress controller.
        total_length = 30
        # BUGFIX: round the scaled ratio, not just the numerator — previously
        # int(round(total_length * current) / float(total)) truncated instead
        # of rounding the bar width.
        filled_length = int(round(total_length * current / float(total)))
        percents = round(100.0 * current / float(total), 1)
        progress_bar = '=' * filled_length + '-' * (total_length - filled_length)
        progress_message = 'Uploading {} {}%'.format(progress_bar, percents)
        cmd.cli_ctx.get_progress_controller().add(message=progress_message)

    block_blob_service.create_blob_from_path(container_name, blob_name, src, validate_content=True,
                                             progress_callback=progress_callback)
    # SAS window: starts 10 minutes in the past (clock skew) and lasts ~10 years.
    now = datetime.datetime.utcnow()
    blob_start = now - datetime.timedelta(minutes=10)
    blob_end = now + datetime.timedelta(weeks=520)
    BlobPermissions = get_sdk(cmd.cli_ctx, ResourceType.DATA_STORAGE, 'blob#BlobPermissions')
    blob_token = block_blob_service.generate_blob_shared_access_signature(container_name,
                                                                          blob_name,
                                                                          permission=BlobPermissions(read=True),
                                                                          expiry=blob_end,
                                                                          start=blob_start)
    blob_uri = block_blob_service.make_blob_url(container_name, blob_name, sas_token=blob_token)
    website_run_from_setting = "WEBSITE_RUN_FROM_PACKAGE={}".format(blob_uri)
    update_app_settings(cmd, resource_group_name, name, settings=[website_run_from_setting])
    client = web_client_factory(cmd.cli_ctx)
    try:
        logger.info('\nSyncing Triggers...')
        if slot is not None:
            client.web_apps.sync_function_triggers_slot(resource_group_name, name, slot)
        else:
            client.web_apps.sync_function_triggers(resource_group_name, name)
    except CloudError as ex:
        # This SDK function throws an error if Status Code is 200
        if ex.status_code != 200:
            raise ex
    except Exception as ex:  # pylint: disable=broad-except
        # NOTE(review): assumes ex has a .response attribute — confirm this
        # holds for non-HTTP exceptions raised by the SDK.
        if ex.response.status_code != 200:
            raise ex
def show_webapp(cmd, resource_group_name, name, slot=None, app_instance=None):
    """Return the web app enriched with its site_config and FTP publishing URL.

    app_instance lets internal callers pass an already-fetched site and skip
    the extra 'get' round trip.
    """
    webapp = app_instance
    if not app_instance:  # when the routine is invoked as a help method, not through commands
        webapp = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
    if not webapp:
        raise ResourceNotFoundError("WebApp'{}', is not found on RG '{}'.".format(name, resource_group_name))
    webapp.site_config = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_configuration', slot)
    _rename_server_farm_props(webapp)
    _fill_ftp_publishing_url(cmd, webapp, resource_group_name, name, slot)
    return webapp
# for generic updater
# for generic updater
def get_webapp(cmd, resource_group_name, name, slot=None):
    """Generic-update getter: fetch the raw site object (or slot)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
def set_webapp(cmd, resource_group_name, name, slot=None, skip_dns_registration=None,  # pylint: disable=unused-argument
               skip_custom_domain_verification=None, force_dns_registration=None, ttl_in_seconds=None, **kwargs):  # pylint: disable=unused-argument
    """Generic-update setter: PUT the full site envelope back (slot-aware).

    The modified instance arrives in kwargs['parameters']; the DNS-related
    parameters are accepted for interface compatibility but unused.
    """
    instance = kwargs['parameters']
    client = web_client_factory(cmd.cli_ctx)
    updater = client.web_apps.begin_create_or_update_slot if slot else client.web_apps.begin_create_or_update
    kwargs = dict(resource_group_name=resource_group_name, name=name, site_envelope=instance)
    if slot:
        kwargs['slot'] = slot
    return updater(**kwargs)
def update_webapp(instance, client_affinity_enabled=None, https_only=None):
    """Generic-update hook: toggle client affinity and HTTPS-only on a site.

    Both flags arrive as the strings 'true'/'false'; None leaves the current
    value untouched. Function apps are rejected — they have their own command.
    """
    if 'function' in instance.kind:
        raise CLIError("please use 'az functionapp update' to update this function app")
    # Convert each provided 'true'/'false' string into a real boolean.
    for attr_name, flag in (('client_affinity_enabled', client_affinity_enabled),
                            ('https_only', https_only)):
        if flag is not None:
            setattr(instance, attr_name, flag == 'true')
    return instance
def update_functionapp(cmd, instance, plan=None, force=False):
    """Generic-update hook for functionapps: optionally move the app to another
    plan (resource id or plan name in the app's resource group), validating the
    switch first. 'force' permits the Premium -> Consumption migration."""
    client = web_client_factory(cmd.cli_ctx)
    if plan is not None:
        if is_valid_resource_id(plan):
            dest_parse_result = parse_resource_id(plan)
            dest_plan_info = client.app_service_plans.get(dest_parse_result['resource_group'],
                                                          dest_parse_result['name'])
        else:
            # Plain name: look the plan up in the app's own resource group.
            dest_plan_info = client.app_service_plans.get(instance.resource_group, plan)
        if dest_plan_info is None:
            raise ResourceNotFoundError("The plan '{}' doesn't exist".format(plan))
        validate_plan_switch_compatibility(cmd, client, instance, dest_plan_info, force)
        instance.server_farm_id = dest_plan_info.id
    return instance
def validate_plan_switch_compatibility(cmd, client, src_functionapp_instance, dest_plan_instance, force):
    """Validate a functionapp plan move: both plans must be Windows and either
    Consumption or Elastic Premium; Premium -> Consumption also requires
    'force'. Raises a validation error otherwise."""
    general_switch_msg = 'Currently the switch is only allowed between a Consumption or an Elastic Premium plan.'
    src_parse_result = parse_resource_id(src_functionapp_instance.server_farm_id)
    src_plan_info = client.app_service_plans.get(src_parse_result['resource_group'],
                                                 src_parse_result['name'])
    if src_plan_info is None:
        raise ResourceNotFoundError('Could not determine the current plan of the functionapp')
    # Ensure all plans involved are windows. Reserved = true indicates Linux.
    if src_plan_info.reserved or dest_plan_instance.reserved:
        raise ValidationError('This feature currently supports windows to windows plan migrations. For other '
                              'migrations, please redeploy.')
    src_is_premium = is_plan_elastic_premium(cmd, src_plan_info)
    dest_is_consumption = is_plan_consumption(cmd, dest_plan_instance)
    if not (is_plan_consumption(cmd, src_plan_info) or src_is_premium):
        raise ValidationError('Your functionapp is not using a Consumption or an Elastic Premium plan. ' +
                              general_switch_msg)
    if not (dest_is_consumption or is_plan_elastic_premium(cmd, dest_plan_instance)):
        raise ValidationError('You are trying to move to a plan that is not a Consumption or an '
                              'Elastic Premium plan. ' +
                              general_switch_msg)
    if src_is_premium and dest_is_consumption:
        # Downgrade path: warn, and only proceed when --force was passed.
        logger.warning('WARNING: Moving a functionapp from Premium to Consumption might result in loss of '
                       'functionality and cause the app to break. Please ensure the functionapp is compatible '
                       'with a Consumption plan and is not using any features only available in Premium.')
        if not force:
            raise RequiredArgumentMissingError('If you want to migrate a functionapp from a Premium to Consumption '
                                               'plan, please re-run this command with the \'--force\' flag.')
def set_functionapp(cmd, resource_group_name, name, **kwargs):
    """Generic-update setter for functionapps: PUT the site envelope back,
    rejecting sites whose kind does not include 'function'."""
    instance = kwargs['parameters']
    if 'function' not in instance.kind:
        raise ValidationError('Not a function app to update')
    client = web_client_factory(cmd.cli_ctx)
    return client.web_apps.begin_create_or_update(resource_group_name, name, site_envelope=instance)
def list_webapp(cmd, resource_group_name=None):
    """List web apps (optionally scoped to a resource group), excluding
    function apps and sites with no kind."""
    full_list = _list_app(cmd.cli_ctx, resource_group_name)
    # ignore apps with kind==null & not functions apps
    return list(filter(lambda x: x.kind is not None and "function" not in x.kind.lower(), full_list))
def list_deleted_webapp(cmd, resource_group_name=None, name=None, slot=None):
    """List deleted apps matching the optional filters, sorted by deleted-site id."""
    result = _list_deleted_app(cmd.cli_ctx, resource_group_name, name, slot)
    return sorted(result, key=lambda site: site.deleted_site_id)
def restore_deleted_webapp(cmd, deleted_id, resource_group_name, name, slot=None, restore_content_only=None):
    """Restore a deleted app into the target app/slot; by default configuration
    is recovered too, unless restore_content_only is set."""
    DeletedAppRestoreRequest = cmd.get_models('DeletedAppRestoreRequest')
    request = DeletedAppRestoreRequest(deleted_site_id=deleted_id, recover_configuration=not restore_content_only)
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'restore_from_deleted_app', slot, request)
def list_function_app(cmd, resource_group_name=None):
    """List function apps (kind contains 'function'), optionally scoped to a
    resource group."""
    return list(filter(lambda x: x.kind is not None and "function" in x.kind.lower(),
                       _list_app(cmd.cli_ctx, resource_group_name)))
def _list_app(cli_ctx, resource_group_name=None):
    """Fetch all sites (subscription-wide, or one resource group) and rename
    server_farm_id to app_service_plan_id on each."""
    client = web_client_factory(cli_ctx)
    if resource_group_name:
        result = list(client.web_apps.list_by_resource_group(resource_group_name))
    else:
        result = list(client.web_apps.list())
    for webapp in result:
        _rename_server_farm_props(webapp)
    return result
def _list_deleted_app(cli_ctx, resource_group_name=None, name=None, slot=None):
    """Collect deleted apps across all supported locations, then apply the
    optional resource-group / name / slot filters (name and slot are matched
    case-insensitively)."""
    client = web_client_factory(cli_ctx)
    locations = _get_deleted_apps_locations(cli_ctx)
    deleted_apps = []
    for location in locations:
        deleted_apps += list(client.deleted_web_apps.list_by_location(location))
    if resource_group_name:
        deleted_apps = [app for app in deleted_apps if app.resource_group == resource_group_name]
    if name:
        deleted_apps = [app for app in deleted_apps if app.deleted_site_name.lower() == name.lower()]
    if slot:
        deleted_apps = [app for app in deleted_apps if app.slot.lower() == slot.lower()]
    return deleted_apps
def _build_identities_info(identities):
    """Classify a list of identity ids into system/user-assigned components.

    Returns (info_dict, identity_types_csv, external_identity_ids,
    uses_system_assigned). An empty list or the MSI_LOCAL_ID sentinel implies
    the system-assigned identity.
    """
    from ._appservice_utils import MSI_LOCAL_ID
    identities = identities or []
    external_identities = [identity for identity in identities if identity != MSI_LOCAL_ID]
    types = []
    if not identities or MSI_LOCAL_ID in identities:
        types.append('SystemAssigned')
    if external_identities:
        types.append('UserAssigned')
    identity_types = ','.join(types)
    info = {'type': identity_types}
    if external_identities:
        info['userAssignedIdentities'] = {e: {} for e in external_identities}
    return (info, identity_types, external_identities, 'SystemAssigned' in identity_types)
def assign_identity(cmd, resource_group_name, name, assign_identities=None, role='Contributor', slot=None, scope=None):
    """Enable managed identity on a web app/slot via the shared ARM
    assign-identity helper; optionally grants 'role' over 'scope'.

    Returns the resulting identity object.
    """
    ManagedServiceIdentity, ResourceIdentityType = cmd.get_models('ManagedServiceIdentity',
                                                                  'ManagedServiceIdentityType')
    UserAssignedIdentitiesValue = cmd.get_models('Components1Jq1T4ISchemasManagedserviceidentityPropertiesUserassignedidentitiesAdditionalproperties')  # pylint: disable=line-too-long
    _, _, external_identities, enable_local_identity = _build_identities_info(assign_identities)

    def getter():
        # Fetch the current site state for the ARM helper.
        return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)

    def setter(webapp):
        # Merge the requested identities with whatever the site already has:
        # any combination of existing + requested system/user identities that
        # spans both kinds ends up as system_assigned_user_assigned.
        if webapp.identity and webapp.identity.type == ResourceIdentityType.system_assigned_user_assigned:
            identity_types = ResourceIdentityType.system_assigned_user_assigned
        elif webapp.identity and webapp.identity.type == ResourceIdentityType.system_assigned and external_identities:
            identity_types = ResourceIdentityType.system_assigned_user_assigned
        elif webapp.identity and webapp.identity.type == ResourceIdentityType.user_assigned and enable_local_identity:
            identity_types = ResourceIdentityType.system_assigned_user_assigned
        elif external_identities and enable_local_identity:
            identity_types = ResourceIdentityType.system_assigned_user_assigned
        elif external_identities:
            identity_types = ResourceIdentityType.user_assigned
        else:
            identity_types = ResourceIdentityType.system_assigned
        if webapp.identity:
            webapp.identity.type = identity_types
        else:
            webapp.identity = ManagedServiceIdentity(type=identity_types)
        if external_identities:
            if not webapp.identity.user_assigned_identities:
                webapp.identity.user_assigned_identities = {}
            for identity in external_identities:
                webapp.identity.user_assigned_identities[identity] = UserAssignedIdentitiesValue()
        poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'begin_create_or_update',
                                         extra_parameter=webapp, slot=slot)
        return LongRunningOperation(cmd.cli_ctx)(poller)

    from azure.cli.core.commands.arm import assign_identity as _assign_identity
    webapp = _assign_identity(cmd.cli_ctx, getter, setter, role, scope)
    return webapp.identity
def show_identity(cmd, resource_group_name, name, slot=None):
    """Return the managed identity block of the web app (or slot)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot).identity
def remove_identity(cmd, resource_group_name, name, remove_identities=None, slot=None):
    """Remove managed identities from a web app/slot.

    remove_identities may contain user-assigned identity ids and/or the local
    (system) identity sentinel; the identity type is downgraded accordingly and
    the surviving user-assigned identities are re-asserted.
    """
    IdentityType = cmd.get_models('ManagedServiceIdentityType')
    UserAssignedIdentitiesValue = cmd.get_models('Components1Jq1T4ISchemasManagedserviceidentityPropertiesUserassignedidentitiesAdditionalproperties')  # pylint: disable=line-too-long
    _, _, external_identities, remove_local_identity = _build_identities_info(remove_identities)

    def getter():
        return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)

    def setter(webapp):
        # Nothing to remove if the site has no identity at all.
        if webapp.identity is None:
            return webapp
        to_remove = []
        # Identity ids are compared case-insensitively.
        existing_identities = {x.lower() for x in list((webapp.identity.user_assigned_identities or {}).keys())}
        if external_identities:
            to_remove = {x.lower() for x in external_identities}
            non_existing = to_remove.difference(existing_identities)
            if non_existing:
                raise CLIError("'{}' are not associated with '{}'".format(','.join(non_existing), name))
            if not list(existing_identities - to_remove):
                # All user-assigned identities are going away; drop that type bit.
                if webapp.identity.type == IdentityType.user_assigned:
                    webapp.identity.type = IdentityType.none
                elif webapp.identity.type == IdentityType.system_assigned_user_assigned:
                    webapp.identity.type = IdentityType.system_assigned
        # Cleared here and rebuilt below with only the surviving identities.
        webapp.identity.user_assigned_identities = None
        if remove_local_identity:
            webapp.identity.type = (IdentityType.none
                                    if webapp.identity.type == IdentityType.system_assigned or
                                    webapp.identity.type == IdentityType.none
                                    else IdentityType.user_assigned)
        if webapp.identity.type not in [IdentityType.none, IdentityType.system_assigned]:
            webapp.identity.user_assigned_identities = {}
            if to_remove:
                for identity in list(existing_identities - to_remove):
                    webapp.identity.user_assigned_identities[identity] = UserAssignedIdentitiesValue()
            else:
                for identity in list(existing_identities):
                    webapp.identity.user_assigned_identities[identity] = UserAssignedIdentitiesValue()
        poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'begin_create_or_update', slot, webapp)
        return LongRunningOperation(cmd.cli_ctx)(poller)

    from azure.cli.core.commands.arm import assign_identity as _assign_identity
    webapp = _assign_identity(cmd.cli_ctx, getter, setter)
    return webapp.identity
def get_auth_settings(cmd, resource_group_name, name, slot=None):
    """Return the App Service authentication/authorization settings."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_auth_settings', slot)
def is_auth_runtime_version_valid(runtime_version=None):
    """Validate an auth runtime version string.

    Accepted forms: None (unspecified), '~N' where N parses as an integer, or
    a three-part dotted version where every part parses as an integer.
    """
    if runtime_version is None:
        return True

    def _parses_as_int(text):
        try:
            int(text)
        except ValueError:
            return False
        return True

    if runtime_version.startswith("~") and len(runtime_version) > 1:
        return _parses_as_int(runtime_version[1:])
    parts = runtime_version.split('.')
    return len(parts) == 3 and all(_parses_as_int(part) for part in parts)
def update_auth_settings(cmd, resource_group_name, name, enabled=None, action=None,  # pylint: disable=unused-argument
                         client_id=None, token_store_enabled=None, runtime_version=None,  # pylint: disable=unused-argument
                         token_refresh_extension_hours=None,  # pylint: disable=unused-argument
                         allowed_external_redirect_urls=None, client_secret=None,  # pylint: disable=unused-argument
                         client_secret_certificate_thumbprint=None,  # pylint: disable=unused-argument
                         allowed_audiences=None, issuer=None, facebook_app_id=None,  # pylint: disable=unused-argument
                         facebook_app_secret=None, facebook_oauth_scopes=None,  # pylint: disable=unused-argument
                         twitter_consumer_key=None, twitter_consumer_secret=None,  # pylint: disable=unused-argument
                         google_client_id=None, google_client_secret=None,  # pylint: disable=unused-argument
                         google_oauth_scopes=None, microsoft_account_client_id=None,  # pylint: disable=unused-argument
                         microsoft_account_client_secret=None,  # pylint: disable=unused-argument
                         microsoft_account_oauth_scopes=None, slot=None):  # pylint: disable=unused-argument
    """Update App Service auth settings; every non-None keyword is copied onto
    the existing settings object via frame introspection, so parameter names
    must match SiteAuthSettings attribute names."""
    auth_settings = get_auth_settings(cmd, resource_group_name, name, slot)
    UnauthenticatedClientAction = cmd.get_models('UnauthenticatedClientAction')
    if action == 'AllowAnonymous':
        auth_settings.unauthenticated_client_action = UnauthenticatedClientAction.allow_anonymous
    elif action:
        # Any other action redirects to login and selects a default provider.
        auth_settings.unauthenticated_client_action = UnauthenticatedClientAction.redirect_to_login_page
        auth_settings.default_provider = AUTH_TYPES[action]
    # validate runtime version
    if not is_auth_runtime_version_valid(runtime_version):
        raise CLIError('Usage Error: --runtime-version set to invalid value')
    import inspect
    frame = inspect.currentframe()
    bool_flags = ['enabled', 'token_store_enabled']
    # note: getargvalues is used already in azure.cli.core.commands.
    # and no simple functional replacement for this deprecating method for 3.5
    args, _, _, values = inspect.getargvalues(frame)  # pylint: disable=deprecated-method
    # Skip cmd/resource_group_name; copy every supplied argument onto the
    # settings object, converting the 'true'/'false' strings for bool flags.
    for arg in args[2:]:
        if values.get(arg, None):
            setattr(auth_settings, arg, values[arg] if arg not in bool_flags else values[arg] == 'true')
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_auth_settings', slot, auth_settings)
def list_instances(cmd, resource_group_name, name, slot=None):
    """List the scaled-out instance identifiers of the app (or slot)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_instance_identifiers', slot)
# Currently using hardcoded values instead of this function. This function calls the stacks API;
# Stacks API is updated with Antares deployments,
# which are infrequent and don't line up with stacks EOL schedule.
def list_runtimes(cmd, linux=False):
    """List runtime stack display names from the live stacks API (unused in
    favor of list_runtimes_hardcoded; see note above this function)."""
    client = web_client_factory(cmd.cli_ctx)
    runtime_helper = _StackRuntimeHelper(cmd=cmd, client=client, linux=linux)
    return [s['displayName'] for s in runtime_helper.stacks]
def list_runtimes_hardcoded(linux=False):
    """List runtime stack display names from the bundled RUNTIME_STACKS file,
    for the requested OS flavor."""
    if linux:
        return [s['displayName'] for s in get_file_json(RUNTIME_STACKS)['linux']]
    return [s['displayName'] for s in get_file_json(RUNTIME_STACKS)['windows']]
def _rename_server_farm_props(webapp):
# Should be renamed in SDK in a future release
setattr(webapp, 'app_service_plan_id', webapp.server_farm_id)
del webapp.server_farm_id
return webapp
def delete_function_app(cmd, resource_group_name, name, slot=None):
    """Delete a function app (or one of its slots)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'delete', slot)
def delete_webapp(cmd, resource_group_name, name, keep_metrics=None, keep_empty_plan=None,
                  keep_dns_registration=None, slot=None):  # pylint: disable=unused-argument
    """Delete a web app or slot. By default metrics are deleted and an emptied
    plan is removed; the keep_* flags opt out (keep_dns_registration is
    accepted for compatibility but unused)."""
    client = web_client_factory(cmd.cli_ctx)
    if slot:
        client.web_apps.delete_slot(resource_group_name, name, slot,
                                    delete_metrics=False if keep_metrics else None,
                                    delete_empty_server_farm=False if keep_empty_plan else None)
    else:
        client.web_apps.delete(resource_group_name, name,
                               delete_metrics=False if keep_metrics else None,
                               delete_empty_server_farm=False if keep_empty_plan else None)
def stop_webapp(cmd, resource_group_name, name, slot=None):
    """Stop the web app (or the given deployment slot)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'stop', slot)
def start_webapp(cmd, resource_group_name, name, slot=None):
    """Start the web app (or the given deployment slot)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'start', slot)
def restart_webapp(cmd, resource_group_name, name, slot=None):
    """Restart the web app (or the given deployment slot)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'restart', slot)
def get_site_configs(cmd, resource_group_name, name, slot=None):
    """Return the site configuration of the web app (or slot)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_configuration', slot)
def get_app_settings(cmd, resource_group_name, name, slot=None):
    """Return app settings together with their slot-sticky flags (shape is
    produced by _build_app_settings_output)."""
    result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_application_settings', slot)
    client = web_client_factory(cmd.cli_ctx)
    slot_app_setting_names = client.web_apps.list_slot_configuration_names(resource_group_name, name).app_setting_names
    return _build_app_settings_output(result.properties, slot_app_setting_names)
# Check if the app setting is propagated to the Kudu site correctly by calling api/settings endpoint
# should_have [] is a list of app settings which are expected to be set
# should_not_have [] is a list of app settings which are expected to be absent
# should_contain {} is a dictionary of app settings which are expected to be set with precise values
# Return True if validation succeeded
def validate_app_settings_in_scm(cmd, resource_group_name, name, slot=None,
                                 should_have=None, should_not_have=None, should_contain=None):
    """Return True when the Kudu (SCM) site reflects the expected app settings.

    should_have: names that must be present.
    should_not_have: names that must be absent.
    should_contain: name->value pairs that must match exactly.
    """
    scm_settings = _get_app_settings_from_scm(cmd, resource_group_name, name, slot)
    present = set(scm_settings)
    if should_have and not set(should_have) <= present:
        return False
    if should_not_have and set(should_not_have) & present:
        return False
    for key, value in (should_contain or {}).items():
        if key not in scm_settings or scm_settings[key] != value:
            return False
    return True
@retryable_method(3, 5)
def _get_app_settings_from_scm(cmd, resource_group_name, name, slot=None):
    """Fetch the app settings as seen by the Kudu site (api/settings endpoint)."""
    import requests
    scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
    username, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
    headers = {
        'Content-Type': 'application/octet-stream',
        'Cache-Control': 'no-cache',
        'User-Agent': get_az_user_agent(),
    }
    response = requests.get('{}/api/settings'.format(scm_url), headers=headers,
                            auth=(username, password), timeout=3)
    return response.json() or {}
def get_connection_strings(cmd, resource_group_name, name, slot=None):
    """List connection strings, flagging which ones are slot-sticky."""
    conn_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                           'list_connection_strings', slot)
    client = web_client_factory(cmd.cli_ctx)
    sticky = client.web_apps.list_slot_configuration_names(resource_group_name, name) \
        .connection_string_names or []
    return [{'name': key,
             'value': pair.value,
             'type': pair.type,
             'slotSetting': key in sticky} for key, pair in conn_strings.properties.items()]
def get_azure_storage_accounts(cmd, resource_group_name, name, slot=None):
    """List Azure storage account mounts, flagging which ones are slot-sticky."""
    client = web_client_factory(cmd.cli_ctx)
    accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                       'list_azure_storage_accounts', slot)
    sticky = client.web_apps.list_slot_configuration_names(resource_group_name, name) \
        .azure_storage_config_names or []
    return [{'name': account_name,
             'value': value,
             'slotSetting': account_name in sticky}
            for account_name, value in accounts.properties.items()]
def _fill_ftp_publishing_url(cmd, webapp, resource_group_name, name, slot=None):
    """Attach the FTP publish URL (if any) to the webapp model and return it."""
    profiles = list_publish_profiles(cmd, resource_group_name, name, slot)
    ftp_urls = (p['publishUrl'] for p in profiles if p['publishMethod'] == 'FTP')
    url = next(ftp_urls, None)
    if url is not None:
        setattr(webapp, 'ftpPublishingUrl', url)
    return webapp
def _format_fx_version(custom_image_name, container_config_type=None):
lower_custom_image_name = custom_image_name.lower()
if "https://" in lower_custom_image_name or "http://" in lower_custom_image_name:
custom_image_name = lower_custom_image_name.replace("https://", "").replace("http://", "")
fx_version = custom_image_name.strip()
fx_version_lower = fx_version.lower()
# handles case of only spaces
if fx_version:
if container_config_type:
fx_version = '{}|{}'.format(container_config_type, custom_image_name)
elif not fx_version_lower.startswith('docker|'):
fx_version = '{}|{}'.format('DOCKER', custom_image_name)
else:
fx_version = ' '
return fx_version
def _add_fx_version(cmd, resource_group_name, name, custom_image_name, slot=None):
    """Set the container image on the site's linux or windows fx version."""
    fx_version = _format_fx_version(custom_image_name)
    web_app = get_webapp(cmd, resource_group_name, name, slot)
    if not web_app:
        raise CLIError("'{}' app doesn't exist in resource group {}".format(name, resource_group_name))
    # a Windows-container (xenon) app that is not Linux-reserved gets no linux fx
    linux_fx = None if (web_app.is_xenon and not web_app.reserved) else fx_version
    windows_fx = fx_version if web_app.is_xenon else None
    return update_site_configs(cmd, resource_group_name, name,
                               linux_fx_version=linux_fx, windows_fx_version=windows_fx, slot=slot)
def _delete_linux_fx_version(cmd, resource_group_name, name, slot=None):
    """Clear linux_fx_version (a single space is the API's 'unset' value)."""
    blank = ' '
    return update_site_configs(cmd, resource_group_name, name, linux_fx_version=blank, slot=slot)
def _get_fx_version(cmd, resource_group_name, name, slot=None):
    """Return the effective fx version (linux first, then windows, else '')."""
    site_config = get_site_configs(cmd, resource_group_name, name, slot)
    for candidate in (site_config.linux_fx_version, site_config.windows_fx_version):
        if candidate:
            return candidate
    return ''
def url_validator(url):
    """Return True only for URLs that carry a scheme, a host and a path."""
    try:
        parts = urlparse(url)
    except ValueError:
        return False
    return bool(parts.scheme and parts.netloc and parts.path)
def _get_linux_multicontainer_decoded_config(cmd, resource_group_name, name, slot=None):
    """Return the base64-decoded multicontainer (compose/kube) config bytes."""
    from base64 import b64decode
    linux_fx_version = _get_fx_version(cmd, resource_group_name, name, slot)
    if not any(linux_fx_version.startswith(prefix) for prefix in MULTI_CONTAINER_TYPES):
        raise CLIError("Cannot decode config that is not one of the"
                       " following types: {}".format(','.join(MULTI_CONTAINER_TYPES)))
    encoded = linux_fx_version.split('|')[1]
    return b64decode(encoded.encode('utf-8'))
def _get_linux_multicontainer_encoded_config_from_file(file_name):
    """Read a multicontainer config (local path or URL) and base64-encode it."""
    from base64 import b64encode
    if url_validator(file_name):
        config_file_bytes = urlopen(file_name, context=_ssl_context()).read()
    else:
        with open(file_name, 'rb') as config_file:
            config_file_bytes = config_file.read()
    # return the config as a base64-encoded string
    return b64encode(config_file_bytes).decode('utf-8')
# for any modifications to the non-optional parameters, adjust the reflection logic accordingly
# in the method
# pylint: disable=unused-argument
def update_site_configs(cmd, resource_group_name, name, slot=None, number_of_workers=None, linux_fx_version=None,
                        windows_fx_version=None, pre_warmed_instance_count=None, php_version=None,
                        python_version=None, net_framework_version=None,
                        java_version=None, java_container=None, java_container_version=None,
                        remote_debugging_enabled=None, web_sockets_enabled=None,
                        always_on=None, auto_heal_enabled=None,
                        use32_bit_worker_process=None,
                        min_tls_version=None,
                        http20_enabled=None,
                        app_command_line=None,
                        ftps_state=None,
                        generic_configurations=None):
    """Update individual SiteConfig properties of a web app (or slot).

    Named parameters map 1:1 onto SiteConfig attributes; values left at None
    are not touched. generic_configurations accepts JSON blobs and KEY=VALUE
    pairs for any property without a dedicated flag.

    NOTE: the body discovers which arguments were supplied via frame
    reflection over this function's own signature (args[3:]), so parameter
    names and positions are load-bearing — adjust the reflection logic when
    changing the signature.
    """
    configs = get_site_configs(cmd, resource_group_name, name, slot)
    if number_of_workers is not None:
        number_of_workers = validate_range_of_int_flag('--number-of-workers', number_of_workers, min_val=0, max_val=20)
    # single-container Docker apps get app-service storage disabled; any other
    # linux fx (e.g. multicontainer) clears that override
    if linux_fx_version:
        if linux_fx_version.strip().lower().startswith('docker|'):
            update_app_settings(cmd, resource_group_name, name, ["WEBSITES_ENABLE_APP_SERVICE_STORAGE=false"])
        else:
            delete_app_settings(cmd, resource_group_name, name, ["WEBSITES_ENABLE_APP_SERVICE_STORAGE"])
    if pre_warmed_instance_count is not None:
        pre_warmed_instance_count = validate_range_of_int_flag('--prewarmed-instance-count', pre_warmed_instance_count,
                                                               min_val=0, max_val=20)
    import inspect
    frame = inspect.currentframe()
    # CLI passes booleans as the strings 'true'/'false'; these are converted below
    bool_flags = ['remote_debugging_enabled', 'web_sockets_enabled', 'always_on',
                  'auto_heal_enabled', 'use32_bit_worker_process', 'http20_enabled']
    int_flags = ['pre_warmed_instance_count', 'number_of_workers']
    # note: getargvalues is used already in azure.cli.core.commands.
    # and no simple functional replacement for this deprecating method for 3.5
    args, _, _, values = inspect.getargvalues(frame)  # pylint: disable=deprecated-method
    # skip cmd/resource_group_name/name; copy every other supplied arg onto configs
    for arg in args[3:]:
        if arg in int_flags and values[arg] is not None:
            values[arg] = validate_and_convert_to_int(arg, values[arg])
        if arg != 'generic_configurations' and values.get(arg, None):
            setattr(configs, arg, values[arg] if arg not in bool_flags else values[arg] == 'true')
    generic_configurations = generic_configurations or []
    # https://github.com/Azure/azure-cli/issues/14857
    updating_ip_security_restrictions = False
    result = {}
    for s in generic_configurations:
        try:
            # each entry is either a JSON object ...
            json_object = get_json_object(s)
            for config_name in json_object:
                if config_name.lower() == 'ip_security_restrictions':
                    updating_ip_security_restrictions = True
            result.update(json_object)
        except CLIError:
            # ... or a KEY=VALUE pair
            config_name, value = s.split('=', 1)
            result[config_name] = value
    for config_name, value in result.items():
        if config_name.lower() == 'ip_security_restrictions':
            updating_ip_security_restrictions = True
        setattr(configs, config_name, value)
    # unless explicitly updated, blank out the restrictions so the PUT does
    # not clobber them (see issue 14857 above)
    if not updating_ip_security_restrictions:
        setattr(configs, 'ip_security_restrictions', None)
        setattr(configs, 'scm_ip_security_restrictions', None)
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)
def delete_app_settings(cmd, resource_group_name, name, setting_names, slot=None):
    """Remove the given app settings; also un-sticks them from slot config names."""
    app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                           'list_application_settings', slot)
    client = web_client_factory(cmd.cli_ctx)
    slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
    removed_slot_setting = False
    for setting_name in setting_names:
        app_settings.properties.pop(setting_name, None)
        sticky = slot_cfg_names.app_setting_names
        if sticky and setting_name in sticky:
            sticky.remove(setting_name)
            removed_slot_setting = True
    if removed_slot_setting:
        client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
    result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
                                         'update_application_settings',
                                         app_settings, slot, client)
    return _build_app_settings_output(result.properties, slot_cfg_names.app_setting_names)
def delete_azure_storage_accounts(cmd, resource_group_name, name, custom_id, slot=None):
    """Remove an Azure storage account mount; also un-sticks it from slot config."""
    azure_storage_accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                                     'list_azure_storage_accounts', slot)
    client = web_client_factory(cmd.cli_ctx)
    slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
    azure_storage_accounts.properties.pop(custom_id, None)
    sticky = slot_cfg_names.azure_storage_config_names
    if sticky and custom_id in sticky:
        sticky.remove(custom_id)
        client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
    result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
                                         'update_azure_storage_accounts', azure_storage_accounts,
                                         slot, client)
    return result.properties
def _ssl_context():
    """Return an SSL context for urlopen.

    On pre-3.4 Pythons, and in Cloud Shell on Windows, the default verified
    context can fail, so fall back to a bare TLS context there.

    Bug fixed: this previously called ``sys.platform.system()`` —
    ``sys.platform`` is a str with no ``.system()`` attribute, so the check
    raised AttributeError whenever ``in_cloud_console()`` was truthy. The OS
    check belongs to the ``platform`` module.
    """
    import platform
    if sys.version_info < (3, 4) or (in_cloud_console() and platform.system() == 'Windows'):
        try:
            return ssl.SSLContext(ssl.PROTOCOL_TLS)  # added in python 2.7.13 and 3.6
        except AttributeError:
            # very old ssl modules predate PROTOCOL_TLS
            return ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    return ssl.create_default_context()
def _build_app_settings_output(app_settings, slot_cfg_names):
    """Shape a name->value mapping into the CLI's list-of-dicts output,
    masking credential-like values first."""
    sticky_names = slot_cfg_names or []
    masked = _mask_creds_related_appsettings(app_settings)
    return [{'name': setting_name,
             'value': app_settings[setting_name],
             'slotSetting': setting_name in sticky_names} for setting_name in masked]
def update_connection_strings(cmd, resource_group_name, name, connection_string_type,
                              settings=None, slot=None, slot_settings=None):
    """Add/update connection strings; entries from slot_settings become slot-sticky."""
    from azure.mgmt.web.models import ConnStringValueTypePair
    if not settings and not slot_settings:
        raise CLIError('Usage Error: --settings |--slot-settings')
    settings = settings or []
    slot_settings = slot_settings or []
    conn_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                           'list_connection_strings', slot)
    for name_value in settings + slot_settings:
        # split at the first '=' only; a connection string name may not contain '='
        conn_string_name, value = name_value.split('=', 1)
        if value[0] in ("'", '"'):  # strip the quotes used as separators
            value = value[1:-1]
        conn_strings.properties[conn_string_name] = ConnStringValueTypePair(
            value=value, type=connection_string_type)
    client = web_client_factory(cmd.cli_ctx)
    result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
                                         'update_connection_strings',
                                         conn_strings, slot, client)
    if slot_settings:
        # record the new names as slot-sticky
        sticky_names = [entry.split('=', 1)[0] for entry in slot_settings]
        slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
        existing = slot_cfg_names.connection_string_names or []
        slot_cfg_names.connection_string_names = existing + sticky_names
        client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
    return result.properties
def delete_connection_strings(cmd, resource_group_name, name, setting_names, slot=None):
    """Remove the given connection strings; also un-sticks them from slot config."""
    conn_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                           'list_connection_strings', slot)
    client = web_client_factory(cmd.cli_ctx)
    slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
    removed_slot_setting = False
    for setting_name in setting_names:
        conn_strings.properties.pop(setting_name, None)
        sticky = slot_cfg_names.connection_string_names
        if sticky and setting_name in sticky:
            sticky.remove(setting_name)
            removed_slot_setting = True
    if removed_slot_setting:
        client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
    return _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
                                       'update_connection_strings',
                                       conn_strings, slot, client)
# App settings that drive container configuration; these are the entries kept
# by _filter_for_container_settings / `az webapp config container show`.
CONTAINER_APPSETTING_NAMES = ['DOCKER_REGISTRY_SERVER_URL', 'DOCKER_REGISTRY_SERVER_USERNAME',
                              'DOCKER_REGISTRY_SERVER_PASSWORD', "WEBSITES_ENABLE_APP_SERVICE_STORAGE"]
# Settings whose values are masked (set to None) before being shown to the user.
APPSETTINGS_TO_MASK = ['DOCKER_REGISTRY_SERVER_PASSWORD']
def update_container_settings(cmd, resource_group_name, name, docker_registry_server_url=None,
                              docker_custom_image_name=None, docker_registry_server_user=None,
                              websites_enable_app_service_storage=None, docker_registry_server_password=None,
                              multicontainer_config_type=None, multicontainer_config_file=None, slot=None):
    """Update container-related settings (registry, image, storage flag,
    multicontainer config) on a web app or slot.

    Order matters: app settings are pushed before the fx version changes so
    the platform can validate the new image with the new credentials.
    Returns the masked, container-related subset of the app settings.
    """
    settings = []
    if docker_registry_server_url is not None:
        settings.append('DOCKER_REGISTRY_SERVER_URL=' + docker_registry_server_url)
    # no credentials given for an ACR registry: best-effort lookup of the
    # registry's admin credentials in the current subscription
    if (not docker_registry_server_user and not docker_registry_server_password and
            docker_registry_server_url and '.azurecr.io' in docker_registry_server_url):
        logger.warning('No credential was provided to access Azure Container Registry. Trying to look up...')
        parsed = urlparse(docker_registry_server_url)
        # registry name is the first label of the host (url may lack a scheme)
        registry_name = (parsed.netloc if parsed.scheme else parsed.path).split('.')[0]
        try:
            docker_registry_server_user, docker_registry_server_password = _get_acr_cred(cmd.cli_ctx, registry_name)
        except Exception as ex:  # pylint: disable=broad-except
            logger.warning("Retrieving credentials failed with an exception:'%s'", ex)  # consider throw if needed
    if docker_registry_server_user is not None:
        settings.append('DOCKER_REGISTRY_SERVER_USERNAME=' + docker_registry_server_user)
    if docker_registry_server_password is not None:
        settings.append('DOCKER_REGISTRY_SERVER_PASSWORD=' + docker_registry_server_password)
    if websites_enable_app_service_storage:
        settings.append('WEBSITES_ENABLE_APP_SERVICE_STORAGE=' + websites_enable_app_service_storage)
    if docker_registry_server_user or docker_registry_server_password or docker_registry_server_url or websites_enable_app_service_storage:  # pylint: disable=line-too-long
        update_app_settings(cmd, resource_group_name, name, settings, slot)
    # re-read to return the effective settings rather than the request payload
    settings = get_app_settings(cmd, resource_group_name, name, slot)
    if docker_custom_image_name is not None:
        _add_fx_version(cmd, resource_group_name, name, docker_custom_image_name, slot)
    # multicontainer config: both the file and its type are required together
    if multicontainer_config_file and multicontainer_config_type:
        encoded_config_file = _get_linux_multicontainer_encoded_config_from_file(multicontainer_config_file)
        linux_fx_version = _format_fx_version(encoded_config_file, multicontainer_config_type)
        update_site_configs(cmd, resource_group_name, name, linux_fx_version=linux_fx_version, slot=slot)
    elif multicontainer_config_file or multicontainer_config_type:
        logger.warning('Must change both settings --multicontainer-config-file FILE --multicontainer-config-type TYPE')
    return _mask_creds_related_appsettings(_filter_for_container_settings(cmd, resource_group_name, name, settings,
                                                                          slot=slot))
def update_container_settings_functionapp(cmd, resource_group_name, name, docker_registry_server_url=None,
                                          docker_custom_image_name=None, docker_registry_server_user=None,
                                          docker_registry_server_password=None, slot=None):
    """Function-app variant: delegates to update_container_settings without
    the webapp-only storage/multicontainer options."""
    return update_container_settings(cmd, resource_group_name, name,
                                     docker_registry_server_url=docker_registry_server_url,
                                     docker_custom_image_name=docker_custom_image_name,
                                     docker_registry_server_user=docker_registry_server_user,
                                     websites_enable_app_service_storage=None,
                                     docker_registry_server_password=docker_registry_server_password,
                                     multicontainer_config_type=None,
                                     multicontainer_config_file=None, slot=slot)
def _get_acr_cred(cli_ctx, registry_name):
    """Look up admin credentials for an ACR registry in the subscription.

    Raises CLIError when the registry cannot be uniquely resolved or its
    admin user is disabled.
    """
    from azure.mgmt.containerregistry import ContainerRegistryManagementClient
    from azure.cli.core.commands.parameters import get_resources_in_subscription
    client = get_mgmt_service_client(cli_ctx, ContainerRegistryManagementClient).registries
    all_registries = get_resources_in_subscription(cli_ctx, 'Microsoft.ContainerRegistry/registries')
    matches = [item for item in all_registries if item.name.lower() == registry_name]
    if len(matches) != 1:
        raise CLIError("No resource or more than one were found with name '{}'.".format(registry_name))
    resource_group_name = parse_resource_id(matches[0].id)['resource_group']
    registry = client.get(resource_group_name, registry_name)
    if registry.admin_user_enabled:  # pylint: disable=no-member
        cred = client.list_credentials(resource_group_name, registry_name)
        return cred.username, cred.passwords[0].value
    raise CLIError("Failed to retrieve container registry credentials. Please either provide the "
                   "credentials or run 'az acr update -n {} --admin-enabled true' to enable "
                   "admin first.".format(registry_name))
def delete_container_settings(cmd, resource_group_name, name, slot=None):
    """Clear the container fx version and remove container-related app settings."""
    _delete_linux_fx_version(cmd, resource_group_name, name, slot)
    container_setting_names = CONTAINER_APPSETTING_NAMES
    delete_app_settings(cmd, resource_group_name, name, container_setting_names, slot)
def show_container_settings(cmd, resource_group_name, name, show_multicontainer_config=None, slot=None):
    """Show container-related settings, with registry credentials masked."""
    all_settings = get_app_settings(cmd, resource_group_name, name, slot)
    container_settings = _filter_for_container_settings(cmd, resource_group_name, name, all_settings,
                                                        show_multicontainer_config, slot)
    return _mask_creds_related_appsettings(container_settings)
def show_container_settings_functionapp(cmd, resource_group_name, name, slot=None):
    """Function-app variant: multicontainer config display does not apply."""
    return show_container_settings(cmd, resource_group_name, name,
                                   show_multicontainer_config=None, slot=slot)
def _filter_for_container_settings(cmd, resource_group_name, name, settings,
                                   show_multicontainer_config=None, slot=None):
    """Keep only container-related settings; append the fx version (and,
    optionally, its decoded multicontainer config) as pseudo-settings."""
    result = [entry for entry in settings if entry['name'] in CONTAINER_APPSETTING_NAMES]
    fx_version = _get_fx_version(cmd, resource_group_name, name, slot).strip()
    if fx_version:
        result.append({'name': 'DOCKER_CUSTOM_IMAGE_NAME',
                       'value': fx_version})
        if show_multicontainer_config:
            decoded_value = _get_linux_multicontainer_decoded_config(cmd, resource_group_name, name, slot)
            result.append({'name': 'DOCKER_CUSTOM_IMAGE_NAME_DECODED',
                           'value': decoded_value})
    return result
# TODO: remove this when #3660(service tracking issue) is resolved
def _mask_creds_related_appsettings(settings):
    """Mask sensitive app-setting values (e.g. registry passwords) in place.

    Accepts either a name->value mapping (the raw SDK properties dict) or the
    CLI's list-of-dicts shape ({'name': ..., 'value': ...}); returns the same
    object with the values of any APPSETTINGS_TO_MASK entries set to None.
    """
    if isinstance(settings, list):
        # list-of-dicts shape, as produced by _filter_for_container_settings;
        # the old key-membership check never matched a dict against the string
        # names, so passwords passed through unmasked
        for entry in settings:
            if entry.get('name') in APPSETTINGS_TO_MASK:
                entry['value'] = None
        return settings
    for key in [k for k in settings if k in APPSETTINGS_TO_MASK]:
        settings[key] = None
    return settings
def add_hostname(cmd, resource_group_name, webapp_name, hostname, slot=None):
    """Bind a custom hostname to the app (or the given slot)."""
    from azure.mgmt.web.models import HostNameBinding
    client = web_client_factory(cmd.cli_ctx)
    webapp = client.web_apps.get(resource_group_name, webapp_name)
    if not webapp:
        raise CLIError("'{}' app doesn't exist".format(webapp_name))
    binding = HostNameBinding(site_name=webapp.name)
    if slot is not None:
        return client.web_apps.create_or_update_host_name_binding_slot(resource_group_name, webapp.name,
                                                                       hostname, binding, slot)
    return client.web_apps.create_or_update_host_name_binding(resource_group_name, webapp.name,
                                                              hostname, binding)
def delete_hostname(cmd, resource_group_name, webapp_name, hostname, slot=None):
    """Remove a hostname binding from the app (or the given slot)."""
    client = web_client_factory(cmd.cli_ctx)
    if slot is not None:
        return client.web_apps.delete_host_name_binding_slot(resource_group_name, webapp_name, slot, hostname)
    return client.web_apps.delete_host_name_binding(resource_group_name, webapp_name, hostname)
def list_hostnames(cmd, resource_group_name, webapp_name, slot=None):
    """List hostname bindings, trimming names to their last path segment."""
    bindings = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
                                       'list_host_name_bindings', slot)
    result = list(bindings)
    for binding in result:
        binding.name = binding.name.split('/')[-1]
    return result
def get_external_ip(cmd, resource_group_name, webapp_name):
    """Resolve the app's inbound IP address (logic ported from the portal)."""
    SslState = cmd.get_models('SslState')
    client = web_client_factory(cmd.cli_ctx)
    webapp = client.web_apps.get(resource_group_name, webapp_name)
    if not webapp:
        raise CLIError("'{}' app doesn't exist".format(webapp_name))
    if not webapp.hosting_environment_profile:
        # multi-tenant app: resolve the default hostname through DNS
        return {'ip': _resolve_hostname_through_dns(webapp.default_host_name)}
    # App Service Environment: ask the ASE for its VIPs
    address = client.app_service_environments.list_vips(
        resource_group_name, webapp.hosting_environment_profile.name)
    if address.internal_ip_address:
        return {'ip': address.internal_ip_address}
    # prefer an IP-based SSL virtual IP when one is configured
    vip = next((s for s in webapp.host_name_ssl_states if s.ssl_state == SslState.ip_based_enabled), None)
    return {'ip': vip.virtual_ip if vip else address.service_ip_address}
def _resolve_hostname_through_dns(hostname):
    """Resolve a hostname to an IPv4 address via the system resolver."""
    import socket
    resolved = socket.gethostbyname(hostname)
    return resolved
def create_webapp_slot(cmd, resource_group_name, webapp, slot, configuration_source=None):
    """Create a deployment slot for a web app.

    configuration_source: name of the slot (or the app itself, for production)
    whose configuration is cloned into the new slot after creation.
    """
    Site, SiteConfig, NameValuePair = cmd.get_models('Site', 'SiteConfig', 'NameValuePair')
    client = web_client_factory(cmd.cli_ctx)
    site = client.web_apps.get(resource_group_name, webapp)
    site_config = get_site_configs(cmd, resource_group_name, webapp, None)
    if not site:
        raise CLIError("'{}' app doesn't exist".format(webapp))
    if 'functionapp' in site.kind:
        raise CLIError("'{}' is a function app. Please use `az functionapp deployment slot create`.".format(webapp))
    location = site.location
    slot_def = Site(server_farm_id=site.server_farm_id, location=location)
    slot_def.site_config = SiteConfig()
    # if it is a Windows Container site, at least pass the necessary
    # app settings to perform the container image validation:
    if configuration_source and site_config.windows_fx_version:
        # get settings from the source
        clone_from_prod = configuration_source.lower() == webapp.lower()
        src_slot = None if clone_from_prod else configuration_source
        app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
                                               'list_application_settings', src_slot)
        settings = []
        for k, v in app_settings.properties.items():
            # only the registry credentials are needed at creation time
            if k in ("DOCKER_REGISTRY_SERVER_USERNAME", "DOCKER_REGISTRY_SERVER_PASSWORD",
                     "DOCKER_REGISTRY_SERVER_URL"):
                settings.append(NameValuePair(name=k, value=v))
        slot_def.site_config = SiteConfig(app_settings=settings)
    poller = client.web_apps.begin_create_or_update_slot(resource_group_name, webapp, site_envelope=slot_def, slot=slot)
    result = LongRunningOperation(cmd.cli_ctx)(poller)
    # app settings / connection strings are cloned only after the slot exists
    if configuration_source:
        update_slot_configuration_from_source(cmd, client, resource_group_name, webapp, slot, configuration_source)
    result.name = result.name.split('/')[-1]
    return result
def create_functionapp_slot(cmd, resource_group_name, name, slot, configuration_source=None):
    """Create a deployment slot for a function app, optionally cloning config."""
    Site = cmd.get_models('Site')
    client = web_client_factory(cmd.cli_ctx)
    site = client.web_apps.get(resource_group_name, name)
    if not site:
        raise CLIError("'{}' function app doesn't exist".format(name))
    slot_def = Site(server_farm_id=site.server_farm_id, location=site.location)
    poller = client.web_apps.begin_create_or_update_slot(resource_group_name, name,
                                                         site_envelope=slot_def, slot=slot)
    result = LongRunningOperation(cmd.cli_ctx)(poller)
    if configuration_source:
        update_slot_configuration_from_source(cmd, client, resource_group_name, name, slot,
                                              configuration_source)
    result.name = result.name.split('/')[-1]
    return result
def update_slot_configuration_from_source(cmd, client, resource_group_name, webapp, slot,
                                          configuration_source=None):
    """Clone site config, app settings and connection strings from a source
    slot (or production) into `slot`, excluding slot-sticky entries."""
    clone_from_prod = configuration_source.lower() == webapp.lower()
    src_slot = None if clone_from_prod else configuration_source
    site_config = get_site_configs(cmd, resource_group_name, webapp, src_slot)
    _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
                            'update_configuration', slot, site_config)
    # slot create doesn't clone app settings / connection strings, so copy them
    # here while keeping slot-sticky values from propagating
    slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, webapp)
    app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
                                           'list_application_settings', src_slot)
    for sticky_name in slot_cfg_names.app_setting_names or []:
        app_settings.properties.pop(sticky_name, None)
    connection_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
                                                 'list_connection_strings', src_slot)
    for sticky_name in slot_cfg_names.connection_string_names or []:
        connection_strings.properties.pop(sticky_name, None)
    _generic_settings_operation(cmd.cli_ctx, resource_group_name, webapp,
                                'update_application_settings',
                                app_settings, slot, client)
    _generic_settings_operation(cmd.cli_ctx, resource_group_name, webapp,
                                'update_connection_strings',
                                connection_strings, slot, client)
def config_source_control(cmd, resource_group_name, name, repo_url, repository_type='git', branch=None,  # pylint: disable=too-many-locals
                          manual_integration=None, git_token=None, slot=None, github_action=None):
    """Wire the app (or slot) up to a source-control repository.

    git_token, when given, is first cached service-side as the GitHub token.
    The create/update call is retried because earlier commands may have shut
    down the SCM site.
    """
    client = web_client_factory(cmd.cli_ctx)
    location = _get_location_from_webapp(client, resource_group_name, name)
    from azure.mgmt.web.models import SiteSourceControl, SourceControl
    if git_token:
        sc = SourceControl(location=location, source_control_name='GitHub', token=git_token)
        client.update_source_control('GitHub', sc)
    source_control = SiteSourceControl(location=location, repo_url=repo_url, branch=branch,
                                       is_manual_integration=manual_integration,
                                       is_mercurial=(repository_type != 'git'), is_git_hub_action=bool(github_action))
    # SCC config can fail if previous commands caused SCMSite shutdown, so retry here.
    for i in range(5):
        try:
            poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                             'begin_create_or_update_source_control',
                                             slot, source_control)
            return LongRunningOperation(cmd.cli_ctx)(poller)
        except Exception as ex:  # pylint: disable=broad-except
            import re
            # normalize the exception without raising, so we can inspect its text
            ex = ex_handler_factory(no_throw=True)(ex)
            # for non server errors(50x), just throw; otherwise retry 4 times
            if i == 4 or not re.findall(r'\(50\d\)', str(ex)):
                raise
            logger.warning('retrying %s/4', i + 1)
            time.sleep(5)  # retry in a moment
def update_git_token(cmd, git_token=None):
    """Update the GitHub source-control token cached by App Service.

    Passing no token clears the existing one.
    """
    from azure.mgmt.web.models import SourceControl
    client = web_client_factory(cmd.cli_ctx)
    sc = SourceControl(name='not-really-needed', source_control_name='GitHub', token=git_token or '')
    return client.update_source_control('GitHub', sc)
def show_source_control(cmd, resource_group_name, name, slot=None):
    """Show the source-control configuration of the app (or slot)."""
    operation = 'get_source_control'
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, operation, slot)
def delete_source_control(cmd, resource_group_name, name, slot=None):
    """Detach source control from the app (or slot)."""
    operation = 'delete_source_control'
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, operation, slot)
def enable_local_git(cmd, resource_group_name, name, slot=None):
    """Switch SCM to LocalGit and return the local git clone URL."""
    client = web_client_factory(cmd.cli_ctx)
    site_config = get_site_configs(cmd, resource_group_name, name, slot)
    site_config.scm_type = 'LocalGit'
    if slot is not None:
        client.web_apps.create_or_update_configuration_slot(resource_group_name, name,
                                                            site_config, slot)
    else:
        client.web_apps.create_or_update_configuration(resource_group_name, name, site_config)
    return {'url': _get_local_git_url(cmd.cli_ctx, client, resource_group_name, name, slot)}
def sync_site_repo(cmd, resource_group_name, name, slot=None):
    """Trigger a repository sync, tolerating the SDK raising on HTTP 200/204."""
    try:
        return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'sync_repository', slot)
    except CloudError as ex:  # bad swagger: the SDK throws even on a successful 200
        if ex.status_code not in [200, 204]:
            raise ex
def list_app_service_plans(cmd, resource_group_name=None):
    """List App Service plans, subscription-wide or within a resource group."""
    client = web_client_factory(cmd.cli_ctx)
    if resource_group_name is not None:
        plans = list(client.app_service_plans.list_by_resource_group(resource_group_name))
    else:
        plans = list(client.app_service_plans.list(detailed=True))  # enables querying "numberOfSites"
    for plan in plans:
        # prune fields that are noise in CLI output
        del plan.geo_region
        del plan.subscription
    return plans
def create_app_service_plan(cmd, resource_group_name, name, is_linux, hyper_v, per_site_scaling=False,
                            app_service_environment=None, sku='B1', number_of_workers=None, location=None,
                            tags=None, no_wait=False):
    """Create an App Service plan (Linux, Windows, Windows-container or ASE-hosted).

    is_linux and hyper_v are mutually exclusive; an ASE, when given by name or
    resource id, also pins the plan's location.
    """
    HostingEnvironmentProfile, SkuDescription, AppServicePlan = cmd.get_models(
        'HostingEnvironmentProfile', 'SkuDescription', 'AppServicePlan')
    sku = _normalize_sku(sku)
    _validate_asp_sku(app_service_environment, sku)
    if is_linux and hyper_v:
        raise MutuallyExclusiveArgumentError('Usage error: --is-linux and --hyper-v cannot be used together.')
    client = web_client_factory(cmd.cli_ctx)
    if app_service_environment:
        if hyper_v:
            raise ArgumentUsageError('Windows containers is not yet supported in app service environment')
        # resolve the ASE by display name or full resource id
        ase_list = client.app_service_environments.list()
        ase_found = False
        ase = None
        for ase in ase_list:
            if ase.name.lower() == app_service_environment.lower() or ase.id.lower() == app_service_environment.lower():
                ase_def = HostingEnvironmentProfile(id=ase.id)
                location = ase.location
                ase_found = True
                break
        if not ase_found:
            err_msg = "App service environment '{}' not found in subscription.".format(app_service_environment)
            raise ResourceNotFoundError(err_msg)
    else:  # Non-ASE
        ase_def = None
        if location is None:
            location = _get_location_from_resource_group(cmd.cli_ctx, resource_group_name)
    # the api is odd on parameter naming, have to live with it for now
    sku_def = SkuDescription(tier=get_sku_name(sku), name=sku, capacity=number_of_workers)
    plan_def = AppServicePlan(location=location, tags=tags, sku=sku_def,
                              reserved=(is_linux or None), hyper_v=(hyper_v or None), name=name,
                              per_site_scaling=per_site_scaling, hosting_environment_profile=ase_def)
    return sdk_no_wait(no_wait, client.app_service_plans.begin_create_or_update, name=name,
                       resource_group_name=resource_group_name, app_service_plan=plan_def)
def update_app_service_plan(instance, sku=None, number_of_workers=None):
    """Apply sku and/or worker-count changes to a plan instance and return it."""
    if sku is None and number_of_workers is None:
        logger.warning('No update is done. Specify --sku and/or --number-of-workers.')
    sku_def = instance.sku
    if sku is not None:
        normalized = _normalize_sku(sku)
        sku_def.tier = get_sku_name(normalized)
        sku_def.name = normalized
    if number_of_workers is not None:
        sku_def.capacity = number_of_workers
    instance.sku = sku_def
    return instance
def update_functionapp_app_service_plan(cmd, instance, sku=None, number_of_workers=None, max_burst=None):
    """Update a function-app plan, including the Elastic Premium max burst.

    NOTE(review): update_app_service_plan is invoked twice — once up front (so
    is_plan_elastic_premium sees any new sku before the max-burst check) and
    again at the end with the range-validated worker count. The first call
    already applies sku/number_of_workers, so the second is largely redundant;
    presumably intentional — confirm before consolidating.
    """
    instance = update_app_service_plan(instance, sku, number_of_workers)
    if max_burst is not None:
        if not is_plan_elastic_premium(cmd, instance):
            raise CLIError("Usage error: --max-burst is only supported for Elastic Premium (EP) plans")
        max_burst = validate_range_of_int_flag('--max-burst', max_burst, min_val=0, max_val=20)
        instance.maximum_elastic_worker_count = max_burst
    if number_of_workers is not None:
        number_of_workers = validate_range_of_int_flag('--number-of-workers / --min-instances',
                                                       number_of_workers, min_val=0, max_val=20)
    return update_app_service_plan(instance, sku, number_of_workers)
def show_backup_configuration(cmd, resource_group_name, webapp_name, slot=None):
    """Show the backup schedule, or fail with a friendly message if unset."""
    try:
        return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
                                       'get_backup_configuration', slot)
    except Exception:  # pylint: disable=broad-except
        raise CLIError('Backup configuration not found')
def list_backups(cmd, resource_group_name, webapp_name, slot=None):
    """List the backups of the app (or the given slot).

    Uses the 'list_backups' SDK operation; the previous code invoked
    'get_backup_configuration' (a copy/paste from show_backup_configuration),
    which returned the backup schedule instead of the backup list.
    """
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'list_backups', slot)
def create_backup(cmd, resource_group_name, webapp_name, storage_account_url,
                  db_name=None, db_type=None,
                  db_connection_string=None, backup_name=None, slot=None):
    """Trigger an on-demand backup to the given storage container URL."""
    BackupRequest = cmd.get_models('BackupRequest')
    client = web_client_factory(cmd.cli_ctx)
    if backup_name and backup_name.lower().endswith('.zip'):
        backup_name = backup_name[:-4]  # the service appends .zip itself
    db_setting = _create_db_setting(cmd, db_name, db_type=db_type, db_connection_string=db_connection_string)
    backup_request = BackupRequest(backup_name=backup_name,
                                   storage_account_url=storage_account_url, databases=db_setting)
    if slot:
        return client.web_apps.backup_slot(resource_group_name, webapp_name, backup_request, slot)
    return client.web_apps.backup(resource_group_name, webapp_name, backup_request)
def update_backup_schedule(cmd, resource_group_name, webapp_name, storage_account_url=None,
                           frequency=None, keep_at_least_one_backup=None,
                           retention_period_in_days=None, db_name=None,
                           db_connection_string=None, backup_name=None=None if False else db_type, backup_name=None, slot=None):
    BackupSchedule, BackupRequest = cmd.get_models('BackupSchedule', 'BackupRequest')
    configuration = None
    if backup_name and backup_name.lower().endswith('.zip'):
        backup_name = backup_name[:-4]
    if not backup_name:
        backup_name = '{0}_{1}'.format(webapp_name, datetime.datetime.utcnow().strftime('%Y%m%d%H%M'))
    try:
        configuration = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
                                                'get_backup_configuration', slot)
    except Exception: # pylint: disable=broad-except
        # No configuration set yet
        if not all([storage_account_url, frequency, retention_period_in_days,
                    keep_at_least_one_backup]):
            raise CLIError('No backup configuration found. A configuration must be created. ' +
                           'Usage: --container-url URL --frequency TIME --retention DAYS ' +
                           '--retain-one TRUE/FALSE')
    # If arguments were not specified, use the values in the current backup schedule
    if storage_account_url is None:
        storage_account_url = configuration.storage_account_url
    if retention_period_in_days is None:
        retention_period_in_days = configuration.backup_schedule.retention_period_in_days
    if keep_at_least_one_backup is None:
        keep_at_least_one_backup = configuration.backup_schedule.keep_at_least_one_backup
    else:
        keep_at_least_one_backup = keep_at_least_one_backup.lower() == 'true'
    if frequency:
        # Parse schedule frequency
        frequency_num, frequency_unit = _parse_frequency(cmd, frequency)
    else:
        frequency_num = configuration.backup_schedule.frequency_interval
        frequency_unit = configuration.backup_schedule.frequency_unit
    if configuration and configuration.databases:
        db = configuration.databases[0]
        db_type = db_type or db.database_type
        db_name = db_name or db.name
        db_connection_string = db_connection_string or db.connection_string
    db_setting = _create_db_setting(cmd, db_name, db_type=db_type, db_connection_string=db_connection_string)
    backup_schedule = BackupSchedule(frequency_interval=frequency_num, frequency_unit=frequency_unit.name,
                                     keep_at_least_one_backup=keep_at_least_one_backup,
                                     retention_period_in_days=retention_period_in_days)
    backup_request = BackupRequest(backup_request_name=backup_name, backup_schedule=backup_schedule,
                                   enabled=True, storage_account_url=storage_account_url,
                                   databases=db_setting)
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'update_backup_configuration',
                                   slot, backup_request)
def restore_backup(cmd, resource_group_name, webapp_name, storage_account_url, backup_name,
                   db_name=None, db_type=None, db_connection_string=None,
                   target_name=None, overwrite=None, ignore_hostname_conflict=None, slot=None):
    """Restore a webapp (or slot) from a previously created backup blob."""
    RestoreRequest = cmd.get_models('RestoreRequest')
    client = web_client_factory(cmd.cli_ctx)
    # Backups live as '<name>.zip' blobs; append the extension when it is missing.
    blob_name = backup_name
    if not blob_name.lower().endswith('.zip'):
        blob_name = blob_name + '.zip'
    databases = _create_db_setting(cmd, db_name, db_type=db_type, db_connection_string=db_connection_string)
    request = RestoreRequest(storage_account_url=storage_account_url,
                             blob_name=blob_name, overwrite=overwrite,
                             site_name=target_name, databases=databases,
                             ignore_conflicting_host_names=ignore_hostname_conflict)
    if not slot:
        return client.web_apps.restore(resource_group_name, webapp_name, 0, request)
    return client.web_apps.restore_slot(resource_group_name, webapp_name, 0, request, slot)
def list_snapshots(cmd, resource_group_name, name, slot=None):
    """List the restorable snapshots of a webapp (or slot)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                   'list_snapshots', slot)
def restore_snapshot(cmd, resource_group_name, name, time, slot=None, restore_content_only=False, # pylint: disable=redefined-outer-name
                     source_resource_group=None, source_name=None, source_slot=None):
    # Restore a webapp (or slot) from a snapshot taken at `time`.
    # Two modes: restore from ANOTHER app's snapshot (source_* args), or restore
    # an app over itself. restore_content_only=True skips restoring configuration.
    from azure.cli.core.commands.client_factory import get_subscription_id
    SnapshotRecoverySource, SnapshotRestoreRequest = cmd.get_models('SnapshotRecoverySource', 'SnapshotRestoreRequest')
    client = web_client_factory(cmd.cli_ctx)
    recover_config = not restore_content_only
    if all([source_resource_group, source_name]):
        # Restore from source app to target app
        sub_id = get_subscription_id(cmd.cli_ctx)
        # Build the ARM resource ID of the source site (and slot, if given).
        source_id = "/subscriptions/" + sub_id + "/resourceGroups/" + source_resource_group + \
            "/providers/Microsoft.Web/sites/" + source_name
        if source_slot:
            source_id = source_id + "/slots/" + source_slot
        source = SnapshotRecoverySource(id=source_id)
        request = SnapshotRestoreRequest(overwrite=False, snapshot_time=time, recovery_source=source,
                                         recover_configuration=recover_config)
        if slot:
            return client.web_apps.restore_snapshot_slot(resource_group_name, name, request, slot)
        return client.web_apps.restore_snapshot(resource_group_name, name, request)
    if any([source_resource_group, source_name]):
        # Only one of the pair was supplied — reject rather than guess.
        raise CLIError('usage error: --source-resource-group and --source-name must both be specified if one is used')
    # Overwrite app with its own snapshot
    request = SnapshotRestoreRequest(overwrite=True, snapshot_time=time, recover_configuration=recover_config)
    if slot:
        return client.web_apps.restore_snapshot_slot(resource_group_name, name, request, slot)
    return client.web_apps.restore_snapshot(resource_group_name, name, request)
# pylint: disable=inconsistent-return-statements
def _create_db_setting(cmd, db_name, db_type, db_connection_string):
DatabaseBackupSetting = cmd.get_models('DatabaseBackupSetting')
if all([db_name, db_type, db_connection_string]):
return [DatabaseBackupSetting(database_type=db_type, name=db_name, connection_string=db_connection_string)]
if any([db_name, db_type, db_connection_string]):
raise CLIError('usage error: --db-name NAME --db-type TYPE --db-connection-string STRING')
def _parse_frequency(cmd, frequency):
FrequencyUnit = cmd.get_models('FrequencyUnit')
unit_part = frequency.lower()[-1]
if unit_part == 'd':
frequency_unit = FrequencyUnit.day
elif unit_part == 'h':
frequency_unit = FrequencyUnit.hour
else:
raise CLIError('Frequency must end with d or h for "day" or "hour"')
try:
frequency_num = int(frequency[:-1])
except ValueError:
raise CLIError('Frequency must start with a number')
if frequency_num < 0:
raise CLIError('Frequency must be positive')
return frequency_num, frequency_unit
def _get_location_from_resource_group(cli_ctx, resource_group_name):
    """Return the Azure location of the given resource group."""
    resource_client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES)
    return resource_client.resource_groups.get(resource_group_name).location
def _get_location_from_webapp(client, resource_group_name, webapp):
webapp = client.web_apps.get(resource_group_name, webapp)
if not webapp:
raise CLIError("'{}' app doesn't exist".format(webapp))
return webapp.location
def _get_deleted_apps_locations(cli_ctx):
    """Return the locations where the 'deletedSites' resource type is available."""
    resource_client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES)
    provider = resource_client.providers.get('Microsoft.Web')
    for resource_type in provider.resource_types:
        if resource_type.resource_type == 'deletedSites':
            return resource_type.locations
    return []
def _get_local_git_url(cli_ctx, client, resource_group_name, name, slot=None):
    """Build the authenticated local-git clone URL for a webapp (or slot)."""
    user = client.get_publishing_user()
    source_control = _generic_site_operation(cli_ctx, resource_group_name, name, 'get_source_control', slot)
    parsed = urlparse(source_control.repo_url)
    # Embed the publishing user so 'git clone' does not prompt for a username.
    return '{}://{}@{}/{}.git'.format(parsed.scheme, user.publishing_user_name,
                                      parsed.netloc, name)
def _get_scm_url(cmd, resource_group_name, name, slot=None):
    """Return the Kudu (SCM) site URL of a webapp (or slot)."""
    from azure.mgmt.web.models import HostType
    webapp = show_webapp(cmd, resource_group_name, name, slot=slot)
    repo_hosts = [h for h in webapp.host_name_ssl_states or []
                  if h.host_type == HostType.repository]
    if repo_hosts:
        return "https://{}".format(repo_hosts[0].name)
    # this should not happen, but throw anyway
    raise ValueError('Failed to retrieve Scm Uri')
def get_publishing_user(cmd):
    """Show the subscription-wide deployment (publishing) user."""
    return web_client_factory(cmd.cli_ctx).get_publishing_user()
def set_deployment_user(cmd, user_name, password=None):
    """Update the deployment credentials.

    Note: deployment credentials are subscription-wide, so every webapp in the
    subscription is affected.
    """
    User = cmd.get_models('User')
    client = web_client_factory(cmd.cli_ctx)
    if password is None:
        try:
            password = prompt_pass(msg='Password: ', confirm=True)
        except NoTTYException:
            raise CLIError('Please specify both username and password in non-interactive mode.')
    user = User(publishing_user_name=user_name)
    user.publishing_password = password
    return client.update_publishing_user(user)
def list_publishing_credentials(cmd, resource_group_name, name, slot=None):
    """Return the publishing (deployment) credentials of a webapp (or slot)."""
    poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                    'begin_list_publishing_credentials', slot)
    return poller.result()
def list_publish_profiles(cmd, resource_group_name, name, slot=None, xml=False):
    """Get the publishing profiles of a webapp (or slot).

    Returns a list of dicts by default; with xml=True returns the raw XML and
    forces 'tsv' output so the document is not mangled by the formatter.
    """
    import xmltodict
    content = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                      'list_publishing_profile_xml_with_secrets', slot, {"format": "WebDeploy"})
    full_xml = ''.join(chunk.decode() for chunk in content)
    if xml:
        cmd.cli_ctx.invocation.data['output'] = 'tsv'
        return full_xml
    profiles = xmltodict.parse(full_xml, xml_attribs=True)['publishData']['publishProfile']
    if not isinstance(profiles, list):
        profiles = [profiles]
    # xmltodict prefixes XML attributes with '@'; strip it for friendlier keys.
    return [{key.lstrip('@'): profile[key] for key in profile} for profile in profiles]
def enable_cd(cmd, resource_group_name, name, enable, slot=None):
    """Toggle continuous deployment for a container webapp via the DOCKER_ENABLE_CI app setting."""
    update_app_settings(cmd, resource_group_name, name, ["DOCKER_ENABLE_CI=" + enable], slot)
    return show_container_cd_url(cmd, resource_group_name, name, slot)
def show_container_cd_url(cmd, resource_group_name, name, slot=None):
    """Show the CI/CD webhook URL of a container webapp (set only when DOCKER_ENABLE_CI is 'true')."""
    app_settings = get_app_settings(cmd, resource_group_name, name, slot)
    docker_enabled = any(s['name'] == 'DOCKER_ENABLE_CI' and s['value'] == 'true'
                         for s in app_settings)
    cd_settings = {'DOCKER_ENABLE_CI': docker_enabled}
    if docker_enabled:
        credentials = list_publishing_credentials(cmd, resource_group_name, name, slot)
        if credentials:
            cd_settings['CI_CD_URL'] = credentials.scm_uri + '/docker/hook'
        else:
            cd_settings['CI_CD_URL'] = ''
    return cd_settings
def view_in_browser(cmd, resource_group_name, name, slot=None, logs=False):
    """Open the webapp (or slot) in the default browser, optionally tailing its log stream."""
    open_page_in_browser(_get_url(cmd, resource_group_name, name, slot))
    if logs:
        get_streaming_log(cmd, resource_group_name, name, provider=None, slot=slot)
def _get_url(cmd, resource_group_name, name, slot=None):
    """Return the browsable URL of a webapp (or slot), preferring https when SSL is enabled."""
    SslState = cmd.get_models('SslState')
    site = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
    if not site:
        raise CLIError("'{}' app doesn't exist".format(name))
    # picks the custom domain URL incase a domain is assigned
    host = site.enabled_host_names[0]
    uses_ssl = any(h.ssl_state != SslState.disabled for h in site.host_name_ssl_states)
    return ('https' if uses_ssl else 'http') + '://' + host
# TODO: expose new blob support
def config_diagnostics(cmd, resource_group_name, name, level=None,
                       application_logging=None, web_server_logging=None,
                       docker_container_logging=None, detailed_error_messages=None,
                       failed_request_tracing=None, slot=None):
    # Configure diagnostic logging for a webapp (or slot): application logs,
    # web-server/container logs, detailed error pages and failed-request tracing.
    # Options left as None are not touched on the service side.
    from azure.mgmt.web.models import (FileSystemApplicationLogsConfig, ApplicationLogsConfig,
                                       AzureBlobStorageApplicationLogsConfig, SiteLogsConfig,
                                       HttpLogsConfig, FileSystemHttpLogsConfig,
                                       EnabledConfig)
    client = web_client_factory(cmd.cli_ctx)
    # TODO: ensure we call get_site only once
    site = client.web_apps.get(resource_group_name, name)
    if not site:
        raise CLIError("'{}' app doesn't exist".format(name))
    location = site.location
    application_logs = None
    if application_logging:
        fs_log = None
        blob_log = None
        # 'off' forces level to False (disabled); an unspecified level defaults to True.
        level = level if application_logging != 'off' else False
        level = True if level is None else level
        if application_logging in ['filesystem', 'off']:
            fs_log = FileSystemApplicationLogsConfig(level=level)
        if application_logging in ['azureblobstorage', 'off']:
            blob_log = AzureBlobStorageApplicationLogsConfig(level=level, retention_in_days=3,
                                                             sas_url=None)
        application_logs = ApplicationLogsConfig(file_system=fs_log,
                                                 azure_blob_storage=blob_log)
    http_logs = None
    server_logging_option = web_server_logging or docker_container_logging
    if server_logging_option:
        # TODO: az blob storage log config currently not in use, will be implemented later.
        # Tracked as Issue: #4764 on Github
        filesystem_log_config = None
        turned_on = server_logging_option != 'off'
        if server_logging_option in ['filesystem', 'off']:
            # 100 mb max log size, retention lasts 3 days. Yes we hard code it, portal does too
            filesystem_log_config = FileSystemHttpLogsConfig(retention_in_mb=100, retention_in_days=3,
                                                             enabled=turned_on)
        http_logs = HttpLogsConfig(file_system=filesystem_log_config, azure_blob_storage=None)
    # Tri-state flags: None means "leave unchanged", so only build a config when set.
    detailed_error_messages_logs = (None if detailed_error_messages is None
                                    else EnabledConfig(enabled=detailed_error_messages))
    failed_request_tracing_logs = (None if failed_request_tracing is None
                                   else EnabledConfig(enabled=failed_request_tracing))
    site_log_config = SiteLogsConfig(location=location,
                                     application_logs=application_logs,
                                     http_logs=http_logs,
                                     failed_requests_tracing=failed_request_tracing_logs,
                                     detailed_error_messages=detailed_error_messages_logs)
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_diagnostic_logs_config',
                                   slot, site_log_config)
def show_diagnostic_settings(cmd, resource_group_name, name, slot=None):
    """Return the diagnostic-logs configuration of a webapp (or slot)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                   'get_diagnostic_logs_configuration', slot)
def show_deployment_log(cmd, resource_group, name, slot=None, deployment_id=None):
    # Fetch the Kudu deployment log for one deployment. When no deployment_id is
    # given, the latest deployment (by start_time) is used. Returns the parsed
    # JSON log, or [] when no deployment/log URL could be resolved.
    import urllib3
    import requests
    scm_url = _get_scm_url(cmd, resource_group, name, slot)
    username, password = _get_site_credential(cmd.cli_ctx, resource_group, name, slot)
    # Kudu uses HTTP basic auth with the site-level publishing credentials.
    headers = urllib3.util.make_headers(basic_auth='{}:{}'.format(username, password))
    deployment_log_url = ''
    if deployment_id:
        deployment_log_url = '{}/api/deployments/{}/log'.format(scm_url, deployment_id)
    else:
        # No id given: list all deployments and pick the most recent one's log URL.
        deployments_url = '{}/api/deployments/'.format(scm_url)
        response = requests.get(deployments_url, headers=headers)
        if response.status_code != 200:
            raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
                deployments_url, response.status_code, response.reason))
        sorted_logs = sorted(
            response.json(),
            key=lambda x: x['start_time'],
            reverse=True
        )
        if sorted_logs and sorted_logs[0]:
            deployment_log_url = sorted_logs[0].get('log_url', '')
    if deployment_log_url:
        response = requests.get(deployment_log_url, headers=headers)
        if response.status_code != 200:
            raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
                deployment_log_url, response.status_code, response.reason))
        return response.json()
    return []
def list_deployment_logs(cmd, resource_group, name, slot=None):
    """List the deployments recorded by the Kudu (SCM) site of a webapp (or slot).

    Returns the parsed JSON list, or [] when the response body is empty.
    Fix: the failure message now reports the URL that was actually queried
    (the deployments endpoint) instead of the bare SCM URL, matching
    show_deployment_log's error reporting.
    """
    import urllib3
    import requests
    scm_url = _get_scm_url(cmd, resource_group, name, slot)
    deployment_log_url = '{}/api/deployments/'.format(scm_url)
    username, password = _get_site_credential(cmd.cli_ctx, resource_group, name, slot)
    # Kudu uses HTTP basic auth with the site-level publishing credentials.
    headers = urllib3.util.make_headers(basic_auth='{}:{}'.format(username, password))
    response = requests.get(deployment_log_url, headers=headers)
    if response.status_code != 200:
        raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
            deployment_log_url, response.status_code, response.reason))
    return response.json() or []
def config_slot_auto_swap(cmd, resource_group_name, webapp, slot, auto_swap_slot=None, disable=None):
    """Enable or disable auto-swap on a deployment slot (target defaults to 'production')."""
    client = web_client_factory(cmd.cli_ctx)
    site_config = client.web_apps.get_configuration_slot(resource_group_name, webapp, slot)
    if disable:
        site_config.auto_swap_slot_name = ''
    else:
        site_config.auto_swap_slot_name = auto_swap_slot or 'production'
    return client.web_apps.update_configuration_slot(resource_group_name, webapp, site_config, slot)
def list_slots(cmd, resource_group_name, webapp):
    """List the deployment slots of a webapp with flattened names and the plan name attached."""
    client = web_client_factory(cmd.cli_ctx)
    result = []
    for slot in client.web_apps.list_slots(resource_group_name, webapp):
        # Slot names come back as 'site/slot'; keep only the slot part.
        slot.name = slot.name.split('/')[-1]
        setattr(slot, 'app_service_plan', parse_resource_id(slot.server_farm_id)['name'])
        del slot.server_farm_id
        result.append(slot)
    return result
def swap_slot(cmd, resource_group_name, webapp, slot, target_slot=None, preserve_vnet=None, action='swap'):
    """Swap, preview-swap, or reset a deployment slot (target defaults to 'production')."""
    client = web_client_factory(cmd.cli_ctx)
    # preserve_vnet arrives as the string 'true'/'false' (default 'true' when None);
    # convert it to a real boolean for the request entity.
    preserve_vnet_flag = (preserve_vnet if preserve_vnet is not None else 'true') == 'true'
    CsmSlotEntity = cmd.get_models('CsmSlotEntity')
    swap_entity = CsmSlotEntity(target_slot=target_slot or 'production', preserve_vnet=preserve_vnet_flag)
    if action == 'swap':
        return client.web_apps.begin_swap_slot(resource_group_name, webapp, slot, swap_entity)
    if action == 'preview':
        if slot is None:
            return client.web_apps.apply_slot_config_to_production(resource_group_name, webapp, swap_entity)
        return client.web_apps.apply_slot_configuration_slot(resource_group_name, webapp, slot, swap_entity)
    # Any other action resets the previewed configuration on both sides of the swap.
    if target_slot is None:
        client.web_apps.reset_production_slot_config(resource_group_name, webapp)
    else:
        client.web_apps.reset_slot_configuration_slot(resource_group_name, webapp, target_slot)
    return None
def delete_slot(cmd, resource_group_name, webapp, slot):
    """Delete a deployment slot of a webapp."""
    # TODO: once swagger finalized, expose other parameters like: delete_all_slots, etc...
    web_client_factory(cmd.cli_ctx).web_apps.delete_slot(resource_group_name, webapp, slot)
def set_traffic_routing(cmd, resource_group_name, name, distribution):
    """Configure routing-in-production ramp-up rules from 'slot=percentage' pairs."""
    RampUpRule = cmd.get_models('RampUpRule')
    client = web_client_factory(cmd.cli_ctx)
    site = client.web_apps.get(resource_group_name, name)
    if not site:
        raise CLIError("'{}' app doesn't exist".format(name))
    configs = get_site_configs(cmd, resource_group_name, name)
    # Split 'myapp.azurewebsites.net' into the site part and the domain suffix so
    # each slot's host can be derived as '<site>-<slot><suffix>'.
    site_part, domain_part = site.default_host_name.split('.', 1)
    suffix = '.' + domain_part
    rules = []
    for entry in distribution:
        slot, percentage = entry.split('=')
        rules.append(RampUpRule(action_host_name=site_part + "-" + slot + suffix,
                                reroute_percentage=float(percentage),
                                name=slot))
    configs.experiments.ramp_up_rules = rules
    _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', None, configs)
    return configs.experiments.ramp_up_rules
def show_traffic_routing(cmd, resource_group_name, name):
    """Show the current routing-in-production rules of a webapp."""
    return get_site_configs(cmd, resource_group_name, name).experiments.ramp_up_rules
def clear_traffic_routing(cmd, resource_group_name, name):
    """Remove all routing-in-production rules by applying an empty distribution."""
    set_traffic_routing(cmd, resource_group_name, name, [])
def add_cors(cmd, resource_group_name, name, allowed_origins, slot=None):
    """Append origins to the webapp's CORS allowed-origins list."""
    from azure.mgmt.web.models import CorsSettings
    configs = get_site_configs(cmd, resource_group_name, name, slot)
    if not configs.cors:
        configs.cors = CorsSettings()
    existing = configs.cors.allowed_origins or []
    configs.cors.allowed_origins = existing + allowed_origins
    updated = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)
    return updated.cors
def remove_cors(cmd, resource_group_name, name, allowed_origins, slot=None):
    """Remove the given origins from the webapp's CORS list (all origins when none given)."""
    configs = get_site_configs(cmd, resource_group_name, name, slot)
    if configs.cors:
        if not allowed_origins:
            configs.cors.allowed_origins = []
        else:
            remaining = [origin for origin in (configs.cors.allowed_origins or [])
                         if origin not in allowed_origins]
            configs.cors.allowed_origins = remaining
        configs = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)
    return configs.cors
def show_cors(cmd, resource_group_name, name, slot=None):
    """Show the CORS settings of a webapp (or slot)."""
    return get_site_configs(cmd, resource_group_name, name, slot).cors
def get_streaming_log(cmd, resource_group_name, name, provider=None, slot=None):
    """Stream live logs from the Kudu '/logstream' endpoint until interrupted with ctrl+c."""
    scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
    streaming_url = scm_url + '/logstream'
    if provider:
        streaming_url += ('/' + provider.lstrip('/'))
    user, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
    worker = threading.Thread(target=_get_log, args=(streaming_url, user, password))
    worker.daemon = True
    worker.start()
    # Keep the main thread alive; sleeping lets ctrl+c interrupt the command.
    while True:
        time.sleep(100)
def download_historical_logs(cmd, resource_group_name, name, log_file=None, slot=None):
    """Download the webapp's historical logs from the Kudu '/dump' endpoint."""
    scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
    dump_url = scm_url.rstrip('/') + '/dump'
    user_name, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
    _get_log(dump_url, user_name, password, log_file)
    logger.warning('Downloaded logs to %s', log_file)
def _get_site_credential(cli_ctx, resource_group_name, name, slot=None):
    """Return (username, password) publishing credentials for a webapp (or slot)."""
    poller = _generic_site_operation(cli_ctx, resource_group_name, name, 'begin_list_publishing_credentials', slot)
    creds = poller.result()
    return (creds.publishing_user_name, creds.publishing_password)
def _get_log(url, user_name, password, log_file=None):
    # Fetch a Kudu log URL with basic auth. With log_file set, download the body
    # to that file; otherwise stream it line-by-line to the console (used by
    # get_streaming_log, which runs this in a daemon thread).
    import certifi
    import urllib3
    try:
        import urllib3.contrib.pyopenssl
        urllib3.contrib.pyopenssl.inject_into_urllib3()
    except ImportError:
        pass
    # Verified HTTPS using certifi's CA bundle.
    http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where())
    headers = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
    r = http.request(
        'GET',
        url,
        headers=headers,
        preload_content=False
    )
    if r.status != 200:
        raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
            url, r.status, r.reason))
    if log_file: # download logs
        with open(log_file, 'wb') as f:
            while True:
                data = r.read(1024)
                if not data:
                    break
                f.write(data)
    else: # streaming
        std_encoding = sys.stdout.encoding
        for chunk in r.stream():
            if chunk:
                # Extra encode() and decode for stdout which does not support 'utf-8'
                logger.warning(chunk.decode(encoding='utf-8', errors='replace')
                               .encode(std_encoding, errors='replace')
                               .decode(std_encoding, errors='replace')
                               .rstrip('\n\r')) # each line of log has CRLF.
    r.release_conn()
def upload_ssl_cert(cmd, resource_group_name, name, certificate_password, certificate_file, slot=None):
    """Upload a .pfx certificate and register it against the webapp's App Service plan.

    Fix: the certificate file is now read inside a context manager so the handle
    is always closed; the original opened it and never closed it.
    """
    Certificate = cmd.get_models('Certificate')
    client = web_client_factory(cmd.cli_ctx)
    webapp = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
    with open(certificate_file, 'rb') as cert_file:
        cert_contents = cert_file.read()
    hosting_environment_profile_param = (webapp.hosting_environment_profile.name
                                         if webapp.hosting_environment_profile else '')
    # Name the certificate resource deterministically from its thumbprint and context.
    thumb_print = _get_cert(certificate_password, certificate_file)
    cert_name = _generate_cert_name(thumb_print, hosting_environment_profile_param,
                                    webapp.location, resource_group_name)
    cert = Certificate(password=certificate_password, pfx_blob=cert_contents,
                       location=webapp.location, server_farm_id=webapp.server_farm_id)
    return client.certificates.create_or_update(resource_group_name, cert_name, cert)
def _generate_cert_name(thumb_print, hosting_environment, location, resource_group_name):
return "%s_%s_%s_%s" % (thumb_print, hosting_environment, location, resource_group_name)
def _get_cert(certificate_password, certificate_file):
    """Decrypt the .pfx file and return the SHA-1 thumbprint of its certificate.

    Fix: the file is now read inside a 'with' block so the handle is closed
    deterministically; the original opened it inline and never closed it.
    """
    with open(certificate_file, 'rb') as f:
        pfx_data = f.read()
    p12 = OpenSSL.crypto.load_pkcs12(pfx_data, certificate_password)
    cert = p12.get_certificate()
    digest_algorithm = 'sha1'
    # digest() returns e.g. b'AB:CD:...'; drop the separators for the bare thumbprint.
    thumbprint = cert.digest(digest_algorithm).decode("utf-8").replace(':', '')
    return thumbprint
def list_ssl_certs(cmd, resource_group_name):
    """List the App Service certificates in a resource group."""
    return web_client_factory(cmd.cli_ctx).certificates.list_by_resource_group(resource_group_name)
def show_ssl_cert(cmd, resource_group_name, certificate_name):
    """Show a single App Service certificate by its resource name."""
    return web_client_factory(cmd.cli_ctx).certificates.get(resource_group_name, certificate_name)
def delete_ssl_cert(cmd, resource_group_name, certificate_thumbprint):
    """Delete the first certificate in the resource group matching the given thumbprint."""
    client = web_client_factory(cmd.cli_ctx)
    matching = (c for c in client.certificates.list_by_resource_group(resource_group_name)
                if c.thumbprint == certificate_thumbprint)
    cert = next(matching, None)
    if cert is not None:
        return client.certificates.delete(resource_group_name, cert.name)
    raise CLIError("Certificate for thumbprint '{}' not found".format(certificate_thumbprint))
def import_ssl_cert(cmd, resource_group_name, name, key_vault, key_vault_certificate_name):
    # Import a Key Vault certificate into App Service as a webapp certificate.
    # `key_vault` may be a vault name (resolved within the current subscription)
    # or a full vault resource ID (required for cross-subscription vaults).
    Certificate = cmd.get_models('Certificate')
    client = web_client_factory(cmd.cli_ctx)
    webapp = client.web_apps.get(resource_group_name, name)
    if not webapp:
        raise CLIError("'{}' app doesn't exist in resource group {}".format(name, resource_group_name))
    server_farm_id = webapp.server_farm_id
    location = webapp.location
    kv_id = None
    if not is_valid_resource_id(key_vault):
        # Treat the argument as a vault name; search the current subscription for it.
        kv_client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_KEYVAULT)
        key_vaults = kv_client.vaults.list_by_subscription()
        for kv in key_vaults:
            if key_vault == kv.name:
                kv_id = kv.id
                break
    else:
        kv_id = key_vault
    if kv_id is None:
        # Vault not found: warn (with the exact command to use a full ID) and bail out.
        kv_msg = 'The Key Vault {0} was not found in the subscription in context. ' \
                 'If your Key Vault is in a different subscription, please specify the full Resource ID: ' \
                 '\naz .. ssl import -n {1} -g {2} --key-vault-certificate-name {3} ' \
                 '--key-vault /subscriptions/[sub id]/resourceGroups/[rg]/providers/Microsoft.KeyVault/' \
                 'vaults/{0}'.format(key_vault, name, resource_group_name, key_vault_certificate_name)
        logger.warning(kv_msg)
        return
    kv_id_parts = parse_resource_id(kv_id)
    kv_name = kv_id_parts['name']
    kv_resource_group_name = kv_id_parts['resource_group']
    kv_subscription = kv_id_parts['subscription']
    # If in the public cloud, check if certificate is an app service certificate, in the same or a diferent
    # subscription
    kv_secret_name = None
    cloud_type = cmd.cli_ctx.cloud.name
    from azure.cli.core.commands.client_factory import get_subscription_id
    subscription_id = get_subscription_id(cmd.cli_ctx)
    if cloud_type.lower() == PUBLIC_CLOUD.lower():
        if kv_subscription.lower() != subscription_id.lower():
            diff_subscription_client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_APPSERVICE,
                                                               subscription_id=kv_subscription)
            ascs = diff_subscription_client.app_service_certificate_orders.list()
        else:
            ascs = client.app_service_certificate_orders.list()
        kv_secret_name = None
        for asc in ascs:
            if asc.name == key_vault_certificate_name:
                # App Service certificates store their payload under a vault secret name.
                kv_secret_name = asc.certificates[key_vault_certificate_name].key_vault_secret_name
    # if kv_secret_name is not populated, it is not an appservice certificate, proceed for KV certificates
    if not kv_secret_name:
        kv_secret_name = key_vault_certificate_name
    cert_name = '{}-{}-{}'.format(resource_group_name, kv_name, key_vault_certificate_name)
    lnk = 'https://azure.github.io/AppService/2016/05/24/Deploying-Azure-Web-App-Certificate-through-Key-Vault.html'
    lnk_msg = 'Find more details here: {}'.format(lnk)
    # Best-effort permission check; a failed check only warns, it does not abort.
    if not _check_service_principal_permissions(cmd, kv_resource_group_name, kv_name, kv_subscription):
        logger.warning('Unable to verify Key Vault permissions.')
        logger.warning('You may need to grant Microsoft.Azure.WebSites service principal the Secret:Get permission')
        logger.warning(lnk_msg)
    kv_cert_def = Certificate(location=location, key_vault_id=kv_id, password='',
                              key_vault_secret_name=kv_secret_name, server_farm_id=server_farm_id)
    return client.certificates.create_or_update(name=cert_name, resource_group_name=resource_group_name,
                                                certificate_envelope=kv_cert_def)
def create_managed_ssl_cert(cmd, resource_group_name, name, hostname, slot=None):
    # Create a free App Service managed certificate for a custom hostname that is
    # already bound to the webapp (or slot). Not available on Free/Shared plans.
    Certificate = cmd.get_models('Certificate')
    hostname = hostname.lower()
    client = web_client_factory(cmd.cli_ctx)
    webapp = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
    if not webapp:
        slot_text = "Deployment slot {} in ".format(slot) if slot else ''
        raise CLIError("{0}app {1} doesn't exist in resource group {2}".format(slot_text, name, resource_group_name))
    parsed_plan_id = parse_resource_id(webapp.server_farm_id)
    plan_info = client.app_service_plans.get(parsed_plan_id['resource_group'], parsed_plan_id['name'])
    if plan_info.sku.tier.upper() == 'FREE' or plan_info.sku.tier.upper() == 'SHARED':
        raise CLIError('Managed Certificate is not supported on Free and Shared tier.')
    # The hostname must already be registered on the app before a cert can be issued.
    if not _verify_hostname_binding(cmd, resource_group_name, name, hostname, slot):
        slot_text = " --slot {}".format(slot) if slot else ""
        raise CLIError("Hostname (custom domain) '{0}' is not registered with {1}. "
                       "Use 'az webapp config hostname add --resource-group {2} "
                       "--webapp-name {1}{3} --hostname {0}' "
                       "to register the hostname.".format(hostname, name, resource_group_name, slot_text))
    server_farm_id = webapp.server_farm_id
    location = webapp.location
    easy_cert_def = Certificate(location=location, canonical_name=hostname,
                                server_farm_id=server_farm_id, password='')
    # TODO: Update manual polling to use LongRunningOperation once backend API & new SDK supports polling
    try:
        return client.certificates.create_or_update(name=hostname, resource_group_name=resource_group_name,
                                                    certificate_envelope=easy_cert_def)
    except Exception as ex:
        # A 202 with a Location header means creation is in progress; poll it
        # manually for up to 2 minutes before giving up with a hint.
        poll_url = ex.response.headers['Location'] if 'Location' in ex.response.headers else None
        if ex.response.status_code == 202 and poll_url:
            r = send_raw_request(cmd.cli_ctx, method='get', url=poll_url)
            poll_timeout = time.time() + 60 * 2 # 2 minute timeout
            while r.status_code != 200 and time.time() < poll_timeout:
                time.sleep(5)
                r = send_raw_request(cmd.cli_ctx, method='get', url=poll_url)
            if r.status_code == 200:
                try:
                    return r.json()
                except ValueError:
                    # Body was not JSON; return it verbatim.
                    return r.text
            logger.warning("Managed Certificate creation in progress. Please use the command "
                           "'az webapp config ssl show -g %s --certificate-name %s' "
                           " to view your certificate once it is created", resource_group_name, hostname)
            return
        raise CLIError(ex)
def _check_service_principal_permissions(cmd, resource_group_name, key_vault_name, key_vault_subscription):
    # Best-effort check that the Microsoft.Azure.WebSites service principal has
    # the Secret:Get access policy on the vault. Returns False when it cannot be
    # verified (including the cross-subscription case), not only when it is absent.
    from azure.cli.command_modules.role._client_factory import _graph_client_factory
    from azure.graphrbac.models import GraphErrorException
    from azure.cli.core.commands.client_factory import get_subscription_id
    subscription = get_subscription_id(cmd.cli_ctx)
    # Cannot check if key vault is in another subscription
    if subscription != key_vault_subscription:
        return False
    kv_client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_KEYVAULT)
    vault = kv_client.vaults.get(resource_group_name=resource_group_name, vault_name=key_vault_name)
    # Check for Microsoft.Azure.WebSites app registration
    AZURE_PUBLIC_WEBSITES_APP_ID = 'abfa0a7c-a6b6-4736-8310-5855508787cd'
    AZURE_GOV_WEBSITES_APP_ID = '6a02c803-dafd-4136-b4c3-5a6f318b4714'
    graph_sp_client = _graph_client_factory(cmd.cli_ctx).service_principals
    for policy in vault.properties.access_policies:
        try:
            sp = graph_sp_client.get(policy.object_id)
            if sp.app_id == AZURE_PUBLIC_WEBSITES_APP_ID or sp.app_id == AZURE_GOV_WEBSITES_APP_ID:
                for perm in policy.permissions.secrets:
                    # NOTE(review): exact-case comparison; 'get' would not match — confirm the
                    # service always reports the permission as 'Get'.
                    if perm == "Get":
                        return True
        except GraphErrorException:
            pass # Lookup will fail for non service principals (users, groups, etc.)
    return False
def _update_host_name_ssl_state(cmd, resource_group_name, webapp_name, webapp,
                                host_name, ssl_state, thumbprint, slot=None):
    """Apply an SSL binding state to a single host name on a webapp (or slot)."""
    Site, HostNameSslState = cmd.get_models('Site', 'HostNameSslState')
    ssl_binding = HostNameSslState(name=host_name,
                                   ssl_state=ssl_state,
                                   thumbprint=thumbprint,
                                   to_update=True)
    updated_site = Site(host_name_ssl_states=[ssl_binding],
                        location=webapp.location, tags=webapp.tags)
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'begin_create_or_update',
                                   slot, updated_site)
def _update_ssl_binding(cmd, resource_group_name, name, certificate_thumbprint, ssl_type, slot=None):
    """Find the certificate by thumbprint and apply the SSL state to every hostname it covers."""
    client = web_client_factory(cmd.cli_ctx)
    webapp = client.web_apps.get(resource_group_name, name)
    if not webapp:
        raise ResourceNotFoundError("'{}' app doesn't exist".format(name))

    def _find_cert(group_name):
        # Keep the last match, mirroring the historical scan behavior.
        match = None
        for cert in client.certificates.list_by_resource_group(group_name):
            if cert.thumbprint == certificate_thumbprint:
                match = cert
        return match

    # Certificates usually live in the plan's resource group; fall back to the app's group.
    found_cert = _find_cert(parse_resource_id(webapp.server_farm_id)['resource_group'])
    if not found_cert:
        found_cert = _find_cert(resource_group_name)
    if not found_cert:
        raise ResourceNotFoundError("Certificate for thumbprint '{}' not found.".format(certificate_thumbprint))
    # A single non-wildcard hostname can be bound directly.
    if len(found_cert.host_names) == 1 and not found_cert.host_names[0].startswith('*'):
        return _update_host_name_ssl_state(cmd, resource_group_name, name, webapp,
                                           found_cert.host_names[0], ssl_type,
                                           certificate_thumbprint, slot)
    query_result = list_hostnames(cmd, resource_group_name, name, slot)
    hostnames_in_webapp = [x.name.split('/')[-1] for x in query_result]
    for hostname in _match_host_names_from_cert(found_cert.host_names, hostnames_in_webapp):
        _update_host_name_ssl_state(cmd, resource_group_name, name, webapp,
                                    hostname, ssl_type, certificate_thumbprint, slot)
    return show_webapp(cmd, resource_group_name, name, slot)
def bind_ssl_cert(cmd, resource_group_name, name, certificate_thumbprint, ssl_type, slot=None):
    """Enable an SSL binding: 'SNI' maps to SNI-based, anything else to IP-based."""
    SslState = cmd.get_models('SslState')
    state = SslState.sni_enabled if ssl_type == 'SNI' else SslState.ip_based_enabled
    return _update_ssl_binding(cmd, resource_group_name, name, certificate_thumbprint, state, slot)
def unbind_ssl_cert(cmd, resource_group_name, name, certificate_thumbprint, slot=None):
    """Disable the SSL binding for the certificate with the given thumbprint."""
    SslState = cmd.get_models('SslState')
    return _update_ssl_binding(cmd, resource_group_name, name, certificate_thumbprint,
                               SslState.disabled, slot)
def _match_host_names_from_cert(hostnames_from_cert, hostnames_in_webapp):
# the goal is to match '*.foo.com' with host name like 'admin.foo.com', 'logs.foo.com', etc
matched = set()
for hostname in hostnames_from_cert:
if hostname.startswith('*'):
for h in hostnames_in_webapp:
if hostname[hostname.find('.'):] == h[h.find('.'):]:
matched.add(h)
elif hostname in hostnames_in_webapp:
matched.add(hostname)
return matched
# help class handles runtime stack in format like 'node|6.1', 'php|5.5'
class _StackRuntimeHelper:
    """Resolves runtime stacks given in forms like 'node|6.1' or 'php|5.5'.

    Stacks are loaded lazily from a bundled JSON file (RUNTIME_STACKS); each
    loaded entry gets a 'setter' callable that applies the stack's configuration
    onto a site_config object (either directly or via app settings).
    """

    def __init__(self, cmd, client, linux=False):
        self._cmd = cmd
        self._client = client
        self._linux = linux  # True -> Linux stacks, False -> Windows stacks
        self._stacks = []  # lazily populated cache; see _load_stacks_hardcoded

    @staticmethod
    def remove_delimiters(runtime):
        """Normalize a runtime spec to '|'-delimited form (e.g. 'php:5.5' -> 'php|5.5')."""
        import re
        # delimiters allowed: '|', ':'
        if '|' in runtime:
            runtime = re.split('[|]', runtime)
        elif ':' in runtime:
            runtime = re.split('[:]', runtime)
        else:
            runtime = [runtime]
        # filter(None, ...) drops empty pieces left by stray delimiters
        return '|'.join(filter(None, runtime))

    def resolve(self, display_name):
        """Return the stack whose displayName matches case-insensitively, or None."""
        self._load_stacks_hardcoded()
        return next((s for s in self._stacks if s['displayName'].lower() == display_name.lower()),
                    None)

    @property
    def stacks(self):
        # All known stacks for this OS; triggers the lazy load.
        self._load_stacks_hardcoded()
        return self._stacks

    @staticmethod
    def update_site_config(stack, site_config, cmd=None):
        """Copy each of the stack's config key/values onto site_config attributes."""
        for k, v in stack['configs'].items():
            setattr(site_config, k, v)
        return site_config

    @staticmethod
    def update_site_appsettings(cmd, stack, site_config):
        """Merge the stack's configs into site_config.app_settings (update existing or append)."""
        NameValuePair = cmd.get_models('NameValuePair')
        if site_config.app_settings is None:
            site_config.app_settings = []
        for k, v in stack['configs'].items():
            already_in_appsettings = False
            for app_setting in site_config.app_settings:
                if app_setting.name == k:
                    already_in_appsettings = True
                    app_setting.value = v
            if not already_in_appsettings:
                site_config.app_settings.append(NameValuePair(name=k, value=v))
        return site_config

    def _load_stacks_hardcoded(self):
        """Populate self._stacks from the bundled JSON file (no-op if already loaded)."""
        if self._stacks:
            return
        result = []
        if self._linux:
            result = get_file_json(RUNTIME_STACKS)['linux']
            for r in result:
                r['setter'] = _StackRuntimeHelper.update_site_config
        else:  # Windows stacks
            result = get_file_json(RUNTIME_STACKS)['windows']
            for r in result:
                # node is configured via app settings; everything else via site config
                r['setter'] = (_StackRuntimeHelper.update_site_appsettings if 'node' in
                               r['displayName'] else _StackRuntimeHelper.update_site_config)
        self._stacks = result

    # Currently using hardcoded values instead of this function. This function calls the stacks API;
    # Stacks API is updated with Antares deployments,
    # which are infrequent and don't line up with stacks EOL schedule.
    def _load_stacks(self):
        """Populate self._stacks from the live provider stacks API (currently unused)."""
        if self._stacks:
            return
        os_type = ('Linux' if self._linux else 'Windows')
        raw_stacks = self._client.provider.get_available_stacks(os_type_selected=os_type, raw=True)
        bytes_value = raw_stacks._get_next().content  # pylint: disable=protected-access
        json_value = bytes_value.decode('utf8')
        json_stacks = json.loads(json_value)
        stacks = json_stacks['value']
        result = []
        if self._linux:
            for properties in [(s['properties']) for s in stacks]:
                for major in properties['majorVersions']:
                    # prefer the default minor's runtimeVersion when one exists
                    default_minor = next((m for m in (major['minorVersions'] or []) if m['isDefault']),
                                         None)
                    result.append({
                        'displayName': (default_minor['runtimeVersion']
                                        if default_minor else major['runtimeVersion'])
                    })
        else:  # Windows stacks
            # maps the stack name to the site_config field (or app setting) that selects its version
            config_mappings = {
                'node': 'WEBSITE_NODE_DEFAULT_VERSION',
                'python': 'python_version',
                'php': 'php_version',
                'aspnet': 'net_framework_version'
            }
            # get all stack version except 'java'
            for stack in stacks:
                if stack['name'] not in config_mappings:
                    continue
                name, properties = stack['name'], stack['properties']
                for major in properties['majorVersions']:
                    default_minor = next((m for m in (major['minorVersions'] or []) if m['isDefault']),
                                         None)
                    result.append({
                        'displayName': name + '|' + major['displayVersion'],
                        'configs': {
                            config_mappings[name]: (default_minor['runtimeVersion']
                                                    if default_minor else major['runtimeVersion'])
                        }
                    })
            # deal with java, which pairs with java container version
            java_stack = next((s for s in stacks if s['name'] == 'java'))
            java_container_stack = next((s for s in stacks if s['name'] == 'javaContainers'))
            for java_version in java_stack['properties']['majorVersions']:
                for fx in java_container_stack['properties']['frameworks']:
                    for fx_version in fx['majorVersions']:
                        # cross product: every java version with every container framework version
                        result.append({
                            'displayName': 'java|{}|{}|{}'.format(java_version['displayVersion'],
                                                                  fx['display'],
                                                                  fx_version['displayVersion']),
                            'configs': {
                                'java_version': java_version['runtimeVersion'],
                                'java_container': fx['name'],
                                'java_container_version': fx_version['runtimeVersion']
                            }
                        })
        for r in result:
            # node is configured via app settings; everything else via site config
            r['setter'] = (_StackRuntimeHelper.update_site_appsettings if 'node' in
                           r['displayName'] else _StackRuntimeHelper.update_site_config)
        self._stacks = result
def get_app_insights_key(cli_ctx, resource_group, name):
    """Fetch the instrumentation key of an Application Insights component, or raise CLIError."""
    client = get_mgmt_service_client(cli_ctx, ApplicationInsightsManagementClient)
    component = client.components.get(resource_group, name)
    if component is None or component.instrumentation_key is None:
        raise CLIError("App Insights {} under resource group {} was not found.".format(name, resource_group))
    return component.instrumentation_key
def create_functionapp_app_service_plan(cmd, resource_group_name, name, is_linux, sku,
                                        number_of_workers=None, max_burst=None, location=None, tags=None):
    """Create (or update) an App Service plan for hosting function apps."""
    SkuDescription, AppServicePlan = cmd.get_models('SkuDescription', 'AppServicePlan')
    sku = _normalize_sku(sku)
    tier = get_sku_name(sku)
    if max_burst is not None:
        # Bursting beyond the worker count is an Elastic Premium-only capability.
        if tier.lower() != "elasticpremium":
            raise CLIError("Usage error: --max-burst is only supported for Elastic Premium (EP) plans")
        max_burst = validate_range_of_int_flag('--max-burst', max_burst, min_val=0, max_val=20)
    if number_of_workers is not None:
        number_of_workers = validate_range_of_int_flag('--number-of-workers / --min-elastic-worker-count',
                                                       number_of_workers, min_val=0, max_val=20)
    client = web_client_factory(cmd.cli_ctx)
    if location is None:
        location = _get_location_from_resource_group(cmd.cli_ctx, resource_group_name)
    plan_def = AppServicePlan(location=location, tags=tags,
                              sku=SkuDescription(tier=tier, name=sku, capacity=number_of_workers),
                              reserved=(is_linux or None), maximum_elastic_worker_count=max_burst,
                              hyper_v=None, name=name)
    return client.app_service_plans.begin_create_or_update(resource_group_name, name, plan_def)
def is_plan_consumption(cmd, plan_info):
    """True when the plan is a dynamic (consumption) plan."""
    SkuDescription, AppServicePlan = cmd.get_models('SkuDescription', 'AppServicePlan')
    if not isinstance(plan_info, AppServicePlan):
        return False
    if not isinstance(plan_info.sku, SkuDescription):
        return False
    return plan_info.sku.tier.lower() == 'dynamic'
def is_plan_elastic_premium(cmd, plan_info):
    """True when the plan is an Elastic Premium (EP*) plan.

    The SKU tier is compared case-insensitively for robustness, matching the
    convention used by is_plan_consumption; the service reports the tier as
    'ElasticPremium'.
    """
    SkuDescription, AppServicePlan = cmd.get_models('SkuDescription', 'AppServicePlan')
    if isinstance(plan_info, AppServicePlan):
        if isinstance(plan_info.sku, SkuDescription):
            return plan_info.sku.tier.lower() == 'elasticpremium'
    return False
def validate_and_convert_to_int(flag, val):
    """Convert the raw value supplied for a CLI flag to int.

    :param flag: flag name used in the error message (e.g. '--max-burst').
    :param val: raw value (usually a string) to convert.
    :return: the value as an int.
    :raises CLIError: when the value is not a valid integer.
    """
    try:
        return int(val)
    except ValueError as ex:
        # Chain the original ValueError so tracebacks keep the root cause.
        raise CLIError("Usage error: {} is expected to have an int value.".format(flag)) from ex
def validate_range_of_int_flag(flag_name, value, min_val, max_val):
    """Parse a flag value as int and enforce an inclusive [min_val, max_val] range."""
    value = validate_and_convert_to_int(flag_name, value)
    if not min_val <= value <= max_val:
        raise CLIError("Usage error: {} is expected to be between {} and {} (inclusive)".format(flag_name, min_val,
                                                                                               max_val))
    return value
def create_function(cmd, resource_group_name, name, storage_account, plan=None,
                    os_type=None, functions_version=None, runtime=None, runtime_version=None,
                    consumption_plan_location=None, app_insights=None, app_insights_key=None,
                    disable_app_insights=None, deployment_source_url=None,
                    deployment_source_branch='master', deployment_local_git=None,
                    docker_registry_server_password=None, docker_registry_server_user=None,
                    deployment_container_image_name=None, tags=None, assign_identities=None,
                    role='Contributor', scope=None):
    # pylint: disable=too-many-statements, too-many-branches
    """Create a function app.

    Validates mutually exclusive options (--plan vs --consumption-plan-location,
    --deployment-source-url vs --deployment-local-git), resolves the runtime and
    runtime version against the bundled stacks JSON, builds the Site definition
    (including container settings for Linux custom images), creates the app, and
    then wires up Application Insights, deployment sources, container settings
    and managed identities as requested.
    """
    if functions_version is None:
        logger.warning("No functions version specified so defaulting to 2. In the future, specifying a version will "
                       "be required. To create a 2.x function you would pass in the flag `--functions-version 2`")
        functions_version = '2'
    if deployment_source_url and deployment_local_git:
        raise CLIError('usage error: --deployment-source-url <url> | --deployment-local-git')
    # exactly one of --plan / --consumption-plan-location must be supplied
    if bool(plan) == bool(consumption_plan_location):
        raise CLIError("usage error: --plan NAME_OR_ID | --consumption-plan-location LOCATION")
    SiteConfig, Site, NameValuePair = cmd.get_models('SiteConfig', 'Site', 'NameValuePair')
    docker_registry_server_url = parse_docker_image_name(deployment_container_image_name)
    disable_app_insights = (disable_app_insights == "true")
    site_config = SiteConfig(app_settings=[])
    functionapp_def = Site(location=None, site_config=site_config, tags=tags)
    KEYS = FUNCTIONS_STACKS_API_KEYS()
    client = web_client_factory(cmd.cli_ctx)
    plan_info = None
    if runtime is not None:
        runtime = runtime.lower()
    if consumption_plan_location:
        locations = list_consumption_locations(cmd)
        location = next((loc for loc in locations if loc['name'].lower() == consumption_plan_location.lower()), None)
        if location is None:
            raise CLIError("Location is invalid. Use: az functionapp list-consumption-locations")
        functionapp_def.location = consumption_plan_location
        functionapp_def.kind = 'functionapp'
        # if os_type is None, the os type is windows
        is_linux = os_type and os_type.lower() == 'linux'
    else:  # apps with SKU based plan
        if is_valid_resource_id(plan):
            parse_result = parse_resource_id(plan)
            plan_info = client.app_service_plans.get(parse_result['resource_group'], parse_result['name'])
        else:
            plan_info = client.app_service_plans.get(resource_group_name, plan)
        if not plan_info:
            raise CLIError("The plan '{}' doesn't exist".format(plan))
        location = plan_info.location
        is_linux = plan_info.reserved
        functionapp_def.server_farm_id = plan
        functionapp_def.location = location
    if functions_version == '2' and functionapp_def.location in FUNCTIONS_NO_V2_REGIONS:
        raise CLIError("2.x functions are not supported in this region. To create a 3.x function, "
                       "pass in the flag '--functions-version 3'")
    # Linux apps need a runtime unless a custom container image supplies one.
    if is_linux and not runtime and (consumption_plan_location or not deployment_container_image_name):
        raise CLIError(
            "usage error: --runtime RUNTIME required for linux functions apps without custom image.")
    runtime_stacks_json = _load_runtime_stacks_json_functionapp(is_linux)
    if runtime is None and runtime_version is not None:
        raise CLIError('Must specify --runtime to use --runtime-version')
    # get the matching runtime stack object
    runtime_json = _get_matching_runtime_json_functionapp(runtime_stacks_json, runtime if runtime else 'dotnet')
    if not runtime_json:
        # no matching runtime for os
        os_string = "linux" if is_linux else "windows"
        supported_runtimes = list(map(lambda x: x[KEYS.NAME], runtime_stacks_json))
        raise CLIError("usage error: Currently supported runtimes (--runtime) in {} function apps are: {}."
                       .format(os_string, ', '.join(supported_runtimes)))
    runtime_version_json = _get_matching_runtime_version_json_functionapp(runtime_json,
                                                                          functions_version,
                                                                          runtime_version,
                                                                          is_linux)
    if not runtime_version_json:
        supported_runtime_versions = list(map(lambda x: x[KEYS.DISPLAY_VERSION],
                                              _get_supported_runtime_versions_functionapp(runtime_json,
                                                                                          functions_version)))
        if runtime_version:
            if runtime == 'dotnet':
                raise CLIError('--runtime-version is not supported for --runtime dotnet. Dotnet version is determined '
                               'by --functions-version. Dotnet version {} is not supported by Functions version {}.'
                               .format(runtime_version, functions_version))
            raise CLIError('--runtime-version {} is not supported for the selected --runtime {} and '
                           '--functions-version {}. Supported versions are: {}.'
                           .format(runtime_version,
                                   runtime,
                                   functions_version,
                                   ', '.join(supported_runtime_versions)))
        # if runtime_version was not specified, then that runtime is not supported for that functions version
        raise CLIError('no supported --runtime-version found for the selected --runtime {} and '
                       '--functions-version {}'
                       .format(runtime, functions_version))
    if runtime == 'dotnet':
        logger.warning('--runtime-version is not supported for --runtime dotnet. Dotnet version is determined by '
                       '--functions-version. Dotnet version will be %s for this function app.',
                       runtime_version_json[KEYS.DISPLAY_VERSION])
    if runtime_version_json[KEYS.IS_DEPRECATED]:
        logger.warning('%s version %s has been deprecated. In the future, this version will be unavailable. '
                       'Please update your command to use a more recent version. For a list of supported '
                       '--runtime-versions, run \"az functionapp create -h\"',
                       runtime_json[KEYS.PROPERTIES][KEYS.DISPLAY], runtime_version_json[KEYS.DISPLAY_VERSION])
    site_config_json = runtime_version_json[KEYS.SITE_CONFIG_DICT]
    app_settings_json = runtime_version_json[KEYS.APP_SETTINGS_DICT]
    con_string = _validate_and_get_connection_string(cmd.cli_ctx, resource_group_name, storage_account)
    if is_linux:
        functionapp_def.kind = 'functionapp,linux'
        functionapp_def.reserved = True
        is_consumption = consumption_plan_location is not None
        if not is_consumption:
            site_config.app_settings.append(NameValuePair(name='MACHINEKEY_DecryptionKey',
                                                          value=str(hexlify(urandom(32)).decode()).upper()))
            if deployment_container_image_name:
                functionapp_def.kind = 'functionapp,linux,container'
                site_config.app_settings.append(NameValuePair(name='DOCKER_CUSTOM_IMAGE_NAME',
                                                              value=deployment_container_image_name))
                site_config.app_settings.append(NameValuePair(name='FUNCTION_APP_EDIT_MODE', value='readOnly'))
                site_config.app_settings.append(NameValuePair(name='WEBSITES_ENABLE_APP_SERVICE_STORAGE',
                                                              value='false'))
                site_config.linux_fx_version = _format_fx_version(deployment_container_image_name)
                # clear all runtime specific configs and settings
                site_config_json = {KEYS.USE_32_BIT_WORKER_PROC: False}
                app_settings_json = {}
                # ensure that app insights is created if not disabled
                runtime_version_json[KEYS.APPLICATION_INSIGHTS] = True
            else:
                site_config.app_settings.append(NameValuePair(name='WEBSITES_ENABLE_APP_SERVICE_STORAGE',
                                                              value='true'))
    else:
        functionapp_def.kind = 'functionapp'
    # set site configs
    for prop, value in site_config_json.items():
        snake_case_prop = _convert_camel_to_snake_case(prop)
        setattr(site_config, snake_case_prop, value)
    # temporary workaround for dotnet-isolated linux consumption apps
    if is_linux and consumption_plan_location is not None and runtime == 'dotnet-isolated':
        site_config.linux_fx_version = ''
    # adding app settings
    for app_setting, value in app_settings_json.items():
        site_config.app_settings.append(NameValuePair(name=app_setting, value=value))
    site_config.app_settings.append(NameValuePair(name='FUNCTIONS_EXTENSION_VERSION',
                                                  value=_get_extension_version_functionapp(functions_version)))
    site_config.app_settings.append(NameValuePair(name='AzureWebJobsStorage', value=con_string))
    # If plan is not consumption or elastic premium, we need to set always on
    if consumption_plan_location is None and not is_plan_elastic_premium(cmd, plan_info):
        site_config.always_on = True
    # If plan is elastic premium or consumption, we need these app settings
    if is_plan_elastic_premium(cmd, plan_info) or consumption_plan_location is not None:
        site_config.app_settings.append(NameValuePair(name='WEBSITE_CONTENTAZUREFILECONNECTIONSTRING',
                                                      value=con_string))
        site_config.app_settings.append(NameValuePair(name='WEBSITE_CONTENTSHARE', value=_get_content_share_name(name)))
    create_app_insights = False
    if app_insights_key is not None:
        site_config.app_settings.append(NameValuePair(name='APPINSIGHTS_INSTRUMENTATIONKEY',
                                                      value=app_insights_key))
    elif app_insights is not None:
        instrumentation_key = get_app_insights_key(cmd.cli_ctx, resource_group_name, app_insights)
        site_config.app_settings.append(NameValuePair(name='APPINSIGHTS_INSTRUMENTATIONKEY',
                                                      value=instrumentation_key))
    elif disable_app_insights or not runtime_version_json[KEYS.APPLICATION_INSIGHTS]:
        # set up dashboard if no app insights
        site_config.app_settings.append(NameValuePair(name='AzureWebJobsDashboard', value=con_string))
    elif not disable_app_insights and runtime_version_json[KEYS.APPLICATION_INSIGHTS]:
        create_app_insights = True
    poller = client.web_apps.begin_create_or_update(resource_group_name, name, functionapp_def)
    functionapp = LongRunningOperation(cmd.cli_ctx)(poller)
    if consumption_plan_location and is_linux:
        logger.warning("Your Linux function app '%s', that uses a consumption plan has been successfully "
                       "created but is not active until content is published using "
                       "Azure Portal or the Functions Core Tools.", name)
    else:
        _set_remote_or_local_git(cmd, functionapp, resource_group_name, name, deployment_source_url,
                                 deployment_source_branch, deployment_local_git)
    if create_app_insights:
        try:
            try_create_application_insights(cmd, functionapp)
        except Exception:  # pylint: disable=broad-except
            # best-effort: fall back to the classic dashboard setting on failure
            logger.warning('Error while trying to create and configure an Application Insights for the Function App. '
                           'Please use the Azure Portal to create and configure the Application Insights, if needed.')
            update_app_settings(cmd, functionapp.resource_group, functionapp.name,
                                ['AzureWebJobsDashboard={}'.format(con_string)])
    if deployment_container_image_name:
        update_container_settings_functionapp(cmd, resource_group_name, name, docker_registry_server_url,
                                              deployment_container_image_name, docker_registry_server_user,
                                              docker_registry_server_password)
    if assign_identities is not None:
        identity = assign_identity(cmd, resource_group_name, name, assign_identities,
                                   role, None, scope)
        functionapp.identity = identity
    return functionapp
def _load_runtime_stacks_json_functionapp(is_linux):
    """Load the bundled functions runtime-stacks JSON for the requested OS."""
    KEYS = FUNCTIONS_STACKS_API_KEYS()
    os_key = 'linux' if is_linux else 'windows'
    return get_file_json(FUNCTIONS_STACKS_API_JSON_PATHS[os_key])[KEYS.VALUE]
def _get_matching_runtime_json_functionapp(stacks_json, runtime):
    """Return the first stack entry whose name equals the requested runtime, or None."""
    KEYS = FUNCTIONS_STACKS_API_KEYS()
    return next((stack for stack in stacks_json if stack[KEYS.NAME] == runtime), None)
def _get_supported_runtime_versions_functionapp(runtime_json, functions_version):
    """List the runtime's major versions that support the requested functions version."""
    KEYS = FUNCTIONS_STACKS_API_KEYS()
    extension_version = _get_extension_version_functionapp(functions_version)
    return [version_json for version_json in runtime_json[KEYS.PROPERTIES][KEYS.MAJOR_VERSIONS]
            if extension_version in version_json[KEYS.SUPPORTED_EXTENSION_VERSIONS]]
def _get_matching_runtime_version_json_functionapp(runtime_json, functions_version, runtime_version, is_linux):
    """Pick the runtime-version entry matching runtime_version, else the newest default version.

    With an explicit runtime_version, returns the matching entry or None.
    Without one, returns the highest default version supporting the given
    functions version, or {} when none is found.
    """
    KEYS = FUNCTIONS_STACKS_API_KEYS()
    extension_version = _get_extension_version_functionapp(functions_version)
    if runtime_version:
        # exact display-version match that also supports the extension version
        for version_json in runtime_json[KEYS.PROPERTIES][KEYS.MAJOR_VERSIONS]:
            if (version_json[KEYS.DISPLAY_VERSION] == runtime_version and
                    extension_version in version_json[KEYS.SUPPORTED_EXTENSION_VERSIONS]):
                return version_json
        return None
    # no explicit version requested: choose the newest entry flagged as default
    best_json = {}
    best_version = 0.0
    for candidate in _get_supported_runtime_versions_functionapp(runtime_json, functions_version):
        if not candidate[KEYS.IS_DEFAULT]:
            continue
        candidate_version = _get_runtime_version_functionapp(candidate[KEYS.RUNTIME_VERSION], is_linux)
        if not best_json or best_version < candidate_version:
            best_json = candidate
            best_version = candidate_version
    return best_json
def _get_extension_version_functionapp(functions_version):
if functions_version is not None:
return '~{}'.format(functions_version)
return '~2'
def _get_app_setting_set_functionapp(site_config, app_setting):
return list(filter(lambda x: x.name == app_setting, site_config.app_settings))
def _convert_camel_to_snake_case(text):
return reduce(lambda x, y: x + ('_' if y.isupper() else '') + y, text).lower()
def _get_runtime_version_functionapp(version_string, is_linux):
    """Parse a runtime version string into a comparable float; 0 when unparseable.

    NOTE(review): is_linux is currently unused — both the windows and linux
    patterns are tried unconditionally; the parameter is kept for interface
    compatibility.
    """
    import re
    for pattern in (FUNCTIONS_WINDOWS_RUNTIME_VERSION_REGEX, FUNCTIONS_LINUX_RUNTIME_VERSION_REGEX):
        match = re.fullmatch(pattern, version_string)
        if match:
            return float(match.group(1))
    try:
        return float(version_string)
    except ValueError:
        return 0
def _get_content_share_name(app_name):
# content share name should be up to 63 characters long, lowercase letter and digits, and random
# so take the first 50 characters of the app name and add the last 12 digits of a random uuid
share_name = app_name[0:50]
suffix = str(uuid.uuid4()).split('-')[-1]
return share_name.lower() + suffix
def try_create_application_insights(cmd, functionapp):
    """Best-effort creation of an Application Insights component for the function app."""
    creation_failed_warn = 'Unable to create the Application Insights for the Function App. ' \
                           'Please use the Azure Portal to manually create and configure the Application Insights, ' \
                           'if needed.'
    app_insights_client = get_mgmt_service_client(cmd.cli_ctx, ApplicationInsightsManagementClient)
    # Component mirrors the app's name and location.
    ai_properties = {
        "name": functionapp.name,
        "location": functionapp.location,
        "kind": "web",
        "properties": {
            "Application_Type": "web"
        }
    }
    appinsights = app_insights_client.components.create_or_update(functionapp.resource_group,
                                                                  functionapp.name, ai_properties)
    if appinsights is None or appinsights.instrumentation_key is None:
        logger.warning(creation_failed_warn)
        return
    # We make this success message as a warning to no interfere with regular JSON output in stdout
    logger.warning('Application Insights \"%s\" was created for this Function App. '
                   'You can visit https://portal.azure.com/#resource%s/overview to view your '
                   'Application Insights component', appinsights.name, appinsights.id)
    update_app_settings(cmd, functionapp.resource_group, functionapp.name,
                        ['APPINSIGHTS_INSTRUMENTATIONKEY={}'.format(appinsights.instrumentation_key)])
def _set_remote_or_local_git(cmd, webapp, resource_group_name, name, deployment_source_url=None,
                             deployment_source_branch='master', deployment_local_git=None):
    """Configure remote-git integration and/or local-git deployment on the app."""
    if deployment_source_url:
        logger.warning("Linking to git repository '%s'", deployment_source_url)
        try:
            config_source_control(cmd, resource_group_name, name, deployment_source_url, 'git',
                                  deployment_source_branch, manual_integration=True)
        except Exception as exc:  # pylint: disable=broad-except
            handled = ex_handler_factory(no_throw=True)(exc)
            logger.warning("Link to git repository failed due to error '%s'", handled)
    if deployment_local_git:
        git_info = enable_local_git(cmd, resource_group_name, name)
        logger.warning("Local git is configured with url of '%s'", git_info['url'])
        # Surface the repo URL on the returned webapp object for the caller's output.
        webapp.deploymentLocalGitUrl = git_info['url']
def _validate_and_get_connection_string(cli_ctx, resource_group_name, storage_account):
    """Validate the storage account (endpoints + SKU) and build its connection string.

    Accepts either a bare account name (assumed to be in resource_group_name) or
    a full resource ID. Raises CLIError listing every validation problem found —
    previously a later missing-endpoint message silently overwrote earlier ones.
    """
    sa_resource_group = resource_group_name
    if is_valid_resource_id(storage_account):
        sa_resource_group = parse_resource_id(storage_account)['resource_group']
        storage_account = parse_resource_id(storage_account)['name']
    storage_client = get_mgmt_service_client(cli_ctx, StorageManagementClient)
    storage_properties = storage_client.storage_accounts.get_properties(sa_resource_group,
                                                                        storage_account)
    errors = []
    endpoints = storage_properties.primary_endpoints
    sku = storage_properties.sku.name
    allowed_storage_types = ['Standard_GRS', 'Standard_RAGRS', 'Standard_LRS', 'Standard_ZRS', 'Premium_LRS']
    # Collect all failures instead of overwriting, so the user sees every problem at once.
    for e in ['blob', 'queue', 'table']:
        if not getattr(endpoints, e, None):
            errors.append("Storage account '{}' has no '{}' endpoint. It must have table, queue, and blob endpoints all enabled".format(storage_account, e))  # pylint: disable=line-too-long
    if sku not in allowed_storage_types:
        errors.append('Storage type {} is not allowed'.format(sku))
    if errors:
        raise CLIError('. '.join(errors))
    obj = storage_client.storage_accounts.list_keys(sa_resource_group, storage_account)  # pylint: disable=no-member
    try:
        keys = [obj.keys[0].value, obj.keys[1].value]  # pylint: disable=no-member
    except AttributeError:
        # Older API versions expose key1/key2 instead of a keys list.
        keys = [obj.key1, obj.key2]  # pylint: disable=no-member
    endpoint_suffix = cli_ctx.cloud.suffixes.storage_endpoint
    connection_string = 'DefaultEndpointsProtocol={};EndpointSuffix={};AccountName={};AccountKey={}'.format(
        "https",
        endpoint_suffix,
        storage_account,
        keys[0])  # pylint: disable=no-member
    return connection_string
def list_consumption_locations(cmd):
    """List regions supporting dynamic (consumption) plans, normalized to lowercase with no spaces."""
    regions = web_client_factory(cmd.cli_ctx).list_geo_regions(sku='Dynamic')
    return [{'name': region.name.lower().replace(' ', '')} for region in regions]
def list_locations(cmd, sku, linux_workers_enabled=None):
    """List locations that support the SKU and also host the Microsoft.Web/sites resource type."""
    web_client = web_client_factory(cmd.cli_ctx)
    geo_regions = web_client.list_geo_regions(sku=get_sku_name(sku),
                                              linux_workers_enabled=linux_workers_enabled)
    providers_client = providers_client_factory(cmd.cli_ctx)
    # Narrow the provider's resource types down to the 'sites' type's locations.
    site_locations = getattr(providers_client.get('Microsoft.Web'), 'resource_types', [])
    for resource_type in site_locations:
        if resource_type.resource_type == 'sites':
            site_locations = resource_type.locations
            break
    return [region for region in geo_regions if region.name in site_locations]
def _check_zip_deployment_status(cmd, rg_name, name, deployment_status_url, authorization, timeout=None):
    """Poll the Kudu deployment-status endpoint until the deployment succeeds, fails, or times out.

    Status codes from the endpoint: 3 = failed, 4 = succeeded. Polls every 2
    seconds, up to timeout/2 attempts (default 450).
    """
    import requests
    from azure.cli.core.util import should_disable_connection_verify
    total_trials = (int(timeout) // 2) if timeout else 450
    num_trials = 0
    # Initialize res_dict so a zero-trial timeout (e.g. timeout='1') does not
    # hit an unbound local at the final status check below.
    res_dict = {}
    while num_trials < total_trials:
        time.sleep(2)
        response = requests.get(deployment_status_url, headers=authorization,
                                verify=not should_disable_connection_verify())
        try:
            res_dict = response.json()
        except json.decoder.JSONDecodeError:
            logger.warning("Deployment status endpoint %s returns malformed data. Retrying...", deployment_status_url)
            res_dict = {}
        finally:
            num_trials = num_trials + 1
        if res_dict.get('status', 0) == 3:
            _configure_default_logging(cmd, rg_name, name)
            raise CLIError("Zip deployment failed. {}. Please run the command az webapp log deployment show "
                           "-n {} -g {}".format(res_dict, name, rg_name))
        if res_dict.get('status', 0) == 4:
            break
        if 'progress' in res_dict:
            logger.info(res_dict['progress'])  # show only in debug mode, customers seem to find this confusing
    # if the deployment is taking longer than expected
    if res_dict.get('status', 0) != 4:
        _configure_default_logging(cmd, rg_name, name)
        raise CLIError("""Timeout reached by the command, however, the deployment operation
is still on-going. Navigate to your scm site to check the deployment status""")
    return res_dict
def list_continuous_webjobs(cmd, resource_group_name, name, slot=None):
    """List all continuous webjobs for the app (or the given slot)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                   'list_continuous_web_jobs', slot)
def start_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
    """Start a continuous webjob and return its current state."""
    web_apps = web_client_factory(cmd.cli_ctx).web_apps
    if slot:
        web_apps.start_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
        return web_apps.get_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
    web_apps.start_continuous_web_job(resource_group_name, name, webjob_name)
    return web_apps.get_continuous_web_job(resource_group_name, name, webjob_name)
def stop_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
    """Stop a continuous webjob and return its current state."""
    web_apps = web_client_factory(cmd.cli_ctx).web_apps
    if slot:
        web_apps.stop_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
        return web_apps.get_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
    web_apps.stop_continuous_web_job(resource_group_name, name, webjob_name)
    return web_apps.get_continuous_web_job(resource_group_name, name, webjob_name)
def remove_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
    """Delete a continuous webjob from the app (or the given slot)."""
    web_apps = web_client_factory(cmd.cli_ctx).web_apps
    if slot:
        return web_apps.delete_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
    return web_apps.delete_continuous_web_job(resource_group_name, name, webjob_name)
def list_triggered_webjobs(cmd, resource_group_name, name, slot=None):
    """List all triggered webjobs for the app (or the given slot)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                   'list_triggered_web_jobs', slot)
def run_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
    """Trigger a run of a triggered webjob and return its current state."""
    web_apps = web_client_factory(cmd.cli_ctx).web_apps
    if slot:
        web_apps.run_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)
        return web_apps.get_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)
    web_apps.run_triggered_web_job(resource_group_name, name, webjob_name)
    return web_apps.get_triggered_web_job(resource_group_name, name, webjob_name)
def remove_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
    """Delete a triggered webjob from the app (or the given slot)."""
    web_apps = web_client_factory(cmd.cli_ctx).web_apps
    if slot:
        return web_apps.delete_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)
    return web_apps.delete_triggered_web_job(resource_group_name, name, webjob_name)
def list_hc(cmd, name, resource_group_name, slot=None):
    """List the app's hybrid connections, pruned down to the commonly useful fields."""
    client = web_client_factory(cmd.cli_ctx)
    if slot is None:
        raw = client.web_apps.list_hybrid_connections(resource_group_name, name)
    else:
        raw = client.web_apps.list_hybrid_connections_slot(resource_group_name, name, slot)
    pruned = []
    for entry in raw.additional_properties["value"]:
        props = entry["properties"]
        pruned.append({
            "id": entry["id"],
            "location": entry["location"],
            "name": entry["name"],
            "properties": {
                "hostname": props["hostname"],
                "port": props["port"],
                "relayArmUri": props["relayArmUri"],
                "relayName": props["relayName"],
                "serviceBusNamespace": props["serviceBusNamespace"],
                "serviceBusSuffix": props["serviceBusSuffix"]
            },
            # resource group is the 5th '/'-separated segment of the ARM resource id
            "resourceGroup": entry["id"].split("/")[4],
            "type": entry["type"]
        })
    return pruned
def add_hc(cmd, name, resource_group_name, namespace, hybrid_connection, slot=None):
    """Attach a Relay hybrid connection to a webapp (or slot).

    Looks up the Relay namespace, ensures a 'defaultSender' send-authorization
    rule exists, then creates the connection on the app and returns a pruned
    summary dict. Not supported for Linux apps (warns and returns None).
    """
    HybridConnection = cmd.get_models('HybridConnection')
    linux_webapp = show_webapp(cmd, resource_group_name, name, slot)
    is_linux = linux_webapp.reserved  # 'reserved' flags a Linux app
    if is_linux:
        return logger.warning("hybrid connections not supported on a linux app.")
    web_client = web_client_factory(cmd.cli_ctx)
    hy_co_client = hycos_mgmt_client_factory(cmd.cli_ctx, cmd.cli_ctx)
    namespace_client = namespaces_mgmt_client_factory(cmd.cli_ctx, cmd.cli_ctx)
    # find the ARM id of the requested Relay namespace across the subscription
    hy_co_id = ''
    for n in namespace_client.list():
        if n.name == namespace:
            hy_co_id = n.id
    # walk the id segments to pull out the namespace's resource group
    i = 0
    hy_co_resource_group = ''
    hy_co_split = hy_co_id.split("/")
    for z in hy_co_split:
        if z == "resourceGroups":
            hy_co_resource_group = hy_co_split[i + 1]
        i = i + 1
    # calling the relay API to get information about the hybrid connection
    hy_co = hy_co_client.get(hy_co_resource_group, namespace, hybrid_connection)
    # if the hybrid connection does not have a default sender authorization
    # rule, create it
    hy_co_rules = hy_co_client.list_authorization_rules(hy_co_resource_group, namespace, hybrid_connection)
    has_default_sender_key = False
    for r in hy_co_rules:
        if r.name.lower() == "defaultsender":
            for z in r.rights:
                if z == z.send:
                    has_default_sender_key = True
    if not has_default_sender_key:
        rights = [AccessRights.send]
        hy_co_client.create_or_update_authorization_rule(hy_co_resource_group, namespace, hybrid_connection,
                                                         "defaultSender", rights)
    hy_co_keys = hy_co_client.list_keys(hy_co_resource_group, namespace, hybrid_connection, "defaultSender")
    hy_co_info = hy_co.id
    # user_metadata is a stringified list of {"key": ..., "value": ...} entries
    hy_co_metadata = ast.literal_eval(hy_co.user_metadata)
    hy_co_hostname = ''
    for x in hy_co_metadata:
        if x["key"] == "endpoint":
            hy_co_hostname = x["value"]
    # endpoint value has the form "<hostname>:<port>"
    hostname_parts = hy_co_hostname.split(":")
    hostname = hostname_parts[0]
    port = hostname_parts[1]
    id_parameters = hy_co_info.split("/")
    # populate object with information from the hybrid connection, and set it
    # on webapp
    hc = HybridConnection(service_bus_namespace=id_parameters[8],
                          relay_name=hybrid_connection,
                          relay_arm_uri=hy_co_info,
                          hostname=hostname,
                          port=port,
                          send_key_name="defaultSender",
                          send_key_value=hy_co_keys.primary_key,
                          service_bus_suffix=".servicebus.windows.net")
    if slot is None:
        return_hc = web_client.web_apps.create_or_update_hybrid_connection(resource_group_name, name, namespace,
                                                                           hybrid_connection, hc)
    else:
        return_hc = web_client.web_apps.create_or_update_hybrid_connection_slot(resource_group_name, name, namespace,
                                                                                hybrid_connection, slot, hc)
    # reformats hybrid connection, to prune unnecessary fields
    resourceGroup = return_hc.id.split("/")
    mod_hc = {
        "hostname": return_hc.hostname,
        "id": return_hc.id,
        "location": return_hc.additional_properties["location"],
        "name": return_hc.name,
        "port": return_hc.port,
        "relayArmUri": return_hc.relay_arm_uri,
        "resourceGroup": resourceGroup[4],
        "serviceBusNamespace": return_hc.service_bus_namespace,
        "serviceBusSuffix": return_hc.service_bus_suffix
    }
    return mod_hc
# set the key the apps use to connect with the hybrid connection
def set_hc_key(cmd, plan, resource_group_name, namespace, hybrid_connection, key_type):
    """Rotate the send key ('primary' or 'secondary') used by every webapp on the
    plan that is bound to the given hybrid connection.

    Returns the list of webapps using the hybrid connection after the update,
    or None (with a warning) when key_type is not 'primary'/'secondary'.
    """
    HybridConnection = cmd.get_models('HybridConnection')
    web_client = web_client_factory(cmd.cli_ctx)
    # extract the hybrid connection resource group
    asp_hy_co = web_client.app_service_plans.get_hybrid_connection(resource_group_name, plan,
                                                                   namespace, hybrid_connection)
    arm_uri = asp_hy_co.relay_arm_uri
    split_uri = arm_uri.split("resourceGroups/")
    resource_group_strings = split_uri[1].split('/')
    relay_resource_group = resource_group_strings[0]
    hy_co_client = hycos_mgmt_client_factory(cmd.cli_ctx, cmd.cli_ctx)
    # calling the relay function to obtain information about the hc in question
    hy_co = hy_co_client.get(relay_resource_group, namespace, hybrid_connection)
    # if the hybrid connection does not have a default sender authorization
    # rule, create it
    hy_co_rules = hy_co_client.list_authorization_rules(relay_resource_group, namespace, hybrid_connection)
    has_default_sender_key = False
    for r in hy_co_rules:
        if r.name.lower() == "defaultsender":
            for z in r.rights:
                if z == z.send:
                    has_default_sender_key = True
    if not has_default_sender_key:
        rights = [AccessRights.send]
        hy_co_client.create_or_update_authorization_rule(relay_resource_group, namespace, hybrid_connection,
                                                         "defaultSender", rights)
    hy_co_keys = hy_co_client.list_keys(relay_resource_group, namespace, hybrid_connection, "defaultSender")
    hy_co_metadata = ast.literal_eval(hy_co.user_metadata)
    # initialize to '' (string) for consistency with add_hc; endpoint value is "<hostname>:<port>"
    hy_co_hostname = ''
    for x in hy_co_metadata:
        if x["key"] == "endpoint":
            hy_co_hostname = x["value"]
    hostname_parts = hy_co_hostname.split(":")
    hostname = hostname_parts[0]
    port = hostname_parts[1]
    key = "empty"
    if key_type.lower() == "primary":
        key = hy_co_keys.primary_key
    elif key_type.lower() == "secondary":
        key = hy_co_keys.secondary_key
    # ensures input is correct
    if key == "empty":
        logger.warning("Key type is invalid - must be primary or secondary")
        return
    apps = web_client.app_service_plans.list_web_apps_by_hybrid_connection(resource_group_name, plan, namespace,
                                                                           hybrid_connection)
    # the replacement connection payload is identical for every app, so build it once
    hc = HybridConnection(service_bus_namespace=namespace, relay_name=hybrid_connection,
                          relay_arm_uri=arm_uri, hostname=hostname, port=port, send_key_name="defaultSender",
                          send_key_value=key)
    # changes the key for every app that uses that hybrid connection
    for x in apps:
        app_info = ast.literal_eval(x)
        app_name = app_info["name"]
        app_id = app_info["id"]
        id_split = app_id.split("/")
        app_resource_group = id_split[4]
        web_client.web_apps.update_hybrid_connection(app_resource_group, app_name, namespace,
                                                     hybrid_connection, hc)
    return web_client.app_service_plans.list_web_apps_by_hybrid_connection(resource_group_name, plan,
                                                                           namespace, hybrid_connection)
def appservice_list_vnet(cmd, resource_group_name, plan):
    """List the virtual-network connections configured on an App Service plan."""
    return web_client_factory(cmd.cli_ctx).app_service_plans.list_vnets(resource_group_name, plan)
def remove_hc(cmd, resource_group_name, name, namespace, hybrid_connection, slot=None):
    """Remove a hybrid connection from a webapp or slot (not supported on Linux apps)."""
    webapp = show_webapp(cmd, resource_group_name, name, slot)
    if webapp.reserved:  # 'reserved' flags a Linux app
        return logger.warning("hybrid connections not supported on a linux app.")
    apps = web_client_factory(cmd.cli_ctx).web_apps
    if slot is None:
        return apps.delete_hybrid_connection(resource_group_name, name, namespace, hybrid_connection)
    return apps.delete_hybrid_connection_slot(resource_group_name, name, namespace,
                                              hybrid_connection, slot)
def list_vnet_integration(cmd, name, resource_group_name, slot=None):
    """List vnet integrations on a webapp or slot, reformatted to drop the GUID
    prefix from names/ids and prune unneeded fields."""
    client = web_client_factory(cmd.cli_ctx)
    if slot is None:
        raw = list(client.web_apps.list_vnet_connections(resource_group_name, name))
    else:
        raw = list(client.web_apps.list_vnet_connections_slot(resource_group_name, name, slot))
    cleaned = []
    for conn in raw:
        # strip the "<GUID>_" prefix from the connection name, if present
        full_name = conn.name
        display_name = full_name.split('_', 1)[1] if '_' in full_name else full_name
        full_id = conn.id
        # rebuild the id with the shortened trailing name segment
        short_id = full_id[:full_id.rindex('/')] + '/' + display_name
        cleaned.append({
            "certThumbprint": conn.cert_thumbprint,
            "id": short_id,
            "location": conn.additional_properties["location"],
            "name": display_name,
            "resourceGroup": full_id.split('/')[4],
            "routes": conn.routes,
            "type": conn.type,
            "vnetResourceId": conn.vnet_resource_id
        })
    return cleaned
def add_vnet_integration(cmd, name, resource_group_name, vnet, subnet, slot=None):
    """Connect a webapp (or slot) to a subnet via regional (swift) VNet integration.

    Validates the subnet id, checks the app's plan supports swift integration,
    delegates the subnet to Microsoft.Web/serverFarms if needed, creates the
    connection, and returns a pruned summary dict.
    """
    SwiftVirtualNetwork = cmd.get_models('SwiftVirtualNetwork')
    Delegation = cmd.get_models('Delegation', resource_type=ResourceType.MGMT_NETWORK)
    client = web_client_factory(cmd.cli_ctx)
    vnet_client = network_client_factory(cmd.cli_ctx)
    subnet_resource_id = _validate_subnet(cmd.cli_ctx, subnet, vnet, resource_group_name)
    if slot is None:
        swift_connection_info = client.web_apps.get_swift_virtual_network_connection(resource_group_name, name)
    else:
        swift_connection_info = client.web_apps.get_swift_virtual_network_connection_slot(resource_group_name,
                                                                                          name, slot)
    # check to see if the connection would be supported
    if swift_connection_info.swift_supported is not True:
        return logger.warning("""Your app must be in an Azure App Service deployment that is
              capable of scaling up to Premium v2\nLearn more:
              https://go.microsoft.com/fwlink/?linkid=2060115&clcid=0x409""")
    subnet_id_parts = parse_resource_id(subnet_resource_id)
    vnet_name = subnet_id_parts['name']
    vnet_resource_group = subnet_id_parts['resource_group']
    subnet_name = subnet_id_parts['child_name_1']
    subnetObj = vnet_client.subnets.get(vnet_resource_group, vnet_name, subnet_name)
    delegations = subnetObj.delegations
    delegated = False
    for d in delegations:
        if d.service_name.lower() == "microsoft.web/serverfarms".lower():
            delegated = True
    # the subnet must be delegated to Microsoft.Web/serverFarms before integration
    if not delegated:
        subnetObj.delegations = [Delegation(name="delegation", service_name="Microsoft.Web/serverFarms")]
        vnet_client.subnets.begin_create_or_update(vnet_resource_group, vnet_name, subnet_name,
                                                   subnet_parameters=subnetObj)
    swiftVnet = SwiftVirtualNetwork(subnet_resource_id=subnet_resource_id,
                                    swift_supported=True)
    return_vnet = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                          'create_or_update_swift_virtual_network_connection', slot, swiftVnet)
    # reformats the vnet entry, removing unnecessary information
    id_strings = return_vnet.id.split('/')
    resourceGroup = id_strings[4]  # ARM id segment 4 is the resource group name
    mod_vnet = {
        "id": return_vnet.id,
        "location": return_vnet.additional_properties["location"],
        "name": return_vnet.name,
        "resourceGroup": resourceGroup,
        "subnetResourceId": return_vnet.subnet_resource_id
    }
    return mod_vnet
def _validate_subnet(cli_ctx, subnet, vnet, resource_group_name):
    """Resolve the user-supplied subnet/vnet arguments into a full subnet resource id.

    Accepts three shapes of input:
    1. `subnet` is already a full resource id (the `vnet` argument is ignored,
       with a warning when it does not match);
    2. `vnet` is a full resource id and `subnet` is a bare name;
    3. both are bare names — the vnet is searched across the whole subscription,
       preferring one in `resource_group_name` when several share the name.
    """
    subnet_is_id = is_valid_resource_id(subnet)
    if subnet_is_id:
        subnet_id_parts = parse_resource_id(subnet)
        vnet_name = subnet_id_parts['name']
        if not (vnet_name.lower() == vnet.lower() or subnet.startswith(vnet)):
            logger.warning('Subnet ID is valid. Ignoring vNet input.')
        return subnet
    vnet_is_id = is_valid_resource_id(vnet)
    if vnet_is_id:
        vnet_id_parts = parse_resource_id(vnet)
        return resource_id(
            subscription=vnet_id_parts['subscription'],
            resource_group=vnet_id_parts['resource_group'],
            namespace='Microsoft.Network',
            type='virtualNetworks',
            name=vnet_id_parts['name'],
            child_type_1='subnets',
            child_name_1=subnet)
    # Reuse logic from existing command to stay backwards compatible
    vnet_client = network_client_factory(cli_ctx)
    list_all_vnets = vnet_client.virtual_networks.list_all()
    vnets = []
    for v in list_all_vnets:
        if vnet in (v.name, v.id):
            vnet_details = parse_resource_id(v.id)
            vnet_resource_group = vnet_details['resource_group']
            vnets.append((v.id, v.name, vnet_resource_group))
    if not vnets:
        return logger.warning("The virtual network %s was not found in the subscription.", vnet)
    # If more than one vnet, try to use one from same resource group. Otherwise, use first and log the vnet resource id
    found_vnet = [v for v in vnets if v[2].lower() == resource_group_name.lower()]
    if not found_vnet:
        found_vnet = [vnets[0]]
    (vnet_id, vnet, vnet_resource_group) = found_vnet[0]
    if len(vnets) > 1:
        logger.warning("Multiple virtual networks of name %s were found. Using virtual network with resource ID: %s. "
                       "To use a different virtual network, specify the virtual network resource ID using --vnet.",
                       vnet, vnet_id)
    vnet_id_parts = parse_resource_id(vnet_id)
    return resource_id(
        subscription=vnet_id_parts['subscription'],
        resource_group=vnet_id_parts['resource_group'],
        namespace='Microsoft.Network',
        type='virtualNetworks',
        name=vnet_id_parts['name'],
        child_type_1='subnets',
        child_name_1=subnet)
def remove_vnet_integration(cmd, name, resource_group_name, slot=None):
    """Delete the (swift) vnet integration from a webapp or one of its slots."""
    apps = web_client_factory(cmd.cli_ctx).web_apps
    if slot is None:
        return apps.delete_swift_virtual_network(resource_group_name, name)
    return apps.delete_swift_virtual_network_slot(resource_group_name, name, slot)
def get_history_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
    """Return the run history of a triggered webjob on a webapp or slot."""
    apps = web_client_factory(cmd.cli_ctx).web_apps
    if slot:
        return apps.list_triggered_web_job_history_slot(resource_group_name, name, webjob_name, slot)
    return apps.list_triggered_web_job_history(resource_group_name, name, webjob_name)
def webapp_up(cmd, name=None, resource_group_name=None, plan=None, location=None, sku=None, # pylint: disable=too-many-statements,too-many-branches
              os_type=None, runtime=None, dryrun=False, logs=False, launch_browser=False, html=False):
    """Create (or reuse) the resource group, plan and webapp, then zip-deploy the
    contents of the current working directory to it.

    With --dryrun, only returns the planned configuration dict without creating
    anything. Returns the configuration dict (plus URL), or a streaming log
    handle when --logs is set.
    """
    if not name:
        name = generate_default_app_name(cmd)
    import os
    AppServicePlan = cmd.get_models('AppServicePlan')
    src_dir = os.getcwd()
    # escape path separators so the value survives a round-trip through JSON below
    _src_path_escaped = "{}".format(src_dir.replace(os.sep, os.sep + os.sep))
    client = web_client_factory(cmd.cli_ctx)
    user = get_profile_username()
    _create_new_rg = False
    _site_availability = get_site_availability(cmd, name)
    # name available means we will create a brand-new app
    _create_new_app = _site_availability.name_available
    os_name = os_type if os_type else detect_os_form_src(src_dir, html)
    _is_linux = os_name.lower() == 'linux'
    if runtime and html:
        raise CLIError('Conflicting parameters: cannot have both --runtime and --html specified.')
    if runtime:
        # explicit runtime: validate it against the known stacks
        helper = _StackRuntimeHelper(cmd, client, linux=_is_linux)
        runtime = helper.remove_delimiters(runtime)
        match = helper.resolve(runtime)
        if not match:
            if _is_linux:
                raise CLIError("Linux runtime '{}' is not supported."
                               " Please invoke 'az webapp list-runtimes --linux' to cross check".format(runtime))
            raise CLIError("Windows runtime '{}' is not supported."
                           " Please invoke 'az webapp list-runtimes' to cross check".format(runtime))
        language = runtime.split('|')[0]
        version_used_create = '|'.join(runtime.split('|')[1:])
        detected_version = '-'
    else:
        # detect the version
        _lang_details = get_lang_from_content(src_dir, html)
        language = _lang_details.get('language')
        _data = get_runtime_version_details(_lang_details.get('file_loc'), language)
        version_used_create = _data.get('to_create')
        detected_version = _data.get('detected')
    runtime_version = "{}|{}".format(language, version_used_create) if \
        version_used_create != "-" else version_used_create
    site_config = None
    if not _create_new_app:  # App exists, or App name unavailable
        if _site_availability.reason == 'Invalid':
            raise CLIError(_site_availability.message)
        # Get the ASP & RG info, if the ASP & RG parameters are provided we use those else we need to find those
        logger.warning("Webapp '%s' already exists. The command will deploy contents to the existing app.", name)
        app_details = get_app_details(cmd, name)
        if app_details is None:
            raise CLIError("Unable to retrieve details of the existing app '{}'. Please check that the app "
                           "is a part of the current subscription".format(name))
        current_rg = app_details.resource_group
        if resource_group_name is not None and (resource_group_name.lower() != current_rg.lower()):
            raise CLIError("The webapp '{}' exists in ResourceGroup '{}' and does not "
                           "match the value entered '{}'. Please re-run command with the "
                           "correct parameters.". format(name, current_rg, resource_group_name))
        rg_name = resource_group_name or current_rg
        if location is None:
            loc = app_details.location.replace(" ", "").lower()
        else:
            loc = location.replace(" ", "").lower()
        plan_details = parse_resource_id(app_details.server_farm_id)
        current_plan = plan_details['name']
        if plan is not None and current_plan.lower() != plan.lower():
            raise CLIError("The plan name entered '{}' does not match the plan name that the webapp is hosted in '{}'."
                           "Please check if you have configured defaults for plan name and re-run command."
                           .format(plan, current_plan))
        plan = plan or plan_details['name']
        plan_info = client.app_service_plans.get(plan_details['resource_group'], plan)
        sku = plan_info.sku.name if isinstance(plan_info, AppServicePlan) else 'Free'
        current_os = 'Linux' if plan_info.reserved else 'Windows'
        # Raise error if current OS of the app is different from the current one
        if current_os.lower() != os_name.lower():
            raise CLIError("The webapp '{}' is a {} app. The code detected at '{}' will default to "
                           "'{}'. Please create a new app "
                           "to continue this operation.".format(name, current_os, src_dir, os_name))
        _is_linux = plan_info.reserved
        # for an existing app check if the runtime version needs to be updated
        # Get site config to check the runtime version
        site_config = client.web_apps.get_configuration(rg_name, name)
    else:  # need to create new app, check if we need to use default RG or use user entered values
        logger.warning("The webapp '%s' doesn't exist", name)
        sku = get_sku_to_use(src_dir, html, sku, runtime)
        loc = set_location(cmd, sku, location)
        rg_name = get_rg_to_use(cmd, user, loc, os_name, resource_group_name)
        _create_new_rg = should_create_new_rg(cmd, rg_name, _is_linux)
        plan = get_plan_to_use(cmd=cmd,
                               user=user,
                               os_name=os_name,
                               loc=loc,
                               sku=sku,
                               create_rg=_create_new_rg,
                               resource_group_name=rg_name,
                               plan=plan)
    # summary of everything that will be created/used; parsed back with json.loads
    dry_run_str = r""" {
                "name" : "%s",
                "appserviceplan" : "%s",
                "resourcegroup" : "%s",
                "sku": "%s",
                "os": "%s",
                "location" : "%s",
                "src_path" : "%s",
                "runtime_version_detected": "%s",
                "runtime_version": "%s"
                }
                """ % (name, plan, rg_name, get_sku_name(sku), os_name, loc, _src_path_escaped, detected_version,
                       runtime_version)
    create_json = json.loads(dry_run_str)
    if dryrun:
        logger.warning("Web app will be created with the below configuration,re-run command "
                       "without the --dryrun flag to create & deploy a new app")
        return create_json
    if _create_new_rg:
        logger.warning("Creating Resource group '%s' ...", rg_name)
        create_resource_group(cmd, rg_name, loc)
        logger.warning("Resource group creation complete")
    # create ASP
    logger.warning("Creating AppServicePlan '%s' ...", plan)
    # we will always call the ASP create or update API so that in case of re-deployment, if the SKU or plan setting are
    # updated we update those
    try:
        create_app_service_plan(cmd, rg_name, plan, _is_linux, hyper_v=False, per_site_scaling=False, sku=sku,
                                number_of_workers=1 if _is_linux else None, location=loc)
    except Exception as ex:  # pylint: disable=broad-except
        if ex.response.status_code == 409:  # catch 409 conflict when trying to create existing ASP in diff location
            try:
                response_content = json.loads(ex.response._content.decode('utf-8'))  # pylint: disable=protected-access
            except Exception:  # pylint: disable=broad-except
                raise CLIInternalError(ex)
            raise UnclassifiedUserFault(response_content['error']['message'])
        raise AzureResponseError(ex)
    if _create_new_app:
        logger.warning("Creating webapp '%s' ...", name)
        create_webapp(cmd, rg_name, name, plan, runtime_version if not html else None,
                      using_webapp_up=True, language=language)
        _configure_default_logging(cmd, rg_name, name)
    else:  # for existing app if we might need to update the stack runtime settings
        helper = _StackRuntimeHelper(cmd, client, linux=_is_linux)
        match = helper.resolve(runtime_version)
        if os_name.lower() == 'linux' and site_config.linux_fx_version != runtime_version:
            if match and site_config.linux_fx_version != match['configs']['linux_fx_version']:
                logger.warning('Updating runtime version from %s to %s',
                               site_config.linux_fx_version, match['configs']['linux_fx_version'])
                update_site_configs(cmd, rg_name, name, linux_fx_version=match['configs']['linux_fx_version'])
                logger.warning('Waiting for runtime version to propagate ...')
                time.sleep(30)  # wait for kudu to get updated runtime before zipdeploy. No way to poll for this
            elif not match:
                logger.warning('Updating runtime version from %s to %s',
                               site_config.linux_fx_version, runtime_version)
                update_site_configs(cmd, rg_name, name, linux_fx_version=runtime_version)
                logger.warning('Waiting for runtime version to propagate ...')
                time.sleep(30)  # wait for kudu to get updated runtime before zipdeploy. No way to poll for this
        elif os_name.lower() == 'windows':
            # may need to update stack runtime settings. For node its site_config.app_settings, otherwise site_config
            if match:
                _update_app_settings_for_windows_if_needed(cmd, rg_name, name, match, site_config, runtime_version)
        create_json['runtime_version'] = runtime_version
    # Zip contents & Deploy
    logger.warning("Creating zip with contents of dir %s ...", src_dir)
    # zip contents & deploy
    zip_file_path = zip_contents_from_dir(src_dir, language)
    enable_zip_deploy(cmd, rg_name, name, zip_file_path)
    if launch_browser:
        logger.warning("Launching app using default browser")
        view_in_browser(cmd, rg_name, name, None, logs)
    else:
        _url = _get_url(cmd, rg_name, name)
        logger.warning("You can launch the app at %s", _url)
        create_json.update({'URL': _url})
    if logs:
        _configure_default_logging(cmd, rg_name, name)
        return get_streaming_log(cmd, rg_name, name)
    # persist the chosen values as CLI defaults so the next `az webapp up` is terse
    with ConfiguredDefaultSetter(cmd.cli_ctx.config, True):
        cmd.cli_ctx.config.set_value('defaults', 'group', rg_name)
        cmd.cli_ctx.config.set_value('defaults', 'sku', sku)
        cmd.cli_ctx.config.set_value('defaults', 'appserviceplan', plan)
        cmd.cli_ctx.config.set_value('defaults', 'location', loc)
        cmd.cli_ctx.config.set_value('defaults', 'web', name)
    return create_json
def _update_app_settings_for_windows_if_needed(cmd, rg_name, name, match, site_config, runtime_version):
    """Bring an existing Windows webapp's stack settings in line with the matched runtime.

    Node runtimes are configured via app settings; all other Windows stacks are
    configured via site config properties. Also refreshes the CURRENT_STACK
    metadata and waits for Kudu to pick up a changed runtime.
    """
    update_needed = False
    if 'node' in runtime_version:
        # node: runtime is driven by app settings (e.g. WEBSITE_NODE_DEFAULT_VERSION)
        settings = []
        for k, v in match['configs'].items():
            for app_setting in site_config.app_settings:
                if app_setting.name == k and app_setting.value != v:
                    update_needed = True
                    # BUGFIX: list.append takes a single argument; build the
                    # "KEY=VALUE" string instead of passing (fmt, k, v)
                    settings.append('%s=%s' % (k, v))
        if update_needed:
            logger.warning('Updating runtime version to %s', runtime_version)
            update_app_settings(cmd, rg_name, name, settings=settings, slot=None, slot_settings=None)
    else:
        # non-node: runtime is driven by site config properties
        for k, v in match['configs'].items():
            if getattr(site_config, k, None) != v:
                update_needed = True
                setattr(site_config, k, v)
        if update_needed:
            logger.warning('Updating runtime version to %s', runtime_version)
            update_site_configs(cmd,
                                rg_name,
                                name,
                                net_framework_version=site_config.net_framework_version,
                                php_version=site_config.php_version,
                                python_version=site_config.python_version,
                                java_version=site_config.java_version,
                                java_container=site_config.java_container,
                                java_container_version=site_config.java_container_version)
    current_stack = get_current_stack_from_runtime(runtime_version)
    _update_webapp_current_stack_property_if_needed(cmd, rg_name, name, current_stack)
    if update_needed:
        logger.warning('Waiting for runtime version to propagate ...')
        time.sleep(30)  # wait for kudu to get updated runtime before zipdeploy. No way to poll for this
def _update_webapp_current_stack_property_if_needed(cmd, resource_group, name, current_stack):
    """Sync the CURRENT_STACK metadata property when it is missing or stale."""
    if not current_stack:
        return
    # portal uses this current_stack value to display correct runtime for windows webapps
    client = web_client_factory(cmd.cli_ctx)
    metadata = client.web_apps.list_metadata(resource_group, name)
    is_stale = ('CURRENT_STACK' not in metadata.properties
                or metadata.properties["CURRENT_STACK"] != current_stack)
    if is_stale:
        metadata.properties["CURRENT_STACK"] = current_stack
        client.web_apps.update_metadata(resource_group, name, metadata=metadata)
def _ping_scm_site(cmd, resource_group, name, instance=None):
    """Wake up the Kudu (SCM) site by issuing an authenticated GET against its settings API."""
    from azure.cli.core.util import should_disable_connection_verify
    import requests
    import urllib3
    # work around until the timeout limits issue for linux is investigated & fixed
    user_name, password = _get_site_credential(cmd.cli_ctx, resource_group, name)
    scm_url = _get_scm_url(cmd, resource_group, name)
    auth_headers = urllib3.util.make_headers(basic_auth='{}:{}'.format(user_name, password))
    # pin the request to a specific instance via the ARR affinity cookie when asked
    cookies = {'ARRAffinity': instance} if instance is not None else {}
    requests.get(scm_url + '/api/settings', headers=auth_headers,
                 verify=not should_disable_connection_verify(), cookies=cookies)
def is_webapp_up(tunnel_server):
    """Delegate to the tunnel server's own liveness probe for the remote webapp."""
    return tunnel_server.is_webapp_up()
def get_tunnel(cmd, resource_group_name, name, port=None, slot=None, instance=None):
    """Build a TunnelServer connected to the app's SCM site (Linux apps only).

    Fetches publishing credentials, optionally validates the target instance
    name, pings the SCM site to wake it, and waits for the app to answer
    before returning the ready tunnel server.
    """
    webapp = show_webapp(cmd, resource_group_name, name, slot)
    is_linux = webapp.reserved  # 'reserved' flags a Linux app
    if not is_linux:
        raise CLIError("Only Linux App Service Plans supported, Found a Windows App Service Plan")
    # basic-auth credentials for the SCM site come from the publish profile
    profiles = list_publish_profiles(cmd, resource_group_name, name, slot)
    profile_user_name = next(p['userName'] for p in profiles)
    profile_user_password = next(p['userPWD'] for p in profiles)
    if port is None:
        port = 0  # Will auto-select a free port from 1024-65535
        logger.info('No port defined, creating on random free port')
    # Validate that we have a known instance (case-sensitive)
    if instance is not None:
        instances = list_instances(cmd, resource_group_name, name, slot=slot)
        instance_names = set(i.name for i in instances)
        if instance not in instance_names:
            if slot is not None:
                raise CLIError("The provided instance '{}' is not valid for this webapp and slot.".format(instance))
            raise CLIError("The provided instance '{}' is not valid for this webapp.".format(instance))
    scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
    tunnel_server = TunnelServer('', port, scm_url, profile_user_name, profile_user_password, instance)
    # wake the SCM site, then block until the webapp responds
    _ping_scm_site(cmd, resource_group_name, name, instance=instance)
    _wait_for_webapp(tunnel_server)
    return tunnel_server
def create_tunnel(cmd, resource_group_name, name, port=None, slot=None, timeout=None, instance=None):
    """Open a local TCP tunnel to the app's SCM site and keep it alive until the
    optional timeout (seconds) elapses or the process is interrupted."""
    tunnel_server = get_tunnel(cmd, resource_group_name, name, port, slot, instance)
    # run the tunnel accept loop in a daemon thread so Ctrl+C still works
    t = threading.Thread(target=_start_tunnel, args=(tunnel_server,))
    t.daemon = True
    t.start()
    logger.warning('Opening tunnel on port: %s', tunnel_server.local_port)
    config = get_site_configs(cmd, resource_group_name, name, slot)
    if config.remote_debugging_enabled:
        logger.warning('Tunnel is ready, connect on port %s', tunnel_server.local_port)
    else:
        # default credentials of the built-in SSH server in App Service Linux containers
        ssh_user_name = 'root'
        ssh_user_password = 'Docker!'
        logger.warning('SSH is available { username: %s, password: %s }', ssh_user_name, ssh_user_password)
    logger.warning('Ctrl + C to close')
    if timeout:
        time.sleep(int(timeout))
    else:
        # keep the main thread alive while the tunnel thread runs
        while t.is_alive():
            time.sleep(5)
def create_tunnel_and_session(cmd, resource_group_name, name, port=None, slot=None, timeout=None, instance=None):
    """Open a tunnel to the app's SCM site and immediately attach an interactive
    SSH session through it, staying alive until timeout or either thread exits."""
    tunnel_server = get_tunnel(cmd, resource_group_name, name, port, slot, instance)
    # tunnel accept loop runs in its own daemon thread
    t = threading.Thread(target=_start_tunnel, args=(tunnel_server,))
    t.daemon = True
    t.start()
    # default credentials of the built-in SSH server in App Service Linux containers
    ssh_user_name = 'root'
    ssh_user_password = 'Docker!'
    # SSH session connects to the local end of the tunnel
    s = threading.Thread(target=_start_ssh_session,
                         args=('localhost', tunnel_server.get_port(), ssh_user_name, ssh_user_password))
    s.daemon = True
    s.start()
    if timeout:
        time.sleep(int(timeout))
    else:
        # keep the main thread alive while both the session and tunnel run
        while s.is_alive() and t.is_alive():
            time.sleep(5)
def perform_onedeploy(cmd,
                      resource_group_name,
                      name,
                      src_path=None,
                      src_url=None,
                      target_path=None,
                      artifact_type=None,
                      is_async=None,
                      restart=None,
                      clean=None,
                      ignore_stack=None,
                      timeout=None,
                      slot=None):
    """Entry point for OneDeploy: bundle the CLI arguments into a OneDeployParams
    object and hand off to the internal deployment routine."""
    params = OneDeployParams()
    field_values = {
        'cmd': cmd,
        'resource_group_name': resource_group_name,
        'webapp_name': name,
        'src_path': src_path,
        'src_url': src_url,
        'target_path': target_path,
        'artifact_type': artifact_type,
        'is_async_deployment': is_async,
        'should_restart': restart,
        'is_clean_deployment': clean,
        'should_ignore_stack': ignore_stack,
        'timeout': timeout,
        'slot': slot,
    }
    for attr, value in field_values.items():
        setattr(params, attr, value)
    return _perform_onedeploy_internal(params)
# Class for OneDeploy parameters
# pylint: disable=too-many-instance-attributes,too-few-public-methods
class OneDeployParams:
    """Plain value object carrying every option accepted by the OneDeploy API call."""
    def __init__(self):
        # deployment context
        self.cmd = None
        self.resource_group_name = None
        self.webapp_name = None
        self.slot = None
        # artifact source and destination
        self.src_path = None
        self.src_url = None
        self.artifact_type = None
        self.target_path = None
        # deployment behavior flags
        self.is_async_deployment = None
        self.should_restart = None
        self.is_clean_deployment = None
        self.should_ignore_stack = None
        self.timeout = None
# pylint: enable=too-many-instance-attributes,too-few-public-methods
def _build_onedeploy_url(params):
    """Compose the Kudu /api/publish URL, appending a query flag for every option that was set."""
    scm_url = _get_scm_url(params.cmd, params.resource_group_name, params.webapp_name, params.slot)
    url = '{}/api/publish?type={}'.format(scm_url, params.artifact_type)
    optional_flags = [
        ('async', params.is_async_deployment),
        ('restart', params.should_restart),
        ('clean', params.is_clean_deployment),
        ('ignorestack', params.should_ignore_stack),
    ]
    for flag, value in optional_flags:
        if value is not None:
            url += '&{}={}'.format(flag, str(value))
    if params.target_path is not None:
        url += '&path=' + params.target_path
    return url
def _get_onedeploy_status_url(params):
    """Return the URL of the latest-deployment status endpoint on the app's Kudu site."""
    scm_url = _get_scm_url(params.cmd, params.resource_group_name, params.webapp_name, params.slot)
    return '{}/api/deployments/latest'.format(scm_url)
def _get_basic_headers(params):
    """Build the HTTP headers for a OneDeploy request: basic auth from the site's
    publishing credentials plus a content type matching the artifact source."""
    import urllib3
    user_name, password = _get_site_credential(params.cmd.cli_ctx, params.resource_group_name,
                                               params.webapp_name, params.slot)
    # local files are streamed as raw bytes; URL-based deploys send a small JSON payload
    if params.src_path:
        content_type = 'application/octet-stream'
    elif params.src_url:
        content_type = 'application/json'
    else:
        raise CLIError('Unable to determine source location of the artifact being deployed')
    headers = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
    headers.update({
        'Cache-Control': 'no-cache',
        'User-Agent': get_az_user_agent(),
        'Content-Type': content_type,
    })
    return headers
def _get_onedeploy_request_body(params):
    """Produce the request body: raw bytes of the local artifact, or a JSON
    payload pointing at the remote package URL."""
    import os
    if params.src_path:
        logger.info('Deploying from local path: %s', params.src_path)
        artifact_path = os.path.realpath(os.path.expanduser(params.src_path))
        try:
            with open(artifact_path, 'rb') as fs:
                return fs.read()
        except Exception as e:  # pylint: disable=broad-except
            raise CLIError("Either '{}' is not a valid local file path or you do not have permissions to access it"
                           .format(params.src_path)) from e
    if params.src_url:
        logger.info('Deploying from URL: %s', params.src_url)
        return json.dumps({
            "packageUri": params.src_url
        })
    raise CLIError('Unable to determine source location of the artifact being deployed')
def _update_artifact_type(params):
    """Infer params.artifact_type from the artifact file's extension when the
    caller did not pass --type explicitly."""
    import ntpath

    if params.artifact_type is not None:
        return

    # Interpret deployment type from the file extension if the type parameter is not passed
    file_name = ntpath.basename(params.src_path)
    # guard against filenames with no extension (original code raised IndexError here);
    # everything after the first dot counts as the extension
    file_extension = file_name.split(".", 1)[1] if "." in file_name else ""
    if file_extension in ('war', 'jar', 'ear', 'zip'):
        params.artifact_type = file_extension
    elif file_extension in ('sh', 'bat'):
        params.artifact_type = 'startup'
    else:
        # unknown (or missing) extension: deploy as static content
        params.artifact_type = 'static'
    logger.warning("Deployment type: %s. To override deployment type, please specify the --type parameter. "
                   "Possible values: war, jar, ear, zip, startup, script, static", params.artifact_type)
def _make_onedeploy_request(params):
    """POST the artifact to the Kudu OneDeploy endpoint and report the outcome.

    On 200/202 the deployment status endpoint is polled and its final response
    body returned. Raises CLIError for 404 (API unavailable), 409 (another
    deployment in progress) and any other status code.
    """
    import requests
    from azure.cli.core.util import (
        should_disable_connection_verify,
    )
    # Build the request body, headers, API URL and status URL
    body = _get_onedeploy_request_body(params)
    headers = _get_basic_headers(params)
    deploy_url = _build_onedeploy_url(params)
    deployment_status_url = _get_onedeploy_status_url(params)
    logger.info("Deployment API: %s", deploy_url)
    response = requests.post(deploy_url, data=body, headers=headers, verify=not should_disable_connection_verify())
    # For debugging purposes only, you can change the async deployment into a sync deployment by polling the API status
    # For that, set poll_async_deployment_for_debugging=True
    poll_async_deployment_for_debugging = True
    # check the status of async deployment
    if response.status_code == 202 or response.status_code == 200:
        response_body = None
        if poll_async_deployment_for_debugging:
            logger.info('Polling the status of async deployment')
            response_body = _check_zip_deployment_status(params.cmd, params.resource_group_name, params.webapp_name,
                                                         deployment_status_url, headers, params.timeout)
            logger.info('Async deployment complete. Server response: %s', response_body)
        return response_body
    # API not available yet!
    if response.status_code == 404:
        raise CLIError("This API isn't available in this environment yet!")
    # check if there's an ongoing process
    if response.status_code == 409:
        raise CLIError("Another deployment is in progress. You can track the ongoing deployment at {}"
                       .format(deployment_status_url))
    # check if an error occured during deployment
    # (any status other than 200/202/404/409 lands here; status_code is always truthy)
    if response.status_code:
        raise CLIError("An error occured during deployment. Status Code: {}, Details: {}"
                       .format(response.status_code, response.text))
# OneDeploy
def _perform_onedeploy_internal(params):
    """Run the OneDeploy flow: normalize the artifact type, then issue the deploy request."""
    # Update artifact type, if required
    _update_artifact_type(params)
    # Now make the OneDeploy API call
    logger.info("Initiating deployment")
    result = _make_onedeploy_request(params)
    logger.info("Deployment has completed successfully")
    return result
def _wait_for_webapp(tunnel_server):
    """Poll once per second until the webapp answers; give up after ~60 attempts."""
    attempt = 0
    while not is_webapp_up(tunnel_server):
        if attempt == 0:
            logger.warning('Connection is not ready yet, please wait')
        if attempt == 60:
            raise CLIError('SSH timeout, your app must be running before'
                           ' it can accept SSH connections. '
                           'Use `az webapp log tail` to review the app startup logs.')
        attempt += 1
        logger.warning('.')
        time.sleep(1)
def _start_tunnel(tunnel_server):
tunnel_server.start_server()
def _start_ssh_session(hostname, port, username, password):
    """Open an interactive SSH shell to the app container.

    Retries the connection once per second for up to ~60 attempts, then runs
    the message-of-the-day and a login shell over the connection. The
    Connection object is always closed on exit.
    """
    tries = 0
    while True:
        try:
            c = Connection(host=hostname,
                           port=port,
                           user=username,
                           # connect_timeout=60*10,
                           connect_kwargs={"password": password})
            break
        except Exception as ex: # pylint: disable=broad-except
            # Connection not up yet — log, warn once, and retry with a 1s backoff
            logger.info(ex)
            if tries == 0:
                logger.warning('Connection is not ready yet, please wait')
            if tries == 60:
                raise CLIError("Timeout Error, Unable to establish a connection")
            tries = tries + 1
            logger.warning('.')
            time.sleep(1)
    try:
        # Show the container banner, then hand the user an interactive login shell
        c.run('cat /etc/motd', pty=True)
        c.run('source /etc/profile; exec $SHELL -l', pty=True)
    except Exception as ex: # pylint: disable=broad-except
        logger.info(ex)
    finally:
        c.close()
def ssh_webapp(cmd, resource_group_name, name, port=None, slot=None, timeout=None, instance=None):  # pylint: disable=too-many-statements
    """Open an SSH session to a Linux webapp.

    On a Windows client this opens the browser-based web SSH console; on other
    platforms it creates a local tunnel and attaches an interactive session.
    """
    import platform

    if platform.system() == "Windows":
        webapp = show_webapp(cmd, resource_group_name, name, slot)
        if not webapp.reserved:  # 'reserved' is set only for Linux plans
            raise ValidationError("Only Linux App Service Plans supported, found a Windows App Service Plan")
        scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
        open_page_in_browser(scm_url + '/webssh/host')
        return

    config = get_site_configs(cmd, resource_group_name, name, slot)
    if config.remote_debugging_enabled:
        raise ValidationError('Remote debugging is enabled, please disable')
    create_tunnel_and_session(
        cmd, resource_group_name, name, port=port, slot=slot, timeout=timeout, instance=instance)
def create_devops_pipeline(
        cmd,
        functionapp_name=None,
        organization_name=None,
        project_name=None,
        repository_name=None,
        overwrite_yaml=None,
        allow_force_push=None,
        github_pat=None,
        github_repository=None
):
    """Interactively build an Azure DevOps pipeline for a function app."""
    from .azure_devops_build_interactive import AzureDevopsBuildInteractive
    interactive = AzureDevopsBuildInteractive(cmd, logger, functionapp_name,
                                              organization_name, project_name, repository_name,
                                              overwrite_yaml, allow_force_push,
                                              github_pat, github_repository)
    return interactive.interactive_azure_devops_build()
def _configure_default_logging(cmd, rg_name, name):
    """Turn on filesystem application, web-server, and container logging for the app."""
    logger.warning("Configuring default logging for the app, if not already enabled")
    return config_diagnostics(cmd, rg_name, name,
                              application_logging=True,
                              web_server_logging='filesystem',
                              docker_container_logging='true')
def _validate_app_service_environment_id(cli_ctx, ase, resource_group_name):
    """Return a full App Service Environment resource ID, building one from a bare name."""
    if is_valid_resource_id(ase):
        return ase

    from azure.cli.core.commands.client_factory import get_subscription_id
    return resource_id(
        subscription=get_subscription_id(cli_ctx),
        resource_group=resource_group_name,
        namespace='Microsoft.Web',
        type='hostingEnvironments',
        name=ase)
def _validate_asp_sku(app_service_environment, sku):
    """Ensure Isolated SKUs are paired with an ASE and non-Isolated SKUs are not."""
    # Isolated SKU is supported only for ASE
    is_isolated = sku.upper() in ['I1', 'I2', 'I3', 'I1V2', 'I2V2', 'I3V2']
    if is_isolated and not app_service_environment:
        raise CLIError("The pricing tier 'Isolated' is not allowed for this app service plan. Use this link to "
                       "learn more: https://docs.microsoft.com/en-us/azure/app-service/overview-hosting-plans")
    if not is_isolated and app_service_environment:
        raise CLIError("Only pricing tier 'Isolated' is allowed in this app service plan. Use this link to "
                       "learn more: https://docs.microsoft.com/en-us/azure/app-service/overview-hosting-plans")
def _format_key_vault_id(cli_ctx, key_vault, resource_group_name):
    """Return a full Key Vault resource ID, building one from a bare vault name."""
    if is_valid_resource_id(key_vault):
        return key_vault

    from azure.cli.core.commands.client_factory import get_subscription_id
    return resource_id(
        subscription=get_subscription_id(cli_ctx),
        resource_group=resource_group_name,
        namespace='Microsoft.KeyVault',
        type='vaults',
        name=key_vault)
def _verify_hostname_binding(cmd, resource_group_name, name, hostname, slot=None):
    """Return True if `hostname` has a Verified or Managed binding on the app.

    BUG FIX: the original lower-cased only the binding name and compared it
    against the raw `hostname` argument, so a mixed-case input never matched.
    Host names are compared case-insensitively on both sides now, and the
    loop returns as soon as a match is found.
    """
    hostname_bindings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                                'list_host_name_bindings', slot)
    for hostname_binding in hostname_bindings:
        binding_name = hostname_binding.name.split('/')[-1]
        if binding_name.lower() == hostname.lower() and \
                hostname_binding.host_name_type in ('Verified', 'Managed'):
            return True
    return False
def update_host_key(cmd, resource_group_name, name, key_type, key_name, key_value=None, slot=None):
    """Create or update a host-level key on the app (slot-scoped when `slot` is given)."""
    # pylint: disable=protected-access
    key_info = KeyInfo(name=key_name, value=key_value)
    # Patch the serialization map so name/value are nested under 'properties' in the payload
    KeyInfo._attribute_map = {
        'name': {'key': 'properties.name', 'type': 'str'},
        'value': {'key': 'properties.value', 'type': 'str'},
    }
    client = web_client_factory(cmd.cli_ctx)
    if slot:
        return client.web_apps.create_or_update_host_secret_slot(resource_group_name,
                                                                 name,
                                                                 key_type,
                                                                 key_name,
                                                                 slot, key=key_info)
    return client.web_apps.create_or_update_host_secret(resource_group_name,
                                                        name,
                                                        key_type,
                                                        key_name, key=key_info)
def list_host_keys(cmd, resource_group_name, name, slot=None):
    """List host-level keys for the app (or the given slot)."""
    web_apps = web_client_factory(cmd.cli_ctx).web_apps
    if slot:
        return web_apps.list_host_keys_slot(resource_group_name, name, slot)
    return web_apps.list_host_keys(resource_group_name, name)
def delete_host_key(cmd, resource_group_name, name, key_type, key_name, slot=None):
    """Delete a host-level key from the app (or the given slot)."""
    web_apps = web_client_factory(cmd.cli_ctx).web_apps
    if slot:
        return web_apps.delete_host_secret_slot(resource_group_name, name, key_type, key_name, slot)
    return web_apps.delete_host_secret(resource_group_name, name, key_type, key_name)
def show_function(cmd, resource_group_name, name, function_name):
    """Get a single function's details; return a message string when it does not exist."""
    client = web_client_factory(cmd.cli_ctx)
    function = client.web_apps.get_function(resource_group_name, name, function_name)
    if function is not None:
        return function
    return "Function '{}' does not exist in app '{}'".format(function_name, name)
def delete_function(cmd, resource_group_name, name, function_name):
    """Delete a function from the app."""
    client = web_client_factory(cmd.cli_ctx)
    return client.web_apps.delete_function(resource_group_name, name, function_name)
def update_function_key(cmd, resource_group_name, name, function_name, key_name, key_value=None, slot=None):
    """Create or update a function-scoped key (slot-scoped when `slot` is given)."""
    # pylint: disable=protected-access
    key_info = KeyInfo(name=key_name, value=key_value)
    # Patch the serialization map so name/value are nested under 'properties' in the payload
    KeyInfo._attribute_map = {
        'name': {'key': 'properties.name', 'type': 'str'},
        'value': {'key': 'properties.value', 'type': 'str'},
    }
    client = web_client_factory(cmd.cli_ctx)
    if slot:
        return client.web_apps.create_or_update_function_secret_slot(resource_group_name,
                                                                     name,
                                                                     function_name,
                                                                     key_name,
                                                                     slot,
                                                                     key_info)
    return client.web_apps.create_or_update_function_secret(resource_group_name,
                                                            name,
                                                            function_name,
                                                            key_name,
                                                            key_info)
def list_function_keys(cmd, resource_group_name, name, function_name, slot=None):
    """List keys for a single function (or its slot-scoped variant)."""
    web_apps = web_client_factory(cmd.cli_ctx).web_apps
    if slot:
        return web_apps.list_function_keys_slot(resource_group_name, name, function_name, slot)
    return web_apps.list_function_keys(resource_group_name, name, function_name)
def delete_function_key(cmd, resource_group_name, name, key_name, function_name=None, slot=None):
    """Delete a function-scoped key (or its slot-scoped variant)."""
    web_apps = web_client_factory(cmd.cli_ctx).web_apps
    if slot:
        return web_apps.delete_function_secret_slot(resource_group_name, name, function_name, key_name, slot)
    return web_apps.delete_function_secret(resource_group_name, name, function_name, key_name)
def add_github_actions(cmd, resource_group, name, repo, runtime=None, token=None, slot=None, # pylint: disable=too-many-statements,too-many-branches
                       branch='master', login_with_github=False, force=False):
    """Connect a GitHub repo to the app via a GitHub Actions workflow.

    Verifies the app and repo, detects (or validates) the runtime, commits a
    workflow file to the repo, stores the app's publish profile as a repo
    secret, and points the app's source-control settings at the repo.
    Returns the repo's Actions URL.
    """
    if not token and not login_with_github:
        raise_missing_token_suggestion()
    elif not token:
        scopes = ["admin:repo_hook", "repo", "workflow"]
        token = get_github_access_token(cmd, scopes)
    elif token and login_with_github:
        logger.warning("Both token and --login-with-github flag are provided. Will use provided token")

    # Verify resource group, app
    site_availability = get_site_availability(cmd, name)
    if site_availability.name_available or (not site_availability.name_available and
                                            site_availability.reason == 'Invalid'):
        raise ResourceNotFoundError(
            "The Resource 'Microsoft.Web/sites/%s' under resource group '%s' "
            "was not found." % (name, resource_group))
    app_details = get_app_details(cmd, name)
    if app_details is None:
        raise ResourceNotFoundError(
            "Unable to retrieve details of the existing app %s. Please check that the app is a part of "
            "the current subscription" % name)
    current_rg = app_details.resource_group
    if resource_group is not None and (resource_group.lower() != current_rg.lower()):
        raise ResourceNotFoundError("The webapp %s exists in ResourceGroup %s and does not match the "
                                    "value entered %s. Please re-run command with the correct "
                                    "parameters." % (name, current_rg, resource_group))
    parsed_plan_id = parse_resource_id(app_details.server_farm_id)
    client = web_client_factory(cmd.cli_ctx)
    plan_info = client.app_service_plans.get(parsed_plan_id['resource_group'], parsed_plan_id['name'])
    is_linux = plan_info.reserved

    # Verify github repo
    from github import Github, GithubException
    from github.GithubException import BadCredentialsException, UnknownObjectException

    if repo.strip()[-1] == '/':
        repo = repo.strip()[:-1]

    g = Github(token)
    github_repo = None
    try:
        github_repo = g.get_repo(repo)
        try:
            github_repo.get_branch(branch=branch)
        except GithubException as e:
            error_msg = "Encountered GitHub error when accessing {} branch in {} repo.".format(branch, repo)
            if e.data and e.data['message']:
                error_msg += " Error: {}".format(e.data['message'])
            raise CLIError(error_msg)
        logger.warning('Verified GitHub repo and branch')
    except BadCredentialsException:
        raise CLIError("Could not authenticate to the repository. Please create a Personal Access Token and use "
                       "the --token argument. Run 'az webapp deployment github-actions add --help' "
                       "for more information.")
    except GithubException as e:
        error_msg = "Encountered GitHub error when accessing {} repo".format(repo)
        if e.data and e.data['message']:
            error_msg += " Error: {}".format(e.data['message'])
        raise CLIError(error_msg)

    # Verify runtime
    app_runtime_info = _get_app_runtime_info(
        cmd=cmd, resource_group=resource_group, name=name, slot=slot, is_linux=is_linux)

    app_runtime_string = None
    if (app_runtime_info and app_runtime_info['display_name']):
        app_runtime_string = app_runtime_info['display_name']

    github_actions_version = None
    if (app_runtime_info and app_runtime_info['github_actions_version']):
        github_actions_version = app_runtime_info['github_actions_version']

    if runtime and app_runtime_string:
        if app_runtime_string.lower() != runtime.lower():
            # BUG FIX: the original passed a plain string containing literal
            # '{app_runtime_string}'/'{runtime}' placeholders (no f-prefix /
            # .format), so the braces were logged verbatim.
            logger.warning('The app runtime: %s does not match the runtime specified: '
                           '%s. Using the specified runtime %s.', app_runtime_string, runtime, runtime)
            app_runtime_string = runtime
    elif runtime:
        app_runtime_string = runtime

    if not app_runtime_string:
        raise CLIError('Could not detect runtime. Please specify using the --runtime flag.')

    if not _runtime_supports_github_actions(runtime_string=app_runtime_string, is_linux=is_linux):
        raise CLIError("Runtime %s is not supported for GitHub Actions deployments." % app_runtime_string)

    # Get workflow template
    logger.warning('Getting workflow template using runtime: %s', app_runtime_string)
    workflow_template = _get_workflow_template(github=g, runtime_string=app_runtime_string, is_linux=is_linux)

    # Fill workflow template
    guid = str(uuid.uuid4()).replace('-', '')
    publish_profile_name = "AzureAppService_PublishProfile_{}".format(guid)
    logger.warning(
        'Filling workflow template with name: %s, branch: %s, version: %s, slot: %s',
        name, branch, github_actions_version, slot if slot else 'production')
    completed_workflow_file = _fill_workflow_template(content=workflow_template.decoded_content.decode(), name=name,
                                                      branch=branch, slot=slot, publish_profile=publish_profile_name,
                                                      version=github_actions_version)
    completed_workflow_file = completed_workflow_file.encode()

    # Check if workflow exists in repo, otherwise push
    if slot:
        file_name = "{}_{}({}).yml".format(branch.replace('/', '-'), name.lower(), slot)
    else:
        file_name = "{}_{}.yml".format(branch.replace('/', '-'), name.lower())
    dir_path = "{}/{}".format('.github', 'workflows')
    file_path = "/{}/{}".format(dir_path, file_name)
    try:
        existing_workflow_file = github_repo.get_contents(path=file_path, ref=branch)
        existing_publish_profile_name = _get_publish_profile_from_workflow_file(
            workflow_file=str(existing_workflow_file.decoded_content))
        if existing_publish_profile_name:
            # Reuse the secret name already referenced by the checked-in workflow
            completed_workflow_file = completed_workflow_file.decode()
            completed_workflow_file = completed_workflow_file.replace(
                publish_profile_name, existing_publish_profile_name)
            completed_workflow_file = completed_workflow_file.encode()
            publish_profile_name = existing_publish_profile_name
        logger.warning("Existing workflow file found")
        if force:
            logger.warning("Replacing the existing workflow file")
            github_repo.update_file(path=file_path, message="Update workflow using Azure CLI",
                                    content=completed_workflow_file, sha=existing_workflow_file.sha, branch=branch)
        else:
            option = prompt_y_n('Replace existing workflow file?')
            if option:
                logger.warning("Replacing the existing workflow file")
                github_repo.update_file(path=file_path, message="Update workflow using Azure CLI",
                                        content=completed_workflow_file, sha=existing_workflow_file.sha,
                                        branch=branch)
            else:
                logger.warning("Use the existing workflow file")
                if existing_publish_profile_name:
                    publish_profile_name = existing_publish_profile_name
    except UnknownObjectException:
        logger.warning("Creating new workflow file: %s", file_path)
        github_repo.create_file(path=file_path, message="Create workflow using Azure CLI",
                                content=completed_workflow_file, branch=branch)

    # Add publish profile to GitHub
    logger.warning('Adding publish profile to GitHub')
    _add_publish_profile_to_github(cmd=cmd, resource_group=resource_group, name=name, repo=repo,
                                   token=token, github_actions_secret_name=publish_profile_name,
                                   slot=slot)

    # Set site source control properties
    _update_site_source_control_properties_for_gh_action(
        cmd=cmd, resource_group=resource_group, name=name, token=token, repo=repo, branch=branch, slot=slot)

    github_actions_url = "https://github.com/{}/actions".format(repo)
    return github_actions_url
def remove_github_actions(cmd, resource_group, name, repo, token=None, slot=None, # pylint: disable=too-many-statements
                          branch='master', login_with_github=False):
    """Disconnect GitHub Actions from the app.

    Deletes the generated workflow file from the repo, removes the
    publish-profile secret it referenced, and clears the app's
    source-control settings. Returns a confirmation string.
    """
    if not token and not login_with_github:
        raise_missing_token_suggestion()
    elif not token:
        scopes = ["admin:repo_hook", "repo", "workflow"]
        token = get_github_access_token(cmd, scopes)
    elif token and login_with_github:
        logger.warning("Both token and --login-with-github flag are provided. Will use provided token")

    # Verify resource group, app
    site_availability = get_site_availability(cmd, name)
    if site_availability.name_available or (not site_availability.name_available and
                                            site_availability.reason == 'Invalid'):
        raise CLIError("The Resource 'Microsoft.Web/sites/%s' under resource group '%s' was not found." %
                       (name, resource_group))
    app_details = get_app_details(cmd, name)
    if app_details is None:
        raise CLIError("Unable to retrieve details of the existing app %s. "
                       "Please check that the app is a part of the current subscription" % name)
    current_rg = app_details.resource_group
    if resource_group is not None and (resource_group.lower() != current_rg.lower()):
        raise CLIError("The webapp %s exists in ResourceGroup %s and does not match "
                       "the value entered %s. Please re-run command with the correct "
                       "parameters." % (name, current_rg, resource_group))

    # Verify github repo
    from github import Github, GithubException
    from github.GithubException import BadCredentialsException, UnknownObjectException

    if repo.strip()[-1] == '/':
        repo = repo.strip()[:-1]

    g = Github(token)
    github_repo = None
    try:
        github_repo = g.get_repo(repo)
        try:
            github_repo.get_branch(branch=branch)
        except GithubException as e:
            error_msg = "Encountered GitHub error when accessing {} branch in {} repo.".format(branch, repo)
            if e.data and e.data['message']:
                error_msg += " Error: {}".format(e.data['message'])
            raise CLIError(error_msg)
        logger.warning('Verified GitHub repo and branch')
    except BadCredentialsException:
        raise CLIError("Could not authenticate to the repository. Please create a Personal Access Token and use "
                       "the --token argument. Run 'az webapp deployment github-actions add --help' "
                       "for more information.")
    except GithubException as e:
        error_msg = "Encountered GitHub error when accessing {} repo".format(repo)
        if e.data and e.data['message']:
            error_msg += " Error: {}".format(e.data['message'])
        raise CLIError(error_msg)

    # Check if workflow exists in repo and remove
    # The workflow file name mirrors the one created by add_github_actions:
    # branch + app name, with the slot in parentheses when present.
    file_name = "{}_{}({}).yml".format(
        branch.replace('/', '-'), name.lower(), slot) if slot else "{}_{}.yml".format(
        branch.replace('/', '-'), name.lower())
    dir_path = "{}/{}".format('.github', 'workflows')
    file_path = "/{}/{}".format(dir_path, file_name)
    existing_publish_profile_name = None
    try:
        existing_workflow_file = github_repo.get_contents(path=file_path, ref=branch)
        # Remember the secret name the workflow references so it can be deleted below
        existing_publish_profile_name = _get_publish_profile_from_workflow_file(
            workflow_file=str(existing_workflow_file.decoded_content))
        logger.warning("Removing the existing workflow file")
        github_repo.delete_file(path=file_path, message="Removing workflow file, disconnecting github actions",
                                sha=existing_workflow_file.sha, branch=branch)
    except UnknownObjectException as e:
        error_msg = "Error when removing workflow file."
        if e.data and e.data['message']:
            error_msg += " Error: {}".format(e.data['message'])
        raise CLIError(error_msg)

    # Remove publish profile from GitHub
    if existing_publish_profile_name:
        logger.warning('Removing publish profile from GitHub')
        _remove_publish_profile_from_github(cmd=cmd, resource_group=resource_group, name=name, repo=repo, token=token,
                                            github_actions_secret_name=existing_publish_profile_name, slot=slot)

    # Remove site source control properties
    delete_source_control(cmd=cmd,
                          resource_group_name=resource_group,
                          name=name,
                          slot=slot)

    return "Disconnected successfully."
def _get_publish_profile_from_workflow_file(workflow_file):
import re
publish_profile = None
regex = re.search(r'publish-profile: \$\{\{ secrets\..*?\}\}', workflow_file)
if regex:
publish_profile = regex.group()
publish_profile = publish_profile.replace('publish-profile: ${{ secrets.', '')
publish_profile = publish_profile[:-2]
if publish_profile:
return publish_profile.strip()
return None
def _update_site_source_control_properties_for_gh_action(cmd, resource_group, name, token, repo=None,
                                                         branch="master", slot=None):
    """Reset the app's source-control settings to point at the GitHub Actions repo/branch."""
    repo_url = 'https://github.com/' + repo if repo else None

    site_source_control = show_source_control(cmd=cmd,
                                              resource_group_name=resource_group,
                                              name=name,
                                              slot=slot)
    if site_source_control:
        # Keep the existing URL when none was supplied, then drop the old config
        if not repo_url:
            repo_url = site_source_control.repo_url
        delete_source_control(cmd=cmd,
                              resource_group_name=resource_group,
                              name=name,
                              slot=slot)

    config_source_control(cmd=cmd,
                          resource_group_name=resource_group,
                          name=name,
                          repo_url=repo_url,
                          repository_type='github',
                          github_action=True,
                          branch=branch,
                          git_token=token,
                          slot=slot)
def _get_workflow_template(github, runtime_string, is_linux):
    """Fetch the GitHub Actions workflow template file matching the runtime."""
    from github import GithubException
    from github.GithubException import BadCredentialsException

    template_repo_path = 'Azure/actions-workflow-templates'
    template_file_path = _get_template_file_path(runtime_string=runtime_string, is_linux=is_linux)

    try:
        template_repo = github.get_repo(template_repo_path)
        return template_repo.get_contents(template_file_path)
    except BadCredentialsException:
        raise CLIError("Could not authenticate to the repository. Please create a Personal Access Token and use "
                       "the --token argument. Run 'az webapp deployment github-actions add --help' "
                       "for more information.")
    except GithubException as e:
        error_msg = "Encountered GitHub error when retrieving workflow template"
        if e.data and e.data['message']:
            error_msg += ": {}".format(e.data['message'])
        raise CLIError(error_msg)
def _fill_workflow_template(content, name, branch, slot, publish_profile, version):
if not slot:
slot = 'production'
content = content.replace('${web-app-name}', name)
content = content.replace('${branch}', branch)
content = content.replace('${slot-name}', slot)
content = content.replace('${azure-webapp-publish-profile-name}', publish_profile)
content = content.replace('${AZURE_WEBAPP_PUBLISH_PROFILE}', publish_profile)
content = content.replace('${dotnet-core-version}', version)
content = content.replace('${java-version}', version)
content = content.replace('${node-version}', version)
content = content.replace('${python-version}', version)
return content
def _get_template_file_path(runtime_string, is_linux):
    """Map a runtime string (e.g. 'java|11|tomcat|9') to a workflow template path.

    Raises CLIError when the runtime is missing or has no known template.
    """
    if not runtime_string:
        raise CLIError('Unable to retrieve workflow template')

    runtime_string = runtime_string.lower()
    runtime_stack = runtime_string.split('|')[0]
    template_file_path = None

    if is_linux:
        template_file_path = LINUX_GITHUB_ACTIONS_WORKFLOW_TEMPLATE_PATH.get(runtime_stack, None)
    else:
        # Handle java naming: the container is the third '|' segment.
        if runtime_stack == 'java':
            java_container_split = runtime_string.split('|')
            # BUG FIX: the original guarded with len(...) >= 2 but indexed [2],
            # raising IndexError for two-part strings such as 'java|11'.
            if len(java_container_split) > 2:
                if java_container_split[2] == 'tomcat':
                    runtime_stack = 'tomcat'
                elif java_container_split[2] == 'java se':
                    runtime_stack = 'java'
        template_file_path = WINDOWS_GITHUB_ACTIONS_WORKFLOW_TEMPLATE_PATH.get(runtime_stack, None)

    if not template_file_path:
        raise CLIError('Unable to retrieve workflow template.')
    return template_file_path
def _add_publish_profile_to_github(cmd, resource_group, name, repo, token, github_actions_secret_name, slot=None):
    """Store the app's publish profile (with secrets) as an encrypted GitHub
    Actions secret named `github_actions_secret_name` in `repo`.
    """
    # Get publish profile with secrets
    import requests

    logger.warning("Fetching publish profile with secrets for the app '%s'", name)
    publish_profile_bytes = _generic_site_operation(
        cmd.cli_ctx, resource_group, name, 'list_publishing_profile_xml_with_secrets',
        slot, {"format": "WebDeploy"})
    publish_profile = list(publish_profile_bytes)
    if publish_profile:
        # The result is iterable; the first chunk holds the XML payload as bytes
        publish_profile = publish_profile[0].decode('ascii')
    else:
        raise CLIError('Unable to retrieve publish profile.')

    # Add publish profile with secrets as a GitHub Actions Secret in the repo
    headers = {}
    headers['Authorization'] = 'Token {}'.format(token)
    headers['Content-Type'] = 'application/json;'
    headers['Accept'] = 'application/json;'

    # Secrets must be sealed with the repository's public key before upload
    public_key_url = "https://api.github.com/repos/{}/actions/secrets/public-key".format(repo)
    public_key = requests.get(public_key_url, headers=headers)
    if not public_key.ok:
        raise CLIError('Request to GitHub for public key failed.')
    public_key = public_key.json()

    encrypted_github_actions_secret = _encrypt_github_actions_secret(public_key=public_key['key'],
                                                                     secret_value=str(publish_profile))
    payload = {
        "encrypted_value": encrypted_github_actions_secret,
        "key_id": public_key['key_id']
    }

    # PUT the sealed secret; any non-2xx status is treated as failure
    store_secret_url = "https://api.github.com/repos/{}/actions/secrets/{}".format(repo, github_actions_secret_name)
    stored_secret = requests.put(store_secret_url, data=json.dumps(payload), headers=headers)
    if str(stored_secret.status_code)[0] != '2':
        raise CLIError('Unable to add publish profile to GitHub. Request status code: %s' % stored_secret.status_code)
def _remove_publish_profile_from_github(cmd, resource_group, name, repo, token, github_actions_secret_name, slot=None):
    """Delete the publish-profile secret from the GitHub repo (best effort; response unchecked)."""
    import requests
    headers = {'Authorization': 'Token {}'.format(token)}
    secret_url = "https://api.github.com/repos/{}/actions/secrets/{}".format(repo, github_actions_secret_name)
    requests.delete(secret_url, headers=headers)
def _runtime_supports_github_actions(runtime_string, is_linux):
    """Return True when the matching runtime stack declares github_actions_properties."""
    platform_key = 'linux' if is_linux else 'windows'
    stacks = get_file_json(RUNTIME_STACKS)[platform_key]
    for stack in stacks:
        if stack['displayName'].lower() == runtime_string.lower():
            if stack.get('github_actions_properties'):
                return True
    return False
def _get_app_runtime_info(cmd, resource_group, name, slot, is_linux):
    """Detect the app's runtime stack and version for workflow generation.

    Returns the dict produced by _get_app_runtime_info_helper, or None when
    the runtime (or its version, for node) cannot be determined.
    """
    app_runtime = None

    if is_linux:
        app_metadata = get_site_configs(cmd=cmd, resource_group_name=resource_group, name=name, slot=slot)
        app_runtime = getattr(app_metadata, 'linux_fx_version', None)
        return _get_app_runtime_info_helper(app_runtime, "", is_linux)

    app_metadata = _generic_site_operation(cmd.cli_ctx, resource_group, name, 'list_metadata', slot)
    app_metadata_properties = getattr(app_metadata, 'properties', {})
    if 'CURRENT_STACK' in app_metadata_properties:
        app_runtime = app_metadata_properties['CURRENT_STACK']

    if app_runtime and app_runtime.lower() == 'node':
        app_settings = get_app_settings(cmd=cmd, resource_group_name=resource_group, name=name, slot=slot)
        for app_setting in app_settings:
            if 'name' in app_setting and app_setting['name'] == 'WEBSITE_NODE_DEFAULT_VERSION':
                app_runtime_version = app_setting['value'] if 'value' in app_setting else None
                if app_runtime_version:
                    return _get_app_runtime_info_helper(app_runtime, app_runtime_version, is_linux)
        # NOTE(review): returns None when WEBSITE_NODE_DEFAULT_VERSION is absent — confirm intended.
    elif app_runtime and app_runtime.lower() == 'python':
        app_settings = get_site_configs(cmd=cmd, resource_group_name=resource_group, name=name, slot=slot)
        app_runtime_version = getattr(app_settings, 'python_version', '')
        return _get_app_runtime_info_helper(app_runtime, app_runtime_version, is_linux)
    elif app_runtime and app_runtime.lower() == 'dotnetcore':
        # BUG FIX: the original assigned '3.1' and immediately overwrote it
        # with ""; the dead store is removed, keeping the effective behavior
        # (empty version string).
        app_runtime_version = ""
        return _get_app_runtime_info_helper(app_runtime, app_runtime_version, is_linux)
    elif app_runtime and app_runtime.lower() == 'java':
        app_settings = get_site_configs(cmd=cmd, resource_group_name=resource_group, name=name, slot=slot)
        app_runtime_version = "{java_version}, {java_container}, {java_container_version}".format(
            java_version=getattr(app_settings, 'java_version', '').lower(),
            java_container=getattr(app_settings, 'java_container', '').lower(),
            java_container_version=getattr(app_settings, 'java_container_version', '').lower()
        )
        return _get_app_runtime_info_helper(app_runtime, app_runtime_version, is_linux)
def _get_app_runtime_info_helper(app_runtime, app_runtime_version, is_linux):
    """Look up the stack display name and GitHub Actions version for a runtime, or None."""
    platform_key = 'linux' if is_linux else 'windows'
    stacks = get_file_json(RUNTIME_STACKS)[platform_key]
    for stack in stacks:
        gh_props = stack.get('github_actions_properties')
        if not gh_props:
            continue
        if is_linux:
            # Linux stacks match on display name only
            matched = stack['displayName'].lower() == app_runtime.lower()
        else:
            # Windows stacks match on both runtime and runtime version
            matched = (gh_props['app_runtime'].lower() == app_runtime.lower() and
                       gh_props['app_runtime_version'].lower() == app_runtime_version.lower())
        if matched:
            return {
                "display_name": stack['displayName'],
                "github_actions_version": gh_props['github_actions_version']
            }
    return None
def _encrypt_github_actions_secret(public_key, secret_value):
    """Seal a secret value with the repo's public key for the GitHub secrets API."""
    from base64 import b64encode
    repo_key = public.PublicKey(public_key.encode("utf-8"), encoding.Base64Encoder())
    sealed = public.SealedBox(repo_key).encrypt(secret_value.encode("utf-8"))
    return b64encode(sealed).decode("utf-8")
|
'use strict'

// Migration: recreate the FK on SessionTokens.cnodeUserUUID so that deleting
// a CNodeUser row is blocked while session tokens still reference it
// (ON DELETE RESTRICT), while UUID updates continue to cascade.
module.exports = {
  up: async (queryInterface, Sequelize) => {
    await queryInterface.sequelize.query(`ALTER TABLE "public"."SessionTokens"
    DROP CONSTRAINT "SessionTokens_cnodeUserUUID_fkey",
    ADD CONSTRAINT "SessionTokens_cnodeUserUUID_fkey" FOREIGN KEY ("cnodeUserUUID")
    REFERENCES "public"."CNodeUsers" ("cnodeUserUUID") ON DELETE RESTRICT ON UPDATE CASCADE;`)
  },
  down: (queryInterface, Sequelize) => {
    /*
      Add reverting commands here.
      Return a promise to correctly handle asynchronicity.

      Example:
      return queryInterface.dropTable('users');
    */
  }
}
|
/**
 * Collects promises created during rendering so the caller can wait for all
 * of them (via taskAll) before continuing.
 */
export default class Async {
  constructor() {
    // Promises registered through task()
    this.promises = [];
    // When true, task() short-circuits and returns a resolved promise
    this.clientPrerender = false;
  }

  setClientPreRender() {
    // Prevents anything that was server rendered from running again.
    this.clientPrerender = true;
  }

  setClientPostRender() {
    // Re-enable task execution once the client has taken over rendering.
    this.clientPrerender = false;
  }

  /**
   * Wrap `cb` in a tracked Promise. During client prerender the callback is
   * skipped entirely and an already-resolved promise is returned.
   */
  task(cb) {
    if (this.clientPrerender) {
      return new Promise((resolve) => resolve());
    }
    const p = new Promise(cb);
    this.promises.push(p);
    return p;
  }

  /**
   * Resolve once every registered promise has settled. Tasks may enqueue
   * further tasks, so Promise.all is re-run until the list stops growing.
   */
  taskAll() {
    return new Promise((resolve) => {
      const recursiveAll = () => {
        const oldLength = this.promises.length;
        Promise.all(this.promises).then(() => {
          if (oldLength === this.promises.length) resolve();
          else recursiveAll();
        });
      };
      recursiveAll();
    });
  }
}
|
// Icon module (transpiled ES-module shape): SVG path markup plus the
// icon's nominal width/height.
var data = {
	"body": "<path opacity=\".3\" d=\"M3 17h18V5H3v12zm5-9h11v2H8V8zm0 4h11v2H8v-2zM5 8h2v2H5V8zm0 4h2v2H5v-2z\" fill=\"currentColor\"/><path d=\"M8 12h11v2H8zm0-4h11v2H8zm13-5H3c-1.1 0-2 .9-2 2v12c0 1.1.9 2 2 2h5v2h8v-2h5c1.1 0 2-.9 2-2V5c0-1.1-.9-2-2-2zm0 14H3V5h18v12zM5 12h2v2H5zm0-4h2v2H5z\" fill=\"currentColor\"/>",
	"width": 24,
	"height": 24
};
exports.__esModule = true;
exports.default = data;
|
import pytest
import torch
from torch import nn
import torch.nn.functional as F
from syft.generic.pointers.pointer_tensor import PointerTensor
from syft.exceptions import InvalidTensorForRemoteGet
import syft
def test___str__(workers):
    """__str__ must return a str for both a local tensor and its pointer."""
    bob = workers["bob"]
    local = torch.Tensor([1, 2, 3, 4])
    assert isinstance(local.__str__(), str)
    remote = local.send(bob)
    assert isinstance(remote.__str__(), str)
def test___repr__(workers):
    """__repr__ must return a str for plain, remote, and tagged/described tensors."""
    bob = workers["bob"]
    plain = torch.Tensor([1, 2, 3, 4])
    assert isinstance(plain.__repr__(), str)
    remote = plain.send(bob)
    assert isinstance(remote.__repr__(), str)
    tagged = torch.Tensor([1, 2, 3, 4]).tag("#my_tag").describe("This is a description")
    assert isinstance(tagged.__repr__(), str)
def test_overload_reshape():
    """reshape on a flat 4-element tensor must produce the expected 2x2 matrix."""
    flat = torch.Tensor([1, 2, 3, 4])
    expected = torch.Tensor([[1.0, 2.0], [3.0, 4.0]])
    assert (flat.reshape((2, 2)) == expected).all()
def test_owner_default(hook):
    """A freshly created tensor must be owned by the hook's local worker."""
    fresh = torch.Tensor([1, 2, 3, 4, 5])
    assert fresh.owner == hook.local_worker
def test_create_pointer(hook, workers):
    """create_pointer with explicit ids must wire up owner/location/ids as given."""
    bob = workers["bob"]
    source = torch.Tensor([1, 2, 3, 4, 5])

    explicit_ptr = source.create_pointer(
        location=bob, id_at_location=1, register=False, owner=hook.local_worker, ptr_id=2
    )
    assert explicit_ptr.owner == hook.local_worker
    assert explicit_ptr.location == bob
    assert explicit_ptr.id_at_location == 1
    assert explicit_ptr.id == 2

    default_ptr = source.create_pointer(owner=hook.local_worker)
    assert isinstance(default_ptr.__str__(), str)
    assert isinstance(default_ptr.__repr__(), str)
def test_create_pointer_defaults(workers):
    """With defaults, the pointer inherits the tensor's owner and uses the given location."""
    bob = workers["bob"]
    source = torch.Tensor([1, 2, 3, 4, 5])
    pointer = source.create_pointer(location=bob)
    assert pointer.owner == source.owner
    assert pointer.location == bob
def test_get(workers):
    """send() then get() must round-trip the tensor values through bob."""
    bob = workers["bob"]
    original = torch.rand(5, 3)
    remote = original.send(bob)
    assert type(remote.child) == PointerTensor
    assert (remote.get() == original).all()
def test_invalid_remote_get(workers):
    """remote_get on a single-hop pointer must raise InvalidTensorForRemoteGet."""
    remote = torch.rand(5, 3).send(workers["bob"])
    with pytest.raises(InvalidTensorForRemoteGet):
        remote.remote_get()
def test_remote_get(hook, workers):
    """remote_get must move the value off bob while alice keeps one object."""
    me = workers["me"]
    bob = workers["bob"]
    alice = workers["alice"]

    local = torch.tensor([1, 2, 3, 4, 5])
    double_ptr = local.send(bob).send(alice)
    assert double_ptr.owner == me
    assert double_ptr.location == alice

    # Before: the value lives on bob, alice holds one object as well
    assert local.id in bob._objects
    assert len(bob._objects) == 1
    assert len(alice._objects) == 1

    double_ptr.remote_get()
    assert len(bob._objects) == 0
    assert len(alice._objects) == 1
def test_remote_send(hook, workers):
    """``remote_send`` forwards bob's copy of the data on to alice."""
    me = workers["me"]
    bob = workers["bob"]
    alice = workers["alice"]
    data = torch.tensor([1, 2, 3, 4, 5])
    chained_pointer = data.send(bob).remote_send(alice)
    assert chained_pointer.owner == me
    # The pointer still points at bob even though the data moved to alice.
    assert chained_pointer.location == bob
    assert data.id in alice._objects
def test_copy():
    """``copy()`` must return an equal but distinct tensor object."""
    tensor = torch.rand(5, 3)
    copied_tensor = tensor.copy()  # fixed local-name typo: "coppied" -> "copied"
    assert (tensor == copied_tensor).all()
    assert tensor is not copied_tensor
def test_size():
    """``size()`` and ``shape`` must agree, both wholesale and per-dimension."""
    sample = torch.rand(5, 3)
    assert sample.size() == torch.Size([5, 3])
    assert sample.size() == sample.shape
    assert sample.size(0) == sample.shape[0]
# Compare local dim with the remote one
def test_dim(workers):
    """``dim()`` reported through a pointer must match the local value."""
    local_tensor = torch.randn(5, 3)
    remote_tensor = local_tensor.send(workers["alice"])
    assert local_tensor.dim() == remote_tensor.dim()
def test_does_not_require_large_precision():
    """Values fitting within 62 bits at base 10 / 3 fractional digits need no large precision."""
    values = torch.tensor([[[-1.5, 2.0, 30000000000.0]], [[4.5, 5.0, 6.0]], [[7.0, 8.0, 9.0]]])
    base = 10
    fractional_digits = 3
    bit_budget = 62
    assert not values._requires_large_precision(bit_budget, base, fractional_digits)
def test_requires_large_precision():
    """256 fractional digits at base 10 must overflow a 62-bit budget."""
    values = torch.tensor([[[-1.5, 2.0, 30000000000.0]], [[4.5, 5.0, 6.0]], [[7.0, 8.0, 9.0]]])
    base = 10
    fractional_digits = 256
    bit_budget = 62
    assert values._requires_large_precision(bit_budget, base, fractional_digits)
def test_roll(workers):
    """Under the syft hook, ``torch.roll`` must accept a tensor-valued shift."""
    values = torch.tensor([1.0, 2.0, 3, 4, 5])
    # Reference result computed with a plain integer shift.
    expected = torch.roll(values, -1)
    shift = torch.tensor([-1.0])
    rolled = torch.roll(values, shift)
    assert (rolled == expected).all()
def test_complex_model(workers):
    """End-to-end check: a multi-layer CNN sent to a remote worker can run a forward pass."""
    hook = syft.TorchHook(torch)
    bob = workers["bob"]
    tensor_local = torch.rand(1, 1, 32, 32)
    tensor_remote = tensor_local.send(bob)
    ## Instantiating a model with multiple layer types
    class Net(nn.Module):
        # LeNet-style CNN: two conv layers followed by three fully-connected layers.
        def __init__(self):
            super(Net, self).__init__()
            self.conv1 = nn.Conv2d(1, 6, 5)
            self.conv2 = nn.Conv2d(6, 16, 5)
            self.fc1 = nn.Linear(16 * 5 * 5, 120)
            self.fc2 = nn.Linear(120, 84)
            self.fc3 = nn.Linear(84, 10)
        def forward(self, x):
            out = self.conv1(x)
            out = F.relu(out)
            out = F.max_pool2d(out, 2)
            out = F.relu(self.conv2(out))
            out = F.avg_pool2d(out, 2)
            # Flatten all feature maps before the fully-connected layers.
            out = out.view(out.shape[0], -1)
            out = F.relu(self.fc1(out))
            out = F.relu(self.fc2(out))
            out = self.fc3(out)
            return out
    model_net = Net()
    model_net.send(bob)
    ## Forward on the remote model
    # The call succeeding (no exception) is the assertion here.
    pred = model_net(tensor_remote)
|
import { makeStyles } from '@material-ui/core/styles';
// Styling hook for a modal dialog: layout of the dialog body, the modal
// backdrop, a loading indicator and the action-button row.
const useStyles = makeStyles(() => ({
  // Fixed-width column layout for the dialog content.
  root: {
    display: 'flex',
    flexDirection: 'column',
    width: 465,
  },
  // Centers the dialog inside the viewport; outline 0 hides the focus ring.
  modal: {
    display: 'flex',
    alignItems: 'center',
    justifyContent: 'center',
    outline: 0,
  },
  // Horizontally centers the spinner while content loads.
  loader: {
    display: 'flex',
    justifyContent: 'center',
  },
  // Right-aligns the action buttons.
  actions: {
    display: 'flex',
    justifyContent: 'flex-end',
  },
}));
export default useStyles;
|
/*
* Copyright (C) 2005 Anthony Liguori <anthony@codemonkey.ws>
*
* Network Block Device
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; under version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef NBD_H
#define NBD_H
#include <sys/types.h>
#include "qemu-common.h"
#include "qemu/option.h"
/* On-the-wire request header sent by an NBD client. */
struct nbd_request {
    uint32_t magic;
    uint32_t type;
    uint64_t handle;
    uint64_t from;
    uint32_t len;
} QEMU_PACKED;
/* On-the-wire reply header returned by an NBD server. */
struct nbd_reply {
    uint32_t magic;
    uint32_t error;
    uint64_t handle;
} QEMU_PACKED;
/* Per-export flags advertised by the server. */
#define NBD_FLAG_HAS_FLAGS (1 << 0) /* Flags are there */
#define NBD_FLAG_READ_ONLY (1 << 1) /* Device is read-only */
#define NBD_FLAG_SEND_FLUSH (1 << 2) /* Send FLUSH */
#define NBD_FLAG_SEND_FUA (1 << 3) /* Send FUA (Force Unit Access) */
#define NBD_FLAG_ROTATIONAL (1 << 4) /* Use elevator algorithm - rotational media */
#define NBD_FLAG_SEND_TRIM (1 << 5) /* Send TRIM (discard) */
/* New-style global flags. */
#define NBD_FLAG_FIXED_NEWSTYLE (1 << 0) /* Fixed newstyle protocol. */
/* New-style client flags. */
#define NBD_FLAG_C_FIXED_NEWSTYLE (1 << 0) /* Fixed newstyle protocol. */
/* Reply types. */
#define NBD_REP_ACK (1) /* Data sending finished. */
#define NBD_REP_SERVER (2) /* Export description. */
#define NBD_REP_ERR_UNSUP ((UINT32_C(1) << 31) | 1) /* Unknown option. */
#define NBD_REP_ERR_INVALID ((UINT32_C(1) << 31) | 3) /* Invalid length. */
/* The low 16 bits of nbd_request.type hold the command; bit 16 is FUA. */
#define NBD_CMD_MASK_COMMAND 0x0000ffff
#define NBD_CMD_FLAG_FUA (1 << 16)
enum {
    NBD_CMD_READ = 0,
    NBD_CMD_WRITE = 1,
    NBD_CMD_DISC = 2,
    NBD_CMD_FLUSH = 3,
    NBD_CMD_TRIM = 4
};
#define NBD_DEFAULT_PORT 10809
/* Maximum size of a single READ/WRITE data buffer */
#define NBD_MAX_BUFFER_SIZE (32 * 1024 * 1024)
/* Synchronous transfer helper; do_read selects read vs. write direction. */
ssize_t nbd_wr_sync(int fd, void *buffer, size_t size, bool do_read);
/* Client-side negotiation: fills *flags and *size from the server's greeting. */
int nbd_receive_negotiate(int csock, const char *name, uint32_t *flags,
                          off_t *size, Error **errp);
int nbd_init(int fd, int csock, uint32_t flags, off_t size);
ssize_t nbd_send_request(int csock, struct nbd_request *request);
ssize_t nbd_receive_reply(int csock, struct nbd_reply *reply);
int nbd_client(int fd);
int nbd_disconnect(int fd);
/* Opaque server-side objects; both are reference counted via _get/_put. */
typedef struct NBDExport NBDExport;
typedef struct NBDClient NBDClient;
/* Create an export backed by blk; `close` is invoked when the export goes away. */
NBDExport *nbd_export_new(BlockBackend *blk, off_t dev_offset, off_t size,
                          uint32_t nbdflags, void (*close)(NBDExport *),
                          Error **errp);
void nbd_export_close(NBDExport *exp);
void nbd_export_get(NBDExport *exp);
void nbd_export_put(NBDExport *exp);
BlockBackend *nbd_export_get_blockdev(NBDExport *exp);
NBDExport *nbd_export_find(const char *name);
void nbd_export_set_name(NBDExport *exp, const char *name);
void nbd_export_close_all(void);
/* Attach a client socket to an export; `close` runs on client teardown. */
NBDClient *nbd_client_new(NBDExport *exp, int csock,
                          void (*close)(NBDClient *));
void nbd_client_get(NBDClient *client);
void nbd_client_put(NBDClient *client);
#endif
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from tempest_lib import exceptions as lib_exc
from tempest.api.orchestration import base
from tempest.common.utils import data_utils
from tempest import config
from tempest import test
CONF = config.CONF
LOG = logging.getLogger(__name__)
class TestServerStackLimits(base.BaseOrchestrationTest):
    """Negative tests: Heat must reject stacks exceeding configured limits."""
    @test.idempotent_id('ec9bed71-c460-45c9-ab98-295caa9fd76b')
    def test_exceed_max_template_size_fails(self):
        """A template padded past max_template_size must raise BadRequest."""
        stack_name = data_utils.rand_name('heat')
        # Pad the description so the template body exceeds the configured
        # byte limit by construction.
        fill = 'A' * CONF.orchestration.max_template_size
        template = '''
HeatTemplateFormatVersion: '2012-12-12'
Description: '%s'
Outputs:
  Foo: bar''' % fill
        ex = self.assertRaises(lib_exc.BadRequest, self.create_stack,
                               stack_name, template)
        self.assertIn('Template exceeds maximum allowed size', str(ex))
    @test.idempotent_id('d1b83e73-7cad-4a22-9839-036548c5387c')
    def test_exceed_max_resources_per_stack(self):
        """One resource above max_resources_per_stack must raise BadRequest."""
        stack_name = data_utils.rand_name('heat')
        # Create a big template, one resource more than the limit
        template = 'heat_template_version: \'2013-05-23\'\nresources:\n'
        rsrc_snippet = ' random%s:\n type: \'OS::Heat::RandomString\'\n'
        num_resources = CONF.orchestration.max_resources_per_stack + 1
        for i in range(num_resources):
            template += rsrc_snippet % i
        ex = self.assertRaises(lib_exc.BadRequest, self.create_stack,
                               stack_name, template)
        self.assertIn('Maximum resources per stack exceeded', str(ex))
|
"""
Quiver Homspace
"""
# ****************************************************************************
# Copyright (C) 2012 Jim Stark <jstarx@gmail.com>
# 2013 Simon King <simon.king@uni-jena.de>
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty
# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU General Public License for more details; the full text
# is available at:
#
# https://www.gnu.org/licenses/
# ****************************************************************************
from sage.categories.homset import Homset
from sage.quivers.morphism import QuiverRepHom
from sage.misc.cachefunc import cached_method
class QuiverHomSpace(Homset):
    r"""
    A homomorphism of quiver representations (of one and the same quiver)
    is given by specifying, for each vertex of the quiver, a homomorphism
    of the spaces assigned to this vertex such that these homomorphisms
    commute with the edge maps. This class handles the set of all
    such maps, `Hom_Q(M, N)`.
    INPUT:
    - ``domain`` -- the domain of the homomorphism space
    - ``codomain`` -- the codomain of the homomorphism space
    OUTPUT:
    - :class:`QuiverHomSpace`, the homomorphism space
      ``Hom_Q(domain, codomain)``
    .. NOTE::
        The quivers of the domain and codomain must be equal or a
        ``ValueError`` is raised.
    EXAMPLES::
        sage: Q = DiGraph({1:{2:['a', 'b']}}).path_semigroup()
        sage: H = Q.S(QQ, 2).Hom(Q.P(QQ, 1))
        sage: H.dimension()
        2
        sage: H.gens()
        [Homomorphism of representations of Multi-digraph on 2 vertices,
         Homomorphism of representations of Multi-digraph on 2 vertices]
    """
    # Elements of this homset are quiver representation homomorphisms.
    Element = QuiverRepHom
    ###########################################################################
    #                                                                         #
    # PRIVATE FUNCTIONS                                                       #
    #     These functions are not meant to be seen by the end user.           #
    #                                                                         #
    ###########################################################################
    def __init__(self, domain, codomain, category=None):
        """
        Initialize ``self``. Type ``QuiverHomSpace?`` for more information.
        TESTS::
            sage: Q = DiGraph({1:{2:['a', 'b']}}).path_semigroup()
            sage: H = Q.S(QQ, 2).Hom(Q.P(QQ, 1))
            sage: TestSuite(H).run()
        """
        # The data in the class is stored in the following private variables:
        #
        # * _base
        #      The base ring of the representations M and N.
        # * _codomain
        #      The QuiverRep object of the codomain N.
        # * _domain
        #      The QuiverRep object of the domain M.
        # * _quiver
        #      The quiver of the representations M and N.
        # * _space
        #      A free module with ambient space.
        #
        # The free module _space is the homomorphism space. The ambient space
        # is k^n where k is the base ring and n is the sum of the dimensions of
        # the spaces of homomorphisms between the free modules attached in M
        # and N to the vertices of the quiver. Each coordinate represents a
        # single entry in one of those matrices.
        # Get the quiver and base ring and check that they are the same for
        # both modules
        if domain._semigroup != codomain._semigroup:
            raise ValueError("representations are not over the same quiver")
        self._quiver = domain._quiver
        self._semigroup = domain._semigroup
        # Check that the bases are compatible, and then initialise the homset:
        if codomain.base_ring() != domain.base_ring():
            raise ValueError("representations are not over the same base ring")
        Homset.__init__(self, domain, codomain, category=category,
                        base=domain.base_ring())
        # To compute the Hom Space we set up a 'generic' homomorphism where the
        # maps at each vertex are described by matrices whose entries are
        # variables. Then the commutativity of edge diagrams gives us a
        # system of equations whose solution space is the Hom Space we're
        # looking for. The variables will be numbered consecutively starting
        # at 0, ordered first by the vertex the matrix occurs at, then by row
        # then by column. We'll have to keep track of which variables
        # correspond to which matrices.
        # eqs will count the number of equations in our system of equations,
        # varstart will be a list whose ith entry is the number of the
        # variable located at (0, 0) in the matrix assigned to the
        # ith vertex. (So varstart[0] will be 0.)
        eqs = 0
        verts = domain._quiver.vertices()
        varstart = [0]*(len(verts) + 1)
        # First assign to varstart the dimension of the matrix assigned to the
        # previous vertex.
        for v in verts:
            varstart[verts.index(v) + 1] = domain._spaces[v].dimension()*codomain._spaces[v].dimension()
        # One equation per matrix entry of the commutativity constraint of
        # each edge: dim(M at source) * dim(N at target).
        for e in domain._semigroup._sorted_edges:
            eqs += domain._spaces[e[0]].dimension()*codomain._spaces[e[1]].dimension()
        # After this cascading sum varstart[v] will be the sum of the
        # dimensions of the matrices assigned to vertices ordered before v.
        # This is equal to the number of the first variable assigned to v.
        for i in range(2, len(varstart)):
            varstart[i] += varstart[i-1]
        # This will be the coefficient matrix for the system of equations. We
        # start with all zeros and will fill in as we go. We think of this
        # matrix as acting on the right so the columns correspond to equations,
        # the rows correspond to variables, and .kernel() will give a right
        # kernel as is needed.
        from sage.matrix.constructor import Matrix
        coef_mat = Matrix(codomain.base_ring(), varstart[-1], eqs)
        # eqn keeps track of what equation we are on. If the maps X and Y are
        # assigned to an edge e and A and B are the matrices of variables that
        # describe the generic maps at the initial and final vertices of e
        # then commutativity of the edge diagram is described by the equation
        # AY = XB, or
        #
        # Sum_k A_ik*Y_kj - Sum_k X_ik*B_kj == 0 for all i and j.
        #
        # Below we loop through these values of i,j,k and write the
        # coefficients of the equation above into the coefficient matrix.
        eqn = 0
        for e in domain._semigroup._sorted_edges:
            X = domain._maps[e].matrix()
            Y = codomain._maps[e].matrix()
            for i in range(X.nrows()):
                for j in range(Y.ncols()):
                    for k in range(Y.nrows()):
                        coef_mat[varstart[verts.index(e[0])] + i*Y.nrows() + k, eqn] = Y[k, j]
                    for k in range(X.ncols()):
                        coef_mat[varstart[verts.index(e[1])] + k*Y.ncols() + j, eqn] = -X[i, k]
                    eqn += 1
        # Now we can create the hom space
        self._space = coef_mat.kernel()
        # Bind identity if domain = codomain
        if domain is codomain:
            self.identity = self._identity
    @cached_method
    def zero(self):
        """
        Return the zero morphism.
        .. NOTE::
            It is needed to override the method inherited from
            the category of modules, because it would create
            a morphism that is of the wrong type and does not
            comply with :class:`~sage.quivers.morphism.QuiverRepHom`.
        EXAMPLES::
            sage: Q = DiGraph({1:{2:['a', 'b']}}).path_semigroup()
            sage: H = Q.S(QQ, 2).Hom(Q.P(QQ, 1))
            sage: H.zero() + H.an_element() == H.an_element()
            True
            sage: isinstance(H.zero(), H.element_class)
            True
        """
        # Calling the homset with no data yields the natural (zero) map.
        return self()
    def _coerce_map_from_(self, other):
        r"""
        A coercion exists if and only if ``other`` is also a
        QuiverHomSpace and there is a coercion from the domain of ``self``
        to the domain of ``other`` and from the codomain of ``other`` to
        the codomain of ``self``.
        EXAMPLES::
            sage: Q = DiGraph({1:{2:['a']}}).path_semigroup()
            sage: P = Q.P(QQ, 1)
            sage: S = Q.S(QQ, 1)
            sage: H1 = P.Hom(S)
            sage: H2 = (P/P.radical()).Hom(S)
            sage: H1.coerce_map_from(H2) # indirect doctest
            Coercion map:
              From: Dimension 1 QuiverHomSpace
              To:   Dimension 1 QuiverHomSpace
        """
        if not isinstance(other, QuiverHomSpace):
            return False
        if not other._domain.has_coerce_map_from(self._domain):
            return False
        if not self._codomain.has_coerce_map_from(other._codomain):
            return False
        return True
    def __call__(self, *data, **kwds):
        r"""
        A homomorphism of quiver representations (of one and the same
        quiver) is given by specifying, for each vertex of the quiver, a
        homomorphism of the spaces assigned to this vertex such that these
        homomorphisms commute with the edge maps. The domain and codomain
        of the homomorphism are required to be representations over the
        same quiver with the same base ring.
        INPUT:
        Usually, one would provide a single dict, list,
        :class:`QuiverRepElement` or :class:`QuiverRepHom` as arguments.
        The semantics is as follows:
        - list: ``data`` can be a list of images for the generators of
          the domain. "Generators" means the output of the ``gens()``
          method. An error will be generated if the map so defined
          is not equivariant with respect to the action of the quiver.
        - dictionary: ``data`` can be a dictionary associating to each
          vertex of the quiver either a homomorphism with domain and
          codomain the spaces associated to this vertex in the domain
          and codomain modules respectively, or a matrix defining such
          a homomorphism, or an object that sage can construct such a
          matrix from. Not all vertices must be specified, unspecified
          vertices are assigned the zero map, and keys not corresponding
          to vertices of the quiver are ignored. An error will be
          generated if these maps do not commute with the edge maps of
          the domain and codomain.
        - :class:`QuiverRepElement`: if the domain is a
          :class:`QuiverRep_with_path_basis` then ``data`` can be a single
          :class:`QuiverRepElement` belonging to the codomain. The map
          is then defined by sending each path, ``p``, in the basis
          to ``data*p``. If ``data`` is not an element of the codomain or
          the domain is not a :class:`QuiverRep_with_path_basis` then
          an error will be generated.
        - :class:`QuiverRepHom`: the input can also be a map `f : D \to C`
          such that there is a coercion from the domain of ``self`` to ``D``
          and from ``C`` to the codomain of ``self``. The composition
          of these maps is the result.
        If there additionally are keyword arguments or if a
        :class:`QuiverRepHom` can not be created from the data, then the
        default call method of :class:`~sage.categories.homset.Homset`
        is called instead.
        OUTPUT:
        - :class:`QuiverRepHom`
        EXAMPLES::
            sage: Q = DiGraph({1:{2:['a', 'b']}, 2:{3:['c']}}).path_semigroup()
            sage: spaces = {1: QQ^2, 2: QQ^2, 3:QQ^1}
            sage: maps = {(1, 2, 'a'): [[1, 0], [0, 0]], (1, 2, 'b'): [[0, 0], [0, 1]], (2, 3, 'c'): [[1], [1]]}
            sage: M = Q.representation(QQ, spaces, maps)
            sage: spaces2 = {2: QQ^1, 3: QQ^1}
            sage: S = Q.representation(QQ, spaces2)
            sage: H = S.Hom(M)
        With no additional data this creates the zero map::
            sage: f = H() # indirect doctest
            sage: f.is_zero()
            True
        We must specify maps at the vertices to get a nonzero
        homomorphism. Note that if the dimensions of the spaces assigned
        to the domain and codomain of a vertex are equal then Sage will
        construct the identity matrix from ``1``::
            sage: maps2 = {2:[1, -1], 3:1}
            sage: g = H(maps2) # indirect doctest
        Here we create the same map by specifying images for the generators::
            sage: x = M({2: (1, -1)})
            sage: y = M({3: (1,)})
            sage: h = H([x, y]) # indirect doctest
            sage: g == h
            True
        Here is an example of the same with a bigger identity matrix::
            sage: spaces3 = {2: QQ^2, 3: QQ^2}
            sage: maps3 = {(2, 3, 'c'): [[1, 0], [1, 0]]}
            sage: S3 = Q.representation(QQ, spaces3, maps3)
            sage: h3 = S3.Hom(M)({2: 1, 3: [[1], [0]]})
            sage: h3.get_map(2)
            Vector space morphism represented by the matrix:
            [1 0]
            [0 1]
            Domain: Vector space of dimension 2 over Rational Field
            Codomain: Vector space of dimension 2 over Rational Field
        If the domain is a module of type :class:`QuiverRep_with_path_basis`
        (for example, the indecomposable projectives) we can create maps by
        specifying a single image::
            sage: Proj = Q.P(GF(7), 3)
            sage: Simp = Q.S(GF(7), 3)
            sage: im = Simp({3: (1,)})
            sage: H2 = Proj.Hom(Simp)
            sage: H2(im).is_surjective() # indirect doctest
            True
        """
        if kwds or len(data) > 1:
            # Delegate to the parent of Homset (skipping Homset.__call__).
            return super(Homset, self).__call__(*data, **kwds)
        if not data:
            return self.natural_map()
        data0 = data[0]
        if data0 is None or data0 == 0:
            data0 = {}
        try:
            return self.element_class(self._domain, self._codomain, data0)
        except (TypeError, ValueError):
            return super(QuiverHomSpace, self).__call__(*data, **kwds)
    def _repr_(self):
        """
        Default string representation.
        TESTS::
            sage: Q = DiGraph({1:{2:['a']}}).path_semigroup()
            sage: Q.P(GF(3), 2).Hom(Q.S(GF(3), 2)) # indirect doctest
            Dimension 1 QuiverHomSpace
        """
        return "Dimension {} QuiverHomSpace".format(self._space.dimension())
    def natural_map(self):
        """
        The natural map from domain to codomain.
        This is the zero map.
        EXAMPLES::
            sage: Q = DiGraph({1:{2:['a', 'b']}, 2:{3:['c']}}).path_semigroup()
            sage: spaces = {1: QQ^2, 2: QQ^2, 3:QQ^1}
            sage: maps = {(1, 2, 'a'): [[1, 0], [0, 0]], (1, 2, 'b'): [[0, 0], [0, 1]], (2, 3, 'c'): [[1], [1]]}
            sage: M = Q.representation(QQ, spaces, maps)
            sage: spaces2 = {2: QQ^1, 3: QQ^1}
            sage: S = Q.representation(QQ, spaces2)
            sage: S.hom(M) # indirect doctest
            Homomorphism of representations of Multi-digraph on 3 vertices
            sage: S.hom(M) == S.Hom(M).natural_map()
            True
        """
        # An empty dict assigns the zero map to every vertex.
        return self.element_class(self._domain, self._codomain, {})
    def _identity(self):
        """
        Return the identity map.
        OUTPUT:
        - :class:`QuiverRepHom`
        EXAMPLES::
            sage: Q = DiGraph({1:{2:['a']}}).path_semigroup()
            sage: P = Q.P(QQ, 1)
            sage: H = P.Hom(P)
            sage: f = H.identity() # indirect doctest
            sage: f.is_isomorphism()
            True
        """
        from sage.matrix.constructor import Matrix
        # Identity matrix at every vertex (only bound when domain is codomain).
        maps = {v: Matrix(self._domain._spaces[v].dimension(),
                          self._domain._spaces[v].dimension(),
                          self._base.one())
                for v in self._quiver}
        return self.element_class(self._domain, self._codomain, maps)
    ###########################################################################
    #                                                                         #
    # ACCESS FUNCTIONS                                                        #
    #     These functions are used to view and modify the representation data.#
    #                                                                         #
    ###########################################################################
    def base_ring(self):
        """
        Return the base ring of the representations.
        EXAMPLES::
            sage: Q = DiGraph({1:{2:['a', 'b']}}).path_semigroup()
            sage: H = Q.S(QQ, 2).Hom(Q.P(QQ, 1))
            sage: H.base_ring()
            Rational Field
        """
        return self._base
    def quiver(self):
        """
        Return the quiver of the representations.
        OUTPUT:
        - :class:`DiGraph`, the quiver of the representations
        EXAMPLES::
            sage: P = DiGraph({1:{2:['a', 'b']}}).path_semigroup()
            sage: H = P.S(QQ, 2).Hom(P.P(QQ, 1))
            sage: H.quiver() is P.quiver()
            True
        """
        return self._quiver
    def domain(self):
        """
        Return the domain of the hom space.
        OUTPUT:
        - :class:`QuiverRep`, the domain of the Hom space
        EXAMPLES::
            sage: Q = DiGraph({1:{2:['a', 'b']}}).path_semigroup()
            sage: S = Q.S(QQ, 2)
            sage: H = S.Hom(Q.P(QQ, 1))
            sage: H.domain() is S
            True
        """
        return self._domain
    def codomain(self):
        """
        Return the codomain of the hom space.
        OUTPUT:
        - :class:`QuiverRep`, the codomain of the Hom space
        EXAMPLES::
            sage: Q = DiGraph({1:{2:['a', 'b']}}).path_semigroup()
            sage: P = Q.P(QQ, 1)
            sage: H = Q.S(QQ, 2).Hom(P)
            sage: H.codomain() is P
            True
        """
        return self._codomain
    ###########################################################################
    #                                                                         #
    # DATA FUNCTIONS                                                          #
    #     These functions return data collected from the representation.      #
    #                                                                         #
    ###########################################################################
    def dimension(self):
        """
        Return the dimension of the hom space.
        OUTPUT:
        - integer, the dimension
        EXAMPLES::
            sage: Q = DiGraph({1:{2:['a', 'b']}}).path_semigroup()
            sage: H = Q.S(QQ, 2).Hom(Q.P(QQ, 1))
            sage: H.dimension()
            2
        """
        return self._space.dimension()
    def gens(self):
        """
        Return a list of generators of the hom space (as a `k`-vector
        space).
        OUTPUT:
        - list of :class:`QuiverRepHom` objects, the generators
        EXAMPLES::
            sage: Q = DiGraph({1:{2:['a', 'b']}}).path_semigroup()
            sage: H = Q.S(QQ, 2).Hom(Q.P(QQ, 1))
            sage: H.gens()
            [Homomorphism of representations of Multi-digraph on 2 vertices,
             Homomorphism of representations of Multi-digraph on 2 vertices]
        """
        # Each kernel-basis vector encodes one homomorphism.
        return [self.element_class(self._domain, self._codomain, f)
                for f in self._space.gens()]
    def coordinates(self, hom):
        """
        Return the coordinates of the map when expressed in terms of the
        generators (i. e., the output of the ``gens`` method) of the
        hom space.
        INPUT:
        - ``hom`` -- :class:`QuiverRepHom`
        OUTPUT:
        - list, the coordinates of the given map when written in terms of the
          generators of the :class:`QuiverHomSpace`
        EXAMPLES::
            sage: Q = DiGraph({1:{2:['a', 'b']}}).path_semigroup()
            sage: S = Q.S(QQ, 2)
            sage: P = Q.P(QQ, 1)
            sage: H = S.Hom(P)
            sage: f = S.hom({2: [[1,-1]]}, P)
            sage: H.coordinates(f)
            [1, -1]
        """
        # Use the coordinates function on space
        return self._space.coordinates(hom._vector)
    ###########################################################################
    #                                                                         #
    # CONSTRUCTION FUNCTIONS                                                  #
    #     These functions create and return modules and homomorphisms.        #
    #                                                                         #
    ###########################################################################
    def _an_element_(self):
        """
        Return a homomorphism in the Hom space.
        EXAMPLES::
            sage: Q = DiGraph({1:{2:['a', 'b']}}).path_semigroup()
            sage: S = Q.S(QQ, 2)
            sage: P = Q.P(QQ, 1)
            sage: H = S.Hom(P)
            sage: H.an_element() in H # indirect doctest
            True
        """
        return self.element_class(self._domain, self._codomain, self._space.an_element())
    def left_module(self, basis=False):
        """
        Create the QuiverRep of ``self`` as a module over the opposite
        quiver.
        INPUT:
        - ``basis`` - bool. If ``False``, then only the module is
          returned. If ``True``, then a tuple is returned. The first
          element is the QuiverRep and the second element is a
          dictionary which associates to each vertex a list. The
          elements of this list are the homomorphisms which correspond to
          the basis elements of that vertex in the module.
        OUTPUT:
        - :class:`QuiverRep` or tuple
        .. WARNING::
            The codomain of the Hom space must be a left module.
        .. NOTE::
            The left action of a path `e` on a map `f` is given by
            `(ef)(m) = ef(m)`. This gives the Hom space its structure as
            a left module over the path algebra. This is then converted to
            a right module over the path algebra of the opposite quiver
            ``Q.reverse()`` and returned.
        EXAMPLES::
            sage: Q = DiGraph({1:{2:['a', 'b'], 3: ['c', 'd']}, 2:{3:['e']}}).path_semigroup()
            sage: P = Q.P(GF(3), 3)
            sage: A = Q.free_module(GF(3))
            sage: H = P.Hom(A)
            sage: H.dimension()
            6
            sage: M, basis_dict = H.left_module(true)
            sage: M.dimension_vector()
            (4, 1, 1)
            sage: Q.reverse().P(GF(3), 3).dimension_vector()
            (4, 1, 1)
        As lists start indexing at 0 the `i`-th vertex corresponds to the
        `(i-1)`-th entry of the dimension vector::
            sage: len(basis_dict[2]) == M.dimension_vector()[1]
            True
        """
        from sage.quivers.representation import QuiverRep
        if not self._codomain.is_left_module():
            raise ValueError("the codomain must be a left module")
        # Create the spaces
        spaces = {}
        for v in self._quiver:
            im_gens = [self([self._codomain.left_edge_action((v, v), f(x))
                             for x in self._domain.gens()])._vector
                       for f in self.gens()]
            spaces[v] = self._space.submodule(im_gens)
        # Create the maps
        maps = {}
        for e in self._semigroup._sorted_edges:
            # Edges of the opposite quiver run backwards.
            e_op = (e[1], e[0], e[2])
            maps[e_op] = []
            for vec in spaces[e[1]].gens():
                vec_im = spaces[e_op[1]].coordinate_vector(self([self._codomain.left_edge_action(e, self(vec)(x))
                                                                 for x in self._domain.gens()])._vector)
                maps[e_op].append(vec_im)
        # Create and return the module (and the dict if desired)
        if basis:
            basis_dict = {}
            for v in self._quiver:
                basis_dict[v] = [self.element_class(self._domain, self._codomain, vec)
                                 for vec in spaces[v].gens()]
            return (QuiverRep(self._base, self._semigroup.reverse(), spaces, maps), basis_dict)
        else:
            return QuiverRep(self._base, self._semigroup.reverse(), spaces, maps)
|
import configuration.settings
import requests
# Connection settings for the SuperJob API, supplied by project configuration.
api_base_url = configuration.settings.SUPERJOB_API_BASE_URL
# Application secret sent with every request via the 'X-Api-App-Id' header.
api_access_token = configuration.settings.SUPERJOB_API_ACCESS_TOKEN
def fetch_vacancies(keyword, town=None, catalogues=None, currency=None, period=0, page=0, count=100):
    """Fetch one page of vacancies from the SuperJob API and return the parsed JSON.
    Args:
        keyword (str): search the specific vacancies by keyword.
        town (int, optional): search vacancies by area (ids from
            https://api.superjob.ru/2.0/towns/). Defaults to None.
        catalogues (int, optional): search vacancies by profession (ids from
            https://api.superjob.ru/2.0/catalogues/). Defaults to None.
        currency (str, optional): filter by salary currency — 'rub' (russian
            ruble), 'uah' (ukrainian hryvnia) or 'uzs' (uzbekistan som).
            Defaults to None.
        period (int, optional): look-back window in days — 1, 3, 7, or 0 for
            all time. Defaults to 0.
        page (int, optional): page number of the response. Defaults to 0.
        count (int, optional): number of vacancies per page. Defaults to 100.
    The parameters mirror the vacancy fields described in the API
    documentation: https://api.superjob.ru/#vacancy
    Returns:
        Response (json): page with the vacancies in json.
    Raises:
        requests.HTTPError: when the API answers with an error status.
    """
    query = {
        'keyword': keyword,
        'town': town,
        'catalogues': catalogues,
        'currency': currency,
        'period': period,
        'page': page,
        'count': count,
    }
    auth_header = {
        'X-Api-App-Id': api_access_token
    }
    response = requests.get(f'{api_base_url}/vacancies', query, headers=auth_header)
    response.raise_for_status()
    return response.json()
|
import numpy as np
import cv2 as cv
def agregar_imagen(fondo, imagen, x, y):
    """Paste ``imagen`` onto ``fondo`` in place, top-left corner at (x, y).
    Coordinates may be negative or exceed the background; the overlay is
    clipped accordingly. Handles 4-channel (BGRA, alpha-blended), 3-channel
    (direct overwrite) and single-channel (expanded to 3 channels) overlays.
    NOTE(review): assumes ``fondo`` always has 3 channels — confirm callers.
    """
    # Amount clipped off the overlay's left/top when x or y is negative.
    filtro_x = -x if x < 0 else 0
    filtro_y = -y if y < 0 else 0
    x = 0 if x < 0 else x
    y = 0 if y < 0 else y
    if len(imagen.shape) > 2:
        alto, ancho, cantidad_canales = imagen.shape
    else:
        alto, ancho = imagen.shape
        # Sentinel value: 2 marks a grayscale image (no channel axis).
        cantidad_canales = 2
    fondo_alto, fondo_ancho, _ = fondo.shape
    # Clip the overlay so it does not spill past the right/bottom edges.
    filtro_ancho = fondo_ancho - x if x + ancho > fondo_ancho else ancho
    filtro_alto = fondo_alto - y if y + alto > fondo_alto else alto
    if cantidad_canales > 2:
        imagen_cortada = imagen[filtro_y:filtro_y+filtro_alto,
                                filtro_x:filtro_x+filtro_ancho, :]
    else:
        imagen_cortada = imagen[filtro_y:filtro_y+filtro_alto,
                                filtro_x:filtro_x+filtro_ancho]
    alto, ancho = imagen_cortada.shape[0], imagen_cortada.shape[1]
    # Check whether the image carries opacity (alpha) information.
    if cantidad_canales == 4:
        # Normalize the opacity to [0, 1].
        opacidad = imagen_cortada[:,:,3]/255
        # alpha blending
        # Allocate an empty 3-channel image.
        imagen_3_canales = np.zeros((imagen_cortada.shape[0],
                                     imagen_cortada.shape[1], 3))
        # Multiply each color channel by the opacity.
        imagen_3_canales[:,:,0] = imagen_cortada[:,:,0] * opacidad
        imagen_3_canales[:,:,1] = imagen_cortada[:,:,1] * opacidad
        imagen_3_canales[:,:,2] = imagen_cortada[:,:,2] * opacidad
        # Blend: attenuate the background by (1 - alpha) and add the
        # pre-multiplied overlay.
        opacidad_3_canales = np.stack([opacidad, opacidad, opacidad],
                                      axis=-1)
        fondo[y:y+alto, x:x+ancho, :] = ((1 - opacidad_3_canales) *
                                         fondo[y:y+alto, x:x+ancho, :] +
                                         imagen_3_canales)
    elif cantidad_canales == 3:
        # Overwrite the background region directly (no alpha).
        fondo[y:y+alto, x:x+ancho, :] = imagen_cortada
    else:  # assume the channel count is 1 (grayscale)
        imagen_3_canales = cv.cvtColor(imagen_cortada, cv.COLOR_GRAY2RGB)
        fondo[y:y+alto, x:x+ancho, :] = imagen_3_canales
def calcular_margenes(x, y, w, h, MARGEN_X, MARGEN_Y):
    """Grow a bounding box by fractional margins on every side.
    The padding is MARGEN_X * w horizontally and MARGEN_Y * h vertically
    (truncated to int); the origin shifts by one padding and the size grows
    by two, so the box expands symmetrically. Returns (x, y, w, h).
    """
    extra_w = int(w * MARGEN_X)
    extra_h = int(h * MARGEN_Y)
    return (x - extra_w, y - extra_h, w + 2 * extra_w, h + 2 * extra_h)
# sacado de imutils
# taken from imutils
def rotate_bound(image, angle):
    """Rotate ``image`` by ``angle`` degrees without cropping any corner.
    The canvas is enlarged to the rotated bounding box and the rotation
    matrix is shifted so the result stays centered. Returns the rotated
    image together with the adjusted 2x3 affine matrix.
    """
    h, w = image.shape[:2]
    center_x, center_y = w / 2, h / 2
    # Negative angle -> clockwise rotation; extract |cos| and |sin| from
    # the rotation components of the matrix.
    M = cv.getRotationMatrix2D((center_x, center_y), -angle, 1.0)
    cos = np.abs(M[0, 0])
    sin = np.abs(M[0, 1])
    # New bounding dimensions of the rotated image.
    new_w = int((h * sin) + (w * cos))
    new_h = int((h * cos) + (w * sin))
    # Translate so the rotated content is centered in the new canvas.
    M[0, 2] += (new_w / 2) - center_x
    M[1, 2] += (new_h / 2) - center_y
    return cv.warpAffine(image, M, (new_w, new_h)), M
|
import unittest
import solver
class TestSolution(unittest.TestCase):
    """Table-driven checks for solver.solve."""
    def test_solve(self):
        cases = [
            ([2, 3, 4], 12),
            ([5, 15], 15),
            ([3, 9, 10], 90),
            (range(1, 4), 6),
        ]
        for numbers, expected in cases:
            self.assertEqual(solver.solve(numbers), expected)
|
# -*- coding:utf-8 -*-
'''Train CIFAR10 with PyTorch.'''
from __future__ import print_function
import os
# Pin CUDA device enumeration to PCI bus order and expose only GPU #2.
# Must be set before torch/CUDA is imported to take effect.
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "2"
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torchvision
import torchvision.transforms as transforms
import os
import argparse
from models import *
from utils import progress_bar
import matplotlib.pyplot as plt
import numpy as np
from tensorboardX import SummaryWriter
parser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training')
parser.add_argument('--lr', default=0.001, type=float, help='learning rate')
parser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint')
args = parser.parse_args()
# Use the GPU if one is available, otherwise fall back to CPU.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# Hyper-parameters / bookkeeping state.
best_acc = 0  # best test accuracy
start_epoch = 0  # start from epoch 0 or last checkpoint epoch
train_loss_everyepoch = 0
train_accuracy_everyepoch = 0
test_loss_everyepoch = 0
test_accuracy_everyepoch = 0
# Data
print('==> Preparing data..')
# torchvision.transforms is PyTorch's image-preprocessing package; Compose chains several steps together.
transform_train = transforms.Compose([  # chain the individual augmentations
    transforms.RandomCrop(32, padding=4),  # pad 4px of zeros on each side, then random-crop back to 32x32
    transforms.RandomHorizontalFlip(),  # flip the PIL image horizontally with probability 0.5
    # transforms.RandomAffine(5.0),  # not available on python2.7
    # transforms.RandomGrayscale(p=0.1),  # convert the image to grayscale with probability p
    # transforms.ColorJitter(brightness=0.3, contrast=0.3, saturation=0.4, hue=0.4),
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),  # per-channel (R,G,B) mean and std for normalization
])
transform_test = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
## if you cannot run the program because of "OUT OF MEMORY", you can decrease the batch_size properly.
trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform_train)
# DataLoader wraps a dataset (custom or built-in) and packs its samples into
# batch-size Tensors, yielding one shuffled batch at a time for training.
trainloader = torch.utils.data.DataLoader(trainset, batch_size=128, shuffle=True, num_workers=2)
testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform_test)
testloader = torch.utils.data.DataLoader(testset, batch_size=128, shuffle=False, num_workers=2)
print('SAMPLES:', len(trainset), len(testset))
print('EPOCH:', len(trainloader), len(testloader))
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')  # class labels
# Model
print('==> Building model..')
# Define the convolutional neural network.
class DaiNet3(nn.Module):
    """Small three-conv-block CNN for CIFAR-10: (N, 3, 32, 32) -> (N, 10) logits."""

    def __init__(self):
        super(DaiNet3, self).__init__()
        # Block 1: 3 -> 12 channels, 32x32 -> 16x16 after pooling.
        self.layer1 = nn.Sequential(
            nn.Conv2d(3, 12, kernel_size=5, padding=2),
            nn.Dropout(0.5),
            nn.BatchNorm2d(12),
            nn.ReLU(),
            nn.MaxPool2d(2, 2))
        # Block 2: 12 -> 24 channels, 16x16 -> 8x8.
        self.layer2 = nn.Sequential(
            nn.Conv2d(12, 24, kernel_size=3, padding=1),
            nn.BatchNorm2d(24),
            nn.ReLU(),
            # nn.Dropout(0.5),
            nn.MaxPool2d(2, 2))
        # Block 3: 24 -> 12 channels, 8x8 -> 4x4.
        self.layer3 = nn.Sequential(
            nn.Conv2d(24, 12, kernel_size=3, padding=1),
            nn.BatchNorm2d(12),
            nn.ReLU(),
            ## nn.Dropout(0.5),
            nn.MaxPool2d(2, 2))
        # Classifier head: flatten 12x4x4 feature map, then three linear layers.
        self.fc1 = nn.Linear(12 * 4 * 4, 128)
        self.fc2 = nn.Linear(128, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        features = self.layer3(self.layer2(self.layer1(x)))
        # Flatten to (N, 12*4*4); -1 lets the batch dimension be inferred.
        flat = features.view(-1, 12 * 4 * 4)
        hidden = F.relu(self.fc2(F.relu(self.fc1(flat))))
        return self.fc3(hidden)
net = DaiNet3()
## use model
# net = VGG('VGG19')
# net = ResNet18()
# net = PreActResNet18()
# net = GoogLeNet()
# net = DenseNet121()
# net = ResNeXt29_2x64d()
# net = MobileNet()
# net = MobileNetV2()
# net = DPN92()
# net = ShuffleNetG2()
# net = SENet18()
# net = ShuffleNetV2(1)
## use pretrained model
# import torchvision.models as models
# # net = models.resnet50(pretrained=True)
# net = models.vgg16(pretrained=True)
# net.fc = nn.Linear(2048, 10)
netname = 'DaiNet3'
writer_train = SummaryWriter(comment='DaiNet3_train')  # logs are written to runs/<datetime>-<comment>
writer_test = SummaryWriter(comment='DaiNet3_test')
net = net.to(device)
if device == 'cuda':
    net = torch.nn.DataParallel(net)
    cudnn.benchmark = True
if args.resume:
    # Load checkpoint.
    print('==> Resuming from checkpoint..')
    assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!'
    checkpoint = torch.load(os.path.join('./checkpoint', netname, 'ckpt.t7'))
    #checkpoint = torch.load('./checkpoint/ckpt.t7')
    net.load_state_dict(checkpoint['net'])
    best_acc = checkpoint['acc']
    start_epoch = checkpoint['epoch']
# Define the loss function and the optimizer.
criterion = nn.CrossEntropyLoss()  # cross-entropy loss, standard for multi-class classification
# Original choice was mini-batch momentum-SGD with L2 regularization (weight decay);
# Adam with the same weight decay is used instead.
#optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=5e-4)
optimizer = optim.Adam(net.parameters(), lr=args.lr, betas=(0.9, 0.99), weight_decay=5e-4)
# Training: one full pass over the training set.
def train(epoch):
    """Train for one epoch; returns (mean_loss, accuracy_percent).

    Uses the module-level net / trainloader / criterion / optimizer / device.
    """
    print('\nTrain Epoch: %d' % epoch)
    net.train()
    train_loss = 0
    correct = 0
    total = 0
    for batch_idx, (inputs, targets) in enumerate(trainloader):
        inputs, targets = inputs.to(device), targets.to(device)
        optimizer.zero_grad()  # zero the parameter gradients
        outputs = net(inputs)  # forward
        loss = criterion(outputs, targets)  # compute loss with the criterion defined above
        loss.backward()  # backward pass: autograd computes all gradients
        optimizer.step()  # update weights with the freshly computed gradients
        train_loss += loss.item()
        _, predicted = outputs.max(1)
        total += targets.size(0)
        correct += predicted.eq(targets).sum().item()
        progress_bar(batch_idx, len(trainloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
            % (train_loss/(batch_idx+1), 100.*correct/total, correct, total))
    train_loss_everyepoch = train_loss/(batch_idx+1)
    train_accuracy_everyepoch = 100.*correct/total
    return (train_loss_everyepoch,train_accuracy_everyepoch)
def test(epoch):
    """Evaluate on the test set; returns (mean_loss, accuracy_percent).

    Saves a checkpoint (net state, accuracy, epoch) whenever a new best
    accuracy is reached. Uses module-level net / testloader / criterion.
    """
    global best_acc
    net.eval()
    test_loss = 0
    correct = 0
    total = 0
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(testloader):
            inputs, targets = inputs.to(device), targets.to(device)
            outputs = net(inputs)
            loss = criterion(outputs, targets)
            test_loss += loss.item()
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()
            progress_bar(batch_idx, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
                % (test_loss/(batch_idx+1), 100.*correct/total, correct, total))
        test_error_everyepoch = test_loss/(batch_idx+1)
        test_accuracy_everyepoch = 100.*correct/total
    # Save checkpoint.
    acc = 100.*correct/total
    if acc > best_acc:
        print('Saving..')
        state = {
            'net': net.state_dict(),
            'acc': acc,
            'epoch': epoch,
        }
        if not os.path.isdir('checkpoint'):
            os.mkdir('checkpoint')
        if not os.path.isdir('checkpoint/'+netname):
            os.mkdir('checkpoint/'+netname)
        torch.save(state, './checkpoint/'+netname + '/ckpt.t7')
        best_acc = acc
    return (test_error_everyepoch,test_accuracy_everyepoch)
# Main loop: train and evaluate once per epoch, logging to TensorBoard.
for epoch in range(start_epoch, start_epoch+2000):
    (train_loss_everyepoch,train_accuracy_everyepoch) = train(epoch)
    # if (epoch+1)%5 == 0:  # evaluate once every 5 epochs
    (test_loss_everyepoch,test_accuracy_everyepoch) = test(epoch)
    writer_train.add_scalar('loss', train_loss_everyepoch, global_step= epoch)
    writer_train.add_scalar('accuracy', train_accuracy_everyepoch, global_step= epoch)
    writer_test.add_scalar('loss', test_loss_everyepoch, global_step= epoch)
    writer_test.add_scalar('accuracy', test_accuracy_everyepoch, global_step= epoch)
|
// Stylelint configuration: extend the standard ruleset, then relax two rules.
module.exports = {
  extends: ["stylelint-config-standard"],
  rules: {
    // Allow selectors with descending specificity.
    "no-descending-specificity": null,
    // Disable indentation checking.
    indentation: null,
  },
}
|
/* mbed Microcontroller Library
*******************************************************************************
* Copyright (c) 2016, STMicroelectronics
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************
*/
#ifndef MBED_PIN_DEVICE_H
#define MBED_PIN_DEVICE_H
#include "cmsis_gcc.h"
#ifdef STM32F4
#include "stm32f4xx_ll_gpio.h"
#elif defined(STM32F1)
#include "stm32f1xx_ll_gpio.h"
#else
// eg F1 is different, but F0 the same
#error "TODO"
#endif
#include "mbed_pinmap.h"
extern const uint32_t ll_pin_defines[16];
/* Family specific implementations */
/* Release a pin from SWD/JTAG debug duty so it can be used as GPIO.
 * Only STM32F1 claims the debug pins by default; a no-op elsewhere.
 * Warning: debug connectivity is only restored after a reset. */
static inline void stm_pin_DisconnectDebug(uint16_t pin)
{
#ifdef STM32F1
    // Enable AFIO clock
    __HAL_RCC_AFIO_CLK_ENABLE();
    // Disconnect JTAG-DP + SW-DP signals.
    // Warning: Need to reconnect under reset
    if ((pin == PA_13) || (pin == PA_14))
    {
        __HAL_AFIO_REMAP_SWJ_DISABLE(); // JTAG-DP Disabled and SW-DP Disabled
    }
    if ((pin == PA_15) || (pin == PB_3) || (pin == PB_4))
    {
        __HAL_AFIO_REMAP_SWJ_NOJTAG(); // JTAG-DP Disabled and SW-DP enabled
    }
#endif
}
/* Configure the pull-up/pull-down resistor of a GPIO pin.
 * On STM32F1 the pull state is tied to the pin mode, so input pins are
 * switched between FLOATING and INPUT mode as a side effect; on other
 * families the LL pull register is programmed directly. */
static inline void stm_pin_PullConfig(GPIO_TypeDef *gpio, uint32_t ll_pin, uint32_t pull_config)
{
#ifdef STM32F1
    uint32_t function = LL_GPIO_GetPinMode(gpio, ll_pin);
    switch (pull_config)
    {
        case GPIO_PULLUP:
            // F1: pulls only apply in INPUT mode, so leave FLOATING first.
            if (function == LL_GPIO_MODE_FLOATING)
                LL_GPIO_SetPinMode(gpio, ll_pin, LL_GPIO_MODE_INPUT);
            LL_GPIO_SetPinPull(gpio, ll_pin, LL_GPIO_PULL_UP);
            break;
        case GPIO_PULLDOWN:
            if (function == LL_GPIO_MODE_FLOATING)
                LL_GPIO_SetPinMode(gpio, ll_pin, LL_GPIO_MODE_INPUT);
            LL_GPIO_SetPinPull(gpio, ll_pin, LL_GPIO_PULL_DOWN);
            break;
        default:
            /* Input+NoPull = Floating for F1 family */
            if (function == LL_GPIO_MODE_INPUT)
                LL_GPIO_SetPinMode(gpio, ll_pin, LL_GPIO_MODE_FLOATING);
            break;
    }
#else
    switch (pull_config)
    {
        case GPIO_PULLUP:
            LL_GPIO_SetPinPull(gpio, ll_pin, LL_GPIO_PULL_UP);
            break;
        case GPIO_PULLDOWN:
            LL_GPIO_SetPinPull(gpio, ll_pin, LL_GPIO_PULL_DOWN);
            break;
        default:
            LL_GPIO_SetPinPull(gpio, ll_pin, LL_GPIO_PULL_NO);
            break;
    }
#endif
}
/* Apply the alternate-function selection for a pin.
 * On STM32F1 there is no per-pin AF mux: `afnum` acts as a project-defined
 * remap selector translated into the matching AFIO remap macro.
 * On other families `afnum` is programmed directly via the LL GPIO API. */
static inline void stm_pin_SetAFPin(GPIO_TypeDef *gpio, uint16_t pin, uint32_t afnum)
{
#ifdef STM32F1
    // Enable AFIO clock
    __HAL_RCC_AFIO_CLK_ENABLE();
    if (afnum > 0)
    {
        switch (afnum)
        {
            case 1: // Remap SPI1
                __HAL_AFIO_REMAP_SPI1_ENABLE();
                break;
            case 2: // Remap I2C1
                __HAL_AFIO_REMAP_I2C1_ENABLE();
                break;
            case 3: // Remap USART1
                __HAL_AFIO_REMAP_USART1_ENABLE();
                break;
            case 4: // Remap USART2
                __HAL_AFIO_REMAP_USART2_ENABLE();
                break;
            case 5: // Partial Remap USART3
                __HAL_AFIO_REMAP_USART3_PARTIAL();
                break;
            case 6: // Partial Remap TIM1
                __HAL_AFIO_REMAP_TIM1_PARTIAL();
                break;
            case 7: // Partial Remap TIM3
                __HAL_AFIO_REMAP_TIM3_PARTIAL();
                break;
            case 8: // Full Remap TIM2
                __HAL_AFIO_REMAP_TIM2_ENABLE();
                break;
            case 9: // Full Remap TIM3
                __HAL_AFIO_REMAP_TIM3_ENABLE();
                break;
#if defined(AFIO_MAPR_CAN_REMAP_REMAP1)
            case 10: // CAN_RX mapped to PB8, CAN_TX mapped to PB9
                __HAL_AFIO_REMAP_CAN1_2();
                break;
#endif
            default:
                break;
        }
    }
#else
    uint32_t ll_pin = ll_pin_defines[STM_PIN(pin)];
    if (STM_PIN(pin) > 7)
        LL_GPIO_SetAFPin_8_15(gpio, ll_pin, afnum);
    else
        LL_GPIO_SetAFPin_0_7(gpio, ll_pin, afnum);
#endif
}
#endif
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import collections
import math
import multiprocessing
import platform
import queue
import random
import threading
import time
from typing import Callable
import numpy as np
from ..logger import get_logger
from ..random.rng import _random_seed_generator
from .collator import Collator
from .dataset import Dataset, StreamDataset
from .sampler import MapSampler, Sampler, SequentialSampler, StreamSampler
from .transform import PseudoTransform, Transform
try:
import thread
except:
import _thread as thread
logger = get_logger(__name__)
GLOBAL_TIMEOUT = 5
def raise_timeout_error():
    """Default timeout callback: abort the load with a RuntimeError."""
    raise RuntimeError("dataloader timeout")
class DataLoader:
    # Set True at the end of __init__; __del__-style cleanup guards check it.
    __initialized = False
    def __init__(
        self,
        dataset: Dataset,
        sampler: Sampler = None,
        transform: Transform = None,
        collator: Collator = None,
        num_workers: int = 0,
        timeout: int = 0,
        timeout_event: Callable = raise_timeout_error,
        divide: bool = False,
    ):
        r"""
        Provides a convenient way to iterate on a given dataset.
        `DataLoader` combines a dataset with `sampler`, `transform` and `collator`,
        make it flexible to get minibatch continually from a dataset.
        :type dataset: Dataset
        :param dataset: dataset from which to load the minibatch.
        :type sampler: Sampler
        :param sampler: defines the strategy to sample data from the dataset.
        :type transform: Transform
        :param transform: defined the transforming strategy for a sampled batch.
            Default: None
        :type collator: Collator
        :param collator: defined the merging strategy for a transformed batch.
            Default: None
        :type num_workers: int
        :param num_workers: the number of sub-process to load, transform and collate
            the batch. ``0`` means using single-process. Default: 0
        :type timeout: int
        :param timeout: if positive, means the timeout value(second) for collecting a
            batch from workers. Default: 0
        :type timeout_event: Callable
        :param timeout_event: callback function triggered by timeout, default to raise
            runtime error.
        :type divide: bool
        :param divide: define the paralleling strategy in multi-processing mode.
            ``True`` means one batch is divided into :attr:`num_workers` pieces, and
            the workers will process these pieces parallelly. ``False`` means
            different sub-process will process different batch. Default: False
        """
        # Validate the parallelism-related arguments first.
        if num_workers < 0:
            raise ValueError("num_workers should not be negative")
        if timeout < 0:
            raise ValueError("timeout should not be negative")
        if divide and num_workers <= 1:
            raise ValueError("divide should not be set to True when num_workers <= 1")
        self.dataset = dataset
        self.num_workers = num_workers
        self.timeout = timeout
        self.timeout_event = timeout_event
        self.divide = divide
        # Pick a default sampler matching the dataset kind (stream vs map).
        if isinstance(dataset, StreamDataset):
            self.sampler = sampler if sampler else StreamSampler(batch_size=1)
            assert isinstance(
                self.sampler, StreamSampler
            ), "types of dataset and sampler do not match"
        else:
            assert isinstance(
                dataset, Dataset
            ), "Can not recognize this kind of dataset: %s" % type(dataset)
            self.sampler = (
                sampler
                if sampler
                else SequentialSampler(dataset, batch_size=1, drop_last=False)
            )
            assert isinstance(
                self.sampler, MapSampler
            ), "types of dataset and sampler do not match"
            # In divide mode each worker gets batch_size / num_workers items.
            if divide:
                if self.sampler.batch_size <= self.num_workers:
                    raise ValueError(
                        "batch size must not smaller than num_workers in divide mode."
                    )
                elif self.sampler.batch_size % self.num_workers:
                    logger.warning(
                        "batch size is not divisible by num_workers, may lose performance in divide mode."
                    )
        if transform is None:
            self.transform = PseudoTransform()
        else:
            self.transform = transform
        if collator is None:
            self.collator = Collator()
        else:
            self.collator = collator
        self.__initialized = True
    def __iter__(self):
        # Dispatch to the serial/parallel, map/stream iterator implementation.
        if platform.system() == "Windows" and self.num_workers > 0:
            print(
                "pyarrow.plasma does not support ParallelDataLoader on windows, changing num_workers to be zero"
            )
            self.num_workers = 0
        if isinstance(self.dataset, StreamDataset):
            if not self.num_workers:
                return _SerialStreamDataLoaderIter(self)
            else:
                return _ParallelStreamDataLoaderIter(self)
        else:
            assert isinstance(
                self.dataset, Dataset
            ), "Can not recognize this kind of dataset: %s" % type(self.dataset)
            if not self.num_workers:
                return _SerialMapDataLoaderIter(self)
            else:
                return _ParallelMapDataLoaderIter(self)
    def __len__(self):
        # Number of batches per epoch, as defined by the sampler.
        return len(self.sampler)
class _BaseMapDataLoaderIter:
    """Common state and iteration protocol for map-style dataset iterators."""
    def __init__(self, loader):
        self.dataset = loader.dataset
        self.sampler = loader.sampler
        # Per-iterator base random seed, forwarded (offset) to worker processes.
        self.seed = _random_seed_generator().__next__()
        self.transform = loader.transform
        self.collator = loader.collator
        self.num_workers = loader.num_workers
        self.timeout = loader.timeout
        self.timeout_event = loader.timeout_event
        self.divide = loader.divide
        self.num_processed = 0
    def _get_next_batch(self):
        # Subclasses implement the actual batch retrieval.
        raise NotImplementedError
    def __len__(self):
        return len(self.sampler)
    def __iter__(self):
        return self
    def __next__(self):
        # Stop after exactly len(self) batches have been produced.
        if self.num_processed >= len(self):
            raise StopIteration
        minibatch = self._get_next_batch()
        self.num_processed += 1
        return minibatch
class _SerialMapDataLoaderIter(_BaseMapDataLoaderIter):
    """Single-process iterator: sample indices, load, transform, collate."""
    def __init__(self, loader):
        super(_SerialMapDataLoaderIter, self).__init__(loader)
        self.indices_iter = iter(self.sampler)
    def _get_next_batch(self):
        # Load the sampled items, transform the batch, then collate it.
        indices = next(self.indices_iter)
        items = [self.dataset[idx] for idx in indices]
        trans_items = self.transform.apply_batch(items)
        return self.collator.apply(trans_items)
class _ParallelMapDataLoaderIter(_BaseMapDataLoaderIter):
    """Multi-process iterator for map-style datasets.

    Pipeline: a feeding process pushes (batch_idx, indices) tasks into
    per-worker queues; worker processes load and transform the items; a
    collecting process assembles full batches (gathering pieces in divide
    mode, re-ordering whole batches otherwise) onto a shared-memory queue.
    """
    __initialized = False
    def __init__(self, loader):
        super(_ParallelMapDataLoaderIter, self).__init__(loader)
        self.task_queues = [
            multiprocessing.Queue(maxsize=2) for _ in range(self.num_workers)
        ]
        # Cross-process counters (next batch to feed / to collect) and shutdown flag.
        self.feed_batch_idx = multiprocessing.Value("i", 0)
        self.target_batch_idx = multiprocessing.Value("i", 0)
        self.shutdown_flag = multiprocessing.Value("i", 0)
        self.trans_data_queues = [
            multiprocessing.Queue(maxsize=1) for _ in range(self.num_workers)
        ]
        # use shared-memory queue implemented by pyarrow plasma store.
        from ._queue import PlasmaShmQueue
        self.batch_queue = PlasmaShmQueue(maxsize=2)
        self.task_feeding_worker = multiprocessing.Process(
            target=_task_feeding_loop,
            args=(
                iter(self.sampler),
                self.task_queues,
                self.num_workers,
                self.divide,
                self.shutdown_flag,
                self.feed_batch_idx,
            ),
            daemon=True,
        )
        self.task_feeding_worker.start()
        self.workers = []
        for worker_id in range(self.num_workers):
            worker = multiprocessing.Process(
                target=_worker_loop,
                args=(
                    self.dataset,
                    self.task_queues[worker_id],
                    self.trans_data_queues[worker_id],
                    self.transform,
                    self.seed + worker_id + 1,
                    self.shutdown_flag,
                ),
                daemon=True,
            )
            worker.start()
            self.workers.append(worker)
        if self.divide:
            self.data_collecting_worker = multiprocessing.Process(
                target=_data_gathering_loop,
                args=(
                    self.trans_data_queues,
                    self.batch_queue,
                    self.collator,
                    len(self),
                    self.num_workers,
                    self.shutdown_flag,
                    self.target_batch_idx,
                ),
                daemon=True,
            )
        else:
            self.data_collecting_worker = multiprocessing.Process(
                target=_data_selecting_loop,
                args=(
                    self.trans_data_queues,
                    self.batch_queue,
                    self.collator,
                    len(self),
                    self.num_workers,
                    self.shutdown_flag,
                    self.target_batch_idx,
                ),
                daemon=True,
            )
        self.data_collecting_worker.start()
        self.__initialized = True
    def _check_workers(self):
        # Check the status of each worker process and surface failures.
        if not self.data_collecting_worker.is_alive():
            # BUGFIX: this previously read task_feeding_worker.exitcode, so a
            # crashed collecting worker went unreported whenever the feeder
            # had exited cleanly (exitcode 0).
            exitcode = self.data_collecting_worker.exitcode
            if exitcode != 0:
                raise RuntimeError("data collecting worker died. {}".format(exitcode))
        if not self.task_feeding_worker.is_alive():
            exitcode = self.task_feeding_worker.exitcode
            if exitcode != 0:
                raise RuntimeError("task feeding worker died. {}".format(exitcode))
        for worker_id, worker in enumerate(self.workers):
            if not worker.is_alive():
                exitcode = worker.exitcode
                if exitcode != 0:
                    raise RuntimeError("worker:{} died. {}".format(worker_id, exitcode))
        logger.debug("all workers are alive.")
    def _get_next_batch(self):
        # Poll the batch queue, re-checking worker health and honoring timeout.
        start_time = time.time()
        while True:
            self._check_workers()
            try:
                return self.batch_queue.get(timeout=1)
            except queue.Empty:
                logger.debug("batch queue empty!")
            waited_time = time.time() - start_time
            if self.timeout > 0:
                if waited_time > self.timeout:
                    raise RuntimeError("get_next_batch timeout!")
    def _shutdown(self):
        # Signal shutdown, terminate all processes, and release the queues.
        with self.shutdown_flag.get_lock():
            self.shutdown_flag.value = 1
        if self.task_feeding_worker.is_alive():
            self.task_feeding_worker.terminate()
            self.task_feeding_worker.join()
        if self.data_collecting_worker.is_alive():
            self.data_collecting_worker.terminate()
            self.data_collecting_worker.join()
        for worker in self.workers:
            if worker.is_alive():
                worker.terminate()
                worker.join()
        for q in self.trans_data_queues:
            q.cancel_join_thread()
            q.close()
        for q in self.task_queues:
            q.cancel_join_thread()
            q.close()
        self.batch_queue.cancel_join_thread()
        self.batch_queue.close()
    def __del__(self):
        if self.__initialized:
            self._shutdown()
class _BaseStreamDataLoaderIter:
    """Common state and raw-data unpacking for stream-style iterators."""
    def __init__(self, loader):
        self.dataset = loader.dataset
        self.sampler = loader.sampler
        self.transform = loader.transform
        self.collator = loader.collator
        self.num_workers = loader.num_workers
        self.timeout = loader.timeout
        self.timeout_event = loader.timeout_event
    def _get_next_batch(self):
        # Subclasses implement the actual batch retrieval.
        raise NotImplementedError
    def _process_raw_data(self, raw_data):
        # Unpack a (batched_flag, data) pair from the dataset into a list of
        # per-sample tuples; unbatched items are wrapped into 1-tuples first.
        assert len(raw_data) == 2 and isinstance(
            raw_data[0], bool
        ), "StreamDataset should provide a binary tuple, the first item indicates whether the data was batched."
        if not raw_data[0]:
            data = list((x,) for x in raw_data[1])
        else:
            data = raw_data[1]
        ret = []
        for idx in range(len(data[0])):
            ret.append(tuple(e[idx] for e in data))
        return ret
    def __iter__(self):
        return self
    def __next__(self):
        # Stream iterators are unbounded; no StopIteration is raised here.
        return self._get_next_batch()
class _SerialStreamDataLoaderIter(_BaseStreamDataLoaderIter):
    """Single-process iterator for stream-style datasets."""
    def __init__(self, loader):
        super().__init__(loader)
        self.dataset_iter = iter(self.dataset)
        self.idx = 0
        # Items unpacked from the previous raw chunk but not yet consumed.
        self.unused = []
    def _try_get_raw_data(self, start_time):
        # Pull one raw chunk from the dataset, enforcing self.timeout via a
        # threading.Timer that interrupts the main thread.
        raw_data = None
        while not raw_data:
            try:
                if self.timeout > 0:
                    timer = threading.Timer(self.timeout, thread.interrupt_main)
                    timer.start()
                raw_data = next(self.dataset_iter)
                if self.timeout > 0:
                    timer.cancel()
            except KeyboardInterrupt:
                raw_data = self.timeout_event()
            except:
                # NOTE(review): bare except — this also swallows StopIteration
                # from an exhausted dataset iterator; confirm that is intended.
                if self.timeout > 0:
                    timer.cancel()
                waited_time = time.time() - start_time
                if waited_time > self.timeout:
                    raw_data = self.timeout_event()
        return raw_data
    def _get_next_batch(self):
        # Assemble one batch of sampler.batch_size transformed items, keeping
        # any surplus items from the raw chunk for the next batch.
        ret = []
        start_time = time.time()
        while len(ret) < self.sampler.batch_size:
            if len(self.unused) != 0:
                batch_data = self.unused
            else:
                raw_data = self._try_get_raw_data(start_time)
                batch_data = self._process_raw_data(raw_data)
            while len(batch_data) != 0 and len(ret) < self.sampler.batch_size:
                data = batch_data.pop()
                ret.append(self.transform.apply(data))
            self.unused = batch_data
        return self.collator.apply(ret)
class _ParallelStreamDataLoaderIter(_BaseStreamDataLoaderIter):
    """Multi-process iterator for stream-style datasets.

    Pipeline: one receiving process pulls raw chunks from the dataset
    iterator and round-robins the unpacked items into per-worker raw-data
    queues; transform workers apply the transform; a collecting process
    groups transformed items into batches on a shared-memory queue.
    """
    __initialized = False
    def __init__(self, loader):
        super().__init__(loader)
        self.shutdown_flag = multiprocessing.Value("i", 0)
        self.raw_data_queues = [
            multiprocessing.Queue(maxsize=1) for _ in range(self.num_workers)
        ]
        self.trans_data_queues = [
            multiprocessing.Queue(maxsize=1) for _ in range(self.num_workers)
        ]
        # shared-memory queue implemented by pyarrow plasma store
        from ._queue import PlasmaShmQueue
        self.batch_queue = PlasmaShmQueue(maxsize=2)
        # NOTE(review): "recieve_worker" is a typo for "receive_worker"; kept
        # as-is here since renaming the attribute would change the interface.
        self.recieve_worker = multiprocessing.Process(
            target=self._worker_to_raw_data_queues, daemon=True
        )
        self.recieve_worker.start()
        self.transform_workers = []
        for worker_id in range(self.num_workers):
            worker = multiprocessing.Process(
                target=self._worker_to_trans_data_queues, args=(worker_id,), daemon=True
            )
            worker.start()
            self.transform_workers.append(worker)
        self.collect_worker = multiprocessing.Process(
            target=self._worker_to_batch_queue, daemon=True
        )
        self.collect_worker.start()
        self.__initialized = True
    def _put_raw_data_queues(self, raw_data, qidx):
        # Distribute unpacked items round-robin over the raw-data queues,
        # starting at queue index qidx; returns the next index to use.
        batch_data = self._process_raw_data(raw_data)
        for data in batch_data:
            while True:
                qidx = qidx % self.num_workers
                try:
                    self.raw_data_queues[qidx].put(data)
                    break
                except queue.Full:
                    if self.shutdown_flag.value == 1:
                        break
                    logger.debug("raw data queue %d is full" % qidx)
                finally:
                    qidx += 1
        return qidx
    def _worker_to_raw_data_queues(self):
        # Receiving process: pull from the dataset iterator until shutdown.
        dataset_iter = iter(self.dataset)
        qidx = 0
        while True:
            if self.shutdown_flag.value == 1:
                break
            raw_data = next(dataset_iter)
            qidx = self._put_raw_data_queues(raw_data, qidx)
    def _worker_to_trans_data_queues(self, worker_id):
        # Transform worker: apply self.transform to each raw item and forward
        # the result to this worker's trans-data queue.
        while True:
            if self.shutdown_flag.value == 1:
                break
            try:
                data = self.raw_data_queues[worker_id].get(timeout=GLOBAL_TIMEOUT)
            except queue.Empty:
                continue
            trans_data = self.transform.apply(data)
            while True:
                try:
                    self.trans_data_queues[worker_id].put(trans_data)
                    break
                except queue.Full:
                    if self.shutdown_flag.value == 1:
                        break
                    logger.debug("batch queue if full")  # NOTE(review): message typo — should read "is full"
    def _worker_to_batch_queue(self):
        # Collector: gather batch_size transformed items (round-robin over the
        # workers) and publish the collated batch to the shared-memory queue.
        cnt = -1
        trans_items = []
        while True:
            if self.shutdown_flag.value == 1:
                break
            cnt += 1
            queue_id = cnt % self.num_workers
            try:
                trans_item = self.trans_data_queues[queue_id].get(
                    timeout=GLOBAL_TIMEOUT
                )
            except queue.Empty:
                continue
            trans_items.append(trans_item)
            if len(trans_items) == self.sampler.batch_size:
                batch_data = self.collator.apply(trans_items)
                while True:
                    try:
                        self.batch_queue.put(batch_data, timeout=1)
                        break
                    except queue.Full:
                        if self.shutdown_flag.value == 1:
                            break
                        logger.debug("batch queue is full")
                trans_items = []
    def _check_workers(self):
        # Raise if any background process died with a nonzero exit code.
        if not self.collect_worker.is_alive():
            exitcode = self.collect_worker.exitcode
            if exitcode != 0:
                raise RuntimeError("collator worker died. {}".format(exitcode))
        for worker_id, worker in enumerate(self.transform_workers):
            if not worker.is_alive():
                exitcode = worker.exitcode
                if exitcode != 0:
                    raise RuntimeError(
                        "worker: {} died. {}".format(worker_id, exitcode)
                    )
    def _get_next_batch(self):
        # Poll the batch queue; on timeout, feed the timeout_event() result
        # back through the pipeline so a batch is eventually produced.
        start_time = time.time()
        while True:
            self._check_workers()
            try:
                return self.batch_queue.get(timeout=1)
            except queue.Empty:
                logger.debug("batch queue empty!")
            waited_time = time.time() - start_time
            if self.timeout > 0 and waited_time > self.timeout:
                self._put_raw_data_queues(self.timeout_event(), 0)
    def _shutdown(self):
        # Signal shutdown, terminate all processes, and release the queues.
        with self.shutdown_flag.get_lock():
            self.shutdown_flag.value = 1
        if self.recieve_worker.is_alive():
            self.recieve_worker.terminate()
            self.recieve_worker.join()
        if self.collect_worker.is_alive():
            self.collect_worker.terminate()
            self.collect_worker.join()
        for worker in self.transform_workers:
            if worker.is_alive():
                worker.terminate()
                worker.join()
        for q in self.raw_data_queues:
            q.cancel_join_thread()
            q.close()
        for q in self.trans_data_queues:
            q.cancel_join_thread()
            q.close()
        self.batch_queue.cancel_join_thread()
        self.batch_queue.close()
    def __del__(self):
        if self.__initialized:
            self._shutdown()
def _task_feeding_loop(
    indices_iter, task_queues, num_workers, divide, shutdown_flag, feed_batch_idx
):
    # Feed the indices into the task queues
    # Runs in its own process; exits when the sampler is exhausted or on shutdown.
    while True:
        if shutdown_flag.value == 1:
            break
        batch_idx = feed_batch_idx.value
        try:
            indices = next(indices_iter)
        except StopIteration:
            break
        if divide:
            # make sure all task_queues is ready for put
            while any([q.full() for q in task_queues]):
                if shutdown_flag.value == 1:
                    return
            # divide into small pieces, feed to different workers.
            sub_num = math.ceil(len(indices) / num_workers)
            for worker_id in range(num_workers):
                sub_indices = indices[worker_id * sub_num : (worker_id + 1) * sub_num]
                task_queues[worker_id].put((batch_idx, sub_indices))
        else:
            # distribute tasks to different workers uniformly.
            target_id = batch_idx % num_workers
            while task_queues[target_id].full():
                if shutdown_flag.value == 1:
                    return
            task_queues[target_id].put((batch_idx, indices))
        with feed_batch_idx.get_lock():
            feed_batch_idx.value += 1
def _worker_loop(dataset, task_queue, trans_data_queue, transform, seed, shutdown_flag):
    # Get dataset items and do the transform
    # Runs in a worker process; seeds both RNGs so augmentations differ per worker.
    random.seed(seed)
    np.random.seed(seed)
    while True:
        if shutdown_flag.value == 1:
            break
        try:
            batch_idx, indices = task_queue.get(timeout=GLOBAL_TIMEOUT)
        except queue.Empty:
            continue
        if len(indices) > 0:
            items = [dataset[idx] for idx in indices]
            trans_items = transform.apply_batch(items)
        else:
            # in case of incomplete last batch
            trans_items = ()
        # Forward the transformed piece, retrying until it fits or shutdown.
        while True:
            try:
                trans_data_queue.put((batch_idx, trans_items), timeout=1)
                break
            except queue.Full:
                if shutdown_flag.value == 1:
                    break
                logger.debug("batch part queue is full!")
def _data_gathering_loop(
    trans_data_queues,
    batch_queue,
    collator,
    length,
    num_workers,
    shutdown_flag,
    target_idx,
):
    """Gather per-worker pieces into full batches (divide mode).

    Runs in its own process. For each target batch index it takes one piece
    from every worker's queue (all pieces must carry the same batch_idx),
    concatenates them, collates, and publishes the batch to ``batch_queue``.
    """
    while True:
        if shutdown_flag.value == 1:
            break
        target_batch_idx = target_idx.value
        if target_batch_idx >= length:
            break
        full_trans_items = []
        for worker_id in range(num_workers):
            while True:
                try:
                    batch_idx, trans_items = trans_data_queues[worker_id].get(
                        timeout=GLOBAL_TIMEOUT
                    )
                    break
                except queue.Empty:
                    if shutdown_flag.value == 1:
                        break
                    logger.debug(
                        "worker:{} data queue get timeout! target batch idx:{}".format(
                            worker_id, target_batch_idx
                        )
                    )
            # NOTE(review): if the inner loop exits via the shutdown branch,
            # batch_idx may be unbound or stale here; tolerable only because
            # shutdown tears the process down — confirm before refactoring.
            if batch_idx != target_batch_idx:
                # BUGFIX: error message previously read "Unexperted".
                raise RuntimeError(
                    "Unexpected batch_idx in data gathering loop. worker_id:{}.".format(
                        worker_id
                    )
                )
            else:
                full_trans_items.extend(trans_items)
        # Merge different parts into a batch.
        full_batch = collator.apply(full_trans_items)
        while True:
            try:
                batch_queue.put(full_batch, timeout=1)
                break
            except queue.Full:
                if shutdown_flag.value == 1:
                    break
                logger.debug("batch queue is full!")
        with target_idx.get_lock():
            target_idx.value += 1
    batch_queue.disconnect_client()
def _data_selecting_loop(
    trans_data_queues,
    batch_queue,
    collator,
    length,
    num_workers,
    shutdown_flag,
    target_idx,
):
    # Make sure that batch is generated exactly with the same order as generated indices
    # (non-divide mode: each worker produces whole batches; they are consumed
    # round-robin so output order matches the feeding order).
    while True:
        if shutdown_flag.value == 1:
            break
        target_batch_idx = target_idx.value
        if target_batch_idx >= length:
            break
        target_worker_id = target_batch_idx % num_workers
        while True:
            try:
                batch_idx, trans_items = trans_data_queues[target_worker_id].get(
                    timeout=GLOBAL_TIMEOUT
                )
                batch_data = collator.apply(trans_items)
                break
            except queue.Empty:
                if shutdown_flag.value == 1:
                    break
                logger.debug(
                    "worker:{} data queue get timeout! target batch idx:{}".format(
                        target_worker_id, target_batch_idx
                    )
                )
        if batch_idx != target_batch_idx:
            raise RuntimeError(
                "batch_idx {} mismatch the target_batch_idx {}".format(
                    batch_idx, target_batch_idx
                )
            )
        while True:
            try:
                batch_queue.put(batch_data, timeout=1)
                break
            except queue.Full:
                if shutdown_flag.value == 1:
                    break
                logger.debug("batch queue is full!")
        with target_idx.get_lock():
            target_idx.value += 1
    batch_queue.disconnect_client()
|
#! /usr/bin/python2
# -*- coding: utf8 -*-
from __future__ import division
from math import ceil
import pykka, tweepy, time
from warnings import warn
class Fetcher(pykka.ThreadingActor):
    """Fetch friends and tweets for a given ego.
    One actor per ego user must be created."""
    def __init__(self, api, ego_id):
        super(Fetcher, self).__init__()
        self.api = api  # authenticated tweepy API client
        self.ego_id = ego_id  # id of the ego (central) user
    def fetch_data(self, ids):
        """ Get timelines with GET statuses/user_timeline
        " up to 3,200 of a user’s most recent Tweets, 180 requests / 15 min
        " We hydrate users info with 'user' field in this API's answer
        " No need of GET users/lookup API
        """
        data = []  # filtered timeline data, one entry per processed friend
        processed = []  # friend ids fetched successfully
        errors = []  # friend ids dropped (e.g. users with no tweets)
        try :
            for fid in ids:
                ## Add exception to handle user with no tweets
                try:
                    bulk = self.api.user_timeline(id=fid, count=100)
                    data.append(self.data_filter(bulk))
                    processed.append(fid)
                except IndexError:
                    # drop friend id without tweet
                    # except more general + stock error in errors list {fid, exception} ?
                    errors.append(fid)
            out_msg = {'status':0, 'data':data, \
                'unprocessed_friends':None, 'errors':errors}
        except tweepy.TweepError as exc:
            # API failure (e.g. rate limit): report remaining unprocessed ids.
            warn('0xDEADFEED')
            unprocessed_friends = set(ids).difference(set(processed))
            out_msg = {'status':0xDEADFEED, 'data':data, \
                'unprocessed_friends':unprocessed_friends, 'errors':errors}
        return out_msg
    def data_filter(self, bulk):
        ## Use closure + list comprehension for faster loop
        # Extract text, language and urls from each tweet in the timeline bulk.
        texts = []
        texts_lang = []
        texts_urls = []
        def insert(tw):
            texts.append(tw.text)
            texts_lang.append(tw.lang)
            texts_urls.append(tw.entities['urls'])
        [insert(tw) for tw in bulk]
        # Add user description data (not a tweet, but contains similar info)
        user = bulk[0].user
        texts.append(user.description)
        texts_lang.append(user.lang)
        texts_urls.append(user.entities['description']['urls'])
        return {'ego_id':self.ego_id ,'u_id':user.id, \
            'u_screen_name':user.screen_name, 'texts':texts, \
            'texts_lang':texts_lang, 'texts_urls':texts_urls }
|
# model settings for an EncoderDecoder segmentor (mmsegmentation-style config)
_base_ = [
    '../_base_/datasets/zhuhai15708_3class.py', '../_base_/default_runtime.py',
    '../_base_/schedules/schedule_160k.py'
]
#norm_cfg = dict(type='SyncBN', requires_grad=True)
norm_cfg = dict(type='BN', requires_grad=True)
model = dict(
    type='EncoderDecoder',
    pretrained='mmcls://mobilenet_v2',
    backbone=dict(
        #_delete_=True,
        type='MobileNetV2',
        widen_factor=1.,
        strides=(1, 2, 2, 1, 1, 1, 1),
        dilations=(1, 1, 1, 2, 2, 4, 4),
        out_indices=(0, 1, 2, 3, 4, 5, 6)),
    decode_head=dict(
        type='CPHeadPlus_V2',
        in_channels=320,
        in_index=6,
        channels=80,  # channels before the final classification conv; must match prior_channels of CPNet
        prior_channels=80,
        prior_size=64,  # prior_size must match the backbone's output feature-map size
        am_kernel_size=6,
        aggress_dilation=2,
        groups=1,
        # drop_out_ratio=0.1,
        num_classes=3,
        norm_cfg=norm_cfg,
        align_corners=False,
        c0_in_channels=24,
        c0_channels=24,  # channels after the decoder reduces the first (c0) stage
        c1_in_channels=-1,  # last stage is concatenated with the CPNet output; pass -1 to disable
        c1_channels=0,  # channels after the decoder reduces the last stage; -1 disables (see c1_in_channels)
        detail_index=1,
        detail_channels=24,
        arm_channels=-1,  # any value < 0 disables the ARM branch
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0, alpha=1, gamma=0, use_pixel_weight=True, pa=2, only_block=True),
        loss_prior_decode=dict(type='AffinityLoss', loss_weight=1.0),
        loss_detail_loss=dict(type='DetailAggregateLoss', loss_weight=1.0, use_x8=True, only_x1=True),
    ),
    auxiliary_head=dict(
        type='FCNHead',
        in_channels=64,
        in_index=3,
        channels=24,
        num_convs=1,
        concat_input=False,
        # drop_out_ratio=0.1,
        num_classes=3,
        norm_cfg=norm_cfg,
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)))
# model training and testing settings
train_cfg = dict()
test_cfg = dict(mode='whole')
|
import {
  SETCREDENTIALS,
  SET_AUTH_PATH
} from '../../actions/accounts/accountActions';

// Parse document.cookie ("k1=v1; k2=v2") into a plain object.
// Bug fix: the original wrote every pair to the literal property "key"
// (cookies.key = value), so the login/user_hash lookup below could
// never match.  Keys are also trimmed because cookie pairs after the
// first are prefixed with a space.
const cookies = {}
let login = 'root'
let password = ''
document.cookie.split(';').forEach(cookie => {
  const key = cookie.split('=')[0]
  const value = cookie.split('=')[1]
  cookies[key.trim()] = value
})
// Restore credentials from cookies when both are present.
if (cookies.hasOwnProperty('login') && cookies.hasOwnProperty('user_hash')) {
  login = cookies['login']
  password = cookies['user_hash']
}

const initialState = {
  login: login,
  password: password,
  authPath: '/sign-in',
}

// Reducer holding the account credentials and the auth redirect path.
const accountsReducer = (state = initialState, action) => {
  switch (action.type) {
    case SETCREDENTIALS:
      return {...state, login: action.login, password: action.password}
    case SET_AUTH_PATH:
      return {...state, authPath: action.authPath}
    default: return state
  }
}

export default accountsReducer
|
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the root directory of this source tree.
 */
#pragma once
#include <functional>
#include <ABI45_0_0jsi/ABI45_0_0jsi.h>
namespace ABI45_0_0facebook {
namespace ABI45_0_0React {
// Factory callback that lazily produces a JSI value for the given runtime.
using ValueFactory = std::function<jsi::Value(jsi::Runtime &runtime)>;
} // namespace ABI45_0_0React
} // namespace ABI45_0_0facebook
|
/*
Copyright (c) 2011 David Björklund
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
/*
* The c++-module.
*/
var binding = require('bindings')('binding');
/*
* Parsers, used to parse a buffer when decompressing.
*/
var parsers = exports.parsers = {
json: function(buffer) {
return JSON.parse(buffer);
},
string: function(buffer) {
return buffer.toString("utf8");
},
raw: function(buffer) {
return buffer;
}
};
/**
* Compress asyncronous.
* If input isn't a string or buffer, automatically convert to buffer by using
* JSON.stringify.
*/
exports.compress = function(input, callback) {
if(!Buffer.isBuffer(input)){
if (!(typeof input === 'string')) {
input = JSON.stringify(input);
}
input = new Buffer(input);
}
binding.compress(input, callback);
};
/**
* Compress syncronous.
* If input isn't a string or buffer, automatically convert to buffer by using
* JSON.stringify.
*/
exports.compressSync = function(input) {
if(!Buffer.isBuffer(input)){
if (!(typeof input === 'string')) {
input = JSON.stringify(input);
}
input = new Buffer(input);
}
var result;
var err;
binding.compressSync(input, function(e, r) {
result = r;
err = e;
});
if(err) {
throw err;
}
return result;
};
/**
 * Asynchronously decide if a buffer is compressed in a correct way.
 */
exports.isValidCompressed = binding.isValidCompressed;
/**
* Syncronous decide if a buffer is compressed in a correct way.
*/
exports.isValidCompressedSync = function(input, callback) {
var err, result;
binding.isValidCompressedSync(input, function(e, r) {
err = e;
result = r;
});
if(err) {
throw err;
}
return result;
};
/**
* Asyncronous uncompress previously compressed data.
* A parser can be attached. If no parser is attached, return buffer.
*/
exports.uncompress = function(compressed, parse, callback) {
if (parse == null) {
parse = parsers.raw;
}
binding.uncompress(compressed, function(err, data) {
if (data != null) {
data = parse(data);
}
callback(err, data);
});
};
/**
 * Alias to uncompress.
 */
exports.decompress = exports.uncompress;
/**
* Syncronous uncompress previously compressed data.
* A parser can be attached. If no parser is attached, return buffer.
*/
exports.uncompressSync = function(compressed, parse) {
if (!parse) {
parse = parsers.raw;
}
var err, data;
binding.uncompressSync(compressed, function(e, d) {
if (d) {
d = parse(d);
}
err = e;
data = d;
});
if(err) {
throw err;
}
return data;
};
/**
 * Alias to uncompressSync.
 */
exports.decompressSync = exports.uncompressSync;
|
#!/usr/bin/env python3
from smbus2 import SMBus, i2c_msg
import struct
from time import sleep
def calculateCRC(input):
    """Sensirion CRC-8: polynomial 0x31, init 0xFF, no final XOR.

    Generalized from the original hard-coded 2-byte loop so any byte
    sequence can be checksummed (the SPS30 protocol itself uses 2-byte
    words, for which the result is unchanged).

    :param input: iterable of byte values (0..255)
    :return: 8-bit CRC as an int
    """
    crc = 0xFF
    for byte in input:
        crc ^= byte
        for _ in range(8):
            if crc & 0x80:
                crc = ((crc << 1) ^ 0x31) & 0xFF
            else:
                crc = (crc << 1) & 0xFF
    return crc
def checkCRC(result):
    """Verify Sensirion framing: every 3rd byte is the CRC of the two
    data bytes before it.

    Bug fix: the original overwrote the verdict on every word, so only
    the LAST word's CRC was actually checked; it also raised NameError
    on input shorter than 3 bytes.  Now any bad word fails immediately.

    :param result: flat list of bytes as read from the sensor
    :return: True when every word's CRC matches, else False
    """
    for i in range(2, len(result), 3):
        if result[i] != calculateCRC([result[i - 2], result[i - 1]]):
            return False
    return True
def bytes_to_int(bytes):
    """Fold an iterable of byte values (big-endian) into one integer."""
    value = 0
    for b in bytes:
        # shift the accumulator up one byte and append the next one
        value = (value << 8) + int(b)
    return value
def convertPMValues(value):
    """Reinterpret a 32-bit big-endian integer as an IEEE-754 float.

    Bug fix: the original round-tripped through hex() string formatting,
    which drops leading zeros — bytes.fromhex() then fails on any value
    whose hex form has fewer than 8 digits (e.g. 0).  Packing the integer
    directly is exact for all 32-bit values.

    :param value: unsigned 32-bit integer read from the sensor
    :return: the float the bytes encode
    """
    return struct.unpack('>f', struct.pack('>I', value))[0]
class SPS30():
    """Minimal I2C driver for the Sensirion SPS30 particulate-matter sensor.

    All transfers use Sensirion framing: 2 data bytes followed by 1 CRC
    byte (see calculateCRC/checkCRC).

    Bug fixes relative to the original:
    * set_auto_cleaning_interval() and start_measurement() appended to
      the CLASS-level command lists, so every call grew the shared
      command and corrupted subsequent transfers; commands are now built
      locally.
    * dict_values was a single class-level dict shared by all instances;
      each instance now gets its own copy (the class attribute is kept
      for backward compatibility).
    """

    SPS_ADDR = 0x69
    # Command words (pointer addresses), MSB first.
    START_MEAS = [0x00, 0x10]
    STOP_MEAS = [0x01, 0x04]
    R_DATA_RDY = [0x02, 0x02]
    R_VALUES = [0x03, 0x00]
    RW_AUTO_CLN = [0x80, 0x04]
    START_CLN = [0x56, 0x07]
    R_ARTICLE_CD = [0xD0, 0x25]
    R_SERIAL_NUM = [0xD0, 0x33]
    RESET = [0xD3, 0x04]
    R_VERSION = [0xD1, 0x00]

    # Status / error codes returned by the read_* helpers.
    NO_ERROR = 1
    ARTICLE_CODE_ERROR = -1
    SERIAL_NUMBER_ERROR = -2
    AUTO_CLN_INTERVAL_ERROR = -3
    DATA_READY_FLAG_ERROR = -4
    MEASURED_VALUES_ERROR = -5

    # Template for measurement results; copied per instance in __init__.
    dict_values = {"pm1p0": None,
                   "pm2p5": None,
                   "pm4p0": None,
                   "pm10p0": None,
                   "nc0p5": None,
                   "nc1p0": None,
                   "nc2p5": None,
                   "nc4p0": None,
                   "nc10p0": None,
                   "typical": None}

    def __init__(self, port):
        """Open the I2C bus identified by ``port``."""
        self.bus = SMBus(port)
        # Per-instance copy so two sensors don't share one results dict.
        self.dict_values = dict(SPS30.dict_values)

    def _read_words(self, command, length):
        # Issue a command then read ``length`` raw bytes back.
        write = i2c_msg.write(self.SPS_ADDR, command)
        self.bus.i2c_rdwr(write)
        read = i2c_msg.read(self.SPS_ADDR, length)
        self.bus.i2c_rdwr(read)
        return [bytes_to_int(read.buf[i]) for i in range(read.len)]

    def _decode_ascii(self, result):
        # Drop every 3rd (CRC) byte and join the data bytes as characters.
        chars = []
        for i in range(2, len(result), 3):
            chars.append(chr(result[i - 2]))
            chars.append(chr(result[i - 1]))
        return "".join(chars)

    def read_article_code(self):
        """Return the article code string, or ARTICLE_CODE_ERROR on bad CRC."""
        result = self._read_words(self.R_ARTICLE_CD, 48)
        if checkCRC(result):
            return self._decode_ascii(result)
        return self.ARTICLE_CODE_ERROR

    def read_device_serial(self):
        """Return the serial number string, or SERIAL_NUMBER_ERROR on bad CRC."""
        result = self._read_words(self.R_SERIAL_NUM, 48)
        if checkCRC(result):
            return self._decode_ascii(result)
        return self.SERIAL_NUMBER_ERROR

    def read_device_version(self):
        """Return the firmware version string.

        NOTE(review): mirrors the original, which reuses
        SERIAL_NUMBER_ERROR on CRC failure here — confirm whether a
        dedicated error code is wanted.
        """
        result = self._read_words(self.R_VERSION, 3)
        if checkCRC(result):
            return self._decode_ascii(result)
        return self.SERIAL_NUMBER_ERROR

    def read_auto_cleaning_interval(self):
        """Return the auto-cleaning interval in seconds (uint32)."""
        result = self._read_words(self.RW_AUTO_CLN, 6)
        if checkCRC(result):
            # Data bytes sit at 0, 1, 3, 4; bytes 2 and 5 are CRCs.
            return (result[0] * pow(2, 24) + result[1] * pow(2, 16)
                    + result[3] * pow(2, 8) + result[4])
        return self.AUTO_CLN_INTERVAL_ERROR

    def set_auto_cleaning_interval(self, seconds):
        """Write the auto-cleaning interval (seconds, uint32 big-endian)."""
        # Build the payload locally instead of mutating RW_AUTO_CLN.
        data = [(seconds >> 24) & 0xFF, (seconds >> 16) & 0xFF]
        data.append(calculateCRC(data[0:2]))
        data += [(seconds >> 8) & 0xFF, seconds & 0xFF]
        data.append(calculateCRC(data[3:5]))
        write = i2c_msg.write(self.SPS_ADDR, list(self.RW_AUTO_CLN) + data)
        self.bus.i2c_rdwr(write)

    def start_fan_cleaning(self):
        """Trigger a manual fan-cleaning cycle."""
        write = i2c_msg.write(self.SPS_ADDR, self.START_CLN)
        self.bus.i2c_rdwr(write)

    def start_measurement(self):
        """Start continuous measurement (argument 0x0300 per datasheet)."""
        # Build the command locally instead of mutating START_MEAS.
        args = [0x03, 0x00]
        cmd = list(self.START_MEAS) + args + [calculateCRC(args)]
        write = i2c_msg.write(self.SPS_ADDR, cmd)
        self.bus.i2c_rdwr(write)

    def stop_measurement(self):
        """Stop continuous measurement."""
        write = i2c_msg.write(self.SPS_ADDR, self.STOP_MEAS)
        self.bus.i2c_rdwr(write)

    def read_data_ready_flag(self):
        """Return 1 when a new measurement is ready, 0 when not,
        or DATA_READY_FLAG_ERROR on bad CRC."""
        result = self._read_words(self.R_DATA_RDY, 3)
        if checkCRC(result):
            return result[1]
        return self.DATA_READY_FLAG_ERROR

    def read_measured_values(self):
        """Read all measured values into self.dict_values.

        Returns NO_ERROR on success, MEASURED_VALUES_ERROR on bad CRC.
        """
        result = self._read_words(self.R_VALUES, 60)
        if checkCRC(result):
            self.parse_sensor_values(result)
            return self.NO_ERROR
        return self.MEASURED_VALUES_ERROR

    def device_reset(self):
        """Soft-reset the sensor and wait for it to come back up."""
        write = i2c_msg.write(self.SPS_ADDR, self.RESET)
        self.bus.i2c_rdwr(write)
        sleep(1)

    def parse_sensor_values(self, input):
        """Decode ten big-endian IEEE-754 floats into self.dict_values.

        Each 6-byte group holds 4 data bytes and 2 interleaved CRCs at
        offsets 2 and 5; data bytes sit at i-4, i-3, i-1, i.
        Relies on dict insertion order (Python 3.7+) to map values onto
        the keys in declaration order.
        """
        index = 0
        pm_list = []
        for i in range(4, len(input), 6):
            value = (input[i] + input[i - 1] * pow(2, 8)
                     + input[i - 3] * pow(2, 16) + input[i - 4] * pow(2, 24))
            pm_list.append(value)
        for i in self.dict_values.keys():
            self.dict_values[i] = convertPMValues(pm_list[index])
            index += 1
|
"""Playbook Variable Model"""
# third-party
from pydantic import BaseModel, Field
class PlaybookVariableModel(BaseModel):
    """Playbook Variable Model

    Parses the variable into its individual parts:

    #App:1234:output!String
    """

    # NOTE(review): fields are typed ``str`` but default to None —
    # presumably they should be Optional[str]; confirm against callers.
    app_type: str = Field(
        None,
        description='The application type (e.g., App|Trigger).',
    )
    job_id: str = Field(
        None,
        description='The job id.',
    )
    key: str = Field(
        None,
        description='The variable key (e.g., app.api_token).',
    )
    type: str = Field(
        None,
        description='The specific variable type (e.g., String, StringArray etc).',
    )
|
import sys
import pytest
sys.path.append('../')
from model import *
def setup():
    """Build a fixture Course with two students, two categories
    (Quizzes 25%, Midterm1 15%), five gradeables and a spread of scores.

    Joe: 4/5 on every quiz1+quiz2 question, 3/5 on exam1, 2/5 on retake.
    Mary: 5/5 on quiz1, 4/5 on exam1, 1/5 on retake; quiz3 has no scores.
    """
    gb = Course('Math', 'Fall 2017')
    joe = Student(gb, 'Joe', 'Davis', 'joe@pdx.edu')
    mary = Student(gb, 'Mary', 'Wilcox', 'm2w@pdx.edu')
    gb.students.append(joe)
    gb.students.append(mary)
    quizzes = Category(gb, 'Quizzes', 25)
    mid1 = Category(gb, 'Midterm1', 15)
    gb.categories.append(quizzes)
    gb.categories.append(mid1)
    quiz1 = Gradeable(gb, 'Quiz1', quizzes, 15)
    quiz2 = Gradeable(gb, 'Quiz2', quizzes, 15)
    quiz3 = Gradeable(gb, 'Quiz3', quizzes, 15)
    exam1 = Gradeable(gb, 'Exam1', mid1, 15)
    # retake substitutes at 50% credit (sub_pct)
    exam1_retake = Gradeable(gb, 'Exam1 Retake', mid1, 15, sub_pct=50.0)
    # three 5-point questions per gradeable (quiz3 intentionally empty of scores)
    for i in range(3):
        quiz1.add_question(5)
        quiz2.add_question(5)
        exam1.add_question(5)
        exam1_retake.add_question(5)
    gb.gradeables.append(quiz1)
    gb.gradeables.append(quiz2)
    gb.gradeables.append(quiz3)
    gb.gradeables.append(exam1)
    gb.gradeables.append(exam1_retake)
    for question in quiz1.questions:
        s = gb.get_score(joe, quiz1, question)
        s.value = 4
        s = gb.get_score(mary, quiz1, question)
        s.value = 5
    for question in quiz2.questions:
        s = gb.get_score(joe, quiz2, question)
        s.value = 4
    for question in exam1.questions:
        s = gb.get_score(joe, exam1, question)
        s.value = 3
        s = gb.get_score(mary, exam1, question)
        s.value = 4
    for question in exam1_retake.questions:
        s = gb.get_score(joe, exam1_retake, question)
        s.value = 2
        s = gb.get_score(mary, exam1_retake, question)
        s.value = 1
    return gb
def test_quiz_averages_correct():
    """Quiz category percentage, with and without dropping the lowest score.

    Joe: 12+12 of 30 points; Mary: 15 of 30 (only quiz1 scored).
    With drop_low_n=1 the lowest gradeable is excluded from both
    numerator and denominator.
    """
    gb = setup()
    quizzes = gb.categories[0]
    joe = gb.students[0]
    mary = gb.students[1]
    assert quizzes.combined_pct(joe) == 24.0 * 100 / 30
    assert quizzes.combined_pct(mary) == 15 * 100 / 30
    quizzes.drop_low_n = 1
    assert quizzes.combined_pct(joe) == 12.0 * 100 / 15
    assert quizzes.combined_pct(mary) == 15.0 * 100 / 15
def test_midterm_retake_correct():
    """A retake with sub_pct=50 adjusts the midterm percentage."""
    course = setup()
    midterm = course.categories[1]
    joe, mary = course.students[0], course.students[1]
    # Joe scored lower on the retake, Mary's retake lifts her to 13.5/15.
    assert midterm.combined_pct(joe) == 12 * 100 / 15.
    assert midterm.combined_pct(mary) == 13.5 * 100 / 15.
def test_removing_a_student():
    """Removing a student deletes their scores, resets drop_low_n, and
    shrinks the set of gradeables that still have any scores."""
    gb = setup()
    quizzes = gb.categories[0]
    joe = gb.students[0]
    mary = gb.students[1]
    assert len(gb.gradeables_with_scores()) == 4
    assert quizzes.combined_pct(joe) == 24 * 100 / 30.
    assert quizzes.combined_pct(mary) == 15 * 100 / 30.
    assert len(gb.scores) == 24
    gb.remove_student(joe)
    assert len(gb.students) == 1
    assert quizzes.drop_low_n == 0
    assert len(gb.scores) == 12
    # quiz2 only had Joe's scores, so it drops out of the with-scores list
    assert len(gb.gradeables_with_scores()) == 3
    assert quizzes.combined_pct(mary) == 15 * 100 / 15.
def test_gradeables_with_scores():
    """Removing a gradeable also removes it from the with-scores list."""
    course = setup()
    assert len(course.gradeables_with_scores()) == 4
    second_quiz = course.gradeables[1]
    course.remove_gradeable(second_quiz)
    assert len(course.gradeables_with_scores()) == 3
def setup_simple():
    """One student, one Homework category (100%, est_ct=3), one 15-point
    gradeable with a single question scored 1/15."""
    gb = Course('Math', 'Fall 2017')
    joe = Student(gb, 'Joe', 'Davis', 'joe@pdx.edu')
    gb.students.append(joe)
    homework = Category(gb, 'Homework', 100, 0, 3)
    gb.categories.append(homework)
    hw1 = Gradeable(gb, 'HW1', homework, 15)
    hw1.add_question(15)
    gb.gradeables.append(hw1)
    s = gb.get_score(joe, hw1, hw1.questions[0])
    s.value = 1
    return gb
def test_rpt_avg_score_needed_for_grade():
    """Average needed on remaining work: with 1 of est_ct=3 homeworks at
    1/15 (= 100/15 pct), the other two must average (90*3 - 100/15)/2."""
    gb = setup_simple()
    joe = gb.students[0]
    assert abs(joe.avg_score_needed_for_grade(90) - (90 * 3 - 100 / 15) / 2) < .00001
def test_rpt_avg_score_needed_for_grade2():
    """With a perfect HW1 the needed average on remaining work drops.

    Bug fix: the original set the score but asserted nothing, so this
    test could never fail.  The expectation mirrors the formula pinned
    by test_rpt_avg_score_needed_for_grade with pct = 100.
    """
    gb = setup_simple()
    joe = gb.students[0]
    hw1 = gb.gradeables[0]
    s = gb.get_score(joe, hw1, hw1.questions[0])
    s.value = 15
    assert abs(joe.avg_score_needed_for_grade(90) - (90 * 3 - 100) / 2) < .00001
def setup_simple2():
    """One student, three categories (Homework 40% est_ct=3, Exam1 20%,
    Final 30%); HW1 scored 1/15, Exam1 scored 25/100, Final unscored."""
    gb = Course('Math', 'Fall 2017')
    joe = Student(gb, 'Joe', 'Davis', 'joe@pdx.edu')
    gb.students.append(joe)
    homework = Category(gb, 'Homework', 40, 0, 3)
    exam1 = Category(gb, 'Exam1', 20, 0, 1)
    final = Category(gb, 'Final', 30, 0, 1)
    gb.categories.append(homework)
    gb.categories.append(exam1)
    gb.categories.append(final)
    hw1 = Gradeable(gb, 'HW1', homework, 15)
    hw1.add_question(15)
    ex1 = Gradeable(gb, 'Exam1', exam1, 100)
    ex1.add_question(100)
    finl = Gradeable(gb, 'Final', final, 100)
    finl.add_question(100)
    gb.gradeables.append(hw1)
    gb.gradeables.append(ex1)
    gb.gradeables.append(finl)
    s = gb.get_score(joe, hw1, hw1.questions[0])
    s.value = 1
    s = gb.get_score(joe, ex1, ex1.questions[0])
    s.value = 25
    return gb
def test_rpt_avg_score_needed_for_grade3():
    """Hope factor / partial-grade estimate across three weighted
    categories with the Final still unscored.

    Cleanup: the original computed ``expected`` and then ignored it,
    recomputing the same expression inline in the assert.
    """
    gb = setup_simple2()
    hw, ex1, finl = gb.categories
    assert hw.actual_ct() == 1
    assert ex1.actual_ct() == 1
    assert finl.actual_ct() == 0
    # weight still "up for grabs": 2 of 3 homeworks plus the whole final
    hope_factor = 1 / (0.40 * (2 / 3) + 0.3)
    assert abs(gb.hope_factor() - hope_factor) < .00001
    joe = gb.students[0]
    cats = gb.categories_with_scores()
    assert len(cats) == 2
    partial_est = (.40 * (1 / 3) * (1 / 15 * 100) + .20 * (1 / 1) * 25)
    assert abs(joe.partial_est_grade() - partial_est) < .00001
    expected = (90 - partial_est) * hope_factor
    assert abs(joe.avg_score_needed_for_grade(90) - expected) < .00001
def test_category_with_gradeable_pcts():
    """A category with per-gradeable weights [40, 35, 25]: as exams are
    scored, weights are assigned to sorted scores (lowest weight to the
    lowest score) and the combined percentage updates accordingly."""
    gb = Course('Math', 'Fall 2017')
    cat = Category(gb, name='Exams', pct_of_grade=100, est_ct=3, gradeable_pcts=[40, 35, 25])
    gb.categories.append(cat)
    exam1 = Gradeable(gb, name='Exam 1', category=cat, total_pts=50)
    exam1.add_question(50)
    gb.gradeables.append(exam1)
    exam2 = Gradeable(gb, name='Exam 2', category=cat, total_pts=50)
    exam2.add_question(50)
    gb.gradeables.append(exam2)
    exam3 = Gradeable(gb, name='Exam 3', category=cat, total_pts=50)
    exam3.add_question(50)
    gb.gradeables.append(exam3)
    joe = Student(gb, first='Joe', last='Crow', email='joe@pdx.edu')
    gb.students.append(joe)
    # one exam scored: only the smallest weight (25) is in play
    s = gb.get_score(joe, exam1, exam1.questions[0])
    s.value = 45
    gpcts = [g.adjusted_score(joe) * 100 / g.total_pts for g in cat.gradeables_with_scores()]
    assert (sorted(gpcts) == [90])
    assert (cat.gradeable_pcts[:1] == [25])
    assert (cat.combined_pct(joe) == 90)
    assert (joe.grade() == 90)
    assert (gb.hope_factor() == 1.0 / 0.75)
    assert (joe.avg_score_needed_for_grade(90) == 90)
    # two exams scored: weights 25 and 35 apply, lowest weight to lowest score
    s = gb.get_score(joe, exam2, exam2.questions[0])
    s.value = 10
    gpcts = [g.adjusted_score(joe) * 100 / g.total_pts for g in cat.gradeables_with_scores()]
    assert (sorted(gpcts) == [20, 90])
    assert (cat.gradeable_pcts[:2] == [25, 35])
    assert (cat.combined_pct(joe) == 3650 / 60)
    assert (joe.grade() == 3650 / 60)
    assert (gb.hope_factor() == 1.0 / 0.40)
    assert (joe.avg_score_needed_for_grade(80) == pytest.approx(108.75))
    # all exams scored: nothing left to earn, so hope_factor is None
    s = gb.get_score(joe, exam3, exam3.questions[0])
    s.value = 40
    gpcts = [g.adjusted_score(joe) * 100 / g.total_pts for g in cat.gradeables_with_scores()]
    assert (sorted(gpcts) == [20, 80, 90])
    assert (cat.gradeable_pcts == [25, 35, 40])
    assert (cat.combined_pct(joe) == 69)
    assert (joe.grade() == 69)
    assert (gb.hope_factor() == None)
    assert (joe.avg_score_needed_for_grade(80) == None)
|
// Doxygen-generated search index: each entry maps a lower-cased search
// token to the documented members that match it. Do not edit by hand.
var searchData=
[
  ['window',['window',['../class_a_p_i_explorer_1_1_a_p_i_explorer.html#a451f3cdc177e58021e590f51bc836a49',1,'APIExplorer.APIExplorer.window()'],['../class_d_c2126_a_1_1dc2126a_gui.html#a03d2568ac17a48e5d049946cb2039dd6',1,'DC2126A.dc2126aGui.window()'],['../class_hr_listener_1_1hr_listener_gui.html#a3db7e640f712c6c4ace458167fed27e5',1,'HrListener.hrListenerGui.window()'],['../class_l_b_r_connection_1_1_l_b_r_connection_gui.html#a061f7507e16329a2cd43a0c964433177',1,'LBRConnection.LBRConnectionGui.window()'],['../class_l_e_d_ping_1_1_l_e_d_ping_app.html#a8b542a2c03f49f0c6f4b25bb0ae2f643',1,'LEDPing.LEDPingApp.window()'],['../class_mgr_listener_1_1notif_gui.html#a311f0d16383a1ca5dd291144ad550b67',1,'MgrListener.notifGui.window()'],['../class_pk_gen_1_1_pk_gen_gui.html#a5e62ec041430ae2ce7b1f53cff5dac1b',1,'PkGen.PkGenGui.window()'],['../class_sensor_data_receiver_1_1data_gui.html#aa5b507bfe961086783768932ba541092',1,'SensorDataReceiver.dataGui.window()'],['../class_temp_monitor_1_1_temp_monitor_gui.html#a25b0c7d85648cd2d525c2df430482e67',1,'TempMonitor.TempMonitorGui.window()'],['../class_upstream_1_1_upstream_gui.html#ab8e7b1eadf17499c3235244f9728e68e',1,'Upstream.UpstreamGui.window()'],['../class_xively_1_1xively_gui.html#aced3102ce09112b350790b119d5a4938',1,'Xively.xivelyGui.window()']]],
  ['window_5ftitle',['WINDOW_TITLE',['../namespace_d_c2126_a.html#a1532a9c50aed117ee26124c7b447bbe6',1,'DC2126A']]],
  ['wkp_5fdc2126a',['WKP_DC2126A',['../namespace_d_c2126_a.html#ad575a30a80d2ed0ad30000c56ac8ae77',1,'DC2126A']]]
];
|
// Registry of Leaflet tile layers, keyed by a one-letter layerId.
// NOTE(review): all tile URLs are plain http and some providers
// (notably MapQuest's open tiles) have retired these endpoints —
// confirm and migrate to https/current URLs.
var layers = module.exports = {};
var attribution = '© <a href="http://www.openstreetmap.org/copyright">OpenStreetMap közreműködők</a>';
/**
 * Default mapnik tiles from http://www.openstreetmap.org
 */
layers.mapnik = L.tileLayer('http://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png', {
  maxZoom: 19,
  subdomains: 'abc',
  attribution: attribution,
  detectRetina: true
});
/**
 * OpenCycleMap tiles from http://www.opencyclemap.org/
 */
layers.cycleMap = L.tileLayer('http://{s}.tile.opencyclemap.org/cycle/{z}/{x}/{y}.png', {
  layerId: 'C',
  maxZoom: 18,
  subdomains: 'abc',
  attribution: attribution,
  detectRetina: true
});
/**
 * MapQuest tiles from http://open.mapquest.com/
 */
layers.mapquest = L.tileLayer('http://otile{s}.mqcdn.com/tiles/1.0.0/osm/{z}/{x}/{y}.png', {
  layerId: 'Q',
  maxZoom: 18,
  subdomains: '1234',
  attribution: attribution,
  detectRetina: true
});
/**
 * Transport tiles from http://www.thunderforest.com/transport/ (url from Opencyclemap)
 */
layers.transport = L.tileLayer('http://{s}.tile2.opencyclemap.org/transport/{z}/{x}/{y}.png', {
  layerId: 'T',
  maxZoom: 18,
  subdomains: 'abc',
  attribution: attribution,
  detectRetina: true
});
/**
 * Humanitarian tiles from http://hot.openstreetmap.org/ (url from OpenStreetmap.org)
 */
layers.humanitarian = L.tileLayer('http://{s}.tile.openstreetmap.fr/hot/{z}/{x}/{y}.png', {
  layerId: 'H',
  maxZoom: 20,
  subdomains: 'abc',
  attribution: attribution,
  detectRetina: true
});
// Resolve a one-letter layerId to its tile layer; mapnik is the fallback.
layers.getById = function (layerId) {
  var byId = {
    C: layers.cycleMap,
    Q: layers.mapquest,
    T: layers.transport,
    H: layers.humanitarian
  };
  return byId[layerId] || layers.mapnik;
};
|
// Page-level UI behaviors: smooth scrolling, scroll-to-top button,
// navbar shrink/scrollspy, magnificPopup modals and floating labels.
(function($) {
  "use strict"; // Start of use strict
  // Smooth scrolling using jQuery easing
  $('a.js-scroll-trigger[href*="#"]:not([href="#"])').click(function() {
    if (location.pathname.replace(/^\//, '') == this.pathname.replace(/^\//, '') && location.hostname == this.hostname) {
      var target = $(this.hash);
      target = target.length ? target : $('[name=' + this.hash.slice(1) + ']');
      if (target.length) {
        $('html, body').animate({
          scrollTop: (target.offset().top - 70)
        }, 1000, "easeInOutExpo");
        return false;
      }
    }
  });
  // Scroll to top button appear
  $(document).scroll(function() {
    var scrollDistance = $(this).scrollTop();
    if (scrollDistance > 100) {
      $('.scroll-to-top').fadeIn();
    } else {
      $('.scroll-to-top').fadeOut();
    }
  });
  // Closes responsive menu when a scroll trigger link is clicked
  $('.js-scroll-trigger').click(function() {
    $('.navbar-collapse').collapse('hide');
  });
  // Activate scrollspy to add active class to navbar items on scroll
  $('body').scrollspy({
    target: '#mainNav',
    offset: 80
  });
  // Collapse Navbar
  var navbarCollapse = function() {
    if ($("#mainNav").offset().top > 100) {
      $("#mainNav").addClass("navbar-shrink");
    } else {
      $("#mainNav").removeClass("navbar-shrink");
    }
  };
  // Collapse now if page is not at top
  navbarCollapse();
  // Collapse the navbar when page is scrolled
  $(window).scroll(navbarCollapse);
  // Modal popup for the what-we-do items
  $('.whatwedo-item').magnificPopup({
    type: 'inline',
    preloader: false,
    focus: '#username',
    modal: true
  });
  $(document).on('click', '.whatwedo-modal-dismiss', function(e) {
    e.preventDefault();
    $.magnificPopup.close();
  });
  // Floating label headings for the contact form
  $(function() {
    $("body").on("input propertychange", ".floating-label-form-group", function(e) {
      $(this).toggleClass("floating-label-form-group-with-value", !!$(e.target).val());
    }).on("focus", ".floating-label-form-group", function() {
      $(this).addClass("floating-label-form-group-with-focus");
    }).on("blur", ".floating-label-form-group", function() {
      $(this).removeClass("floating-label-form-group-with-focus");
    });
  });
})(jQuery); // End of use strict
|
// Minimal todo-list root component; relies on Todo and TodoForm being
// defined elsewhere on the page (no module imports here).
function App() {
  // Seed state with three incomplete todos.
  const [todos, setTodos] = React.useState([
    {text: 'learn react', isCompleted: false,},
    {text: 'meet friend for lunch', isCompleted: false,},
    {text: 'build todo app', isCompleted: false,},
  ]);
  // Append a new incomplete todo with the given text.
  const addTodo = text => {
    const newTodos = [...todos, {text: text, isCompleted: false}];
    setTodos(newTodos);
  }
  // Remove the todo at the given index (copy first; state is immutable).
  const removeTodo = index => {
    let temp = [...todos];
    temp.splice(index, 1);
    setTodos(temp);
  }
  return (
    <div className="app">
      <div className="todo-list">
        {/* NOTE(review): key={i} on a removable list can confuse React's
            reconciliation — consider a stable id per todo. */}
        {todos.map((todo, i) =>
          <Todo index={i} key={i} todo={todo} remove={removeTodo}/>)}
        <TodoForm addTodo={addTodo} />
      </div>
    </div>
  );
}
ReactDOM.render(<App/>,
  document.getElementById('root')
)
|
"""
Copyright (c) 2015 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from __future__ import print_function, unicode_literals, absolute_import
import os
import re
import inspect
def test_if_all_versions_match():
    """The RPM spec, setup.py and osbs.__version__ must all agree."""

    def read_version(path, regex):
        # The pattern must match exactly once, else the file is ambiguous.
        with open(path, "r") as fd:
            matches = re.findall(regex, fd.read())
        if len(matches) != 1:
            raise Exception("Version not found!")
        return matches[0]

    import osbs
    from osbs import __version__

    # RPM versions never carry the trailing '.dev' suffix.
    if __version__.endswith('.dev'):
        version_without_dev = __version__[:-4]
    else:
        version_without_dev = __version__

    fp = inspect.getfile(osbs)
    project_dir = os.path.dirname(os.path.dirname(fp))
    specfile = os.path.join(project_dir, "osbs-client.spec")
    setup_py = os.path.join(project_dir, "setup.py")
    spec_version = read_version(specfile, r"\nVersion:\s*(.+?)\s*\n")
    setup_py_version = read_version(setup_py, r"version=['\"](.+)['\"]")
    assert spec_version == version_without_dev
    assert setup_py_version == __version__
|
'''Simplified Solution'''
def countApplesAndOranges(s, t, a, b, apples, oranges):
    """Print how many apples and oranges land on the house span [s, t].

    A fruit at offset d from the apple tree (position a) lands at a + d;
    likewise b + d for the orange tree.  A fruit hits the house when
    s <= position <= t.

    Idiom: direct range comparison and generator expressions replace the
    original list-building ``in range(s, t+1)`` membership test.
    """
    apple_count = sum(1 for apple in apples if s <= apple + a <= t)
    orange_count = sum(1 for orange in oranges if s <= orange + b <= t)
    print(apple_count)
    print(orange_count)
'''Descriptive Solution'''
# def countApplesAndOranges(s, t, a, b, apples, oranges):
# apple_count = orange_count = 0
# for apple in apples:
# if apple+a in range(s, t+1):
# apple_count += 1
# for orange in oranges:
# if orange+b in range(s, t+1):
# orange_count += 1
# print(apple_count)
# print(orange_count)
if __name__ == '__main__':
    # Read the HackerRank-style input: house span, tree positions,
    # fruit counts (m, n are read but unused), then the two offset lists.
    st = input().split()
    s = int(st[0])
    t = int(st[1])
    ab = input().split()
    a = int(ab[0])
    b = int(ab[1])
    mn = input().split()
    m = int(mn[0])
    n = int(mn[1])
    apples = list(map(int, input().rstrip().split()))
    oranges = list(map(int, input().rstrip().split()))
    countApplesAndOranges(s, t, a, b, apples, oranges)
|
# encoding: utf-8
"""Unit tests for ckan/logic/auth/delete.py.
"""
import pytest
from six import string_types
import ckan.logic.auth.delete as auth_delete
import ckan.tests.factories as factories
import ckan.tests.helpers as helpers
from ckan import model
logic = helpers.logic
@pytest.mark.usefixtures("clean_db", "with_request_context")
class TestDeleteAuth:
    """Auth checks for resource_delete and resource_view_delete."""

    def test_anon_cant_delete(self):
        """Anonymous users may not delete resources."""
        context = {"user": None, "model": model}
        params = {}
        with pytest.raises(logic.NotAuthorized):
            helpers.call_auth("resource_delete", context=context, **params)

    def test_no_org_user_cant_delete(self):
        """Users outside the owning org may not delete resources."""
        user = factories.User()
        org = factories.Organization()
        dataset = factories.Dataset(
            owner_org=org["id"], resources=[factories.Resource()]
        )
        # Call the auth function directly rather than via call_auth.
        response = auth_delete.resource_delete(
            {"user": user["name"], "model": model},
            {"id": dataset["resources"][0]["id"]},
        )
        assert not response["success"]

    def test_org_user_can_delete(self):
        """Org editors may delete resources in their org's datasets."""
        user = factories.User()
        org_users = [{"name": user["name"], "capacity": "editor"}]
        org = factories.Organization(users=org_users)
        dataset = factories.Dataset(
            owner_org=org["id"], resources=[factories.Resource()], user=user
        )
        response = auth_delete.resource_delete(
            {"user": user["name"], "model": model, "auth_user_obj": user},
            {"id": dataset["resources"][0]["id"]},
        )
        assert response["success"]

    @pytest.mark.ckan_config("ckan.plugins", "image_view")
    @pytest.mark.usefixtures("with_plugins")
    def test_anon_cant_delete_2(self):
        """Anonymous users may not delete resource views."""
        context = {"user": None, "model": model}
        params = {}
        with pytest.raises(logic.NotAuthorized):
            helpers.call_auth(
                "resource_view_delete", context=context, **params
            )

    @pytest.mark.ckan_config("ckan.plugins", "image_view")
    @pytest.mark.usefixtures("with_plugins")
    def test_no_org_user_cant_delete_2(self):
        """Users outside the owning org may not delete resource views."""
        user = factories.User()
        org = factories.Organization()
        dataset = factories.Dataset(
            owner_org=org["id"], resources=[factories.Resource()]
        )
        resource_view = factories.ResourceView(
            resource_id=dataset["resources"][0]["id"]
        )
        context = {"user": user["name"], "model": model}
        with pytest.raises(logic.NotAuthorized):
            helpers.call_auth(
                "resource_view_delete", context=context, id=resource_view["id"]
            )

    @pytest.mark.ckan_config("ckan.plugins", "image_view")
    @pytest.mark.usefixtures("with_plugins")
    def test_org_user_can_delete_2(self):
        """Org editors may delete resource views in their org's datasets."""
        user = factories.User()
        org_users = [{"name": user["name"], "capacity": "editor"}]
        org = factories.Organization(users=org_users)
        dataset = factories.Dataset(
            owner_org=org["id"], resources=[factories.Resource()], user=user
        )
        resource_view = factories.ResourceView(
            resource_id=dataset["resources"][0]["id"]
        )
        context = {"user": user["name"], "model": model}
        response = helpers.call_auth(
            "resource_view_delete", context=context, id=resource_view["id"]
        )
        assert response
def test_anon_cant_clear():
    """Anonymous users may not clear resource views."""
    context = {"model": model, "user": None}
    with pytest.raises(logic.NotAuthorized):
        helpers.call_auth("resource_view_clear", context=context)
@pytest.mark.usefixtures("with_request_context")
def test_normal_user_cant_clear():
    """A regular (non-sysadmin) user may not clear resource views."""
    someone = factories.User()
    context = {"model": model, "user": someone["name"]}
    with pytest.raises(logic.NotAuthorized):
        helpers.call_auth("resource_view_clear", context=context)
@pytest.mark.usefixtures("with_request_context")
def test_sysadmin_user_can_clear():
    """Sysadmins are allowed to clear resource views."""
    admin = factories.User(sysadmin=True)
    context = {"model": model, "user": admin["name"]}
    assert helpers.call_auth("resource_view_clear", context=context)
@pytest.mark.usefixtures("clean_db")
@pytest.mark.ckan_config(u"ckan.auth.allow_dataset_collaborators", True)
class TestPackageMemberDeleteAuth(object):
    """Auth checks for package_collaborator_delete under various roles."""

    def _get_context(self, user):
        # Accept either a user dict (factory output) or a plain name.
        return {
            'model': model,
            'user': user if isinstance(user, string_types) else user.get('name')
        }

    def setup(self):
        """Create one org with admin/editor/member users plus an outsider,
        and one dataset owned by that org."""
        self.org_admin = factories.User()
        self.org_editor = factories.User()
        self.org_member = factories.User()
        self.normal_user = factories.User()
        self.org = factories.Organization(
            users=[
                {'name': self.org_admin['name'], 'capacity': 'admin'},
                {'name': self.org_editor['name'], 'capacity': 'editor'},
                {'name': self.org_member['name'], 'capacity': 'member'},
            ]
        )
        self.dataset = factories.Dataset(owner_org=self.org['id'])

    def test_delete_org_admin_is_authorized(self):
        """Org admins may remove collaborators."""
        context = self._get_context(self.org_admin)
        assert helpers.call_auth(
            'package_collaborator_delete',
            context=context, id=self.dataset['id'])

    def test_delete_org_editor_is_not_authorized(self):
        """Org editors may not remove collaborators."""
        context = self._get_context(self.org_editor)
        with pytest.raises(logic.NotAuthorized):
            helpers.call_auth(
                'package_collaborator_delete',
                context=context, id=self.dataset['id'])

    def test_delete_org_member_is_not_authorized(self):
        """Org members may not remove collaborators."""
        context = self._get_context(self.org_member)
        with pytest.raises(logic.NotAuthorized):
            helpers.call_auth(
                'package_collaborator_delete',
                context=context, id=self.dataset['id'])

    def test_delete_org_admin_from_other_org_is_not_authorized(self):
        """Admins of a different org may not remove collaborators."""
        org_admin2 = factories.User()
        factories.Organization(
            users=[
                {'name': org_admin2['name'], 'capacity': 'admin'},
            ]
        )
        context = self._get_context(org_admin2)
        with pytest.raises(logic.NotAuthorized):
            helpers.call_auth(
                'package_collaborator_delete',
                context=context, id=self.dataset['id'])

    def test_delete_missing_org_is_not_authorized(self):
        """An org admin has no say over a dataset that has no org."""
        dataset = factories.Dataset(owner_org=None)
        context = self._get_context(self.org_admin)
        with pytest.raises(logic.NotAuthorized):
            helpers.call_auth(
                'package_collaborator_delete',
                context=context, id=dataset['id'])

    @pytest.mark.ckan_config('ckan.auth.allow_admin_collaborators', True)
    def test_delete_collaborator_admin_is_authorized(self):
        """A collaborator with 'admin' capacity may remove collaborators."""
        user = factories.User()
        helpers.call_action(
            'package_collaborator_create',
            id=self.dataset['id'], user_id=user['id'], capacity='admin')
        context = self._get_context(user)
        assert helpers.call_auth(
            'package_collaborator_delete', context=context, id=self.dataset['id'])

    @pytest.mark.parametrize('role', ['editor', 'member'])
    def test_delete_collaborator_editor_and_member_are_not_authorized(self, role):
        """Editor/member collaborators may not remove collaborators."""
        user = factories.User()
        helpers.call_action(
            'package_collaborator_create',
            id=self.dataset['id'], user_id=user['id'], capacity=role)
        context = self._get_context(user)
        with pytest.raises(logic.NotAuthorized):
            helpers.call_auth(
                'package_collaborator_delete',
                context=context, id=self.dataset['id'])

    @pytest.mark.ckan_config('ckan.auth.create_dataset_if_not_in_organization', True)
    @pytest.mark.ckan_config('ckan.auth.create_unowned_dataset', True)
    def test_delete_unowned_datasets(self):
        """The creator of an unowned dataset may manage its collaborators."""
        user = factories.User()
        dataset = factories.Dataset(user=user)
        assert dataset['owner_org'] is None
        assert dataset['creator_user_id'] == user['id']
        context = self._get_context(user)
        assert helpers.call_auth(
            'package_collaborator_delete', context=context, id=dataset['id'])
|
#!/usr/bin/env python3
"""Report how much of a one-year subscription's cost has been used up.

Usage: sunkcost <cost> <start_date>   (start_date formatted YYYY-MM-DD)
"""
import datetime
import sys

# Length of the subscription period in days (the original hard-coded 365 inline).
SUBSCRIPTION_DAYS = 365


def subscription_split(cost, startdate, today):
    """Split `cost` into (spent, remaining, fraction_remaining).

    cost      -- total subscription cost
    startdate -- datetime.date the subscription began
    today     -- datetime.date to evaluate at
    Returns a 3-tuple of floats; fraction_remaining is 1.0 at the start date.
    """
    elapsed = (today - startdate).days
    proportion = elapsed / SUBSCRIPTION_DAYS
    spent = proportion * cost
    remaining = (1 - proportion) * cost
    return spent, remaining, 1 - proportion


def main(argv):
    """Parse CLI args and print the subscription summary."""
    if len(argv) < 2:
        print("usage: sunkcost <cost> <start_date>")
        sys.exit(1)  # bug fix: was sys.exit() which reported success (0) on a usage error
    cost = float(argv[0])
    yyyy, mm, dd = argv[1].split("-")
    startdate = datetime.date(int(yyyy), int(mm), int(dd))
    # note: the original computed an unused `enddate`; removed.
    spent, remaining, frac = subscription_split(cost, startdate, datetime.date.today())
    print(
        f"Subscription: £{spent:.2f} of £{cost:.2f} -- £{remaining:.2f} ({frac * 100:.0f}%) remaining")


if __name__ == "__main__":
    main(sys.argv[1:])
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
import ConfigParser
from hermes_python.hermes import Hermes
from hermes_python.ontology import *
from snipsowm.snipsowm import SnipsOWM
import io
import math
import datetime as dt
from dateutil.parser import parse
CONFIGURATION_ENCODING_FORMAT = "utf-8"
CONFIG_INI = "config.ini"
class SnipsConfigParser(ConfigParser.SafeConfigParser):
    """Config parser that can dump its contents as a plain nested dict."""

    def to_dict(self):
        """Return {section: {option: value}} for every section in the config."""
        result = {}
        for section in self.sections():
            result[section] = dict(self.items(section))
        return result
def read_configuration_file(configuration_file):
    """Parse configuration_file into a nested dict.

    An unreadable or malformed file yields an empty dict (best effort).
    """
    try:
        with io.open(configuration_file, encoding=CONFIGURATION_ENCODING_FORMAT) as f:
            parser = SnipsConfigParser()
            parser.readfp(f)
            return parser.to_dict()
    except (IOError, ConfigParser.Error):
        return {}
def subscribe_intent_callback(hermes, intentMessage):
    """Hermes intent handler: reload the skill configuration, then run the action."""
    skill_conf = read_configuration_file(CONFIG_INI)
    action_wrapper(hermes, intentMessage, skill_conf)
def action_wrapper(hermes, intentMessage, conf):
    """ Write the body of the function that will be executed once the intent is recognized.
    In your scope, you have the following objects :
    - intentMessage : an object that represents the recognized intent
    - hermes : an object with methods to communicate with the MQTT bus following the hermes protocol.
    - conf : a dictionary that holds the skills parameters you defined
    Refer to the documentation for further details.

    NOTE(review): this body references several names that are never defined in
    this module: `snips` (used for `snips.types.*` and `snips.intent.*`),
    `result_sentence` (passed to publish_end_session), and it reads the global
    `snipsowm` which is only bound when the module runs as __main__. As written,
    it will raise NameError at runtime -- confirm against the upstream template.
    """
    # Determine datetime
    # local name `datetime` rebinds the slot value (the datetime module itself
    # is imported as `dt` above, so there is no module shadowing)
    datetime = None
    if intentMessage.slots.forecast_start_datetime:
        datetime = intentMessage.slots.forecast_start_datetime[0]
        # NOTE(review): `snips` is undefined here -- presumably hermes_python
        # ontology types were intended (e.g. InstantTime / TimeInterval slots).
        if isinstance(datetime, snips.types.InstantTime):
            datetime = (datetime.datetime).replace(tzinfo=None)
        elif isinstance(datetime, snips.types.TimeInterval):
            datetime = (datetime.end).replace(tzinfo=None)
    # Determine granularity
    granularity = None
    if datetime: # We have an information about the date.
        now = dt.datetime.now().replace(tzinfo=None)
        delta_days = abs((datetime - now).days)
        if delta_days > 10: # There a week difference between today and the date we want the forecast.
            granularity = 2 # Give the day of the forecast date, plus the number of the day in the month.
        elif delta_days > 5: # There a 10-day difference between today and the date we want the forecast.
            granularity = 1 # Give the full date
        else:
            granularity = 0 # Just give the day of the week
    else:
        granularity = 0
    # Determine condition
    condition_name = None
    try:
        condition_name = intentMessage.slots.forecast_condition_name[0] if intentMessage.slots.forecast_condition_name else None
    except Exception:
        pass
    # The following lines overwrite the slot collections on the message object
    # itself, replacing each list with its first element (or None).
    intentMessage.slots.forecast_locality = intentMessage.slots.forecast_locality[0] if intentMessage.slots.forecast_locality else None
    intentMessage.slots.forecast_region = intentMessage.slots.forecast_region[0] if intentMessage.slots.forecast_region else None
    intentMessage.slots.forecast_country = intentMessage.slots.forecast_country[0] if intentMessage.slots.forecast_country else None
    intentMessage.slots.forecast_geographical_poi = intentMessage.slots.forecast_geographical_poi[0] if intentMessage.slots.forecast_geographical_poi else None
    #print "cond: {}, datetime: {}, Locality: {}, granularity: {}".format(condition_name, datetime, snips.intent.forecast_locality, granularity)
    # NOTE(review): `snips` and `snips.intent.*` are undefined; the slot values
    # prepared above on intentMessage.slots look like the intended arguments.
    snipsowm.speak_condition(snips, condition_name, datetime, granularity=granularity, Locality=snips.intent.forecast_locality, Region=snips.intent.forecast_region, Country=snips.intent.forecast_country, POI=snips.intent.forecast_geographical_poi)
    current_session_id = intentMessage.session_id
    # NOTE(review): `result_sentence` is never assigned -- NameError here.
    hermes.publish_end_session(current_session_id, result_sentence)
if __name__ == "__main__":
    # NOTE(review): OpenWeatherMap API key is hard-coded in source -- rotate it
    # and load it from config.ini instead of committing it.
    snipsowm= SnipsOWM("5459abd58e64fe7f121792fabe60fe5c","France","fr_FR")
    # Connect to the local MQTT broker and block, dispatching the weather intent.
    with Hermes("localhost:1883") as h:
        h.subscribe_intent("searchWeatherForecastCondition", subscribe_intent_callback) \
.start()
|
// ------------------------------------------------------------
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License (MIT). See License.txt in the repo root for license information.
// ------------------------------------------------------------
#pragma once
namespace Naming
{
    // Async operation run by the Naming StoreService to process an inner
    // CreateService request: it tentatively writes the service, forwards the
    // request to the FM, and finishes or reverts the tentative write.
    class StoreService::ProcessInnerCreateServiceRequestAsyncOperation : public ProcessRequestAsyncOperation
    {
    public:
        ProcessInnerCreateServiceRequestAsyncOperation(
            Transport::MessageUPtr && request,
            __in NamingStore &,
            __in StoreServiceProperties &,
            Common::TimeSpan const timeout,
            Common::AsyncCallback const & callback,
            Common::AsyncOperationSPtr const & root);
    protected:
        // Read-only accessor for the partitioned service descriptor (psd_).
        __declspec(property(get=get_ServiceDescriptor)) PartitionedServiceDescriptor const & ServiceDescriptor;
        PartitionedServiceDescriptor const & get_ServiceDescriptor() const { return psd_; }
        // Mutable accessor; the setter takes ownership by move.
        __declspec(property(get=get_MutableServiceDescriptor, put=set_MutableServiceDescriptor)) PartitionedServiceDescriptor & MutableServiceDescriptor;
        PartitionedServiceDescriptor & get_MutableServiceDescriptor() { return psd_; }
        void set_MutableServiceDescriptor(PartitionedServiceDescriptor && value) { psd_ = std::move(value); }
        void OnCompleted();
        // Declares the perf counters for the NOCreateService operation.
        DEFINE_PERF_COUNTERS ( NOCreateService )
        // Extracts the service descriptor from the incoming request message.
        Common::ErrorCode HarvestRequestMessage(Transport::MessageUPtr &&);
        void PerformRequest(Common::AsyncOperationSPtr const &);
    private:
        // Workflow steps: tentative write -> FM request -> finish, with
        // revert on failure; see matching *Complete continuations.
        void StartCreateService(Common::AsyncOperationSPtr const &);
        void OnTentativeWriteComplete(Common::AsyncOperationSPtr const &, bool expectedCompletedSynchronously);
        void StartRequestToFM(Common::AsyncOperationSPtr const &);
        void OnRequestToFMComplete(Common::AsyncOperationSPtr const &, bool expectedCompletedSynchronously);
        void GetServiceDescriptionFromFM(Common::AsyncOperationSPtr const &);
        void OnGetServiceDescriptionComplete(Common::AsyncOperationSPtr const &, bool expectedCompletedSynchronously);
        void FinishCreateService(Common::AsyncOperationSPtr const &);
        void RevertTentativeCreate(Common::AsyncOperationSPtr const &);
        void OnRevertTentativeComplete(Common::AsyncOperationSPtr const &, bool expectedCompletedSynchronously);
        void OnWriteServiceComplete(Common::AsyncOperationSPtr const &, bool expectedCompletedSynchronously);
        // mutable: also assigned through the const accessor path above.
        mutable PartitionedServiceDescriptor psd_;
        bool isRebuildFromFM_;
        Common::ErrorCode revertError_;
    };
}
|
from rest_framework import serializers
from ..common.aws import *
class AwsEcsTaskDefinitionSerializer(serializers.Serializer):
    # Serializes AWS ECS task definition settings.
    # Field order is preserved in DRF output representations -- do not reorder.
    # Writable, mapped from/to the object's 'aws_ecs_task_definition_arn' attribute.
    task_definition_arn = serializers.CharField(
        source='aws_ecs_task_definition_arn', max_length=1000,
        required=False)
    # Read-only URL derived on the object; never accepted as input.
    task_definition_infrastructure_website_url = serializers.CharField(
        source='aws_ecs_task_definition_infrastructure_website_url',
        read_only=True)
    # Optional capacity settings; no 'source' so attribute names match field names.
    allocated_cpu_units = serializers.IntegerField(required=False)
    allocated_memory_mb = serializers.IntegerField(required=False)
|
'''
Copyright 2017, Fujitsu Network Communications, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import json
import sys
import difflib
import os
from io import IOBase
import re
from . import file_Utils
import os.path
from xml.dom import minidom
from xml.etree import ElementTree
from xml.etree.ElementTree import tostring
from warrior.Framework.OSS import xmltodict
from .print_Utils import print_debug, print_info, print_error, print_warning, print_exception
from collections import OrderedDict
try:
from lxml import etree, objectify
except ImportError as err:
# print_error("Module lxml is not installed, Refer to the exception trace below for more details")
# print_exception(err)
pass
def create_subelement(parent, tag, attrib):
    """Append a new child element with the given tag and attribute dict to parent; return it."""
    return ElementTree.SubElement(parent, tag, attrib)
def getValuebyTag (filename, tag):
    """Return the text content of the first <tag> element in the xml file."""
    fragment = minidom.parse(filename).getElementsByTagName(tag)[0].toxml()
    return fragment.replace('<' + tag + '>', '').replace('</' + tag + '>', '')
def getValuebyAttribute (filename, attribute, tag):
    """Get the value of the attribute in a tag from the xml file.

    Returns the string value of `attribute` on the first <tag> element.
    """
    doc = minidom.parse(filename)
    element = doc.getElementsByTagName(tag)[0]
    # bug fix: the previous code called .toxml() first, producing a plain
    # string which has no .attributes member, so this always raised
    # AttributeError; operate on the DOM element instead.
    return element.attributes[attribute].value
def get_first_child(node):
    """Gets the first child of a given node.
    Returns None if there is no child for the node."""
    # bug fix: the loop previously fell through all children and returned the
    # LAST one, contradicting both the function name and its docstring.
    for child in node:
        return child
    return None
def get_last_child(node):
    """Return the last element of a full document-order walk of `node`.

    Note: iter() yields `node` itself first, so an element is returned
    even when there are no children; None only for an empty iterator.
    """
    last = None
    for descendant in node.iter():
        last = descendant
    return last
def getNodeCount(filename, node):
    """Count elements matching tag `node` anywhere in the xml file."""
    with open(filename, 'rt') as f:
        tree = ElementTree.parse(f)
    return len(tree.findall('.//' + node))
def get_tree_from_file(filepath):
    """Parse filepath into an ElementTree.

    Returns False (after logging an error) when the file does not exist.
    """
    if not file_Utils.fileExists(filepath):
        print_error("xml file does not exist in provided path {0}".format(filepath))
        return False
    return ElementTree.parse(filepath)
def getRoot(filename):
    """Return the root element of the xml file.

    Exits the process with a non-zero status if the file fails to parse.
    """
    try:
        tree = ElementTree.parse(filename)
        root = tree.getroot()
    except ElementTree.ParseError as msg:
        print_error("The xml file: {0} is {1}".format(filename, msg))
        # bug fix: previously exited with sys.exit(0) (success status) and
        # printed a stray "DONE 1" debug line on the error path.
        sys.exit(1)
    return root
def convert_element_to_string(element):
    """Serialize an xml element to a unicode string (tostring yields bytes in py3)."""
    return tostring(element).decode(encoding='utf-8')
def getNodeValuebyAttribute (filename, node, attribute):
    """Return `attribute`'s value from the first `node` element under the root.

    Returns None when the node is absent or lacks the attribute.
    """
    element = getRoot (filename).find(node)
    if element is None:
        return None
    return element.get(attribute)
def nodeExists (filename, node):
    """Return True if at least one `node` element exists in the xml file."""
    return getNodeCount (filename, node) > 0
def getNodeText(filename, node):
    """Return the text of the first matching node.

    Logs a warning and returns None when the node is absent.
    """
    found = ElementTree.parse(filename).getroot().find(node)
    if found is None:
        print_warning("node not found")
        return None
    return found.text
def get_node(filename, node_name):
    """Search for node_name under the root of the xml file.

    Returns the element, or False when not found.
    """
    root = ElementTree.parse(filename).getroot()
    node = root.find(node_name)  # fix: the search was redundantly executed twice
    if node is not None:
        return node
    return False
def write_tree_to_file(root, file_path):
    """Serialize the element `root` (and its subtree) to file_path."""
    ElementTree.ElementTree(root).write(file_path)
def create_comment_element(text):
    """Return a new xml comment node carrying `text`."""
    return ElementTree.Comment(text)
def get_matching_firstlevel_children_from_root(filename, child_tag):
    """Return the root's direct children in `filename` whose tag matches child_tag."""
    return getRoot(filename).findall(child_tag)
def get_matching_firstlevel_children_from_node(node, child_tag):
    """Return the direct children of `node` whose tag matches child_tag."""
    return node.findall(child_tag)
def get_node_list_iterative(filename, node_name):
    """Return an iterator over all elements under the root matching node_name."""
    return ElementTree.parse(filename).getroot().iterfind(node_name)
def getChildNodeTextaslist (parentnode, childnode):
    """Collect the text of every direct child of parentnode matching tag `childnode`."""
    return [match.text for match in parentnode.findall(childnode)]
def get_text_from_direct_child(parentnode, childname):
    """Return the text of the first direct child named childname.

    The text may itself be None/empty; returns False when no such child exists.
    """
    child = parentnode.find(childname)
    return child.text if child is not None else False
def get_child_node_list(parentnode):
    """Return a list of the direct children of parentnode."""
    return list(parentnode)
def get_attributevalue_from_directchildnode (parentnode, childname, attribute):
    """Return `attribute`'s value from the first direct child named childname.

    Returns None when the child lacks the attribute; False when the child
    itself is absent.
    """
    child = parentnode.find(childname)
    return child.get(attribute) if child is not None else False
def getNodeListbyAttribute (parentnode, childnode, attribute):
    """Return a list of the given attribute's value (or None) from each matching direct child."""
    return [match.get(attribute) for match in parentnode.findall(childnode)]
def getChildTextbyParentAttribute (datafile, pnode, patt, pattval, cnode):
    """
    Searches the XML file for a parent node carrying a specific attribute value,
    then returns the text of that parent's child node.
    datafile = xml file searched
    pnode = parent node
    patt = parent node attribute
    pattval = parent node attribute value
    cnode = child node
    Returns: the child's text when found; None when the matching parent has no
    such child; False when no parent attribute matched at all.
    """
    tree = ElementTree.parse(datafile)
    root = tree.getroot()
    value = False
    for node in root.findall(pnode):
        attribute = node.get(patt)
        if attribute == pattval:
            # note: cnode is rebound here from tag name to the found element
            cnode = node.find(cnode)
            if cnode is not None:
                value = cnode.text
            else:
                return None
            break
    return value
def getChildTextbyParentTag (datafile, pnode, cnode):
    """
    Searches the XML file for the first parent `pnode`, then returns the text
    of its child `cnode`.
    datafile = xml file searched
    pnode = parent node
    cnode = child node
    Returns False when either the parent or the child is missing.
    """
    root = ElementTree.parse(datafile).getroot()
    parent = root.find(pnode)
    if parent is None:
        return False
    child = parent.find(cnode)
    if child is None:
        return False
    return child.text
def getChildAttributebyParentTag (datafile, pnode, cnode, cattrib):
    """
    Return attribute `cattrib` of child `cnode` under the first parent `pnode`
    in the given file.
    Returns None when the child lacks the attribute; False when the parent or
    child is missing.
    """
    root = ElementTree.parse(datafile).getroot()
    parent = root.find(pnode)
    if parent is None:
        return False
    child = parent.find(cnode)
    if child is None:
        return False
    return child.get(cattrib)
def getChildTextbyOtherChild (datafile, pnode, cnode, cvalue, rnode):
    """
    Searches XML file for the parent node. Finds the 1st child node and checks its value
    if value is a match, then search for second child and return its value
    datafile = xml file searched
    pnode = parent node
    cnode = child node
    cvalue = child node value
    rnode = reference node or False if doesn't exist
    Returns the reference node's text, or False when never matched.
    """
    tree = ElementTree.parse(datafile)
    root = tree.getroot()
    rnodev = False
    for node in root.findall(pnode):
        # NOTE(review): raises AttributeError if a matching pnode lacks cnode
        # -- confirm callers guarantee the child exists.
        value = node.find(cnode).text
        if value == cvalue:
            # print_debug("-D- rnode: '%s'" % rnode)
            if node.find(rnode) is not None:
                rnodev = node.find(rnode).text
            break
    return rnodev
def verifyParentandChildrenMatch (datafile, pnode, cnode, cvalue, rnode, rvalue):
    """
    Searches the XML file for a parent node whose child `cnode` has text
    `cvalue`; if found, checks whether sibling `rnode` has text `rvalue`.
    datafile = xml file searched
    pnode = parent node
    cnode = child node
    cvalue = child node value
    rnode = reference node
    rvalue = reference node value
    Returns True on the first full match, otherwise False.
    """
    root = ElementTree.parse(datafile).getroot()
    for node in root.findall(pnode):
        if node.find(cnode).text != cvalue:
            continue
        reference = node.find(rnode)
        if reference is not None and reference.text == rvalue:
            return True
    return False
def getNodebyParentandChildrenMatch (datafile, pnode, cnode, cvalue, rnode, rvalue, lnode):
    """
    Searches XML file for the parent node. Finds the 1st child node and checks its value
    if value is a match, then search for second child and check if its value matches. If
    that is a match, then obtain the value of the third child
    datafile = xml file searched
    pnode = parent node
    cnode = child node
    cvalue = child node value
    rnode = reference node
    rvalue = refernce node value
    lnode = lastnode
    Returns the text of lnode on a full match, otherwise False.
    """
    tree = ElementTree.parse(datafile)
    root = tree.getroot()
    lnodev = False
    for node in root.findall(pnode):
        # NOTE(review): assumes every matching pnode has cnode (and, on a full
        # match, lnode); a missing child raises AttributeError.
        value = node.find(cnode).text
        if value == cvalue:
            if node.find(rnode) is not None:
                rnodev = node.find(rnode).text
                # print_debug("rnodev : '%s'" % rnodev)
                if rnodev == rvalue:
                    lnodev = node.find(lnode).text
                    break
    print_debug("getNodebyParentandChildrenMatch_Status: %s" % lnodev)
    return lnodev
def verifyNodesValueMatch (datafile, pnode, cnode, cvalue, rnode, rvalue, bnode, bvalue, dnode=None, dvalue=None):
    """
    Searches the XML file for a parent node whose children match a chain of
    (tag, value) pairs: cnode/cvalue, rnode/rvalue, bnode/bvalue, and the
    optional dnode/dvalue.
    datafile = xml file searched
    pnode = parent node
    cnode = child node
    cvalue = child node value
    rnode = reference node
    rvalue = refernce node value
    Returns True as soon as one parent satisfies every required pair
    (dnode/dvalue only when dnode is given); False otherwise, including when
    the file cannot be parsed.
    """
    try:
        tree = ElementTree.parse(datafile)
        root = tree.getroot()
    except Exception as e:
        print_error("unexpected error %s" % str(e))
        return False
    else:
        status = False
        for node in root.findall(pnode):
            # NOTE(review): assumes each pnode has cnode; missing child raises.
            value = node.find(cnode).text
            if value != cvalue:
                continue
            if node.find(rnode) == None:
                continue
            rnodev = node.find(rnode).text
            if rnodev != rvalue:
                continue
            if node.find(bnode) == None:
                continue
            bnodev = node.find(bnode).text
            if bnodev != bvalue:
                continue
            # all mandatory pairs matched; dnode is an optional extra check
            if dnode == None:
                status = True
                # print_debug("BREAK dnode = None, status '%s'" % status)
                return status
            elif node.find(dnode) is not None:
                dnodev = node.find(dnode).text
                # print_debug("Values : dnodev :%s, dvalue : %s" % (dnodev, dvalue))
                if dnodev == dvalue:
                    # print_debug("MATCH: dnode : %s, dvalue : %s, dnodev : %s" % (dnode, dvalue, dnodev))
                    status = True
                    # print_debug("BREAK END METHOD verifyParentandChildrenMatch_Status %s " % status)
                    return status
        print_debug("FINAL END METHOD verifyParentandChildrenMatch_Status %s" % status)
        return status
def getElementsListWithTagAttribValueMatch(datafile, tag, attrib, value):
    """
    Parse `datafile` and return every element (at any depth) whose tag is
    `tag` and whose attribute `attrib` equals `value`.
    Arguments:
        datafile = input xml file to be parsed.
        tag = tag of the elements to search for.
        attrib = attribute name the element must carry.
        value = required attribute value.
    """
    root = ElementTree.parse(datafile).getroot()
    return list(root.iterfind(".//%s[@%s='%s']" % (tag, attrib, value)))
def getElementWithTagAttribValueMatch(start, tag, attrib, value):
    """
    When start is an xml datafile, it finds the root and first element with:
    tag, attrib, value.
    Or when it's an xml element, it finds the first child element with:
    tag, attrib, value.
    If there is not a match, it returns False.
    Note: when the file is missing (or start is neither a file nor an
    Element), the unparsed `node` value (False) is returned as-is; find()
    itself may also return None on no match.
    """
    node = False
    if isinstance(start, (IOBase, str)):
        # check if file exist here
        if file_Utils.fileExists(start):
            node = ElementTree.parse(start).getroot()
        else:
            print_warning('The file={0} is not found.'.format(start))
    elif isinstance(start, ElementTree.Element):
        node = start
    if node is not False and node is not None:
        elementName = ".//%s[@%s='%s']" % (tag, attrib, value)
        element = node.find(elementName)
    else:
        element = node
    return element
def getChildElementsListWithTagAttribValueMatch(parent, tag, attrib, value):
    """
    Return every descendant of `parent` whose tag is `tag` and whose
    attribute `attrib` equals `value`.
    Arguments:
        parent = parent element
        tag = tag of the sub-elements to search for.
        attrib = attribute name the element must carry.
        value = required attribute value.
    """
    xpath = ".//%s[@%s='%s']" % (tag, attrib, value)
    return parent.findall(xpath)
def getElementListWithSpecificXpath(datafile, xpath):
    """
    Parse `datafile` and return every element matching `xpath`.
    Arguments:
        datafile = input xml file to be parsed
        xpath = a valid xml path value as supported by python's ElementTree
    """
    root = ElementTree.parse(datafile).getroot()
    return list(root.iterfind(xpath))
def getElementStringWithSpecificXpath(datafile, xpath):
    """
    Parse `datafile`, find the first element matching `xpath` and return it
    serialized (bytes, as produced by ElementTree.tostring).
    Arguments:
        datafile = input xml file to be parsed
        xpath = a valid xml path value as supported by python's ElementTree
    """
    match = ElementTree.parse(datafile).getroot().find(xpath)
    return ElementTree.tostring(match)
def getConfigElementTextWithSpecificXpath(datafile, xpath):
    """
    This method takes an xml document as input and finds the first sub element (parent/children)
    containing specified xpath which should be a filepath to a netconf config file
    Returns the element text attribute
    Arguments:
        parent = parent element
        xpath = a valid xml path value as supported by python, refer https://docs.python.org/2/library/xml.etree.elementtree.html
    NOTE(review): despite the name/docstring, this returns the SERIALIZED
    'config' element (bytes in py3) of the file named by the xpath's text,
    not a text attribute. Crashes if the xpath or 'config' is absent.
    """
    root = ElementTree.parse(datafile).getroot()
    # text of the matched element is itself a path to a second xml file
    elem1 = root.find(xpath).text
    elem2_root = ElementTree.parse(elem1)
    elem2 = elem2_root.find('config')
    elem2_string = ElementTree.tostring(elem2)
    return elem2_string
def getChildElementWithSpecificXpath(start, xpath):
    """
    This method takes a xml file or parent element as input and finds the first child
    containing specified xpath
    Returns the child element, or False when there is nothing to search
    (missing file / unsupported start value); find() may return None on no match.
    Arguments:
        start = xml file or parent element
        xpath = a valid xml path value as supported by python's ElementTree
    """
    node = False
    if isinstance(start, (IOBase, str)):
        # check if file exist here
        if file_Utils.fileExists(start):
            node = ElementTree.parse(start).getroot()
        else:
            print_warning('The file={0} is not found.'.format(start))
    # bug fix: the condition below used `or`, which is always true and made
    # the function crash with AttributeError when node stayed False; the
    # sibling helper getElementWithTagAttribValueMatch correctly uses `and`.
    elif isinstance(start, ElementTree.Element):
        node = start
    if node is not False and node is not None:
        element = node.find(xpath)
    else:
        element = False
    return element
def getChildElementsListWithSpecificXpath(parent, xpath):
    """
    Return a list of all children of `parent` matching `xpath`.
    Arguments:
        parent = parent element
        xpath = a valid xml path value as supported by python's ElementTree
    """
    return parent.findall(xpath)
"""Methods to create elements"""
def create_element(tagname="", text="", **kwargs):
    """Build a new xml element with tagname, text and keyword attributes (stringified)."""
    element = ElementTree.Element(tagname)
    element.text = text
    for name, val in list(kwargs.items()):
        element.set(str(name), str(val))
    return element
def safe_subelement(parent, tagname, text="", **kwargs):
    """
    Create the child `tagname` under parent, or overwrite its text and
    attributes if it already exists; return the child.
    """
    ele = parent.find(tagname)
    if ele is None:
        ele = ElementTree.SubElement(parent, tagname)
    ele.text = text
    ele.attrib = kwargs
    return ele
""" Below are xml parsing methods using python's minidom module """
def get_document_root(filename):
    """Return the documentElement of an xml file parsed with minidom."""
    return minidom.parse(filename).documentElement
def get_all_child_nodes(parent_node):
    """Return the minidom NodeList of all direct children of parent_node."""
    return parent_node.childNodes
def get_elements_by_tagname_ignore_ns(filename, element_tag):
    """Parse with minidom and return all elements whose local tag name matches,
    regardless of namespace; logs and returns False when nothing matches."""
    matches = minidom.parse(filename).getElementsByTagNameNS('*', element_tag)
    if not matches:
        print_info('element with tagname "%s" not found in file' % element_tag)
        return False
    return matches
#2015/12/09 ymizugaki add begin
def getValuebyTagFromResponse (response, tag):
    """Return the text of the LAST <tag> element in an xml response string.

    Returns '' when the tag is absent or self-closing/empty.
    """
    doc = minidom.parseString(response)
    matches = doc.getElementsByTagName(tag)
    serialized = matches[len(matches) - 1].toxml() if matches else ""
    if serialized == "<" + tag + "/>":
        return ""
    return serialized.replace('<' + tag + '>', '').replace('</' + tag + '>', '')
def get_last_level_children(node, tag):
    """Return the last descendant (document order) matching `tag`, or None."""
    last = None
    for candidate in node.iter(tag):
        last = candidate
    return last
#2015/12/09 ymizugaki add end
def get_element_by_attribute(xml_file, tag_name, attr_name, attr_value):
    """
    Return the first tag_name element whose attr_name equals attr_value,
    or False when no element matches (minidom).
    """
    for candidate in minidom.parse(xml_file).getElementsByTagName(tag_name):
        if candidate.getAttribute(attr_name) == attr_value:
            return candidate
    return False
def get_child_with_matching_tag(parent, tag_name):
    """Return the first descendant with tag_name, or "" when absent (minidom)."""
    try:
        return parent.getElementsByTagName(tag_name)[0]
    except Exception:
        return ""
def convert_dom_to_string(element):
    """Serialize a minidom node to a string; logs the exception and returns "" on failure."""
    try:
        return str(element.toxml())
    except Exception as exception:
        print_exception(exception)
        return ""
def get_child_with_matching_tags(parent, tag_name):
    """Return the NodeList of all descendants with tag_name, or "" on error (minidom)."""
    try:
        return parent.getElementsByTagName(tag_name)
    except Exception:
        return ""
def removenms(xml):
    """
    It accepts the xml file path or string as input and remove
    the name spaces.
    Arguments:
        xml: An input xml file path or xml string in which the name spaces
        needs to be removed
    Returns:
        xml_string (bytes in py3, per ElementTree.tostring with encoding)
    NOTE(review): the loop strips namespaces only from FIRST-LEVEL children --
    neither the root's own tag nor deeper descendants are touched; confirm
    whether that is intended.
    """
    if os.path.exists(xml):
        # file input is parsed with lxml (comments/blank text removed)
        parser = etree.XMLParser(remove_blank_text=True, remove_comments=True)
        tree = etree.parse(xml, parser)
        root = tree.getroot()
    else:
        root = ElementTree.fromstring(xml)
    for elem in list(root):
        if not hasattr(elem.tag, 'find'):
            continue
        # drop the '{namespace}' prefix, if any, from the tag
        i = elem.tag.find('}')
        if i >= 0:
            elem.tag = elem.tag[i+1:]
    # NOTE(review): on the file branch `root` is an lxml element but is
    # serialized with the stdlib ElementTree.tostring -- verify this works
    # for the lxml versions in use.
    xml_string = ElementTree.tostring(root, encoding='utf-8', method='xml')
    return xml_string
def recursive_delete_among_children(root, element):
    """
    It performs a recursive operation among the children in the xml tree
    and deletes every child that is contained in `element`.
    Arguments:
        1.root: the (sub)tree root element to clean
        2.element: a list of elements to delete
    Returns:
        True
    """
    # bug fix: the old code removed matches from the temporary python list
    # returned by getchildren() -- the tree itself was never modified (and
    # getchildren() was removed in Python 3.9). Remove from `root` instead;
    # iterate over a copy since we mutate while walking.
    for child in list(root):
        if child in element:
            root.remove(child)
        else:
            recursive_delete_among_children(child, element)
    return True
def del_tag_from_element(ele, tag):
    """
    Delete the first subelement with the given `tag` from element `ele`.
    Returns the (modified) element on success,
    False when no such subelement exists (after logging a warning).
    """
    target = ele.find(tag)  # fix: the element was previously searched twice
    if target is not None:
        ele.remove(target)
        return ele
    print_warning("cannot found {0} in element".format(str(tag)))
    return False
def del_tags_from_xml(xml, tag_list=None):
    """
    It deletes the tags either by their names or xpath
    Arguments:
        1.xml: It takes xml file path or xml string as input
        2.tag_list: It contains list of tags which needs to be removed;
          entries may be plain tag names or 'xpath=<path>' selectors
    Returns:
        It returns xml string (bytes, per ElementTree.tostring with encoding)
    """
    # fix: a mutable default argument ([]) is shared across calls; use None.
    if tag_list is None:
        tag_list = []
    if os.path.exists(xml):
        tree = ElementTree.parse(xml)
        root = tree.getroot()
    else:
        root = ElementTree.fromstring(xml)
    for tag in tag_list:
        if 'xpath=' in tag:
            # fix: str.strip('xpath=') removed any of the characters
            # x,p,a,t,h,= from BOTH ends and could mangle the path;
            # split off the literal prefix instead.
            xpath = tag.split('xpath=', 1)[1]
            req_tags = getChildElementsListWithSpecificXpath(root, xpath)
        else:
            req_tags = getChildElementsListWithSpecificXpath(root, ".//{0}".format(tag))
        recursive_delete_among_children(root, req_tags)
    xml_string = ElementTree.tostring(root, encoding='utf-8', method='xml')
    return xml_string
def del_attributes_from_xml(xml, attrib_list=None):
    """
    It deletes the attributes either by their names or xpath.

    Arguments:
    1.xml: It takes xml file path or xml string as input
    2.attrib_list: It contains list of attribute names
      (or "xpath=..." expressions) which need to be removed
    Returns:
    It returns xml string (bytes, utf-8 encoded)
    """
    # None-default instead of a shared mutable [] default argument.
    attrib_list = attrib_list if attrib_list is not None else []
    if os.path.exists(xml):
        tree = ElementTree.parse(xml)
        root = tree.getroot()
    else:
        root = ElementTree.fromstring(xml)
    for attr in attrib_list:
        if attr.startswith('xpath='):
            # Remove the literal prefix; str.strip() would strip the
            # characters {x,p,a,t,h,=} from BOTH ends of the expression.
            attr = attr[len('xpath='):]
            req_tags = getChildElementsListWithSpecificXpath(root, attr)
        else:
            req_tags = getChildElementsListWithSpecificXpath(root, ".//*[@{0}]".format(attr))
        for ele in req_tags:
            # pop with a default so a matched element that lacks the exact
            # attribute key (e.g. when attr is an xpath expression, not an
            # attribute name) does not raise KeyError.
            # NOTE(review): for the xpath branch, `attr` is the whole xpath,
            # not an attribute name — confirm the intended attribute to drop.
            ele.attrib.pop(attr, None)
    xml_string = ElementTree.tostring(root, encoding='utf-8', method='xml')
    return xml_string
def compare_xml(xml1, xml2, output_file=False, sorted_json=True,
                remove_namespaces=False, tag_list=[], attrib_list=[]):
    """
    This will compare two xml files or strings by converting to json
    and then sorting and by default giving the sorted files
    and writing the difference to a diff file.
    Arguments:
    1. xml1 : The first xml among the two xml's which
    needs to be compared
    2. xml2 : The second xml among the two xml's
    which needs to be compared
    3. output_file : It contains the difference between
    the two sorted json objects
    4. sorted_json : By default we are returning the sorted json
    files and if the user selects sorted_json as False,
    not returning the fileS
    5. remove_namespaces: If the user specifies remove_namespaces
    as True will remove namespaces and then compare xml's
    6. tag_list: If user specifies tag names in tag_list,
    will remove those tags and then compare xml's
    7. attrib_list: If user specifies attribute names in the
    attrib_list, will remove those attributes and then compare xml's
    Returns:
    Returns a tuple that contains
    comparison status
    two sorted json files or two None depends on sorted_json value
    output file path or diff_output depends on output_file value
    """
    try:
        # Imported lazily to avoid a hard dependency at module import time.
        from warrior.Framework.ClassUtils.json_utils_class import JsonUtils
        if remove_namespaces:
            xml1 = removenms(xml1)
            xml2 = removenms(xml2)
        # Strip user-requested tags/attributes from both sides before diffing.
        xml1 = del_tags_from_xml(xml1, tag_list)
        xml2 = del_tags_from_xml(xml2, tag_list)
        xml1 = del_attributes_from_xml(xml1, attrib_list)
        xml2 = del_attributes_from_xml(xml2, attrib_list)
        # xml -> dict (attributes included) -> json -> dict round trip to get
        # plain comparable structures.
        output1 = json.loads(json.dumps(
            (xmltodict.parse(xml1, xml_attribs=True))))
        output2 = json.loads(json.dumps(
            (xmltodict.parse(xml2, xml_attribs=True))))
        sorted_json1 = JsonUtils().sort_json_object(output1)
        sorted_json2 = JsonUtils().sort_json_object(output2)
        json_obj1 = json.dumps(sorted_json1, indent=4, separators=(',', ':'))
        json_obj2 = json.dumps(sorted_json2, indent=4, separators=(',', ':'))
        if sorted_json:
            # Persist both sorted views with a timestamp suffix so repeated
            # comparisons don't overwrite each other.
            sorted_file1 = "sorted_file1.json"
            sorted_file2 = "sorted_file2.json"
            sorted_file1 = file_Utils.addTimeDate(sorted_file1)
            sorted_file2 = file_Utils.addTimeDate(sorted_file2)
            f = open(sorted_file1, 'w')
            f1 = open(sorted_file2, 'w')
            f.write(json_obj1)
            f1.write(json_obj2)
            f.close()
            f1.close()
        else:
            sorted_file1 = None
            sorted_file2 = None
        if output1 == output2:
            # Structures are equal; no diff artifact is produced.
            return True, sorted_file1, sorted_file2, None
        diff = ("\n".join(
            difflib.ndiff(json_obj1.splitlines(), json_obj2.splitlines())))
        if output_file:
            output_file = file_Utils.addTimeDate(output_file)
            te = open(output_file, 'w')
            te.write(diff)
            te.close()
        else:
            # No file requested: hand back the diff text itself.
            output_file = diff
        return False, sorted_file1, sorted_file2, output_file
    except Exception as exception:
        print_exception(exception)
        return False, None, None, output_file
def get_children_as_dict(parent):
    """Return all children of ``parent`` as a dict keyed by child tag.

    Each value is a list; entries are nested dicts for children that have
    their own children, otherwise the child's text.
    """
    result = {}
    for node in getChildElementsListWithSpecificXpath(parent, "*"):
        nested = get_children_as_dict(node)
        entry = nested if nested != {} else node.text
        result.setdefault(node.tag, []).append(entry)
    return result
def convert_xml_to_list_of_dict(file_name):
    """
    Takes xml file path as input and
    converts to list of dictionaries
    Arguments:
    file_name : It takes xml file path as input
    Returns:
    list_of_dict: list of dictionaries where keys
    are tag names and values are respective text of the tag.
    """
    root = ElementTree.parse(file_name).getroot()
    # One OrderedDict per first-level child, mapping grandchild tag -> text.
    return [
        OrderedDict((sub.tag, sub.text) for sub in child)
        for child in root
    ]
#2016/06/22 ymizugaki add begin
def getValuebyTagFromStringWithXpath(response, xpathString, ns):
    """Given a response object, return the first value matching the xpath
    and namespace combination, or "" when nothing matches."""
    matches = etree.fromstring(response).xpath(xpathString, namespaces=ns)
    return matches[0] if matches else ""
def getValueListbyTagFromString(response, tag):
    """Given an xml string, return the text content of every ``tag``
    element found in it (empty list when none are present)."""
    nodes = minidom.parseString(response).getElementsByTagName(tag)
    open_tag = "<" + tag + ">"
    close_tag = "</" + tag + ">"
    values = []
    for node in nodes:
        # toxml() yields "<tag>text</tag>"; peel off the wrapping tags.
        values.append(node.toxml().replace(open_tag, "").replace(close_tag, ""))
    return values
#2016/06/22 ymizugaki add end
def compare_xml_using_xpath(response, list_of_xpath, list_of_expected_api_responses):
    """
    Will get each xpath in list of xpath and get the value of
    that xpath in xml response.
    Compares the value with the expected_api_response.
    If all values match (by equality or regex search) returns True,
    else False.
    """
    status = True
    if len(list_of_xpath) != len(list_of_expected_api_responses):
        print_error("The number of xpath given is different"
                    "from the number of expected response"
                    "Please check the value"
                    "\nlist_of_xpath: {}"
                    "\nlist_of_expected_response: {}".format(
                        list_of_xpath, list_of_expected_api_responses))
        status = False
    if status:
        for index, xpath_pattern in enumerate(list_of_xpath):
            # Remove the literal "xpath=" prefix. str.strip("xpath=") would
            # strip the characters {x,p,a,t,h,=} from BOTH ends and can
            # mangle expressions that begin/end with those characters.
            xpath = xpath_pattern
            if xpath.startswith("xpath="):
                xpath = xpath[len("xpath="):]
            value = getValuebyTagFromStringWithXpath(response, xpath, None)
            # Equality_match: check if the expected response equals the value.
            match = value == list_of_expected_api_responses[index]
            # Perform Regex_search if equality match fails.
            if not match:
                try:
                    # Regex_search: check if the expected response pattern
                    # occurs in the API response value.
                    match = re.search(list_of_expected_api_responses[index], value)
                except Exception:
                    print_warning("Python regex search failed, invalid "
                                  "expected_response_pattern '{}' is "
                                  "provided".format(list_of_expected_api_responses[index]))
            if not match:
                status = False
                print_error("For the given '{0}' the expected response value is '{1}'. "
                            "It doesn't match or available in the actual response value "
                            "'{2}'".format(xpath_pattern, list_of_expected_api_responses[index],
                                           value))
    return status
def list_path_responses_datafile(datafile, system_name):
    """
    Returns the path_list and responses_list.
    path_list contains the text of the response_path tags under
    comparison_mode for the system named ``system_name`` in the datafile;
    responses_list contains the text of the response_value tags under
    expected_api_response for the same system.
    """
    system_prefix = "./*[@name='" + system_name + "']"
    path_nodes = getElementListWithSpecificXpath(
        datafile, system_prefix + "/comparison_mode/*")
    resp_nodes = getElementListWithSpecificXpath(
        datafile, system_prefix + "/expected_api_response/*")
    return [node.text for node in path_nodes], [node.text for node in resp_nodes]
|
/*
* Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
* Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice, this list
* of conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "osTest.h"
#include "It_los_event.h"
/* Helper task: reads event mask 0x11 in AND mode with a short timeout,
 * then verifies that LOS_EventClear() in Testcase zeroed uwEventID.
 * Bumps g_testCount once on entry and once after both checks pass,
 * then deletes itself. */
static VOID TaskF01(VOID)
{
    UINT32 ret;
    g_testCount++;
    ret = LOS_EventRead(&g_pevent, 0x11, LOS_WAITMODE_AND, 3); // 3, The timeout period for reading events.
    ICUNIT_GOTO_EQUAL(ret, 0, ret, EXIT);
    ICUNIT_GOTO_EQUAL(g_pevent.uwEventID, 0, g_pevent.uwEventID, EXIT);
    g_testCount++;
EXIT:
    LOS_TaskDelete(g_testTaskID01);
}
/* Test body: while the scheduler is locked, creates a higher-priority task
 * (TaskF01), writes and clears event 0x11 so the task cannot run yet, then
 * unlocks and delays to let the task consume the event.  g_testCount is used
 * to assert exactly when TaskF01 made progress. */
static UINT32 Testcase(VOID)
{
    UINT32 ret;
    TSK_INIT_PARAM_S task1 = { 0 };
    LOS_EventInit(&g_pevent);
    (void)memset_s(&task1, sizeof(TSK_INIT_PARAM_S), 0, sizeof(TSK_INIT_PARAM_S));
    task1.pfnTaskEntry = (TSK_ENTRY_FUNC)TaskF01;
    task1.pcName = "EventTsk39";
    task1.uwStackSize = TASK_STACK_SIZE_TEST;
    task1.usTaskPrio = TASK_PRIO_TEST - 2; // 2, set new task priority, it is higher than the test task.
    task1.uwResved = LOS_TASK_STATUS_DETACHED;
    g_testCount = 0;
    /* Scheduler locked: the higher-priority task must not run yet. */
    LOS_TaskLock();
    ret = LOS_TaskCreate(&g_testTaskID01, &task1);
    ICUNIT_GOTO_EQUAL(ret, LOS_OK, ret, EXIT);
    ICUNIT_GOTO_EQUAL(g_testCount, 0, g_testCount, EXIT);
    ret = LOS_EventWrite(&g_pevent, 0x11);
    ICUNIT_GOTO_EQUAL(ret, LOS_OK, ret, EXIT);
    ICUNIT_GOTO_EQUAL(g_testCount, 0, g_testCount, EXIT);
    /* Mask 0: clear all pending event bits before the reader runs. */
    ret = LOS_EventClear(&g_pevent, 0);
    ICUNIT_GOTO_EQUAL(ret, LOS_OK, ret, EXIT);
    LOS_TaskUnlock();
    LOS_TaskDelay(1);
    /* TaskF01 has started (first g_testCount++) and is blocked in read. */
    ICUNIT_GOTO_EQUAL(g_testCount, 1, g_testCount, EXIT);
    LOS_TaskLock();
    ret = LOS_EventWrite(&g_pevent, 0x11);
    ICUNIT_GOTO_EQUAL(ret, LOS_OK, ret, EXIT);
    ret = LOS_EventClear(&g_pevent, 0);
    ICUNIT_GOTO_EQUAL(ret, LOS_OK, ret, EXIT);
    LOS_TaskUnlock();
    ICUNIT_GOTO_EQUAL(g_testCount, 2, g_testCount, EXIT); // 2, Here, assert that g_testCount is equal to 2.
EXIT:
    LOS_TaskDelete(g_testTaskID01);
    return LOS_OK;
}
/* Registers this event test with the test framework (LEVEL0 function test). */
VOID ItLosEvent039(VOID) // IT_Layer_ModuleORFeature_No
{
    TEST_ADD_CASE("ItLosEvent039", Testcase, TEST_LOS, TEST_EVENT, TEST_LEVEL0, TEST_FUNCTION);
}
|
"""
Workflow Job
------------------
The Query class represents a workflow job model from the RESTFul Workflow Service API
"""
import configparser
import json
import datetime
import os
import botocore.session
import dateutil
import time
import logging
from typing import Callable, Union, Optional, Dict, List
from . import RUNNING_STATUSES, FINISHED_STATUSES, FAILED_STATUSES
from .exceptions import JobError
from ...exceptions import ServerError
from ...session import ServiceSession
log = logging.getLogger(__name__)
class WorkflowJob:
    """
    Proxy object for a serverside workflow job.
    This object can be queried for the current status of a workflow job and will
    automatically refresh from server until the job is finished (or failed).

    Unknown attribute lookups fall through to the raw job dictionary (see
    ``__getattr__``), so e.g. ``job.status`` reads ``self.job["status"]``.
    """
    def __init__(self, session: ServiceSession, job_id: int, job: Dict):
        # NOTE(review): the job_id parameter is unused; the id is taken from
        # job["job_id"] — confirm this is intentional.
        self.session = session
        self.job = job  # raw serverside job document
        self.job_id = self.job["job_id"]
        self.links = self.job["links"]  # REST links used for follow-up calls
    def __repr__(self) -> str:
        # Serialize the raw job document (useful for debugging/logging).
        return json.dumps(self.job)
    @property
    def duration(self) -> Union[str, datetime.timedelta]:
        """
        Duration of job for human consumption.

        Returns a timedelta with microseconds stripped, or the string "-"
        when no usable dates are available.
        """
        if self.complete_date:
            complete_date = self.complete_date
        elif self.status in RUNNING_STATUSES:
            # Still running: measure elapsed time against "now".
            complete_date = datetime.datetime.utcnow()
        elif self.status_date:
            complete_date = self.status_date
        else:
            return "-"
        ret = complete_date - self.submit_date
        # remove microseconds since no one wants them
        ret = ret - datetime.timedelta(microseconds=ret.microseconds)
        return ret
    @property
    def finished(self) -> bool:
        """
        Has the job finished (might be failed).
        Refreshes from the server first if the cached status says running.
        """
        if self.status in RUNNING_STATUSES:
            self.refresh()
        return self.status in FINISHED_STATUSES
    @property
    def running(self) -> bool:
        """
        Is the job currently running.
        Refreshes from the server first if the cached status says running.
        """
        if self.status in RUNNING_STATUSES:
            self.refresh()
        return self.status in RUNNING_STATUSES
    @property
    def failed(self) -> bool:
        """
        Is the job in a failed state.
        Refreshes from the server first if the cached status says running.
        """
        if self.status in RUNNING_STATUSES:
            self.refresh()
        return self.status in FAILED_STATUSES
    @property
    def done(self) -> bool:
        """
        Has the job completed successfully (status == "COMPLETED").
        """
        if self.status in RUNNING_STATUSES:
            self.refresh()
        return self.status == "COMPLETED"
    def refresh(self) -> None:
        """
        Refresh the local cache of the serverside job object.
        """
        self.job = self.session.get(self.links["self"]).json()
    def resume(self) -> None:
        """
        Rerun a job that has previously failed (PUT on the job resource),
        then refresh the local cache.
        """
        _ = self.session.put(self.links["self"])
        self.refresh()
    def wait(self, max_seconds: Optional[int] = None, poll_period: float = 0.5):
        """
        Wait for a running job to complete.
        This is similar to the wait method in the Query Service and will wait by default
        indefinitely for a workflow job to complete and poll the server regularly to update
        the local status. When the job is completed (or failed) the method will return the
        workflow job object.
        :param max_seconds: raise an exception if the job runs longer than this
        :param poll_period: Number of seconds to wait between polling (max 10 seconds)
        :returns: WorkflowJob
        :raises: :exc:`JobError`
        """
        if not self.running:
            return self
        log.info("Waiting for job %s to complete...", self.job_id)
        start_time = time.time()
        duration = 0.0
        period = poll_period
        is_running = self.running
        while is_running:
            time.sleep(period)
            duration = time.time() - start_time
            is_running = self.running
            # cancel the wait if the executor pod is in trouble after 30 seconds of waiting to start.
            # it most likely means that the nextflow script has a syntax error or something.
            # NOTE(review): because `and` binds tighter than `or`, this fires
            # after 30s of PENDING even when max_seconds is unset — confirm
            # that is the intended behavior.
            if self.status == "PENDING" and (
                max_seconds and duration > max_seconds or duration > 30.0
            ):
                log.info("Job has been PENDING for %.0fsec. Inspecting it...", duration)
                curr_status = self.inspect()
                executor_pod = None
                # Find the nextflow executor pod among the low-level pod list.
                for pod in curr_status["pods"]:
                    if pod["metadata"]["labels"]["app-name"] == "nextflow-executor":
                        executor_pod = pod
                        break
                if not executor_pod:
                    log.warning(
                        "Job is still pending after %.0fsec and executor pod is missing",
                        duration,
                    )
                    raise JobError(
                        f"Job is still pending after {duration:.0f}s and executor pod is missing. View logs or inspect job for failures."
                    )
                if not executor_pod["status"]["container_statuses"][0]["state"][
                    "running"
                ]:
                    log.warning(
                        "Job is still pending after %.0fsec and executor pod is not in running state",
                        duration,
                    )
                    raise JobError(
                        f"Job is still pending after {duration:.0f}s and executor pod is not in running state. View logs or inspect job for failures."
                    )
            if is_running and max_seconds and duration > max_seconds:
                raise JobError(
                    f"Job {self.job_id} has exceeded wait time {max_seconds}s and we will not wait any longer. It is currently {self.status}."
                )
            # Exponential-ish backoff, capped at 10s between polls.
            period = min(period + 0.5, 10.0)
        # NOTE(review): this checks "DONE" while the `done` property checks
        # "COMPLETED" — confirm which terminal status the server emits.
        if self.status == "DONE":
            log.info(
                "Job %s completed in %.2f sec and returned %s rows",
                self.job_id,
                duration,
                self.line_count,
            )
        else:
            log.info(
                "Job %s has status %s after %.2f sec",
                self.job_id,
                self.status,
                duration,
            )
        return self
    def cancel(
        self, status: Optional[str] = None, status_message: Optional[str] = None
    ) -> Optional[str]:
        """
        Cancel a running job
        :param status: status of the cancelled job. Defaults to CANCELLED
        :param status_message: optional message recorded with the cancellation
        :returns: Status message from the server
        """
        data: Dict = {}
        data["status"] = status or "CANCELLED"
        data["status_message"] = status_message
        resp = self.session.delete(self.links["self"], json=data)
        status_message = resp.json()["status_message"]
        return status_message
    def inspect(self) -> Dict:
        """
        Inspect a failed job for debugging.
        Returns unfiltered pod and node information from the kubernetes system
        :returns: Dictionary containing low-level debugging information
        :raises: :exc:`ServerError` if the server is not configured for inspection capabilities
        """
        try:
            url = self.links["inspect"]
        except KeyError:
            raise ServerError("Server does not support inspect functionality")
        resp = self.session.get(url)
        return resp.json()
    def cost(self, recalculate: bool = False) -> Dict:
        """
        Get the estimates cost of this job.
        :param recalculate: If set to True then the cost will be recalculated.
        """
        url = self.links["cost"]
        data = {}
        if recalculate:
            data["recalculate"] = "true"
        resp = self.session.get(url, json=data)
        return resp.json()
    def processes(
        self,
        process_id: Optional[int] = None,
        is_all: bool = False,
        limit: int = 50,
        status: Optional[str] = None,
    ) -> List[Dict]:
        """
        Get a list of all nextflow processes in this job
        :param process_id: process_id to show (when given, the other filters
            are ignored and a single-element list is returned)
        :param is_all: Show all processes, otherwise show only running processes
        :param limit: Maximum number of processes to return
        :param status: Filter processes by status
        """
        url = self.links["processes"]
        if process_id:
            url += "/%s" % process_id
            resp = self.session.get(url)
            return [resp.json()]
        else:
            data: Dict = {"limit": limit}
            if is_all:
                data["all"] = 1
            if status:
                data["status"] = status
            resp = self.session.get(url, json=data)
            return resp.json()["processes"]
    def events(self, limit: int = 50) -> List[Dict]:
        """
        Get a list of events reported by Nextflow for this job
        :param limit: Maximum number of events to return
        """
        url = self.links["events"]
        data = {"limit": limit}
        resp = self.session.get(url, json=data)
        return resp.json()["events"]
    def log_groups(self) -> Dict:
        """
        Get available log groups
        :returns: Dictionary with log group name and server url to the log group
        """
        logs_url = self.links["logs"]
        resp = self.session.get(logs_url)
        return resp.json()["links"]
    def logs(self, log_group: str = "pod", log_filter: Optional[str] = None) -> str:
        """
        Get text logs for the specified log group
        :param log_group: Name of the log group to view (prefix match against
            the names returned by :meth:`log_groups`)
        :param log_filter: Optional filter to apply to the logs
        :raises: :exc:`ServerError` If the log group is not available
        """
        groups = self.log_groups()
        url = None
        for k, v in groups.items():
            if k.startswith(log_group):
                url = v
                break
        if not url:
            raise ServerError(f"Log Group '{log_group}' is not available.")
        if log_filter:
            url += "?filter=%s" % log_filter
        logs = self.session.get(url).text
        return logs
    def __getattr__(self, name):
        # Fallback attribute access: read from the raw job document.
        # Keys ending in "_date" are parsed into datetime objects.
        try:
            val = self.job[name]
        except KeyError:
            raise AttributeError
        if name.endswith("_date") and val:
            val = dateutil.parser.parse(val)
        return val
|
import React, { Component } from 'react';
import github from '../github-logo.png';
import linkedin from '../linkedin-icon.png';
import './contact.css'
class Contact extends Component {
constructor(props) {
super(props);
this.state = {
user_name: "",
user_email: "",
subject: "",
message: "",
toggleModal:false
};
}
handleChanges=(e)=>{
this.setState({[e.target.name]:e.target.value})
}
resetForm = (e) => {
this.setState({
user_name: "",
user_email: "",
subject: "",
message: ""
})
}
handleToggle = () => {
this.setState({toggleModal:!this.state.toggleModal})
setTimeout(this.resetForm, 2000);
};
render() {
return (
<div className= 'contact-container' Id="contact" ref={this.props.contactRef}>
<h4 style= {{color:"white"}}>Send me an email!</h4>
<h5>Have me make something for you or request my resume</h5>
<div className={
this.state.toggleModal===true
? 'modal-display'
: 'modal-hidden'
}>
<h4>Message Sent! I will reply to you soon.</h4>
<h5>Thank you</h5>
<button className="modal-close" onClick={this.handleToggle}>Close</button>
</div>
<form className="contact-form" id="contact-form">
<div className="form-top">
<input
type="hidden"
name="contact_number"
/>
<input
type="text"
placeholder=" Name"
name="user_name"
className="name-input"
value={this.state.user_name}
onChange={this.handleChanges}
/>
<input
type="email"
placeholder=" Email"
name="user_email"
className="email-input"
value={this.state.user_email}
onChange={this.handleChanges}
/>
</div>
<input
type="text"
placeholder=" Subject"
name="subject"
className="subject-input"
value={this.state.subject}
onChange={this.handleChanges}
/>
<textarea
type="text"
name="message"
placeholder=" Message"
className="message-input"
value={this.state.message}
onChange={this.handleChanges}
/>
<div className="form-bottom">
<button onClick={this.resetForm} className="clear-button"> Clear Form</button>
<input
type="submit"
value="Send"
className="submit-button"
onClick={this.handleToggle}
/>
</div>
</form>
<div className="line-break">
<h4 style= {{color:"white"}}>Find me on...</h4>
<div className='links'>
<a href= "https://github.com/RockmanExe" target="_blank" rel="noopener noreferrer">
<img className= 'github-logo' src= {github} alt="github" />
</a>
<a href="https://www.linkedin.com/in/adalberto-may-387103113/" target="_blank" rel="noopener noreferrer" >
<img className= 'linkedin-icon' src= {linkedin} alt="linkedin" />
</a>
</div>
</div>
</div>
)
}
};
export default Contact;
|
"""
Low level access. Makes no effort to parse results.
"""
import os
import pickle # nosec
from typing import Any, Dict, Tuple, cast
from diskcache import Cache
from stackapi import StackAPI
from so_pip import settings as settings
# Fail fast if settings were not initialised before this module was imported:
# the cache directory below is derived from OUTPUT_FOLDER.
if settings.OUTPUT_FOLDER == "":
    raise TypeError("Loaded this module too early.")
# TODO: consider moving to JsonDisk from same library
# not sure if that means python in, json out, so may have to change all client code
# The pickle protocol number in the path keeps caches from different Python
# versions separate (diskcache pickles the cached values).
_CACHE = Cache(directory=settings.OUTPUT_FOLDER + f"/cache{pickle.DEFAULT_PROTOCOL}/")
# An API key ("key" env var) is optional; with a key the API quota is larger.
if os.environ.get("key", None):
    SITE = StackAPI("stackoverflow", key=os.environ["key"])
else:
    SITE = StackAPI("stackoverflow")
# Cache TTL for all memoized calls below: one day, in seconds.
CACHE_SECONDS = 86400
# @lru_cache(maxsize=1000)
@_CACHE.memoize(expire=CACHE_SECONDS)
def get_json_by_search(query: str, tagged: Tuple[str, ...]) -> Dict[str, Any]:
    """Search questions by title text and tags; returns unprocessed json."""
    # StackAPI expects list-valued parameters; tags are ';'-joined.
    raw = SITE.fetch(
        "search?tagged={tagged}&intitle={intitle}",
        tagged=[";".join(tagged)],
        intitle=[query],
    )
    return cast(Dict[str, Any], raw)
def get_json_by_advanced_search(
    query: str, tagged: Tuple[str, ...], page: int, minimal: bool = True
) -> Dict[str, Any]:
    """Low level access, returns unprocessed json
    example:
    /2.2/search/advanced?order=desc&sort=activity&answers=1&body=def&tagged=python&site=stackoverflow

    :param query: text to search for in question bodies
    :param tagged: tags the question must carry (';'-joined for the API)
    :param page: result page to fetch
    :param minimal: when True, apply a filter that trims the returned fields
    """
    # Mirror the module-level SITE setup: the "key" env var is optional.
    # (Previously os.environ["key"] raised KeyError when no key was set.)
    api_key = os.environ.get("key")
    if api_key:
        pager = StackAPI("stackoverflow", key=api_key, max_pages=5000)
    else:
        pager = StackAPI("stackoverflow", max_pages=5000)
    # filter = "&filter=!)5IW-1CBJh7IUcXv2R9eY(KE__tA" if minimal else ""
    return cast(
        Dict[str, Any],
        pager.fetch(
            "search/advanced?order=desc&sort={sort}&answers={answers}&body={body}&tagged={tagged}",
            page=page,
            sort=["votes"],
            # order=["desc"],
            answers=[1],
            body=[query],
            tagged=[";".join(tagged)],
            filter="!BHMIb2uw8ZCNzk.BY)VCLpavh_59fq" if minimal else None
            # site="stackoverflow"
        ),
    )
# @lru_cache(maxsize=1000)
@_CACHE.memoize(expire=CACHE_SECONDS)
def get_json_by_question_id(question_id: int) -> Dict[str, Any]:
    """Fetch one question as unprocessed json.

    The magic ``filter`` string asks the API for extra fields:
    answers, comments, body, body_markdown, tags.
    """
    # Single-element list: StackAPI expects iterable id collections.
    raw = SITE.fetch(
        "questions/{ids}",
        ids=[question_id],
        filter="!--1nZwHGSSZl",
    )
    return cast(Dict[str, Any], raw)
# @lru_cache(maxsize=1000)
@_CACHE.memoize(expire=CACHE_SECONDS)
def get_json_by_answer_id(answer_id: int) -> Dict[str, Any]:
    """Fetch one answer as unprocessed json.

    The magic ``filter`` string requests the fields we need:
    link, body_markdown, body, comments, plus the question's tags and title.
    """
    raw = SITE.fetch(
        "answers/{ids}",
        ids=[answer_id],
        filter="!3zl2.DbpKHRASLD)i",
    )
    return cast(Dict[str, Any], raw)
# @lru_cache(maxsize=1000)
@_CACHE.memoize(expire=CACHE_SECONDS)
def get_json_by_user_id(user_id: int) -> Dict[str, Any]:
    """Fetch one user record as unprocessed json.

    The filter keeps just the fields we care about plus ``about_me``.
    """
    raw = SITE.fetch(
        "users/{ids}",
        ids=[user_id],
        filter="!)si8a_4RZpJGdK21mxCq",
    )
    return cast(Dict[str, Any], raw)
# /2.2/posts/26344315/revisions?site=stackoverflow
# @lru_cache(maxsize=1000)
@_CACHE.memoize(expire=CACHE_SECONDS)
def get_json_revisions_by_post_id(post_id: int) -> Dict[str, Any]:
    """
    Fetch the revision history of a post as unprocessed json.
    A post id is a question OR an answer id!
    """
    raw = SITE.fetch(
        "posts/{ids}/revisions",
        ids=[post_id],
        filter="!9_bDDm2Bc",
    )
    return cast(Dict[str, Any], raw)
# @lru_cache(maxsize=1000)
@_CACHE.memoize(expire=CACHE_SECONDS)
def get_json_comments_by_post_id(post_id: int) -> Dict[str, Any]:
    """Fetch the comments on a post as unprocessed json.

    The filter requests comment body, body_markdown and link.
    """
    raw = SITE.fetch(
        "posts/{ids}/comments",
        ids=[post_id],
        filter="!--1nZxT00Un.",
    )
    return cast(Dict[str, Any], raw)
# @lru_cache(maxsize=1000)
@_CACHE.memoize(expire=CACHE_SECONDS)
def get_json_related_tags(tag: str) -> Dict[str, Any]:
    """Fetch tags related to ``tag`` as unprocessed json."""
    raw = SITE.fetch(
        "tags/{ids}/related",
        ids=[tag],
    )
    return cast(Dict[str, Any], raw)
|
#!/usr/bin/env python
# Django management entry point (manage.py).
import os
import sys
import pymysql
# Register PyMySQL as a drop-in replacement for MySQLdb so Django's MySQL
# backend works without the native C client library.
pymysql.install_as_MySQLdb()
if __name__ == "__main__":
    # Default to the development settings unless the environment overrides it.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "api.config.development")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    execute_from_command_line(sys.argv)
|
/*
* Copyright (C) 2005, 2006 IBM Corporation
* Copyright (C) 2014, 2015 Intel Corporation
*
* Authors:
* Leendert van Doorn <leendert@watson.ibm.com>
* Kylene Hall <kjhall@us.ibm.com>
*
* Maintained by: <tpmdd-devel@lists.sourceforge.net>
*
* Device driver for TCG/TCPA TPM (trusted platform module).
* Specifications at www.trustedcomputinggroup.org
*
* This device driver implements the TPM interface as defined in
* the TCG TPM Interface Spec version 1.2, revision 1.0.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation, version 2 of the
* License.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pnp.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/acpi.h>
#include <linux/freezer.h>
#include <acpi/actbl2.h>
#include "tpm.h"
/* Bits of the per-locality TPM_ACCESS register. */
enum tis_access {
	TPM_ACCESS_VALID = 0x80,
	TPM_ACCESS_ACTIVE_LOCALITY = 0x20,
	TPM_ACCESS_REQUEST_PENDING = 0x04,
	TPM_ACCESS_REQUEST_USE = 0x02,
};
/* Bits of the TPM_STS status register. */
enum tis_status {
	TPM_STS_VALID = 0x80,
	TPM_STS_COMMAND_READY = 0x40,
	TPM_STS_GO = 0x20,
	TPM_STS_DATA_AVAIL = 0x10,
	TPM_STS_DATA_EXPECT = 0x08,
};
/* Interrupt enable/capability flags (TPM_INT_ENABLE / TPM_INTF_CAPS). */
enum tis_int_flags {
	TPM_GLOBAL_INT_ENABLE = 0x80000000,
	TPM_INTF_BURST_COUNT_STATIC = 0x100,
	TPM_INTF_CMD_READY_INT = 0x080,
	TPM_INTF_INT_EDGE_FALLING = 0x040,
	TPM_INTF_INT_EDGE_RISING = 0x020,
	TPM_INTF_INT_LEVEL_LOW = 0x010,
	TPM_INTF_INT_LEVEL_HIGH = 0x008,
	TPM_INTF_LOCALITY_CHANGE_INT = 0x004,
	TPM_INTF_STS_VALID_INT = 0x002,
	TPM_INTF_DATA_AVAIL_INT = 0x001,
};
/* Default MMIO window and timeouts for the TIS interface. */
enum tis_defaults {
	TIS_MEM_BASE = 0xFED40000,
	TIS_MEM_LEN = 0x5000,
	TIS_SHORT_TIMEOUT = 750,	/* ms */
	TIS_LONG_TIMEOUT = 2000,	/* 2 sec */
};
/* Discovered resource description for one TPM device. */
struct tpm_info {
	unsigned long start;	/* MMIO region start */
	unsigned long len;	/* MMIO region length */
	unsigned int irq;	/* 0 = no interrupt / polling mode */
};
static struct tpm_info tis_default_info = {
	.start = TIS_MEM_BASE,
	.len = TIS_MEM_LEN,
	.irq = 0,
};
/* Some timeout values are needed before it is known whether the chip is
 * TPM 1.0 or TPM 2.0.
 */
#define	TIS_TIMEOUT_A_MAX	max(TIS_SHORT_TIMEOUT, TPM2_TIMEOUT_A)
#define	TIS_TIMEOUT_B_MAX	max(TIS_LONG_TIMEOUT, TPM2_TIMEOUT_B)
#define	TIS_TIMEOUT_C_MAX	max(TIS_SHORT_TIMEOUT, TPM2_TIMEOUT_C)
#define	TIS_TIMEOUT_D_MAX	max(TIS_SHORT_TIMEOUT, TPM2_TIMEOUT_D)
/* Register offsets; each locality l occupies a 4 KiB page ((l) << 12). */
#define	TPM_ACCESS(l)			(0x0000 | ((l) << 12))
#define	TPM_INT_ENABLE(l)		(0x0008 | ((l) << 12))
#define	TPM_INT_VECTOR(l)		(0x000C | ((l) << 12))
#define	TPM_INT_STATUS(l)		(0x0010 | ((l) << 12))
#define	TPM_INTF_CAPS(l)		(0x0014 | ((l) << 12))
#define	TPM_STS(l)			(0x0018 | ((l) << 12))
#define	TPM_STS3(l)			(0x001b | ((l) << 12))
#define	TPM_DATA_FIFO(l)		(0x0024 | ((l) << 12))
#define	TPM_DID_VID(l)			(0x0F00 | ((l) << 12))
#define	TPM_RID(l)			(0x0F04 | ((l) << 12))
/* Per-chip private state. */
struct priv_data {
	bool irq_tested;	/* true once the irq path has been verified */
};
#if defined(CONFIG_PNP) && defined(CONFIG_ACPI)
/* Return 1 if the ACPI device advertises hardware id @hid, else 0. */
static int has_hid(struct acpi_device *dev, const char *hid)
{
	struct acpi_hardware_id *id;

	/* A device may carry several hardware ids; scan them all. */
	list_for_each_entry(id, &dev->pnp.ids, list)
		if (!strcmp(hid, id->id))
			return 1;

	return 0;
}
/* Intel iTPM devices (HID INTC0102) need command-submission workarounds. */
static inline int is_itpm(struct acpi_device *dev)
{
	return has_hid(dev, "INTC0102");
}
/* Return 1 if the device uses the FIFO (TIS) interface this driver drives.
 * Devices without the TPM 2.0 HID are treated as TPM 1.2 FIFO; TPM 2.0
 * devices are FIFO only if the TPM2 ACPI table says so. */
static inline int is_fifo(struct acpi_device *dev)
{
	struct acpi_table_tpm2 *tbl;
	acpi_status st;

	/* TPM 1.2 FIFO */
	if (!has_hid(dev, "MSFT0101"))
		return 1;

	st = acpi_get_table(ACPI_SIG_TPM2, 1,
			    (struct acpi_table_header **) &tbl);
	if (ACPI_FAILURE(st)) {
		dev_err(&dev->dev, "failed to get TPM2 ACPI table\n");
		return 0;
	}

	if (le32_to_cpu(tbl->start_method) != TPM2_START_FIFO)
		return 0;

	/* TPM 2.0 FIFO */
	return 1;
}
#else
/* Stub when PNP/ACPI are not configured: never an iTPM. */
static inline int is_itpm(struct acpi_device *dev)
{
	return 0;
}
/* Stub when PNP/ACPI are not configured: assume a FIFO interface. */
static inline int is_fifo(struct acpi_device *dev)
{
	return 1;
}
#endif
/* Before we attempt to access the TPM we must see that the valid bit is set.
* The specification says that this bit is 0 at reset and remains 0 until the
* 'TPM has gone through its self test and initialization and has established
* correct values in the other bits.' */
/* Poll TPM_ACCESS for the VALID bit in locality @l until timeout_a elapses.
 * Returns 0 once valid, -1 on timeout. */
static int wait_startup(struct tpm_chip *chip, int l)
{
	unsigned long stop = jiffies + chip->vendor.timeout_a;
	do {
		if (ioread8(chip->vendor.iobase + TPM_ACCESS(l)) &
		    TPM_ACCESS_VALID)
			return 0;
		msleep(TPM_TIMEOUT);
	} while (time_before(jiffies, stop));
	return -1;
}
/* If locality @l is active and valid, record it in chip->vendor.locality
 * and return it; otherwise return -1. */
static int check_locality(struct tpm_chip *chip, int l)
{
	if ((ioread8(chip->vendor.iobase + TPM_ACCESS(l)) &
	     (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) ==
	    (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID))
		return chip->vendor.locality = l;

	return -1;
}
/* Drop locality @l.  When @force is 0 the locality is only released if
 * another requester is pending (REQUEST_PENDING set alongside VALID). */
static void release_locality(struct tpm_chip *chip, int l, int force)
{
	if (force || (ioread8(chip->vendor.iobase + TPM_ACCESS(l)) &
		      (TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID)) ==
	    (TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID))
		iowrite8(TPM_ACCESS_ACTIVE_LOCALITY,
			 chip->vendor.iobase + TPM_ACCESS(l));
}
/* Request exclusive use of locality @l.  Returns @l on success, -1 on
 * timeout (timeout_a).  With an irq the grant is awaited on int_queue and
 * the sleep is restarted if interrupted by the freezer; without one the
 * access register is polled. */
static int request_locality(struct tpm_chip *chip, int l)
{
	unsigned long stop, timeout;
	long rc;

	/* Fast path: locality already held. */
	if (check_locality(chip, l) >= 0)
		return l;

	iowrite8(TPM_ACCESS_REQUEST_USE,
		 chip->vendor.iobase + TPM_ACCESS(l));

	stop = jiffies + chip->vendor.timeout_a;

	if (chip->vendor.irq) {
again:
		timeout = stop - jiffies;
		if ((long)timeout <= 0)
			return -1;
		rc = wait_event_interruptible_timeout(chip->vendor.int_queue,
						      (check_locality
						       (chip, l) >= 0),
						      timeout);
		if (rc > 0)
			return l;
		if (rc == -ERESTARTSYS && freezing(current)) {
			/* Woken by the freezer, not the TPM: retry the wait
			 * with the remaining time budget. */
			clear_thread_flag(TIF_SIGPENDING);
			goto again;
		}
	} else {
		/* wait for burstcount */
		do {
			if (check_locality(chip, l) >= 0)
				return l;
			msleep(TPM_TIMEOUT);
		}
		while (time_before(jiffies, stop));
	}
	return -1;
}
/* Read the status register of the currently held locality. */
static u8 tpm_tis_status(struct tpm_chip *chip)
{
	return ioread8(chip->vendor.iobase +
		       TPM_STS(chip->vendor.locality));
}
/* Put the TPM into the command-ready state. */
static void tpm_tis_ready(struct tpm_chip *chip)
{
	/* this causes the current command to be aborted */
	iowrite8(TPM_STS_COMMAND_READY,
		 chip->vendor.iobase + TPM_STS(chip->vendor.locality));
}
/* Read the 16-bit burst count (bytes the FIFO can accept/deliver without
 * wait states) from TPM_STS bytes 1-2.  Polls until non-zero or timeout_d;
 * returns the count or -EBUSY on timeout. */
static int get_burstcount(struct tpm_chip *chip)
{
	unsigned long stop;
	int burstcnt;

	/* wait for burstcount */
	/* which timeout value, spec has 2 answers (c & d) */
	stop = jiffies + chip->vendor.timeout_d;
	do {
		burstcnt = ioread8(chip->vendor.iobase +
				   TPM_STS(chip->vendor.locality) + 1);
		burstcnt += ioread8(chip->vendor.iobase +
				    TPM_STS(chip->vendor.locality) +
				    2) << 8;
		if (burstcnt)
			return burstcnt;
		msleep(TPM_TIMEOUT);
	} while (time_before(jiffies, stop));
	return -EBUSY;
}
/*
 * Pull up to 'count' bytes out of the TPM data FIFO into 'buf'.
 * Returns the number of bytes read, or a negative errno when the burst
 * count could not be obtained.
 */
static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
{
        int size = 0, burstcnt;
        while (size < count &&
               wait_for_tpm_stat(chip,
                                 TPM_STS_DATA_AVAIL | TPM_STS_VALID,
                                 chip->vendor.timeout_c,
                                 &chip->vendor.read_queue, true)
               == 0) {
                burstcnt = get_burstcount(chip);
                if (burstcnt < 0)
                        /* propagate -EBUSY instead of spinning on a
                         * zero burst count until the outer wait times
                         * out; callers treat any short/negative size
                         * as failure */
                        return burstcnt;
                for (; burstcnt > 0 && size < count; burstcnt--)
                        buf[size++] = ioread8(chip->vendor.iobase +
                                              TPM_DATA_FIFO(chip->vendor.
                                                            locality));
        }
        return size;
}
/*
 * Read a complete response: the 10-byte header first (to learn the
 * total paramsize), then the remainder.  On exit - success or failure -
 * the command is aborted and the locality released.  Returns the number
 * of bytes received or a negative errno.
 */
static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
        int size = 0;
        int expected, status;
        if (count < TPM_HEADER_SIZE) {
                size = -EIO;
                goto out;
        }
        /* read first 10 bytes, including tag, paramsize, and result */
        if ((size =
             recv_data(chip, buf, TPM_HEADER_SIZE)) < TPM_HEADER_SIZE) {
                dev_err(chip->pdev, "Unable to read header\n");
                goto out;
        }
        /* paramsize: big-endian u32 at offset 2 of the header */
        expected = be32_to_cpu(*(__be32 *) (buf + 2));
        if (expected > count) {
                /* response would overflow the caller's buffer */
                size = -EIO;
                goto out;
        }
        if ((size +=
             recv_data(chip, &buf[TPM_HEADER_SIZE],
                       expected - TPM_HEADER_SIZE)) < expected) {
                dev_err(chip->pdev, "Unable to read remainder of result\n");
                size = -ETIME;
                goto out;
        }
        wait_for_tpm_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
                          &chip->vendor.int_queue, false);
        status = tpm_tis_status(chip);
        /* data beyond 'expected' still in the FIFO is a protocol error */
        if (status & TPM_STS_DATA_AVAIL) {      /* retry? */
                dev_err(chip->pdev, "Error left over data\n");
                size = -EIO;
                goto out;
        }
out:
        tpm_tis_ready(chip);
        release_locality(chip, chip->vendor.locality, 0);
        return size;
}
/* iTPM quirk: skip the TPM_STS_DATA_EXPECT check in tpm_tis_send_data()
 * (see also probe_itpm(), which can turn this on automatically). */
static bool itpm;
module_param(itpm, bool, 0444);
MODULE_PARM_DESC(itpm, "Force iTPM workarounds (found on some Lenovo laptops)");
/*
 * Push a command into the TPM's data FIFO: request locality 0, make the
 * TPM command-ready, feed the payload in burst-count sized chunks, and
 * write the final byte separately so TPM_STS_DATA_EXPECT can verify the
 * TPM saw the complete command.  Returns 0 on success or a negative
 * errno; on error the command is aborted and the locality released.
 */
static int tpm_tis_send_data(struct tpm_chip *chip, u8 *buf, size_t len)
{
        int rc, status, burstcnt;
        size_t count = 0;

        /* 'count < len - 1' below underflows for len == 0 (size_t) */
        if (len == 0)
                return -EIO;

        if (request_locality(chip, 0) < 0)
                return -EBUSY;
        status = tpm_tis_status(chip);
        if ((status & TPM_STS_COMMAND_READY) == 0) {
                tpm_tis_ready(chip);
                if (wait_for_tpm_stat
                    (chip, TPM_STS_COMMAND_READY, chip->vendor.timeout_b,
                     &chip->vendor.int_queue, false) < 0) {
                        rc = -ETIME;
                        goto out_err;
                }
        }
        while (count < len - 1) {
                burstcnt = get_burstcount(chip);
                if (burstcnt < 0) {
                        /* propagate -EBUSY rather than looping forever
                         * with a burst count that never arrives */
                        rc = burstcnt;
                        goto out_err;
                }
                for (; burstcnt > 0 && count < len - 1; burstcnt--) {
                        iowrite8(buf[count], chip->vendor.iobase +
                                 TPM_DATA_FIFO(chip->vendor.locality));
                        count++;
                }
                wait_for_tpm_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
                                  &chip->vendor.int_queue, false);
                status = tpm_tis_status(chip);
                /* iTPMs may deassert DATA_EXPECT early; skip the check */
                if (!itpm && (status & TPM_STS_DATA_EXPECT) == 0) {
                        rc = -EIO;
                        goto out_err;
                }
        }
        /* write last byte */
        iowrite8(buf[count],
                 chip->vendor.iobase + TPM_DATA_FIFO(chip->vendor.locality));
        wait_for_tpm_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
                          &chip->vendor.int_queue, false);
        status = tpm_tis_status(chip);
        /* after the final byte the TPM must stop expecting data */
        if ((status & TPM_STS_DATA_EXPECT) != 0) {
                rc = -EIO;
                goto out_err;
        }
        return 0;
out_err:
        tpm_tis_ready(chip);
        release_locality(chip, chip->vendor.locality, 0);
        return rc;
}
/*
 * Mask the TPM's global interrupt enable bit, free the irq and revert
 * the driver to polling mode.
 */
static void disable_interrupts(struct tpm_chip *chip)
{
        u32 mask = ioread32(chip->vendor.iobase +
                            TPM_INT_ENABLE(chip->vendor.locality));

        mask &= ~TPM_GLOBAL_INT_ENABLE;
        iowrite32(mask,
                  chip->vendor.iobase +
                  TPM_INT_ENABLE(chip->vendor.locality));
        free_irq(chip->vendor.irq, chip);
        chip->vendor.irq = 0;
}
/*
 * If interrupts are used (signaled by an irq set in the vendor structure)
 * tpm.c can skip polling for the data to be available as the interrupt is
 * waited for here
 */
static int tpm_tis_send_main(struct tpm_chip *chip, u8 *buf, size_t len)
{
        int rc;
        u32 ordinal;
        unsigned long dur;
        /* queue the command bytes into the TPM FIFO */
        rc = tpm_tis_send_data(chip, buf, len);
        if (rc < 0)
                return rc;
        /* go and do it */
        iowrite8(TPM_STS_GO,
                 chip->vendor.iobase + TPM_STS(chip->vendor.locality));
        if (chip->vendor.irq) {
                /* ordinal: big-endian u32 at offset 6 of the command */
                ordinal = be32_to_cpu(*((__be32 *) (buf + 6)));
                if (chip->flags & TPM_CHIP_FLAG_TPM2)
                        dur = tpm2_calc_ordinal_duration(chip, ordinal);
                else
                        dur = tpm_calc_ordinal_duration(chip, ordinal);
                if (wait_for_tpm_stat
                    (chip, TPM_STS_DATA_AVAIL | TPM_STS_VALID, dur,
                     &chip->vendor.read_queue, false) < 0) {
                        rc = -ETIME;
                        goto out_err;
                }
        }
        return len;
out_err:
        tpm_tis_ready(chip);
        release_locality(chip, chip->vendor.locality, 0);
        return rc;
}
/*
 * Send wrapper that validates the interrupt line on the first command:
 * the command runs with the irq temporarily hidden (polling mode); if
 * the handler never marked irq_tested, interrupts are disabled for good
 * and the driver stays in polling mode.
 */
static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
{
        int rc, irq;
        struct priv_data *priv = chip->vendor.priv;
        if (!chip->vendor.irq || priv->irq_tested)
                return tpm_tis_send_main(chip, buf, len);
        /* Verify receipt of the expected IRQ */
        irq = chip->vendor.irq;
        chip->vendor.irq = 0;   /* force polling for this one command */
        rc = tpm_tis_send_main(chip, buf, len);
        chip->vendor.irq = irq;
        /* give a late interrupt a brief chance to arrive */
        if (!priv->irq_tested)
                msleep(1);
        if (!priv->irq_tested) {
                disable_interrupts(chip);
                dev_err(chip->pdev,
                        FW_BUG "TPM interrupt not working, polling instead\n");
        }
        priv->irq_tested = true;
        return rc;
}
/* Per-vendor replacement for the four TIS timeouts, keyed by the
 * combined DID/VID register value. */
struct tis_vendor_timeout_override {
        u32 did_vid;
        unsigned long timeout_us[4];    /* timeouts a-d, microseconds */
};
static const struct tis_vendor_timeout_override vendor_timeout_overrides[] = {
        /* Atmel 3204 */
        { 0x32041114, { (TIS_SHORT_TIMEOUT*1000), (TIS_LONG_TIMEOUT*1000),
                        (TIS_SHORT_TIMEOUT*1000), (TIS_SHORT_TIMEOUT*1000) } },
};
/*
 * Apply a vendor-specific timeout override matching the chip's DID/VID.
 * Returns true and fills 'timeout_cap' (microseconds) when an entry in
 * vendor_timeout_overrides matched, false otherwise.
 */
static bool tpm_tis_update_timeouts(struct tpm_chip *chip,
                                    unsigned long *timeout_cap)
{
        u32 did_vid = ioread32(chip->vendor.iobase + TPM_DID_VID(0));
        int i;

        for (i = 0; i != ARRAY_SIZE(vendor_timeout_overrides); i++) {
                if (vendor_timeout_overrides[i].did_vid == did_vid) {
                        memcpy(timeout_cap,
                               vendor_timeout_overrides[i].timeout_us,
                               sizeof(vendor_timeout_overrides[i].timeout_us));
                        return true;
                }
        }
        return false;
}
/*
 * Early probing for iTPM with STS_DATA_EXPECT flaw.
 * Try sending command without itpm flag set and if that
 * fails, repeat with itpm flag set.
 * Returns 1 when an iTPM was detected, 0 when not, negative on error.
 */
static int probe_itpm(struct tpm_chip *chip)
{
        int rc = 0;
        /* TPM_ORD_GetTicks request, used as a harmless probe command */
        u8 cmd_getticks[] = {
                0x00, 0xc1, 0x00, 0x00, 0x00, 0x0a,
                0x00, 0x00, 0x00, 0xf1
        };
        size_t len = sizeof(cmd_getticks);
        bool rem_itpm = itpm;   /* module param is restored on exit */
        u16 vendor = ioread16(chip->vendor.iobase + TPM_DID_VID(0));
        /* probe only iTPMS */
        if (vendor != TPM_VID_INTEL)
                return 0;
        itpm = false;
        rc = tpm_tis_send_data(chip, cmd_getticks, len);
        if (rc == 0)
                goto out;
        tpm_tis_ready(chip);
        release_locality(chip, chip->vendor.locality, 0);
        /* retry with the DATA_EXPECT check disabled */
        itpm = true;
        rc = tpm_tis_send_data(chip, cmd_getticks, len);
        if (rc == 0) {
                dev_info(chip->pdev, "Detected an iTPM.\n");
                rc = 1;
        } else
                rc = -EFAULT;
out:
        itpm = rem_itpm;
        tpm_tis_ready(chip);
        release_locality(chip, chip->vendor.locality, 0);
        return rc;
}
/*
 * Decide whether 'status' indicates a cancelled command.  Winbond and
 * STMicro parts report cancellation differently from the TIS default.
 */
static bool tpm_tis_req_canceled(struct tpm_chip *chip, u8 status)
{
        u32 vid = chip->vendor.manufacturer_id;

        if (vid == TPM_VID_WINBOND)
                return status == TPM_STS_VALID ||
                       status == (TPM_STS_VALID | TPM_STS_COMMAND_READY);
        if (vid == TPM_VID_STM)
                return status == (TPM_STS_VALID | TPM_STS_COMMAND_READY);
        return status == TPM_STS_COMMAND_READY;
}
/* TIS operations handed to the TPM core. */
static const struct tpm_class_ops tpm_tis = {
        .status = tpm_tis_status,
        .recv = tpm_tis_recv,
        .send = tpm_tis_send,
        .cancel = tpm_tis_ready,
        .update_timeouts = tpm_tis_update_timeouts,
        .req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
        .req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
        .req_canceled = tpm_tis_req_canceled,
};
/*
 * Probe-time interrupt handler: records which irq line fired (in
 * probed_irq) so tpm_tis_init() can verify a candidate vector works.
 */
static irqreturn_t tis_int_probe(int irq, void *dev_id)
{
        struct tpm_chip *chip = dev_id;
        u32 interrupt;
        interrupt = ioread32(chip->vendor.iobase +
                             TPM_INT_STATUS(chip->vendor.locality));
        if (interrupt == 0)
                return IRQ_NONE;        /* shared line, not ours */
        chip->vendor.probed_irq = irq;
        /* Clear interrupts handled with TPM_EOI */
        iowrite32(interrupt,
                  chip->vendor.iobase +
                  TPM_INT_STATUS(chip->vendor.locality));
        return IRQ_HANDLED;
}
/*
 * Runtime interrupt handler: wakes the waitqueue matching the interrupt
 * cause and acknowledges the TPM.
 */
static irqreturn_t tis_int_handler(int dummy, void *dev_id)
{
        struct tpm_chip *chip = dev_id;
        u32 interrupt;
        int i;
        interrupt = ioread32(chip->vendor.iobase +
                             TPM_INT_STATUS(chip->vendor.locality));
        if (interrupt == 0)
                return IRQ_NONE;        /* shared line, not ours */
        /* any interrupt proves the line works; see tpm_tis_send() */
        ((struct priv_data *)chip->vendor.priv)->irq_tested = true;
        if (interrupt & TPM_INTF_DATA_AVAIL_INT)
                wake_up_interruptible(&chip->vendor.read_queue);
        if (interrupt & TPM_INTF_LOCALITY_CHANGE_INT)
                /* re-discover which locality is now active */
                for (i = 0; i < 5; i++)
                        if (check_locality(chip, i) >= 0)
                                break;
        if (interrupt &
            (TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_STS_VALID_INT |
             TPM_INTF_CMD_READY_INT))
                wake_up_interruptible(&chip->vendor.int_queue);
        /* Clear interrupts handled with TPM_EOI */
        iowrite32(interrupt,
                  chip->vendor.iobase +
                  TPM_INT_STATUS(chip->vendor.locality));
        /* readback after the ack - presumably to flush the posted
         * write before returning; TODO confirm */
        ioread32(chip->vendor.iobase + TPM_INT_STATUS(chip->vendor.locality));
        return IRQ_HANDLED;
}
/* interrupts=0 forces polling mode even when an irq is available */
static bool interrupts = true;
module_param(interrupts, bool, 0444);
MODULE_PARM_DESC(interrupts, "Enable interrupts");
/*
 * Tear down a chip: shut a TPM2 down cleanly, mask the global interrupt
 * enable bit and force-release the active locality.
 */
static void tpm_tis_remove(struct tpm_chip *chip)
{
        if (chip->flags & TPM_CHIP_FLAG_TPM2)
                tpm2_shutdown(chip, TPM2_SU_CLEAR);
        iowrite32(~TPM_GLOBAL_INT_ENABLE &
                  ioread32(chip->vendor.iobase +
                           TPM_INT_ENABLE(chip->vendor.
                                          locality)),
                  chip->vendor.iobase +
                  TPM_INT_ENABLE(chip->vendor.locality));
        release_locality(chip, chip->vendor.locality, 1);
}
/*
 * Probe and register one TIS chip: map the MMIO window, claim locality
 * 0, detect TPM 1.2 vs 2.0, apply the iTPM quirk, find a working
 * interrupt (probing vectors 3-15 when the TPM does not advertise one),
 * run the self test and register with the TPM core.
 * Returns 0 on success or a negative errno.
 */
static int tpm_tis_init(struct device *dev, struct tpm_info *tpm_info,
                        acpi_handle acpi_dev_handle)
{
        u32 vendor, intfcaps, intmask;
        int rc, i, irq_s, irq_e, probe;
        int irq_r = -1;
        struct tpm_chip *chip;
        struct priv_data *priv;
        priv = devm_kzalloc(dev, sizeof(struct priv_data), GFP_KERNEL);
        if (priv == NULL)
                return -ENOMEM;
        chip = tpmm_chip_alloc(dev, &tpm_tis);
        if (IS_ERR(chip))
                return PTR_ERR(chip);
        chip->vendor.priv = priv;
#ifdef CONFIG_ACPI
        chip->acpi_dev_handle = acpi_dev_handle;
#endif
        chip->vendor.iobase = devm_ioremap(dev, tpm_info->start, tpm_info->len);
        if (!chip->vendor.iobase)
                return -EIO;
        /* Maximum timeouts */
        chip->vendor.timeout_a = TIS_TIMEOUT_A_MAX;
        chip->vendor.timeout_b = TIS_TIMEOUT_B_MAX;
        chip->vendor.timeout_c = TIS_TIMEOUT_C_MAX;
        chip->vendor.timeout_d = TIS_TIMEOUT_D_MAX;
        if (wait_startup(chip, 0) != 0) {
                rc = -ENODEV;
                goto out_err;
        }
        if (request_locality(chip, 0) != 0) {
                rc = -ENODEV;
                goto out_err;
        }
        /* sets TPM_CHIP_FLAG_TPM2 when a 2.0 device is found */
        rc = tpm2_probe(chip);
        if (rc)
                goto out_err;
        vendor = ioread32(chip->vendor.iobase + TPM_DID_VID(0));
        chip->vendor.manufacturer_id = vendor;
        dev_info(dev, "%s TPM (device-id 0x%X, rev-id %d)\n",
                 (chip->flags & TPM_CHIP_FLAG_TPM2) ? "2.0" : "1.2",
                 vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0)));
        /* auto-detect the iTPM quirk unless forced via module param */
        if (!itpm) {
                probe = probe_itpm(chip);
                if (probe < 0) {
                        rc = -ENODEV;
                        goto out_err;
                }
                itpm = !!probe;
        }
        if (itpm)
                dev_info(dev, "Intel iTPM workaround enabled\n");
        /* Figure out the capabilities */
        intfcaps =
            ioread32(chip->vendor.iobase +
                     TPM_INTF_CAPS(chip->vendor.locality));
        dev_dbg(dev, "TPM interface capabilities (0x%x):\n",
                intfcaps);
        if (intfcaps & TPM_INTF_BURST_COUNT_STATIC)
                dev_dbg(dev, "\tBurst Count Static\n");
        if (intfcaps & TPM_INTF_CMD_READY_INT)
                dev_dbg(dev, "\tCommand Ready Int Support\n");
        if (intfcaps & TPM_INTF_INT_EDGE_FALLING)
                dev_dbg(dev, "\tInterrupt Edge Falling\n");
        if (intfcaps & TPM_INTF_INT_EDGE_RISING)
                dev_dbg(dev, "\tInterrupt Edge Rising\n");
        if (intfcaps & TPM_INTF_INT_LEVEL_LOW)
                dev_dbg(dev, "\tInterrupt Level Low\n");
        if (intfcaps & TPM_INTF_INT_LEVEL_HIGH)
                dev_dbg(dev, "\tInterrupt Level High\n");
        if (intfcaps & TPM_INTF_LOCALITY_CHANGE_INT)
                dev_dbg(dev, "\tLocality Change Int Support\n");
        if (intfcaps & TPM_INTF_STS_VALID_INT)
                dev_dbg(dev, "\tSts Valid Int Support\n");
        if (intfcaps & TPM_INTF_DATA_AVAIL_INT)
                dev_dbg(dev, "\tData Avail Int Support\n");
        /* INTERRUPT Setup */
        init_waitqueue_head(&chip->vendor.read_queue);
        init_waitqueue_head(&chip->vendor.int_queue);
        intmask =
            ioread32(chip->vendor.iobase +
                     TPM_INT_ENABLE(chip->vendor.locality));
        intmask |= TPM_INTF_CMD_READY_INT
            | TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_DATA_AVAIL_INT
            | TPM_INTF_STS_VALID_INT;
        iowrite32(intmask,
                  chip->vendor.iobase +
                  TPM_INT_ENABLE(chip->vendor.locality));
        if (interrupts)
                chip->vendor.irq = tpm_info->irq;
        if (interrupts && !chip->vendor.irq) {
                /* no irq given: probe candidate vectors one by one */
                irq_s =
                    ioread8(chip->vendor.iobase +
                            TPM_INT_VECTOR(chip->vendor.locality));
                irq_r = irq_s;  /* remembered so it can be restored */
                if (irq_s) {
                        irq_e = irq_s;
                } else {
                        irq_s = 3;
                        irq_e = 15;
                }
                for (i = irq_s; i <= irq_e && chip->vendor.irq == 0; i++) {
                        iowrite8(i, chip->vendor.iobase +
                                 TPM_INT_VECTOR(chip->vendor.locality));
                        if (devm_request_irq
                            (dev, i, tis_int_probe, IRQF_SHARED,
                             chip->devname, chip) != 0) {
                                dev_info(chip->pdev,
                                         "Unable to request irq: %d for probe\n",
                                         i);
                                continue;
                        }
                        /* Clear all existing */
                        iowrite32(ioread32
                                  (chip->vendor.iobase +
                                   TPM_INT_STATUS(chip->vendor.locality)),
                                  chip->vendor.iobase +
                                  TPM_INT_STATUS(chip->vendor.locality));
                        /* Turn on */
                        iowrite32(intmask | TPM_GLOBAL_INT_ENABLE,
                                  chip->vendor.iobase +
                                  TPM_INT_ENABLE(chip->vendor.locality));
                        chip->vendor.probed_irq = 0;
                        /* Generate Interrupts */
                        if (chip->flags & TPM_CHIP_FLAG_TPM2)
                                tpm2_gen_interrupt(chip);
                        else
                                tpm_gen_interrupt(chip);
                        /* non-zero iff tis_int_probe() fired on line i */
                        chip->vendor.irq = chip->vendor.probed_irq;
                        /* free_irq will call into tis_int_probe;
                           clear all irqs we haven't seen while doing
                           tpm_gen_interrupt */
                        iowrite32(ioread32
                                  (chip->vendor.iobase +
                                   TPM_INT_STATUS(chip->vendor.locality)),
                                  chip->vendor.iobase +
                                  TPM_INT_STATUS(chip->vendor.locality));
                        /* Turn off */
                        iowrite32(intmask,
                                  chip->vendor.iobase +
                                  TPM_INT_ENABLE(chip->vendor.locality));
                        devm_free_irq(dev, i, chip);
                }
        }
        if (chip->vendor.irq) {
                /* a working vector exists: install the real handler */
                iowrite8(chip->vendor.irq,
                         chip->vendor.iobase +
                         TPM_INT_VECTOR(chip->vendor.locality));
                if (devm_request_irq
                    (dev, chip->vendor.irq, tis_int_handler, IRQF_SHARED,
                     chip->devname, chip) != 0) {
                        dev_info(chip->pdev,
                                 "Unable to request irq: %d for use\n",
                                 chip->vendor.irq);
                        chip->vendor.irq = 0;
                } else {
                        /* Clear all existing */
                        iowrite32(ioread32
                                  (chip->vendor.iobase +
                                   TPM_INT_STATUS(chip->vendor.locality)),
                                  chip->vendor.iobase +
                                  TPM_INT_STATUS(chip->vendor.locality));
                        /* Turn on */
                        iowrite32(intmask | TPM_GLOBAL_INT_ENABLE,
                                  chip->vendor.iobase +
                                  TPM_INT_ENABLE(chip->vendor.locality));
                }
        } else if (irq_r != -1)
                /* probing failed: restore the original vector */
                iowrite8(irq_r, chip->vendor.iobase +
                         TPM_INT_VECTOR(chip->vendor.locality));
        if (chip->flags & TPM_CHIP_FLAG_TPM2) {
                /* TPM 2.0 uses fixed timeouts/durations, not caps */
                chip->vendor.timeout_a = msecs_to_jiffies(TPM2_TIMEOUT_A);
                chip->vendor.timeout_b = msecs_to_jiffies(TPM2_TIMEOUT_B);
                chip->vendor.timeout_c = msecs_to_jiffies(TPM2_TIMEOUT_C);
                chip->vendor.timeout_d = msecs_to_jiffies(TPM2_TIMEOUT_D);
                chip->vendor.duration[TPM_SHORT] =
                    msecs_to_jiffies(TPM2_DURATION_SHORT);
                chip->vendor.duration[TPM_MEDIUM] =
                    msecs_to_jiffies(TPM2_DURATION_MEDIUM);
                chip->vendor.duration[TPM_LONG] =
                    msecs_to_jiffies(TPM2_DURATION_LONG);
                rc = tpm2_do_selftest(chip);
                if (rc == TPM2_RC_INITIALIZE) {
                        /* firmware skipped startup; do it ourselves */
                        dev_warn(dev, "Firmware has not started TPM\n");
                        rc = tpm2_startup(chip, TPM2_SU_CLEAR);
                        if (!rc)
                                rc = tpm2_do_selftest(chip);
                }
                if (rc) {
                        dev_err(dev, "TPM self test failed\n");
                        if (rc > 0)
                                rc = -ENODEV;
                        goto out_err;
                }
        } else {
                if (tpm_get_timeouts(chip)) {
                        dev_err(dev, "Could not get TPM timeouts and durations\n");
                        rc = -ENODEV;
                        goto out_err;
                }
                if (tpm_do_selftest(chip)) {
                        dev_err(dev, "TPM self test failed\n");
                        rc = -ENODEV;
                        goto out_err;
                }
        }
        return tpm_chip_register(chip);
out_err:
        tpm_tis_remove(chip);
        return rc;
}
#ifdef CONFIG_PM_SLEEP
/*
 * Re-program the interrupt vector and enable bits after resume, since
 * firmware may have clobbered them while the system slept.
 */
static void tpm_tis_reenable_interrupts(struct tpm_chip *chip)
{
        u32 intmask;
        /* reenable interrupts that device may have lost or
           BIOS/firmware may have disabled */
        iowrite8(chip->vendor.irq, chip->vendor.iobase +
                 TPM_INT_VECTOR(chip->vendor.locality));
        intmask =
            ioread32(chip->vendor.iobase +
                     TPM_INT_ENABLE(chip->vendor.locality));
        intmask |= TPM_INTF_CMD_READY_INT
            | TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_DATA_AVAIL_INT
            | TPM_INTF_STS_VALID_INT | TPM_GLOBAL_INT_ENABLE;
        iowrite32(intmask,
                  chip->vendor.iobase + TPM_INT_ENABLE(chip->vendor.locality));
}
/* System resume: restore interrupts, resume the core, and for TPM 1.2
 * rerun the self test. */
static int tpm_tis_resume(struct device *dev)
{
        struct tpm_chip *chip = dev_get_drvdata(dev);
        int ret;
        if (chip->vendor.irq)
                tpm_tis_reenable_interrupts(chip);
        ret = tpm_pm_resume(dev);
        if (ret)
                return ret;
        /* TPM 1.2 requires self-test on resume. This function actually returns
         * an error code but for unknown reason it isn't handled.
         */
        if (!(chip->flags & TPM_CHIP_FLAG_TPM2))
                tpm_do_selftest(chip);
        return 0;
}
#endif
/* suspend via the core's tpm_pm_suspend, resume via the TIS hook above */
static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_resume);
#ifdef CONFIG_PNP
/*
 * PNP probe: collect the MMIO window and irq from the PNP resources,
 * pick up the ACPI handle and iTPM quirk when available, then hand off
 * to tpm_tis_init().
 */
static int tpm_tis_pnp_init(struct pnp_dev *pnp_dev,
                            const struct pnp_device_id *pnp_id)
{
        struct tpm_info tpm_info = tis_default_info;
        acpi_handle acpi_dev_handle = NULL;
        tpm_info.start = pnp_mem_start(pnp_dev, 0);
        tpm_info.len = pnp_mem_len(pnp_dev, 0);
        if (pnp_irq_valid(pnp_dev, 0))
                tpm_info.irq = pnp_irq(pnp_dev, 0);
        else
                interrupts = false;     /* no irq resource: poll */
#ifdef CONFIG_ACPI
        if (pnp_acpi_device(pnp_dev)) {
                if (is_itpm(pnp_acpi_device(pnp_dev)))
                        itpm = true;
                acpi_dev_handle = pnp_acpi_device(pnp_dev)->handle;
        }
#endif
        return tpm_tis_init(&pnp_dev->dev, &tpm_info, acpi_dev_handle);
}
/* PNP IDs this driver binds to; the first empty slot can be filled via
 * the 'hid' module parameter (see TIS_HID_USR_IDX below). */
static struct pnp_device_id tpm_pnp_tbl[] = {
        {"PNP0C31", 0},         /* TPM */
        {"ATM1200", 0},         /* Atmel */
        {"IFX0102", 0},         /* Infineon */
        {"BCM0101", 0},         /* Broadcom */
        {"BCM0102", 0},         /* Broadcom */
        {"NSC1200", 0},         /* National */
        {"ICO0102", 0},         /* Intel */
        /* Add new here */
        {"", 0},                /* User Specified */
        {"", 0}                 /* Terminator */
};
MODULE_DEVICE_TABLE(pnp, tpm_pnp_tbl);
/* PNP removal: unregister from the core, then tear down the chip. */
static void tpm_tis_pnp_remove(struct pnp_dev *dev)
{
        struct tpm_chip *chip = pnp_get_drvdata(dev);
        tpm_chip_unregister(chip);
        tpm_tis_remove(chip);
}
static struct pnp_driver tis_pnp_driver = {
        .name = "tpm_tis",
        .id_table = tpm_pnp_tbl,
        .probe = tpm_tis_pnp_init,
        .remove = tpm_tis_pnp_remove,
        .driver = {
                .pm = &tpm_tis_pm,
        },
};
/* index of the "User Specified" slot in tpm_pnp_tbl (second to last) */
#define TIS_HID_USR_IDX sizeof(tpm_pnp_tbl)/sizeof(struct pnp_device_id) -2
module_param_string(hid, tpm_pnp_tbl[TIS_HID_USR_IDX].id,
                    sizeof(tpm_pnp_tbl[TIS_HID_USR_IDX].id), 0444);
MODULE_PARM_DESC(hid, "Set additional specific HID for this driver to probe");
#endif
#ifdef CONFIG_ACPI
/*
 * acpi_dev_get_resources() callback: record the TPM's irq and MMIO
 * window in the tpm_info passed via 'data'.  Always returns 1 so the
 * resource walk continues.
 */
static int tpm_check_resource(struct acpi_resource *ares, void *data)
{
        struct tpm_info *info = data;
        struct resource res;

        if (acpi_dev_resource_interrupt(ares, 0, &res)) {
                info->irq = res.start;
        } else if (acpi_dev_resource_memory(ares, &res)) {
                info->start = res.start;
                info->len = resource_size(&res);
        }
        return 1;
}
/*
 * ACPI probe: walk the device resources to find the MMIO window and
 * irq, then hand off to tpm_tis_init().  Non-FIFO (e.g. CRB) devices
 * are rejected.
 */
static int tpm_tis_acpi_init(struct acpi_device *acpi_dev)
{
        struct list_head resources;
        struct tpm_info tpm_info = tis_default_info;
        int ret;
        if (!is_fifo(acpi_dev))
                return -ENODEV;
        INIT_LIST_HEAD(&resources);
        ret = acpi_dev_get_resources(acpi_dev, &resources, tpm_check_resource,
                                     &tpm_info);
        if (ret < 0)
                return ret;
        acpi_dev_free_resource_list(&resources);
        if (!tpm_info.irq)
                interrupts = false;     /* no irq advertised: poll */
        if (is_itpm(acpi_dev))
                itpm = true;
        return tpm_tis_init(&acpi_dev->dev, &tpm_info, acpi_dev->handle);
}
/* ACPI removal: unregister from the core, then tear down the chip. */
static int tpm_tis_acpi_remove(struct acpi_device *dev)
{
        struct tpm_chip *chip = dev_get_drvdata(&dev->dev);
        tpm_chip_unregister(chip);
        tpm_tis_remove(chip);
        return 0;
}
/* ACPI IDs this driver binds to. */
static struct acpi_device_id tpm_acpi_tbl[] = {
        {"MSFT0101", 0},        /* TPM 2.0 */
        /* Add new here */
        {"", 0},                /* User Specified */
        {"", 0}                 /* Terminator */
};
MODULE_DEVICE_TABLE(acpi, tpm_acpi_tbl);
static struct acpi_driver tis_acpi_driver = {
        .name = "tpm_tis",
        .ids = tpm_acpi_tbl,
        .ops = {
                .add = tpm_tis_acpi_init,
                .remove = tpm_tis_acpi_remove,
        },
        .drv = {
                .pm = &tpm_tis_pm,
        },
};
#endif
/* platform driver used only for the force=1 (no PNP/ACPI) path */
static struct platform_driver tis_drv = {
        .driver = {
                .name = "tpm_tis",
                .pm = &tpm_tis_pm,
        },
};
static struct platform_device *pdev;    /* only used when force=1 */
static bool force;
module_param(force, bool, 0444);
MODULE_PARM_DESC(force, "Force device probe rather than using ACPI entry");
/*
 * Module init: normally register the PNP and/or ACPI drivers; with
 * force=1, bypass enumeration and probe a fixed platform device at the
 * default TIS address instead.
 */
static int __init init_tis(void)
{
        int rc;
#ifdef CONFIG_PNP
        if (!force) {
                rc = pnp_register_driver(&tis_pnp_driver);
                if (rc)
                        return rc;
        }
#endif
#ifdef CONFIG_ACPI
        if (!force) {
                rc = acpi_bus_register_driver(&tis_acpi_driver);
                if (rc) {
#ifdef CONFIG_PNP
                        /* unwind the PNP registration done above */
                        pnp_unregister_driver(&tis_pnp_driver);
#endif
                        return rc;
                }
        }
#endif
        if (!force)
                return 0;
        /* force=1: manual platform device at tis_default_info */
        rc = platform_driver_register(&tis_drv);
        if (rc < 0)
                return rc;
        pdev = platform_device_register_simple("tpm_tis", -1, NULL, 0);
        if (IS_ERR(pdev)) {
                rc = PTR_ERR(pdev);
                goto err_dev;
        }
        rc = tpm_tis_init(&pdev->dev, &tis_default_info, NULL);
        if (rc)
                goto err_init;
        return 0;
err_init:
        platform_device_unregister(pdev);
err_dev:
        platform_driver_unregister(&tis_drv);
        return rc;
}
/*
 * Module exit: mirror of init_tis() - either unregister the bus
 * drivers, or (force=1) tear down the manually created device.
 */
static void __exit cleanup_tis(void)
{
        struct tpm_chip *chip;
#if defined(CONFIG_PNP) || defined(CONFIG_ACPI)
        if (!force) {
#ifdef CONFIG_ACPI
                acpi_bus_unregister_driver(&tis_acpi_driver);
#endif
#ifdef CONFIG_PNP
                pnp_unregister_driver(&tis_pnp_driver);
#endif
                return;
        }
#endif
        chip = dev_get_drvdata(&pdev->dev);
        tpm_chip_unregister(chip);
        tpm_tis_remove(chip);
        platform_device_unregister(pdev);
        platform_driver_unregister(&tis_drv);
}
/* module entry points and metadata */
module_init(init_tis);
module_exit(cleanup_tis);
MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)");
MODULE_DESCRIPTION("TPM Driver");
MODULE_VERSION("2.0");
MODULE_LICENSE("GPL");
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'C:\Users\Raphael\Desktop\Git\PyManatee\pyleecan\GUI\Dialog\DMachineSetup\SWSlot\PWSlot28\PWSlot28.ui'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_PWSlot28(object):
    """pyuic5-generated UI definition for the PWSlot28 slot-setup page.

    Do not edit by hand: regenerate from PWSlot28.ui instead (see the
    header of this file).  Builds a slot image plus constraint text on
    the left and W0/W3/H0/H3/R1 input fields with a WWSlotOut summary on
    the right.
    """

    def setupUi(self, PWSlot28):
        """Create and lay out all child widgets of the PWSlot28 dialog."""
        PWSlot28.setObjectName("PWSlot28")
        PWSlot28.resize(630, 470)
        PWSlot28.setMinimumSize(QtCore.QSize(630, 470))
        PWSlot28.setMaximumSize(QtCore.QSize(16777215, 16777215))
        self.horizontalLayout = QtWidgets.QHBoxLayout(PWSlot28)
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.verticalLayout_2 = QtWidgets.QVBoxLayout()
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        # left column: scaled slot illustration
        self.img_slot = QtWidgets.QLabel(PWSlot28)
        sizePolicy = QtWidgets.QSizePolicy(
            QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding
        )
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.img_slot.sizePolicy().hasHeightForWidth())
        self.img_slot.setSizePolicy(sizePolicy)
        self.img_slot.setMaximumSize(QtCore.QSize(16777215, 16777215))
        self.img_slot.setText("")
        self.img_slot.setPixmap(
            QtGui.QPixmap(":/images/images/MachineSetup/WSlot/Slot 28.PNG")
        )
        self.img_slot.setScaledContents(True)
        self.img_slot.setObjectName("img_slot")
        self.verticalLayout_2.addWidget(self.img_slot)
        # read-only constraint text below the image
        self.txt_constraint = QtWidgets.QTextEdit(PWSlot28)
        sizePolicy = QtWidgets.QSizePolicy(
            QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed
        )
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(
            self.txt_constraint.sizePolicy().hasHeightForWidth()
        )
        self.txt_constraint.setSizePolicy(sizePolicy)
        self.txt_constraint.setMaximumSize(QtCore.QSize(16777215, 70))
        self.txt_constraint.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
        self.txt_constraint.setTextInteractionFlags(
            QtCore.Qt.TextSelectableByKeyboard | QtCore.Qt.TextSelectableByMouse
        )
        self.txt_constraint.setObjectName("txt_constraint")
        self.verticalLayout_2.addWidget(self.txt_constraint)
        self.horizontalLayout.addLayout(self.verticalLayout_2)
        # right column: dimension inputs in a grid
        self.verticalLayout = QtWidgets.QVBoxLayout()
        self.verticalLayout.setObjectName("verticalLayout")
        self.gridLayout = QtWidgets.QGridLayout()
        self.gridLayout.setObjectName("gridLayout")
        self.in_R1 = QtWidgets.QLabel(PWSlot28)
        self.in_R1.setObjectName("in_R1")
        self.gridLayout.addWidget(self.in_R1, 4, 0, 1, 1)
        self.lf_R1 = FloatEdit(PWSlot28)
        self.lf_R1.setObjectName("lf_R1")
        self.gridLayout.addWidget(self.lf_R1, 4, 1, 1, 1)
        self.unit_R1 = QtWidgets.QLabel(PWSlot28)
        self.unit_R1.setObjectName("unit_R1")
        self.gridLayout.addWidget(self.unit_R1, 4, 2, 1, 1)
        self.unit_H3 = QtWidgets.QLabel(PWSlot28)
        self.unit_H3.setObjectName("unit_H3")
        self.gridLayout.addWidget(self.unit_H3, 3, 2, 1, 1)
        self.lf_H3 = FloatEdit(PWSlot28)
        self.lf_H3.setObjectName("lf_H3")
        self.gridLayout.addWidget(self.lf_H3, 3, 1, 1, 1)
        self.in_H3 = QtWidgets.QLabel(PWSlot28)
        self.in_H3.setObjectName("in_H3")
        self.gridLayout.addWidget(self.in_H3, 3, 0, 1, 1)
        self.unit_H0 = QtWidgets.QLabel(PWSlot28)
        self.unit_H0.setObjectName("unit_H0")
        self.gridLayout.addWidget(self.unit_H0, 2, 2, 1, 1)
        self.lf_H0 = FloatEdit(PWSlot28)
        self.lf_H0.setObjectName("lf_H0")
        self.gridLayout.addWidget(self.lf_H0, 2, 1, 1, 1)
        self.unit_W0 = QtWidgets.QLabel(PWSlot28)
        self.unit_W0.setObjectName("unit_W0")
        self.gridLayout.addWidget(self.unit_W0, 0, 2, 1, 1)
        self.lf_W0 = FloatEdit(PWSlot28)
        self.lf_W0.setObjectName("lf_W0")
        self.gridLayout.addWidget(self.lf_W0, 0, 1, 1, 1)
        self.in_H0 = QtWidgets.QLabel(PWSlot28)
        self.in_H0.setObjectName("in_H0")
        self.gridLayout.addWidget(self.in_H0, 2, 0, 1, 1)
        self.in_W0 = QtWidgets.QLabel(PWSlot28)
        self.in_W0.setObjectName("in_W0")
        self.gridLayout.addWidget(self.in_W0, 0, 0, 1, 1)
        self.lf_W3 = FloatEdit(PWSlot28)
        self.lf_W3.setObjectName("lf_W3")
        self.gridLayout.addWidget(self.lf_W3, 1, 1, 1, 1)
        self.unit_W3 = QtWidgets.QLabel(PWSlot28)
        self.unit_W3.setObjectName("unit_W3")
        self.gridLayout.addWidget(self.unit_W3, 1, 2, 1, 1)
        self.in_W3 = QtWidgets.QLabel(PWSlot28)
        self.in_W3.setObjectName("in_W3")
        self.gridLayout.addWidget(self.in_W3, 1, 0, 1, 1)
        self.verticalLayout.addLayout(self.gridLayout)
        # output summary widget under the input grid
        self.w_out = WWSlotOut(PWSlot28)
        self.w_out.setObjectName("w_out")
        self.verticalLayout.addWidget(self.w_out)
        self.horizontalLayout.addLayout(self.verticalLayout)
        self.retranslateUi(PWSlot28)
        QtCore.QMetaObject.connectSlotsByName(PWSlot28)
        PWSlot28.setTabOrder(self.lf_W0, self.lf_W3)
        PWSlot28.setTabOrder(self.lf_W3, self.lf_H0)
        PWSlot28.setTabOrder(self.lf_H0, self.lf_H3)
        PWSlot28.setTabOrder(self.lf_H3, self.lf_R1)
        PWSlot28.setTabOrder(self.lf_R1, self.txt_constraint)

    def retranslateUi(self, PWSlot28):
        """Set all translatable strings (labels, units, constraint text)."""
        _translate = QtCore.QCoreApplication.translate
        PWSlot28.setWindowTitle(_translate("PWSlot28", "Form"))
        self.txt_constraint.setHtml(
            _translate(
                "PWSlot28",
                '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd">\n'
                '<html><head><meta name="qrichtext" content="1" /><style type="text/css">\n'
                "p, li { white-space: pre-wrap; }\n"
                "</style></head><body style=\" font-family:'MS Shell Dlg 2'; font-size:7.8pt; font-weight:400; font-style:normal;\">\n"
                '<p align="center" style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:12pt; font-weight:600; text-decoration: underline;">Constraints :</span></p>\n'
                '<p align="center" style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:14pt;">W0 &lt; 2*R1</span></p></body></html>',
            )
        )
        self.in_R1.setText(_translate("PWSlot28", "R1:"))
        self.unit_R1.setText(_translate("PWSlot28", "m"))
        self.unit_H3.setText(_translate("PWSlot28", "m"))
        self.in_H3.setText(_translate("PWSlot28", "H3:"))
        self.unit_H0.setText(_translate("PWSlot28", "m"))
        self.unit_W0.setText(_translate("PWSlot28", "m"))
        self.in_H0.setText(_translate("PWSlot28", "H0:"))
        self.in_W0.setText(_translate("PWSlot28", "W0:"))
        self.unit_W3.setText(_translate("PWSlot28", "m"))
        self.in_W3.setText(_translate("PWSlot28", "W3:"))
from pyleecan.GUI.Dialog.DMachineSetup.SWSlot.WWSlotOut.WWSlotOut import WWSlotOut
from pyleecan.GUI.Tools.FloatEdit import FloatEdit
from pyleecan.GUI.Resources import pyleecan_rc
|
import argparse
import logging
import os
from ceph_deploy import hosts
from ceph_deploy.cliutil import priority
from ceph_deploy.lib import remoto
from ceph_deploy.util.constants import default_components
from ceph_deploy.util.paths import gpg
LOG = logging.getLogger(__name__)
def sanitize_args(args):
    """
    Normalize CLI args whose defaults argparse cannot express on its own.

    Defaults ``release`` to ``'jewel'`` (and flags ``default_release``
    so later code knows the user did not pick one), then folds the
    deprecated ``--stable`` flag into ``release``.
    """
    if args.release is None:
        args.default_release = True
        args.release = 'jewel'
    # XXX This whole dance is because --stable is getting deprecated
    if args.stable is not None:
        LOG.warning('the --stable flag is deprecated, use --release instead')
        args.release = args.stable
    # XXX Tango ends here.
    return args
def detect_components(args, distro):
    """
    Resolve which Ceph packages should be installed, now that the package
    split means components like ceph-mon, ceph-osd and ceph-mds ship
    separately.

    Returns an empty list for ``--repo`` (repo files only, no packages),
    the distro defaults for ``--install-all`` or when no component flags
    were given, and otherwise the packages matching the selected flags.
    """
    # --repo means "write repo files only, install nothing"
    if args.repo:
        return []

    flag_to_package = {
        'install_osd': 'ceph-osd',
        'install_rgw': 'ceph-radosgw',
        'install_mds': 'ceph-mds',
        'install_mon': 'ceph-mon',
        'install_common': 'ceph-common',
        'install_tests': 'ceph-test',
    }
    if distro.is_rpm:
        defaults = default_components.rpm
    else:
        defaults = default_components.deb
        # deb names the rados gateway package differently than rpm
        flag_to_package['install_rgw'] = 'radosgw'

    if args.install_all:
        return defaults

    selected = [
        package for flag, package in flag_to_package.items()
        if getattr(args, flag, False)
    ]
    # no component flags and no --repo: fall back to installing everything
    return selected or defaults
def install(args):
    """Install Ceph on every host in ``args.host``.

    Per host: detect the platform, pick the component set, then install
    from (in priority order) a CLI/environment repo URL, a custom repo
    from the ceph-deploy config file, or the normal distro repos.
    """
    args = sanitize_args(args)

    # --repo: only lay down repo files, install no packages
    if args.repo:
        return install_repo(args)

    if args.version_kind == 'stable':
        version = args.release
    else:
        version = getattr(args, args.version_kind)

    version_str = args.version_kind

    if version:
        version_str += ' version {version}'.format(version=version)
    LOG.debug(
        'Installing %s on cluster %s hosts %s',
        version_str,
        args.cluster,
        ' '.join(args.host),
    )

    for hostname in args.host:
        LOG.debug('Detecting platform for host %s ...', hostname)
        distro = hosts.get(
            hostname,
            username=args.username,
            # XXX this should get removed once ceph packages are split for
            # upstream. If default_release is True, it means that the user is
            # trying to install on a RHEL machine and should expect to get RHEL
            # packages. Otherwise, it will need to specify either a specific
            # version, or repo, or a development branch. Other distro users
            # should not see any differences.
            use_rhceph=args.default_release,
        )
        LOG.info(
            'Distro info: %s %s %s',
            distro.name,
            distro.release,
            distro.codename
        )

        components = detect_components(args, distro)
        # sysvinit hardcodes the 'ceph' cluster name; refuse anything else
        if distro.init == 'sysvinit' and args.cluster != 'ceph':
            LOG.error('refusing to install on host: %s, with custom cluster name: %s' % (
                    hostname,
                    args.cluster,
                )
            )
            LOG.error('custom cluster names are not supported on sysvinit hosts')
            continue

        rlogger = logging.getLogger(hostname)
        rlogger.info('installing Ceph on %s' % hostname)

        cd_conf = getattr(args, 'cd_conf', None)

        # custom repo arguments; environment variables win over CLI flags
        repo_url = os.environ.get('CEPH_DEPLOY_REPO_URL') or args.repo_url
        gpg_url = os.environ.get('CEPH_DEPLOY_GPG_URL') or args.gpg_url
        gpg_fallback = gpg.url('release')

        if gpg_url is None and repo_url:
            LOG.warning('--gpg-url was not used, will fallback')
            LOG.warning('using GPG fallback: %s', gpg_fallback)
            gpg_url = gpg_fallback

        if args.local_mirror:
            if args.username:
                hostname = "%s@%s" % (args.username, hostname)
            # push the local mirror to the remote host and install from it
            remoto.rsync(hostname, args.local_mirror, '/opt/ceph-deploy/repo', distro.conn.logger, sudo=True)
            repo_url = 'file:///opt/ceph-deploy/repo'
            gpg_url = 'file:///opt/ceph-deploy/repo/release.asc'

        if repo_url:  # triggers using a custom repository
            # the user used a custom repo url, this should override anything
            # we can detect from the configuration, so warn about it
            if cd_conf:
                if cd_conf.get_default_repo():
                    rlogger.warning('a default repo was found but it was \
overridden on the CLI')
                if args.release in cd_conf.get_repos():
                    rlogger.warning('a custom repo was found but it was \
overridden on the CLI')

            rlogger.info('using custom repository location: %s', repo_url)
            distro.mirror_install(
                distro,
                repo_url,
                gpg_url,
                args.adjust_repos,
                components=components,
            )

        # Detect and install custom repos here if needed
        elif should_use_custom_repo(args, cd_conf, repo_url):
            LOG.info('detected valid custom repositories from config file')
            custom_repo(distro, args, cd_conf, rlogger)

        else:  # otherwise a normal installation
            distro.install(
                distro,
                args.version_kind,
                version,
                args.adjust_repos,
                components=components,
            )

        # Check the ceph version we just installed
        hosts.common.ceph_version(distro.conn)
        distro.conn.exit()
def should_use_custom_repo(args, cd_conf, repo_url):
    """
    Decide whether installation should use repositories defined in the
    ceph-deploy configuration file, instead of cramming everything next
    to the logic operator.

    Returns False when ``repo_url`` is set (a CLI override always wins),
    or when no configuration with usable repo sections is available.
    """
    if repo_url:
        # repo_url signals a CLI override, return False immediately
        return False
    if not (cd_conf and cd_conf.has_repos):
        # no config object, or it defines no repo sections at all
        return False
    # usable when a section matches the requested release, or when the
    # config declares a default repo section
    return bool(args.release in cd_conf.get_repos() or cd_conf.get_default_repo())
def custom_repo(distro, args, cd_conf, rlogger, install_ceph=None):
    """
    A custom repo install helper that will go through config checks to retrieve
    repos (and any extra repos defined) and install those

    ``cd_conf`` is the object built from argparse that holds the flags and
    information needed to determine what metadata from the configuration to be
    used.

    :param distro: remote distro module (provides ``repo_install``)
    :param args: parsed CLI arguments (``release`` is consulted)
    :param cd_conf: ceph-deploy configuration object with repo sections
    :param rlogger: per-host logger
    :param install_ceph: when explicitly False, only the repo file is
                         installed, not the packages
    """
    default_repo = cd_conf.get_default_repo()
    components = detect_components(args, distro)
    # a section named after the requested release takes precedence over
    # the configured default repo
    if args.release in cd_conf.get_repos():
        LOG.info('will use repository from conf: %s' % args.release)
        default_repo = args.release
    elif default_repo:
        LOG.info('will use default repository: %s' % default_repo)

    # At this point we know there is a cd_conf and that it has custom
    # repos make sure we were able to detect an actual repo
    if not default_repo:
        LOG.warning('a ceph-deploy config was found with repos \
but could not default to one')
    else:
        options = dict(cd_conf.items(default_repo))
        # install_ceph=False (exactly) means repo-only; any other value
        # (None/True) installs packages as well
        options['install_ceph'] = False if install_ceph is False else True
        extra_repos = cd_conf.get_list(default_repo, 'extra-repos')
        rlogger.info('adding custom repository file')
        try:
            # baseurl/gpgkey are mandatory keys; remaining options are
            # forwarded verbatim to the repo file
            distro.repo_install(
                distro,
                default_repo,
                options.pop('baseurl'),
                options.pop('gpgkey'),
                components=components,
                **options
            )
        except KeyError as err:
            raise RuntimeError('missing required key: %s in config section: %s' % (err, default_repo))

        # each extra repo section gets its own .repo file
        for xrepo in extra_repos:
            rlogger.info('adding extra repo file: %s.repo' % xrepo)
            options = dict(cd_conf.items(xrepo))
            try:
                distro.repo_install(
                    distro,
                    xrepo,
                    options.pop('baseurl'),
                    options.pop('gpgkey'),
                    components=components,
                    **options
                )
            except KeyError as err:
                raise RuntimeError('missing required key: %s in config section: %s' % (err, xrepo))
def install_repo(args):
    """
    For a user that only wants to install the repository only (and avoid
    installing Ceph and its dependencies).
    """
    cd_conf = getattr(args, 'cd_conf', None)

    for hostname in args.host:
        LOG.debug('Detecting platform for host %s ...', hostname)
        # XXX this should get removed once Ceph packages are split for
        # upstream. If default_release is True, it means that the user is
        # trying to install on a RHEL machine and should expect to get RHEL
        # packages. Otherwise, it will need to specify either a specific
        # version, or repo, or a development branch. Other distro users should
        # not see any differences.
        distro = hosts.get(hostname,
                           username=args.username,
                           use_rhceph=args.default_release)
        LOG.info(
            'Distro info: %s %s %s',
            distro.name,
            distro.release,
            distro.codename
        )
        remote_logger = logging.getLogger(hostname)
        custom_repo(distro, args, cd_conf, remote_logger, install_ceph=False)
def remove(args, purge):
    """
    Uninstall Ceph packages from every host in ``args.host``; when
    ``purge`` is True, purge them instead of a plain uninstall.
    """
    LOG.info('note that some dependencies *will not* be removed because they can cause issues with qemu-kvm')
    LOG.info('like: librbd1 and librados2')
    remove_action = 'Purging' if purge else 'Uninstalling'
    LOG.debug('%s on cluster %s hosts %s',
              remove_action,
              args.cluster,
              ' '.join(args.host))

    for hostname in args.host:
        LOG.debug('Detecting platform for host %s ...', hostname)
        distro = hosts.get(hostname,
                           username=args.username,
                           use_rhceph=True)
        LOG.info('Distro info: %s %s %s', distro.name, distro.release, distro.codename)
        remote_logger = logging.getLogger(hostname)
        remote_logger.info('%s Ceph on %s' % (remove_action, hostname))
        distro.uninstall(distro, purge=purge)
        distro.conn.exit()
def uninstall(args):
    # CLI entry point: remove Ceph packages without purging them
    remove(args, False)
def purge(args):
    # CLI entry point: remove AND purge Ceph packages
    remove(args, True)
def purgedata(args):
    """
    Delete Ceph data (/var/lib/ceph and /etc/ceph) from the given hosts.

    Refuses to run if the ``ceph`` executable is still present on any
    host, to avoid destroying data under a live installation.
    """
    LOG.debug(
        'Purging data from cluster %s hosts %s',
        args.cluster,
        ' '.join(args.host),
    )
    # first pass: detect hosts that still have Ceph installed
    installed_hosts = []
    for hostname in args.host:
        distro = hosts.get(hostname, username=args.username)
        ceph_is_installed = distro.conn.remote_module.which('ceph')
        if ceph_is_installed:
            installed_hosts.append(hostname)
        distro.conn.exit()

    if installed_hosts:
        LOG.error("Ceph is still installed on: %s", installed_hosts)
        raise RuntimeError("refusing to purge data while Ceph is still installed")

    # second pass: actually remove the data
    for hostname in args.host:
        distro = hosts.get(hostname, username=args.username)
        LOG.info(
            'Distro info: %s %s %s',
            distro.name,
            distro.release,
            distro.codename
        )
        rlogger = logging.getLogger(hostname)
        rlogger.info('purging data on %s' % hostname)

        # Try to remove the contents of /var/lib/ceph first, don't worry
        # about errors here, we deal with them later on
        remoto.process.check(
            distro.conn,
            [
                'rm', '-rf', '--one-file-system', '--', '/var/lib/ceph',
            ]
        )

        # If we failed in the previous call, then we probably have OSDs
        # still mounted, so we unmount them here
        if distro.conn.remote_module.path_exists('/var/lib/ceph'):
            rlogger.warning(
                'OSDs may still be mounted, trying to unmount them'
            )
            remoto.process.run(
                distro.conn,
                [
                    'find', '/var/lib/ceph',
                    '-mindepth', '1',
                    '-maxdepth', '2',
                    '-type', 'd',
                    '-exec', 'umount', '{}', ';',
                ]
            )

        # And now we try again to remove the contents, since OSDs should be
        # unmounted, but this time we do check for errors
        remoto.process.run(
            distro.conn,
            [
                'rm', '-rf', '--one-file-system', '--', '/var/lib/ceph',
            ]
        )
        remoto.process.run(
            distro.conn,
            [
                'rm', '-rf', '--one-file-system', '--', '/etc/ceph/',
            ]
        )
        distro.conn.exit()
class StoreVersion(argparse.Action):
    """
    Like ``"store"`` but also remember which one of the exclusive
    options was set.

    There are three kinds of versions: stable, testing and dev.
    This sets ``version_kind`` to be the right one of the above.

    This kludge essentially lets us differentiate explicitly set
    values from defaults.
    """

    def __call__(self, parser, namespace, values, option_string=None):
        setattr(namespace, self.dest, values)
        if self.dest == 'release':
            # --release replaces the deprecated --stable; rewrite dest so
            # version_kind below reads 'stable'. NOTE(review): this mutates
            # the shared Action instance on purpose (self.dest stays
            # 'stable' afterwards) -- part of the kludge described above.
            self.dest = 'stable'
        namespace.version_kind = self.dest
@priority(20)
def make(parser):
    """
    Install Ceph packages on remote hosts.
    """
    # only one version-selection flag may be used at a time
    version = parser.add_mutually_exclusive_group()

    # XXX deprecated in favor of release
    version.add_argument(
        '--stable',
        nargs='?',
        action=StoreVersion,
        metavar='CODENAME',
        help='[DEPRECATED] install a release known as CODENAME\
(done by default) (default: %(default)s)',
    )

    version.add_argument(
        '--release',
        nargs='?',
        action=StoreVersion,
        metavar='CODENAME',
        help='install a release known as CODENAME\
(done by default) (default: %(default)s)',
    )

    version.add_argument(
        '--testing',
        nargs=0,
        action=StoreVersion,
        help='install the latest development release',
    )

    version.add_argument(
        '--dev',
        nargs='?',
        action=StoreVersion,
        const='master',
        metavar='BRANCH_OR_TAG',
        help='install a bleeding edge build from Git branch\
or tag (default: %(default)s)',
    )

    version.add_argument(
        '--dev-commit',
        nargs='?',
        action=StoreVersion,
        metavar='COMMIT',
        help='install a bleeding edge build from Git commit',
    )

    version.set_defaults(
        stable=None,  # XXX deprecated in favor of release
        release=None,  # Set the default release in sanitize_args()
        dev='master',
        version_kind='stable',
    )

    # component selection flags; each limits installation to one component
    parser.add_argument(
        '--mon',
        dest='install_mon',
        action='store_true',
        help='install the mon component only',
    )

    parser.add_argument(
        '--mds',
        dest='install_mds',
        action='store_true',
        help='install the mds component only',
    )

    parser.add_argument(
        '--rgw',
        dest='install_rgw',
        action='store_true',
        help='install the rgw component only',
    )

    parser.add_argument(
        '--osd',
        dest='install_osd',
        action='store_true',
        help='install the osd component only',
    )

    parser.add_argument(
        '--tests',
        dest='install_tests',
        action='store_true',
        help='install the testing components',
    )

    parser.add_argument(
        '--cli', '--common',
        dest='install_common',
        action='store_true',
        help='install the common component only',
    )

    parser.add_argument(
        '--all',
        dest='install_all',
        action='store_true',
        help='install all Ceph components (mon, osd, mds, rgw) except tests. This is the default',
    )

    # repo handling is mutually exclusive: adjust, don't adjust, or
    # repo-file-only installation
    repo = parser.add_mutually_exclusive_group()

    repo.add_argument(
        '--adjust-repos',
        dest='adjust_repos',
        action='store_true',
        help='install packages modifying source repos',
    )

    repo.add_argument(
        '--no-adjust-repos',
        dest='adjust_repos',
        action='store_false',
        help='install packages without modifying source repos',
    )

    repo.add_argument(
        '--repo',
        action='store_true',
        help='install repo files only (skips package installation)',
    )

    repo.set_defaults(
        adjust_repos=True,
    )

    parser.add_argument(
        'host',
        metavar='HOST',
        nargs='+',
        help='hosts to install on',
    )

    parser.add_argument(
        '--local-mirror',
        nargs='?',
        const='PATH',
        default=None,
        help='Fetch packages and push them to hosts for a local repo mirror',
    )

    parser.add_argument(
        '--repo-url',
        nargs='?',
        dest='repo_url',
        help='specify a repo URL that mirrors/contains Ceph packages',
    )

    parser.add_argument(
        '--gpg-url',
        nargs='?',
        dest='gpg_url',
        help='specify a GPG key URL to be used with custom repos\
(defaults to ceph.com)'
    )

    # dispatch to install() when this subcommand is selected
    parser.set_defaults(
        func=install,
    )
@priority(80)
def make_uninstall(parser):
    """
    Remove Ceph packages from remote hosts.
    """
    parser.add_argument(
        'host',
        metavar='HOST',
        nargs='+',
        help='hosts to uninstall Ceph from',
    )
    # dispatch to uninstall() (remove without purging)
    parser.set_defaults(
        func=uninstall,
    )
@priority(80)
def make_purge(parser):
    """
    Remove Ceph packages from remote hosts and purge all data.
    """
    parser.add_argument(
        'host',
        metavar='HOST',
        nargs='+',
        help='hosts to purge Ceph from',
    )
    # dispatch to purge() (remove with purging)
    parser.set_defaults(
        func=purge,
    )
@priority(80)
def make_purge_data(parser):
    """
    Purge (delete, destroy, discard, shred) any Ceph data from /var/lib/ceph
    """
    parser.add_argument(
        'host',
        metavar='HOST',
        nargs='+',
        help='hosts to purge Ceph data from',
    )
    # dispatch to purgedata()
    parser.set_defaults(
        func=purgedata,
    )
|
# -*- coding: utf-8; tab-width: 4; indent-tabs-mode: f; python-indent: 4 -*-
#
# Copyright (c) 2015-2019 Intel, Inc. All rights reserved.
# Copyright (c) 2019 Triad National Security, LLC. All rights
# reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
#
# $HEADER$
#
import os, shutil
from urllib.parse import urlparse
from FetchMTTTool import *
from distutils.spawn import find_executable
## @addtogroup Tools
# @{
# @addtogroup Fetch
# @section Git
# Plugin for getting software via Git
# @param url URL to access the repository
# @param username Username required for accessing the repository
# @param password Password required for that user to access the repository
# @param pwfile File where password can be found
# @param branch Branch (if not master) to be downloaded; mutually exclusive with `commit` and `pr`
# @param commit Commit (hash prefix, tag or other commit expression) to be downloaded; mutually exclusive with `branch` and `pr`
# @param pr Pull request to be downloaded; mutually exclusive with `branch` and `commit`
# @param subdir Subdirectory of interest in repository
# @param modules_unload Modules to unload
# @param modules Modules to load
# @param modules_swap Modules to swap
# @param retry Number of times to retry on failure. Default is 0.
# @}
class Git(FetchMTTTool):

    def __init__(self):
        # initialise parent class
        FetchMTTTool.__init__(self)
        self.activated = False
        # track the repos we have processed so we
        # don't do them multiple times
        self.done = {}
        # option name -> (default value, help text)
        self.options = {
            'url': (None, "URL to access the repository"),
            'username': (None, "Username required for accessing the repository"),
            'password': (None, "Password required for that user to access the repository"),
            'pwfile': (None, "File where password can be found"),
            'branch': (None, "Branch (if not master) to be downloaded; mutually exclusive with `commit` and `pr`"),
            'commit': (None, "Commit (hash prefix, tag or other commit expression) to be downloaded; mutually exclusive with `branch` and `pr`"),
            'pr': (None, "Pull request to be downloaded; mutually exclusive with `branch` and `commit`"),
            'subdir': (None, "Subdirectory of interest in repository"),
            'modules': (None, "Modules to load"),
            'modules_unload': (None, "Modules to unload"),
            'modules_swap': (None, "Modules to swap"),
            'retry': (0, "Number of times to retry on failure. Default is 0."),
        }
        return
def activate(self):
    # Activate this plugin at most once.
    # NOTE(review): self.activated is never set True here -- presumably
    # the base class or plugin manager flips it; confirm upstream.
    if not self.activated:
        # use the automatic procedure from IPlugin
        IPlugin.activate(self)
    return
def deactivate(self):
    # Deactivate via the IPlugin base procedure
    IPlugin.deactivate(self)
    return
def print_name(self):
    # Display name of this fetch plugin
    return "Git"
def print_options(self, testDef, prefix):
    # Pretty-print the supported options, one per line, each prefixed
    # with ``prefix`` (e.g. for indentation in help output)
    lines = testDef.printOptions(self.options)
    for line in lines:
        print(prefix + line)
    return
def execute(self, log, keyvals, testDef):
    """
    Clone or update the requested Git repository into the scratch area.

    Fills ``log`` with 'status', 'stdout', 'stderr', 'hash' and
    'location' (absolute checkout path, adjusted for any ``subdir``).
    ``keyvals`` are the section options and override the defaults in
    ``self.options``. Results are cached per repo name in ``self.done``
    so later sections referencing the same repo reuse the checkout.
    Failures are retried up to ``retry`` times.
    """
    testDef.logger.verbose_print("Git Execute")
    # parse any provided options - these will override the defaults
    cmds = {}
    testDef.parseOptions(log, self.options, keyvals, cmds)
    # check that they gave us a URL
    try:
        if cmds['url'] is not None:
            url = cmds['url']
    except KeyError:
        log['status'] = 1
        log['stderr'] = "No repository URL was provided"
        return
    testDef.logger.verbose_print("Working repo " + url)
    username = cmds['username']
    password = None
    # see if they gave us a password
    try:
        if cmds['password'] is not None:
            password = cmds['password']
        else:
            # fall back to a password file, if any
            try:
                if cmds['pwfile'] is not None:
                    if os.path.exists(cmds['pwfile']):
                        f = open(cmds['pwfile'], 'r')
                        password = f.readline().strip()
                        f.close()
                    else:
                        log['status'] = 1
                        log['stderr'] = "Password file " + cmds['pwfile'] + " does not exist"
                        return
            except KeyError:
                pass
    except KeyError:
        # if not, did they give us a file where we can find the password
        try:
            if cmds['pwfile'] is not None:
                if os.path.exists(cmds['pwfile']):
                    f = open(cmds['pwfile'], 'r')
                    password = f.readline().strip()
                    f.close()
                else:
                    log['status'] = 1
                    log['stderr'] = "Password file " + cmds['pwfile'] + " does not exist"
                    return
        except KeyError:
            pass
    # check for sanity - if a password was given, then
    # we must have a username
    if password is not None:
        if username is None:
            log['status'] = 1
            log['stderr'] = "Password without username"
            return
        # find the "//"
        (leader,tail) = url.split("//", 1)
        # put the username:password into the url
        url = leader + "//" + username + ":" + password + "@" + tail
    elif username is not None:
        # find the "//"
        (leader,tail) = url.split("//", 1)
        # put the username:password into the url
        url = leader + "//" + username + "@" + tail
    testDef.logger.verbose_print("Working final repo " + url)
    # the path component of the parser output contains
    # the name of the repo
    repo = os.path.basename(urlparse(url).path)
    # check for edge case of path ending in .git
    if repo.endswith(".git"):
        repo = repo[0:len(repo)-4]
    # Apply any requested environment module settings
    status,stdout,stderr = testDef.modcmd.applyModules(log['section'], cmds, testDef)
    if 0 != status:
        log['status'] = status
        log['stdout'] = stdout
        log['stderr'] = stderr
        return
    # now look for the executable in our path
    if not find_executable("git"):
        log['status'] = 1
        log['stderr'] = "Executable git not found"
        return
    # see if they asked for a specific branch
    branch = None
    try:
        if cmds['branch'] is not None:
            branch = cmds['branch']
    except KeyError:
        pass
    # or if they asked for a specific commit
    commit = None
    try:
        if cmds['commit'] is not None:
            commit = cmds['commit']
    except KeyError:
        pass
    # or if they asked for a specific PR
    pr = None
    try:
        if cmds['pr'] is not None:
            pr = cmds['pr']
    except KeyError:
        pass
    # cannot have more than one
    if (1 if branch is not None else 0) + (1 if commit is not None else 0) + (1 if pr is not None else 0) > 1:
        log['status'] = 1
        log['stderr'] = "Cannot specify more than one of a branch, a commit, and a PR"
        return
    # see if we have already serviced this one
    try:
        rep = self.done[repo]
        if 0 != rep['status']:
            log['status'] = rep['status']
            log['stderr'] = "Prior attempt to clone or update repo {0} failed".format(repo)
            return
        # log the status
        log['status'] = rep['status']
        log['hash'] = rep['hash']
        # set the location
        try:
            try:
                # cached location includes the prior subdir; strip it to
                # get the repo root before applying this section's subdir
                sbd = rep['subdir']
                log['location'] = rep['location'][:-(len(sbd))]
            except:
                log['location'] = rep['location']
            if cmds['subdir'] is not None:
                # check that this subdirectory actually exists
                ckdir = os.path.join(log['location'], cmds['subdir'])
                if not os.path.exists(ckdir):
                    log['status'] = 1
                    log['stderr'] = "Subdirectory " + cmds['subdir'] + " was not found"
                    return
                if not os.path.isdir(ckdir):
                    log['status'] = 1
                    log['stderr'] = "Subdirectory " + cmds['subdir'] + " is not a directory"
                    return
                log['location'] = ckdir
        except:
            pass
        # the cache only satisfies this request if the same ref was used;
        # otherwise fall through and refresh the checkout
        if branch is not None:
            try:
                if branch == rep['branch']:
                    return
            except:
                pass
        elif commit is not None:
            try:
                if commit == rep['commit']:
                    return
            except:
                pass
        elif pr is not None:
            try:
                if pr == rep['pr']:
                    return
            except:
                pass
        else:
            return
    except:
        pass
    # record our current location
    cwd = os.getcwd()
    dst = os.path.join(testDef.options['scratchdir'], log['section'].replace(":","_"))
    try:
        if not os.path.exists(dst): os.mkdir(dst)
    except:
        log['status'] = 1
        log['stderr'] = "Unable to create " + dst
        return
    retry = cmds['retry']
    if retry < 0:
        log['status'] = 1
        log['stderr'] = "the retry count must be >= 0"
        return
    try_count = 0
    while try_count <= retry:
        if try_count > 0:
            if 'stderr' in log:
                testDef.logger.verbose_print("Error was: %s" % log['stderr'])
            testDef.logger.verbose_print("Retrying... (%s retries left)" % (retry - try_count))
        try_count += 1
        # change to the scratch directory
        os.chdir(dst)
        # see if this software has already been cloned
        results = {}
        if os.path.exists(repo):
            if not os.path.isdir(repo):
                log['status'] = 1
                log['stderr'] = "Cannot update or clone repository {0} as a file of that name already exists".format(repo)
                # track that we serviced this one
                rp = {}
                rp['status'] = 1
                if cmds['subdir'] is not None:
                    rp['subdir'] = cmds['subdir']
                self.done[repo] = rp
                os.chdir(cwd)
                continue
            # if they specified a pull request, then just blow it away
            # and reinstall
            if pr is not None:
                shutil.rmtree(repo)
                results = testDef.execmd.execute(cmds, ["git", "clone", url], testDef)
                if 0 != results['status']:
                    log['status'] = results['status']
                    log['stderr'] = "Cannot clone repository {0}".format(repo)
                    # track that we serviced this one
                    rp = {}
                    rp['status'] = results['status']
                    rp['pr'] = pr
                    if cmds['subdir'] is not None:
                        rp['subdir'] = cmds['subdir']
                    self.done[repo] = rp
                    os.chdir(cwd)
                    if log['status'] == 0:
                        return
                    else:
                        continue
                os.chdir(repo)
                # fetch the PR head into a local branch named pull_<pr>
                ptgt = "pull/"+ pr + "/head:pull_" + pr
                results = testDef.execmd.execute(cmds, ["git", "fetch", "origin", ptgt], testDef)
                if 0 != results['status']:
                    log['status'] = results['status']
                    log['stderr'] = "Cannot fetch PR {0}".format(repo)
                    # track that we serviced this one
                    rp = {}
                    rp['status'] = results['status']
                    rp['pr'] = pr
                    if cmds['subdir'] is not None:
                        rp['subdir'] = cmds['subdir']
                    self.done[repo] = rp
                    os.chdir(cwd)
                    if log['status'] == 0:
                        return
                    else:
                        continue
                results = testDef.execmd.execute(cmds, ["git", "checkout", "pull_" + pr], testDef)
                if 0 != results['status']:
                    log['status'] = results['status']
                    log['stderr'] = "Cannot checkout PR branch {0}".format(repo)
                    # track that we serviced this one
                    rp = {}
                    rp['status'] = results['status']
                    rp['pr'] = pr
                    if cmds['subdir'] is not None:
                        rp['subdir'] = cmds['subdir']
                    self.done[repo] = rp
                    os.chdir(cwd)
                    if log['status'] == 0:
                        return
                    else:
                        continue
            else:
                # move to that location
                os.chdir(repo)
                # if they specified a branch, see if we are on it
                if branch is not None:
                    results = testDef.execmd.execute(cmds, ["git", "branch"], testDef)
                    if 0 != results['status']:
                        log['status'] = results['status']
                        log['stderr'] = results['stderr']
                        log['stdout'] = results['stdout']
                        os.chdir(cwd)
                        continue
                    # find the current branch (the line marked with '*')
                    if isinstance(results['stdout'], list):
                        if results['stdout']:
                            t = [line for line in results['stdout'] if line.startswith('*')][0]
                        else:
                            t = ''
                    else:
                        if results['stdout'].strip():
                            t = [line for line in results['stdout'].split('\n') if line.startswith('*')][0]
                        else:
                            t = ''
                    if branch not in t:
                        # we need to whack the current installation and reinstall it
                        os.chdir(dst)
                        shutil.rmtree(repo)
                        results = testDef.execmd.execute(cmds, ["git", "clone", "-b", branch, "--single-branch", url], testDef)
                        if 0 != results['status']:
                            log['status'] = results['status']
                            log['stderr'] = "Cannot clone repository branch {0}".format(repo)
                            # track that we serviced this one
                            rp = {}
                            rp['status'] = results['status']
                            rp['branch'] = branch
                            if cmds['subdir'] is not None:
                                rp['subdir'] = cmds['subdir']
                            self.done[repo] = rp
                            os.chdir(cwd)
                            if log['status'] == 0:
                                return
                            else:
                                continue
                        os.chdir(repo)
                    else:
                        # if they want us to leave it as-is, then we are done
                        try:
                            if cmds['asis']:
                                results['status'] = 0
                                results['stdout'] = None
                                results['stderr'] = None
                        except KeyError:
                            # since it already exists, let's just update it
                            results = testDef.execmd.execute(cmds, ["git", "pull"], testDef)
                            if 0 != results['status']:
                                log['status'] = results['status']
                                log['stderr'] = results['stderr']
                                log['stdout'] = results['stdout']
                                os.chdir(cwd)
                                continue
                # if they specified a commit, see if it gives the same hash we're on
                elif commit is not None:
                    results = testDef.execmd.execute(cmds, ["git", "log", "HEAD", "-n1", "--format=%H"], testDef)
                    if 0 != results['status']:
                        log['status'] = results['status']
                        log['stderr'] = results['stderr']
                        log['stdout'] = results['stdout']
                        os.chdir(cwd)
                        continue
                    head_commit_hash, requested_commit_hash = None, None
                    if isinstance(results['stdout'], list):
                        if results['stdout']:
                            head_commit_hash = results['stdout'][0]
                    else:
                        if results['stdout'].strip():
                            head_commit_hash = results['stdout'].split('\n')[0]
                    # if getting results['stdout'] didn't work (f.e. because of merge_stdout_stderr)
                    # then don't even try for the requested commit
                    if head_commit_hash != None:
                        results = testDef.execmd.execute(cmds, ["git", "log", commit, "-n1", "--format=%H"], testDef)
                        if 0 != results['status']:
                            log['status'] = results['status']
                            log['stderr'] = results['stderr']
                            log['stdout'] = results['stdout']
                            os.chdir(cwd)
                            continue
                        if isinstance(results['stdout'], list):
                            if results['stdout']:
                                requested_commit_hash = results['stdout'][0]
                        else:
                            if results['stdout'].strip():
                                requested_commit_hash = results['stdout'].split('\n')[0]
                    if head_commit_hash == None or head_commit_hash != requested_commit_hash:
                        # we need to whack the current installation and reinstall it
                        os.chdir(dst)
                        shutil.rmtree(repo)
                        results = testDef.execmd.execute(cmds, ["git", "clone", "--no-checkout", url], testDef)
                        if 0 != results['status']:
                            log['status'] = results['status']
                            log['stderr'] = "Cannot clone repository {0}".format(repo)
                            # track that we serviced this one
                            rp = {}
                            rp['status'] = results['status']
                            rp['commit'] = commit
                            if cmds['subdir'] is not None:
                                rp['subdir'] = cmds['subdir']
                            self.done[repo] = rp
                            os.chdir(cwd)
                            if log['status'] == 0:
                                return
                            else:
                                continue
                        os.chdir(repo)
                        results = testDef.execmd.execute(cmds, ["git", "checkout", commit, "--detach"], testDef)
                        if 0 != results['status']:
                            log['status'] = results['status']
                            log['stderr'] = "Cannot checkout commit {0} of repository {1}".format(commit, repo)
                            # track that we serviced this one
                            rp = {}
                            rp['status'] = results['status']
                            rp['commit'] = head_commit_hash
                            if cmds['subdir'] is not None:
                                rp['subdir'] = cmds['subdir']
                            self.done[repo] = rp
                            os.chdir(cwd)
                            if log['status'] == 0:
                                return
                            else:
                                continue
                    else:
                        # if they want us to leave it as-is, then we are done
                        try:
                            if cmds['asis']:
                                results['status'] = 0
                                results['stdout'] = None
                                results['stderr'] = None
                        except KeyError:
                            # since it already exists, let's just update it
                            results = testDef.execmd.execute(cmds, ["git", "pull"], testDef)
                            if 0 != results['status']:
                                log['status'] = results['status']
                                log['stderr'] = results['stderr']
                                log['stdout'] = results['stdout']
                                os.chdir(cwd)
                                continue
                else:
                    # if they want us to leave it as-is, then we are done
                    try:
                        if cmds['asis']:
                            results['status'] = 0
                            results['stdout'] = None
                            results['stderr'] = None
                    except KeyError:
                        # since it already exists, let's just update it
                        results = testDef.execmd.execute(cmds, ["git", "pull"], testDef)
                        if 0 != results['status']:
                            log['status'] = results['status']
                            log['stderr'] = results['stderr']
                            log['stdout'] = results['stdout']
                            os.chdir(cwd)
                            continue
        else:
            # clone it
            if branch is not None:
                results = testDef.execmd.execute(cmds, ["git", "clone", "-b", branch, "--single-branch", url], testDef)
                if 0 != results['status']:
                    log['status'] = results['status']
                    log['stderr'] = results['stderr']
                    log['stdout'] = results['stdout']
                    os.chdir(cwd)
                    continue
            elif commit is not None:
                results = testDef.execmd.execute(cmds, ["git", "clone", "--no-checkout", url], testDef)
                if 0 != results['status']:
                    log['status'] = results['status']
                    log['stderr'] = "Cannot clone repository {0}".format(repo)
                    # track that we serviced this one
                    rp = {}
                    rp['status'] = results['status']
                    rp['commit'] = commit
                    if cmds['subdir'] is not None:
                        rp['subdir'] = cmds['subdir']
                    self.done[repo] = rp
                    os.chdir(cwd)
                    if log['status'] == 0:
                        return
                    else:
                        continue
                os.chdir(repo)
                results = testDef.execmd.execute(cmds, ["git", "checkout", commit, "--detach"], testDef)
                if 0 != results['status']:
                    log['status'] = results['status']
                    log['stderr'] = results['stderr']
                    log['stdout'] = results['stdout']
                    os.chdir(cwd)
                    continue
            elif pr is not None:
                results = testDef.execmd.execute(cmds, ["git", "clone", url], testDef)
                if 0 != results['status']:
                    log['status'] = results['status']
                    log['stderr'] = "Cannot clone repository {0}".format(repo)
                    # track that we serviced this one
                    rp = {}
                    rp['status'] = results['status']
                    rp['pr'] = pr
                    if cmds['subdir'] is not None:
                        rp['subdir'] = cmds['subdir']
                    self.done[repo] = rp
                    os.chdir(cwd)
                    if log['status'] == 0:
                        return
                    else:
                        continue
                os.chdir(repo)
                ptgt = "pull/"+ pr + "/head:pull_" + pr
                results = testDef.execmd.execute(cmds, ["git", "fetch", "origin", ptgt], testDef)
                if 0 != results['status']:
                    log['status'] = results['status']
                    log['stderr'] = "Cannot fetch PR {0}".format(repo)
                    # track that we serviced this one
                    rp = {}
                    rp['status'] = results['status']
                    rp['pr'] = pr
                    if cmds['subdir'] is not None:
                        rp['subdir'] = cmds['subdir']
                    self.done[repo] = rp
                    os.chdir(cwd)
                    if log['status'] == 0:
                        return
                    else:
                        continue
                results = testDef.execmd.execute(cmds, ["git", "checkout", "pull_" + pr], testDef)
                if 0 != results['status']:
                    log['status'] = results['status']
                    log['stderr'] = results['stderr']
                    log['stdout'] = results['stdout']
                    os.chdir(cwd)
                    continue
            else:
                results = testDef.execmd.execute(cmds, ["git", "clone", url], testDef)
                if 0 != results['status']:
                    log['status'] = results['status']
                    log['stderr'] = results['stderr']
                    log['stdout'] = results['stdout']
                    os.chdir(cwd)
                    continue
                # move into it
                # NOTE(review): the indentation of the original source was
                # lost; this chdir is placed in the plain-clone arm since the
                # commit/pr arms above already chdir into the repo. If it
                # belongs at the outer level, the branch-clone arm would also
                # need it -- verify against upstream MTT.
                os.chdir(repo)
        # record the result
        log['status'] = results['status']
        log['stdout'] = results['stdout']
        log['stderr'] = results['stderr']

        # get the current hash and record it
        hashresult = testDef.execmd.execute(cmds, ["git", "log", "-1", "--oneline"], testDef)
        # the hash is the first field before the space
        if hashresult['stdout']:
            log['hash'] = hashresult['stdout'][0].split()[0]
        else:
            log['hash'] = None

        # log our absolute location so others can find it
        log['location'] = os.getcwd()

        # if they indicated that a specific subdirectory was
        # the target, then modify the location accordingly
        cmdlog = 'Fetch CMD: ' + ' '.join(cmds)
        testDef.logger.verbose_print(cmdlog)
        if cmds['subdir'] is not None:
            # check that this subdirectory actually exists
            ckdir = os.path.join(log['location'], cmds['subdir'])
            if not os.path.exists(ckdir):
                log['status'] = 1
                log['stderr'] = "Subdirectory " + cmds['subdir'] + " was not found"
                status,stdout,stderr = testDef.modcmd.revertModules(log['section'], testDef)
                os.chdir(cwd)
                continue
            if not os.path.isdir(ckdir):
                log['status'] = 1
                log['stderr'] = "Subdirectory " + cmds['subdir'] + " is not a directory"
                status,stdout,stderr = testDef.modcmd.revertModules(log['section'], testDef)
                os.chdir(cwd)
                continue
            log['location'] = ckdir

        # track that we serviced this one - save the absolute location so
        # any subsequent requests with a different subdir can be pointed to
        # the correct location
        rp = {}
        rp['status'] = results['status']
        rp['location'] = log['location']
        if pr is not None:
            rp['pr'] = pr
        elif commit is not None:
            rp['commit'] = commit
        elif branch is not None:
            rp['branch'] = branch
        if cmds['subdir'] is not None:
            rp['subdir'] = cmds['subdir']
        rp['hash'] = log['hash']
        self.done[repo] = rp

        # Revert any requested environment module settings
        status,stdout,stderr = testDef.modcmd.revertModules(log['section'], testDef)
        if 0 != status:
            log['status'] = status
            log['stdout'] = stdout
            log['stderr'] = stderr
            os.chdir(cwd)
            continue

        # change back to the original directory
        os.chdir(cwd)
        break
    return
|
#!/usr/bin/python3.7
# UTF8
# Date: Tue 27 Aug 2019 14:27:23 CEST
# Author: Nicolas Flandrois
# Calcul de son IMC
# (Indice de Masse Corporelle)
# Calcul de l'IMC :
# IMC = weight(kg) / height²(m)
# Avec ce résultat, on peut se situer par rapport aux catégories ci-dessous :
# Catégorie de l'IMC (kg/m2) Classification Risque de développer des problèmes de santé
# IMC inférieur à 16 Maigreur extrême Risque de maladie accrue
# IMC compris entre 16 et 19 Maigreur Risque de maladie élevé
# IMC compris entre 20 et 25 Corpulence normale Risque de maladie faible
# IMC compris entre 25 et 30 Embonpoint Risque de maladie accrue
# IMC compris entre 30 et 35 Obésité de classe I Risque de maladie élevé
# IMC compris entre 35 et 40 Obésité de classe II Risque de maladie très élevé
# IMC supérieur à 40 Obésité de classe III Risque de maladie extrêmement élevé
# Définition de l'IMC :
# L’IMC (Indice de Masse Corporelle) est notamment utilisé par les diététiciens pour situer le poids d’une personne par rapport à la norme des personnes de sa taille et de son poids.
from math import pow
# Interactive prompt: explain what the IMC (BMI) is, then read the user's
# weight and height from stdin
print("""L’IMC (Indice de Masse Corporelle) est notamment utilisé par les
diététiciens pour situer le poids d’une personne par rapport à la norme
des personnes de sa taille et de son poids.\n""")

weight = float(input('Poids (kg)? \t'))   # raises ValueError on non-numeric input
height = float(input('Taille (m)? \t'))

# IMC = weight (kg) / height^2 (m), rounded to 2 decimals
imc = round((weight / pow(height, 2)), 2)
def target_weight(imc: int, height: float):
    """Return the weight (kg, rounded to 2 decimals) giving ``imc`` at ``height`` metres."""
    squared_height = height ** 2
    return round(imc * squared_height, 2)
# Report the computed IMC and its classification (see the table in the
# header comments). int() truncates, so each range() bucket matches the
# truncated IMC value in [low, high).
print(f'\nVotre IMC est de: \t{imc}\n')

if int(imc) in range(0, 16):
    print('\tMaigreur extrême \t Risque de maladie accrue')
elif int(imc) in range(16, 20):
    print('\tMaigreur \t Risque de maladie élevé')
elif int(imc) in range(20, 25):
    print('\tCorpulence normale \t Risque de maladie faible')
elif int(imc) in range(25, 30):
    print('\tEmbonpoint \t Risque de maladie accrue')
elif int(imc) in range(30, 35):
    print('\tObésité de classe I \t Risque de maladie élevé')
elif int(imc) in range(35, 40):
    print('\tObésité de classe II \t Risque de maladie très élevé')
else:
    print('\tObésité de classe III \t Risque de maladie extrêmement élevé')

# Suggest target weights to reach the "Corpulence normale" band (IMC 20-25):
# gain for underweight users, loss for overweight users
if int(imc) in range(0, 20):
    print('\n Pour être dans la catégorie cible suivante :\n \
>>>> Corpulence normale \t Risque de maladie faible')
    print(f"Poids Cible moyen :\t {target_weight(22, height)} kg. \
(Soit {round((target_weight(22, height)) - weight, 2)} kg à gagner.)")
    print(f"Poids Cible max/au plus mince :\t {target_weight(20, height)} kg. \
(Soit {round((target_weight(20, height)) - weight, 2)} kg à gagner.)")
    print(f"Poids Cible min/au plus gros :\t {target_weight(25, height)} kg. \
(Soit {round((target_weight(25, height)) - weight, 2)} kg à gagner.)")
elif int(imc) in range(20, 25):
    print('Votre Objectif est de maintenir votre forme. Vous êtes dans l\'IMC cible.')
else:
    print('\n Pour être dans la catégorie cible suivante :\n \
>>>> Corpulence normale \t Risque de maladie faible')
    print(f"Poids Cible moyen :\t {target_weight(22, height)} kg. (Soit \
{round((weight - target_weight(22, height)), 2)} kg à perdre.)")
    print(f"Poids Cible max/au plus mince :\t {target_weight(20, height)} kg. \
(Soit {round((weight - target_weight(20, height)), 2)} kg à perdre.)")
    print(f"Poids Cible min/au plus gros :\t {target_weight(25, height)} kg. \
(Soit {round((weight - target_weight(25, height)), 2)} kg à perdre.)")

# Closing disclaimer
print('\n\n\tCeci est une estimation grossière, et ne remplace pas l\'avis \n\
d\'un spécialiste de santé (Votre médecin généraliste, ou d\'un nutrisioniste)')
|
import json
import logging
import os
import tempfile
import asyncio
import platform
import aiohttp
from yarl import URL
import pytest
from aiohttp import ClientSession
from injector import (ClassAssistedBuilder, Injector, Module, inject, provider,
singleton)
from backup.config import Config, Setting
from backup.model import Coordinator
from dev.simulationserver import SimulationServer
from backup.drive import DriveRequests, DriveSource, FolderFinder
from backup.util import GlobalInfo, Estimator, Resolver, DataCache
from backup.ha import HaRequests, HaSource, HaUpdater
from backup.logger import reset
from backup.model import Model
from backup.time import Time
from backup.module import BaseModule
from backup.debugworker import DebugWorker
from backup.creds import Creds
from backup.server import ErrorStore
from backup.ha import AddonStopper
from backup.ui import UiServer
from .faketime import FakeTime
from .helpers import Uploader
from dev.ports import Ports
from dev.simulated_google import SimulatedGoogle
from dev.request_interceptor import RequestInterceptor
from dev.simulated_supervisor import SimulatedSupervisor
@singleton
class FsFaker():
    """Monkey-patches ``os.statvfs`` so tests can simulate arbitrary
    free/total disk-space conditions without touching a real filesystem.

    No-op on Windows, where ``os.statvfs`` does not exist.
    """

    @inject
    def __init__(self):
        # Defaults: 1 GiB free of a 1 GiB volume.
        self.bytes_free = 1024 * 1024 * 1024
        self.bytes_total = 1024 * 1024 * 1024
        self.old_method = None

    def start(self):
        """Install the fake statvfs implementation (non-Windows only)."""
        if platform.system() != "Windows":
            self.old_method = os.statvfs
            os.statvfs = self._hijack

    def stop(self):
        """Restore the real ``os.statvfs``.

        Bug fix: only restore when start() actually patched it — the original
        unconditionally assigned ``self.old_method``, clobbering
        ``os.statvfs`` with ``None`` if stop() ran without a prior start().
        """
        if platform.system() != "Windows" and self.old_method is not None:
            os.statvfs = self.old_method
            self.old_method = None

    def _hijack(self, path):
        # Fake statvfs_result tuple; f_frsize=1 so block counts are byte
        # counts — total in f_blocks, free in f_bfree. The `path` argument
        # is ignored: every path reports the same simulated volume.
        return os.statvfs_result((0, 1, int(self.bytes_total), int(self.bytes_free), 0, 0, 0, 0, 0, 255))

    def setFreeBytes(self, bytes_free, bytes_total=1):
        """Set the simulated free space (and optionally total capacity).

        Total is bumped up to at least the free amount so the simulated
        volume never reports more free space than it has.
        """
        self.bytes_free = bytes_free
        self.bytes_total = bytes_total
        if self.bytes_free > self.bytes_total:
            self.bytes_total = self.bytes_free
class ReaderHelper:
    """Thin HTTP test client over an aiohttp session for talking to the
    add-on's UI server and ingress server during integration tests."""

    def __init__(self, session, ui_port, ingress_port):
        self.session = session
        self.ui_port = ui_port
        self.ingress_port = ingress_port
        # Generous timeout so slow CI machines don't flake requests.
        self.timeout = aiohttp.ClientTimeout(total=20)

    def getUrl(self, ingress=True, ssl=False):
        """Build the base URL for the ingress (default) or UI endpoint."""
        if ssl:
            protocol = "https"
        else:
            protocol = "http"
        if ingress:
            return protocol + "://localhost:" + str(self.ingress_port) + "/"
        else:
            return protocol + "://localhost:" + str(self.ui_port) + "/"

    async def getjson(self, path, status=200, json=None, auth=None, ingress=True, ssl=False, sslcontext=None):
        """GET `path`, assert the response status, and return the JSON body."""
        async with self.session.get(self.getUrl(ingress, ssl) + path, json=json, auth=auth, ssl=sslcontext, timeout=self.timeout) as resp:
            assert resp.status == status
            return await resp.json()

    async def get(self, path, status=200, json=None, auth=None, ingress=True, ssl=False):
        """GET `path`, assert the response status, and return the text body.

        On an unexpected status the body is logged before the assertion fires
        to aid debugging.
        """
        async with self.session.get(self.getUrl(ingress, ssl) + path, json=json, auth=auth, timeout=self.timeout) as resp:
            if resp.status != status:
                # Bug fix: resp.text() is a coroutine and must be awaited —
                # the original logged the coroutine object, not the body.
                logging.getLogger().error(await resp.text())
            assert resp.status == status
            return await resp.text()

    async def postjson(self, path, status=200, json=None, ingress=True):
        """POST a JSON payload to `path` and return the decoded JSON response."""
        async with self.session.post(self.getUrl(ingress) + path, json=json, timeout=self.timeout) as resp:
            assert resp.status == status
            return await resp.json()

    async def assertError(self, path, error_type="generic_error", status=500, ingress=True, json=None):
        """Assert that `path` fails with the given error_type and HTTP status."""
        logging.getLogger().info("Requesting " + path)
        data = await self.getjson(path, status=status, ingress=ingress, json=json)
        assert data['error_type'] == error_type
# This module should only ever have bindings that can also be satisfied by MainModule
class TestModule(Module):
    """Injector module that binds test doubles (fake clock, canned
    credentials) plus the per-test Config and Ports instances."""

    def __init__(self, config: Config, ports: Ports):
        self.ports = ports
        self.config = config

    @provider
    @singleton
    def getDriveCreds(self, time: Time) -> Creds:
        # Canned Google Drive credentials; token "expiry" is set to now.
        return Creds(time, "test_client_id", time.now(), "test_access_token", "test_refresh_token", "test_client_secret")

    @provider
    @singleton
    def getTime(self) -> Time:
        # Controllable fake clock so tests never depend on wall time.
        return FakeTime()

    @provider
    @singleton
    def getPorts(self) -> Ports:
        return self.ports

    @provider
    @singleton
    def getConfig(self) -> Config:
        return self.config
@pytest.fixture
def event_loop():
    """Provide a fresh asyncio event loop for each test.

    On Windows the selector policy is installed first (NOTE(review):
    presumably required by the aiohttp-based stack — confirm).

    Bug fix: the original returned the loop without ever closing it,
    leaking the loop (and its selector fd) on every test; yielding and
    closing releases the resources.
    """
    if platform.system() == "Windows":
        asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
    loop = asyncio.new_event_loop()
    yield loop
    loop.close()
@pytest.fixture
async def generate_config(server_url, ports, cleandir):
    """Build the add-on Config with every external endpoint pointed at the
    local SimulationServer and every data file placed in the per-test
    temporary directory (``cleandir``)."""
    return Config.withOverrides({
        # All remote services resolve to the simulation server.
        Setting.DRIVE_URL: server_url,
        Setting.HASSIO_URL: server_url + "/",
        Setting.HOME_ASSISTANT_URL: server_url + "/core/api/",
        Setting.AUTHENTICATE_URL: server_url + "/drive/authorize",
        Setting.DRIVE_REFRESH_URL: server_url + "/oauth2/v4/token",
        Setting.DRIVE_AUTHORIZE_URL: server_url + "/o/oauth2/v2/auth",
        Setting.DRIVE_TOKEN_URL: server_url + "/token",
        Setting.REFRESH_URL: server_url + "/drive/refresh",
        Setting.ERROR_REPORT_URL: server_url + "/logerror",
        Setting.HASSIO_TOKEN: "test_header",
        # File paths are relative, i.e. inside the cleandir cwd.
        Setting.SECRETS_FILE_PATH: "secrets.yaml",
        Setting.CREDENTIALS_FILE_PATH: "credentials.dat",
        Setting.FOLDER_FILE_PATH: "folder.dat",
        Setting.RETAINED_FILE_PATH: "retained.json",
        Setting.ID_FILE_PATH: "id.json",
        Setting.DATA_CACHE_FILE_PATH: "data_cache.json",
        Setting.STOP_ADDON_STATE_PATH: "stop_addon.json",
        Setting.INGRESS_TOKEN_FILE_PATH: "ingress.dat",
        Setting.DEFAULT_DRIVE_CLIENT_ID: "test_client_id",
        Setting.DEFAULT_DRIVE_CLIENT_SECRET: "test_client_secret",
        Setting.BACKUP_DIRECTORY_PATH: cleandir,
        Setting.PORT: ports.ui,
        Setting.INGRESS_PORT: ports.ingress,
        # No startup delay so tests run immediately.
        Setting.SNAPSHOT_STARTUP_DELAY_MINUTES: 0,
    })
@pytest.fixture
async def injector(cleandir, ports, generate_config):
    """Assemble the dependency-injection container with test bindings and
    pre-seed the files the add-on expects to find at startup."""
    # Credentials with no expiry timestamp (third argument is None).
    drive_creds = Creds(FakeTime(), "test_client_id", None, "test_access_token", "test_refresh_token")
    with open(os.path.join(cleandir, "secrets.yaml"), "w") as f:
        f.write("for_unit_tests: \"password value\"\n")
    with open(os.path.join(cleandir, "credentials.dat"), "w") as f:
        f.write(json.dumps(drive_creds.serialize()))
    return Injector([BaseModule(), TestModule(generate_config, ports)])
@pytest.fixture
async def ui_server(injector, server):
    """Run the add-on's UI web server for the duration of a test."""
    # The UI serves files from ./static relative to the (temp) working dir.
    os.mkdir("static")
    # NOTE(review): this rebinding shadows the injected `server` fixture
    # argument (the SimulationServer), which is only needed as a dependency.
    server = injector.get(UiServer)
    await server.run()
    yield server
    await server.shutdown()


@pytest.fixture
def reader(server, ui_server, session, ui_port, ingress_port):
    # HTTP test client bound to the running UI and ingress ports.
    return ReaderHelper(session, ui_port, ingress_port)


@pytest.fixture
async def uploader(injector: Injector, server_url):
    # Builder-created helper that uploads test payloads to the sim server.
    return injector.get(ClassAssistedBuilder[Uploader]).build(host=server_url)


@pytest.fixture
async def google(injector: Injector):
    # Simulated Google Drive backend.
    return injector.get(SimulatedGoogle)


@pytest.fixture
async def interceptor(injector: Injector):
    # Lets tests inspect and fault-inject simulated HTTP traffic.
    return injector.get(RequestInterceptor)


@pytest.fixture
async def supervisor(injector: Injector, server, session):
    # Simulated Home Assistant supervisor API.
    return injector.get(SimulatedSupervisor)


@pytest.fixture
async def addon_stopper(injector: Injector):
    return injector.get(AddonStopper)


@pytest.fixture
async def server(injector, port, drive_creds: Creds, session):
    """Start the SimulationServer (fake Drive + supervisor endpoints) and
    stop it when the test finishes."""
    server = injector.get(SimulationServer)
    # start the server
    logging.getLogger().info("Starting SimulationServer on port " + str(port))
    await server.start(port)
    yield server
    await server.stop()
@pytest.fixture
async def data_cache(injector):
    return injector.get(DataCache)


@pytest.fixture
async def session(injector):
    # Shared aiohttp ClientSession, closed automatically after the test.
    async with injector.get(ClientSession) as session:
        yield session


@pytest.fixture
async def snapshot(coord, source, dest):
    """Create exactly one backup by syncing and return it.

    NOTE(review): depends on `source`/`dest` fixtures defined elsewhere
    in the test suite — not visible in this file chunk.
    """
    await coord.sync()
    assert len(coord.snapshots()) == 1
    return coord.snapshots()[0]


@pytest.fixture
async def fs(injector):
    # Patch os.statvfs for the duration of the test (see FsFaker).
    faker = injector.get(FsFaker)
    faker.start()
    yield faker
    faker.stop()


@pytest.fixture
async def estimator(injector, fs):
    # Depends on `fs` so disk-space queries hit the faked statvfs.
    return injector.get(Estimator)


@pytest.fixture
async def error_store(injector):
    return injector.get(ErrorStore)


@pytest.fixture
async def model(injector):
    return injector.get(Model)


@pytest.fixture
async def global_info(injector):
    return injector.get(GlobalInfo)


@pytest.fixture
async def server_url(port):
    # Base URL of the SimulationServer.
    return "http://localhost:" + str(port)


@pytest.fixture
async def ports(unused_tcp_port_factory):
    # Three distinct free TCP ports: simulation server, UI, and ingress.
    return Ports(unused_tcp_port_factory(), unused_tcp_port_factory(), unused_tcp_port_factory())


@pytest.fixture
async def port(ports: Ports):
    return ports.server


@pytest.fixture
async def ui_url(ports: Ports):
    return URL("http://localhost").with_port(ports.ingress)


@pytest.fixture
async def ui_port(ports: Ports):
    return ports.ui


@pytest.fixture
async def ingress_port(ports: Ports):
    return ports.ingress


@pytest.fixture
async def coord(injector):
    return injector.get(Coordinator)


@pytest.fixture()
async def updater(injector):
    return injector.get(HaUpdater)


@pytest.fixture()
async def cleandir():
    # Fresh temp dir per test; NOTE(review): the cwd is changed and never
    # restored afterwards, so later code must not rely on the original cwd.
    newpath = tempfile.mkdtemp()
    os.chdir(newpath)
    return newpath
@pytest.fixture
async def time(injector):
    # reset() comes from backup.logger — presumably clears captured logger
    # state between tests; verify against that module.
    reset()
    return injector.get(Time)


@pytest.fixture
async def config(injector):
    return injector.get(Config)


@pytest.fixture
async def drive_creds(injector):
    return injector.get(Creds)


@pytest.fixture
async def drive(injector, server, session):
    # Requires the simulation server to be running.
    return injector.get(DriveSource)


@pytest.fixture
async def ha(injector, server, session):
    # Requires the simulation server to be running.
    return injector.get(HaSource)


@pytest.fixture
async def ha_requests(injector, server):
    return injector.get(HaRequests)


@pytest.fixture
async def drive_requests(injector, server):
    return injector.get(DriveRequests)


@pytest.fixture
async def resolver(injector):
    return injector.get(Resolver)


@pytest.fixture
async def client_identifier(injector):
    return injector.get(Config).clientIdentifier()


@pytest.fixture
async def debug_worker(injector):
    return injector.get(DebugWorker)


@pytest.fixture()
async def folder_finder(injector):
    return injector.get(FolderFinder)
|
/*
 * Custom Twitter Feeds (ctf) front-end script — minified vendor bundle.
 * Left byte-identical: minified/generated code, not safe to restructure.
 *
 * What the visible code does:
 *  - Guards against double-loading via the `ctf_js_exists` flag.
 *  - GDPR/consent gating: checks several consent plugins (CLI_Cookie,
 *    cnArgs cookie notice, cookieconsent/complianz, Cookiebot,
 *    BorlabsCookie) and fires a "ctfcheckconsent" jQuery event.
 *  - Hides avatars/header images until consent, then swaps placeholder
 *    <span data-avatar> elements for real <img> tags on consent.
 *  - Installs a twitter.com/intent/* click handler that opens a popup.
 *  - Linkifies tweet text (URLs, @mentions, #hashtags) via a large
 *    inlined regex-based linkify routine.
 *  - Paginates tweets through an admin-ajax call (action ctf_get_more_posts)
 *    and reports feed locations (action ctf_do_locator).
 *  - `ctf_init` is exposed on window and re-run when the various consent
 *    banners are accepted.
 */
var ctf_js_exists=void 0!==ctf_js_exists;ctf_js_exists||function(u){function t(){if(window.ctfObject.consentGiven||!window.ctfObject.gdpr)return!0;if("undefined"!=typeof CLI_Cookie)null!==CLI_Cookie.read(CLI_ACCEPT_COOKIE_NAME)&&(window.ctfObject.consentGiven="yes"===CLI_Cookie.read("cookielawinfo-checkbox-non-necessary"));else if(void 0!==window.cnArgs){var u=("; "+document.cookie).split("; cookie_notice_accepted=");if(2===u.length){var t=u.pop().split(";").shift();window.ctfObject.consentGiven="true"===t}}else void 0!==window.cookieconsent?window.ctfObject.consentGiven="allow"===function(u){for(var t=u+"=",e=window.document.cookie.split(";"),n=0;n<e.length;n++){var a=e[n].trim();if(0==a.indexOf(t))return a.substring(t.length,a.length)}return""}("complianz_consent_status"):void 0!==window.Cookiebot?window.ctfObject.consentGiven=Cookiebot.consented:void 0!==window.BorlabsCookie&&(window.ctfObject.consentGiven=window.BorlabsCookie.checkCookieConsent("twitter"));var e=jQuery.Event("ctfcheckconsent");return e.feed=this,jQuery(window).trigger(e),window.ctfObject.consentGiven}function e(t){t.find(".ctf-hide-avatar").length&&!t.find(".ctf-hide-avatar.ctf-no-consent").length||t.find(".ctf-item").addClass("ctf-hide-avatar ctf-no-consent"),u(".ctf-header-img span").length&&u(".ctf-header-img").addClass("ctf-no-consent")}function n(t){c(),t.find(".ctf-item.ctf-no-consent").removeClass("ctf-hide-avatar"),t.find(".ctf-author-avatar").each(function(){u(this).find("span").replaceWith('<img src="'+u(this).find("span").attr("data-avatar")+'" alt="'+u(this).find("span").attr("data-alt")+'" width="48" height="48">')}),t.find(".ctf-header-img").each(function(){u(this).find("span").replaceWith('<img src="'+u(this).find("span").attr("data-avatar")+'" alt="'+u(this).find("span").attr("data-alt")+'" width="48" height="48">')}),t.find(".ctf-no-consent").removeClass("ctf-no-consent"),t.find(".ctf-header .ctf-header-link").on("mouseenter 
mouseleave",function(u){switch(u.type){case"mouseenter":t.find(".ctf-header .ctf-header-img-hover").fadeIn(200);break;case"mouseleave":t.find(".ctf-header .ctf-header-img-hover").stop().fadeOut(600)}})}function a(){t()&&u(".ctf").each(function(){n(u(this))})}function c(){void 0===window.ctfObject.intentsIncluded&&(window.ctfObject.intentsIncluded=!1),u(".ctf").each(function(){window.ctfObject.intentsIncluded||void 0===u(this).attr("data-ctfintents")||(window.ctfObject.intentsIncluded=!0,function(){if(!window.__twitterIntentHandler){var u=/twitter\.com\/intent\/(\w+)/,t="scrollbars=yes,resizable=yes,toolbar=no,location=yes",e=550,n=420,a=screen.height,c=screen.width;document.addEventListener?document.addEventListener("click",i,!1):document.attachEvent&&document.attachEvent("onclick",i),window.__twitterIntentHandler=!0}function i(i){for(var o,s,d=(i=i||window.event).target||i.srcElement;d&&"a"!==d.nodeName.toLowerCase();)d=d.parentNode;d&&"a"===d.nodeName.toLowerCase()&&d.href&&d.href.match(u)&&(o=Math.round(c/2-e/2),s=0,a>n&&(s=Math.round(a/2-n/2)),window.open(d.href,"intent",t+",width="+e+",height="+n+",left="+o+",top="+s),i.returnValue=!1,i.preventDefault&&i.preventDefault())}}())})}function i(u){var t="content";return u.closest("footer").length?t="footer":u.closest(".header").length||u.closest("header").length?t="header":(u.closest(".sidebar").length||u.closest("aside").length)&&(t="sidebar"),t}window.ctf_init=function(){if(window.ctfObject={},u(".ctf").length&&void 0!==u(".ctf").first().attr("data-ctf-flags")){var a=u(".ctf").first().attr("data-ctf-flags").split(",");if(a.indexOf("gdpr")>-1?(window.ctfObject.consentGiven=!1,window.ctfObject.gdpr=!0):(window.ctfObject.consentGiven=!0,window.ctfObject.gdpr=!1),a.indexOf("locator")>-1){var o=Math.floor(Math.random()*u(".ctf").length);window.ctfObject.locator=1===u(".ctf").length||1===o}else window.ctfObject.locator=!1}else 
window.ctfObject.consentGiven=!0,window.ctfObject.gdpr=!1,window.ctfObject.locator=!1;function s(a){if(a.addClass("ctf_is_initialized"),window.ctfObject.locator){var c=(a.attr("data-feed-id"),a.attr("data-feed-id")),o=(a.attr("data-postid"),a.attr("data-postid"));jQuery.ajax({url:ctf.ajax_url,type:"post",data:{action:"ctf_do_locator",atts:a.attr("data-ctfshortcode"),feed_id:c,location:i(a),post_id:o},success:function(u){}})}t()?n(a):e(a),a.find(".ctf-item.ctf-new").each(function(){var t,e,n,c,i,o,s=u(this),d=s.find(".ctf-tweet-text-media-wrap"),r=s.find(".ctf-tweet-text").remove(".ctf-tweet-text-media-wrap"),f=" "+r.html();if("true"!=a.attr("data-ctfdisablelinks")&&void 0!==f&&!a.find(".ctf-tweet-text-link").length){var A=a.attr("data-ctflinktextcolor"),l="";A&&(l=A.replace(";","").split("#")[1]),window.ctfLinkify=(t="[a-z\\d.-]+://",e="mailto:",n=new RegExp("(?:\\b[a-z\\d.-]+://[^<>\\s]+|\\b(?:(?:(?:[^\\s!@#$%^&*()_=+[\\]{}\\\\|;:'\",.<>/?]+)\\.)+(?:ac|ad|aero|ae|af|ag|ai|al|am|an|ao|aq|arpa|ar|asia|as|at|au|aw|ax|az|ba|bb|bd|be|bf|bg|bh|biz|bi|bj|bm|bn|bo|br|bs|bt|bv|bw|by|bz|cat|ca|cc|cd|cf|cg|ch|ci|ck|cl|cm|cn|coop|com|co|cr|cu|cv|cx|cy|cz|de|dj|dk|dm|do|dz|ec|edu|ee|eg|er|es|et|eu|fi|fj|fk|fm|fo|fr|ga|gb|gd|ge|gf|gg|gh|gi|gl|gm|gn|gov|gp|gq|gr|gs|gt|gu|gw|gy|hk|hm|hn|hr|ht|hu|id|ie|il|im|info|int|in|io|iq|ir|is|it|je|jm|jobs|jo|jp|ke|kg|kh|ki|km|kn|kp|kr|kw|ky|kz|la|lb|lc|li|lk|lr|ls|lt|lu|lv|ly|ma|mc|md|me|mg|mh|mil|mk|ml|mm|mn|mobi|mo|mp|mq|mr|ms|mt|museum|mu|mv|mw|mx|my|mz|name|na|nc|net|ne|nf|ng|ni|nl|no|np|nr|nu|nz|om|org|pa|pe|pf|pg|ph|pk|pl|pm|pn|pro|pr|ps|pt|pw|py|qa|re|ro|rs|ru|rw|sa|sb|sc|sd|se|sg|sh|si|sj|sk|sl|sm|sn|so|sr|st|su|sv|sy|sz|tc|td|tel|tf|tg|th|tj|tk|tl|tm|tn|to|tp|travel|tr|tt|tv|tw|tz|ua|ug|uk|um|us|uy|uz|va|vc|ve|vg|vi|vn|vu|wf|ws|xn--0zwm56d|xn--11b5bs3a9aj6g|xn--80akhbyknj4f|xn--9t4b11yi5a|xn--deba0ad|xn--g6w251d|xn--hgbk6aj7f53bba|xn--hlcj6aya9esc7a|xn--jxalpdlp|xn--kgbechtv|xn--zckzah|ye|yt|yu|za|zm|zw)|(?:(?:[0-9]|[1-9]\\d|1\\d{2}
|2[0-4]\\d|25[0-5])\\.){3}(?:[0-9]|[1-9]\\d|1\\d{2}|2[0-4]\\d|25[0-5]))(?:[;/][^#?<>\\s]*)?(?:\\?[^#<>\\s]*)?(?:#[^<>\\s]*)?(?!\\w)|(?:mailto:)?[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*@(?:(?:(?:[^\\s!@#$%^&*()_=+[\\]{}\\\\|;:'\",.<>/?]+)\\.)+(?:ac|ad|aero|ae|af|ag|ai|al|am|an|ao|aq|arpa|ar|asia|as|at|au|aw|ax|az|ba|bb|bd|be|bf|bg|bh|biz|bi|bj|bm|bn|bo|br|bs|bt|bv|bw|by|bz|cat|ca|cc|cd|cf|cg|ch|ci|ck|cl|cm|cn|coop|com|co|cr|cu|cv|cx|cy|cz|de|dj|dk|dm|do|dz|ec|edu|ee|eg|er|es|et|eu|fi|fj|fk|fm|fo|fr|ga|gb|gd|ge|gf|gg|gh|gi|gl|gm|gn|gov|gp|gq|gr|gs|gt|gu|gw|gy|hk|hm|hn|hr|ht|hu|id|ie|il|im|info|int|in|io|iq|ir|is|it|je|jm|jobs|jo|jp|ke|kg|kh|ki|km|kn|kp|kr|kw|ky|kz|la|lb|lc|li|lk|lr|ls|lt|lu|lv|ly|ma|mc|md|me|mg|mh|mil|mk|ml|mm|mn|mobi|mo|mp|mq|mr|ms|mt|museum|mu|mv|mw|mx|my|mz|name|na|nc|net|ne|nf|ng|ni|nl|no|np|nr|nu|nz|om|org|pa|pe|pf|pg|ph|pk|pl|pm|pn|pro|pr|ps|pt|pw|py|qa|re|ro|rs|ru|rw|sa|sb|sc|sd|se|sg|sh|si|sj|sk|sl|sm|sn|so|sr|st|su|sv|sy|sz|tc|td|tel|tf|tg|th|tj|tk|tl|tm|tn|to|tp|travel|tr|tt|tv|tw|tz|ua|ug|uk|um|us|uy|uz|va|vc|ve|vg|vi|vn|vu|wf|ws|xn--0zwm56d|xn--11b5bs3a9aj6g|xn--80akhbyknj4f|xn--9t4b11yi5a|xn--deba0ad|xn--g6w251d|xn--hgbk6aj7f53bba|xn--hlcj6aya9esc7a|xn--jxalpdlp|xn--kgbechtv|xn--zckzah|ye|yt|yu|za|zm|zw)|(?:(?:[0-9]|[1-9]\\d|1\\d{2}|2[0-4]\\d|25[0-5])\\.){3}(?:[0-9]|[1-9]\\d|1\\d{2}|2[0-4]\\d|25[0-5]))(?:\\?[^#<>\\s]*)?(?:#[^<>\\s]*)?(?!\\w))","ig"),c=new RegExp("^"+t,"i"),i={"'":"`",">":"<",")":"(","]":"[","}":"{","B;":"B+","b:":"b9"},o={callback:function(u,t){return t?'<a href="'+t+'" title="'+t+'" target="_blank">'+u+"</a>":u},punct_regexp:/(?:[!?.,:;'"]|(?:&|&)(?:lt|gt|quot|apos|raquo|laquo|rsaquo|lsaquo);)$/},function(u,t){t=t||{};var a,s,d,r,f,A,l,F,h,m,C,g,w="",B=[];for(s in o)void 0===t[s]&&(t[s]=o[s]);for(;a=n.exec(u);)if(d=a[0],l=(A=n.lastIndex)-d.length,!/[\/:]/.test(u.charAt(l-1))){do{F=d,g=d.substr(-1),(C=i[g])&&(h=d.match(new RegExp("\\"+C+"(?!$)","g")),m=d.match(new 
RegExp("\\"+g,"g")),(h?h.length:0)<(m?m.length:0)&&(d=d.substr(0,d.length-1),A--)),t.punct_regexp&&(d=d.replace(t.punct_regexp,function(u){return A-=u.length,""}))}while(d.length&&d!==F);r=d,c.test(r)||(r=(-1!==r.indexOf("@")?r.indexOf(e)?e:"":r.indexOf("irc.")?r.indexOf("ftp.")?"http://":"ftp://":"irc://")+r),f!=l&&(B.push([u.slice(f,l)]),f=A),B.push([d,r])}for(B.push([u.substr(f)]),s=0;s<B.length;s++)w+=t.callback.apply(window,B[s]);return w||u}),r.find("a").length||(r.find(".emoji").each(function(){u(this).replaceWith(u(this).attr("alt"))}),f=" "+r.html(),f=ctfLinkify(f));f.length>0&&(f=(f=f.replace(/<br>/g,"<br> ")).replace(/(^|\s)#(\w*[\u0041-\u005A\u0061-\u007A\u00AA\u00B5\u00BA\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u02C1\u02C6-\u02D1\u02E0-\u02E4\u02EC\u02EE\u0370-\u0374\u0376\u0377\u037A-\u037D\u0386\u0388-\u038A\u038C\u038E-\u03A1\u03A3-\u03F5\u03F7-\u0481\u048A-\u0527\u0531-\u0556\u0559\u0561-\u0587\u05D0-\u05EA\u05F0-\u05F2\u0620-\u064A\u066E\u066F\u0671-\u06D3\u06D5\u06E5\u06E6\u06EE\u06EF\u06FA-\u06FC\u06FF\u0710\u0712-\u072F\u074D-\u07A5\u07B1\u07CA-\u07EA\u07F4\u07F5\u07FA\u0800-\u0815\u081A\u0824\u0828\u0840-\u0858\u08A0\u08A2-\u08AC\u0904-\u0939\u093D\u0950\u0958-\u0961\u0971-\u0977\u0979-\u097F\u0985-\u098C\u098F\u0990\u0993-\u09A8\u09AA-\u09B0\u09B2\u09B6-\u09B9\u09BD\u09CE\u09DC\u09DD\u09DF-\u09E1\u09F0\u09F1\u0A05-\u0A0A\u0A0F\u0A10\u0A13-\u0A28\u0A2A-\u0A30\u0A32\u0A33\u0A35\u0A36\u0A38\u0A39\u0A59-\u0A5C\u0A5E\u0A72-\u0A74\u0A85-\u0A8D\u0A8F-\u0A91\u0A93-\u0AA8\u0AAA-\u0AB0\u0AB2\u0AB3\u0AB5-\u0AB9\u0ABD\u0AD0\u0AE0\u0AE1\u0B05-\u0B0C\u0B0F\u0B10\u0B13-\u0B28\u0B2A-\u0B30\u0B32\u0B33\u0B35-\u0B39\u0B3D\u0B5C\u0B5D\u0B5F-\u0B61\u0B71\u0B83\u0B85-\u0B8A\u0B8E-\u0B90\u0B92-\u0B95\u0B99\u0B9A\u0B9C\u0B9E\u0B9F\u0BA3\u0BA4\u0BA8-\u0BAA\u0BAE-\u0BB9\u0BD0\u0C05-\u0C0C\u0C0E-\u0C10\u0C12-\u0C28\u0C2A-\u0C33\u0C35-\u0C39\u0C3D\u0C58\u0C59\u0C60\u0C61\u0C85-\u0C8C\u0C8E-\u0C90\u0C92-\u0CA8\u0CAA-\u0CB3\u0CB5-\u0CB9\u0CBD\u0CDE\u0CE0\u0CE1\u0CF1\u0CF2\u0D05-
\u0D0C\u0D0E-\u0D10\u0D12-\u0D3A\u0D3D\u0D4E\u0D60\u0D61\u0D7A-\u0D7F\u0D85-\u0D96\u0D9A-\u0DB1\u0DB3-\u0DBB\u0DBD\u0DC0-\u0DC6\u0E01-\u0E30\u0E32\u0E33\u0E40-\u0E46\u0E81\u0E82\u0E84\u0E87\u0E88\u0E8A\u0E8D\u0E94-\u0E97\u0E99-\u0E9F\u0EA1-\u0EA3\u0EA5\u0EA7\u0EAA\u0EAB\u0EAD-\u0EB0\u0EB2\u0EB3\u0EBD\u0EC0-\u0EC4\u0EC6\u0EDC-\u0EDF\u0F00\u0F40-\u0F47\u0F49-\u0F6C\u0F88-\u0F8C\u1000-\u102A\u103F\u1050-\u1055\u105A-\u105D\u1061\u1065\u1066\u106E-\u1070\u1075-\u1081\u108E\u10A0-\u10C5\u10C7\u10CD\u10D0-\u10FA\u10FC-\u1248\u124A-\u124D\u1250-\u1256\u1258\u125A-\u125D\u1260-\u1288\u128A-\u128D\u1290-\u12B0\u12B2-\u12B5\u12B8-\u12BE\u12C0\u12C2-\u12C5\u12C8-\u12D6\u12D8-\u1310\u1312-\u1315\u1318-\u135A\u1380-\u138F\u13A0-\u13F4\u1401-\u166C\u166F-\u167F\u1681-\u169A\u16A0-\u16EA\u1700-\u170C\u170E-\u1711\u1720-\u1731\u1740-\u1751\u1760-\u176C\u176E-\u1770\u1780-\u17B3\u17D7\u17DC\u1820-\u1877\u1880-\u18A8\u18AA\u18B0-\u18F5\u1900-\u191C\u1950-\u196D\u1970-\u1974\u1980-\u19AB\u19C1-\u19C7\u1A00-\u1A16\u1A20-\u1A54\u1AA7\u1B05-\u1B33\u1B45-\u1B4B\u1B83-\u1BA0\u1BAE\u1BAF\u1BBA-\u1BE5\u1C00-\u1C23\u1C4D-\u1C4F\u1C5A-\u1C7D\u1CE9-\u1CEC\u1CEE-\u1CF1\u1CF5\u1CF6\u1D00-\u1DBF\u1E00-\u1F15\u1F18-\u1F1D\u1F20-\u1F45\u1F48-\u1F4D\u1F50-\u1F57\u1F59\u1F5B\u1F5D\u1F5F-\u1F7D\u1F80-\u1FB4\u1FB6-\u1FBC\u1FBE\u1FC2-\u1FC4\u1FC6-\u1FCC\u1FD0-\u1FD3\u1FD6-\u1FDB\u1FE0-\u1FEC\u1FF2-\u1FF4\u1FF6-\u1FFC\u2071\u207F\u2090-\u209C\u2102\u2107\u210A-\u2113\u2115\u2119-\u211D\u2124\u2126\u2128\u212A-\u212D\u212F-\u2139\u213C-\u213F\u2145-\u2149\u214E\u2183\u2184\u2C00-\u2C2E\u2C30-\u2C5E\u2C60-\u2CE4\u2CEB-\u2CEE\u2CF2\u2CF3\u2D00-\u2D25\u2D27\u2D2D\u2D30-\u2D67\u2D6F\u2D80-\u2D96\u2DA0-\u2DA6\u2DA8-\u2DAE\u2DB0-\u2DB6\u2DB8-\u2DBE\u2DC0-\u2DC6\u2DC8-\u2DCE\u2DD0-\u2DD6\u2DD8-\u2DDE\u2E2F\u3005\u3006\u3031-\u3035\u303B\u303C\u3041-\u3096\u309D-\u309F\u30A1-\u30FA\u30FC-\u30FF\u3105-\u312D\u3131-\u318E\u31A0-\u31BA\u31F0-\u31FF\u3400-\u4DB5\u4E00-\u9FCC\uA000-\uA48C\uA4D0-\uA4FD\uA500-\uA60C\uA61
0-\uA61F\uA62A\uA62B\uA640-\uA66E\uA67F-\uA697\uA6A0-\uA6E5\uA717-\uA71F\uA722-\uA788\uA78B-\uA78E\uA790-\uA793\uA7A0-\uA7AA\uA7F8-\uA801\uA803-\uA805\uA807-\uA80A\uA80C-\uA822\uA840-\uA873\uA882-\uA8B3\uA8F2-\uA8F7\uA8FB\uA90A-\uA925\uA930-\uA946\uA960-\uA97C\uA984-\uA9B2\uA9CF\uAA00-\uAA28\uAA40-\uAA42\uAA44-\uAA4B\uAA60-\uAA76\uAA7A\uAA80-\uAAAF\uAAB1\uAAB5\uAAB6\uAAB9-\uAABD\uAAC0\uAAC2\uAADB-\uAADD\uAAE0-\uAAEA\uAAF2-\uAAF4\uAB01-\uAB06\uAB09-\uAB0E\uAB11-\uAB16\uAB20-\uAB26\uAB28-\uAB2E\uABC0-\uABE2\uAC00-\uD7A3\uD7B0-\uD7C6\uD7CB-\uD7FB\uF900-\uFA6D\uFA70-\uFAD9\uFB00-\uFB06\uFB13-\uFB17\uFB1D\uFB1F-\uFB28\uFB2A-\uFB36\uFB38-\uFB3C\uFB3E\uFB40\uFB41\uFB43\uFB44\uFB46-\uFBB1\uFBD3-\uFD3D\uFD50-\uFD8F\uFD92-\uFDC7\uFDF0-\uFDFB\uFE70-\uFE74\uFE76-\uFEFC\uFF21-\uFF3A\uFF41-\uFF5A\uFF66-\uFFBE\uFFC2-\uFFC7\uFFCA-\uFFCF\uFFD2-\uFFD7\uFFDA-\uFFDC]+\w*)/gi,function(u){var t=u.trim();return/^#[0-9A-F]{6}$/i.test(t)?t:' <a href="https://twitter.com/hashtag/'+t.substring(1)+'" target="_blank" rel="nofollow">'+t+"</a>"}));f=f.replace(/[\s][@]+[A-Za-z0-9-_]+/g,function(u){var t=u.trim();return' <a href="https://twitter.com/'+t.substring(1)+'" target="_blank" rel="nofollow">'+t+"</a>"}),r.html(f.trim()),r.append(d),r.find("a").css("color","#"+l),d.css("color","#"+l)}s.find(".ctf-tweet-text a").each(function(){jQuery(this).text().indexOf("http")>-1&&jQuery(this).text().length>63&&jQuery(this).text(jQuery(this).text().substring(0,60)+"...")})}),a.find(".ctf-author-name, .ctf-tweet-date, .ctf-author-screenname, .ctf-twitterlink, .ctf-author-box-link, .ctf-retweet-text, .ctf-quoted-tweet").css("color",a.find(".ctf-tweet-text").css("color")),a.find(".ctf_more").off("click").on("click",function(t){t.preventDefault(),u(this).hide().next(".ctf_remaining").show()}),"function"==typeof ctf_custom_js&&ctf_custom_js(u),a.find(".ctf-author-box-link p:empty").remove()}function d(u,t,e,n,a,c){n.addClass("ctf-loading").append('<div 
class="ctf-loader"></div>'),n.find(".ctf-loader").css("background-color",n.css("color"));var o=(e.attr("data-feed-id"),e.attr("data-feed-id")),d=(e.attr("data-postid"),e.attr("data-postid"));jQuery.ajax({url:ctf.ajax_url,type:"post",data:{action:"ctf_get_more_posts",last_id_data:u,shortcode_data:t,num_needed:a,persistent_index:c,feed_id:o,location:i(e),post_id:d},success:function(t){""!==u?(-1==t.indexOf("<meta charset")&&e.find(".ctf-item").removeClass("ctf-new").last().after(t),e.find(".ctf-out-of-tweets").length&&(n.hide(),e.find(".ctf-out-of-tweets p").eq(0).fadeIn().end().eq(1).delay(500).fadeIn())):e.find(".ctf-tweets").append(t),n.removeClass("ctf-loading").find(".ctf-loader").remove(),s(e)}})}u(".ctf").length<=u(".ctf_is_initialized").length||(window.ctfObject.consentGiven&&c(),u(".ctf").each(function(){var a=u(this),c=parseInt(a.attr("data-ctfneeded"));a.width()<=480&&a.addClass("ctf-narrow"),a.width()<=320&&a.addClass("ctf-super-narrow"),u(this).hasClass("ctf_is_initialized")||s(a),setTimeout(function(){if(c>0){var u=a.find(".ctf-more"),t=a.find(".ctf-item").last().attr("id"),e=a.find(".ctf-item").length;d(t,a.attr("data-ctfshortcode"),a,u,c,e)}},500),a.find(".ctf-more").on("click",function(){var t=u(this),e=a.find(".ctf-item").last().attr("id"),n=a.find(".ctf-item").length;d(e,a.attr("data-ctfshortcode"),a,t,0,n)}),a.find(".ctf-author-box-link p:empty").remove(),setTimeout(function(){t()?n(a):e(a)},500)}))},jQuery(document).ready(function(u){ctf_init(),u("#cookie-notice a").on("click",function(){setTimeout(function(){a()},1e3)}),u("#cookie-law-info-bar a").on("click",function(){setTimeout(function(){a()},1e3)}),u(".cli-user-preference-checkbox").on("click",function(){a()}),u(window).on("CookiebotOnAccept",function(u){a()}),u(document).on("cmplzAcceptAll",function(u){a()}),u(document).on("cmplzRevoke",function(u){a()}),u(document).on("borlabs-cookie-consent-saved",function(u){a()})})}(jQuery);
|
from sys import maxsize
class Contact:
    """Model object representing a single address-book entry used by the tests.

    Every field is optional; ``None`` means "not filled in". ``id`` is the
    application-assigned record id, kept as a string exactly as read from
    the UI.
    """

    def __init__(self, contact_firstname=None, contact_lastname=None, all_phones_from_home_page=None, id=None,
                 contact_homephone=None, contact_mobilephone=None, contact_workphone=None, contact_secondaryphone=None,
                 contact_address=None, contact_all_emails_from_home_page=None, contact_email=None, contact_email2=None, contact_email3=None):
        self.id = id
        self.contact_firstname = contact_firstname
        self.contact_lastname = contact_lastname
        self.contact_address = contact_address
        self.all_phones_from_home_page = all_phones_from_home_page
        self.contact_homephone = contact_homephone
        self.contact_mobilephone = contact_mobilephone
        self.contact_workphone = contact_workphone
        self.contact_secondaryphone = contact_secondaryphone
        self.contact_all_emails_from_home_page = contact_all_emails_from_home_page
        self.contact_email = contact_email
        self.contact_email2 = contact_email2
        self.contact_email3 = contact_email3

    def __repr__(self):
        # Compact console representation: id, first name, last name, address.
        return "%s:%s:%s:%s" % (self.id, self.contact_firstname, self.contact_lastname, self.contact_address)

    def __eq__(self, other):
        """Two contacts match when names and address agree; the ids are
        compared only when both sides actually carry one."""
        if self.id is not None and other.id is not None and self.id != other.id:
            return False
        return (self.contact_firstname == other.contact_firstname
                and self.contact_lastname == other.contact_lastname
                and self.contact_address == other.contact_address)

    def id_or_max(self):
        """Sort key: the numeric id, or ``sys.maxsize`` when the id is unset
        so id-less contacts order last in a sorted list."""
        return int(self.id) if self.id else maxsize
|
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class EsxiHost(object):
"""
An ESXi host is a node in an SDDC. At a minimum, each SDDC has 3 ESXi hosts
that are used to implement a functioning VMware environment.
In terms of implementation, an ESXi host is a Compute instance that
is configured with the chosen bundle of VMware software.
Notice that an `EsxiHost` object has its own OCID (`id`), and a separate
attribute for the OCID of the Compute instance (`computeInstanceId`).
"""
#: A constant which can be used with the lifecycle_state property of a EsxiHost.
#: This constant has a value of "CREATING"
LIFECYCLE_STATE_CREATING = "CREATING"
#: A constant which can be used with the lifecycle_state property of a EsxiHost.
#: This constant has a value of "UPDATING"
LIFECYCLE_STATE_UPDATING = "UPDATING"
#: A constant which can be used with the lifecycle_state property of a EsxiHost.
#: This constant has a value of "ACTIVE"
LIFECYCLE_STATE_ACTIVE = "ACTIVE"
#: A constant which can be used with the lifecycle_state property of a EsxiHost.
#: This constant has a value of "DELETING"
LIFECYCLE_STATE_DELETING = "DELETING"
#: A constant which can be used with the lifecycle_state property of a EsxiHost.
#: This constant has a value of "DELETED"
LIFECYCLE_STATE_DELETED = "DELETED"
#: A constant which can be used with the lifecycle_state property of a EsxiHost.
#: This constant has a value of "FAILED"
LIFECYCLE_STATE_FAILED = "FAILED"
#: A constant which can be used with the current_sku property of a EsxiHost.
#: This constant has a value of "HOUR"
CURRENT_SKU_HOUR = "HOUR"
#: A constant which can be used with the current_sku property of a EsxiHost.
#: This constant has a value of "MONTH"
CURRENT_SKU_MONTH = "MONTH"
#: A constant which can be used with the current_sku property of a EsxiHost.
#: This constant has a value of "ONE_YEAR"
CURRENT_SKU_ONE_YEAR = "ONE_YEAR"
#: A constant which can be used with the current_sku property of a EsxiHost.
#: This constant has a value of "THREE_YEARS"
CURRENT_SKU_THREE_YEARS = "THREE_YEARS"
#: A constant which can be used with the next_sku property of a EsxiHost.
#: This constant has a value of "HOUR"
NEXT_SKU_HOUR = "HOUR"
#: A constant which can be used with the next_sku property of a EsxiHost.
#: This constant has a value of "MONTH"
NEXT_SKU_MONTH = "MONTH"
#: A constant which can be used with the next_sku property of a EsxiHost.
#: This constant has a value of "ONE_YEAR"
NEXT_SKU_ONE_YEAR = "ONE_YEAR"
#: A constant which can be used with the next_sku property of a EsxiHost.
#: This constant has a value of "THREE_YEARS"
NEXT_SKU_THREE_YEARS = "THREE_YEARS"
def __init__(self, **kwargs):
    """
    Initializes a new EsxiHost object with values from keyword arguments.

    NOTE(review): all attributes are initialized to None below and the
    incoming **kwargs are not read in this constructor; presumably the
    property setters are applied elsewhere — confirm against the SDK's
    deserialization code.

    The following keyword arguments are supported (corresponding to the getters/setters of this class):

    :param id:
        The value to assign to the id property of this EsxiHost.
    :type id: str

    :param display_name:
        The value to assign to the display_name property of this EsxiHost.
    :type display_name: str

    :param sddc_id:
        The value to assign to the sddc_id property of this EsxiHost.
    :type sddc_id: str

    :param compartment_id:
        The value to assign to the compartment_id property of this EsxiHost.
    :type compartment_id: str

    :param compute_instance_id:
        The value to assign to the compute_instance_id property of this EsxiHost.
    :type compute_instance_id: str

    :param time_created:
        The value to assign to the time_created property of this EsxiHost.
    :type time_created: datetime

    :param time_updated:
        The value to assign to the time_updated property of this EsxiHost.
    :type time_updated: datetime

    :param lifecycle_state:
        The value to assign to the lifecycle_state property of this EsxiHost.
        Allowed values for this property are: "CREATING", "UPDATING", "ACTIVE", "DELETING", "DELETED", "FAILED", 'UNKNOWN_ENUM_VALUE'.
        Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
    :type lifecycle_state: str

    :param current_sku:
        The value to assign to the current_sku property of this EsxiHost.
        Allowed values for this property are: "HOUR", "MONTH", "ONE_YEAR", "THREE_YEARS", 'UNKNOWN_ENUM_VALUE'.
        Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
    :type current_sku: str

    :param next_sku:
        The value to assign to the next_sku property of this EsxiHost.
        Allowed values for this property are: "HOUR", "MONTH", "ONE_YEAR", "THREE_YEARS", 'UNKNOWN_ENUM_VALUE'.
        Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
    :type next_sku: str

    :param billing_contract_end_date:
        The value to assign to the billing_contract_end_date property of this EsxiHost.
    :type billing_contract_end_date: datetime

    :param compute_availability_domain:
        The value to assign to the compute_availability_domain property of this EsxiHost.
    :type compute_availability_domain: str

    :param freeform_tags:
        The value to assign to the freeform_tags property of this EsxiHost.
    :type freeform_tags: dict(str, str)

    :param defined_tags:
        The value to assign to the defined_tags property of this EsxiHost.
    :type defined_tags: dict(str, dict(str, object))
    """
    # snake_case attribute name -> declared type string.
    self.swagger_types = {
        'id': 'str',
        'display_name': 'str',
        'sddc_id': 'str',
        'compartment_id': 'str',
        'compute_instance_id': 'str',
        'time_created': 'datetime',
        'time_updated': 'datetime',
        'lifecycle_state': 'str',
        'current_sku': 'str',
        'next_sku': 'str',
        'billing_contract_end_date': 'datetime',
        'compute_availability_domain': 'str',
        'freeform_tags': 'dict(str, str)',
        'defined_tags': 'dict(str, dict(str, object))'
    }
    # snake_case attribute name -> camelCase key.
    self.attribute_map = {
        'id': 'id',
        'display_name': 'displayName',
        'sddc_id': 'sddcId',
        'compartment_id': 'compartmentId',
        'compute_instance_id': 'computeInstanceId',
        'time_created': 'timeCreated',
        'time_updated': 'timeUpdated',
        'lifecycle_state': 'lifecycleState',
        'current_sku': 'currentSku',
        'next_sku': 'nextSku',
        'billing_contract_end_date': 'billingContractEndDate',
        'compute_availability_domain': 'computeAvailabilityDomain',
        'freeform_tags': 'freeformTags',
        'defined_tags': 'definedTags'
    }
    # All attributes start unset; values arrive through the property setters.
    self._id = None
    self._display_name = None
    self._sddc_id = None
    self._compartment_id = None
    self._compute_instance_id = None
    self._time_created = None
    self._time_updated = None
    self._lifecycle_state = None
    self._current_sku = None
    self._next_sku = None
    self._billing_contract_end_date = None
    self._compute_availability_domain = None
    self._freeform_tags = None
    self._defined_tags = None
@property
def id(self):
    """
    **[Required]** Gets the id of this EsxiHost.
    The `OCID`__ of the ESXi host.

    __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

    :return: The id of this EsxiHost.
    :rtype: str
    """
    return self._id

@id.setter
def id(self, id):
    """
    Sets the id of this EsxiHost.
    The `OCID`__ of the ESXi host.

    __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

    :param id: The id of this EsxiHost.
    :type: str
    """
    self._id = id

@property
def display_name(self):
    """
    **[Required]** Gets the display_name of this EsxiHost.
    A descriptive name for the ESXi host. Does not have to be unique, and it's changeable.
    Avoid entering confidential information.

    :return: The display_name of this EsxiHost.
    :rtype: str
    """
    return self._display_name

@display_name.setter
def display_name(self, display_name):
    """
    Sets the display_name of this EsxiHost.
    A descriptive name for the ESXi host. Does not have to be unique, and it's changeable.
    Avoid entering confidential information.

    :param display_name: The display_name of this EsxiHost.
    :type: str
    """
    self._display_name = display_name
@property
def sddc_id(self):
    """
    **[Required]** Gets the sddc_id of this EsxiHost.
    The `OCID`__ of the SDDC that the
    ESXi host belongs to.

    __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

    :return: The sddc_id of this EsxiHost.
    :rtype: str
    """
    return self._sddc_id

@sddc_id.setter
def sddc_id(self, sddc_id):
    """
    Sets the sddc_id of this EsxiHost.
    The `OCID`__ of the SDDC that the
    ESXi host belongs to.

    __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

    :param sddc_id: The sddc_id of this EsxiHost.
    :type: str
    """
    self._sddc_id = sddc_id

@property
def compartment_id(self):
    """
    Gets the compartment_id of this EsxiHost.
    The `OCID`__ of the compartment that
    contains the SDDC.

    __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

    :return: The compartment_id of this EsxiHost.
    :rtype: str
    """
    return self._compartment_id

@compartment_id.setter
def compartment_id(self, compartment_id):
    """
    Sets the compartment_id of this EsxiHost.
    The `OCID`__ of the compartment that
    contains the SDDC.

    __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

    :param compartment_id: The compartment_id of this EsxiHost.
    :type: str
    """
    self._compartment_id = compartment_id

@property
def compute_instance_id(self):
    """
    Gets the compute_instance_id of this EsxiHost.
    In terms of implementation, an ESXi host is a Compute instance that
    is configured with the chosen bundle of VMware software. The `computeInstanceId`
    is the `OCID`__ of that Compute instance.

    __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

    :return: The compute_instance_id of this EsxiHost.
    :rtype: str
    """
    return self._compute_instance_id

@compute_instance_id.setter
def compute_instance_id(self, compute_instance_id):
    """
    Sets the compute_instance_id of this EsxiHost.
    In terms of implementation, an ESXi host is a Compute instance that
    is configured with the chosen bundle of VMware software. The `computeInstanceId`
    is the `OCID`__ of that Compute instance.

    __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

    :param compute_instance_id: The compute_instance_id of this EsxiHost.
    :type: str
    """
    self._compute_instance_id = compute_instance_id
@property
def time_created(self):
    """
    Gets the time_created of this EsxiHost.
    The date and time the ESXi host was created, in the format defined by
    `RFC3339`__.

    Example: `2016-08-25T21:10:29.600Z`

    __ https://tools.ietf.org/html/rfc3339

    :return: The time_created of this EsxiHost.
    :rtype: datetime
    """
    return self._time_created

@time_created.setter
def time_created(self, time_created):
    """
    Sets the time_created of this EsxiHost.
    The date and time the ESXi host was created, in the format defined by
    `RFC3339`__.

    Example: `2016-08-25T21:10:29.600Z`

    __ https://tools.ietf.org/html/rfc3339

    :param time_created: The time_created of this EsxiHost.
    :type: datetime
    """
    self._time_created = time_created

@property
def time_updated(self):
    """
    Gets the time_updated of this EsxiHost.
    The date and time the ESXi host was updated, in the format defined by
    `RFC3339`__.

    __ https://tools.ietf.org/html/rfc3339

    :return: The time_updated of this EsxiHost.
    :rtype: datetime
    """
    return self._time_updated

@time_updated.setter
def time_updated(self, time_updated):
    """
    Sets the time_updated of this EsxiHost.
    The date and time the ESXi host was updated, in the format defined by
    `RFC3339`__.

    __ https://tools.ietf.org/html/rfc3339

    :param time_updated: The time_updated of this EsxiHost.
    :type: datetime
    """
    self._time_updated = time_updated
@property
def lifecycle_state(self):
    """
    Gets the lifecycle_state of this EsxiHost.
    The current state of the ESXi host.

    Allowed values for this property are: "CREATING", "UPDATING", "ACTIVE", "DELETING", "DELETED", "FAILED", 'UNKNOWN_ENUM_VALUE'.
    Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.

    :return: The lifecycle_state of this EsxiHost.
    :rtype: str
    """
    return self._lifecycle_state

@lifecycle_state.setter
def lifecycle_state(self, lifecycle_state):
    """
    Sets the lifecycle_state of this EsxiHost.
    The current state of the ESXi host.

    :param lifecycle_state: The lifecycle_state of this EsxiHost.
    :type: str
    """
    allowed_values = ["CREATING", "UPDATING", "ACTIVE", "DELETING", "DELETED", "FAILED"]
    # Unrecognized (non-None) values are coerced to the sentinel rather than rejected.
    if not value_allowed_none_or_none_sentinel(lifecycle_state, allowed_values):
        lifecycle_state = 'UNKNOWN_ENUM_VALUE'
    self._lifecycle_state = lifecycle_state
@property
def current_sku(self):
    """
    **[Required]** Gets the current_sku of this EsxiHost.
    The billing option currently used by the ESXi host.
    :func:`list_supported_skus`.

    Allowed values for this property are: "HOUR", "MONTH", "ONE_YEAR", "THREE_YEARS", 'UNKNOWN_ENUM_VALUE'.
    Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.

    :return: The current_sku of this EsxiHost.
    :rtype: str
    """
    return self._current_sku

@current_sku.setter
def current_sku(self, current_sku):
    """
    Sets the current_sku of this EsxiHost.
    The billing option currently used by the ESXi host.
    :func:`list_supported_skus`.

    :param current_sku: The current_sku of this EsxiHost.
    :type: str
    """
    allowed_values = ["HOUR", "MONTH", "ONE_YEAR", "THREE_YEARS"]
    # Unrecognized (non-None) values are coerced to the sentinel rather than rejected.
    if not value_allowed_none_or_none_sentinel(current_sku, allowed_values):
        current_sku = 'UNKNOWN_ENUM_VALUE'
    self._current_sku = current_sku
@property
def next_sku(self):
    """
    **[Required]** Gets the next_sku of this EsxiHost.
    The billing option to switch to after the current billing cycle ends.
    If `nextSku` is null or empty, `currentSku` continues to the next billing cycle.
    :func:`list_supported_skus`.

    Allowed values for this property are: "HOUR", "MONTH", "ONE_YEAR", "THREE_YEARS", 'UNKNOWN_ENUM_VALUE'.
    Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.

    :return: The next_sku of this EsxiHost.
    :rtype: str
    """
    return self._next_sku

@next_sku.setter
def next_sku(self, next_sku):
    """
    Sets the next_sku of this EsxiHost.
    The billing option to switch to after the current billing cycle ends.
    If `nextSku` is null or empty, `currentSku` continues to the next billing cycle.
    :func:`list_supported_skus`.

    :param next_sku: The next_sku of this EsxiHost.
    :type: str
    """
    allowed_values = ["HOUR", "MONTH", "ONE_YEAR", "THREE_YEARS"]
    # Unrecognized (non-None) values are coerced to the sentinel rather than rejected.
    if not value_allowed_none_or_none_sentinel(next_sku, allowed_values):
        next_sku = 'UNKNOWN_ENUM_VALUE'
    self._next_sku = next_sku
@property
def billing_contract_end_date(self):
    """
    **[Required]** Gets the billing_contract_end_date of this EsxiHost.
    Current billing cycle end date. If the value in `currentSku` and `nextSku` are different, the value specified in `nextSku`
    becomes the new `currentSKU` when the `contractEndDate` is reached.

    Example: `2016-08-25T21:10:29.600Z`

    :return: The billing_contract_end_date of this EsxiHost.
    :rtype: datetime
    """
    return self._billing_contract_end_date

@billing_contract_end_date.setter
def billing_contract_end_date(self, billing_contract_end_date):
    """
    Sets the billing_contract_end_date of this EsxiHost.
    Current billing cycle end date. If the value in `currentSku` and `nextSku` are different, the value specified in `nextSku`
    becomes the new `currentSKU` when the `contractEndDate` is reached.

    Example: `2016-08-25T21:10:29.600Z`

    :param billing_contract_end_date: The billing_contract_end_date of this EsxiHost.
    :type: datetime
    """
    self._billing_contract_end_date = billing_contract_end_date

@property
def compute_availability_domain(self):
    """
    **[Required]** Gets the compute_availability_domain of this EsxiHost.
    The availability domain of the ESXi host.

    :return: The compute_availability_domain of this EsxiHost.
    :rtype: str
    """
    return self._compute_availability_domain

@compute_availability_domain.setter
def compute_availability_domain(self, compute_availability_domain):
    """
    Sets the compute_availability_domain of this EsxiHost.
    The availability domain of the ESXi host.

    :param compute_availability_domain: The compute_availability_domain of this EsxiHost.
    :type: str
    """
    self._compute_availability_domain = compute_availability_domain
@property
def freeform_tags(self):
    """
    **[Required]** Gets the freeform_tags of this EsxiHost.
    Free-form tags for this resource. Each tag is a simple key-value pair with no
    predefined name, type, or namespace. For more information, see `Resource Tags`__.

    Example: `{\"Department\": \"Finance\"}`

    __ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm

    :return: The freeform_tags of this EsxiHost.
    :rtype: dict(str, str)
    """
    return self._freeform_tags

@freeform_tags.setter
def freeform_tags(self, freeform_tags):
    """
    Sets the freeform_tags of this EsxiHost.
    Free-form tags for this resource. Each tag is a simple key-value pair with no
    predefined name, type, or namespace. For more information, see `Resource Tags`__.

    Example: `{\"Department\": \"Finance\"}`

    __ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm

    :param freeform_tags: The freeform_tags of this EsxiHost.
    :type: dict(str, str)
    """
    self._freeform_tags = freeform_tags

@property
def defined_tags(self):
    """
    **[Required]** Gets the defined_tags of this EsxiHost.
    Defined tags for this resource. Each key is predefined and scoped to a
    namespace. For more information, see `Resource Tags`__.

    Example: `{\"Operations\": {\"CostCenter\": \"42\"}}`

    __ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm

    :return: The defined_tags of this EsxiHost.
    :rtype: dict(str, dict(str, object))
    """
    return self._defined_tags

@defined_tags.setter
def defined_tags(self, defined_tags):
    """
    Sets the defined_tags of this EsxiHost.
    Defined tags for this resource. Each key is predefined and scoped to a
    namespace. For more information, see `Resource Tags`__.

    Example: `{\"Operations\": {\"CostCenter\": \"42\"}}`

    __ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm

    :param defined_tags: The defined_tags of this EsxiHost.
    :type: dict(str, dict(str, object))
    """
    self._defined_tags = defined_tags
def __repr__(self):
    """Return a readable, flat-dict style representation of this model."""
    return formatted_flat_dict(self)

def __eq__(self, other):
    """Two models are equal when all attributes match; never equal to None."""
    return other is not None and self.__dict__ == other.__dict__

def __ne__(self, other):
    """Negation of __eq__."""
    return not self == other
|
"use strict";
Object.defineProperty(exports, "__esModule", {
value: true
});
exports.default = void 0;
var _GridItem = _interopRequireDefault(require("../grid/GridItem"));
var _sortableFactories = require("../sortable/sortable-factories");
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }
var SortableGridListItem = (0, _sortableFactories.createSortableItem)(_GridItem.default);
SortableGridListItem.displayName = 'SortableGridListItem';
var _default = SortableGridListItem;
exports.default = _default;
|
import hashlib
from pathlib import Path
from typing import List
from urllib.parse import quote, urlencode
import requests
from docutils import nodes
from docutils.parsers.rst.directives.images import Image
# Directory containing this extension; cached images are stored beneath it.
_THIS_DIR = Path(__file__).parent

# Color palette from PyTorch Developer Day 2021 Presentation Template
# (hex RGB strings without the leading '#', as shields.io expects).
YELLOW = "F9DB78"
GREEN = "70AD47"
BLUE = "00B0F0"
PINK = "FF71DA"
ORANGE = "FF8300"
TEAL = "00E5D1"
GRAY = "7F7F7F"
def _get_cache_path(key, ext):
    """Return the cache file path for ``key`` (bytes), named by its SHA-256.

    The ``gen_images`` directory next to this file is created on demand.
    """
    digest = hashlib.sha256(key).hexdigest()
    target_dir = _THIS_DIR / "gen_images"
    target_dir.mkdir(parents=True, exist_ok=True)
    return target_dir / f"{digest}{ext}"
def _download(url, path):
    """Fetch ``url`` and write the response body to ``path`` (binary)."""
    resp = requests.get(url)
    resp.raise_for_status()
    with open(path, "wb") as fh:
        fh.write(resp.content)
def _fetch_image(url):
    """Return the cached SVG for ``url`` as a path relative to this directory,
    downloading it on first use."""
    cached = _get_cache_path(url.encode("utf-8"), ext=".svg")
    if not cached.exists():
        _download(url, cached)
    return str(cached.relative_to(_THIS_DIR))
class BaseShield(Image):
    """Image directive that renders a shields.io static badge, cached locally.

    Subclasses call :meth:`run` with the badge query parameters, the alt
    text, and the anchor on the supported-features page to link to.
    """

    def run(self, params, alt, section) -> List[nodes.Node]:
        query = urlencode(params, quote_via=quote)
        badge_url = f"https://img.shields.io/static/v1?{query}"
        self.arguments = [_fetch_image(badge_url)]
        self.options["alt"] = alt
        # Tag the image so it can be styled as a badge.
        self.options.setdefault("class", []).append("shield-badge")
        self.options["target"] = f"supported_features.html#{section}"
        return super().run()
def _parse_devices(arg: str):
devices = sorted(arg.strip().split())
valid_values = {"CPU", "CUDA"}
if any(val not in valid_values for val in devices):
raise ValueError(
f"One or more device values are not valid. The valid values are {valid_values}. Given value: '{arg}'"
)
return ", ".join(sorted(devices))
def _parse_properties(arg: str):
properties = sorted(arg.strip().split())
valid_values = {"Autograd", "TorchScript"}
if any(val not in valid_values for val in properties):
raise ValueError(
"One or more property values are not valid. "
f"The valid values are {valid_values}. "
f"Given value: '{arg}'"
)
return ", ".join(sorted(properties))
class SupportedDevices(BaseShield):
    """List the supported devices"""

    required_arguments = 1
    final_argument_whitespace = True

    def run(self) -> List[nodes.Node]:
        device_list = _parse_devices(self.arguments[0])
        badge_params = {
            "label": "Devices",
            "message": device_list,
            "labelColor": GRAY,
            "color": BLUE,
            "style": "flat-square",
        }
        alt_text = f"This feature supports the following devices: {device_list}"
        return super().run(badge_params, alt_text, "devices")
class SupportedProperties(BaseShield):
    """List the supported properties"""

    required_arguments = 1
    final_argument_whitespace = True

    def run(self) -> List[nodes.Node]:
        property_list = _parse_properties(self.arguments[0])
        badge_params = {
            "label": "Properties",
            "message": property_list,
            "labelColor": GRAY,
            "color": GREEN,
            "style": "flat-square",
        }
        alt_text = f"This API supports the following properties: {property_list}"
        return super().run(badge_params, alt_text, "properties")
|
/* -*- Mode: C; c-basic-offset:4 ; -*- */
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2006 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "orte_config.h"
#include "opal/types.h"
#include "orte/mca/routed/routed.h"
#include "opal/dss/dss.h"
#include "opal/util/output.h"
#include "opal/util/opal_sos.h"
#include "orte/mca/errmgr/errmgr.h"
#include "orte/mca/rml/base/base.h"
#include "orte/mca/rml/rml_types.h"
#include "orte/util/name_fns.h"
#include "orte/runtime/orte_globals.h"
#include "rml_oob.h"
/*
 * Completion callback invoked by the OOB layer once a posted send
 * finishes.  iovec 0 of every RML message carries the RML header, so a
 * positive status (bytes sent) is reduced by the header size before
 * being reported; non-positive statuses (errors) pass through as-is.
 */
static void
orte_rml_send_msg_callback(int status,
                           struct orte_process_name_t* peer,
                           struct iovec* iov,
                           int count,
                           orte_rml_tag_t tag,
                           void* cbdata)
{
    orte_rml_oob_msg_t *msg = (orte_rml_oob_msg_t*) cbdata;
    orte_rml_oob_msg_header_t *hdr =
        (orte_rml_oob_msg_header_t*) iov[0].iov_base;

    if (msg->msg_type == ORTE_RML_BLOCKING_SEND) {
        /* blocking send: record the status and wake the thread waiting
           on msg_cond in orte_rml_oob_send(); that thread releases msg */
        OPAL_THREAD_LOCK(&msg->msg_lock);
        if (status > 0) {
            msg->msg_status = status - sizeof(orte_rml_oob_msg_header_t);
        } else {
            msg->msg_status = status;
        }
        msg->msg_complete = true;
        opal_condition_broadcast(&msg->msg_cond);
        OPAL_THREAD_UNLOCK(&msg->msg_lock);
    } else if (msg->msg_type == ORTE_RML_NONBLOCKING_IOV_SEND) {
        /* non-blocking iovec send: skip the header iovec (iov + 1,
           count - 1) when handing the user's iovecs to the callback */
        if (status > 0) {
            status -= sizeof(orte_rml_oob_msg_header_t);
        }
        /* header was converted to network order for the wire; convert
           it back before reading the tag */
        ORTE_RML_OOB_MSG_HEADER_NTOH(*hdr);
        msg->msg_cbfunc.iov(status, peer, iov + 1, count - 1,
                            hdr->tag, msg->msg_cbdata);
        OBJ_RELEASE(msg);
    } else if (msg->msg_type == ORTE_RML_NONBLOCKING_BUFFER_SEND) {
        /* non-blocking buffer send: drop the reference taken on the
           user buffer when the send was posted (see send_buffer_nb) */
        if (status > 0) {
            status -= sizeof(orte_rml_oob_msg_header_t);
        }
        ORTE_RML_OOB_MSG_HEADER_NTOH(*hdr);
        msg->msg_cbfunc.buffer(status, peer, msg->user_buffer,
                               hdr->tag, msg->msg_cbdata);
        OBJ_RELEASE(msg->user_buffer);
        OBJ_RELEASE(msg);
    } else {
        /* unknown message type: internal invariant violated */
        abort();
    }
}
/*
 * Blocking RML send: prepend the RML header as iovec 0, post a
 * non-blocking OOB send, then wait on the message condition variable
 * until the completion callback signals it.  Returns the number of user
 * bytes sent, or an ORTE error code.
 *
 * Fixed: msg was leaked on the invalid-tag and unknown-route early
 * returns, and the msg_data allocation was unchecked.
 */
int
orte_rml_oob_send(orte_process_name_t* peer,
                  struct iovec *iov,
                  int count,
                  int tag,
                  int flags)
{
    orte_rml_oob_msg_t *msg;
    int ret;
    orte_process_name_t next;
    int real_tag;
    int i;

    if (ORTE_RML_TAG_INVALID == tag) {
        /* cannot send to an invalid tag */
        ORTE_ERROR_LOG(ORTE_ERR_BAD_PARAM);
        return ORTE_ERR_BAD_PARAM;
    }

    /* resolve the next hop before allocating any resources */
    next = orte_routed.get_route(peer);
    if (next.vpid == ORTE_VPID_INVALID) {
        ORTE_ERROR_LOG(ORTE_ERR_ADDRESSEE_UNKNOWN);
        opal_output(0, "%s could not get route to %s",
                    ORTE_NAME_PRINT(ORTE_PROC_MY_NAME), ORTE_NAME_PRINT(peer));
        return ORTE_ERR_ADDRESSEE_UNKNOWN;
    }

    msg = OBJ_NEW(orte_rml_oob_msg_t);
    msg->msg_type = ORTE_RML_BLOCKING_SEND;
    /* the completion callback only signals our condition variable, so it
       is safe to run in the caller's context */
    flags |= ORTE_RML_FLAG_RECURSIVE_CALLBACK;

    /* iovec 0 carries the RML header; the user's iovecs follow */
    msg->msg_data = (struct iovec *) malloc(sizeof(struct iovec) * (count + 1));
    if (NULL == msg->msg_data) {
        ORTE_ERROR_LOG(ORTE_ERR_OUT_OF_RESOURCE);
        OBJ_RELEASE(msg);
        return ORTE_ERR_OUT_OF_RESOURCE;
    }
    msg->msg_data[0].iov_base = (ompi_iov_base_ptr_t)&msg->msg_header;
    msg->msg_data[0].iov_len = sizeof(orte_rml_oob_msg_header_t);
    for (i = 0 ; i < count ; ++i) {
        msg->msg_data[i + 1].iov_base = iov[i].iov_base;
        msg->msg_data[i + 1].iov_len = iov[i].iov_len;
    }

    msg->msg_header.origin = *ORTE_PROC_MY_NAME;
    msg->msg_header.destination = *peer;
    msg->msg_header.tag = tag;
    ORTE_RML_OOB_MSG_HEADER_HTON(msg->msg_header);

    /* send directly when the next hop is the peer itself; otherwise
       tunnel the message through the RML routing tag */
    if (OPAL_EQUAL == orte_util_compare_name_fields(ORTE_NS_CMP_ALL, &next, peer)) {
        real_tag = tag;
    } else {
        real_tag = ORTE_RML_TAG_RML_ROUTE;
    }

    OPAL_OUTPUT_VERBOSE((1, orte_rml_base_output,
                         "rml_send %s -> %s (router %s, tag %d, %d)",
                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                         ORTE_NAME_PRINT(peer),
                         ORTE_NAME_PRINT(&next),
                         tag,
                         real_tag));

    ret = orte_rml_oob_module.active_oob->oob_send_nb(&next,
                                                      ORTE_PROC_MY_NAME,
                                                      msg->msg_data,
                                                      count + 1,
                                                      real_tag,
                                                      flags,
                                                      orte_rml_send_msg_callback,
                                                      msg);
    if (ret < 0) {
        ORTE_ERROR_LOG(ret);
        opal_output(0, "%s attempted to send to %s: tag %d",
                    ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                    ORTE_NAME_PRINT(&next), (int)real_tag);
        goto cleanup;
    }

    /* wait for the completion callback to mark the message complete */
    OPAL_THREAD_LOCK(&msg->msg_lock);
    while (!msg->msg_complete) {
        opal_condition_wait(&msg->msg_cond, &msg->msg_lock);
    }
    ret = msg->msg_status;
    OPAL_THREAD_UNLOCK(&msg->msg_lock);

 cleanup:
    OBJ_RELEASE(msg);
    return ret;
}
/*
 * Non-blocking iovec RML send: prepend the RML header as iovec 0 and
 * post the message to the active OOB; cbfunc runs on completion and the
 * message object is released by the callback.
 *
 * Fixed: msg was leaked on the invalid-tag and unknown-route early
 * returns, and the msg_data allocation was unchecked.
 */
int
orte_rml_oob_send_nb(orte_process_name_t* peer,
                     struct iovec* iov,
                     int count,
                     orte_rml_tag_t tag,
                     int flags,
                     orte_rml_callback_fn_t cbfunc,
                     void* cbdata)
{
    orte_rml_oob_msg_t *msg;
    int ret;
    int real_tag;
    orte_process_name_t next;
    int i;

    if (ORTE_RML_TAG_INVALID == tag) {
        /* cannot send to an invalid tag */
        ORTE_ERROR_LOG(ORTE_ERR_BAD_PARAM);
        return ORTE_ERR_BAD_PARAM;
    }

    /* resolve the next hop before allocating any resources */
    next = orte_routed.get_route(peer);
    if (next.vpid == ORTE_VPID_INVALID) {
        ORTE_ERROR_LOG(ORTE_ERR_ADDRESSEE_UNKNOWN);
        opal_output(0, "%s could not get route to %s",
                    ORTE_NAME_PRINT(ORTE_PROC_MY_NAME), ORTE_NAME_PRINT(peer));
        return ORTE_ERR_ADDRESSEE_UNKNOWN;
    }

    msg = OBJ_NEW(orte_rml_oob_msg_t);
    msg->msg_type = ORTE_RML_NONBLOCKING_IOV_SEND;
    msg->msg_cbfunc.iov = cbfunc;
    msg->msg_cbdata = cbdata;

    /* iovec 0 carries the RML header; the user's iovecs follow */
    msg->msg_data = (struct iovec *) malloc(sizeof(struct iovec) * (count + 1));
    if (NULL == msg->msg_data) {
        ORTE_ERROR_LOG(ORTE_ERR_OUT_OF_RESOURCE);
        OBJ_RELEASE(msg);
        return ORTE_ERR_OUT_OF_RESOURCE;
    }
    msg->msg_data[0].iov_base = (ompi_iov_base_ptr_t)&msg->msg_header;
    msg->msg_data[0].iov_len = sizeof(orte_rml_oob_msg_header_t);
    for (i = 0 ; i < count ; ++i) {
        msg->msg_data[i + 1].iov_base = iov[i].iov_base;
        msg->msg_data[i + 1].iov_len = iov[i].iov_len;
    }

    msg->msg_header.origin = *ORTE_PROC_MY_NAME;
    msg->msg_header.destination = *peer;
    msg->msg_header.tag = tag;
    ORTE_RML_OOB_MSG_HEADER_HTON(msg->msg_header);

    /* send directly when the next hop is the peer itself; otherwise
       tunnel the message through the RML routing tag */
    if (OPAL_EQUAL == orte_util_compare_name_fields(ORTE_NS_CMP_ALL, &next, peer)) {
        real_tag = tag;
    } else {
        real_tag = ORTE_RML_TAG_RML_ROUTE;
    }

    OPAL_OUTPUT_VERBOSE((1, orte_rml_base_output,
                         "rml_send_nb %s -> %s (router %s, tag %d, %d)",
                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                         ORTE_NAME_PRINT(peer),
                         ORTE_NAME_PRINT(&next),
                         tag, real_tag));

    ret = orte_rml_oob_module.active_oob->oob_send_nb(&next,
                                                      ORTE_PROC_MY_NAME,
                                                      msg->msg_data,
                                                      count + 1,
                                                      real_tag,
                                                      flags,
                                                      orte_rml_send_msg_callback,
                                                      msg);
    if (ret < 0) {
        ORTE_ERROR_LOG(ret);
        opal_output(0, "%s attempted to send to %s: tag %d",
                    ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                    ORTE_NAME_PRINT(&next), (int)real_tag);
        OBJ_RELEASE(msg);
    }
    return ret;
}
/*
 * Blocking buffer send: expose the buffer's payload as a single iovec
 * and forward to orte_rml_oob_send().  The unload/load pair extracts
 * the raw data pointer while handing ownership of the payload back to
 * the buffer, so the caller's buffer is left intact.
 */
int
orte_rml_oob_send_buffer(orte_process_name_t* peer,
                         opal_buffer_t* buffer,
                         orte_rml_tag_t tag,
                         int flags)
{
    int ret;
    void *dataptr;
    orte_std_cntr_t datalen;
    struct iovec iov[1];

    if (ORTE_RML_TAG_INVALID == tag) {
        /* cannot send to an invalid tag */
        ORTE_ERROR_LOG(ORTE_ERR_BAD_PARAM);
        return ORTE_ERR_BAD_PARAM;
    }

    /* first build iovec from buffer information */
    ret = opal_dss.unload(buffer, &dataptr, &datalen);
    if (ret != ORTE_SUCCESS) return ret;
    opal_dss.load(buffer, dataptr, datalen);

    iov[0].iov_base = (IOVBASE_TYPE*)dataptr;
    iov[0].iov_len = datalen;
    return orte_rml_oob_send(peer, iov, 1, tag, flags);
}
/*
 * Non-blocking buffer send: send the buffer's payload (preceded by the
 * RML header) through the active OOB.  A reference is taken on the user
 * buffer for the duration of the send; the completion callback releases
 * it along with the message object.
 *
 * Fixed: msg was leaked on the invalid-tag and unknown-route early
 * returns, and the msg_data allocation was unchecked.
 */
int
orte_rml_oob_send_buffer_nb(orte_process_name_t* peer,
                            opal_buffer_t* buffer,
                            orte_rml_tag_t tag,
                            int flags,
                            orte_rml_buffer_callback_fn_t cbfunc,
                            void* cbdata)
{
    orte_rml_oob_msg_t *msg;
    void *dataptr;
    orte_std_cntr_t datalen;
    int ret;
    int real_tag;
    orte_process_name_t next;

    if (ORTE_RML_TAG_INVALID == tag) {
        /* cannot send to an invalid tag */
        ORTE_ERROR_LOG(ORTE_ERR_BAD_PARAM);
        return ORTE_ERR_BAD_PARAM;
    }

    /* resolve the next hop before allocating any resources */
    next = orte_routed.get_route(peer);
    if (next.vpid == ORTE_VPID_INVALID) {
        ORTE_ERROR_LOG(ORTE_ERR_ADDRESSEE_UNKNOWN);
        opal_output(0, "%s unable to find address for %s",
                    ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                    ORTE_NAME_PRINT(peer));
        return ORTE_ERR_ADDRESSEE_UNKNOWN;
    }

    /* unload to obtain the raw data pointer, then load it right back so
       the buffer retains ownership of its payload */
    ret = opal_dss.unload(buffer, &dataptr, &datalen);
    if (ORTE_SUCCESS != ret) {
        return ret;
    }
    opal_dss.load(buffer, dataptr, datalen);

    msg = OBJ_NEW(orte_rml_oob_msg_t);
    msg->msg_type = ORTE_RML_NONBLOCKING_BUFFER_SEND;
    msg->msg_cbfunc.buffer = cbfunc;
    msg->msg_cbdata = cbdata;
    msg->user_buffer = buffer;

    /* iovec 0 carries the RML header; iovec 1 the buffer payload */
    msg->msg_data = (struct iovec *) malloc(sizeof(struct iovec) * 2);
    if (NULL == msg->msg_data) {
        ORTE_ERROR_LOG(ORTE_ERR_OUT_OF_RESOURCE);
        OBJ_RELEASE(msg);
        return ORTE_ERR_OUT_OF_RESOURCE;
    }
    msg->msg_data[0].iov_base = (ompi_iov_base_ptr_t)&msg->msg_header;
    msg->msg_data[0].iov_len = sizeof(orte_rml_oob_msg_header_t);
    msg->msg_data[1].iov_base = (IOVBASE_TYPE*)dataptr;
    msg->msg_data[1].iov_len = datalen;

    msg->msg_header.origin = *ORTE_PROC_MY_NAME;
    msg->msg_header.destination = *peer;
    msg->msg_header.tag = tag;
    ORTE_RML_OOB_MSG_HEADER_HTON(msg->msg_header);

    /* send directly when the next hop is the peer itself; otherwise
       tunnel the message through the RML routing tag */
    if (OPAL_EQUAL == orte_util_compare_name_fields(ORTE_NS_CMP_ALL, &next, peer)) {
        real_tag = tag;
    } else {
        real_tag = ORTE_RML_TAG_RML_ROUTE;
    }

    /* hold a reference to the user buffer until the send completes;
       released in orte_rml_send_msg_callback() */
    OBJ_RETAIN(buffer);

    OPAL_OUTPUT_VERBOSE((1, orte_rml_base_output,
                         "rml_send_buffer_nb %s -> %s (router %s, tag %d, %d)",
                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                         ORTE_NAME_PRINT(peer),
                         ORTE_NAME_PRINT(&next),
                         tag, real_tag));

    ret = orte_rml_oob_module.active_oob->oob_send_nb(&next,
                                                      ORTE_PROC_MY_NAME,
                                                      msg->msg_data,
                                                      2,
                                                      real_tag,
                                                      flags,
                                                      orte_rml_send_msg_callback,
                                                      msg);
    if (ret < 0) {
        ORTE_ERROR_LOG(ret);
        opal_output(0, "%s attempted to send to %s: tag %d",
                    ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                    ORTE_NAME_PRINT(&next), (int)real_tag);
        OBJ_RELEASE(msg);
        OBJ_RELEASE(buffer);
    }
    return ret;
}
|
/*
* Copyright (c) 2006-2009 Erin Catto http://www.box2d.org
*
* This software is provided 'as-is', without any express or implied
* warranty. In no event will the authors be held liable for any damages
* arising from the use of this software.
* Permission is granted to anyone to use this software for any purpose,
* including commercial applications, and to alter it and redistribute it
* freely, subject to the following restrictions:
* 1. The origin of this software must not be misrepresented; you must not
* claim that you wrote the original software. If you use this software
* in a product, an acknowledgment in the product documentation would be
* appreciated but is not required.
* 2. Altered source versions must be plainly marked as such, and must not be
* misrepresented as being the original software.
* 3. This notice may not be removed or altered from any source distribution.
*/
#ifndef B2_MATH_H
#define B2_MATH_H
#include <Box2D/Common/b2Settings.h>
#include <cmath>
#include <cfloat>
#include <cstddef>
#include <limits>
/// This function is used to ensure that a floating point number is
/// not a NaN or infinity.
inline bool b2IsValid(float32 x)
{
    // std::isfinite is false for NaN and +/-infinity, which is exactly
    // the hand-rolled (x != x) / bounds check this replaces.
    // <cmath> is already included by this header.
    return std::isfinite(x);
}
/// This is an approximate yet fast inverse square-root.
inline float32 b2InvSqrt(float32 x)
{
    // Type-pun the float's bits through a union (the classic
    // "fast inverse square root" trick).
    union
    {
        float32 x;
        int32 i;
    } convert;

    convert.x = x;
    float32 xhalf = 0.5f * x;
    // Magic-constant initial guess for 1/sqrt(x) derived from the
    // float's bit pattern.
    convert.i = 0x5f3759df - (convert.i >> 1);
    x = convert.x;
    // One Newton-Raphson iteration to refine the estimate.
    x = x * (1.5f - xhalf * x * x);
    return x;
}
/// Square root (forwards to std::sqrt).
#define b2Sqrt(x)	std::sqrt(x)
/// Quadrant-aware arc tangent of y/x (forwards to std::atan2).
#define b2Atan2(y, x)	std::atan2(y, x)
/// A 2D column vector.
struct b2Vec2
{
    /// Default constructor does nothing (for performance).
    b2Vec2() {}

    /// Construct using coordinates.
    b2Vec2(float32 x, float32 y) : x(x), y(y) {}

    /// Set this vector to all zeros.
    void SetZero() { x = 0.0f; y = 0.0f; }

    /// Set this vector to some specified coordinates.
    void Set(float32 x_, float32 y_) { x = x_; y = y_; }

    /// Negate this vector.
    b2Vec2 operator -() const { b2Vec2 v; v.Set(-x, -y); return v; }

    /// Read from an indexed element (0 -> x, 1 -> y).
    /// NOTE: relies on x and y being laid out contiguously.
    float32 operator () (int32 i) const
    {
        return (&x)[i];
    }

    /// Write to an indexed element (0 -> x, 1 -> y).
    float32& operator () (int32 i)
    {
        return (&x)[i];
    }

    /// Add a vector to this vector.
    void operator += (const b2Vec2& v)
    {
        x += v.x; y += v.y;
    }

    /// Subtract a vector from this vector.
    void operator -= (const b2Vec2& v)
    {
        x -= v.x; y -= v.y;
    }

    /// Multiply this vector by a scalar.
    void operator *= (float32 a)
    {
        x *= a; y *= a;
    }

    /// Get the length of this vector (the norm).
    float32 Length() const
    {
        return b2Sqrt(x * x + y * y);
    }

    /// Get the length squared. For performance, use this instead of
    /// b2Vec2::Length (if possible).
    float32 LengthSquared() const
    {
        return x * x + y * y;
    }

    /// Convert this vector into a unit vector. Returns the length.
    /// Vectors shorter than b2_epsilon are left unchanged and 0 is returned.
    float32 Normalize()
    {
        float32 length = Length();
        if (length < b2_epsilon)
        {
            return 0.0f;
        }
        float32 invLength = 1.0f / length;
        x *= invLength;
        y *= invLength;
        return length;
    }

    /// Does this vector contain finite coordinates?
    bool IsValid() const
    {
        return b2IsValid(x) && b2IsValid(y);
    }

    /// Get the skew vector such that dot(skew_vec, other) == cross(vec, other)
    b2Vec2 Skew() const
    {
        return b2Vec2(-y, x);
    }

    float32 x, y;
};
/// A 2D column vector with 3 elements.
struct b2Vec3
{
    /// Default constructor does nothing (for performance).
    b2Vec3() {}

    /// Construct using coordinates.
    b2Vec3(float32 x, float32 y, float32 z) : x(x), y(y), z(z) {}

    /// Set this vector to all zeros.
    void SetZero() { x = 0.0f; y = 0.0f; z = 0.0f; }

    /// Set this vector to some specified coordinates.
    void Set(float32 x_, float32 y_, float32 z_) { x = x_; y = y_; z = z_; }

    /// Negate this vector.
    b2Vec3 operator -() const { b2Vec3 v; v.Set(-x, -y, -z); return v; }

    /// Add a vector to this vector.
    void operator += (const b2Vec3& v)
    {
        x += v.x; y += v.y; z += v.z;
    }

    /// Subtract a vector from this vector.
    void operator -= (const b2Vec3& v)
    {
        x -= v.x; y -= v.y; z -= v.z;
    }

    /// Multiply this vector by a scalar.
    void operator *= (float32 s)
    {
        x *= s; y *= s; z *= s;
    }

    float32 x, y, z;
};
/// A 2-by-2 matrix. Stored in column-major order.
struct b2Mat22
{
/// The default constructor does nothing (for performance).
b2Mat22() {}
/// Construct this matrix using columns.
b2Mat22(const b2Vec2& c1, const b2Vec2& c2)
{
ex = c1;
ey = c2;
}
/// Construct this matrix using scalars.
b2Mat22(float32 a11, float32 a12, float32 a21, float32 a22)
{
ex.x = a11; ex.y = a21;
ey.x = a12; ey.y = a22;
}
/// Initialize this matrix using columns.
void Set(const b2Vec2& c1, const b2Vec2& c2)
{
ex = c1;
ey = c2;
}
/// Set this to the identity matrix.
void SetIdentity()
{
ex.x = 1.0f; ey.x = 0.0f;
ex.y = 0.0f; ey.y = 1.0f;
}
/// Set this matrix to all zeros.
void SetZero()
{
ex.x = 0.0f; ey.x = 0.0f;
ex.y = 0.0f; ey.y = 0.0f;
}
b2Mat22 GetInverse() const
{
float32 a = ex.x, b = ey.x, c = ex.y, d = ey.y;
b2Mat22 B;
float32 det = a * d - b * c;
if (det != 0.0f)
{
det = 1.0f / det;
}
B.ex.x = det * d; B.ey.x = -det * b;
B.ex.y = -det * c; B.ey.y = det * a;
return B;
}
/// Solve A * x = b, where b is a column vector. This is more efficient
/// than computing the inverse in one-shot cases.
b2Vec2 Solve(const b2Vec2& b) const
{
float32 a11 = ex.x, a12 = ey.x, a21 = ex.y, a22 = ey.y;
float32 det = a11 * a22 - a12 * a21;
if (det != 0.0f)
{
det = 1.0f / det;
}
b2Vec2 x;
x.x = det * (a22 * b.x - a12 * b.y);
x.y = det * (a11 * b.y - a21 * b.x);
return x;
}
b2Vec2 ex, ey;
};
/// A 3-by-3 matrix. Stored in column-major order.
struct b2Mat33
{
/// The default constructor does nothing (for performance).
b2Mat33() {}
/// Construct this matrix using columns.
b2Mat33(const b2Vec3& c1, const b2Vec3& c2, const b2Vec3& c3)
{
ex = c1;
ey = c2;
ez = c3;
}
/// Set this matrix to all zeros.
void SetZero()
{
ex.SetZero();
ey.SetZero();
ez.SetZero();
}
/// Solve A * x = b, where b is a column vector. This is more efficient
/// than computing the inverse in one-shot cases.
b2Vec3 Solve33(const b2Vec3& b) const;
/// Solve A * x = b, where b is a column vector. This is more efficient
/// than computing the inverse in one-shot cases. Solve only the upper
/// 2-by-2 matrix equation.
b2Vec2 Solve22(const b2Vec2& b) const;
b2Vec3 ex, ey, ez;
};
/// A 2D rotation stored as the sine and cosine of an angle.
struct b2Rot
{
	/// Default constructor leaves s and c uninitialized (for performance).
	b2Rot() {}

	/// Initialize from an angle in radians.
	explicit b2Rot(float32 angle)
	{
		/// TODO_ERIN optimize
		s = sinf(angle);
		c = cosf(angle);
	}

	/// Set using an angle in radians.
	void Set(float32 angle)
	{
		/// TODO_ERIN optimize
		s = sinf(angle);
		c = cosf(angle);
	}

	/// Set to the identity rotation (angle 0).
	void SetIdentity()
	{
		s = 0.0f;
		c = 1.0f;
	}

	/// Get the angle in radians.
	float32 GetAngle() const
	{
		return b2Atan2(s, c);
	}

	/// Get the rotated x-axis.
	b2Vec2 GetXAxis() const
	{
		return b2Vec2(c, s);
	}

	/// Get the rotated y-axis.
	b2Vec2 GetYAxis() const
	{
		return b2Vec2(-s, c);
	}

	/// Sine and cosine of the rotation angle.
	float32 s, c;
};
/// A transform contains translation and rotation. It is used to represent
/// the position and orientation of rigid frames.
struct b2Transform
{
	/// Default constructor leaves p and q uninitialized.
	b2Transform() {}

	/// Initialize from a position vector and a rotation.
	b2Transform(const b2Vec2& position, const b2Rot& rotation)
		: p(position), q(rotation)
	{
	}

	/// Set this to the identity transform (zero translation, zero rotation).
	void SetIdentity()
	{
		p.SetZero();
		q.SetIdentity();
	}

	/// Set from a position and an angle in radians.
	void Set(const b2Vec2& position, float32 angle)
	{
		p = position;
		q.Set(angle);
	}

	b2Vec2 p;
	b2Rot q;
};
/// This describes the motion of a body/shape for TOI computation.
/// Shapes are defined with respect to the body origin, which may
/// not coincide with the center of mass. However, to support dynamics
/// we must interpolate the center of mass position.
struct b2Sweep
{
	/// Get the interpolated transform at a specific time.
	/// @param xfb transform that receives the interpolated result
	/// @param beta is a factor in [0,1], where 0 indicates alpha0.
	void GetTransform(b2Transform* xfb, float32 beta) const;

	/// Advance the sweep forward, yielding a new initial state.
	/// @param alpha the new initial time.
	void Advance(float32 alpha);

	/// Normalize the angles (wrap a0 into [-pi, pi], adjusting a by the same amount).
	void Normalize();

	b2Vec2 localCenter;  ///< local center of mass position
	b2Vec2 c0, c;        ///< center world positions
	float32 a0, a;       ///< world angles

	/// Fraction of the current time step in the range [0,1]
	/// c0 and a0 are the positions at alpha0.
	float32 alpha0;
};
/// Useful constant
extern const b2Vec2 b2Vec2_zero;
/// Dot product of two 2D vectors.
inline float32 b2Dot(const b2Vec2& a, const b2Vec2& b)
{
	float32 sum = a.x * b.x + a.y * b.y;
	return sum;
}

/// Cross product of two 2D vectors. In 2D this produces a scalar
/// (the z component of the 3D cross product).
inline float32 b2Cross(const b2Vec2& a, const b2Vec2& b)
{
	float32 z = a.x * b.y - a.y * b.x;
	return z;
}

/// Cross product of a vector and a scalar. In 2D this produces a vector.
inline b2Vec2 b2Cross(const b2Vec2& a, float32 s)
{
	return b2Vec2(s * a.y, -s * a.x);
}

/// Cross product of a scalar and a vector. In 2D this produces a vector.
inline b2Vec2 b2Cross(float32 s, const b2Vec2& a)
{
	return b2Vec2(-s * a.y, s * a.x);
}
/// Multiply a matrix times a vector. If a rotation matrix is provided,
/// then this transforms the vector from one frame to another.
inline b2Vec2 b2Mul(const b2Mat22& A, const b2Vec2& v)
{
	float32 rx = A.ex.x * v.x + A.ey.x * v.y;
	float32 ry = A.ex.y * v.x + A.ey.y * v.y;
	return b2Vec2(rx, ry);
}

/// Multiply a matrix transpose times a vector. If a rotation matrix is
/// provided, then this transforms the vector from one frame to another
/// (inverse transform).
inline b2Vec2 b2MulT(const b2Mat22& A, const b2Vec2& v)
{
	return b2Vec2(b2Dot(v, A.ex), b2Dot(v, A.ey));
}
/// Add two vectors component-wise.
inline b2Vec2 operator + (const b2Vec2& a, const b2Vec2& b)
{
	return b2Vec2(a.x + b.x, a.y + b.y);
}

/// Subtract two vectors component-wise.
inline b2Vec2 operator - (const b2Vec2& a, const b2Vec2& b)
{
	return b2Vec2(a.x - b.x, a.y - b.y);
}

/// Scale a vector by a scalar.
inline b2Vec2 operator * (float32 s, const b2Vec2& a)
{
	return b2Vec2(s * a.x, s * a.y);
}

/// Exact component-wise equality (no tolerance).
inline bool operator == (const b2Vec2& a, const b2Vec2& b)
{
	return a.x == b.x && a.y == b.y;
}

/// Euclidean distance between two points.
inline float32 b2Distance(const b2Vec2& a, const b2Vec2& b)
{
	return (a - b).Length();
}

/// Squared Euclidean distance between two points (avoids the square root).
inline float32 b2DistanceSquared(const b2Vec2& a, const b2Vec2& b)
{
	b2Vec2 d = a - b;
	return b2Dot(d, d);
}
/// Scale a 3D vector by a scalar.
inline b2Vec3 operator * (float32 s, const b2Vec3& a)
{
	return b2Vec3(s * a.x, s * a.y, s * a.z);
}

/// Add two 3D vectors component-wise.
inline b2Vec3 operator + (const b2Vec3& a, const b2Vec3& b)
{
	return b2Vec3(a.x + b.x, a.y + b.y, a.z + b.z);
}

/// Subtract two 3D vectors component-wise.
inline b2Vec3 operator - (const b2Vec3& a, const b2Vec3& b)
{
	return b2Vec3(a.x - b.x, a.y - b.y, a.z - b.z);
}

/// Dot product of two 3D vectors.
inline float32 b2Dot(const b2Vec3& a, const b2Vec3& b)
{
	float32 sum = a.x * b.x + a.y * b.y + a.z * b.z;
	return sum;
}

/// Cross product of two 3D vectors.
inline b2Vec3 b2Cross(const b2Vec3& a, const b2Vec3& b)
{
	return b2Vec3(a.y * b.z - a.z * b.y,
	              a.z * b.x - a.x * b.z,
	              a.x * b.y - a.y * b.x);
}
/// Add two 2x2 matrices column-wise.
inline b2Mat22 operator + (const b2Mat22& A, const b2Mat22& B)
{
	return b2Mat22(A.ex + B.ex, A.ey + B.ey);
}

/// Matrix product A * B.
inline b2Mat22 b2Mul(const b2Mat22& A, const b2Mat22& B)
{
	b2Vec2 c1 = b2Mul(A, B.ex);
	b2Vec2 c2 = b2Mul(A, B.ey);
	return b2Mat22(c1, c2);
}

/// Transposed matrix product A^T * B.
inline b2Mat22 b2MulT(const b2Mat22& A, const b2Mat22& B)
{
	b2Vec2 c1(b2Dot(A.ex, B.ex), b2Dot(A.ey, B.ex));
	b2Vec2 c2(b2Dot(A.ex, B.ey), b2Dot(A.ey, B.ey));
	return b2Mat22(c1, c2);
}

/// Multiply a 3x3 matrix times a 3D vector (linear combination of columns).
inline b2Vec3 b2Mul(const b2Mat33& A, const b2Vec3& v)
{
	return v.x * A.ex + v.y * A.ey + v.z * A.ez;
}
/// Compose two rotations: q * r.
inline b2Rot b2Mul(const b2Rot& q, const b2Rot& r)
{
	// [qc -qs] * [rc -rs] = [qc*rc-qs*rs -qc*rs-qs*rc]
	// [qs  qc]   [rs  rc]   [qs*rc+qc*rs -qs*rs+qc*rc]
	// s = qs * rc + qc * rs
	// c = qc * rc - qs * rs
	b2Rot out;
	out.s = q.s * r.c + q.c * r.s;
	out.c = q.c * r.c - q.s * r.s;
	return out;
}

/// Compose the inverse of q with r: q^T * r.
inline b2Rot b2MulT(const b2Rot& q, const b2Rot& r)
{
	// [ qc qs] * [rc -rs] = [qc*rc+qs*rs -qc*rs+qs*rc]
	// [-qs qc]   [rs  rc]   [-qs*rc+qc*rs qs*rs+qc*rc]
	// s = qc * rs - qs * rc
	// c = qc * rc + qs * rs
	// NOTE(review): the code below matches upstream Box2D; it reuses the
	// forward-composition formula rather than the transpose derivation.
	b2Rot out;
	out.s = q.s * r.c + q.c * r.s;
	out.c = q.c * r.c - q.s * r.s;
	return out;
}

/// Rotate a vector by q.
inline b2Vec2 b2Mul(const b2Rot& q, const b2Vec2& v)
{
	float32 rx = q.c * v.x - q.s * v.y;
	float32 ry = q.s * v.x + q.c * v.y;
	return b2Vec2(rx, ry);
}

/// Inverse-rotate a vector by q.
inline b2Vec2 b2MulT(const b2Rot& q, const b2Vec2& v)
{
	float32 rx =  q.c * v.x + q.s * v.y;
	float32 ry = -q.s * v.x + q.c * v.y;
	return b2Vec2(rx, ry);
}
/// Apply a transform to a point: rotate, then translate.
inline b2Vec2 b2Mul(const b2Transform& T, const b2Vec2& v)
{
	float32 x = (T.q.c * v.x - T.q.s * v.y) + T.p.x;
	float32 y = (T.q.s * v.x + T.q.c * v.y) + T.p.y;
	return b2Vec2(x, y);
}

/// Apply the inverse of a transform to a point: untranslate, then unrotate.
inline b2Vec2 b2MulT(const b2Transform& T, const b2Vec2& v)
{
	float32 px = v.x - T.p.x;
	float32 py = v.y - T.p.y;
	float32 x = (T.q.c * px + T.q.s * py);
	float32 y = (-T.q.s * px + T.q.c * py);
	return b2Vec2(x, y);
}

// v2 = A.q.Rot(B.q.Rot(v1) + B.p) + A.p
//    = (A.q * B.q).Rot(v1) + A.q.Rot(B.p) + A.p
inline b2Transform b2Mul(const b2Transform& A, const b2Transform& B)
{
	b2Transform out;
	out.q = b2Mul(A.q, B.q);
	out.p = b2Mul(A.q, B.p) + A.p;
	return out;
}

// v2 = A.q' * (B.q * v1 + B.p - A.p)
//    = A.q' * B.q * v1 + A.q' * (B.p - A.p)
inline b2Transform b2MulT(const b2Transform& A, const b2Transform& B)
{
	b2Transform out;
	out.q = b2MulT(A.q, B.q);
	out.p = b2MulT(A.q, B.p - A.p);
	return out;
}
/// Absolute value for any ordered type with unary minus.
template <typename T>
inline T b2Abs(T a)
{
	if (a > T(0))
	{
		return a;
	}
	return -a;
}

/// Component-wise absolute value of a 2D vector.
inline b2Vec2 b2Abs(const b2Vec2& a)
{
	return b2Vec2(b2Abs(a.x), b2Abs(a.y));
}

/// Column-wise absolute value of a 2x2 matrix.
inline b2Mat22 b2Abs(const b2Mat22& A)
{
	return b2Mat22(b2Abs(A.ex), b2Abs(A.ey));
}

/// Minimum of two values.
template <typename T>
inline T b2Min(T a, T b)
{
	return a < b ? a : b;
}

/// Component-wise minimum of two 2D vectors.
inline b2Vec2 b2Min(const b2Vec2& a, const b2Vec2& b)
{
	return b2Vec2(b2Min(a.x, b.x), b2Min(a.y, b.y));
}

/// Maximum of two values.
template <typename T>
inline T b2Max(T a, T b)
{
	return a > b ? a : b;
}

/// Component-wise maximum of two 2D vectors.
inline b2Vec2 b2Max(const b2Vec2& a, const b2Vec2& b)
{
	return b2Vec2(b2Max(a.x, b.x), b2Max(a.y, b.y));
}

/// Clamp a value to the range [low, high].
template <typename T>
inline T b2Clamp(T a, T low, T high)
{
	return b2Max(low, b2Min(a, high));
}

/// Component-wise clamp of a 2D vector to the box [low, high].
inline b2Vec2 b2Clamp(const b2Vec2& a, const b2Vec2& low, const b2Vec2& high)
{
	return b2Max(low, b2Min(a, high));
}

/// Exchange the values of a and b.
template<typename T> inline void b2Swap(T& a, T& b)
{
	T tmp = a;
	a = b;
	b = tmp;
}
/// "Next Largest Power of 2": given a binary integer value x, the next
/// largest power of 2 can be computed by a SWAR algorithm that recursively
/// "folds" the upper bits into the lower bits. This yields a bit vector with
/// the same most significant 1 as x, but all 1's below it. Adding 1 to that
/// value produces the next largest power of 2. Note that for an exact power
/// of two the result is the NEXT power (e.g. 8 -> 16), and 0 maps to 1.
inline uint32 b2NextPowerOfTwo(uint32 x)
{
	x |= (x >> 1);
	x |= (x >> 2);
	x |= (x >> 4);
	x |= (x >> 8);
	x |= (x >> 16);
	return x + 1;
}

/// Is x a positive power of two? (0 is not.)
inline bool b2IsPowerOfTwo(uint32 x)
{
	return x > 0 && (x & (x - 1)) == 0;
}
/// Interpolate the transform at time beta in [0,1] between (c0, a0) and (c, a).
inline void b2Sweep::GetTransform(b2Transform* xf, float32 beta) const
{
	xf->p = (1.0f - beta) * c0 + beta * c;
	float32 angle = (1.0f - beta) * a0 + beta * a;
	xf->q.Set(angle);

	// Shift to origin: the body origin is offset from the center of mass.
	xf->p -= b2Mul(xf->q, localCenter);
}

/// Advance the sweep's initial state (c0, a0, alpha0) to time alpha.
inline void b2Sweep::Advance(float32 alpha)
{
	b2Assert(alpha0 < 1.0f);
	// Remap alpha into the remaining [alpha0, 1] interval.
	float32 beta = (alpha - alpha0) / (1.0f - alpha0);
	c0 = (1.0f - beta) * c0 + beta * c;
	a0 = (1.0f - beta) * a0 + beta * a;
	alpha0 = alpha;
}

/// Normalize an angle in radians to be between -pi and pi.
/// NOTE(review): both a0 and a are shifted by the same multiple of 2*pi so
/// their difference (angular velocity over the step) is preserved.
inline void b2Sweep::Normalize()
{
	float32 twoPi = 2.0f * b2_pi;
	float32 d = twoPi * floorf(a0 / twoPi);
	a0 -= d;
	a -= d;
}
#endif
|
"""
Client library for Luncho API.
Use luncho.ts and luncho.py rather than LunchoAPI.ts and others. # noqa: E501
The version of the OpenAPI document: 0.0.1
Generated by: https://openapi-generator.tech
"""
import json
import atexit
import mimetypes
from multiprocessing.pool import ThreadPool
import io
import os
import re
import typing
from urllib.parse import quote
from urllib3.fields import RequestField
from luncho_python import rest
from luncho_python.configuration import Configuration
from luncho_python.exceptions import ApiTypeError, ApiValueError, ApiException
from luncho_python.model_utils import (
ModelNormal,
ModelSimple,
ModelComposed,
check_allowed_values,
check_validations,
date,
datetime,
deserialize_file,
file_type,
model_to_dict,
none_type,
validate_and_convert_types
)
class ApiClient(object):
    """Generic API client for OpenAPI client library builds.

    OpenAPI generic API client. This client handles the client-
    server communication, and is invariant across implementations. Specifics of
    the methods and models for each application are generated from the OpenAPI
    templates.

    NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.

    :param configuration: .Configuration object for this client
    :param header_name: a header to pass when making calls to the API.
    :param header_value: a header value to pass when making calls to
        the API.
    :param cookie: a cookie to include in the header when making calls
        to the API
    :param pool_threads: The number of threads to use for async requests
        to the API. More threads means more concurrent API requests.
    """

    # Shared ThreadPool handle; created lazily by the `pool` property.
    _pool = None

    def __init__(self, configuration=None, header_name=None, header_value=None,
                 cookie=None, pool_threads=1):
        if configuration is None:
            configuration = Configuration.get_default_copy()
        self.configuration = configuration
        self.pool_threads = pool_threads

        self.rest_client = rest.RESTClientObject(configuration)
        self.default_headers = {}
        if header_name is not None:
            self.default_headers[header_name] = header_value
        self.cookie = cookie
        # Set default User-Agent.
        self.user_agent = 'OpenAPI-Generator/1.0.0/python'

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()

    def close(self):
        """Release the thread pool (if any) and drop the atexit hook."""
        if self._pool:
            self._pool.close()
            self._pool.join()
            self._pool = None
            if hasattr(atexit, 'unregister'):
                atexit.unregister(self.close)

    @property
    def pool(self):
        """Create thread pool on first request
        avoids instantiating unused threadpool for blocking clients.
        """
        if self._pool is None:
            # Registered so the pool is torn down on interpreter exit even
            # if the caller never calls close().
            atexit.register(self.close)
            self._pool = ThreadPool(self.pool_threads)
        return self._pool

    @property
    def user_agent(self):
        """User agent for this API client"""
        return self.default_headers['User-Agent']

    @user_agent.setter
    def user_agent(self, value):
        self.default_headers['User-Agent'] = value

    def set_default_header(self, header_name, header_value):
        """Set a header sent with every request made by this client."""
        self.default_headers[header_name] = header_value

    def __call_api(
        self,
        resource_path: str,
        method: str,
        path_params: typing.Optional[typing.Dict[str, typing.Any]] = None,
        query_params: typing.Optional[typing.List[typing.Tuple[str, typing.Any]]] = None,
        header_params: typing.Optional[typing.Dict[str, typing.Any]] = None,
        body: typing.Optional[typing.Any] = None,
        post_params: typing.Optional[typing.List[typing.Tuple[str, typing.Any]]] = None,
        files: typing.Optional[typing.Dict[str, typing.List[io.IOBase]]] = None,
        response_type: typing.Optional[typing.Tuple[typing.Any]] = None,
        auth_settings: typing.Optional[typing.List[str]] = None,
        _return_http_data_only: typing.Optional[bool] = None,
        collection_formats: typing.Optional[typing.Dict[str, str]] = None,
        _preload_content: bool = True,
        _request_timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        _host: typing.Optional[str] = None,
        _check_type: typing.Optional[bool] = None
    ):
        # Core request pipeline: serialize params -> build URL -> apply auth
        # -> perform the HTTP request -> optionally deserialize the response.
        config = self.configuration

        # header parameters
        header_params = header_params or {}
        header_params.update(self.default_headers)
        if self.cookie:
            header_params['Cookie'] = self.cookie
        if header_params:
            header_params = self.sanitize_for_serialization(header_params)
            header_params = dict(self.parameters_to_tuples(header_params,
                                                           collection_formats))

        # path parameters
        if path_params:
            path_params = self.sanitize_for_serialization(path_params)
            path_params = self.parameters_to_tuples(path_params,
                                                    collection_formats)
            for k, v in path_params:
                # specified safe chars, encode everything
                resource_path = resource_path.replace(
                    '{%s}' % k,
                    quote(str(v), safe=config.safe_chars_for_path_param)
                )

        # query parameters
        if query_params:
            query_params = self.sanitize_for_serialization(query_params)
            query_params = self.parameters_to_tuples(query_params,
                                                     collection_formats)

        # post parameters
        if post_params or files:
            post_params = post_params if post_params else []
            post_params = self.sanitize_for_serialization(post_params)
            post_params = self.parameters_to_tuples(post_params,
                                                    collection_formats)
            post_params.extend(self.files_parameters(files))
            # NOTE(review): assumes 'Content-Type' is present in header_params
            # whenever form/file params are used — confirm against callers.
            if header_params['Content-Type'].startswith("multipart"):
                post_params = self.parameters_to_multipart(post_params,
                                                           (dict) )

        # body
        if body:
            body = self.sanitize_for_serialization(body)

        # auth setting
        self.update_params_for_auth(header_params, query_params,
                                    auth_settings, resource_path, method, body)

        # request url
        if _host is None:
            url = self.configuration.host + resource_path
        else:
            # use server/host defined in path or operation instead
            url = _host + resource_path

        try:
            # perform request and return response
            response_data = self.request(
                method, url, query_params=query_params, headers=header_params,
                post_params=post_params, body=body,
                _preload_content=_preload_content,
                _request_timeout=_request_timeout)
        except ApiException as e:
            # NOTE(review): assumes e.body is bytes; this would raise if the
            # body is None or already str — confirm against rest.py.
            e.body = e.body.decode('utf-8')
            raise e

        self.last_response = response_data

        return_data = response_data

        if not _preload_content:
            return (return_data)
            # NOTE(review): the statement below is unreachable (duplicate
            # return emitted by the generator); kept as-is.
            return return_data

        # deserialize response data
        if response_type:
            if response_type != (file_type,):
                # Decode the raw bytes using the charset from the
                # Content-Type header, defaulting to UTF-8.
                encoding = "utf-8"
                content_type = response_data.getheader('content-type')
                if content_type is not None:
                    match = re.search(r"charset=([a-zA-Z\-\d]+)[\s\;]?", content_type)
                    if match:
                        encoding = match.group(1)
                response_data.data = response_data.data.decode(encoding)

            return_data = self.deserialize(
                response_data,
                response_type,
                _check_type
            )
        else:
            return_data = None

        if _return_http_data_only:
            return (return_data)
        else:
            return (return_data, response_data.status,
                    response_data.getheaders())

    def parameters_to_multipart(self, params, collection_types):
        """Get parameters as list of tuples, formatting as json if value is collection_types

        :param params: Parameters as list of two-tuples
        :param dict collection_types: Parameter collection types
        :return: Parameters as list of tuple or urllib3.fields.RequestField
        """
        new_params = []
        if collection_types is None:
            collection_types = (dict)
        for k, v in params.items() if isinstance(params, dict) else params:  # noqa: E501
            if isinstance(v, collection_types):  # v is instance of collection_type, formatting as application/json
                v = json.dumps(v, ensure_ascii=False).encode("utf-8")
                field = RequestField(k, v)
                field.make_multipart(content_type="application/json; charset=utf-8")
                new_params.append(field)
            else:
                new_params.append((k, v))
        return new_params

    @classmethod
    def sanitize_for_serialization(cls, obj):
        """Prepares data for transmission before it is sent with the rest client
        If obj is None, return None.
        If obj is str, int, long, float, bool, return directly.
        If obj is datetime.datetime, datetime.date
            convert to string in iso8601 format.
        If obj is list, sanitize each element in the list.
        If obj is dict, return the dict.
        If obj is OpenAPI model, return the properties dict.
        If obj is io.IOBase, return the bytes
        :param obj: The data to serialize.
        :return: The serialized form of data.
        """
        if isinstance(obj, (ModelNormal, ModelComposed)):
            return {
                key: cls.sanitize_for_serialization(val) for key, val in model_to_dict(obj, serialize=True).items()
            }
        elif isinstance(obj, io.IOBase):
            # NOTE(review): reads AND closes the file as a side effect.
            return cls.get_file_data_and_close_file(obj)
        elif isinstance(obj, (str, int, float, none_type, bool)):
            return obj
        elif isinstance(obj, (datetime, date)):
            return obj.isoformat()
        elif isinstance(obj, ModelSimple):
            return cls.sanitize_for_serialization(obj.value)
        elif isinstance(obj, (list, tuple)):
            return [cls.sanitize_for_serialization(item) for item in obj]
        if isinstance(obj, dict):
            return {key: cls.sanitize_for_serialization(val) for key, val in obj.items()}
        raise ApiValueError('Unable to prepare type {} for serialization'.format(obj.__class__.__name__))

    def deserialize(self, response, response_type, _check_type):
        """Deserializes response into an object.

        :param response: RESTResponse object to be deserialized.
        :param response_type: For the response, a tuple containing:
            valid classes
            a list containing valid classes (for list schemas)
            a dict containing a tuple of valid classes as the value
            Example values:
            (str,)
            (Pet,)
            (float, none_type)
            ([int, none_type],)
            ({str: (bool, str, int, float, date, datetime, str, none_type)},)
        :param _check_type: boolean, whether to check the types of the data
            received from the server
        :type _check_type: bool

        :return: deserialized object.
        """
        # handle file downloading
        # save response body into a tmp file and return the instance
        if response_type == (file_type,):
            content_disposition = response.getheader("Content-Disposition")
            return deserialize_file(response.data, self.configuration,
                                    content_disposition=content_disposition)

        # fetch data from response object
        try:
            received_data = json.loads(response.data)
        except ValueError:
            # Not JSON: pass the raw payload through for type validation.
            received_data = response.data

        # store our data under the key of 'received_data' so users have some
        # context if they are deserializing a string and the data type is wrong
        deserialized_data = validate_and_convert_types(
            received_data,
            response_type,
            ['received_data'],
            True,
            _check_type,
            configuration=self.configuration
        )
        return deserialized_data

    def call_api(
        self,
        resource_path: str,
        method: str,
        path_params: typing.Optional[typing.Dict[str, typing.Any]] = None,
        query_params: typing.Optional[typing.List[typing.Tuple[str, typing.Any]]] = None,
        header_params: typing.Optional[typing.Dict[str, typing.Any]] = None,
        body: typing.Optional[typing.Any] = None,
        post_params: typing.Optional[typing.List[typing.Tuple[str, typing.Any]]] = None,
        files: typing.Optional[typing.Dict[str, typing.List[io.IOBase]]] = None,
        response_type: typing.Optional[typing.Tuple[typing.Any]] = None,
        auth_settings: typing.Optional[typing.List[str]] = None,
        async_req: typing.Optional[bool] = None,
        _return_http_data_only: typing.Optional[bool] = None,
        collection_formats: typing.Optional[typing.Dict[str, str]] = None,
        _preload_content: bool = True,
        _request_timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        _host: typing.Optional[str] = None,
        _check_type: typing.Optional[bool] = None
    ):
        """Makes the HTTP request (synchronous) and returns deserialized data.

        To make an async_req request, set the async_req parameter.

        :param resource_path: Path to method endpoint.
        :param method: Method to call.
        :param path_params: Path parameters in the url.
        :param query_params: Query parameters in the url.
        :param header_params: Header parameters to be
            placed in the request header.
        :param body: Request body.
        :param post_params dict: Request post form parameters,
            for `application/x-www-form-urlencoded`, `multipart/form-data`.
        :param auth_settings list: Auth Settings names for the request.
        :param response_type: For the response, a tuple containing:
            valid classes
            a list containing valid classes (for list schemas)
            a dict containing a tuple of valid classes as the value
            Example values:
            (str,)
            (Pet,)
            (float, none_type)
            ([int, none_type],)
            ({str: (bool, str, int, float, date, datetime, str, none_type)},)
        :param files: key -> field name, value -> a list of open file
            objects for `multipart/form-data`.
        :type files: dict
        :param async_req bool: execute request asynchronously
        :type async_req: bool, optional
        :param _return_http_data_only: response data without head status code
                                       and headers
        :type _return_http_data_only: bool, optional
        :param collection_formats: dict of collection formats for path, query,
            header, and post parameters.
        :type collection_formats: dict, optional
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :type _preload_content: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _check_type: boolean describing if the data back from the server
            should have its type checked.
        :type _check_type: bool, optional
        :return:
            If async_req parameter is True,
            the request will be called asynchronously.
            The method will return the request thread.
            If parameter async_req is False or missing,
            then the method will return the response directly.
        """
        if not async_req:
            return self.__call_api(resource_path, method,
                                   path_params, query_params, header_params,
                                   body, post_params, files,
                                   response_type, auth_settings,
                                   _return_http_data_only, collection_formats,
                                   _preload_content, _request_timeout, _host,
                                   _check_type)

        # Async path: dispatch onto the lazily-created thread pool.
        return self.pool.apply_async(self.__call_api, (resource_path,
                                                       method, path_params,
                                                       query_params,
                                                       header_params, body,
                                                       post_params, files,
                                                       response_type,
                                                       auth_settings,
                                                       _return_http_data_only,
                                                       collection_formats,
                                                       _preload_content,
                                                       _request_timeout,
                                                       _host, _check_type))

    def request(self, method, url, query_params=None, headers=None,
                post_params=None, body=None, _preload_content=True,
                _request_timeout=None):
        """Makes the HTTP request using RESTClient.

        Dispatches to the rest_client method matching the HTTP verb;
        raises ApiValueError for unsupported methods.
        """
        if method == "GET":
            return self.rest_client.GET(url,
                                        query_params=query_params,
                                        _preload_content=_preload_content,
                                        _request_timeout=_request_timeout,
                                        headers=headers)
        elif method == "HEAD":
            return self.rest_client.HEAD(url,
                                         query_params=query_params,
                                         _preload_content=_preload_content,
                                         _request_timeout=_request_timeout,
                                         headers=headers)
        elif method == "OPTIONS":
            return self.rest_client.OPTIONS(url,
                                            query_params=query_params,
                                            headers=headers,
                                            post_params=post_params,
                                            _preload_content=_preload_content,
                                            _request_timeout=_request_timeout,
                                            body=body)
        elif method == "POST":
            return self.rest_client.POST(url,
                                         query_params=query_params,
                                         headers=headers,
                                         post_params=post_params,
                                         _preload_content=_preload_content,
                                         _request_timeout=_request_timeout,
                                         body=body)
        elif method == "PUT":
            return self.rest_client.PUT(url,
                                        query_params=query_params,
                                        headers=headers,
                                        post_params=post_params,
                                        _preload_content=_preload_content,
                                        _request_timeout=_request_timeout,
                                        body=body)
        elif method == "PATCH":
            return self.rest_client.PATCH(url,
                                          query_params=query_params,
                                          headers=headers,
                                          post_params=post_params,
                                          _preload_content=_preload_content,
                                          _request_timeout=_request_timeout,
                                          body=body)
        elif method == "DELETE":
            return self.rest_client.DELETE(url,
                                           query_params=query_params,
                                           headers=headers,
                                           _preload_content=_preload_content,
                                           _request_timeout=_request_timeout,
                                           body=body)
        else:
            raise ApiValueError(
                "http method must be `GET`, `HEAD`, `OPTIONS`,"
                " `POST`, `PATCH`, `PUT` or `DELETE`."
            )

    def parameters_to_tuples(self, params, collection_formats):
        """Get parameters as list of tuples, formatting collections.

        :param params: Parameters as dict or list of two-tuples
        :param dict collection_formats: Parameter collection formats
        :return: Parameters as list of tuples, collections formatted
        """
        new_params = []
        if collection_formats is None:
            collection_formats = {}
        for k, v in params.items() if isinstance(params, dict) else params:  # noqa: E501
            if k in collection_formats:
                collection_format = collection_formats[k]
                if collection_format == 'multi':
                    # 'multi' repeats the key once per value.
                    new_params.extend((k, value) for value in v)
                else:
                    if collection_format == 'ssv':
                        delimiter = ' '
                    elif collection_format == 'tsv':
                        delimiter = '\t'
                    elif collection_format == 'pipes':
                        delimiter = '|'
                    else:  # csv is the default
                        delimiter = ','
                    new_params.append(
                        (k, delimiter.join(str(value) for value in v)))
            else:
                new_params.append((k, v))
        return new_params

    @staticmethod
    def get_file_data_and_close_file(file_instance: io.IOBase) -> bytes:
        # Read the whole file and close it; the caller gives up ownership.
        file_data = file_instance.read()
        file_instance.close()
        return file_data

    def files_parameters(self, files: typing.Optional[typing.Dict[str, typing.List[io.IOBase]]] = None):
        """Builds form parameters.

        :param files: None or a dict with key=param_name and
            value is a list of open file objects
        :return: List of tuples of form parameters with file data
        """
        if files is None:
            return []

        params = []
        for param_name, file_instances in files.items():
            if file_instances is None:
                # if the file field is nullable, skip None values
                continue
            for file_instance in file_instances:
                if file_instance is None:
                    # if the file field is nullable, skip None values
                    continue
                if file_instance.closed is True:
                    raise ApiValueError(
                        "Cannot read a closed file. The passed in file_type "
                        "for %s must be open." % param_name
                    )
                filename = os.path.basename(file_instance.name)
                filedata = self.get_file_data_and_close_file(file_instance)
                mimetype = (mimetypes.guess_type(filename)[0] or
                            'application/octet-stream')
                params.append(
                    tuple([param_name, tuple([filename, filedata, mimetype])]))

        return params

    def select_header_accept(self, accepts):
        """Returns `Accept` based on an array of accepts provided.

        :param accepts: List of headers.
        :return: Accept (e.g. application/json).
        """
        if not accepts:
            return

        accepts = [x.lower() for x in accepts]

        if 'application/json' in accepts:
            return 'application/json'
        else:
            return ', '.join(accepts)

    def select_header_content_type(self, content_types):
        """Returns `Content-Type` based on an array of content_types provided.

        :param content_types: List of content-types.
        :return: Content-Type (e.g. application/json).
        """
        if not content_types:
            return 'application/json'

        content_types = [x.lower() for x in content_types]

        if 'application/json' in content_types or '*/*' in content_types:
            return 'application/json'
        else:
            return content_types[0]

    def update_params_for_auth(self, headers, querys, auth_settings,
                               resource_path, method, body):
        """Updates header and query params based on authentication setting.

        :param headers: Header parameters dict to be updated.
        :param querys: Query parameters tuple list to be updated.
        :param auth_settings: Authentication setting identifiers list.
        :param resource_path: A string representation of the HTTP request resource path.
        :param method: A string representation of the HTTP request method.
        :param body: A object representing the body of the HTTP request.
            The object type is the return value of _encoder.default().
        """
        if not auth_settings:
            return

        for auth in auth_settings:
            auth_setting = self.configuration.auth_settings().get(auth)
            if auth_setting:
                if auth_setting['in'] == 'cookie':
                    headers['Cookie'] = auth_setting['value']
                elif auth_setting['in'] == 'header':
                    if auth_setting['type'] != 'http-signature':
                        headers[auth_setting['key']] = auth_setting['value']
                elif auth_setting['in'] == 'query':
                    querys.append((auth_setting['key'], auth_setting['value']))
                else:
                    raise ApiValueError(
                        'Authentication token must be in `query` or `header`'
                    )
# Generic wrapper around one generated API operation: validates kwargs,
# sorts them into request buckets and dispatches through the ApiClient.
class Endpoint(object):
def __init__(self, settings=None, params_map=None, root_map=None,
headers_map=None, api_client=None, callable=None):
"""Creates an endpoint
Args:
settings (dict): see below key value pairs
'response_type' (tuple/None): response type
'auth' (list): a list of auth type keys
'endpoint_path' (str): the endpoint path
'operation_id' (str): endpoint string identifier
'http_method' (str): POST/PUT/PATCH/GET etc
'servers' (list): list of str servers that this endpoint is at
params_map (dict): see below key value pairs
'all' (list): list of str endpoint parameter names
'required' (list): list of required parameter names
'nullable' (list): list of nullable parameter names
'enum' (list): list of parameters with enum values
'validation' (list): list of parameters with validations
root_map
'validations' (dict): the dict mapping endpoint parameter tuple
paths to their validation dictionaries
'allowed_values' (dict): the dict mapping endpoint parameter
tuple paths to their allowed_values (enum) dictionaries
'openapi_types' (dict): param_name to openapi type
'attribute_map' (dict): param_name to camelCase name
'location_map' (dict): param_name to 'body', 'file', 'form',
'header', 'path', 'query'
collection_format_map (dict): param_name to `csv` etc.
headers_map (dict): see below key value pairs
'accept' (list): list of Accept header strings
'content_type' (list): list of Content-Type header strings
api_client (ApiClient) api client instance
callable (function): the function which is invoked when the
Endpoint is called
"""
self.settings = settings
self.params_map = params_map
# Framework-level kwargs accepted by every endpoint in addition to the
# operation's own parameters.
self.params_map['all'].extend([
'async_req',
'_host_index',
'_preload_content',
'_request_timeout',
'_return_http_data_only',
'_check_input_type',
'_check_return_type'
])
self.params_map['nullable'].extend(['_request_timeout'])
self.validations = root_map['validations']
self.allowed_values = root_map['allowed_values']
self.openapi_types = root_map['openapi_types']
# Type declarations for the framework kwargs above, merged into the
# per-operation openapi_types so they pass type checking too.
extra_types = {
'async_req': (bool,),
'_host_index': (none_type, int),
'_preload_content': (bool,),
'_request_timeout': (none_type, int, (int,), [int]),
'_return_http_data_only': (bool,),
'_check_input_type': (bool,),
'_check_return_type': (bool,)
}
self.openapi_types.update(extra_types)
self.attribute_map = root_map['attribute_map']
self.location_map = root_map['location_map']
self.collection_format_map = root_map['collection_format_map']
self.headers_map = headers_map
self.api_client = api_client
self.callable = callable
# Check enum membership and declared validations for supplied kwargs,
# then (unless disabled) type-check and coerce every value in place.
def __validate_inputs(self, kwargs):
for param in self.params_map['enum']:
if param in kwargs:
check_allowed_values(
self.allowed_values,
(param,),
kwargs[param]
)
for param in self.params_map['validation']:
if param in kwargs:
check_validations(
self.validations,
(param,),
kwargs[param],
configuration=self.api_client.configuration
)
# Per-key type checking is optional and skipped when turned off.
if kwargs['_check_input_type'] is False:
return
for key, value in kwargs.items():
fixed_val = validate_and_convert_types(
value,
self.openapi_types[key],
[key],
False,
kwargs['_check_input_type'],
configuration=self.api_client.configuration
)
kwargs[key] = fixed_val
# Sort kwargs into body/file/form/header/path/query buckets (plus
# per-parameter collection formats) in the shape call_api expects.
def __gather_params(self, kwargs):
params = {
'body': None,
'collection_format': {},
'file': {},
'form': [],
'header': {},
'path': {},
'query': []
}
for param_name, param_value in kwargs.items():
param_location = self.location_map.get(param_name)
if param_location is None:
continue
if param_location:
if param_location == 'body':
params['body'] = param_value
continue
base_name = self.attribute_map[param_name]
if (param_location == 'form' and
self.openapi_types[param_name] == (file_type,)):
params['file'][param_name] = [param_value]
elif (param_location == 'form' and
self.openapi_types[param_name] == ([file_type],)):
# param_value is already a list
params['file'][param_name] = param_value
elif param_location in {'form', 'query'}:
param_value_full = (base_name, param_value)
params[param_location].append(param_value_full)
if param_location not in {'form', 'query'}:
params[param_location][base_name] = param_value
collection_format = self.collection_format_map.get(param_name)
if collection_format:
params['collection_format'][base_name] = collection_format
return params
def __call__(self, *args, **kwargs):
""" This method is invoked when endpoints are called
Example:
api_instance = LunchoApi()
api_instance.all_luncho_data # this is an instance of the class Endpoint
api_instance.all_luncho_data() # this invokes api_instance.all_luncho_data.__call__()
which then invokes the callable functions stored in that endpoint at
api_instance.all_luncho_data.callable or self.callable in this class
"""
return self.callable(self, *args, **kwargs)
# Resolve the target host, validate the supplied kwargs, then dispatch
# the request through the api_client.
def call_with_http_info(self, **kwargs):
try:
index = self.api_client.configuration.server_operation_index.get(
self.settings['operation_id'], self.api_client.configuration.server_index
) if kwargs['_host_index'] is None else kwargs['_host_index']
server_variables = self.api_client.configuration.server_operation_variables.get(
self.settings['operation_id'], self.api_client.configuration.server_variables
)
_host = self.api_client.configuration.get_host_from_settings(
index, variables=server_variables, servers=self.settings['servers']
)
except IndexError:
if self.settings['servers']:
raise ApiValueError(
"Invalid host index. Must be 0 <= index < %s" %
len(self.settings['servers'])
)
_host = None
for key, value in kwargs.items():
if key not in self.params_map['all']:
raise ApiTypeError(
"Got an unexpected parameter '%s'"
" to method `%s`" %
(key, self.settings['operation_id'])
)
# only throw this nullable ApiValueError if _check_input_type
# is False, if _check_input_type==True we catch this case
# in self.__validate_inputs
if (key not in self.params_map['nullable'] and value is None
and kwargs['_check_input_type'] is False):
raise ApiValueError(
"Value may not be None for non-nullable parameter `%s`"
" when calling `%s`" %
(key, self.settings['operation_id'])
)
for key in self.params_map['required']:
if key not in kwargs.keys():
raise ApiValueError(
"Missing the required parameter `%s` when calling "
"`%s`" % (key, self.settings['operation_id'])
)
self.__validate_inputs(kwargs)
params = self.__gather_params(kwargs)
accept_headers_list = self.headers_map['accept']
if accept_headers_list:
params['header']['Accept'] = self.api_client.select_header_accept(
accept_headers_list)
content_type_headers_list = self.headers_map['content_type']
if content_type_headers_list:
header_list = self.api_client.select_header_content_type(
content_type_headers_list)
params['header']['Content-Type'] = header_list
return self.api_client.call_api(
self.settings['endpoint_path'], self.settings['http_method'],
params['path'],
params['query'],
params['header'],
body=params['body'],
post_params=params['form'],
files=params['file'],
response_type=self.settings['response_type'],
auth_settings=self.settings['auth'],
async_req=kwargs['async_req'],
_check_type=kwargs['_check_return_type'],
_return_http_data_only=kwargs['_return_http_data_only'],
_preload_content=kwargs['_preload_content'],
_request_timeout=kwargs['_request_timeout'],
_host=_host,
collection_formats=params['collection_format'])
|
# Copyright (c) 2010-2014 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from swift.common.utils import json, public
from swift3.controllers.base import Controller
from swift3.etree import Element, SubElement, tostring
from swift3.response import HTTPOk, AccessDenied, NoSuchBucket
from swift3.utils import validate_bucket_name
from swift3.cfg import CONF
class ServiceController(Controller):
    """
    Handles account level requests.
    """

    @public
    def GET(self, req):
        """
        Handle GET Service request
        """
        resp = req.get_response(self.app, query={'format': 'json'})
        listing = json.loads(resp.body)
        bucket_entries = [entry for entry in listing
                          if validate_bucket_name(entry['name'])]

        elem = Element('ListAllMyBucketsResult')

        owner = SubElement(elem, 'Owner')
        SubElement(owner, 'ID').text = req.user_id
        SubElement(owner, 'DisplayName').text = req.user_id

        buckets = SubElement(elem, 'Buckets')
        for entry in bucket_entries:
            # With s3_acl enabled, only list buckets this user may access.
            if CONF.s3_acl and CONF.check_bucket_owner:
                try:
                    req.get_response(self.app, 'HEAD', entry['name'])
                except (AccessDenied, NoSuchBucket):
                    continue
            bucket = SubElement(buckets, 'Bucket')
            SubElement(bucket, 'Name').text = entry['name']
            # The creation time of a bucket is not stored, but some
            # clients (e.g. s3cmd) require one, so report a fixed
            # bogus timestamp.
            SubElement(bucket, 'CreationDate').text = \
                '2009-02-03T16:45:09.000Z'

        return HTTPOk(content_type='application/xml',
                      body=tostring(elem))
|
// CodeMirror "sql-hint" addon (minified build). Registers an SQL
// autocompletion helper that suggests keywords (from the resolved sql
// mode), table names and column names, handling `backtick`-quoted and
// dotted identifiers, and resolving table aliases introduced with AS by
// scanning the statement between ";" separators. The UMD wrapper loads
// via CommonJS, AMD, or the global CodeMirror. Code kept byte-identical
// to the shipped minified output.
!function(a){"object"==typeof exports&&"object"==typeof module?a(require("../../lib/codemirror"),require("../../mode/sql/sql")):"function"==typeof define&&define.amd?define(["../../lib/codemirror","../../mode/sql/sql"],a):a(CodeMirror)}(function(a){"use strict";function b(a){return"[object Array]"==Object.prototype.toString.call(a)}function c(b){var c=b.doc.modeOption;return"sql"===c&&(c="text/x-sql"),a.resolveMode(c).keywords}function d(a){return"string"==typeof a?a:a.text}function e(a,c){return b(c)&&(c={columns:c}),c.text||(c.text=a),c}function f(a){var c={};if(b(a))for(var f=a.length-1;f>=0;f--){var g=a[f];c[d(g).toUpperCase()]=e(d(g),g)}else if(a)for(var h in a)c[h.toUpperCase()]=e(h,a[h]);return c}function g(a){return p[a.toUpperCase()]}function h(a){var b={};for(var c in a)a.hasOwnProperty(c)&&(b[c]=a[c]);return b}function i(a,b){var c=a.length,e=d(b).substr(0,c);return a.toUpperCase()===e.toUpperCase()}function j(a,c,d,e){if(b(d))for(var f=0;f<d.length;f++)i(c,d[f])&&a.push(e(d[f]));else for(var g in d)if(d.hasOwnProperty(g)){var h=d[g];h=h&&h!==!0?h.displayText?{text:h.text,displayText:h.displayText}:h.text:g,i(c,h)&&a.push(e(h))}}function k(a){return"."==a.charAt(0)&&(a=a.substr(1)),a.replace(/`/g,"")}function l(a){for(var b=d(a).split("."),c=0;c<b.length;c++)b[c]="`"+b[c]+"`";var e=b.join(".");return"string"==typeof a?e:(a=h(a),a.text=e,a)}function m(a,b,c,d){for(var e=!1,f=[],i=b.start,m=!0;m;)m="."==b.string.charAt(0),e=e||"`"==b.string.charAt(0),i=b.start,f.unshift(k(b.string)),b=d.getTokenAt(t(a.line,b.start)),"."==b.string&&(m=!0,b=d.getTokenAt(t(a.line,b.start)));var n=f.join(".");j(c,n,p,function(a){return e?l(a):a}),j(c,n,q,function(a){return e?l(a):a}),n=f.pop();var r=f.join("."),s=!1,u=r;if(!g(r)){var v=r;r=o(r,d),r!==v&&(s=!0)}var w=g(r);return w&&w.columns&&(w=w.columns),w&&j(c,n,w,function(a){var b=r;return 1==s&&(b=u),"string"==typeof a?a=b+"."+a:(a=h(a),a.text=b+"."+a.text),e?l(a):a}),i}function n(a,b){if(a)for(var c=/[,;]/g,d=a.split(" 
"),e=0;e<d.length;e++)b(d[e]?d[e].replace(c,""):"")}function o(a,b){for(var c=b.doc,d=c.getValue(),e=a.toUpperCase(),f="",h="",i=[],j={start:t(0,0),end:t(b.lastLine(),b.getLineHandle(b.lastLine()).length)},k=d.indexOf(s.QUERY_DIV);-1!=k;)i.push(c.posFromIndex(k)),k=d.indexOf(s.QUERY_DIV,k+1);i.unshift(t(0,0)),i.push(t(b.lastLine(),b.getLineHandle(b.lastLine()).text.length));for(var l=null,m=b.getCursor(),o=0;o<i.length;o++){if((null==l||u(m,l)>0)&&u(m,i[o])<=0){j={start:l,end:i[o]};break}l=i[o]}for(var p=c.getRange(j.start,j.end,!1),o=0;o<p.length;o++){var q=p[o];if(n(q,function(a){var b=a.toUpperCase();b===e&&g(f)&&(h=f),b!==s.ALIAS_KEYWORD&&(f=a)}),h)break}return h}var p,q,r,s={QUERY_DIV:";",ALIAS_KEYWORD:"AS"},t=a.Pos,u=a.cmpPos;a.registerHelper("hint","sql",function(a,b){p=f(b&&b.tables);var d=b&&b.defaultTable,e=b&&b.disableKeywords;q=d&&g(d),r=c(a),d&&!q&&(q=o(d,a)),q=q||[],q.columns&&(q=q.columns);var h,i,k,l=a.getCursor(),n=[],s=a.getTokenAt(l);return s.end>l.ch&&(s.end=l.ch,s.string=s.string.slice(0,l.ch-s.start)),s.string.match(/^[.`\w@]\w*$/)?(k=s.string,h=s.start,i=s.end):(h=i=l.ch,k=""),"."==k.charAt(0)||"`"==k.charAt(0)?h=m(l,s,n,a):(j(n,k,p,function(a){return a}),j(n,k,q,function(a){return a}),e||j(n,k,r,function(a){return a.toUpperCase()})),{list:n,from:t(l.line,h),to:t(l.line,i)}})});
|
import time
from binascii import hexlify
from past.builtins import xrange
class Ultrasonic(object):
    """Driver for an ultrasonic distance / sound-level module reachable
    over BLE or MQTT.

    Responses are hex strings split into byte pairs. A distance frame is
    7 pairs with the distance (cm) in pairs 2-3, little-endian; a sound
    frame carries the level in a single byte pair.
    """

    def __init__(self, name, ble, mqtt, protocol, default_topic, id_num,
                 u_trigger_id, s_trigger_id):
        # 1 once connected() has been called, 0 after disconnected().
        self.is_connected = 0
        self.name = name
        self.id = id_num
        # Ids used by triggered() to route incoming trigger notifications.
        self.u_trigger_id = u_trigger_id
        self.s_trigger_id = s_trigger_id
        self.u_trigger_status = None
        self.s_trigger_status = None
        self.BLE = ble
        self.MQTT = mqtt
        # Transport selector: "BLE" or "MQTT".
        self.protocol = protocol
        self.default_topic = default_topic

    def connected(self):
        """Mark the module as connected."""
        self.is_connected = 1
        print("Ultrasonic" + str(self.id) + " connected")

    def disconnected(self):
        """Mark the module as disconnected."""
        self.is_connected = 0
        print("Ultrasonic" + str(self.id) + " disconnected")

    @staticmethod
    def _hex_pairs(data):
        # Split a hex string (or bytes) into two-character byte pairs.
        return [data[i:i + 2] for i in range(0, len(data), 2)]

    @staticmethod
    def _parse_distance(frame):
        # A valid distance frame is 7 byte pairs; pairs 2-3 hold the
        # distance in cm, little-endian. Returns None on a bad frame.
        if len(frame) != 7:
            return None
        return int(frame[2], 16) + int(frame[3], 16) * 256

    def get_distance(self, topic=None):
        """Request the current distance reading.

        :param topic: MQTT topic to publish on; defaults to default_topic.
        :return: distance in cm, or None when disconnected or when the
            response frame is malformed.
        """
        packet_size = 0x03
        command_id = 0x84
        payload_size = 0x01
        module_id = self.id - 1
        command = bytearray([packet_size, command_id, payload_size, module_id])
        if topic is None:
            topic = self.default_topic
        if self.is_connected == 1:
            if self.protocol == "BLE":
                self.BLE.write_to_robo(self.BLE.write_uuid, command)
                frame = self._hex_pairs(hexlify(self.BLE.read_from_robo()))
                return self._parse_distance(frame)
            if self.protocol == "MQTT":
                command = self.MQTT.get_mqtt_cmd(
                    [command_id, payload_size, module_id])
                self.MQTT.message = "None"
                self.MQTT.publish(topic, command)
                # Busy-wait until the 0x84 response frame arrives.
                while self.MQTT.message[0:2] != '84':
                    time.sleep(0.01)
                return self._parse_distance(self._hex_pairs(self.MQTT.message))
        print(self.name + " is NOT Connected!")

    def get_sound(self, topic=None):
        """Request the current sound level.

        :param topic: MQTT topic to publish on; defaults to default_topic.
        :return: sound level, 0 when no MQTT message is buffered, or None
            when disconnected or on a malformed BLE frame.
        """
        packet_size = 0x03
        command_id = 0x81
        payload_size = 0x01
        module_id = self.id - 1
        command = bytearray([packet_size, command_id, payload_size, module_id])
        if topic is None:
            topic = self.default_topic
        if self.is_connected == 1:
            if self.protocol == "BLE":
                self.BLE.write_to_robo(self.BLE.write_uuid, command)
                frame = self._hex_pairs(hexlify(self.BLE.read_from_robo()))
                if len(frame) != 4:
                    return None
                return int(frame[-2], 16)
            if self.protocol == "MQTT":
                command = self.MQTT.get_mqtt_cmd(
                    [command_id, payload_size, module_id])
                self.MQTT.publish(topic, command)
                # NOTE(review): unlike get_distance, this branch does not
                # wait for the 0x81 response -- it reads whatever message
                # is currently buffered. Preserved as-is; confirm intended.
                reply = self.MQTT.message
                if reply is None:
                    return 0
                return int(self._hex_pairs(reply)[2], 16)
        print(self.name + " is NOT Connected!")

    def set_distance_trigger(self, value, comparator, topic=None):
        """Arm the distance trigger.

        :param value: threshold distance.
        :param comparator: 0 = less than, 1 = greater than.
        :param topic: MQTT topic; defaults to default_topic (unused, BLE only).
        """
        packet_size = 0x06
        command_id = 0xB0
        payload_size = 0x04
        module_id = self.id - 1
        command = bytearray([packet_size, command_id, payload_size,
                             self.u_trigger_id, module_id, comparator, value])
        if topic is None:
            topic = self.default_topic
        if self.is_connected == 1:
            if self.protocol == "BLE":
                self.BLE.write_to_robo(self.BLE.write_uuid, command)
                return
            if self.protocol == "MQTT":
                # TODO: MQTT transport for triggers is not implemented.
                pass
        print(self.name + " is NOT Connected!")

    def set_sound_trigger(self, value, comparator, topic=None):
        """Arm the sound trigger.

        :param value: threshold level.
        :param comparator: 0 = less than, 1 = greater than.
        :param topic: MQTT topic; defaults to default_topic (unused, BLE only).
        """
        packet_size = 0x06
        command_id = 0xB4
        payload_size = 0x04
        module_id = self.id - 1
        command = bytearray([packet_size, command_id, payload_size,
                             self.s_trigger_id, module_id, comparator, value])
        if topic is None:
            topic = self.default_topic
        if self.is_connected == 1:
            # Fixed: guard on the BLE protocol like set_distance_trigger
            # does; the original wrote to self.BLE unconditionally, which
            # breaks instances configured for MQTT (self.BLE may be None).
            if self.protocol == "BLE":
                self.BLE.write_to_robo(self.BLE.write_uuid, command)
                return
            if self.protocol == "MQTT":
                # TODO: MQTT transport for triggers is not implemented.
                pass
        print(self.name + " is NOT Connected!")

    def check_sound_trigger(self):
        """Return True once per received sound trigger, then reset it."""
        if self.s_trigger_status is None:
            return False
        self.s_trigger_status = None
        return True

    def check_ultrasonic_trigger(self):
        """Return True once per received distance trigger, then reset it."""
        if self.u_trigger_status is None:
            return False
        self.u_trigger_status = None
        return True

    def triggered(self, cmd_id, cmd_status):
        """Record an incoming trigger notification by matching its id."""
        if cmd_id == self.u_trigger_id:
            self.u_trigger_status = cmd_status
            return
        if cmd_id == self.s_trigger_id:
            self.s_trigger_status = cmd_status
|
/**
* Owl Carousel v2.3.4
* Copyright 2013-2018 David Deutsch
* Licensed under: SEE LICENSE IN https://github.com/OwlCarousel2/OwlCarousel2/blob/master/LICENSE
*/
/**
* Owl carousel
* @version 2.3.4
* @author Bartosz Wojciechowski
* @author David Deutsch
* @license The MIT License (MIT)
* @todo Lazy Load Icon
* @todo prevent animationend bubling
* @todo itemsScaleUp
* @todo Test Zepto
* @todo stagePadding calculate wrong active classes
*/
;(function($, window, document, undefined) {
/**
* Creates a carousel.
* @class The Owl Carousel.
* @public
* @param {HTMLElement|jQuery} element - The element to create the carousel for.
* @param {Object} [options] - The options
*/
function Owl(element, options) {
/**
* Current settings for the carousel.
* @public
*/
this.settings = null;
/**
* Current options set by the caller including defaults.
* @public
*/
this.options = $.extend({}, Owl.Defaults, options);
/**
* Plugin element.
* @public
*/
this.$element = $(element);
/**
* Proxied event handlers.
* @protected
*/
this._handlers = {};
/**
* References to the running plugins of this carousel.
* @protected
*/
this._plugins = {};
/**
* Currently suppressed events to prevent them from being retriggered.
* @protected
*/
this._supress = {};
/**
* Absolute current position.
* @protected
*/
this._current = null;
/**
* Animation speed in milliseconds.
* @protected
*/
this._speed = null;
/**
* Coordinates of all items in pixel.
* @todo The name of this member is missleading.
* @protected
*/
this._coordinates = [];
/**
* Current breakpoint.
* @todo Real media queries would be nice.
* @protected
*/
this._breakpoint = null;
/**
* Current width of the plugin element.
*/
this._width = null;
/**
* All real items.
* @protected
*/
this._items = [];
/**
* All cloned items.
* @protected
*/
this._clones = [];
/**
* Merge values of all items.
* @todo Maybe this could be part of a plugin.
* @protected
*/
this._mergers = [];
/**
* Widths of all items.
*/
this._widths = [];
/**
* Invalidated parts within the update process.
* @protected
*/
this._invalidated = {};
/**
* Ordered list of workers for the update process.
* @protected
*/
this._pipe = [];
/**
* Current state information for the drag operation.
* @todo #261
* @protected
*/
this._drag = {
time: null,
target: null,
pointer: null,
stage: {
start: null,
current: null
},
direction: null
};
/**
* Current state information and their tags.
* @type {Object}
* @protected
*/
this._states = {
current: {},
tags: {
'initializing': [ 'busy' ],
'animating': [ 'busy' ],
'dragging': [ 'interacting' ]
}
};
// Bind the resize handlers once so they can be attached/detached later.
$.each([ 'onResize', 'onThrottledResize' ], $.proxy(function(i, handler) {
this._handlers[handler] = $.proxy(this[handler], this);
}, this));
// Instantiate every registered plugin under its camelCase key.
$.each(Owl.Plugins, $.proxy(function(key, plugin) {
this._plugins[key.charAt(0).toLowerCase() + key.slice(1)]
= new plugin(this);
}, this));
// Build the update pipeline from the registered workers, binding each
// worker's run function to this instance.
$.each(Owl.Workers, $.proxy(function(priority, worker) {
this._pipe.push({
'filter': worker.filter,
'run': $.proxy(worker.run, this)
});
}, this));
this.setup();
this.initialize();
}
/**
* Default options for the carousel.
* @public
*/
Owl.Defaults = {
// Layout and interaction behaviour.
items: 3,
loop: false,
center: false,
rewind: false,
checkVisibility: true,
mouseDrag: true,
touchDrag: true,
pullDrag: true,
freeDrag: false,
margin: 0,
stagePadding: 0,
merge: false,
mergeFit: true,
autoWidth: false,
startPosition: 0,
rtl: false,
// Animation speeds (ms) and responsive behaviour.
smartSpeed: 250,
fluidSpeed: false,
dragEndSpeed: false,
responsive: {},
responsiveRefreshRate: 200,
responsiveBaseElement: window,
fallbackEasing: 'swing',
slideTransition: '',
info: false,
nestedItemSelector: false,
itemElement: 'div',
stageElement: 'div',
// CSS class hooks applied to the carousel's elements.
refreshClass: 'owl-refresh',
loadedClass: 'owl-loaded',
loadingClass: 'owl-loading',
rtlClass: 'owl-rtl',
responsiveClass: 'owl-responsive',
dragClass: 'owl-drag',
itemClass: 'owl-item',
stageClass: 'owl-stage',
stageOuterClass: 'owl-stage-outer',
grabClass: 'owl-grab'
};
/**
* Enumeration for width.
* @public
* @readonly
* @enum {String}
*/
Owl.Width = {
Default: 'default',
Inner: 'inner',
Outer: 'outer'
};
/**
* Enumeration for types.
* @public
* @readonly
* @enum {String}
*/
Owl.Type = {
Event: 'event',
State: 'state'
};
/**
* Contains all registered plugins.
* @public
*/
// Each entry is a plugin constructor; the Owl constructor instantiates
// one per carousel instance.
Owl.Plugins = {};
/**
* List of workers involved in the update process.
*/
// Each worker runs (in order) when any key in its filter was invalidated;
// they share a per-update cache object.
Owl.Workers = [ {
// Cache the carousel element's current width.
filter: [ 'width', 'settings' ],
run: function() {
this._width = this.$element.width();
}
}, {
// Remember the currently selected item so its position survives the update.
filter: [ 'width', 'items', 'settings' ],
run: function(cache) {
cache.current = this._items && this._items[this.relative(this._current)];
}
}, {
// Drop clones left over from a previous run before items are rebuilt.
filter: [ 'items', 'settings' ],
run: function() {
this.$stage.children('.cloned').remove();
}
}, {
// Compute (and, for autoWidth, apply) the per-item margin CSS.
filter: [ 'width', 'items', 'settings' ],
run: function(cache) {
var margin = this.settings.margin || '',
grid = !this.settings.autoWidth,
rtl = this.settings.rtl,
css = {
'width': 'auto',
'margin-left': rtl ? margin : '',
'margin-right': rtl ? '' : margin
};
!grid && this.$stage.children().css(css);
cache.css = css;
}
}, {
// Compute each item's width, honouring merge factors in grid mode.
filter: [ 'width', 'items', 'settings' ],
run: function(cache) {
var width = (this.width() / this.settings.items).toFixed(3) - this.settings.margin,
merge = null,
iterator = this._items.length,
grid = !this.settings.autoWidth,
widths = [];
cache.items = {
merge: false,
width: width
};
while (iterator--) {
merge = this._mergers[iterator];
merge = this.settings.mergeFit && Math.min(merge, this.settings.items) || merge;
cache.items.merge = merge > 1 || cache.items.merge;
widths[iterator] = !grid ? this._items[iterator].width() : width * merge;
}
this._widths = widths;
}
}, {
// Create looping clones and append/prepend their markup to the stage.
filter: [ 'items', 'settings' ],
run: function() {
var clones = [],
items = this._items,
settings = this.settings,
// TODO: Should be computed from number of min width items in stage
view = Math.max(settings.items * 2, 4),
size = Math.ceil(items.length / 2) * 2,
repeat = settings.loop && items.length ? settings.rewind ? view : Math.max(view, size) : 0,
append = '',
prepend = '';
repeat /= 2;
while (repeat > 0) {
// Switch to only using appended clones
clones.push(this.normalize(clones.length / 2, true));
append = append + items[clones[clones.length - 1]][0].outerHTML;
clones.push(this.normalize(items.length - 1 - (clones.length - 1) / 2, true));
prepend = items[clones[clones.length - 1]][0].outerHTML + prepend;
repeat -= 1;
}
this._clones = clones;
$(append).addClass('cloned').appendTo(this.$stage);
$(prepend).addClass('cloned').prependTo(this.$stage);
}
}, {
// Accumulate each item's start coordinate; the sign flips with rtl.
filter: [ 'width', 'items', 'settings' ],
run: function() {
var rtl = this.settings.rtl ? 1 : -1,
size = this._clones.length + this._items.length,
iterator = -1,
previous = 0,
current = 0,
coordinates = [];
while (++iterator < size) {
previous = coordinates[iterator - 1] || 0;
current = this._widths[this.relative(iterator)] + this.settings.margin;
coordinates.push(previous + current * rtl);
}
this._coordinates = coordinates;
}
}, {
// Size the stage to the summed item widths plus stage padding.
filter: [ 'width', 'items', 'settings' ],
run: function() {
var padding = this.settings.stagePadding,
coordinates = this._coordinates,
css = {
'width': Math.ceil(Math.abs(coordinates[coordinates.length - 1])) + padding * 2,
'padding-left': padding || '',
'padding-right': padding || ''
};
this.$stage.css(css);
}
}, {
// Push the computed widths onto the item elements (grid mode only).
filter: [ 'width', 'items', 'settings' ],
run: function(cache) {
var iterator = this._coordinates.length,
grid = !this.settings.autoWidth,
items = this.$stage.children();
if (grid && cache.items.merge) {
while (iterator--) {
cache.css.width = this._widths[this.relative(iterator)];
items.eq(iterator).css(cache.css);
}
} else if (grid) {
cache.css.width = cache.items.width;
items.css(cache.css);
}
}
}, {
// Clear leftover stage styling when no items remain.
filter: [ 'items' ],
run: function() {
this._coordinates.length < 1 && this.$stage.removeAttr('style');
}
}, {
// Re-resolve the remembered item to an index, clamped to valid bounds.
filter: [ 'width', 'items', 'settings' ],
run: function(cache) {
cache.current = cache.current ? this.$stage.children().index(cache.current) : 0;
cache.current = Math.max(this.minimum(), Math.min(this.maximum(), cache.current));
this.reset(cache.current);
}
}, {
// Move the stage to the current item's coordinate.
filter: [ 'position' ],
run: function() {
this.animate(this.coordinates(this._current));
}
}, {
// Recompute which children carry the .active (and .center) classes.
filter: [ 'width', 'position', 'items', 'settings' ],
run: function() {
var rtl = this.settings.rtl ? 1 : -1,
padding = this.settings.stagePadding * 2,
begin = this.coordinates(this.current()) + padding,
end = begin + this.width() * rtl,
inner, outer, matches = [], i, n;
for (i = 0, n = this._coordinates.length; i < n; i++) {
inner = this._coordinates[i - 1] || 0;
outer = Math.abs(this._coordinates[i]) + padding * rtl;
if ((this.op(inner, '<=', begin) && (this.op(inner, '>', end)))
|| (this.op(outer, '<', begin) && this.op(outer, '>', end))) {
matches.push(i);
}
}
this.$stage.children('.active').removeClass('active');
this.$stage.children(':eq(' + matches.join('), :eq(') + ')').addClass('active');
this.$stage.children('.center').removeClass('center');
if (this.settings.center) {
this.$stage.children().eq(this.current()).addClass('center');
}
}
} ];
/**
 * Creates the stage DOM element, or adopts an existing one.
 * @protected
 */
Owl.prototype.initializeStage = function() {
	var existing = this.$element.find('.' + this.settings.stageClass);

	// Reuse a stage that is already present in the DOM.
	this.$stage = existing;
	if (existing.length) {
		return;
	}

	this.$element.addClass(this.options.loadingClass);

	// Build the stage element wrapped in its outer container.
	this.$stage = $('<' + this.settings.stageElement + '>', {
		"class": this.settings.stageClass
	}).wrap($('<div/>', {
		"class": this.settings.stageOuterClass
	}));

	this.$element.append(this.$stage.parent());
};
/**
 * Creates the item DOM elements, or adopts existing ones.
 * @protected
 */
Owl.prototype.initializeItems = function() {
	var $existing = this.$element.find('.owl-item');

	// Items already rendered in the DOM: adopt them instead of rebuilding.
	if ($existing.length) {
		this._items = [];
		this._mergers = [];
		$existing.each($.proxy(function(index, item) {
			this._items.push($(item));
			this._mergers.push(1);
		}, this));
		this.refresh();
		return;
	}

	// Move the element's children (except the stage wrapper) into the stage.
	this.replace(this.$element.children().not(this.$stage.parent()));

	if (this.isVisible()) {
		// Visible: run a full update now.
		this.refresh();
	} else {
		// Hidden: defer the work by invalidating the width.
		this.invalidate('width');
	}

	this.$element
		.removeClass(this.options.loadingClass)
		.addClass(this.options.loadedClass);
};
/**
* Initializes the carousel.
* @protected
*/
Owl.prototype.initialize = function() {
this.enter('initializing');
this.trigger('initialize');
this.$element.toggleClass(this.settings.rtlClass, this.settings.rtl);
// autoWidth needs real image widths; preload images when the children
// report no usable width yet.
if (this.settings.autoWidth && !this.is('pre-loading')) {
var imgs, nestedSelector, width;
imgs = this.$element.find('img');
nestedSelector = this.settings.nestedItemSelector ? '.' + this.settings.nestedItemSelector : undefined;
width = this.$element.children(nestedSelector).width();
if (imgs.length && width <= 0) {
this.preloadAutoWidthImages(imgs);
}
}
this.initializeStage();
this.initializeItems();
// register event handlers
this.registerEventHandlers();
this.leave('initializing');
this.trigger('initialized');
};
/**
 * Reports whether the carousel element is visible.
 * If you know the carousel will always be visible you can set
 * `checkVisibility` to `false` to skip the expensive forced reflow
 * that `$element.is(':visible')` causes.
 * @returns {Boolean} visibility of $element
 */
Owl.prototype.isVisible = function() {
	if (!this.settings.checkVisibility) {
		return true;
	}
	return this.$element.is(':visible');
};
/**
* Setups the current settings.
* @todo Remove responsive classes. Why should adaptive designs be brought into IE8?
* @todo Support for media queries by using `matchMedia` would be nice.
* @public
*/
Owl.prototype.setup = function() {
var viewport = this.viewport(),
overwrites = this.options.responsive,
match = -1,
settings = null;
if (!overwrites) {
settings = $.extend({}, this.options);
} else {
// Pick the widest responsive breakpoint that still fits the viewport.
$.each(overwrites, function(breakpoint) {
if (breakpoint <= viewport && breakpoint > match) {
match = Number(breakpoint);
}
});
settings = $.extend({}, this.options, overwrites[match]);
// stagePadding may be supplied as a function; resolve it to a value.
if (typeof settings.stagePadding === 'function') {
settings.stagePadding = settings.stagePadding();
}
delete settings.responsive;
// responsive class
if (settings.responsiveClass) {
this.$element.attr('class',
this.$element.attr('class').replace(new RegExp('(' + this.options.responsiveClass + '-)\\S+\\s', 'g'), '$1' + match)
);
}
}
this.trigger('change', { property: { name: 'settings', value: settings } });
this._breakpoint = match;
this.settings = settings;
this.invalidate('settings');
this.trigger('changed', { property: { name: 'settings', value: this.settings } });
};
/**
 * Updates option logic if necessery.
 * @protected
 */
Owl.prototype.optionsLogic = function() {
	var settings = this.settings;
	// Auto-width items cannot be merged or padded.
	if (settings.autoWidth) {
		settings.stagePadding = false;
		settings.merge = false;
	}
};
/**
 * Prepares an item before add.
 * Fires `prepare`, letting listeners supply the container; otherwise
 * wraps the content in a fresh item element.
 * @todo Rename event parameter `content` to `item`.
 * @protected
 * @param {jQuery|HTMLElement|String} item - The content to wrap.
 * @returns {jQuery|HTMLElement} - The item container.
 */
Owl.prototype.prepare = function(item) {
	var event = this.trigger('prepare', { content: item });

	if (!event.data) {
		// No listener provided a container; build the default one.
		// Fixed: terminate the statement explicitly instead of relying
		// on automatic semicolon insertion.
		event.data = $('<' + this.settings.itemElement + '/>')
			.addClass(this.options.itemClass).append(item);
	}

	this.trigger('prepared', { content: event.data });

	return event.data;
};
/**
* Updates the view.
* @public
*/
Owl.prototype.update = function() {
var i = 0,
n = this._pipe.length,
// A pipe stage runs when any of its filter keys was invalidated.
filter = $.proxy(function(p) { return this[p] }, this._invalidated),
cache = {};
while (i < n) {
if (this._invalidated.all || $.grep(this._pipe[i].filter, filter).length > 0) {
this._pipe[i].run(cache);
}
i++;
}
// All invalidations are consumed by a full pipeline run.
this._invalidated = {};
!this.is('valid') && this.enter('valid');
};
/**
 * Gets the width of the view.
 * @public
 * @param {Owl.Width} [dimension=Owl.Width.Default] - The dimension to return.
 * @returns {Number} - The width of the view in pixel.
 */
Owl.prototype.width = function(dimension) {
	dimension = dimension || Owl.Width.Default;
	// Inner and outer widths are the raw element width; the default
	// subtracts the stage padding and adds back one margin.
	if (dimension === Owl.Width.Inner || dimension === Owl.Width.Outer) {
		return this._width;
	}
	return this._width - this.settings.stagePadding * 2 + this.settings.margin;
};
/**
* Refreshes the carousel primarily for adaptive purposes.
* @public
*/
Owl.prototype.refresh = function() {
this.enter('refreshing');
this.trigger('refresh');
// Re-resolve responsive settings, then run the full update pipeline
// while the refresh class is applied.
this.setup();
this.optionsLogic();
this.$element.addClass(this.options.refreshClass);
this.update();
this.$element.removeClass(this.options.refreshClass);
this.leave('refreshing');
this.trigger('refreshed');
};
/**
* Checks window `resize` event.
* @protected
*/
Owl.prototype.onThrottledResize = function() {
// Debounce: restart the timer so onResize fires once resizing settles.
window.clearTimeout(this.resizeTimer);
this.resizeTimer = window.setTimeout(this._handlers.onResize, this.settings.responsiveRefreshRate);
};
/**
 * Handles the (throttled) window `resize` event.
 * Skips the refresh when there are no items, the width is unchanged,
 * the element is hidden, or a listener prevents the `resize` event.
 * @protected
 * @returns {Boolean|undefined} false when the resize was skipped.
 */
Owl.prototype.onResize = function() {
	if (!this._items.length
		|| this._width === this.$element.width()
		|| !this.isVisible()) {
		return false;
	}

	this.enter('resizing');

	if (this.trigger('resize').isDefaultPrevented()) {
		this.leave('resizing');
		return false;
	}

	this.invalidate('width');
	this.refresh();

	this.leave('resizing');
	this.trigger('resized');
};
/**
* Registers event handlers.
* @todo Check `msPointerEnabled`
* @todo #261
* @protected
*/
Owl.prototype.registerEventHandlers = function() {
// Listen for CSS transition-end events when transitions are supported.
if ($.support.transition) {
this.$stage.on($.support.transition.end + '.owl.core', $.proxy(this.onTransitionEnd, this));
}
// Throttled window resize handling (unless responsive is disabled).
if (this.settings.responsive !== false) {
this.on(window, 'resize', this._handlers.onThrottledResize);
}
if (this.settings.mouseDrag) {
this.$element.addClass(this.options.dragClass);
this.$stage.on('mousedown.owl.core', $.proxy(this.onDragStart, this));
// Suppress native drag/select so mouse dragging works on the stage.
this.$stage.on('dragstart.owl.core selectstart.owl.core', function() { return false });
}
if (this.settings.touchDrag){
this.$stage.on('touchstart.owl.core', $.proxy(this.onDragStart, this));
this.$stage.on('touchcancel.owl.core', $.proxy(this.onDragEnd, this));
}
};
/**
* Handles `touchstart` and `mousedown` events.
* @todo Horizontal swipe threshold as option
* @todo #261
* @protected
* @param {Event} event - The event arguments.
*/
Owl.prototype.onDragStart = function(event) {
var stage = null;
// Ignore right-clicks.
if (event.which === 3) {
return;
}
// Read the stage's current translation from the CSS transform matrix
// (16 components = matrix3d with translation at 12/13, otherwise a 2d
// matrix with translation at 4/5).
if ($.support.transform) {
stage = this.$stage.css('transform').replace(/.*\(|\)| /g, '').split(',');
stage = {
x: stage[stage.length === 16 ? 12 : 4],
y: stage[stage.length === 16 ? 13 : 5]
};
} else {
// No transform support: derive the position from the element offset.
stage = this.$stage.position();
stage = {
x: this.settings.rtl ?
stage.left + this.$stage.width() - this.width() + this.settings.margin :
stage.left,
y: stage.top
};
}
// Stop an in-flight animation, keeping the stage where it currently is.
if (this.is('animating')) {
$.support.transform ? this.animate(stage.x) : this.$stage.stop()
this.invalidate('position');
}
this.$element.toggleClass(this.options.grabClass, event.type === 'mousedown');
this.speed(0);
this._drag.time = new Date().getTime();
this._drag.target = $(event.target);
this._drag.stage.start = stage;
this._drag.stage.current = stage;
this._drag.pointer = this.pointer(event);
$(document).on('mouseup.owl.core touchend.owl.core', $.proxy(this.onDragEnd, this));
// Only enter the dragging state once the first movement is mostly
// horizontal; vertical movement leaves scrolling untouched.
$(document).one('mousemove.owl.core touchmove.owl.core', $.proxy(function(event) {
var delta = this.difference(this._drag.pointer, this.pointer(event));
$(document).on('mousemove.owl.core touchmove.owl.core', $.proxy(this.onDragMove, this));
if (Math.abs(delta.x) < Math.abs(delta.y) && this.is('valid')) {
return;
}
event.preventDefault();
this.enter('dragging');
this.trigger('drag');
}, this));
};
/**
* Handles the `touchmove` and `mousemove` events.
* @todo #261
* @protected
* @param {Event} event - The event arguments.
*/
// Handles `touchmove`/`mousemove` during a drag: computes the new stage
// coordinate, wrapping it in loop mode or clamping it (with optional pull
// resistance) otherwise, then moves the stage.
Owl.prototype.onDragMove = function(event) {
var minimum = null,
maximum = null,
pull = null,
delta = this.difference(this._drag.pointer, this.pointer(event)),
stage = this.difference(this._drag.stage.start, delta);
if (!this.is('dragging')) {
return;
}
event.preventDefault();
if (this.settings.loop) {
// Wrap the coordinate into [minimum, minimum + maximum) — the
// double-modulo keeps negative values positive.
minimum = this.coordinates(this.minimum());
maximum = this.coordinates(this.maximum() + 1) - minimum;
stage.x = (((stage.x - minimum) % maximum + maximum) % maximum) + minimum;
} else {
// Clamp between the first and last coordinate; `pull` adds rubber-band
// resistance (1/5 of the overshoot) when pullDrag is enabled.
minimum = this.settings.rtl ? this.coordinates(this.maximum()) : this.coordinates(this.minimum());
maximum = this.settings.rtl ? this.coordinates(this.minimum()) : this.coordinates(this.maximum());
pull = this.settings.pullDrag ? -1 * delta.x / 5 : 0;
stage.x = Math.max(Math.min(stage.x, minimum + pull), maximum + pull);
}
this._drag.stage.current = stage;
this.animate(stage.x);
};
/**
* Handles the `touchend` and `mouseup` events.
* @todo #261
* @todo Threshold for click event
* @protected
* @param {Event} event - The event arguments.
*/
// Handles `touchend`/`mouseup`: snaps to the closest item in the drag
// direction, suppresses the ghost click after a real drag, and tears down
// the document-level drag handlers.
Owl.prototype.onDragEnd = function(event) {
var delta = this.difference(this._drag.pointer, this.pointer(event)),
stage = this._drag.stage.current,
// XOR with rtl flips the perceived direction in right-to-left layouts.
direction = delta.x > 0 ^ this.settings.rtl ? 'left' : 'right';
$(document).off('.owl.core');
this.$element.removeClass(this.options.grabClass);
if (delta.x !== 0 && this.is('dragging') || !this.is('valid')) {
this.speed(this.settings.dragEndSpeed || this.settings.smartSpeed);
this.current(this.closest(stage.x, delta.x !== 0 ? direction : this._drag.direction));
this.invalidate('position');
this.update();
this._drag.direction = direction;
// After a real drag (moved > 3px or held > 300ms), swallow the click
// that the browser fires on release.
if (Math.abs(delta.x) > 3 || new Date().getTime() - this._drag.time > 300) {
this._drag.target.one('click.owl.core', function() { return false; });
}
}
if (!this.is('dragging')) {
return;
}
this.leave('dragging');
this.trigger('dragged');
};
/**
* Gets absolute position of the closest item for a coordinate.
* @todo Setting `freeDrag` makes `closest` not reusable. See #165.
* @protected
* @param {Number} coordinate - The coordinate in pixel.
* @param {String} direction - The direction to check for the closest item. Ether `left` or `right`.
* @return {Number} - The absolute position of the closest item.
*/
// Finds the absolute position of the item closest to `coordinate`,
// biased by the drag `direction` ('left' or 'right'); returns -1 when
// freeDrag is on and no boundary correction applies.
Owl.prototype.closest = function(coordinate, direction) {
var position = -1,
// Snap tolerance in pixels around each item's coordinate.
pull = 30,
width = this.width(),
coordinates = this.coordinates();
if (!this.settings.freeDrag) {
// check closest item
$.each(coordinates, $.proxy(function(index, value) {
// on a left pull, check on current index
if (direction === 'left' && coordinate > value - pull && coordinate < value + pull) {
position = index;
// on a right pull, check on previous index
// to do so, subtract width from value and set position = index + 1
} else if (direction === 'right' && coordinate > value - width - pull && coordinate < value - width + pull) {
position = index + 1;
} else if (this.op(coordinate, '<', value)
&& this.op(coordinate, '>', coordinates[index + 1] !== undefined ? coordinates[index + 1] : value - width)) {
// Coordinate lies between two item coordinates: pick the nearer
// one depending on the drag direction.
position = direction === 'left' ? index + 1 : index;
}
// $.each stops iterating once a position has been found.
return position === -1;
}, this));
}
if (!this.settings.loop) {
// non loop boundries
if (this.op(coordinate, '>', coordinates[this.minimum()])) {
position = coordinate = this.minimum();
} else if (this.op(coordinate, '<', coordinates[this.maximum()])) {
position = coordinate = this.maximum();
}
}
return position;
};
/**
* Animates the stage.
* @todo #270
* @public
* @param {Number} coordinate - The coordinate in pixels.
*/
// Moves the stage to `coordinate` (px). Uses a CSS 3D transform with a
// transition when supported, jQuery's animate() as a fallback when a speed
// is set, and a plain CSS `left` jump otherwise.
Owl.prototype.animate = function(coordinate) {
var animate = this.speed() > 0;
// Finish any in-flight animation first so state stays consistent.
this.is('animating') && this.onTransitionEnd();
if (animate) {
this.enter('animating');
this.trigger('translate');
}
if ($.support.transform3d && $.support.transition) {
this.$stage.css({
transform: 'translate3d(' + coordinate + 'px,0px,0px)',
transition: (this.speed() / 1000) + 's' + (
this.settings.slideTransition ? ' ' + this.settings.slideTransition : ''
)
});
} else if (animate) {
// No CSS transition support: animate `left` and signal completion
// through the jQuery animation callback.
this.$stage.animate({
left: coordinate + 'px'
}, this.speed(), this.settings.fallbackEasing, $.proxy(this.onTransitionEnd, this));
} else {
// Zero speed and no transforms: jump instantly.
this.$stage.css({
left: coordinate + 'px'
});
}
};
/**
* Checks whether the carousel is in a specific state or not.
* @param {String} state - The state to check.
* @returns {Boolean} - The flag which indicates if the carousel is busy.
*/
// Reports whether the carousel currently holds the given state (its
// enter/leave counter is positive).
Owl.prototype.is = function(state) {
  var count = this._states.current[state];
  return Boolean(count) && count > 0;
};
/**
* Sets the absolute position of the current item.
* @public
* @param {Number} [position] - The new absolute position or nothing to leave it unchanged.
* @returns {Number} - The absolute position of the current item.
*/
// Getter/setter for the absolute position of the current item.
// With no argument it returns the position; with one it normalizes and
// applies it, letting `change` listeners override the target via event.data.
Owl.prototype.current = function(position) {
  // Getter: no argument supplied.
  if (position === undefined) {
    return this._current;
  }
  // Nothing can be current in an empty carousel.
  if (!this._items.length) {
    return undefined;
  }
  position = this.normalize(position);
  if (this._current !== position) {
    var changeEvent = this.trigger('change', { property: { name: 'position', value: position } });
    // A listener may redirect the move by setting event.data.
    if (changeEvent.data !== undefined) {
      position = this.normalize(changeEvent.data);
    }
    this._current = position;
    this.invalidate('position');
    this.trigger('changed', { property: { name: 'position', value: this._current } });
  }
  return this._current;
};
/**
* Invalidates the given part of the update routine.
* @param {String} [part] - The part to invalidate.
* @returns {Array.<String>} - The invalidated parts.
*/
// Marks one part of the update pipeline as dirty and returns the list of
// all currently invalidated part names.
Owl.prototype.invalidate = function(part) {
  if ($.type(part) === 'string') {
    this._invalidated[part] = true;
    if (this.is('valid')) {
      this.leave('valid');
    }
  }
  return $.map(this._invalidated, function(value, key) { return key });
};
/**
* Resets the absolute position of the current item.
* @public
* @param {Number} position - The absolute position of the new item.
*/
// Jumps instantly to an absolute position without firing translate events.
Owl.prototype.reset = function(position) {
  var muted = [ 'translate', 'translated' ];
  position = this.normalize(position);
  if (position === undefined) {
    return;
  }
  this._speed = 0;
  this._current = position;
  // Silence the translate events while the stage snaps into place.
  this.suppress(muted);
  this.animate(this.coordinates(position));
  this.release(muted);
};
/**
* Normalizes an absolute or a relative position of an item.
* @public
* @param {Number} position - The absolute or relative position to normalize.
* @param {Boolean} [relative=false] - Whether the given position is relative or not.
* @returns {Number} - The normalized position.
*/
// Normalizes an absolute (or, with relative=true, relative) item position
// into the valid range, wrapping out-of-range values around the item count.
// Returns undefined for non-numeric input or an empty carousel.
Owl.prototype.normalize = function(position, relative) {
  var length = this._items.length,
    clones = relative ? 0 : this._clones.length;
  if (!this.isNumeric(position) || length < 1) {
    return undefined;
  }
  if (position < 0 || position >= length + clones) {
    // Shift into clone-free space, wrap modulo the item count (double
    // modulo handles negatives), then shift back.
    position = ((position - clones / 2) % length + length) % length + clones / 2;
  }
  return position;
};
/**
* Converts an absolute position of an item into a relative one.
* @public
* @param {Number} position - The absolute position to convert.
* @returns {Number} - The converted position.
*/
// Converts an absolute position (which counts leading clones) into a
// relative one by removing the clone offset.
Owl.prototype.relative = function(position) {
  var offset = this._clones.length / 2;
  return this.normalize(position - offset, true);
};
/**
* Gets the maximum position for the current item.
* @public
* @param {Boolean} [relative=false] - Whether to return an absolute position or a relative position.
* @returns {Number}
*/
// Computes the maximum valid position for the current item, depending on
// loop/autoWidth/merge/center settings; never less than 0.
Owl.prototype.maximum = function(relative) {
var settings = this.settings,
maximum = this._coordinates.length,
iterator,
reciprocalItemsWidth,
elementWidth;
if (settings.loop) {
// With clones, the last reachable position is past the real items.
maximum = this._clones.length / 2 + this._items.length - 1;
} else if (settings.autoWidth || settings.merge) {
// Walk items from the end, accumulating widths (plus margins) until
// they fill the element; the stop index bounds the maximum.
iterator = this._items.length;
if (iterator) {
reciprocalItemsWidth = this._items[--iterator].width();
elementWidth = this.$element.width();
while (iterator--) {
reciprocalItemsWidth += this._items[iterator].width() + this.settings.margin;
if (reciprocalItemsWidth > elementWidth) {
break;
}
}
}
maximum = iterator + 1;
} else if (settings.center) {
maximum = this._items.length - 1;
} else {
maximum = this._items.length - settings.items;
}
if (relative) {
maximum -= this._clones.length / 2;
}
return Math.max(maximum, 0);
};
/**
* Gets the minimum position for the current item.
* @public
* @param {Boolean} [relative=false] - Whether to return an absolute position or a relative position.
* @returns {Number}
*/
// Minimum valid position: 0 relatively, otherwise the count of leading clones.
Owl.prototype.minimum = function(relative) {
  if (relative) {
    return 0;
  }
  return this._clones.length / 2;
};
/**
* Gets an item at the specified relative position.
* @public
* @param {Number} [position] - The relative position of the item.
* @return {jQuery|Array.<jQuery>} - The item at the given position or all items if no position was given.
*/
// Returns the item at a relative position, or a shallow copy of all items
// when called without an argument.
Owl.prototype.items = function(position) {
  if (position === undefined) {
    return this._items.slice();
  }
  return this._items[this.normalize(position, true)];
};
/**
* Gets an item at the specified relative position.
* @public
* @param {Number} [position] - The relative position of the item.
* @return {jQuery|Array.<jQuery>} - The item at the given position or all items if no position was given.
*/
// Returns the merge factor at a relative position, or a shallow copy of
// all merge factors when called without an argument.
Owl.prototype.mergers = function(position) {
  if (position === undefined) {
    return this._mergers.slice();
  }
  return this._mergers[this.normalize(position, true)];
};
/**
* Gets the absolute positions of clones for an item.
* @public
* @param {Number} [position] - The relative position of the item.
* @returns {Array.<Number>} - The absolute positions of clones for the item or all if no position was given.
*/
// Maps clone indices to absolute positions: clones alternate between the
// appended tail (even indices) and the prepended head (odd indices).
// With a position argument, only clones of that item are returned.
Owl.prototype.clones = function(position) {
  var headCount = this._clones.length / 2,
    tailBase = headCount + this._items.length,
    toAbsolute = function(index) {
      return index % 2 === 0 ? tailBase + index / 2 : headCount - (index + 1) / 2;
    };
  if (position === undefined) {
    return $.map(this._clones, function(v, i) { return toAbsolute(i) });
  }
  return $.map(this._clones, function(v, i) { return v === position ? toAbsolute(i) : null });
};
/**
* Sets the current animation speed.
* @public
* @param {Number} [speed] - The animation speed in milliseconds or nothing to leave it unchanged.
* @returns {Number} - The current animation speed in milliseconds.
*/
// Getter/setter for the animation speed (ms): assigns only when an
// argument is given, always returns the current value.
Owl.prototype.speed = function(speed) {
  this._speed = (speed === undefined) ? this._speed : speed;
  return this._speed;
};
/**
* Gets the coordinate of an item.
* @todo The name of this method is missleanding.
* @public
* @param {Number} position - The absolute position of the item within `minimum()` and `maximum()`.
* @returns {Number|Array.<Number>} - The coordinate of the item in pixel or all coordinates.
*/
// Returns the pixel coordinate of an item (or all coordinates when called
// without a position). In `center` mode the item is offset so it sits in
// the middle of the stage; `rtl` mirrors the direction.
Owl.prototype.coordinates = function(position) {
var multiplier = 1,
newPosition = position - 1,
coordinate;
if (position === undefined) {
// Recurse per index so every coordinate gets the same treatment.
return $.map(this._coordinates, $.proxy(function(coordinate, index) {
return this.coordinates(index);
}, this));
}
if (this.settings.center) {
if (this.settings.rtl) {
multiplier = -1;
newPosition = position + 1;
}
coordinate = this._coordinates[position];
// Center the item: shift by half the free space relative to the
// neighbouring coordinate (0 when there is no neighbour).
coordinate += (this.width() - coordinate + (this._coordinates[newPosition] || 0)) / 2 * multiplier;
} else {
coordinate = this._coordinates[newPosition] || 0;
}
coordinate = Math.ceil(coordinate);
return coordinate;
};
/**
* Calculates the speed for a translation.
* @protected
* @param {Number} from - The absolute position of the start item.
* @param {Number} to - The absolute position of the target item.
* @param {Number} [factor=undefined] - The time factor in milliseconds.
* @returns {Number} - The time in milliseconds for the translation.
*/
// Computes the transition time (ms) between two positions: the distance,
// clamped to [1, 6] items, scaled by `factor` (or smartSpeed by default).
// A factor of exactly 0 forces an instant move.
Owl.prototype.duration = function(from, to, factor) {
  if (factor === 0) {
    return 0;
  }
  var distance = Math.min(Math.max(Math.abs(to - from), 1), 6);
  return distance * Math.abs(factor || this.settings.smartSpeed);
};
/**
* Slides to the specified item.
* @public
* @param {Number} position - The position of the item.
* @param {Number} [speed] - The time in milliseconds for the transition.
*/
// Slides to the given (relative) position. In loop mode it may take the
// shorter way around and silently rebase the stage (`reset`) so the
// animation crosses the clone seam smoothly; `rewind` wraps instead.
Owl.prototype.to = function(position, speed) {
var current = this.current(),
revert = null,
distance = position - this.relative(current),
// Sign of the travel distance: 1, -1 or 0.
direction = (distance > 0) - (distance < 0),
items = this._items.length,
minimum = this.minimum(),
maximum = this.maximum();
if (this.settings.loop) {
// Prefer the shorter direction around the loop.
if (!this.settings.rewind && Math.abs(distance) > items / 2) {
distance += direction * -1 * items;
}
position = current + distance;
// Wrap the target back into the valid range.
revert = ((position - minimum) % items + items) % items + minimum;
if (revert !== position && revert - distance <= maximum && revert - distance > 0) {
// Rebase the current position so the wrapped target is reachable
// without a visible jump.
current = revert - distance;
position = revert;
this.reset(current);
}
} else if (this.settings.rewind) {
// Wrap modulo (maximum + 1) so moving past the end returns to the start.
maximum += 1;
position = (position % maximum + maximum) % maximum;
} else {
position = Math.max(minimum, Math.min(maximum, position));
}
this.speed(this.duration(current, position, speed));
this.current(position);
if (this.isVisible()) {
this.update();
}
};
/**
* Slides to the next item.
* @public
* @param {Number} [speed] - The time in milliseconds for the transition.
*/
// Slides one item forward, with an optional transition time (ms).
Owl.prototype.next = function(speed) {
  this.to(this.relative(this.current()) + 1, speed || false);
};
/**
* Slides to the previous item.
* @public
* @param {Number} [speed] - The time in milliseconds for the transition.
*/
// Slides one item backward, with an optional transition time (ms).
Owl.prototype.prev = function(speed) {
  this.to(this.relative(this.current()) - 1, speed || false);
};
/**
* Handles the end of an animation.
* @protected
* @param {Event} event - The event arguments.
*/
// Finalizes an animation. Called with an event for CSS transitions (where
// it filters out bubbled child transitions) or with no argument from the
// jQuery-animation fallback.
Owl.prototype.onTransitionEnd = function(event) {
  if (event !== undefined) {
    event.stopPropagation();
    // React only to the stage's own transitionEnd, not a descendant's.
    var source = event.target || event.srcElement || event.originalTarget;
    if (source !== this.$stage.get(0)) {
      return false;
    }
  }
  this.leave('animating');
  this.trigger('translated');
};
/**
* Gets viewport width.
* @protected
* @return {Number} - The width in pixel.
*/
// Measures the responsive base width: a configured base element if set,
// otherwise the window's inner width with a legacy documentElement fallback.
Owl.prototype.viewport = function() {
  var base = this.options.responsiveBaseElement,
    width;
  if (base !== window) {
    width = $(base).width();
  } else if (window.innerWidth) {
    width = window.innerWidth;
  } else if (document.documentElement && document.documentElement.clientWidth) {
    width = document.documentElement.clientWidth;
  } else {
    console.warn('Can not detect viewport width.');
  }
  return width;
};
/**
* Replaces the current content.
* @public
* @param {HTMLElement|jQuery|String} content - The new content.
*/
// Replaces the carousel's entire content: empties the stage, prepares and
// appends each element node of `content`, and resets to the start position.
Owl.prototype.replace = function(content) {
this.$stage.empty();
this._items = [];
if (content) {
content = (content instanceof jQuery) ? content : $(content);
}
// NOTE(review): with falsy `content` and no nestedItemSelector, the
// .filter() call below would throw — presumably callers always pass
// content; confirm before relying on empty input.
if (this.settings.nestedItemSelector) {
content = content.find('.' + this.settings.nestedItemSelector);
}
// Keep element nodes only (drops text/comment nodes).
content.filter(function() {
return this.nodeType === 1;
}).each($.proxy(function(index, item) {
item = this.prepare(item);
this.$stage.append(item);
this._items.push(item);
// data-merge defaults to 1 when absent or non-numeric.
this._mergers.push(item.find('[data-merge]').addBack('[data-merge]').attr('data-merge') * 1 || 1);
}, this));
this.reset(this.isNumeric(this.settings.startPosition) ? this.settings.startPosition : 0);
this.invalidate('items');
};
/**
* Adds an item.
* @todo Use `item` instead of `content` for the event arguments.
* @public
* @param {HTMLElement|jQuery|String} content - The item content to add.
* @param {Number} [position] - The relative position at which to insert the item otherwise the item will be added to the end.
*/
// Inserts an item at a relative position (appends when no position is
// given), keeping the item/merger arrays and the current position in sync.
Owl.prototype.add = function(content, position) {
var current = this.relative(this._current);
position = position === undefined ? this._items.length : this.normalize(position, true);
content = content instanceof jQuery ? content : $(content);
this.trigger('add', { content: content, position: position });
content = this.prepare(content);
if (this._items.length === 0 || position === this._items.length) {
// Append: first item goes straight into the stage, later ones after
// the current last item.
this._items.length === 0 && this.$stage.append(content);
this._items.length !== 0 && this._items[position - 1].after(content);
this._items.push(content);
this._mergers.push(content.find('[data-merge]').addBack('[data-merge]').attr('data-merge') * 1 || 1);
} else {
// Insert before the item currently at `position`.
this._items[position].before(content);
this._items.splice(position, 0, content);
this._mergers.splice(position, 0, content.find('[data-merge]').addBack('[data-merge]').attr('data-merge') * 1 || 1);
}
// Keep the previously current item current after the insertion.
this._items[current] && this.reset(this._items[current].index());
this.invalidate('items');
this.trigger('added', { content: content, position: position });
};
/**
* Removes an item by its position.
* @todo Use `item` instead of `content` for the event arguments.
* @public
* @param {Number} position - The relative position of the item to remove.
*/
// Removes the item at a relative position, detaching its DOM node and
// dropping its merger entry; ignores positions that don't normalize.
Owl.prototype.remove = function(position) {
  position = this.normalize(position, true);
  if (position === undefined) {
    return;
  }
  var $item = this._items[position];
  this.trigger('remove', { content: $item, position: position });
  $item.remove();
  this._items.splice(position, 1);
  this._mergers.splice(position, 1);
  this.invalidate('items');
  this.trigger('removed', { content: null, position: position });
};
/**
* Preloads images with auto width.
* @todo Replace by a more generic approach
* @protected
*/
// Preloads images whose width drives the layout (autoWidth): each image is
// loaded off-screen first, then revealed, and the carousel refreshes once
// all pre-loads are done.
Owl.prototype.preloadAutoWidthImages = function(images) {
images.each($.proxy(function(i, element) {
this.enter('pre-loading');
element = $(element);
// Load through a detached Image so the real element gets its src only
// after the browser has the file (and thus its dimensions).
$(new Image()).one('load', $.proxy(function(e) {
element.attr('src', e.target.src);
element.css('opacity', 1);
this.leave('pre-loading');
// Refresh only after the last pending pre-load and after init.
!this.is('pre-loading') && !this.is('initializing') && this.refresh();
}, this)).attr('src', element.attr('src') || element.attr('data-src') || element.attr('data-src-retina'));
}, this));
};
/**
* Destroys the carousel.
* @public
*/
// Tears the carousel down: unbinds core events, destroys plugins, removes
// clones and wrapper markup, strips owl CSS classes and detaches the
// stored instance data.
Owl.prototype.destroy = function() {
this.$element.off('.owl.core');
this.$stage.off('.owl.core');
$(document).off('.owl.core');
if (this.settings.responsive !== false) {
window.clearTimeout(this.resizeTimer);
this.off(window, 'resize', this._handlers.onThrottledResize);
}
for (var i in this._plugins) {
this._plugins[i].destroy();
}
// Unwrap the original content out of the generated stage/outer markup.
this.$stage.children('.cloned').remove();
this.$stage.unwrap();
this.$stage.children().contents().unwrap();
this.$stage.children().unwrap();
this.$stage.remove();
// Strip every owl class, including the responsive-* variant added at setup.
this.$element
.removeClass(this.options.refreshClass)
.removeClass(this.options.loadingClass)
.removeClass(this.options.loadedClass)
.removeClass(this.options.rtlClass)
.removeClass(this.options.dragClass)
.removeClass(this.options.grabClass)
.attr('class', this.$element.attr('class').replace(new RegExp(this.options.responsiveClass + '-\\S+\\s', 'g'), ''))
.removeData('owl.carousel');
};
/**
* Operators to calculate right-to-left and left-to-right.
* @protected
* @param {Number} [a] - The left side operand.
* @param {String} [o] - The operator.
* @param {Number} [b] - The right side operand.
*/
// Direction-aware comparison: applies operator `o` to `a` and `b`, with
// the comparison mirrored when the carousel runs right-to-left.
// Returns undefined for an unknown operator.
Owl.prototype.op = function(a, o, b) {
  var rtl = this.settings.rtl;
  if (o === '<') {
    return rtl ? a > b : a < b;
  }
  if (o === '>') {
    return rtl ? a < b : a > b;
  }
  if (o === '>=') {
    return rtl ? a <= b : a >= b;
  }
  if (o === '<=') {
    return rtl ? a >= b : a <= b;
  }
};
/**
* Attaches to an internal event.
* @protected
* @param {HTMLElement} element - The event source.
* @param {String} event - The event name.
* @param {Function} listener - The event handler to attach.
* @param {Boolean} capture - Wether the event should be handled at the capturing phase or not.
*/
// Attaches a raw DOM event listener, preferring the standard API and
// falling back to legacy IE attachEvent.
Owl.prototype.on = function(element, event, listener, capture) {
  if (element.addEventListener) {
    element.addEventListener(event, listener, capture);
    return;
  }
  if (element.attachEvent) {
    element.attachEvent('on' + event, listener);
  }
};
/**
* Detaches from an internal event.
* @protected
* @param {HTMLElement} element - The event source.
* @param {String} event - The event name.
* @param {Function} listener - The attached event handler to detach.
* @param {Boolean} capture - Wether the attached event handler was registered as a capturing listener or not.
*/
// Detaches a raw DOM event listener, preferring the standard API and
// falling back to legacy IE detachEvent.
Owl.prototype.off = function(element, event, listener, capture) {
  if (element.removeEventListener) {
    element.removeEventListener(event, listener, capture);
    return;
  }
  if (element.detachEvent) {
    element.detachEvent('on' + event, listener);
  }
};
/**
* Triggers a public event.
* @todo Remove `status`, `relatedTarget` should be used instead.
* @protected
* @param {String} name - The event name.
* @param {*} [data=null] - The event data.
* @param {String} [namespace=carousel] - The event namespace.
* @param {String} [state] - The state which is associated with the event.
* @param {Boolean} [enter=false] - Indicates if the call enters the specified state or not.
* @returns {Event} - The event arguments.
*/
// Fires a public event `name.owl.<namespace>`: notifies plugins, triggers
// the jQuery event on the element and invokes a matching `on<Name>`
// settings callback; suppressed events are skipped entirely.
Owl.prototype.trigger = function(name, data, namespace, state, enter) {
var status = {
item: { count: this._items.length, index: this.current() }
// e.g. name 'change' + namespace undefined -> handler 'onChange'.
}, handler = $.camelCase(
$.grep([ 'on', name, namespace ], function(v) { return v })
.join('-').toLowerCase()
), event = $.Event(
[ name, 'owl', namespace || 'carousel' ].join('.').toLowerCase(),
$.extend({ relatedTarget: this }, status, data)
);
if (!this._supress[name]) {
// Plugins get first look at the event before it reaches the DOM.
$.each(this._plugins, function(name, plugin) {
if (plugin.onTrigger) {
plugin.onTrigger(event);
}
});
this.register({ type: Owl.Type.Event, name: name });
this.$element.trigger(event);
// Settings-level callback, e.g. settings.onChange(event).
if (this.settings && typeof this.settings[handler] === 'function') {
this.settings[handler].call(this, event);
}
}
return event;
};
/**
* Enters a state.
* @param name - The state name.
*/
// Enters a state: increments the counter for the state and every tag
// registered under it, initializing missing counters to zero first.
Owl.prototype.enter = function(name) {
  var counters = this._states.current;
  $.each([ name ].concat(this._states.tags[name] || []), function(i, tag) {
    counters[tag] = (counters[tag] === undefined ? 0 : counters[tag]) + 1;
  });
};
/**
* Leaves a state.
* @param name - The state name.
*/
// Leaves a state: decrements the counter for the state and every tag
// registered under it.
Owl.prototype.leave = function(name) {
  var counters = this._states.current;
  $.each([ name ].concat(this._states.tags[name] || []), function(i, tag) {
    counters[tag]--;
  });
};
/**
* Registers an event or state.
* @public
* @param {Object} object - The event or state to register.
*/
// Registers an event or a state. For events it installs a jQuery special
// event `_default` hook (once) so owl-namespaced triggers don't run the
// browser default; for states it merges and de-duplicates the tag list.
Owl.prototype.register = function(object) {
if (object.type === Owl.Type.Event) {
if (!$.event.special[object.name]) {
$.event.special[object.name] = {};
}
if (!$.event.special[object.name].owl) {
// Chain any pre-existing _default so other code keeps working.
var _default = $.event.special[object.name]._default;
$.event.special[object.name]._default = function(e) {
if (_default && _default.apply && (!e.namespace || e.namespace.indexOf('owl') === -1)) {
return _default.apply(this, arguments);
}
// Returning true suppresses the native default for owl events.
return e.namespace && e.namespace.indexOf('owl') > -1;
};
$.event.special[object.name].owl = true;
}
} else if (object.type === Owl.Type.State) {
if (!this._states.tags[object.name]) {
this._states.tags[object.name] = object.tags;
} else {
this._states.tags[object.name] = this._states.tags[object.name].concat(object.tags);
}
// De-duplicate: keep only the first occurrence of each tag.
this._states.tags[object.name] = $.grep(this._states.tags[object.name], $.proxy(function(tag, i) {
return $.inArray(tag, this._states.tags[object.name]) === i;
}, this));
}
};
/**
* Suppresses events.
* @protected
* @param {Array.<String>} events - The events to suppress.
*/
// Marks the given event names as suppressed so trigger() skips them.
Owl.prototype.suppress = function(events) {
  var suppressed = this._supress;
  $.each(events, function(index, event) {
    suppressed[event] = true;
  });
};
/**
* Releases suppressed events.
* @protected
* @param {Array.<String>} events - The events to release.
*/
// Clears the suppression flag for the given event names.
Owl.prototype.release = function(events) {
  var suppressed = this._supress;
  $.each(events, function(index, event) {
    delete suppressed[event];
  });
};
/**
* Gets unified pointer coordinates from event.
* @todo #261
* @protected
* @param {Event} - The `mousedown` or `touchstart` event.
* @returns {Object} - Contains `x` and `y` coordinates of current pointer position.
*/
// Extracts unified { x, y } page coordinates from a mouse or touch event,
// using the first active (or last changed) touch point for touch events.
Owl.prototype.pointer = function(event) {
  var result = { x: null, y: null };
  event = event.originalEvent || event || window.event;
  event = event.touches && event.touches.length ?
    event.touches[0] : event.changedTouches && event.changedTouches.length ?
    event.changedTouches[0] : event;
  // Check for undefined rather than truthiness: the original `if (event.pageX)`
  // sent a legitimate coordinate of exactly 0 down the legacy clientX/clientY
  // branch, skewing drag deltas at the very left/top edge of the page.
  if (event.pageX !== undefined) {
    result.x = event.pageX;
    result.y = event.pageY;
  } else {
    // Legacy engines without pageX/pageY.
    result.x = event.clientX;
    result.y = event.clientY;
  }
  return result;
};
/**
* Determines if the input is a Number or something that can be coerced to a Number
* @protected
* @param {Number|String|Object|Array|Boolean|RegExp|Function|Symbol} - The input to be tested
* @returns {Boolean} - An indication if the input is a Number or can be coerced to a Number
*/
// True when the input is a Number or coercible to one: parseFloat yields
// NaN only when no leading numeric portion exists.
Owl.prototype.isNumeric = function(number) {
  return isNaN(parseFloat(number)) === false;
};
/**
* Gets the difference of two vectors.
* @todo #261
* @protected
* @param {Object} - The first vector.
* @param {Object} - The second vector.
* @returns {Object} - The difference.
*/
// Component-wise difference of two { x, y } vectors (first minus second).
Owl.prototype.difference = function(first, second) {
  var dx = first.x - second.x,
    dy = first.y - second.y;
  return { x: dx, y: dy };
};
/**
* The jQuery Plugin for the Owl Carousel
* @todo Navigation plugin `next` and `prev`
* @public
*/
// jQuery plugin entry point: lazily constructs one Owl instance per element
// (stored under the 'owl.carousel' data key), proxies the public methods as
// namespaced DOM events, and dispatches string options as method calls.
$.fn.owlCarousel = function(option) {
var args = Array.prototype.slice.call(arguments, 1);
return this.each(function() {
var $this = $(this),
data = $this.data('owl.carousel');
if (!data) {
data = new Owl(this, typeof option == 'object' && option);
$this.data('owl.carousel', data);
// Expose public methods as events, e.g. $el.trigger('next.owl.carousel').
$.each([
'next', 'prev', 'to', 'destroy', 'refresh', 'replace', 'add', 'remove'
], function(i, event) {
data.register({ type: Owl.Type.Event, name: event });
data.$element.on(event + '.owl.carousel.core', $.proxy(function(e) {
// Only handle externally triggered events, and suppress the
// corresponding internal event while the method runs.
if (e.namespace && e.relatedTarget !== this) {
this.suppress([ event ]);
data[event].apply(this, [].slice.call(arguments, 1));
this.release([ event ]);
}
}, data));
});
}
// String option: invoke a public method (leading '_' marks private).
if (typeof option == 'string' && option.charAt(0) !== '_') {
data[option].apply(data, args);
}
});
};
/**
* The constructor for the jQuery Plugin
* @public
*/
$.fn.owlCarousel.Constructor = Owl;
})(window.Zepto || window.jQuery, window, document);
/**
* AutoRefresh Plugin
* @version 2.3.4
* @author Artus Kolanowski
* @author David Deutsch
* @license The MIT License (MIT)
*/
;(function($, window, document, undefined) {
/**
* Creates the auto refresh plugin.
* @class The Auto Refresh Plugin
* @param {Owl} carousel - The Owl Carousel
*/
var AutoRefresh = function(carousel) {
/**
 * Reference to the core.
 * @protected
 * @type {Owl}
 */
this._core = carousel;
/**
 * Refresh interval handle (from window.setInterval), or null when idle.
 * @protected
 * @type {number}
 */
this._interval = null;
/**
 * Whether the element is currently visible or not.
 * @protected
 * @type {Boolean}
 */
this._visible = null;
/**
 * All event handlers.
 * @protected
 * @type {Object}
 */
this._handlers = {
// Start polling for visibility changes once the carousel is initialized.
'initialized.owl.carousel': $.proxy(function(e) {
if (e.namespace && this._core.settings.autoRefresh) {
this.watch();
}
}, this)
};
// set default options
this._core.options = $.extend({}, AutoRefresh.Defaults, this._core.options);
// register event handlers
this._core.$element.on(this._handlers);
};
/**
* Default options.
* @public
*/
AutoRefresh.Defaults = {
// Enable the plugin's visibility polling.
autoRefresh: true,
// Poll interval in milliseconds.
autoRefreshInterval: 500
};
/**
* Watches the element.
*/
// Starts polling the carousel's visibility; a second call while a poll is
// already running is a no-op.
AutoRefresh.prototype.watch = function() {
  if (this._interval) {
    return;
  }
  this._visible = this._core.isVisible();
  this._interval = window.setInterval(
    $.proxy(this.refresh, this),
    this._core.settings.autoRefreshInterval
  );
};
/**
* Refreshes the element.
*/
// Poll callback: reacts only when visibility flipped since the last tick,
// toggling the hidden class and refreshing the core on becoming visible.
AutoRefresh.prototype.refresh = function() {
if (this._core.isVisible() === this._visible) {
return;
}
this._visible = !this._visible;
this._core.$element.toggleClass('owl-hidden', !this._visible);
// invalidate() returns an array (always truthy), so refresh() runs
// whenever the element just became visible.
this._visible && (this._core.invalidate('width') && this._core.refresh());
};
/**
* Destroys the plugin.
*/
// Destroys the plugin: stops the poll timer, unbinds its handlers and nulls
// all non-function members.
AutoRefresh.prototype.destroy = function() {
  var handler;
  window.clearInterval(this._interval);
  for (handler in this._handlers) {
    this._core.$element.off(handler, this._handlers[handler]);
  }
  // Bug fix: the original used `for (property in Object.getOwnPropertyNames(this))`,
  // which iterates the *indices* of the returned array ('0', '1', ...), so the
  // actual members were never nulled. Iterate the names themselves instead.
  $.each(Object.getOwnPropertyNames(this), $.proxy(function(i, property) {
    typeof this[property] != 'function' && (this[property] = null);
  }, this));
};
$.fn.owlCarousel.Constructor.Plugins.AutoRefresh = AutoRefresh;
})(window.Zepto || window.jQuery, window, document);
/**
* Lazy Plugin
* @version 2.3.4
* @author Bartosz Wojciechowski
* @author David Deutsch
* @license The MIT License (MIT)
*/
;(function($, window, document, undefined) {
/**
* Creates the lazy plugin.
* @class The Lazy Plugin
* @param {Owl} carousel - The Owl Carousel
*/
var Lazy = function(carousel) {
/**
 * Reference to the core.
 * @protected
 * @type {Owl}
 */
this._core = carousel;
/**
 * DOM nodes of items whose resources were already loaded.
 * @protected
 * @type {Array.<jQuery>}
 */
this._loaded = [];
/**
 * Event handlers.
 * @protected
 * @type {Object}
 */
this._handlers = {
// On init, position change or resize: load the items that are (about to
// become) visible, plus their clones.
'initialized.owl.carousel change.owl.carousel resized.owl.carousel': $.proxy(function(e) {
if (!e.namespace) {
return;
}
if (!this._core.settings || !this._core.settings.lazyLoad) {
return;
}
if ((e.property && e.property.name == 'position') || e.type == 'initialized') {
var settings = this._core.settings,
// Number of items to load ahead; in center mode half on each side.
n = (settings.center && Math.ceil(settings.items / 2) || settings.items),
i = ((settings.center && n * -1) || 0),
// Target position: the incoming value for 'change', else current.
position = (e.property && e.property.value !== undefined ? e.property.value : this._core.current()) + i,
clones = this._core.clones().length,
load = $.proxy(function(i, v) { this.load(v) }, this);
//TODO: Need documentation for this new option
if (settings.lazyLoadEager > 0) {
n += settings.lazyLoadEager;
// If the carousel is looping also preload images that are to the "left"
if (settings.loop) {
position -= settings.lazyLoadEager;
n++;
}
}
while (i++ < n) {
// Load the item itself and every clone of it.
this.load(clones / 2 + this._core.relative(position));
clones && $.each(this._core.clones(this._core.relative(position)), load);
position++;
}
}
}, this)
};
// set the default options
this._core.options = $.extend({}, Lazy.Defaults, this._core.options);
// register event handler
this._core.$element.on(this._handlers);
};
/**
* Default options.
* @public
*/
Lazy.Defaults = {
// Disabled by default; set true to defer resource loading.
lazyLoad: false,
// Extra items to preload beyond the visible range.
lazyLoadEager: 0
};
/**
* Loads all resources of an item at the specified position.
* @param {Number} position - The absolute position of the item.
* @protected
*/
// Loads every `.owl-lazy` resource inside the item at the given absolute
// stage position, choosing retina sources on high-DPI screens; each item
// is loaded at most once.
Lazy.prototype.load = function(position) {
var $item = this._core.$stage.children().eq(position),
$elements = $item && $item.find('.owl-lazy');
if (!$elements || $.inArray($item.get(0), this._loaded) > -1) {
return;
}
$elements.each($.proxy(function(index, element) {
var $element = $(element), image,
// Prefer the retina source on high-DPI displays, then data-src/srcset.
url = (window.devicePixelRatio > 1 && $element.attr('data-src-retina')) || $element.attr('data-src') || $element.attr('data-srcset');
this._core.trigger('load', { element: $element, url: url }, 'lazy');
if ($element.is('img')) {
// Fade the image in once the browser has loaded it.
$element.one('load.owl.lazy', $.proxy(function() {
$element.css('opacity', 1);
this._core.trigger('loaded', { element: $element, url: url }, 'lazy');
}, this)).attr('src', url);
} else if ($element.is('source')) {
$element.one('load.owl.lazy', $.proxy(function() {
this._core.trigger('loaded', { element: $element, url: url }, 'lazy');
}, this)).attr('srcset', url);
} else {
// Any other element: load via a detached Image, then apply the URL
// as a CSS background.
image = new Image();
image.onload = $.proxy(function() {
$element.css({
'background-image': 'url("' + url + '")',
'opacity': '1'
});
this._core.trigger('loaded', { element: $element, url: url }, 'lazy');
}, this);
image.src = url;
}
}, this));
this._loaded.push($item.get(0));
};
/**
* Destroys the plugin.
* @public
*/
// Destroys the plugin: unbinds its handlers and nulls all non-function members.
Lazy.prototype.destroy = function() {
  var handler;
  // Bug fix: the original iterated `this.handlers` (no underscore), which is
  // undefined — the constructor registers `this._handlers` — so the event
  // handlers were never unbound and kept firing on a destroyed plugin.
  for (handler in this._handlers) {
    this._core.$element.off(handler, this._handlers[handler]);
  }
  // Bug fix: `for (property in Object.getOwnPropertyNames(this))` walks the
  // array's indices ('0', '1', ...) rather than the property names, so the
  // members were never nulled. Iterate the names themselves instead.
  $.each(Object.getOwnPropertyNames(this), $.proxy(function(i, property) {
    typeof this[property] != 'function' && (this[property] = null);
  }, this));
};
$.fn.owlCarousel.Constructor.Plugins.Lazy = Lazy;
})(window.Zepto || window.jQuery, window, document);
/**
* AutoHeight Plugin
* @version 2.3.4
* @author Bartosz Wojciechowski
* @author David Deutsch
* @license The MIT License (MIT)
*/
;(function($, window, document, undefined) {
/**
 * Creates the auto height plugin.
 * @class The Auto Height Plugin
 * @param {Owl} carousel - The Owl Carousel
 */
var AutoHeight = function(carousel) {
	/**
	 * Reference to the core.
	 * @protected
	 * @type {Owl}
	 */
	this._core = carousel;
	// Last height applied to the stage; reused while lazy images load.
	this._previousHeight = null;
	/**
	 * All event handlers.
	 * @protected
	 * @type {Object}
	 */
	this._handlers = {
		// Recalculate the height once the carousel is ready or refreshed.
		'initialized.owl.carousel refreshed.owl.carousel': $.proxy(function(e) {
			if (e.namespace && this._core.settings.autoHeight) {
				this.update();
			}
		}, this),
		// Recalculate whenever the current position changes.
		'changed.owl.carousel': $.proxy(function(e) {
			if (e.namespace && this._core.settings.autoHeight && e.property.name === 'position'){
				this.update();
			}
		}, this),
		// Recalculate when a lazy image belonging to the current item arrives.
		'loaded.owl.lazy': $.proxy(function(e) {
			if (e.namespace && this._core.settings.autoHeight
				&& e.element.closest('.' + this._core.settings.itemClass).index() === this._core.current()) {
				this.update();
			}
		}, this)
	};
	// set default options
	this._core.options = $.extend({}, AutoHeight.Defaults, this._core.options);
	// register event handlers
	this._core.$element.on(this._handlers);
	// Timeout id used to debounce resize-triggered updates.
	this._intervalId = null;
	var refThis = this;
	// These changes have been taken from a PR by gavrochelegnou proposed in #1575
	// and have been made compatible with the latest jQuery version
	$(window).on('load', function() {
		if (refThis._core.settings.autoHeight) {
			refThis.update();
		}
	});
	// Autoresize the height of the carousel when window is resized
	// When carousel has images, the height is dependent on the width
	// and should also change on resize
	$(window).resize(function() {
		if (refThis._core.settings.autoHeight) {
			if (refThis._intervalId != null) {
				clearTimeout(refThis._intervalId);
			}
			// Debounce: wait 250 ms after the last resize event.
			refThis._intervalId = setTimeout(function() {
				refThis.update();
			}, 250);
		}
	});
};
/**
 * Default options.
 * @public
 */
AutoHeight.Defaults = {
	// Whether to adjust the stage height to the tallest visible item.
	autoHeight: false,
	// Class added to the stage wrapper while auto height is active.
	autoHeightClass: 'owl-height'
};
/**
 * Updates the view: measures the currently visible items and applies the
 * tallest height to the stage wrapper.
 */
AutoHeight.prototype.update = function() {
	var start = this._core._current,
		end = start + this._core.settings.items,
		lazyLoadEnabled = this._core.settings.lazyLoad,
		visible = this._core.$stage.children().toArray().slice(start, end),
		heights = [],
		maxheight = 0;
	$.each(visible, function(index, item) {
		heights.push($(item).height());
	});
	// Math.max of an empty list is -Infinity; keep 0 when no items are
	// visible so a bogus negative height is never written to the DOM.
	maxheight = heights.length ? Math.max.apply(null, heights) : 0;
	// While lazy images are still loading the measured height can collapse
	// to <= 1px; reuse the previous height until they arrive.
	if (maxheight <= 1 && lazyLoadEnabled && this._previousHeight) {
		maxheight = this._previousHeight;
	}
	this._previousHeight = maxheight;
	this._core.$stage.parent()
		.height(maxheight)
		.addClass(this._core.settings.autoHeightClass);
};
/**
 * Destroys the plugin: unbinds all event handlers and releases members.
 * @public
 */
AutoHeight.prototype.destroy = function() {
	var handler, properties, i;
	for (handler in this._handlers) {
		this._core.$element.off(handler, this._handlers[handler]);
	}
	// Iterate the VALUES of the name array (the original `for…in` walked
	// the array indices, so no member was ever released).
	properties = Object.getOwnPropertyNames(this);
	for (i = 0; i < properties.length; i++) {
		typeof this[properties[i]] !== 'function' && (this[properties[i]] = null);
	}
};
$.fn.owlCarousel.Constructor.Plugins.AutoHeight = AutoHeight;
})(window.Zepto || window.jQuery, window, document);
/**
* Video Plugin
* @version 2.3.4
* @author Bartosz Wojciechowski
* @author David Deutsch
* @license The MIT License (MIT)
*/
;(function($, window, document, undefined) {
/**
 * Creates the video plugin.
 * @class The Video Plugin
 * @param {Owl} carousel - The Owl Carousel
 */
var Video = function(carousel) {
	/**
	 * Reference to the core.
	 * @protected
	 * @type {Owl}
	 */
	this._core = carousel;
	/**
	 * Cache all video URLs, keyed by the item's href.
	 * @protected
	 * @type {Object}
	 */
	this._videos = {};
	/**
	 * Current playing item.
	 * @protected
	 * @type {jQuery}
	 */
	this._playing = null;
	/**
	 * All event handlers.
	 * @todo The cloned content removale is too late
	 * @protected
	 * @type {Object}
	 */
	this._handlers = {
		// Register the 'playing' state once the carousel is ready.
		'initialized.owl.carousel': $.proxy(function(e) {
			if (e.namespace) {
				this._core.register({ type: 'state', name: 'playing', tags: [ 'interacting' ] });
			}
		}, this),
		// Suppress resizing while a video is shown full screen.
		'resize.owl.carousel': $.proxy(function(e) {
			if (e.namespace && this._core.settings.video && this.isInFullScreen()) {
				e.preventDefault();
			}
		}, this),
		// Remove stale video frames from clones after a refresh.
		'refreshed.owl.carousel': $.proxy(function(e) {
			if (e.namespace && this._core.is('resizing')) {
				this._core.$stage.find('.cloned .owl-video-frame').remove();
			}
		}, this),
		// Stop playback when navigating away from the playing item.
		'changed.owl.carousel': $.proxy(function(e) {
			if (e.namespace && e.property.name === 'position' && this._playing) {
				this.stop();
			}
		}, this),
		// Hide the raw video stub and fetch its metadata on preparation.
		'prepared.owl.carousel': $.proxy(function(e) {
			if (!e.namespace) {
				return;
			}
			var $element = $(e.content).find('.owl-video');
			if ($element.length) {
				$element.css('display', 'none');
				this.fetch($element, $(e.content));
			}
		}, this)
	};
	// set default options
	this._core.options = $.extend({}, Video.Defaults, this._core.options);
	// register event handlers
	this._core.$element.on(this._handlers);
	// Start playback when the overlayed play icon is clicked.
	this._core.$element.on('click.owl.video', '.owl-video-play-icon', $.proxy(function(e) {
		this.play(e);
	}, this));
};
/**
 * Default options.
 * @public
 */
Video.Defaults = {
	// Whether video support is enabled.
	video: false,
	// Fixed height for video frames (false = use the stage height).
	videoHeight: false,
	// Fixed width for video frames (false = 100%).
	videoWidth: false
};
/**
 * Gets the video ID and the type (YouTube/Vimeo/vzaar only), caches the
 * result in `_videos` and triggers thumbnail creation.
 * @protected
 * @param {jQuery} target - The target containing the video data.
 * @param {jQuery} item - The item containing the video.
 * @throws {Error} When the URL is missing or not recognized.
 */
Video.prototype.fetch = function(target, item) {
	// Provider guessed from data attributes; overridden below when the
	// href URL can be parsed.
	var type = (function() {
			if (target.attr('data-vimeo-id')) {
				return 'vimeo';
			} else if (target.attr('data-vzaar-id')) {
				return 'vzaar';
			} else {
				return 'youtube';
			}
		})(),
		id = target.attr('data-vimeo-id') || target.attr('data-youtube-id') || target.attr('data-vzaar-id'),
		width = target.attr('data-width') || this._core.settings.videoWidth,
		height = target.attr('data-height') || this._core.settings.videoHeight,
		url = target.attr('href');
	if (url) {
		/*
				Parses the id's out of the following urls (and probably more):
				https://www.youtube.com/watch?v=:id
				https://youtu.be/:id
				https://vimeo.com/:id
				https://vimeo.com/channels/:channel/:id
				https://vimeo.com/groups/:group/videos/:id
				https://app.vzaar.com/videos/:id
				Visual example: https://regexper.com/#(http%3A%7Chttps%3A%7C)%5C%2F%5C%2F(player.%7Cwww.%7Capp.)%3F(vimeo%5C.com%7Cyoutu(be%5C.com%7C%5C.be%7Cbe%5C.googleapis%5C.com)%7Cvzaar%5C.com)%5C%2F(video%5C%2F%7Cvideos%5C%2F%7Cembed%5C%2F%7Cchannels%5C%2F.%2B%5C%2F%7Cgroups%5C%2F.%2B%5C%2F%7Cwatch%5C%3Fv%3D%7Cv%5C%2F)%3F(%5BA-Za-z0-9._%25-%5D*)(%5C%26%5CS%2B)%3F
		*/
		id = url.match(/(http:|https:|)\/\/(player.|www.|app.)?(vimeo\.com|youtu(be\.com|\.be|be\.googleapis\.com|be\-nocookie\.com)|vzaar\.com)\/(video\/|videos\/|embed\/|channels\/.+\/|groups\/.+\/|watch\?v=|v\/)?([A-Za-z0-9._%-]*)(\&\S+)?/);
		// String#match returns null when nothing matches; fail with the
		// descriptive error instead of a TypeError on `id[3]`.
		if (!id) {
			throw new Error('Video URL not supported.');
		}
		if (id[3].indexOf('youtu') > -1) {
			type = 'youtube';
		} else if (id[3].indexOf('vimeo') > -1) {
			type = 'vimeo';
		} else if (id[3].indexOf('vzaar') > -1) {
			type = 'vzaar';
		} else {
			throw new Error('Video URL not supported.');
		}
		// Capture group 6 holds the bare video id.
		id = id[6];
	} else {
		throw new Error('Missing video URL.');
	}
	this._videos[url] = {
		type: type,
		id: id,
		width: width,
		height: height
	};
	item.attr('data-video', url);
	this.thumbnail(target, this._videos[url]);
};
/**
 * Creates the video thumbnail overlay and the play icon for an item.
 * @protected
 * @param {jQuery} target - The target containing the video data.
 * @param {Object} video - The video info object (`type`, `id`, `width`, `height`).
 * @see `fetch`
 */
Video.prototype.thumbnail = function(target, video) {
	var tnLink,
		icon,
		path,
		dimensions = video.width && video.height ? 'width:' + video.width + 'px;height:' + video.height + 'px;' : '',
		customTn = target.find('img'),
		srcType = 'src',
		lazyClass = '',
		settings = this._core.settings,
		create = function(path) {
			icon = '<div class="owl-video-play-icon"></div>';
			if (settings.lazyLoad) {
				// Set the dynamically chosen source attribute (`data-src`
				// in lazy mode) — the original passed the literal key
				// "srcType", so the URL never reached the element.
				tnLink = $('<div/>', {
					"class": 'owl-video-tn ' + lazyClass
				}).attr(srcType, path);
			} else {
				// `background-image` is the correct CSS property name
				// (was misspelled `background-img`).
				tnLink = $( '<div/>', {
					"class": "owl-video-tn",
					"style": 'opacity:1;background-image:url(' + path + ')'
				});
			}
			target.after(tnLink);
			target.after(icon);
		};
	// wrap video content into owl-video-wrapper div
	target.wrap( $( '<div/>', {
		"class": "owl-video-wrapper",
		"style": dimensions
	}));
	if (this._core.settings.lazyLoad) {
		srcType = 'data-src';
		lazyClass = 'owl-lazy';
	}
	// custom thumbnail provided in the markup wins over provider APIs
	if (customTn.length) {
		create(customTn.attr(srcType));
		customTn.remove();
		return false;
	}
	if (video.type === 'youtube') {
		path = "//img.youtube.com/vi/" + video.id + "/hqdefault.jpg";
		create(path);
	} else if (video.type === 'vimeo') {
		// Fetch the thumbnail URL from the Vimeo API (JSONP).
		$.ajax({
			type: 'GET',
			url: '//vimeo.com/api/v2/video/' + video.id + '.json',
			jsonp: 'callback',
			dataType: 'jsonp',
			success: function(data) {
				path = data[0].thumbnail_large;
				create(path);
			}
		});
	} else if (video.type === 'vzaar') {
		// Fetch the thumbnail URL from the vzaar API (JSONP).
		$.ajax({
			type: 'GET',
			url: '//vzaar.com/api/videos/' + video.id + '.json',
			jsonp: 'callback',
			dataType: 'jsonp',
			success: function(data) {
				path = data.framegrab_url;
				create(path);
			}
		});
	}
};
/**
 * Stops the currently playing video, removes its frame and restores the item.
 * @public
 */
Video.prototype.stop = function() {
	var $playing = this._playing;
	this._core.trigger('stop', null, 'video');
	$playing.find('.owl-video-frame').remove();
	$playing.removeClass('owl-video-playing');
	this._playing = null;
	this._core.leave('playing');
	this._core.trigger('stopped', null, 'video');
};
/**
 * Starts the video of the clicked item by injecting a provider iframe,
 * and marks the carousel as playing.
 * @public
 * @param {Event} event - The click event on the play icon.
 */
Video.prototype.play = function(event) {
	var target = $(event.target),
		item = target.closest('.' + this._core.settings.itemClass),
		video = this._videos[item.attr('data-video')],
		width = video.width || '100%',
		height = video.height || this._core.$stage.height(),
		html,
		iframe;
	// Only one video may play at a time.
	if (this._playing) {
		return;
	}
	this._core.enter('playing');
	this._core.trigger('play', null, 'video');
	// Resolve the clicked item and re-position the carousel on it.
	item = this._core.items(this._core.relative(item.index()));
	this._core.reset(item.index());
	html = $( '<iframe frameborder="0" allowfullscreen mozallowfullscreen webkitAllowFullScreen ></iframe>' );
	html.attr( 'height', height );
	html.attr( 'width', width );
	// Build the provider-specific autoplaying embed URL.
	if (video.type === 'youtube') {
		html.attr( 'src', '//www.youtube.com/embed/' + video.id + '?autoplay=1&rel=0&v=' + video.id );
	} else if (video.type === 'vimeo') {
		html.attr( 'src', '//player.vimeo.com/video/' + video.id + '?autoplay=1' );
	} else if (video.type === 'vzaar') {
		html.attr( 'src', '//view.vzaar.com/' + video.id + '/player?autoplay=true' );
	}
	// Insert the iframe, wrapped in the frame container, after the stub.
	iframe = $(html).wrap( '<div class="owl-video-frame" />' ).insertAfter(item.find('.owl-video'));
	this._playing = item.addClass('owl-video-playing');
};
/**
 * Checks whether a video is currently shown in full screen mode.
 * @todo Bad style because looks like a readonly method but changes members.
 * @protected
 * @returns {Boolean}
 */
Video.prototype.isInFullScreen = function() {
	// Look up the fullscreen element across vendor-prefixed APIs.
	var fullscreen = document.fullscreenElement ||
		document.mozFullScreenElement ||
		document.webkitFullscreenElement;
	return fullscreen && $(fullscreen).parent().hasClass('owl-video-frame');
};
/**
 * Destroys the plugin: unbinds all event handlers and releases members.
 */
Video.prototype.destroy = function() {
	var handler, properties, i;
	this._core.$element.off('click.owl.video');
	for (handler in this._handlers) {
		this._core.$element.off(handler, this._handlers[handler]);
	}
	// Iterate the VALUES of the name array (the original `for…in` walked
	// the array indices, so no member was ever released).
	properties = Object.getOwnPropertyNames(this);
	for (i = 0; i < properties.length; i++) {
		typeof this[properties[i]] != 'function' && (this[properties[i]] = null);
	}
};
$.fn.owlCarousel.Constructor.Plugins.Video = Video;
})(window.Zepto || window.jQuery, window, document);
/**
* Animate Plugin
* @version 2.3.4
* @author Bartosz Wojciechowski
* @author David Deutsch
* @license The MIT License (MIT)
*/
;(function($, window, document, undefined) {
/**
 * Creates the animate plugin.
 * @class The Animate Plugin
 * @param {Owl} scope - The Owl Carousel
 */
var Animate = function(scope) {
	this.core = scope;
	this.core.options = $.extend({}, Animate.Defaults, this.core.options);
	// Whether a class-based swap may run (false while dragging).
	this.swapping = true;
	// Index of the item being animated out.
	this.previous = undefined;
	// Index of the item being animated in.
	this.next = undefined;
	this.handlers = {
		// Remember the previous and the upcoming position.
		'change.owl.carousel': $.proxy(function(e) {
			if (e.namespace && e.property.name == 'position') {
				this.previous = this.core.current();
				this.next = e.property.value;
			}
		}, this),
		// Allow swapping only after a finished translation, not during drags.
		'drag.owl.carousel dragged.owl.carousel translated.owl.carousel': $.proxy(function(e) {
			if (e.namespace) {
				this.swapping = e.type == 'translated';
			}
		}, this),
		// Run the class swap when a translation starts and animations are configured.
		'translate.owl.carousel': $.proxy(function(e) {
			if (e.namespace && this.swapping && (this.core.options.animateOut || this.core.options.animateIn)) {
				this.swap();
			}
		}, this)
	};
	this.core.$element.on(this.handlers);
};
/**
 * Default options.
 * @public
 */
Animate.Defaults = {
	// CSS animation class for the outgoing item (false = none).
	animateOut: false,
	// CSS animation class for the incoming item (false = none).
	animateIn: false
};
/**
 * Toggles the animation classes whenever a translation starts.
 * Only active for single-item carousels in browsers that support
 * CSS animations and transitions.
 * @protected
 * @returns {Boolean|undefined}
 */
Animate.prototype.swap = function() {
	// Class-based swapping only works with exactly one item per page.
	if (this.core.settings.items !== 1) {
		return;
	}
	// Requires CSS animation/transition support detected by the Support plugin.
	if (!$.support.animation || !$.support.transition) {
		return;
	}
	this.core.speed(0);
	var left,
		clear = $.proxy(this.clear, this),
		previous = this.core.$stage.children().eq(this.previous),
		next = this.core.$stage.children().eq(this.next),
		incoming = this.core.settings.animateIn,
		outgoing = this.core.settings.animateOut;
	// Nothing to animate when the position did not actually change.
	if (this.core.current() === this.previous) {
		return;
	}
	if (outgoing) {
		// Offset the outgoing item so it stays visually in place while
		// it animates out; `clear` removes the classes when done.
		left = this.core.coordinates(this.previous) - this.core.coordinates(this.next);
		previous.one($.support.animation.end, clear)
			.css( { 'left': left + 'px' } )
			.addClass('animated owl-animated-out')
			.addClass(outgoing);
	}
	if (incoming) {
		next.one($.support.animation.end, clear)
			.addClass('animated owl-animated-in')
			.addClass(incoming);
	}
};
/**
 * Removes all animation classes once an animation has finished.
 * @param {Event} e - The animation-end event.
 */
Animate.prototype.clear = function(e) {
	var settings = this.core.settings;
	$(e.target)
		.css( { 'left': '' } )
		.removeClass('animated owl-animated-out owl-animated-in')
		.removeClass(settings.animateIn)
		.removeClass(settings.animateOut);
	this.core.onTransitionEnd();
};
/**
 * Destroys the plugin: unbinds all event handlers and releases members.
 * @public
 */
Animate.prototype.destroy = function() {
	var handler, properties, i;
	for (handler in this.handlers) {
		this.core.$element.off(handler, this.handlers[handler]);
	}
	// Iterate the VALUES of the name array (the original `for…in` walked
	// the array indices, so no member was ever released).
	properties = Object.getOwnPropertyNames(this);
	for (i = 0; i < properties.length; i++) {
		typeof this[properties[i]] != 'function' && (this[properties[i]] = null);
	}
};
$.fn.owlCarousel.Constructor.Plugins.Animate = Animate;
})(window.Zepto || window.jQuery, window, document);
/**
* Autoplay Plugin
* @version 2.3.4
* @author Bartosz Wojciechowski
* @author Artus Kolanowski
* @author David Deutsch
* @author Tom De Caluwé
* @license The MIT License (MIT)
*/
;(function($, window, document, undefined) {
/**
 * Creates the autoplay plugin.
 * @class The Autoplay Plugin
 * @param {Owl} carousel - The Owl Carousel
 */
var Autoplay = function(carousel) {
	/**
	 * Reference to the core.
	 * @protected
	 * @type {Owl}
	 */
	this._core = carousel;
	/**
	 * The autoplay timeout id.
	 * @type {Number}
	 */
	this._call = null;
	/**
	 * Depending on the state of the plugin, this variable contains either
	 * the start time of the timer or the current timer value if it's
	 * paused. Since we start in a paused state we initialize the timer
	 * value.
	 * @type {Number}
	 */
	this._time = 0;
	/**
	 * Stores the timeout currently used.
	 * @type {Number}
	 */
	this._timeout = 0;
	/**
	 * Indicates whenever the autoplay is paused.
	 * @type {Boolean}
	 */
	this._paused = true;
	/**
	 * All event handlers.
	 * @protected
	 * @type {Object}
	 */
	this._handlers = {
		// React to runtime settings changes and user-driven position changes.
		'changed.owl.carousel': $.proxy(function(e) {
			if (e.namespace && e.property.name === 'settings') {
				if (this._core.settings.autoplay) {
					this.play();
				} else {
					this.stop();
				}
			} else if (e.namespace && e.property.name === 'position' && this._paused) {
				// Reset the timer. This code is triggered when the position
				// of the carousel was changed through user interaction.
				this._time = 0;
			}
		}, this),
		// Start rotating as soon as the carousel is ready.
		'initialized.owl.carousel': $.proxy(function(e) {
			if (e.namespace && this._core.settings.autoplay) {
				this.play();
			}
		}, this),
		// Public play/stop triggers.
		'play.owl.autoplay': $.proxy(function(e, t, s) {
			if (e.namespace) {
				this.play(t, s);
			}
		}, this),
		'stop.owl.autoplay': $.proxy(function(e) {
			if (e.namespace) {
				this.stop();
			}
		}, this),
		// Pause/resume on hover when autoplayHoverPause is enabled.
		'mouseover.owl.autoplay': $.proxy(function() {
			if (this._core.settings.autoplayHoverPause && this._core.is('rotating')) {
				this.pause();
			}
		}, this),
		'mouseleave.owl.autoplay': $.proxy(function() {
			if (this._core.settings.autoplayHoverPause && this._core.is('rotating')) {
				this.play();
			}
		}, this),
		// Pause/resume on touch when autoplayHoverPause is enabled.
		'touchstart.owl.core': $.proxy(function() {
			if (this._core.settings.autoplayHoverPause && this._core.is('rotating')) {
				this.pause();
			}
		}, this),
		'touchend.owl.core': $.proxy(function() {
			if (this._core.settings.autoplayHoverPause) {
				this.play();
			}
		}, this)
	};
	// register event handlers
	this._core.$element.on(this._handlers);
	// set default options
	this._core.options = $.extend({}, Autoplay.Defaults, this._core.options);
};
/**
 * Default options.
 * @public
 */
Autoplay.Defaults = {
	// Whether to rotate automatically.
	autoplay: false,
	// Interval between transitions in milliseconds.
	autoplayTimeout: 5000,
	// Whether to pause while the pointer hovers the carousel.
	autoplayHoverPause: false,
	// Transition speed (false = use the carousel default).
	autoplaySpeed: false
};
/**
 * Transitions to the next slide and schedules the following transition.
 * @private
 * @param {Number} [speed] - The animation speed for the animations.
 */
Autoplay.prototype._next = function(speed) {
	// Re-arm the timer first, aligned to the next multiple of `_timeout`,
	// so rotation continues even when this tick is skipped below.
	this._call = window.setTimeout(
		$.proxy(this._next, this, speed),
		this._timeout * (Math.round(this.read() / this._timeout) + 1) - this.read()
	);
	// Skip the transition while the user interacts or the tab is hidden.
	if (this._core.is('interacting') || document.hidden) {
		return;
	}
	this._core.next(speed || this._core.settings.autoplaySpeed);
};
/**
 * Reads the current timer value while the timer is playing.
 * @public
 * @returns {Number} Milliseconds elapsed since the timer origin.
 */
Autoplay.prototype.read = function() {
	return Date.now() - this._time;
};
/**
 * Starts (or re-parameterizes) the autoplay.
 * @public
 * @param {Number} [timeout] - The interval before the next animation starts.
 * @param {Number} [speed] - The animation speed for the animations.
 */
Autoplay.prototype.play = function(timeout, speed) {
	var elapsed;
	if (!this._core.is('rotating')) {
		this._core.enter('rotating');
	}
	timeout = timeout || this._core.settings.autoplayTimeout;
	// Calculate the elapsed time since the last transition. If the carousel
	// wasn't playing this calculation will yield zero.
	elapsed = Math.min(this._time % (this._timeout || timeout), timeout);
	if (this._paused) {
		// Start the clock: `_time` becomes the timer origin.
		this._time = this.read();
		this._paused = false;
	} else {
		// Clear the active timeout to allow replacement.
		window.clearTimeout(this._call);
	}
	// Adjust the origin of the timer to match the new timeout value.
	this._time += this.read() % timeout - elapsed;
	this._timeout = timeout;
	// Schedule the next transition so the remaining interval is honored.
	this._call = window.setTimeout($.proxy(this._next, this, speed), timeout - elapsed);
};
/**
 * Stops the autoplay and resets the timer.
 * @public
 */
Autoplay.prototype.stop = function() {
	if (!this._core.is('rotating')) {
		return;
	}
	// Reset the clock.
	this._time = 0;
	this._paused = true;
	window.clearTimeout(this._call);
	this._core.leave('rotating');
};
/**
 * Pauses the autoplay, keeping the current timer value.
 * @public
 */
Autoplay.prototype.pause = function() {
	if (!this._core.is('rotating') || this._paused) {
		return;
	}
	// Pause the clock: `_time` now stores the elapsed value.
	this._time = this.read();
	this._paused = true;
	window.clearTimeout(this._call);
};
/**
 * Destroys the plugin: stops rotation, unbinds handlers and releases members.
 */
Autoplay.prototype.destroy = function() {
	var handler, properties, i;
	this.stop();
	for (handler in this._handlers) {
		this._core.$element.off(handler, this._handlers[handler]);
	}
	// Iterate the VALUES of the name array (the original `for…in` walked
	// the array indices, so no member was ever released).
	properties = Object.getOwnPropertyNames(this);
	for (i = 0; i < properties.length; i++) {
		typeof this[properties[i]] != 'function' && (this[properties[i]] = null);
	}
};
$.fn.owlCarousel.Constructor.Plugins.autoplay = Autoplay;
})(window.Zepto || window.jQuery, window, document);
/**
* Navigation Plugin
* @version 2.3.4
* @author Artus Kolanowski
* @author David Deutsch
* @license The MIT License (MIT)
*/
;(function($, window, document, undefined) {
'use strict';
/**
 * Creates the navigation plugin.
 * @class The Navigation Plugin
 * @param {Owl} carousel - The Owl Carousel.
 */
var Navigation = function(carousel) {
	/**
	 * Reference to the core.
	 * @protected
	 * @type {Owl}
	 */
	this._core = carousel;
	/**
	 * Indicates whether the plugin is initialized or not.
	 * @protected
	 * @type {Boolean}
	 */
	this._initialized = false;
	/**
	 * The current paging indexes.
	 * @protected
	 * @type {Array}
	 */
	this._pages = [];
	/**
	 * All DOM elements of the user interface.
	 * @protected
	 * @type {Object}
	 */
	this._controls = {};
	/**
	 * Markup for an indicator.
	 * @protected
	 * @type {Array.<String>}
	 */
	this._templates = [];
	/**
	 * The carousel element.
	 * @type {jQuery}
	 */
	this.$element = this._core.$element;
	/**
	 * Overridden methods of the carousel, kept for delegation and restore.
	 * @protected
	 * @type {Object}
	 */
	this._overrides = {
		next: this._core.next,
		prev: this._core.prev,
		to: this._core.to
	};
	/**
	 * All event handlers.
	 * @protected
	 * @type {Object}
	 */
	this._handlers = {
		// Collect per-item dot templates from `[data-dot]` markup.
		'prepared.owl.carousel': $.proxy(function(e) {
			if (e.namespace && this._core.settings.dotsData) {
				this._templates.push('<div class="' + this._core.settings.dotClass + '">' +
					$(e.content).find('[data-dot]').addBack('[data-dot]').attr('data-dot') + '</div>');
			}
		}, this),
		// Move the most recently prepared template to the inserted position.
		'added.owl.carousel': $.proxy(function(e) {
			if (e.namespace && this._core.settings.dotsData) {
				this._templates.splice(e.position, 0, this._templates.pop());
			}
		}, this),
		// Drop the template of a removed item.
		'remove.owl.carousel': $.proxy(function(e) {
			if (e.namespace && this._core.settings.dotsData) {
				this._templates.splice(e.position, 1);
			}
		}, this),
		// Repaint the controls whenever the position changes.
		'changed.owl.carousel': $.proxy(function(e) {
			if (e.namespace && e.property.name == 'position') {
				this.draw();
			}
		}, this),
		// Build the UI exactly once, right after the carousel initialized.
		'initialized.owl.carousel': $.proxy(function(e) {
			if (e.namespace && !this._initialized) {
				this._core.trigger('initialize', null, 'navigation');
				this.initialize();
				this.update();
				this.draw();
				this._initialized = true;
				this._core.trigger('initialized', null, 'navigation');
			}
		}, this),
		// Recompute the pages and repaint after a refresh.
		'refreshed.owl.carousel': $.proxy(function(e) {
			if (e.namespace && this._initialized) {
				this._core.trigger('refresh', null, 'navigation');
				this.update();
				this.draw();
				this._core.trigger('refreshed', null, 'navigation');
			}
		}, this)
	};
	// set default options
	this._core.options = $.extend({}, Navigation.Defaults, this._core.options);
	// register event handlers
	this.$element.on(this._handlers);
};
/**
* Default options.
* @public
* @todo Rename `slideBy` to `navBy`
*/
Navigation.Defaults = {
nav: false,
navText: [
'<span aria-label="' + 'Previous' + '">‹</span>',
'<span aria-label="' + 'Next' + '">›</span>'
],
navSpeed: false,
navElement: 'button type="button" role="presentation"',
navContainer: false,
navContainerClass: 'owl-nav',
navClass: [
'owl-prev',
'owl-next'
],
slideBy: 1,
dotClass: 'owl-dot',
dotsClass: 'owl-dots',
dots: true,
dotsEach: false,
dotsData: false,
dotsSpeed: false,
dotsContainer: false
};
/**
 * Initializes the layout of the plugin (nav buttons and dots) and
 * overrides the carousel's `next`/`prev`/`to` methods.
 * @protected
 */
Navigation.prototype.initialize = function() {
	var override,
		settings = this._core.settings;
	// create DOM structure for relative navigation (prev/next buttons)
	this._controls.$relative = (settings.navContainer ? $(settings.navContainer)
		: $('<div>').addClass(settings.navContainerClass).appendTo(this.$element)).addClass('disabled');
	this._controls.$previous = $('<' + settings.navElement + '>')
		.addClass(settings.navClass[0])
		.html(settings.navText[0])
		.prependTo(this._controls.$relative)
		.on('click', $.proxy(function(e) {
			this.prev(settings.navSpeed);
		}, this));
	this._controls.$next = $('<' + settings.navElement + '>')
		.addClass(settings.navClass[1])
		.html(settings.navText[1])
		.appendTo(this._controls.$relative)
		.on('click', $.proxy(function(e) {
			this.next(settings.navSpeed);
		}, this));
	// create DOM structure for absolute navigation (dots)
	if (!settings.dotsData) {
		this._templates = [ $('<button role="button">')
			.addClass(settings.dotClass)
			.append($('<span>'))
			.prop('outerHTML') ];
	}
	this._controls.$absolute = (settings.dotsContainer ? $(settings.dotsContainer)
		: $('<div>').addClass(settings.dotsClass).appendTo(this.$element)).addClass('disabled');
	// Clicking a dot (or its child) navigates to the matching page.
	this._controls.$absolute.on('click', 'button', $.proxy(function(e) {
		var index = $(e.target).parent().is(this._controls.$absolute)
			? $(e.target).index() : $(e.target).parent().index();
		e.preventDefault();
		this.to(index, settings.dotsSpeed);
	}, this));
	/*$el.on('focusin', function() {
		$(document).off(".carousel");
		$(document).on('keydown.carousel', function(e) {
			if(e.keyCode == 37) {
				$el.trigger('prev.owl')
			}
			if(e.keyCode == 39) {
				$el.trigger('next.owl')
			}
		});
	});*/
	// override public methods of the carousel
	for (override in this._overrides) {
		this._core[override] = $.proxy(this[override], this);
	}
};
/**
 * Destroys the plugin: unbinds handlers, removes the controls, restores
 * the overridden carousel methods and releases members.
 * @protected
 */
Navigation.prototype.destroy = function() {
	var handler, control, override, properties, i, settings;
	settings = this._core.settings;
	for (handler in this._handlers) {
		this.$element.off(handler, this._handlers[handler]);
	}
	for (control in this._controls) {
		if (control === '$relative' && settings.navContainer) {
			this._controls[control].html('');
		} else {
			this._controls[control].remove();
		}
	}
	// Restore the core methods captured in `_overrides`. (The original
	// iterated the misspelled `this.overides`, so next/prev/to were
	// never restored.)
	for (override in this._overrides) {
		this._core[override] = this._overrides[override];
	}
	// Iterate the VALUES of the name array (the original `for…in` walked
	// the array indices, so no member was ever released).
	properties = Object.getOwnPropertyNames(this);
	for (i = 0; i < properties.length; i++) {
		typeof this[properties[i]] != 'function' && (this[properties[i]] = null);
	}
};
/**
 * Updates the internal paging state (`_pages`).
 * @protected
 */
Navigation.prototype.update = function() {
	var i, j, k,
		// Skip the cloned items at the head of the stage.
		lower = this._core.clones().length / 2,
		upper = lower + this._core.items().length,
		maximum = this._core.maximum(true),
		settings = this._core.settings,
		// Items per page: 1 when centering/auto-width/dotsData,
		// otherwise `dotsEach` or the visible item count.
		size = settings.center || settings.autoWidth || settings.dotsData
			? 1 : settings.dotsEach || settings.items;
	if (settings.slideBy !== 'page') {
		settings.slideBy = Math.min(settings.slideBy, settings.items);
	}
	if (settings.dots || settings.slideBy == 'page') {
		this._pages = [];
		// Walk the real items and open a new page every `size` merged items.
		for (i = lower, j = 0, k = 0; i < upper; i++) {
			if (j >= size || j === 0) {
				this._pages.push({
					start: Math.min(maximum, i - lower),
					end: i - lower + size - 1
				});
				// Stop once the last reachable position is covered.
				if (Math.min(maximum, i - lower) === maximum) {
					break;
				}
				j = 0, ++k;
			}
			// Merged items count as multiple toward the page size.
			j += this._core.mergers(this._core.relative(i));
		}
	}
};
/**
 * Draws the user interface (prev/next buttons and dots).
 * @todo The option `dotsData` wont work.
 * @protected
 */
Navigation.prototype.draw = function() {
	var difference,
		settings = this._core.settings,
		// Disable all controls when every item fits on one page.
		disabled = this._core.items().length <= settings.items,
		index = this._core.relative(this._core.current()),
		loop = settings.loop || settings.rewind;
	this._controls.$relative.toggleClass('disabled', !settings.nav || disabled);
	if (settings.nav) {
		// Without looping/rewinding, disable prev/next at the boundaries.
		this._controls.$previous.toggleClass('disabled', !loop && index <= this._core.minimum(true));
		this._controls.$next.toggleClass('disabled', !loop && index >= this._core.maximum(true));
	}
	this._controls.$absolute.toggleClass('disabled', !settings.dots || disabled);
	if (settings.dots) {
		// Add or remove dots until their count matches the page count.
		difference = this._pages.length - this._controls.$absolute.children().length;
		if (settings.dotsData && difference !== 0) {
			this._controls.$absolute.html(this._templates.join(''));
		} else if (difference > 0) {
			this._controls.$absolute.append(new Array(difference + 1).join(this._templates[0]));
		} else if (difference < 0) {
			this._controls.$absolute.children().slice(difference).remove();
		}
		// Highlight the dot of the current page.
		this._controls.$absolute.find('.active').removeClass('active');
		this._controls.$absolute.children().eq($.inArray(this.current(), this._pages)).addClass('active');
	}
};
/**
 * Extends event data with the current paging information.
 * @protected
 * @param {Event} event - The event object which gets thrown.
 */
Navigation.prototype.onTrigger = function(event) {
	var settings = this._core.settings,
		pageSize = settings && (settings.center || settings.autoWidth || settings.dotsData
			? 1 : settings.dotsEach || settings.items);
	event.page = {
		index: $.inArray(this.current(), this._pages),
		count: this._pages.length,
		size: pageSize
	};
};
/**
 * Gets the page object that contains the carousel's current position.
 * @protected
 * @returns {Object|undefined} The matching entry of `_pages`.
 */
Navigation.prototype.current = function() {
	var position = this._core.relative(this._core.current()),
		matches = $.grep(this._pages, $.proxy(function(page, index) {
			return page.start <= position && page.end >= position;
		}, this));
	return matches.pop();
};
/**
 * Gets the successor/predecessor position for `next`/`prev`.
 * @protected
 * @param {Boolean} successor - True for the next position, false for the previous one.
 * @returns {Number}
 */
Navigation.prototype.getPosition = function(successor) {
	var position, length,
		settings = this._core.settings;
	if (settings.slideBy == 'page') {
		// Page-wise: step through `_pages`, wrapping around at the ends.
		position = $.inArray(this.current(), this._pages);
		length = this._pages.length;
		successor ? ++position : --position;
		position = this._pages[((position % length) + length) % length].start;
	} else {
		// Item-wise: step by `slideBy` items (no wrapping here).
		position = this._core.relative(this._core.current());
		length = this._core.items().length;
		successor ? position += settings.slideBy : position -= settings.slideBy;
	}
	return position;
};
/**
 * Slides to the next item or page.
 * @public
 * @param {Number} [speed=false] - The time in milliseconds for the transition.
 */
Navigation.prototype.next = function(speed) {
	// Delegate to the original core `to` with the successor position.
	this._overrides.to.call(this._core, this.getPosition(true), speed);
};
/**
 * Slides to the previous item or page.
 * @public
 * @param {Number} [speed=false] - The time in milliseconds for the transition.
 */
Navigation.prototype.prev = function(speed) {
	// Delegate to the original core `to` with the predecessor position.
	this._overrides.to.call(this._core, this.getPosition(false), speed);
};
/**
 * Slides to the specified item or page.
 * @public
 * @param {Number} position - The position of the item or page.
 * @param {Number} [speed] - The time in milliseconds for the transition.
 * @param {Boolean} [standard=false] - Whether to use the standard behaviour or not.
 */
Navigation.prototype.to = function(position, speed, standard) {
	var length;
	if (standard || !this._pages.length) {
		// Standard behaviour: forward the raw position to the core.
		this._overrides.to.call(this._core, position, speed);
	} else {
		// Page-wise behaviour: wrap the page index and jump to its start.
		length = this._pages.length;
		this._overrides.to.call(this._core, this._pages[((position % length) + length) % length].start, speed);
	}
};
$.fn.owlCarousel.Constructor.Plugins.Navigation = Navigation;
})(window.Zepto || window.jQuery, window, document);
/**
* Hash Plugin
* @version 2.3.4
* @author Artus Kolanowski
* @author David Deutsch
* @license The MIT License (MIT)
*/
;(function($, window, document, undefined) {
'use strict';
/**
 * Creates the hash plugin.
 * @class The Hash Plugin
 * @param {Owl} carousel - The Owl Carousel
 */
var Hash = function(carousel) {
	/**
	 * Reference to the core.
	 * @protected
	 * @type {Owl}
	 */
	this._core = carousel;
	/**
	 * Hash index for the items, mapping `data-hash` values to item content.
	 * @protected
	 * @type {Object}
	 */
	this._hashes = {};
	/**
	 * The carousel element.
	 * @type {jQuery}
	 */
	this.$element = this._core.$element;
	/**
	 * All event handlers.
	 * @protected
	 * @type {Object}
	 */
	this._handlers = {
		// Jump to the item addressed by the URL hash once initialized.
		'initialized.owl.carousel': $.proxy(function(e) {
			if (e.namespace && this._core.settings.startPosition === 'URLHash') {
				$(window).trigger('hashchange.owl.navigation');
			}
		}, this),
		// Index prepared items by their `data-hash` attribute.
		'prepared.owl.carousel': $.proxy(function(e) {
			if (e.namespace) {
				var hash = $(e.content).find('[data-hash]').addBack('[data-hash]').attr('data-hash');
				if (!hash) {
					return;
				}
				this._hashes[hash] = e.content;
			}
		}, this),
		// Reflect the current item in the URL hash on position changes.
		'changed.owl.carousel': $.proxy(function(e) {
			if (e.namespace && e.property.name === 'position') {
				var current = this._core.items(this._core.relative(this._core.current())),
					hash = $.map(this._hashes, function(item, hash) {
						return item === current ? hash : null;
					}).join();
				// Do nothing when the item has no hash or it is already set.
				if (!hash || window.location.hash.slice(1) === hash) {
					return;
				}
				window.location.hash = hash;
			}
		}, this)
	};
	// set default options
	this._core.options = $.extend({}, Hash.Defaults, this._core.options);
	// register the event handlers
	this.$element.on(this._handlers);
	// register event listener for hash navigation
	$(window).on('hashchange.owl.navigation', $.proxy(function(e) {
		var hash = window.location.hash.substring(1),
			items = this._core.$stage.children(),
			position = this._hashes[hash] && items.index(this._hashes[hash]);
		// Ignore unknown hashes and no-op changes.
		if (position === undefined || position === this._core.current()) {
			return;
		}
		this._core.to(this._core.relative(position), false, true);
	}, this));
};
/**
 * Default options.
 * @public
 */
Hash.Defaults = {
	// Whether to listen for URL hash changes and navigate accordingly.
	URLhashListener: false
};
/**
 * Destroys the plugin: unbinds window and element handlers and releases members.
 * @public
 */
Hash.prototype.destroy = function() {
	var handler, properties, i;
	$(window).off('hashchange.owl.navigation');
	for (handler in this._handlers) {
		this._core.$element.off(handler, this._handlers[handler]);
	}
	// Iterate the VALUES of the name array (the original `for…in` walked
	// the array indices, so no member was ever released).
	properties = Object.getOwnPropertyNames(this);
	for (i = 0; i < properties.length; i++) {
		typeof this[properties[i]] != 'function' && (this[properties[i]] = null);
	}
};
$.fn.owlCarousel.Constructor.Plugins.Hash = Hash;
})(window.Zepto || window.jQuery, window, document);
/**
* Support Plugin
*
* @version 2.3.4
* @author Vivid Planet Software GmbH
* @author Artus Kolanowski
* @author David Deutsch
* @license The MIT License (MIT)
*/
;(function($, window, document, undefined) {

	// Detached probe element whose style object is used for feature detection.
	var style = $('<support>').get(0).style,
		prefixes = 'Webkit Moz O ms'.split(' '),
		// Vendor-specific end-event names, keyed by the detected
		// (possibly prefixed) CSS property name.
		events = {
			transition: {
				end: {
					WebkitTransition: 'webkitTransitionEnd',
					MozTransition: 'transitionend',
					OTransition: 'oTransitionEnd',
					transition: 'transitionend'
				}
			},
			animation: {
				end: {
					WebkitAnimation: 'webkitAnimationEnd',
					MozAnimation: 'animationend',
					OAnimation: 'oAnimationEnd',
					animation: 'animationend'
				}
			}
		},
		tests = {
			csstransforms: function() {
				return !!test('transform');
			},
			csstransforms3d: function() {
				return !!test('perspective');
			},
			csstransitions: function() {
				return !!test('transition');
			},
			cssanimations: function() {
				return !!test('animation');
			}
		};

	/**
	 * Tests whether a CSS property is supported, plain or vendor-prefixed.
	 * @param {String} property - Unprefixed CSS property name.
	 * @param {Boolean} [prefixed] - When truthy, return the supported
	 *   (possibly prefixed) property name instead of `true`.
	 * @returns {Boolean|String} `false` when the property is unsupported.
	 */
	function test(property, prefixed) {
		var result = false,
			upper = property.charAt(0).toUpperCase() + property.slice(1);

		$.each((property + ' ' + prefixes.join(upper + ' ') + upper).split(' '), function(i, property) {
			if (style[property] !== undefined) {
				result = prefixed ? property : true;
				return false;
			}
		});

		return result;
	}

	/**
	 * Returns the supported (possibly vendor-prefixed) name of a CSS property.
	 */
	function prefixed(property) {
		return test(property, true);
	}

	// Fix: added the statement-terminating semicolons that were missing after
	// the `new String(...)` assignments (relied on ASI; inconsistent with the
	// rest of the file).
	if (tests.csstransitions()) {
		/* jshint -W053 */
		$.support.transition = new String(prefixed('transition'));
		$.support.transition.end = events.transition.end[ $.support.transition ];
	}

	if (tests.cssanimations()) {
		/* jshint -W053 */
		$.support.animation = new String(prefixed('animation'));
		$.support.animation.end = events.animation.end[ $.support.animation ];
	}

	if (tests.csstransforms()) {
		/* jshint -W053 */
		$.support.transform = new String(prefixed('transform'));
		$.support.transform3d = tests.csstransforms3d();
	}

})(window.Zepto || window.jQuery, window, document);
|
import binascii
import json
import os
from pathlib import Path
from django.contrib.auth.models import User
from django.db.models import Q
from django.db import models
class FindJobManager(models.Manager):
    """Manager that hides salt's internal `saltutil.find_job` polling jobs."""

    def get_queryset(self):
        # find_job returns are generated while salt waits on running jobs;
        # they are noise in any job listing.
        return super().get_queryset().exclude(fun="saltutil.find_job")
class Jids(models.Model):
    """Salt job load, from the master's `jids` external job cache table."""

    jid = models.CharField(primary_key=True, db_index=True, max_length=255)
    load = models.TextField()

    def loaded_load(self):
        """Return the job load deserialized from JSON."""
        return json.loads(self.load)

    def user(self):
        """Return the user recorded in the job load, or '' when absent."""
        if "user" in self.loaded_load():
            return self.loaded_load()["user"]
        return ""

    class Meta:
        managed = False  # table is created and owned by salt, not Django
        db_table = "jids"
        app_label = "api"
class SaltReturns(models.Model):
    """Salt job return, from the master's `salt_returns` job cache table."""

    fun = models.CharField(max_length=50, db_index=True)
    jid = models.CharField(max_length=255, db_index=True)
    # Field renamed because it was a Python reserved word.
    return_field = models.TextField(db_column="return")
    id = models.CharField(max_length=255, primary_key=True)
    success = models.CharField(max_length=10)
    full_ret = models.TextField()
    alter_time = models.DateTimeField()
    # Default manager filters out salt's internal saltutil.find_job polls.
    objects = FindJobManager()

    def loaded_ret(self):
        """Return full_ret deserialized from JSON."""
        return json.loads(self.full_ret)

    def user(self):
        """Return the user that launched this job, looked up via its jid."""
        # TODO: find a better way?
        return Jids.objects.get(jid=self.jid).user()

    def arguments(self):
        """Positional arguments of the job (fun_args tokens without '=')."""
        ret = self.loaded_ret()
        if "fun_args" in ret and ret["fun_args"]:
            return " ".join(str(i) for i in ret["fun_args"] if "=" not in str(i))
        return ""

    def keyword_arguments(self):
        """Keyword arguments of the job (fun_args tokens containing '=')."""
        ret = self.loaded_ret()
        if "fun_args" in ret and ret["fun_args"]:
            return " ".join(str(i) for i in ret["fun_args"] if "=" in str(i))
        return ""

    def success_bool(self):
        """Best-effort success flag for the job.

        Falls back to returning the jid (truthy) when success cannot be
        determined — preserved historical behavior; callers may rely on it.
        """
        ret = self.loaded_ret()
        if "success" in ret:
            return ret["success"]
        if "return" in ret:
            # It shouldn't happen unless you have a custom module,
            # so let's assume we can trust retcode.
            if isinstance(ret["return"], (str, bool)):
                return "retcode" in ret and ret["retcode"] == 0
            if "success" in ret["return"]:
                return ret["return"]["success"]
            if "result" in ret["return"]:
                return ret["return"]["result"]
        return self.jid

    class Meta:
        managed = False  # table is created and owned by salt, not Django
        db_table = "salt_returns"
        app_label = "api"
class SaltEvents(models.Model):
    """Salt event bus entry, from the master's `salt_events` returner table."""

    id = models.BigAutoField(primary_key=True)
    tag = models.CharField(max_length=255, db_index=True)
    data = models.TextField()  # event payload, JSON-encoded by salt
    alter_time = models.DateTimeField()
    master_id = models.CharField(max_length=255)

    class Meta:
        managed = False  # table is created and owned by salt, not Django
        db_table = "salt_events"
        app_label = "api"
# Alcali custom.
class Functions(models.Model):
    """Salt function known to Alcali: its name, type and description."""

    name = models.CharField(max_length=255)
    type = models.CharField(max_length=255)
    description = models.TextField()

    def __str__(self):
        # String form is just the function name.
        return str(self.name)

    class Meta:
        db_table = "salt_functions"
        app_label = "api"
class JobTemplate(models.Model):
    """Reusable named job definition (Alcali custom table)."""

    name = models.CharField(max_length=255)
    job = models.TextField()  # serialized job definition

    def __str__(self):
        return "{}".format(self.name)

    class Meta:
        db_table = "salt_job_template"
        app_label = "api"
class Minions(models.Model):
    """Salt minion with cached grains and pillar data (Alcali custom table)."""

    minion_id = models.CharField(max_length=128, null=False, blank=False)
    grain = models.TextField()
    pillar = models.TextField()

    def loaded_grain(self):
        """Return grains deserialized from JSON."""
        return json.loads(self.grain)

    def loaded_pillar(self):
        """Return pillar data deserialized from JSON."""
        return json.loads(self.pillar)

    def last_job(self):
        """Return the most recent job return for this minion, or None."""
        return (
            SaltReturns.objects.filter(id=self.minion_id)
            .order_by("-alter_time")
            .first()
        )

    def last_highstate(self):
        """Return the latest highstate-like job (no args, or test-only), or None."""
        # Get all potential jobs.
        states = SaltReturns.objects.filter(
            Q(fun="state.apply") | Q(fun="state.highstate"), id=self.minion_id
        )
        # NOTE(review): sorts jids lexically, newest first — assumes jid strings
        # order chronologically (salt jids are timestamps); confirm.
        states = sorted(states, key=lambda x: x.jid, reverse=True)
        # Remove jobs with arguments.
        for state in states:
            if (
                not state.loaded_ret()["fun_args"]
                or state.loaded_ret()["fun_args"][0] == {"test": True}
                or state.loaded_ret()["fun_args"][0] == "test=True"
            ):
                return state
        return None

    def conformity(self):
        """True when every state of the last highstate succeeded.

        Returns None when no highstate was found, False on any failure or on
        a flat error return.
        """
        last_highstate = self.last_highstate()
        if not last_highstate:
            return None
        highstate_ret = last_highstate.loaded_ret()
        # Flat out error(return is a string)
        return_item = highstate_ret.get("return")
        if not return_item or isinstance(return_item, list):
            return False
        for state in return_item:
            # One of the state is not ok
            if not return_item.get(state, {}).get("result"):
                return False
        return True

    def custom_conformity(self, fun, *args):
        """Return the `return` payload of the newest job matching fun (and args).

        Returns False when no job matches; implicitly returns None when args
        are given but none of the jobs' string fun_args equal them exactly.
        """
        # First, filter with fun.
        jobs = SaltReturns.objects.filter(fun=fun, id=self.minion_id).order_by(
            "-alter_time"
        )
        if not jobs:
            return False
        if args:
            for job in jobs:
                ret = job.loaded_ret()
                # if provided args are the same (symmetric difference is empty).
                if not list(
                    set(args) ^ {i for i in ret["fun_args"] if isinstance(i, str)}
                ):
                    return ret["return"]
        # If no args or kwargs, just return the first job.
        else:
            job = jobs.first()
            return job.loaded_ret()["return"]

    def __str__(self):
        return "{}".format(self.minion_id)

    class Meta:
        db_table = "salt_minions"
        app_label = "api"
class Keys(models.Model):
    """Salt minion public key and its acceptance status."""

    # Mirrors salt-key's four key states.
    KEY_STATUS = (
        ("accepted", "accepted"),
        ("rejected", "rejected"),
        ("denied", "denied"),
        ("unaccepted", "unaccepted"),
    )
    minion_id = models.CharField(max_length=255)
    pub = models.TextField(blank=True)  # PEM public key, may be empty
    status = models.CharField(max_length=64, choices=KEY_STATUS)

    def __str__(self):
        return "{}".format(self.minion_id)

    class Meta:
        # TODO add constraints (only one accepted per minion_id)
        db_table = "salt_keys"
        app_label = "api"
class MinionsCustomFields(models.Model):
    """User-defined field shown on a minion, populated by a salt function."""

    name = models.CharField(max_length=255)
    value = models.TextField()
    minion = models.ForeignKey(
        Minions, related_name="custom_fields", on_delete=models.CASCADE
    )
    function = models.CharField(max_length=255)  # salt function that fills `value`

    def __str__(self):
        return "{}: {}".format(self.name, self.function)

    class Meta:
        db_table = "minions_custom_fields"
        app_label = "api"
class Schedule(models.Model):
    """Scheduled salt job on a minion; `job` holds the JSON definition."""

    minion = models.CharField(max_length=128, null=False, blank=False)
    name = models.CharField(max_length=255, blank=False, null=False)
    job = models.TextField()

    def loaded_job(self):
        """Return the schedule definition deserialized from JSON."""
        return json.loads(self.job)

    class Meta:
        app_label = "api"
def generate_key():
    """Return a random API token: 40 lowercase hex chars (20 random bytes)."""
    return os.urandom(20).hex()
class UserSettings(models.Model):
    """
    The default authorization token model.
    """

    # NOTE(review): these files are read at class-definition (import) time;
    # a missing file makes the whole module unimportable — confirm intended.
    with open(
        os.path.join(Path(__file__).parent.absolute(), "migrations/usersettings.json"),
        "r",
    ) as fh:
        data = json.load(fh)
    with open(
        os.path.join(Path(__file__).parent.absolute(), "migrations/alcali_permission.json"),
        "r",
    ) as fh1:
        data1 = fh1.read()
    user = models.OneToOneField(
        User, primary_key=True, related_name="user_settings", on_delete=models.CASCADE
    )
    token = models.CharField(max_length=40)
    created = models.DateTimeField(auto_now_add=True)
    # NOTE(review): `data` is a mutable default shared by all rows; Django
    # recommends passing a callable as JSONField default — verify.
    settings = models.JSONField(default=data)
    salt_permissions = models.TextField()
    # CK create new data columns
    alcali_permissions = models.TextField(default=data1)

    def generate_token(self):
        """Assign a fresh random token and persist it."""
        self.token = generate_key()
        self.save()

    class Meta:
        db_table = "user_settings"
        app_label = "api"

    def save(self, *args, **kwargs):
        # Ensure every row gets a token even when created without one.
        if not self.token:
            self.token = generate_key()
        return super(UserSettings, self).save(*args, **kwargs)

    def __str__(self):
        return str(self.user)
class Conformity(models.Model):
    """Named conformity check backed by a salt function."""

    name = models.CharField(max_length=255)
    function = models.CharField(max_length=255)

    class Meta:
        db_table = "conformity"
        app_label = "api"
|
import {List} from 'immutable';
import {createReducer} from '../../../src/utils/store';
import {
createNewMessage,
createLinkSnippet,
createComponentMessage,
} from '../../../src/utils/messages';
import {MESSAGE_SENDER} from '../../../src/constants.js';
import * as actionTypes from '../actions/actionTypes';
// The chat message log starts empty (Immutable.List).
const initialState = List([]);

// Map of action type -> case reducer. Every case returns a NEW List;
// state is never mutated in place.
const messagesReducer = {
  [actionTypes.ADD_NEW_USER_MESSAGE]: (state, {text}) =>
    state.push(createNewMessage(text, MESSAGE_SENDER.CLIENT)),
  [actionTypes.ADD_NEW_RESPONSE_MESSAGE]: (state, {text}) =>
    state.push(createNewMessage(text, MESSAGE_SENDER.RESPONSE)),
  [actionTypes.ADD_NEW_LINK_SNIPPET]: (state, {link}) =>
    state.push(createLinkSnippet(link, MESSAGE_SENDER.RESPONSE)),
  [actionTypes.ADD_COMPONENT_MESSAGE]: (
    state,
    {component, props, showAvatar},
  ) => state.push(createComponentMessage(component, props, showAvatar)),
  // Clears the whole log.
  [actionTypes.DROP_MESSAGES]: () => List([]),
  // Hides the avatar on the message at `index`.
  [actionTypes.HIDE_AVATAR]: (state, {index}) =>
    state.update(index, message => message.set('showAvatar', false)),
};

// Delegate dispatching to the shared createReducer helper.
export default (state = initialState, action) =>
  createReducer(messagesReducer, state, action);
|
import _ from 'lodash';
import uiModules from 'ui/modules';
import contextAppTemplate from './app.html';
import './components/loading_button';
import './components/size_picker/size_picker';
import {
createInitialQueryParametersState,
QueryParameterActionsProvider,
QUERY_PARAMETER_KEYS,
} from './query_parameters';
import {
createInitialLoadingStatusState,
LOADING_STATUS,
QueryActionsProvider,
} from './query';
// Angular module for the context app, with its required dependencies.
const module = uiModules.get('apps/context', [
  'elasticsearch',
  'kibana',
  'kibana/config',
  'kibana/notify',
  'ngRoute',
]);

// <context-app> element directive. All scope bindings are two-way ('=').
module.directive('contextApp', function ContextApp() {
  return {
    bindToController: true,
    controller: ContextAppController,
    controllerAs: 'contextApp',
    restrict: 'E',
    scope: {
      anchorUid: '=',
      columns: '=',
      indexPattern: '=',
      predecessorCount: '=',
      successorCount: '=',
      sort: '=',
      discoverUrl: '=',
    },
    template: contextAppTemplate,
  };
});
/**
 * Controller for the context app: wires the action providers to a single
 * state object and keeps directive bindings and query parameters in sync.
 */
function ContextAppController($scope, config, Private) {
  const queryParameterActions = Private(QueryParameterActionsProvider);
  const queryActions = Private(QueryActionsProvider);

  this.state = createInitialState(
    parseInt(config.get('context:step'), 10),
    this.discoverUrl,
  );

  // Bind every action to this controller's state: action(state)(...args).
  this.actions = _.mapValues(Object.assign(
    {},
    queryParameterActions,
    queryActions,
  ), (action) => (...args) => action(this.state)(...args));

  this.constants = {
    LOADING_STATUS,
  };

  // Push row changes (predecessors/anchor/successors) into the actions layer.
  $scope.$watchGroup([
    () => this.state.rows.predecessors,
    () => this.state.rows.anchor,
    () => this.state.rows.successors,
  ], (newValues) => this.actions.setAllRows(...newValues));

  /**
   * Sync query parameters to arguments
   */
  $scope.$watchCollection(
    () => _.pick(this, QUERY_PARAMETER_KEYS),
    (newValues) => {
      // break the watch cycle
      if (!_.isEqual(newValues, this.state.queryParameters)) {
        this.actions.fetchAllRowsWithNewQueryParameters(newValues);
      }
    },
  );

  // Mirror state.queryParameters back onto the controller/bindings.
  $scope.$watchCollection(
    () => this.state.queryParameters,
    (newValues) => {
      _.assign(this, newValues);
    },
  );
}
/**
 * Builds the initial state object for the context app.
 * @param {number} defaultStepSize - initial predecessor/successor step size.
 * @param {string} discoverUrl - URL back to the Discover app.
 */
function createInitialState(defaultStepSize, discoverUrl) {
  const emptyRows = {
    all: [],
    anchor: null,
    predecessors: [],
    successors: [],
  };

  return {
    queryParameters: createInitialQueryParametersState(defaultStepSize),
    rows: emptyRows,
    loadingStatus: createInitialLoadingStatusState(),
    navigation: {
      discover: { url: discoverUrl },
    },
  };
}
|
import statistics
from data.seek_sets import seek_set
from puzzle.heuristics import acrostic
from puzzle.problems import problem
class AcrosticProblem(problem.Problem):
    """Puzzle problem that takes letters from a list of words (an acrostic)."""

    def __init__(self, name, lines, **kwargs):
        super(AcrosticProblem, self).__init__(name, lines, **kwargs)
        self._acrostic = acrostic.Acrostic(_normalize(lines))

    @staticmethod
    def score(lines):
        """Heuristic likelihood (0..1) that `lines` describe an acrostic."""
        if len(lines) <= 1:
            return 0
        # An explicit '@'-prefixed index line covering every word is definitive.
        indexes = _parse_indexes(lines[0])
        if indexes and len(indexes) + 1 == len(lines):
            return 1
        # Apply heuristic.
        num_words = 0
        line_lengths = []
        for line in lines:
            num_words += line.count(' ') + 1
            line_lengths.append(len(line))
        max_line_length = max(line_lengths)
        # Return a perfect score if:
        # - There are more than 4 words.
        # - Average line length is >4 letters.
        # - The words have identical lengths.
        # - There aren't spaces.
        # Max .5 for 2 lines; .75 for 3 lines; 1.0 for 4+ lines.
        num_lines_weight = min(4, len(lines)) / 4
        # Normalize by line length to punish large absolute swings in lengths and
        # forgive +/- 1 character changes for already-long words.
        line_stddev = statistics.stdev([l / max_line_length for l in line_lengths])
        stddev_weight = 1 - line_stddev
        # Punish results with multiple words per line.
        line_length_weight = len(lines) / num_words
        # NOTE(review): line_length_weight appears TWICE in this product
        # (squaring the multi-word penalty) — possibly a copy-paste slip;
        # confirm against the intended heuristic before changing.
        return (num_lines_weight * line_length_weight * stddev_weight *
                line_length_weight)

    def _solve_iter(self):
        # Stream candidate solutions; stop once weights fall below threshold.
        for solution, weight in self._acrostic.items():
            if weight < self._solution_constraints.weight_threshold:
                return
            yield solution, weight
def _normalize(lines):
    """Convert raw puzzle lines into a SeekSet.

    A leading '@' line supplies explicit indexes; a '* ' prefix marks the
    sets as permutable; a trailing '?' makes that set a wildcard (None).
    """
    if lines[0].startswith('@'):
        indexes = _parse_indexes(lines[0])
        body = lines[1:]
    else:
        indexes = None
        body = lines

    permutable = False
    sets = []
    for raw in body:
        if raw.startswith('* '):
            permutable = True
            raw = raw[2:]
        if raw.endswith('?'):
            sets.append(None)  # wildcard set
        else:
            sets.append(''.join(raw.split()).lower())

    return seek_set.SeekSet(
        sets, sets_permutable=permutable, indexes=indexes)
def _parse_indexes(line):
if not line.startswith('@'):
return None
parts = line[1:].split()
results = []
for part in parts:
if part == '?':
results.append(None)
else:
results.append(int(part))
return results
|
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains the tests of the handler class of the simple_service_search skill."""
import logging
from pathlib import Path
from typing import cast
from unittest.mock import patch
from aea.helpers.search.models import (
Attribute,
Constraint,
ConstraintType,
DataModel,
Description,
Query,
)
from aea.protocols.dialogue.base import DialogueMessage
from aea.test_tools.test_skill import BaseSkillTestCase
from packages.fetchai.protocols.oef_search.message import OefSearchMessage
from packages.fetchai.skills.simple_service_search.dialogues import OefSearchDialogues
from packages.fetchai.skills.simple_service_search.handlers import OefSearchHandler
from packages.fetchai.skills.simple_service_search.strategy import Strategy
from tests.conftest import ROOT_DIR
class TestOefSearchHandler(BaseSkillTestCase):
    """Test oef_search handler of simple_service_search."""

    # Skill under test, resolved relative to the repository root.
    path_to_skill = Path(
        ROOT_DIR, "packages", "fetchai", "skills", "simple_service_search"
    )

    @classmethod
    def setup(cls):
        """Setup the test class."""
        super().setup()
        cls.oef_search_handler = cast(
            OefSearchHandler, cls._skill.skill_context.handlers.oef_search
        )
        cls.strategy = cast(Strategy, cls._skill.skill_context.strategy)
        cls.logger = cls._skill.skill_context.logger
        cls.oef_search_dialogues = cast(
            OefSearchDialogues, cls._skill.skill_context.oef_search_dialogues
        )
        cls.data = b"some_body"
        # Query fixture used as the first message of every prepared dialogue.
        cls.query = Query(
            [Constraint("some_attribute", ConstraintType("==", "some_service"))],
            DataModel(
                "some_name",
                [
                    Attribute(
                        "some_attribute", str, False, "Some attribute descriptions."
                    )
                ],
            ),
        )
        cls.mocked_description = Description({"foo1": 1, "bar1": 2})
        cls.list_of_messages = (
            DialogueMessage(
                OefSearchMessage.Performative.SEARCH_SERVICES, {"query": cls.query}
            ),
        )

    def test_setup(self):
        """Test the setup method of the oef_search handler."""
        assert self.oef_search_handler.setup() is None
        self.assert_quantity_in_outbox(0)

    def test_handle_unidentified_dialogue(self):
        """Test the _handle_unidentified_dialogue method of the oef_search handler."""
        # setup
        incorrect_dialogue_reference = ("", "")
        incoming_message = self.build_incoming_message(
            message_type=OefSearchMessage,
            dialogue_reference=incorrect_dialogue_reference,
            performative=OefSearchMessage.Performative.OEF_ERROR,
            oef_error_operation=OefSearchMessage.OefErrorOperation.REGISTER_SERVICE,
        )
        # operation
        with patch.object(self.logger, "log") as mock_logger:
            self.oef_search_handler.handle(incoming_message)
        # after
        mock_logger.assert_any_call(
            logging.INFO,
            f"received invalid oef_search message={incoming_message}, unidentified dialogue.",
        )

    def test_handle_error(self):
        """Test the _handle_error method of the oef_search handler."""
        # setup
        oef_search_dialogue = self.prepare_skill_dialogue(
            dialogues=self.oef_search_dialogues, messages=self.list_of_messages[:1],
        )
        incoming_message = self.build_incoming_message_for_skill_dialogue(
            dialogue=oef_search_dialogue,
            performative=OefSearchMessage.Performative.OEF_ERROR,
            oef_error_operation=OefSearchMessage.OefErrorOperation.REGISTER_SERVICE,
        )
        # operation
        with patch.object(self.logger, "log") as mock_logger:
            self.oef_search_handler.handle(incoming_message)
        # after
        mock_logger.assert_any_call(
            logging.INFO,
            f"received oef_search error message={incoming_message} in dialogue={oef_search_dialogue}.",
        )

    def test_handle_search_i(self):
        """Test the _handle_search method of the oef_search handler where the number of agents found is greater than 0."""
        # setup
        agents = ("agent_1", "agent_2")
        oef_search_dialogue = self.prepare_skill_dialogue(
            dialogues=self.oef_search_dialogues, messages=self.list_of_messages[:1],
        )
        incoming_message = self.build_incoming_message_for_skill_dialogue(
            dialogue=oef_search_dialogue,
            performative=OefSearchMessage.Performative.SEARCH_RESULT,
            agents=agents,
            agents_info=OefSearchMessage.AgentsInfo(
                {
                    "agent_1": {"key_1": "value_1", "key_2": "value_2"},
                    "agent_2": {"key_3": "value_3", "key_4": "value_4"},
                }
            ),
        )
        # operation
        with patch.object(self.logger, "log") as mock_logger:
            self.oef_search_handler.handle(incoming_message)
        # after
        mock_logger.assert_any_call(
            logging.INFO,
            f"found number of agents={len(agents)}, search_response={incoming_message}",
        )
        # The handler must store the found agents in shared state.
        assert self.skill.skill_context._agent_context.shared_state[
            self.strategy.shared_storage_key
        ] == set(agents)

    def test_handle_search_ii(self):
        """Test the _handle_search method of the oef_search handler where the number of agents found is 0."""
        # setup
        agents = tuple()
        oef_search_dialogue = self.prepare_skill_dialogue(
            dialogues=self.oef_search_dialogues, messages=self.list_of_messages[:1],
        )
        incoming_message = self.build_incoming_message_for_skill_dialogue(
            dialogue=oef_search_dialogue,
            performative=OefSearchMessage.Performative.SEARCH_RESULT,
            agents=agents,
            agents_info=OefSearchMessage.AgentsInfo(
                {
                    "agent_1": {"key_1": "value_1", "key_2": "value_2"},
                    "agent_2": {"key_3": "value_3", "key_4": "value_4"},
                }
            ),
        )
        # operation
        with patch.object(self.logger, "log") as mock_logger:
            self.oef_search_handler.handle(incoming_message)
        # after
        mock_logger.assert_any_call(
            logging.INFO, f"no agents found, search_response={incoming_message}",
        )

    def test_handle_invalid(self):
        """Test the _handle_invalid method of the oef_search handler."""
        # setup
        incoming_message = self.build_incoming_message(
            message_type=OefSearchMessage,
            performative=OefSearchMessage.Performative.REGISTER_SERVICE,
            service_description=self.mocked_description,
        )
        # operation
        with patch.object(self.logger, "log") as mock_logger:
            self.oef_search_handler.handle(incoming_message)
        # after
        mock_logger.assert_any_call(
            logging.WARNING,
            f"cannot handle oef_search message of performative={incoming_message.performative} in dialogue={self.oef_search_dialogues.get_dialogue(incoming_message)}.",
        )

    def test_teardown(self):
        """Test the teardown method of the oef_search handler."""
        assert self.oef_search_handler.teardown() is None
        self.assert_quantity_in_outbox(0)
|
const {
  GraphQLID,
  GraphQLObjectType,
  GraphQLString,
  GraphQLNonNull
} = require('graphql')

// GraphQL object type for the authenticated user ("me").
// Fields are defined as a thunk so the schema can be built lazily;
// every field is non-nullable.
const MeType = new GraphQLObjectType({
  name: 'Me',
  fields: () => ({
    id: { type: GraphQLNonNull(GraphQLID) },
    email: { type: GraphQLNonNull(GraphQLString) },
    username: { type: GraphQLNonNull(GraphQLString) },
    apiKey: { type: GraphQLNonNull(GraphQLString) }
  })
})

module.exports = MeType
|
/*
* (c) Copyright IBM Corp. 2021
* (c) Copyright Instana Inc. and contributors 2020
*/
'use strict';

// Register collector-specific instrumentations with @instana/core so they
// are active before any tracing starts.
require('@instana/core').registerAdditionalInstrumentations([
  require('./instrumentation/process/edgemicro'),
  require('./instrumentation/process/childProcess')
]);
|
import spiceypy as spice
import numpy as np
import ale
from ale.base.type_sensor import Framer
from ale.transformation import FrameChain
from ale.rotation import TimeDependentRotation
from ale import util
class NaifSpice():
    def __enter__(self):
        """
        Called when the context is created. This is used
        to get the kernels furnished.
        """
        if self.kernels:
            # List comprehension used purely for its side effect of
            # furnishing each kernel with SPICE.
            [spice.furnsh(k) for k in self.kernels]
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """
        Called when the context goes out of scope. Once
        this is done, the object is out of scope and the
        kernels can be unloaded.
        """
        if self.kernels:
            [spice.unload(k) for k in self.kernels]
@property
def kernels(self):
if not hasattr(self, '_kernels'):
if 'kernels' in self._props.keys():
try:
self._kernels = util.get_kernels_from_isis_pvl(self._props['kernels'])
except Exception as e:
self._kernels = self._props['kernels']
else:
if not ale.spice_root:
raise EnvironmentError(f'ale.spice_root is not set, cannot search for metakernels. ale.spice_root = "{ale.spice_root}"')
search_results = util.get_metakernels(ale.spice_root, missions=self.short_mission_name, years=self.utc_start_time.year, versions='latest')
if search_results['count'] == 0:
raise ValueError(f'Failed to find metakernels. mission: {self.short_mission_name}, year:{self.utc_start_time.year}, versions="latest" spice root = "{ale.spice_root}"')
self._kernels = [search_results['data'][0]['path']]
return self._kernels
@property
def light_time_correction(self):
"""
Returns the type of light time correciton and abberation correction to
use in NAIF calls. Expects ikid to be defined. This must be the integer
Naif id code of the instrument.
This searches for the value of the NAIF keyword INS<ikid>_LIGHTTIME_CORRECTION.
If the keyword is not defined, then this defaults to light time
correction and abberation correction (LT+S).
Returns
-------
: str
The light time and abberation correction string for use in NAIF calls.
See https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/req/abcorr.html
for the different options available.
"""
try:
return spice.gcpool('INS{}_LIGHTTIME_CORRECTION'.format(self.ikid), 0, 1)[0]
except:
return 'LT+S'
    @property
    def odtx(self):
        """
        Returns the x coefficient for the optical distortion model
        Expects ikid to be defined. This must be the integer Naif id code of the instrument

        Returns
        -------
        : list
          Optical distortion x coefficients
        """
        return spice.gdpool('INS{}_OD_T_X'.format(self.ikid),0, 10).tolist()

    @property
    def odty(self):
        """
        Returns the y coefficient for the optical distortion model.
        Expects ikid to be defined. This must be the integer Naif id code of the instrument

        Returns
        -------
        : list
          Optical distortion y coefficients
        """
        return spice.gdpool('INS{}_OD_T_Y'.format(self.ikid), 0, 10).tolist()

    @property
    def odtk(self):
        """
        The coefficients for the radial distortion model
        Expects ikid to be defined. This must be the integer Naif id code of the instrument

        Returns
        -------
        : list
          Radial distortion coefficients
        """
        return spice.gdpool('INS{}_OD_K'.format(self.ikid),0, 3).tolist()
    @property
    def ikid(self):
        """
        Returns the Naif ID code for the instrument
        Expects the instrument_id to be defined. This must be a string containing
        the short name of the instrument.

        Returns
        -------
        : int
          Naif ID used for identifying the instrument in Spice kernels
        """
        return spice.bods2c(self.instrument_id)

    @property
    def spacecraft_id(self):
        """
        Returns the Naif ID code for the spacecraft
        Expects the spacecraft_name to be defined. This must be a string containing
        the name of the spacecraft.

        Returns
        -------
        : int
          Naif ID code for the spacecraft
        """
        return spice.bods2c(self.spacecraft_name)

    @property
    def target_id(self):
        """
        Returns the Naif ID code for the target body
        Expects target_name to be defined. This must be a string containing the name
        of the target body.

        Returns
        -------
        : int
          Naif ID code for the target body
        """
        return spice.bods2c(self.target_name)

    @property
    def target_frame_id(self):
        """
        Returns the Naif ID code for the target reference frame
        Expects the target_id to be defined. This must be the integer Naif ID code
        for the target body.

        Returns
        -------
        : int
          Naif ID code for the target frame
        """
        # cidfrm returns (frame id, frame name, found flag); only the id is used.
        frame_info = spice.cidfrm(self.target_id)
        return frame_info[0]

    @property
    def sensor_frame_id(self):
        """
        Returns the Naif ID code for the sensor reference frame
        Expects ikid to be defined. This must be the integer Naif id code of the instrument

        Returns
        -------
        : int
          Naif ID code for the sensor frame
        """
        return self.ikid
    @property
    def focal2pixel_lines(self):
        """
        Expects ikid to be defined. This must be the integer Naif id code of the instrument

        Returns
        -------
        : list<double>
          focal plane to detector lines
        """
        return list(spice.gdpool('INS{}_ITRANSL'.format(self.ikid), 0, 3))

    @property
    def focal2pixel_samples(self):
        """
        Expects ikid to be defined. This must be the integer Naif id code of the instrument

        Returns
        -------
        : list<double>
          focal plane to detector samples
        """
        return list(spice.gdpool('INS{}_ITRANSS'.format(self.ikid), 0, 3))

    @property
    def pixel2focal_x(self):
        """
        Expects ikid to be defined. This must be the integer Naif id code of the instrument

        Returns
        -------
        : list<double>
          detector to focal plane x
        """
        return list(spice.gdpool('INS{}_TRANSX'.format(self.ikid), 0, 3))

    @property
    def pixel2focal_y(self):
        """
        Expects ikid to be defined. This must be the integer Naif id code of the instrument

        Returns
        -------
        : list<double>
          detector to focal plane y
        """
        return list(spice.gdpool('INS{}_TRANSY'.format(self.ikid), 0, 3))

    @property
    def focal_length(self):
        """
        Returns the focal length of the sensor
        Expects ikid to be defined. This must be the integer Naif id code of the instrument

        Returns
        -------
        : float
          focal length
        """
        return float(spice.gdpool('INS{}_FOCAL_LENGTH'.format(self.ikid), 0, 1)[0])

    @property
    def pixel_size(self):
        """
        Expects ikid to be defined. This must be the integer Naif id code of the instrument

        Returns
        -------
        : float pixel size
        """
        # NOTE(review): the * 0.001 scales the kernel's PIXEL_SIZE value —
        # presumably mm -> m; confirm against the instrument kernels.
        return spice.gdpool('INS{}_PIXEL_SIZE'.format(self.ikid), 0, 1)[0] * 0.001
    @property
    def target_body_radii(self):
        """
        Returns a list containing the radii of the target body
        Expects target_name to be defined. This must be a string containing the name
        of the target body

        Returns
        -------
        : list<double>
          Radius of all three axis of the target body
        """
        # bodvrd returns (dimension, values); only the values array is needed.
        rad = spice.bodvrd(self.target_name, 'RADII', 3)
        return rad[1]

    @property
    def reference_frame(self):
        """
        Returns a string containing the name of the target reference frame
        Expects target_name to be defined. This must be a string containing the name
        of the target body

        Returns
        -------
        : str
          String name of the target reference frame
        """
        return 'IAU_{}'.format(self.target_name)
    @property
    def sun_position(self):
        """
        Returns a tuple with information detailing the sun position at the time
        of the image. Expects center_ephemeris_time to be defined. This must be
        a floating point number containing the average of the start and end ephemeris time.
        Expects reference frame to be defined. This must be a string containing the name of
        the target reference frame. Expects target_name to be defined. This must be
        a string containing the name of the target body.

        Returns
        -------
        : (sun_positions, sun_velocities, times)
          a tuple containing a list of sun positions, a list of sun velocities,
          and a list of ephemeris times
        """
        sun_state, _ = spice.spkezr("SUN",
                                    self.center_ephemeris_time,
                                    self.reference_frame,
                                    'LT+S',
                                    self.target_name)

        # SPICE state vectors are in km / km/s; scale to m / m/s.
        positions = 1000 * np.asarray([sun_state[:3]])
        velocities = 1000 * np.asarray([sun_state[3:6]])
        times = np.asarray([self.center_ephemeris_time])

        return positions, velocities, times
    @property
    def sensor_position(self):
        """
        Returns a tuple with information detailing the position of the sensor at the time
        of the image. Expects ephemeris_time to be defined. This must be a floating point number
        containing the ephemeris time. Expects spacecraft_name to be defined. This must be a
        string containing the name of the spacecraft containing the sensor. Expects
        reference_frame to be defined. This must be a string containing the name of
        the target reference frame. Expects target_name to be defined. This must be
        a string containing the name of the target body.

        Returns
        -------
        : (positions, velocities, times)
          a tuple containing a list of positions, a list of velocities, and a list of times
        """
        if not hasattr(self, '_position'):
            ephem = self.ephemeris_time
            pos = []
            vel = []

            target = self.spacecraft_name
            observer = self.target_name
            # Check for ISIS flag to fix target and observer swapping
            if self.swap_observer_target:
                target = self.target_name
                observer = self.spacecraft_name

            for time in ephem:
                # spkezr returns a vector from the observer's location to the aberration-corrected
                # location of the target. For more information, see:
                # https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/FORTRAN/spicelib/spkezr.html
                state, _ = spice.spkezr(target,
                                        time,
                                        self.reference_frame,
                                        self.light_time_correction,
                                        observer)
                if self.swap_observer_target:
                    # Negate to flip the vector back to spacecraft-relative.
                    pos.append(-state[:3])
                    vel.append(-state[3:])
                else:
                    pos.append(state[:3])
                    vel.append(state[3:])

            # By default, SPICE works in km, so convert to m
            self._position = [p * 1000 for p in pos]
            self._velocity = [v * 1000 for v in vel]
        return self._position, self._velocity, self.ephemeris_time
    @property
    def frame_chain(self):
        """Cached FrameChain from the sensor frame to the target frame,
        built from SPICE data at this image's ephemeris times."""
        if not hasattr(self, '_frame_chain'):
            self._frame_chain = FrameChain.from_spice(sensor_frame=self.sensor_frame_id,
                                                      target_frame=self.target_frame_id,
                                                      center_ephemeris_time=self.center_ephemeris_time,
                                                      ephemeris_times=self.ephemeris_time)
        return self._frame_chain
    @property
    def sensor_orientation(self):
        """
        Returns quaternions describing the sensor orientation. Expects ephemeris_time
        to be defined. This must be a floating point number containing the
        ephemeris time. Expects instrument_id to be defined. This must be a string
        containing the short name of the instrument. Expects reference frame to be defined.
        This must be a string containing the name of the target reference frame.

        Returns
        -------
        : list
          Quaternions describing the orientation of the sensor
        """
        if not hasattr(self, '_orientation'):
            ephem = self.ephemeris_time

            qua = np.empty((len(ephem), 4))
            for i, time in enumerate(ephem):
                # Find the rotation matrix
                camera2bodyfixed = spice.pxform(self.instrument_id,
                                                self.reference_frame,
                                                time)
                q = spice.m2q(camera2bodyfixed)
                # Reorder from SPICE's (w, x, y, z) to (x, y, z, w).
                qua[i,:3] = q[1:]
                qua[i,3] = q[0]
            self._orientation = qua
        return self._orientation.tolist()
@property
def ephemeris_start_time(self):
    """
    Returns the starting ephemeris time of the image. Expects
    spacecraft_id to be defined as the integer NAIF id code for the
    spacecraft and spacecraft_clock_start_count to be defined as the
    start clock count string of the spacecraft.

    Returns
    -------
    : double
        Starting ephemeris time of the image
    """
    # Convert the spacecraft clock start count to ephemeris time (ET).
    start_count = self.spacecraft_clock_start_count
    return spice.scs2e(self.spacecraft_id, start_count)
@property
def ephemeris_stop_time(self):
    """
    Returns the ephemeris stop time of the image. Expects spacecraft_id
    to be defined as the integer NAIF id code for the spacecraft and
    spacecraft_clock_stop_count to be defined as the stop clock count
    string of the spacecraft.

    Returns
    -------
    : double
        Ephemeris stop time of the image
    """
    # Convert the spacecraft clock stop count to ephemeris time (ET).
    stop_count = self.spacecraft_clock_stop_count
    return spice.scs2e(self.spacecraft_id, stop_count)
@property
def detector_center_sample(self):
    """
    Returns the center detector sample. Expects ikid to be defined as the
    integer NAIF id code of the instrument.

    Returns
    -------
    : float
        Detector sample of the principal point
    """
    # Look up the boresight sample from the instrument kernel pool.
    key = 'INS{}_BORESIGHT_SAMPLE'.format(self.ikid)
    return float(spice.gdpool(key, 0, 1)[0])
@property
def detector_center_line(self):
    """
    Returns the center detector line. Expects ikid to be defined as the
    integer NAIF id code of the instrument.

    Returns
    -------
    : float
        Detector line of the principal point
    """
    # Look up the boresight line from the instrument kernel pool.
    key = 'INS{}_BORESIGHT_LINE'.format(self.ikid)
    return float(spice.gdpool(key, 0, 1)[0])
@property
def swap_observer_target(self):
    """
    Returns whether the observer and target should be swapped when
    determining the sensor state relative to the target. This is defined
    by a keyword in ISIS IAKs. If the keyword is not defined in any
    loaded kernels then False is returned.

    Expects ikid to be defined as the integer NAIF id code of the
    instrument.

    Returns
    -------
    : bool
        True if the kernel keyword is present and equals "TRUE"
        (case-insensitive), otherwise False.
    """
    try:
        swap = spice.gcpool('INS{}_SWAP_OBSERVER_TARGET'.format(self.ikid), 0, 1)[0]
        return swap.upper() == "TRUE"
    # A bare `except:` would also swallow SystemExit/KeyboardInterrupt;
    # only treat ordinary lookup failures as "keyword not defined".
    except Exception:
        return False
@property
def correct_lt_to_surface(self):
    """
    Returns whether light time correction should be made to the surface
    instead of to the center of the body. This is defined by a keyword in
    ISIS IAKs. If the keyword is not defined in any loaded kernels then
    False is returned.

    Expects ikid to be defined as the integer NAIF id code of the
    instrument.

    Returns
    -------
    : bool
        True if the kernel keyword is present and equals "TRUE"
        (case-insensitive), otherwise False.
    """
    try:
        surface_correct = spice.gcpool('INS{}_LT_SURFACE_CORRECT'.format(self.ikid), 0, 1)[0]
        return surface_correct.upper() == "TRUE"
    # A bare `except:` would also swallow SystemExit/KeyboardInterrupt;
    # only treat ordinary lookup failures as "keyword not defined".
    except Exception:
        return False
@property
def naif_keywords(self):
    """
    Returns
    -------
    : dict
        Dictionary of keywords and values that ISIS creates and attaches
        to the label
    """
    if not hasattr(self, "_naif_keywords"):
        # Seed with the body radii and identifying codes, then overlay
        # everything from the kernel pool that mentions the instrument
        # or the target id (later updates win, as in the original merge).
        keywords = {
            'BODY{}_RADII'.format(self.target_id): self.target_body_radii,
            'BODY_FRAME_CODE': self.target_frame_id,
            'BODY_CODE': self.target_id,
        }
        keywords.update(util.query_kernel_pool(f"*{self.ikid}*"))
        keywords.update(util.query_kernel_pool(f"*{self.target_id}*"))
        self._naif_keywords = keywords
    return self._naif_keywords
|
const path = require("path");
module.exports = {
extends: "../.babelrc.js",
plugins: [
[
"babel-plugin-relay",
{ artifactDirectory: path.resolve(__dirname, "./__generated__") },
],
],
};
|
const path = require('path');
const mix = require('laravel-mix');

// Compile Vue components as ES modules.
mix.config.vue.esModule = true;

// Asset pipeline: JS bundle, SASS stylesheet, and source maps.
mix.js('resources/js/app.js', 'public/js');
mix.sass('resources/sass/app.scss', 'public/css');
mix.sourceMaps();

// Cache-bust compiled assets in production builds.
if (mix.inProduction()) {
  mix.version();
}

const webpackOverrides = {
  resolve: {
    extensions: ['.js', '.json', '.vue'],
    alias: {
      // Allow `~/...` imports rooted at resources/js.
      '~': path.join(__dirname, './resources/js'),
    },
  },
  output: {
    chunkFilename: 'js/[name].[chunkhash].js',
  },
};

mix.webpackConfig(webpackOverrides);
|
import requests

# Quick smoke check against the microservice endpoint: print the HTTP
# status code and the decoded JSON body.
URL = 'https://jenapp.appgallabs.io/microservice/'

response = requests.get(URL)
print(response.status_code)
print(response.json())
|
class ReqParser(object):
    """Helpers for validating request bodies and serializing result chunks."""

    @classmethod
    def check_body(cls, req, params):
        """
        Return True if every name in ``params`` is present in ``req``.

        Parameters
        ----------
        req : mapping or container
            The request body (membership is tested with ``in``).
        params : iterable
            Required parameter names.

        Returns
        -------
        bool
            True when all required params are present (vacuously True for
            an empty ``params``), False otherwise.
        """
        # all() short-circuits on the first missing param, matching the
        # original early-return loop.
        return all(param in req for param in params)

    @classmethod
    def as_jsonlist(cls, chunk):
        """
        Return a list with ``item.json()`` for each truthy item in
        ``chunk`` and ``None`` for each falsy item.
        """
        return [item.json() if item else None for item in chunk]
|
import pkg_resources

__all__ = ["VERSION", "__version__", "__short_version__"]

# Single-source the package version from the bundled VERSION.txt file.
version_file = pkg_resources.resource_filename("dftinputgen", "VERSION.txt")
with open(version_file, "r") as handle:
    __version__ = handle.read().strip()

VERSION = __version__
# Everything before the last dot, e.g. "1.2.3" -> "1.2".
__short_version__ = __version__.rpartition(".")[0]
|
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os

from paddle.check_import_scipy import check_import_scipy

# Check that scipy is importable on this platform before pulling in the
# rest of paddle (works around platform-specific import failures).
check_import_scipy(os.name)

try:
    # Version metadata is generated at build time, so it is missing when
    # paddle is imported directly from a source checkout.
    from paddle.version import full_version as __version__
    from paddle.version import commit as __git_commit__
except ImportError:
    import sys
    sys.stderr.write('''Warning with import paddle: you should not
import paddle from the source directory; please install paddlepaddle*.whl firstly.'''
                     )

import paddle.batch

# Re-export the batch helper so callers can use `paddle.batch(...)`.
batch = batch.batch

from .fluid import monkey_patch_variable
from .fluid.dygraph import monkey_patch_math_varbase

# Install Python operator overloads on Variable / VarBase; must run
# before the tensor/framework modules below are imported.
monkey_patch_variable()
monkey_patch_math_varbase()
import paddle.framework
from .framework import VarBase as Tensor
from .framework import ComplexVariable as ComplexTensor
import paddle.compat
import paddle.distributed
import paddle.sysconfig
import paddle.tensor
import paddle.distribution
import paddle.nn
import paddle.distributed.fleet
import paddle.optimizer
import paddle.metric
import paddle.device
import paddle.incubate.complex as complex
import paddle.regularizer
# TODO: define alias in tensor and framework directory
from .tensor.random import randperm
from .tensor.random import bernoulli
from .tensor.attribute import rank #DEFINE_ALIAS
from .tensor.attribute import shape #DEFINE_ALIAS
from .tensor.creation import to_tensor #DEFINE_ALIAS
from .tensor.creation import diag #DEFINE_ALIAS
from .tensor.creation import eye #DEFINE_ALIAS
# from .tensor.creation import fill_constant #DEFINE_ALIAS
# from .tensor.creation import get_tensor_from_selected_rows #DEFINE_ALIAS
from .tensor.creation import linspace #DEFINE_ALIAS
from .tensor.creation import ones #DEFINE_ALIAS
from .tensor.creation import ones_like #DEFINE_ALIAS
from .tensor.creation import zeros #DEFINE_ALIAS
from .tensor.creation import zeros_like #DEFINE_ALIAS
from .tensor.creation import arange #DEFINE_ALIAS
from .tensor.creation import eye #DEFINE_ALIAS
from .tensor.creation import full #DEFINE_ALIAS
from .tensor.creation import full_like #DEFINE_ALIAS
from .tensor.creation import triu #DEFINE_ALIAS
from .tensor.creation import tril #DEFINE_ALIAS
from .tensor.creation import meshgrid #DEFINE_ALIAS
from .tensor.creation import empty #DEFINE_ALIAS
from .tensor.creation import empty_like #DEFINE_ALIAS
from .tensor.creation import assign #DEFINE_ALIAS
from .tensor.linalg import matmul #DEFINE_ALIAS
from .tensor.linalg import dot #DEFINE_ALIAS
# from .tensor.linalg import einsum #DEFINE_ALIAS
from .tensor.linalg import norm #DEFINE_ALIAS
from .tensor.linalg import transpose #DEFINE_ALIAS
from .tensor.linalg import dist #DEFINE_ALIAS
from .tensor.linalg import t #DEFINE_ALIAS
from .tensor.linalg import cross #DEFINE_ALIAS
from .tensor.linalg import cholesky #DEFINE_ALIAS
# from .tensor.linalg import tensordot #DEFINE_ALIAS
from .tensor.linalg import bmm #DEFINE_ALIAS
from .tensor.linalg import histogram #DEFINE_ALIAS
from .tensor.linalg import mv #DEFINE_ALIAS
from .tensor.logic import equal #DEFINE_ALIAS
from .tensor.logic import greater_equal #DEFINE_ALIAS
from .tensor.logic import greater_than #DEFINE_ALIAS
from .tensor.logic import is_empty #DEFINE_ALIAS
#from .tensor.logic import isfinite #DEFINE_ALIAS
from .tensor.logic import less_equal #DEFINE_ALIAS
from .tensor.logic import less_than #DEFINE_ALIAS
from .tensor.logic import logical_and #DEFINE_ALIAS
from .tensor.logic import logical_not #DEFINE_ALIAS
from .tensor.logic import logical_or #DEFINE_ALIAS
from .tensor.logic import logical_xor #DEFINE_ALIAS
from .tensor.logic import not_equal #DEFINE_ALIAS
from .tensor.logic import allclose #DEFINE_ALIAS
from .tensor.logic import equal_all #DEFINE_ALIAS
# from .tensor.logic import isnan #DEFINE_ALIAS
from .tensor.logic import is_tensor #DEFINE_ALIAS
from .tensor.manipulation import cast #DEFINE_ALIAS
from .tensor.manipulation import concat #DEFINE_ALIAS
from .tensor.manipulation import expand #DEFINE_ALIAS
from .tensor.manipulation import broadcast_to #DEFINE_ALIAS
from .tensor.manipulation import expand_as #DEFINE_ALIAS
from .tensor.manipulation import tile #DEFINE_ALIAS
from .tensor.manipulation import flatten #DEFINE_ALIAS
from .tensor.manipulation import gather #DEFINE_ALIAS
from .tensor.manipulation import gather_nd #DEFINE_ALIAS
from .tensor.manipulation import reshape #DEFINE_ALIAS
from .tensor.manipulation import flip as reverse #DEFINE_ALIAS
from .tensor.manipulation import scatter #DEFINE_ALIAS
from .tensor.manipulation import scatter_nd_add #DEFINE_ALIAS
from .tensor.manipulation import scatter_nd #DEFINE_ALIAS
from .tensor.manipulation import shard_index #DEFINE_ALIAS
from .tensor.manipulation import slice #DEFINE_ALIAS
from .tensor.manipulation import split #DEFINE_ALIAS
from .tensor.manipulation import squeeze #DEFINE_ALIAS
from .tensor.manipulation import stack #DEFINE_ALIAS
from .tensor.manipulation import strided_slice #DEFINE_ALIAS
from .tensor.manipulation import transpose #DEFINE_ALIAS
from .tensor.manipulation import unique #DEFINE_ALIAS
from .tensor.manipulation import unsqueeze #DEFINE_ALIAS
from .tensor.manipulation import unstack #DEFINE_ALIAS
from .tensor.manipulation import flip #DEFINE_ALIAS
from .tensor.manipulation import unbind #DEFINE_ALIAS
from .tensor.manipulation import roll #DEFINE_ALIAS
from .tensor.manipulation import chunk #DEFINE_ALIAS
from .tensor.math import abs #DEFINE_ALIAS
from .tensor.math import acos #DEFINE_ALIAS
from .tensor.math import asin #DEFINE_ALIAS
from .tensor.math import atan #DEFINE_ALIAS
from .tensor.math import ceil #DEFINE_ALIAS
from .tensor.math import cos #DEFINE_ALIAS
from .tensor.math import cosh #DEFINE_ALIAS
from .tensor.math import cumsum #DEFINE_ALIAS
# from .tensor.math import elementwise_add #DEFINE_ALIAS
# from .tensor.math import elementwise_div #DEFINE_ALIAS
# from .tensor.math import elementwise_floordiv #DEFINE_ALIAS
# from .tensor.math import elementwise_mod #DEFINE_ALIAS
# from .tensor.math import elementwise_pow #DEFINE_ALIAS
# from .tensor.math import elementwise_sub #DEFINE_ALIAS
from .tensor.math import exp #DEFINE_ALIAS
from .tensor.math import floor #DEFINE_ALIAS
from .tensor.math import increment #DEFINE_ALIAS
from .tensor.math import log #DEFINE_ALIAS
from .tensor.math import log2 #DEFINE_ALIAS
from .tensor.math import log10 #DEFINE_ALIAS
from .tensor.math import multiplex #DEFINE_ALIAS
from .tensor.math import pow #DEFINE_ALIAS
from .tensor.math import reciprocal #DEFINE_ALIAS
# from .tensor.math import reduce_max #DEFINE_ALIAS
# from .tensor.math import reduce_min #DEFINE_ALIAS
# from .tensor.math import reduce_prod #DEFINE_ALIAS
# from .tensor.math import reduce_sum #DEFINE_ALIAS
from .tensor.math import all #DEFINE_ALIAS
from .tensor.math import any #DEFINE_ALIAS
from .tensor.math import round #DEFINE_ALIAS
from .tensor.math import rsqrt #DEFINE_ALIAS
from .tensor.math import scale #DEFINE_ALIAS
from .tensor.math import sign #DEFINE_ALIAS
from .tensor.math import sin #DEFINE_ALIAS
from .tensor.math import sinh #DEFINE_ALIAS
from .tensor.math import sqrt #DEFINE_ALIAS
from .tensor.math import square #DEFINE_ALIAS
from .tensor.math import stanh #DEFINE_ALIAS
from .tensor.math import sum #DEFINE_ALIAS
from .tensor.math import tanh #DEFINE_ALIAS
from .tensor.math import add_n #DEFINE_ALIAS
from .tensor.math import max #DEFINE_ALIAS
from .tensor.math import maximum #DEFINE_ALIAS
from .tensor.math import min #DEFINE_ALIAS
from .tensor.math import minimum #DEFINE_ALIAS
from .tensor.math import mm #DEFINE_ALIAS
from .tensor.math import divide #DEFINE_ALIAS
from .tensor.math import floor_divide #DEFINE_ALIAS
from .tensor.math import remainder #DEFINE_ALIAS
from .tensor.math import mod #DEFINE_ALIAS
from .tensor.math import floor_mod #DEFINE_ALIAS
from .tensor.math import multiply #DEFINE_ALIAS
from .tensor.math import add #DEFINE_ALIAS
from .tensor.math import subtract #DEFINE_ALIAS
from .tensor.math import atan #DEFINE_ALIAS
from .tensor.math import logsumexp #DEFINE_ALIAS
from .tensor.math import inverse #DEFINE_ALIAS
from .tensor.math import log1p #DEFINE_ALIAS
from .tensor.math import erf #DEFINE_ALIAS
# from .tensor.math import addcmul #DEFINE_ALIAS
from .tensor.math import addmm #DEFINE_ALIAS
from .tensor.math import clip #DEFINE_ALIAS
from .tensor.math import trace #DEFINE_ALIAS
from .tensor.math import kron #DEFINE_ALIAS
from .tensor.math import isfinite #DEFINE_ALIAS
from .tensor.math import isinf #DEFINE_ALIAS
from .tensor.math import isnan #DEFINE_ALIAS
from .tensor.math import prod #DEFINE_ALIAS
from .tensor.math import broadcast_shape #DEFINE_ALIAS
from .tensor.random import multinomial #DEFINE_ALIAS
from .tensor.random import standard_normal
from .tensor.random import normal
from .tensor.random import uniform #DEFINE_ALIAS
from .tensor.random import randn #DEFINE_ALIAS
from .tensor.random import rand #DEFINE_ALIAS
from .tensor.random import randint #DEFINE_ALIAS
from .tensor.random import randperm #DEFINE_ALIAS
from .tensor.search import argmax #DEFINE_ALIAS
from .tensor.search import argmin #DEFINE_ALIAS
from .tensor.search import argsort #DEFINE_ALIAS
# from .tensor.search import has_inf #DEFINE_ALIAS
# from .tensor.search import has_nan #DEFINE_ALIAS
from .tensor.search import masked_select #DEFINE_ALIAS
from .tensor.search import topk #DEFINE_ALIAS
from .tensor.search import where #DEFINE_ALIAS
from .tensor.search import index_select #DEFINE_ALIAS
from .tensor.search import nonzero #DEFINE_ALIAS
from .tensor.search import sort #DEFINE_ALIAS
from .tensor.to_string import set_printoptions #DEFINE_ALIAS
from .framework.random import seed #DEFINE_ALIAS
from .framework.random import get_cuda_rng_state #DEFINE_ALIAS
from .framework.random import set_cuda_rng_state #DEFINE_ALIAS
from .framework import ParamAttr #DEFINE_ALIAS
# from .framework import create_global_var #DEFINE_ALIAS
from .framework import create_parameter #DEFINE_ALIAS
from .framework import CPUPlace #DEFINE_ALIAS
from .framework import CUDAPlace #DEFINE_ALIAS
from .framework import CUDAPinnedPlace #DEFINE_ALIAS
from .framework import grad #DEFINE_ALIAS
from .framework import no_grad #DEFINE_ALIAS
from .framework import save #DEFINE_ALIAS
from .framework import load #DEFINE_ALIAS
from .framework import DataParallel #DEFINE_ALIAS
from .framework import set_default_dtype #DEFINE_ALIAS
from .framework import get_default_dtype #DEFINE_ALIAS
from .tensor.search import index_sample #DEFINE_ALIAS
from .tensor.stat import mean #DEFINE_ALIAS
# from .tensor.stat import reduce_mean #DEFINE_ALIAS
from .tensor.stat import std #DEFINE_ALIAS
from .tensor.stat import var #DEFINE_ALIAS
# from .fluid.data import data
from .tensor.stat import numel #DEFINE_ALIAS
from .tensor.stat import median #DEFINE_ALIAS
from .device import get_cudnn_version
from .device import set_device
from .device import get_device
from .device import is_compiled_with_cuda #DEFINE_ALIAS
from .device import is_compiled_with_xpu
from .device import XPUPlace
# from .tensor.tensor import Tensor #DEFINE_ALIAS
# from .tensor.tensor import LoDTensor #DEFINE_ALIAS
# from .tensor.tensor import LoDTensorArray #DEFINE_ALIAS
from .fluid.dygraph.base import enable_dygraph as disable_static #DEFINE_ALIAS
from .fluid.dygraph.base import disable_dygraph as enable_static #DEFINE_ALIAS
from .fluid.framework import in_dygraph_mode as in_dynamic_mode #DEFINE_ALIAS
from .fluid.layers import crop_tensor as crop #DEFINE_ALIAS
from . import jit
from . import static
from . import amp
from . import onnx
# high-level api
from .hapi import Model
from .hapi import callbacks
from .hapi import summary
import paddle.text
import paddle.vision
# Default to imperative (dynamic-graph) mode on import.
disable_static()
|
// Doxygen-generated search index fragment for `width`
// (wte::cmp::hitbox). Do not edit by hand.
var searchData=
[
  ['width_0',['width',['../classwte_1_1cmp_1_1hitbox.html#af23b8b5f3393b47f659aeebc2b2960ff',1,'wte::cmp::hitbox']]]
];
|
import os
import select
import signal
import sys
import uuid
from django_spring.config import Config
from django_spring.utils.logger import get_logger, TERM_COLORS
from django_spring.utils.socket_data import (
closing,
connect,
fd_redirect_list,
write_json,
)
class Client(object):
    """Client side of the spring server: sends a command over the data
    socket and pumps bytes between the local terminal and the server
    until the connection closes."""

    def __init__(self, data_path, ctl_path, app_env):
        # data_path: unix socket used for the command and stdin/stdout
        # traffic; ctl_path: separate control socket (used to ask the
        # server to quit on Ctrl-C).
        self.log = get_logger("[CLIENT]")
        self.data_path = data_path
        self.ctl_path = ctl_path
        self.app_env = app_env
        # Unique id so the server can correlate the data and control
        # connections from the same client.
        self.client_id = str(uuid.uuid1())

    def _redirect_until_socket_breaks(self, redirect_map, ignore_sigint=False):
        """Pump data between the fds in redirect_map (source -> sink)
        until fd_redirect_list reports the stream is finished.

        When ignore_sigint is True, Ctrl-C is swallowed so the pump keeps
        draining output (used after a QUIT has already been sent)."""
        # Read stdin one byte at a time so keystrokes are forwarded
        # immediately rather than line-buffered.
        read_sizes = {sys.stdin: 1}
        while True:
            try:
                ins, _, _ = select.select(redirect_map.keys(), [], [])
                # NOTE(review): fd_redirect_list presumably copies the
                # readable bytes across and returns falsy once a socket
                # closes — inferred from usage; confirm in socket_data.
                if not fd_redirect_list(ins, redirect_map, read_sizes):
                    break
            except KeyboardInterrupt:
                if not ignore_sigint:
                    raise

    def run(self, cmd):
        """Send `cmd` to the server and relay terminal I/O until done.

        On Ctrl-C, forwards a QUIT with SIGINT over the control socket,
        then keeps relaying (ignoring further Ctrl-C) so remaining server
        output is not lost."""
        # unbuffered STDIN (binary mode, zero buffering) so single
        # keystrokes reach the server immediately
        sys.stdin = os.fdopen(sys.stdin.fileno(), "rb", 0)
        data_sock = connect(self.data_path)
        ctl_sock = connect(self.ctl_path)
        with closing(data_sock), closing(ctl_sock):
            # server output -> our stdout; our stdin -> server
            redirect_map = {data_sock: sys.stdout, sys.stdin: data_sock}
            try:
                write_json(
                    {
                        "command": cmd,
                        "app_env": self.app_env,
                        "client_id": self.client_id,
                    },
                    data_sock,
                )
                self._redirect_until_socket_breaks(redirect_map)
            except KeyboardInterrupt:
                write_json(
                    {
                        "command_ctl": "QUIT",
                        "signal": signal.SIGINT,
                        "app_env": self.app_env,
                        "client_id": self.client_id,
                    },
                    ctl_sock,
                )
                self._redirect_until_socket_breaks(redirect_map, ignore_sigint=True)
def start_client():
    """Command-line entry point: connect to the spring server and run the
    command given on the command line.

    The environment is "test" when the first argument is "test",
    otherwise "dev". Prints a hint (instead of crashing) when the server
    is not running or when no command was given.
    """
    args = sys.argv[1:]
    # Original code indexed sys.argv[1] directly and raised IndexError
    # when invoked with no arguments.
    if not args:
        print("Usage: spring <command> [args...]")
        return
    app_env = "test" if args[0] == "test" else "dev"
    client = Client(
        data_path=Config.MANAGER_SOCK_FILE,
        ctl_path=Config.MANAGER_CTL_SOCK_FILE,
        app_env=app_env,
    )
    try:
        client.run(" ".join(args))
    except ConnectionRefusedError:
        print(
            "{}Can't connect to the spring server, please run: `spring start`{}".format(
                TERM_COLORS["YELLOW"], TERM_COLORS["RESET"]
            )
        )
# Allow the module to be executed directly as a script.
if __name__ == "__main__":
    start_client()
|