| text (string, lengths 3 – 1.05M characters) |
|---|
// Webpack bootstrap (minified, machine-generated — do not edit by hand).
// Installs the JSONP chunk loader on window.webpackJsonp, the module cache,
// and the `a` (require) runtime; chunk ids are mapped to the content hashes
// embedded below. Chunk 8 is the already-loaded entry chunk.
!function(e){function r(r){for(var n,a,u=r[0],i=r[1],f=r[2],s=0,p=[];s<u.length;s++)o[a=u[s]]&&p.push(o[a][0]),o[a]=0;for(n in i)Object.prototype.hasOwnProperty.call(i,n)&&(e[n]=i[n]);for(l&&l(r);p.length;)p.shift()();return c.push.apply(c,f||[]),t()}function t(){for(var e,r=0;r<c.length;r++){for(var t=c[r],n=!0,u=1;u<t.length;u++)0!==o[t[u]]&&(n=!1);n&&(c.splice(r--,1),e=a(a.s=t[0]))}return e}var n={},o={8:0},c=[];function a(r){if(n[r])return n[r].exports;var t=n[r]={i:r,l:!1,exports:{}};return e[r].call(t.exports,t,t.exports,a),t.l=!0,t.exports}a.e=function(e){var r=[],t=o[e];if(0!==t)if(t)r.push(t[2]);else{var n=new Promise(function(r,n){t=o[e]=[r,n]});r.push(t[2]=n);var c=document.getElementsByTagName("head")[0],u=document.createElement("script");u.charset="utf-8",u.timeout=120,a.nc&&u.setAttribute("nonce",a.nc),u.src=function(e){return a.p+""+({0:"common"}[e]||e)+"."+{0:"96dcdf024ec74b5c3d1f",1:"2daa0e34c00465c6c327",2:"a9539b67f22b8909b229",3:"3979fcbf26952c199945",4:"c456cfabe1bffd172751",5:"c486acb4dd585deb55bb",6:"9b34bb0a9a046ef0033e",7:"3b0ba0ee8fe1bd7b590f"}[e]+".js"}(e);var i=setTimeout(function(){f({type:"timeout",target:u})},12e4);function f(r){u.onerror=u.onload=null,clearTimeout(i);var t=o[e];if(0!==t){if(t){var n=r&&("load"===r.type?"missing":r.type),c=r&&r.target&&r.target.src,a=new Error("Loading chunk "+e+" failed.\n("+n+": "+c+")");a.type=n,a.request=c,t[1](a)}o[e]=void 0}}u.onerror=u.onload=f,c.appendChild(u)}return Promise.all(r)},a.m=e,a.c=n,a.d=function(e,r,t){a.o(e,r)||Object.defineProperty(e,r,{configurable:!1,enumerable:!0,get:t})},a.r=function(e){Object.defineProperty(e,"__esModule",{value:!0})},a.n=function(e){var r=e&&e.__esModule?function(){return e.default}:function(){return e};return a.d(r,"a",r),r},a.o=function(e,r){return Object.prototype.hasOwnProperty.call(e,r)},a.p="",a.oe=function(e){throw console.error(e),e};var u=window.webpackJsonp=window.webpackJsonp||[],i=u.push.bind(u);u.push=r,u=u.slice();for(var 
f=0;f<u.length;f++)r(u[f]);var l=i;t()}([]);
|
// Audiobook recommendation records for the "people also bought" carousel.
// Each entry carries: asin, authors, cover (image id), length, narrators,
// title, and optionally subHeading. Presumably consumed by the storefront
// scripts loaded above — confirm against the widget code.
window.peopleAlsoBoughtJSON = [{"asin":"B01GS5YY0W","authors":"Andy Remic","cover":"61IM1zsgXnL","length":"4 hrs and 52 mins","narrators":"Tim Gerard Reynolds","title":"Return of Souls"},{"asin":"B07VYMVXK6","authors":"Brandon Sanderson","cover":"51QNIbZh6XL","length":"14 hrs and 30 mins","narrators":"Suzy Jackson","title":"Starsight"},{"asin":"B01HBURX4M","authors":"Shawn Speakman","cover":"51qDeoyt3tL","length":"20 hrs and 55 mins","narrators":"Dick Hill, Tim Gerard Reynolds, Nick Podehl, and others","title":"Unbound"},{"asin":"B014LL6R5U","authors":"Joe Abercrombie","cover":"61Wl40L+SXL","length":"22 hrs and 15 mins","narrators":"Steven Pacey","title":"The Blade Itself"},{"asin":"B016N7P4OU","authors":"Joe Abercrombie","cover":"51CF4PEn7NL","length":"26 hrs and 29 mins","narrators":"Steven Pacey","title":"Best Served Cold"},{"asin":"B00478WP40","authors":"Erich Maria Remarque","cover":"51rYcvto30L","length":"6 hrs and 55 mins","narrators":"Frank Muller","title":"All Quiet on the Western Front"},{"asin":"B00656GCH8","authors":"Robert R. McCammon","cover":"51D-m7RkQwL","length":"34 hrs and 19 mins","narrators":"Tom Stechschulte","title":"Swan Song"},{"asin":"B00K1EMZ44","authors":"Robert R. McCammon","cover":"51j2EcXsEzL","length":"22 hrs and 18 mins","narrators":"Simon Prebble","title":"The Wolf's Hour"},{"asin":"B07J9MPF8N","authors":"Nicholas Carey","cover":"41K0tBfk5ZL","length":"3 hrs and 37 mins","narrators":"Christopher Graybill","title":"The Carnival of the Night"},{"asin":"B07GZD5J69","authors":"T. E. 
Grau","cover":"61DsWcsrz6L","length":"5 hrs and 54 mins","narrators":"Larry Herron","title":"I Am the River"},{"asin":"B005CQTZV2","authors":"Jonathan Maberry","cover":"51sVzzaWfCL","length":"14 hrs and 17 mins","narrators":"Tom Weiner","subHeading":"The Pine Deep Trilogy, Book 1","title":"Ghost Road Blues"},{"asin":"B014JWRJAS","authors":"Jonathan Maberry","cover":"51CqcEMfPlL","length":"13 hrs and 48 mins","narrators":"Ray Porter","title":"Deadlands: Ghostwalkers"},{"asin":"B07CGZZVG9","authors":"C. Robert Cargill","cover":"51p9RDtlSPL","length":"9 hrs and 10 mins","narrators":"James Patrick Cronin, Vikas Adam","title":"We Are Where the Nightmares Go and Other Stories"},{"asin":"B00JV2SVL8","authors":"Daniel H. Wilson","cover":"510jLbHMc2L","length":"15 hrs and 56 mins","narrators":"MacLeod Andrews, Emily Rankin, Mike Chamberlain","subHeading":"A Novel","title":"Robogenesis"},{"asin":"B01N8PPUR7","authors":"Daniel Arenson","cover":"51IMTrnEh6L","length":"8 hrs and 19 mins","narrators":"Jeffrey Kafer","subHeading":"Earthrise, Book 1","title":"Earth Alone"},{"asin":"B003FNHACA","authors":"Robert Jackson Bennett","cover":"61vSAdGorWL","length":"9 hrs and 49 mins","narrators":"T. Ryder Smith","title":"Mr. Shivers"},{"asin":"B00C4YO2ZE","authors":"Jay Kristoff","cover":"51BMVSbOkFL","length":"14 hrs and 47 mins","narrators":"Jennifer Ikeda","subHeading":"The Lotus War, Book One","title":"Stormdancer"},{"asin":"B079C3TLXR","authors":"Adam Nevill","cover":"518mMHFDnKL","length":"11 hrs and 35 mins","narrators":"Matthew Lloyd Davies","title":"The Ritual"}];
// HTML blurb for the book summary panel.
// Fix: the original markup was malformed — `<i>A Song for No Man's Land<i>`
// opened a second italic instead of closing the first, leaving two stray
// `</i></i>` closers at the end of the paragraph.
window.bookSummaryJSON = "<p>He signed up to fight with visions of honor and glory, of fighting for king and country, of making his family proud at long last. But on a battlefield during the Great War, Robert Jones is shot and wonders how it all went so very wrong and how things could possibly get any worse. </p> <p>He'll soon find out. When the attacking enemy starts to shape-shift into a nightmarish demonic force, Jones finds himself fighting an impossible war against an enemy that shouldn't exist. </p> <p><i>A Song for No Man's Land</i> is the first in an ongoing series. </p>";
|
import smart_imports
smart_imports.all()
class TestRequestsBase(utils_testcase.TestCase):
    """Shared fixture: test map, forum category, two accounts and three news records."""

    def setUp(self):
        super(TestRequestsBase, self).setUp()

        game_logic.create_test_map()

        category = forum_models.Category.objects.create(caption='category-1', slug='category-1')
        forum_models.SubCategory.objects.create(caption=conf.settings.FORUM_CATEGORY_UID,
                                                uid=conf.settings.FORUM_CATEGORY_UID,
                                                category=category)

        self.assertEqual(forum_models.Thread.objects.all().count(), 0)

        self.user = self.accounts_factory.create_account()
        self.editor = self.accounts_factory.create_account()

        # pre-create self.news1..self.news3 used by most tests below
        for number in (1, 2, 3):
            setattr(self, 'news{}'.format(number),
                    logic.create_news(caption='news{}-caption'.format(number),
                                      description='news{}-description'.format(number),
                                      content='news{}-content'.format(number)))

        editors_group = utils_permissions.sync_group('edit news', ['news.edit_news'])
        editors_group.user_set.add(self.editor._model)

    def create_news(self, index):
        """Create one additional news record with index-stamped fields."""
        return logic.create_news(caption='caption-{}'.format(index),
                                 description='description-{}'.format(index),
                                 content='content-{}'.format(index))
class TestIndexRequests(TestRequestsBase):
    """Requests to the news index page."""

    def test_index_page(self):
        expected = []
        for number in (1, 2, 3):
            expected.append('news{}-caption'.format(number))
            expected.append('news{}-description'.format(number))
            expected.append(('news{}-content'.format(number), 0))  # content is not rendered on the index
        self.check_html_ok(self.client.get(dext_urls.url('news:')), texts=expected)

    def test_second_page(self):
        # fill a whole page with fresh records, pushing the fixture news to page 2
        for number in range(conf.settings.NEWS_ON_PAGE):
            self.create_news(number)

        first_page_texts = []
        for number in range(conf.settings.NEWS_ON_PAGE):
            first_page_texts += [('caption-{}'.format(number), 1),
                                 ('description-{}'.format(number), 1)]

        self.check_html_ok(self.request_html(dext_urls.url('news:', page=1)), texts=first_page_texts)

        self.check_html_ok(self.request_html(dext_urls.url('news:', page=2)),
                           texts=[('news1-caption', 1), ('news1-description', 1),
                                  ('news2-caption', 1), ('news2-description', 1),
                                  ('news3-caption', 1), ('news3-description', 1)])

    def test_big_page_number(self):
        # out-of-range page numbers redirect back to the first page
        self.check_redirect(dext_urls.url('news:') + '?page=666',
                            dext_urls.url('news:') + '?page=1')
class TestFeedRequests(TestRequestsBase):
    """Requests to the Atom feed."""

    def test_feed_page(self):
        # age news1 and news3 beyond the feed delay; only they should be published
        for news in (self.news1, self.news3):
            models.News.objects.filter(id=news.id).update(
                created_at=news.created_at - datetime.timedelta(seconds=conf.settings.FEED_ITEMS_DELAY + 1))

        texts = [('news1-caption', 1),
                 ('news1-description', 0),
                 ('news1-content', 1),
                 ('news2-caption', 0),  # does not pass through the time limit
                 ('news2-description', 0),
                 ('news2-content', 0),
                 ('news3-caption', 1),
                 ('news3-description', 0),
                 ('news3-content', 1)]

        self.check_html_ok(self.request_html(dext_urls.url('news:feed')),
                           texts=texts,
                           content_type='application/atom+xml')
class TestShowRequests(TestRequestsBase):
    """Requests to a single news page."""

    def test_show_page(self):
        response = self.client.get(dext_urls.url('news:show', self.news1.id))
        self.check_html_ok(response,
                           texts=(('news1-caption', 4),  # extra caption occurrences come from the addthis widget
                                  ('news1-description', 1),  # description appears in the addthis widget
                                  ('news1-content', 1),
                                  ('pgf-forum-block', 0)))
class TestNewRequests(TestRequestsBase):
    """Requests to the 'new news' form page."""

    def test_no_rights(self):
        new_url = dext_urls.url('news:new')

        # anonymous users are redirected to login
        self.check_redirect(new_url, accounts_logic.login_page_url(new_url))

        # a regular user sees the rights error
        self.request_login(self.user.email)
        self.check_html_ok(self.request_html(new_url), texts=['news.no_edit_rights'])

    def test_success(self):
        self.request_login(self.editor.email)
        self.check_html_ok(self.request_html(dext_urls.url('news:new')),
                           texts=[('news.no_edit_rights', 0)])
class TestCreateRequests(TestRequestsBase):
    """AJAX requests that create a news record."""

    def test_no_rights(self):
        create_url = dext_urls.url('news:create')

        self.check_ajax_error(self.post_ajax_json(create_url, {}), 'common.login_required')

        self.request_login(self.user.email)
        self.check_ajax_error(self.post_ajax_json(create_url, {}), 'news.no_edit_rights')

    def test_form_errors(self):
        # an empty payload fails form validation
        self.request_login(self.editor.email)
        self.check_ajax_error(self.post_ajax_json(dext_urls.url('news:create'), {}), 'form_errors')

    def test_success(self):
        self.request_login(self.editor.email)

        with self.check_delta(models.News.objects.all().count, 1):
            self.check_ajax_ok(self.post_ajax_json(dext_urls.url('news:create'),
                                                   {'caption': 'new-news-caption',
                                                    'description': 'new-news-description',
                                                    'content': 'new-news-content'}))

        created = logic.load_last_news()
        self.assertEqual(created.caption, 'new-news-caption')
        self.assertEqual(created.description, 'new-news-description')
        self.assertEqual(created.content, 'new-news-content')
class TestEditRequests(TestRequestsBase):
    """Requests to the news edit form page."""

    def test_no_rights(self):
        edit_url = dext_urls.url('news:edit', self.news1.id)

        self.check_redirect(edit_url, accounts_logic.login_page_url(edit_url))

        self.request_login(self.user.email)
        self.check_html_ok(self.request_html(edit_url), texts=['news.no_edit_rights'])

    def test_success(self):
        self.request_login(self.editor.email)
        self.check_html_ok(self.request_html(dext_urls.url('news:edit', self.news1.id)),
                           texts=[('news.no_edit_rights', 0)])

    def test_no_item(self):
        # NOTE(review): a missing id (666) is only checked for the absence of the
        # rights error, not for a 404 — presumably handled by the view; confirm.
        self.request_login(self.editor.email)
        self.check_html_ok(self.request_html(dext_urls.url('news:edit', 666)),
                           texts=[('news.no_edit_rights', 0)])
class TestSendMailsRequests(TestRequestsBase):
    """AJAX requests that queue a news mailing."""

    def test_no_rights(self):
        send_url = dext_urls.url('news:send-mails', self.news1.id)

        self.check_ajax_error(self.post_ajax_json(send_url, {}), 'common.login_required')

        self.request_login(self.user.email)
        self.check_ajax_error(self.post_ajax_json(send_url, {}), 'news.no_edit_rights')

    def test_success(self):
        self.request_login(self.editor.email)

        with self.check_delta(post_service_models.Message.objects.count, 1):
            self.check_ajax_ok(self.post_ajax_json(dext_urls.url('news:send-mails', self.news1.id)))

        message = post_service_prototypes.MessagePrototype._db_latest()
        self.assertEqual(message.handler.news_id, self.news1.id)

        news = logic.load_news(self.news1.id)
        self.assertTrue(news.emailed.is_EMAILED)

    def test_restricted(self):
        self.request_login(self.editor.email)

        # any state other than NOT_EMAILED forbids queueing the mailing
        self.news1.emailed = relations.EMAILED_STATE.random(exclude=(relations.EMAILED_STATE.NOT_EMAILED,))
        logic.save_news(self.news1)

        with self.check_not_changed(post_service_models.Message.objects.count):
            self.check_ajax_error(self.post_ajax_json(dext_urls.url('news:send-mails', self.news1.id)),
                                  'wrong_mail_state')
class TestDisableSendMailsRequests(TestRequestsBase):
    """AJAX requests that disable the mailing for a news record."""

    def test_no_rights(self):
        disable_url = dext_urls.url('news:disable-send-mails', self.news1.id)

        self.check_ajax_error(self.post_ajax_json(disable_url, {}), 'common.login_required')

        self.request_login(self.user.email)
        self.check_ajax_error(self.post_ajax_json(disable_url, {}), 'news.no_edit_rights')

    def test_success(self):
        self.request_login(self.editor.email)

        self.check_ajax_ok(self.post_ajax_json(dext_urls.url('news:disable-send-mails', self.news1.id)))

        news = logic.load_news(self.news1.id)
        self.assertTrue(news.emailed.is_DISABLED)

    def test_restricted(self):
        self.request_login(self.editor.email)

        # only NOT_EMAILED records may have their mailing disabled
        self.news1.emailed = relations.EMAILED_STATE.random(exclude=(relations.EMAILED_STATE.NOT_EMAILED,))
        logic.save_news(self.news1)

        self.check_ajax_error(self.post_ajax_json(dext_urls.url('news:disable-send-mails', self.news1.id)),
                              'wrong_mail_state')
class TestUpdateRequests(TestRequestsBase):
    """AJAX requests that update an existing news record."""

    def test_no_rights(self):
        update_url = dext_urls.url('news:update', self.news1.id)

        self.check_ajax_error(self.post_ajax_json(update_url, {}), 'common.login_required')

        self.request_login(self.user.email)
        self.check_ajax_error(self.post_ajax_json(update_url, {}), 'news.no_edit_rights')

    def test_form_errors(self):
        self.request_login(self.editor.email)
        self.check_ajax_error(self.post_ajax_json(dext_urls.url('news:update', self.news1.id), {}),
                              'form_errors')

        # a failed update must leave the record untouched
        news = logic.load_news(self.news1.id)
        self.assertEqual(news.caption, self.news1.caption)
        self.assertEqual(news.description, self.news1.description)
        self.assertEqual(news.content, self.news1.content)

    def test_success(self):
        self.request_login(self.editor.email)

        # updating must not create a new record
        with self.check_delta(models.News.objects.all().count, 0):
            self.check_ajax_ok(self.post_ajax_json(dext_urls.url('news:update', self.news1.id),
                                                   {'caption': 'updated-news-caption',
                                                    'description': 'updated-news-description',
                                                    'content': 'updated-news-content'}))

        news = logic.load_news(self.news1.id)
        self.assertEqual(news.caption, 'updated-news-caption')
        self.assertEqual(news.description, 'updated-news-description')
        self.assertEqual(news.content, 'updated-news-content')
class TestPostOnForumRequests(TestRequestsBase):
    """AJAX requests that publish a news record on the forum."""

    def test_post_on_forum_success(self):
        self.request_login(self.editor.email)

        response = self.post_ajax_json(dext_urls.url('news:publish-on-forum', self.news1.id))

        self.assertEqual(forum_models.Thread.objects.all().count(), 1)
        thread = forum_models.Thread.objects.all()[0]

        self.check_ajax_ok(response, data={'next_url': dext_urls.url('forum:threads:show', thread.id)})

        self.check_html_ok(self.client.get(dext_urls.url('forum:threads:show', thread.id)),
                           texts=(('news1-caption', 6),
                                  ('news1-description', 0),
                                  ('news1-content', 1)))
        self.check_html_ok(self.client.get(dext_urls.url('news:show', self.news1.id)),
                           texts=(('pgf-forum-link', 1),
                                  ('pgf-forum-block', 1)))
        self.check_html_ok(self.client.get(dext_urls.url('news:')),
                           texts=(('pgf-forum-link', 1),))

    def test_post_on_forum_unloggined(self):
        publish_url = dext_urls.url('news:publish-on-forum', self.news1.id)

        self.check_redirect(publish_url, accounts_logic.login_page_url(publish_url))

        # nothing published, no forum links rendered
        self.assertEqual(forum_models.Thread.objects.all().count(), 0)
        self.check_html_ok(self.client.get(dext_urls.url('news:show', self.news1.id)),
                           texts=(('pgf-forum-link', 0),))
        self.check_html_ok(self.client.get(dext_urls.url('news:')),
                           texts=(('pgf-forum-link', 0),))

    def test_post_on_forum_unexisting_category(self):
        self.request_login(self.editor.email)

        forum_models.SubCategory.objects.all().delete()

        self.check_ajax_error(self.post_ajax_json(dext_urls.url('news:publish-on-forum', self.news1.id)),
                              'forum_category_not_exists')

        self.assertEqual(forum_models.Thread.objects.all().count(), 0)
        self.check_html_ok(self.client.get(dext_urls.url('news:show', self.news1.id)),
                           texts=(('pgf-forum-link', 0),))
        self.check_html_ok(self.client.get(dext_urls.url('news:')),
                           texts=(('pgf-forum-link', 0),))

    def test_post_on_forum_already_publish(self):
        self.request_login(self.editor.email)

        self.post_ajax_json(dext_urls.url('news:publish-on-forum', self.news1.id))
        self.check_ajax_error(self.post_ajax_json(dext_urls.url('news:publish-on-forum', self.news1.id)),
                              'forum_thread_already_exists')

    def test_post_on_forum__no_editor_rights(self):
        publish_url = dext_urls.url('news:publish-on-forum', self.news1.id)

        self.check_ajax_error(self.post_ajax_json(publish_url), 'common.login_required')

        self.request_login(self.user.email)
        self.check_ajax_error(self.post_ajax_json(publish_url), 'news.no_edit_rights')
|
'use strict'

/*  ------------------------------------------------------------------------

    NB: initially, I used objects for options passing:

        decimalToPrecision ('123.456', { digits: 2, round: true, afterPoint: true })

    ...but it turns out it's hard to port that across different languages and it
    probably has a performance penalty -- while it's performance critical code! So
    I switched to using named constants instead, as it is actually more readable and
    succinct, and surely doesn't come with any inherent performance downside:

        decimalToPrecision ('123.456', ROUND, 2, DECIMAL_PLACES)                     */

const ROUND = 0 // rounding mode
    , TRUNCATE = 1
    , ROUND_UP = 2 // NOTE(review): ROUND_UP/ROUND_DOWN are exported but never referenced in this file
    , ROUND_DOWN = 3

const DECIMAL_PLACES = 0 // digits counting mode
    , SIGNIFICANT_DIGITS = 1
    , TICK_SIZE = 2

const NO_PADDING = 0 // zero-padding mode
    , PAD_WITH_ZERO = 1

// all of the above, bundled into one object for convenient re-export
const precisionConstants = {
    ROUND,
    TRUNCATE,
    ROUND_UP,
    ROUND_DOWN,
    DECIMAL_PLACES,
    SIGNIFICANT_DIGITS,
    TICK_SIZE,
    NO_PADDING,
    PAD_WITH_ZERO,
}
/* ------------------------------------------------------------------------ */
// See https://stackoverflow.com/questions/1685680/how-to-avoid-scientific-notation-for-large-numbers-in-javascript for discussion
function numberToString (x) { // avoids scientific notation for too large and too small numbers
if (typeof x === 'string') return x
const s = x.toString ()
if (Math.abs (x) < 1.0) {
const e = parseInt (s.split ('e-')[1])
const neg = (s[0] === '-')
if (e) {
x *= Math.pow (10, e - 1)
x = (neg ? '-' : '') + '0.' + (new Array (e)).join ('0') + x.toString ().substring (neg ? 3 : 2)
return x
}
} else {
const parts = s.split ('e')
if (parts[1]) {
let e = parseInt (parts[1])
const m = parts[0].split ('.')
if (m[1]) {
e -= m[1].length
}
return m[0] + m[1] + (new Array (e + 1)).join ('0')
}
}
return s;
}
//-----------------------------------------------------------------------------
// expects non-scientific notation

// per-precision cache of the truncation regexps (precision is a small integer index)
const truncate_regExpCache = []

// Cuts `num` to at most `precision` fractional digits (no rounding) and
// returns the result as a string. Precision 0 keeps the integer part only.
const truncate_to_string = (num, precision = 0) => {
    num = numberToString (num)
    if (precision > 0) {
        let re = truncate_regExpCache[precision]
        if (re === undefined) {
            re = new RegExp ("([-]*\\d+\\.\\d{" + precision + "})(\\d)")
            truncate_regExpCache[precision] = re
        }
        const match = num.toString ().match (re)
        const result = (match === null) ? num : match[1]
        return result.toString ()
    }
    return parseInt (num).toString ()
}

// Numeric variant of the above.
const truncate = (num, precision = 0) => parseFloat (truncate_to_string (num, precision))
// Counts the significant fractional digits of a decimal string,
// ignoring trailing zeros (e.g. "0.010" → 2, "10" → 0).
function precisionFromString (string) {
    const [ , fractional ] = string.replace (/0+$/g, '').split ('.')
    return (fractional === undefined) ? 0 : fractional.length
}
/* ------------------------------------------------------------------------ */
const decimalToPrecision = (x, roundingMode
, numPrecisionDigits
, countingMode = DECIMAL_PLACES
, paddingMode = NO_PADDING) => {
if (numPrecisionDigits < 0) {
if (countingMode === TICK_SIZE) {
throw new Error (`TICK_SIZE cant be used with negative numPrecisionDigits`)
}
const toNearest = Math.pow (10, -numPrecisionDigits)
if (roundingMode === ROUND) {
return (toNearest * decimalToPrecision (x / toNearest, roundingMode, 0, countingMode, paddingMode)).toString ()
}
if (roundingMode === TRUNCATE) {
return (x - (x % toNearest)).toString ()
}
}
/* handle tick size */
if (countingMode === TICK_SIZE) {
const precisionDigitsString = decimalToPrecision (numPrecisionDigits, ROUND, 100, DECIMAL_PLACES, NO_PADDING)
const newNumPrecisionDigits = precisionFromString (precisionDigitsString)
let missing = x % numPrecisionDigits
// See: https://github.com/ccxt/ccxt/pull/6486
missing = Number (decimalToPrecision (missing, ROUND, 8, DECIMAL_PLACES, NO_PADDING));
const fpError = decimalToPrecision (missing / numPrecisionDigits, ROUND, Math.max (newNumPrecisionDigits, 8), DECIMAL_PLACES, NO_PADDING)
if (precisionFromString (fpError) !== 0) {
if (roundingMode === ROUND) {
if (x > 0) {
if (missing >= numPrecisionDigits / 2) {
x = x - missing + numPrecisionDigits
} else {
x = x - missing
}
} else {
if (missing >= numPrecisionDigits / 2) {
x = Number (x) - missing
} else {
x = Number (x) - missing - numPrecisionDigits
}
}
} else if (roundingMode === TRUNCATE) {
x = x - missing
}
}
return decimalToPrecision (x, ROUND, newNumPrecisionDigits, DECIMAL_PLACES, paddingMode);
}
/* Convert to a string (if needed), skip leading minus sign (if any) */
const str = numberToString (x)
, isNegative = str[0] === '-'
, strStart = isNegative ? 1 : 0
, strEnd = str.length
/* Find the dot position in the source buffer */
for (var strDot = 0; strDot < strEnd; strDot++)
if (str[strDot] === '.')
break
const hasDot = strDot < str.length
/* Char code constants */
const MINUS = 45
, DOT = 46
, ZERO = 48
, ONE = (ZERO + 1)
, FIVE = (ZERO + 5)
, NINE = (ZERO + 9)
/* For -123.4567 the `chars` array will hold 01234567 (leading zero is reserved for rounding cases when 099 → 100) */
const chars = new Uint8Array ((strEnd - strStart) + (hasDot ? 0 : 1))
chars[0] = ZERO
/* Validate & copy digits, determine certain locations in the resulting buffer */
let afterDot = chars.length
, digitsStart = -1 // significant digits
, digitsEnd = -1
for (var i = 1, j = strStart; j < strEnd; j++, i++) {
const c = str.charCodeAt (j)
if (c === DOT) {
afterDot = i--
} else if ((c < ZERO) || (c > NINE)) {
throw new Error (`${str}: invalid number (contains an illegal character '${str[i - 1]}')`)
} else {
chars[i] = c
if ((c !== ZERO) && (digitsStart < 0)) digitsStart = i
}
}
if (digitsStart < 0) digitsStart = 1
/* Determine the range to cut */
let precisionStart = (countingMode === DECIMAL_PLACES) ? afterDot // 0.(0)001234567
: digitsStart // 0.00(1)234567
, precisionEnd = precisionStart +
numPrecisionDigits
/* Reset the last significant digit index, as it will change during the rounding/truncation. */
digitsEnd = -1
/* Perform rounding/truncation per digit, from digitsEnd to digitsStart, by using the following
algorithm (rounding 999 → 1000, as an example):
step = i=3 i=2 i=1 i=0
chars = 0999 0999 0900 1000
memo = ---0 --1- -1-- 0--- */
let allZeros = true;
let signNeeded = isNegative;
for (let i = chars.length - 1, memo = 0; i >= 0; i--) {
let c = chars[i]
if (i !== 0) {
c += memo
if (i >= (precisionStart + numPrecisionDigits)) {
const ceil = (roundingMode === ROUND) &&
(c >= FIVE) &&
!((c === FIVE) && memo) // prevents rounding of 1.45 to 2
c = ceil ? (NINE + 1) : ZERO
}
if (c > NINE) { c = ZERO; memo = 1; }
else memo = 0
} else if (memo) c = ONE // leading extra digit (0900 → 1000)
chars[i] = c
if (c !== ZERO) {
allZeros = false
digitsStart = i
digitsEnd = (digitsEnd < 0) ? (i + 1) : digitsEnd
}
}
/* Update the precision range, as `digitsStart` may have changed... & the need for a negative sign if it is only 0 */
if (countingMode === SIGNIFICANT_DIGITS) {
precisionStart = digitsStart
precisionEnd = precisionStart + numPrecisionDigits
}
if (allZeros) {
signNeeded = false
}
/* Determine the input character range */
const readStart = ((digitsStart >= afterDot) || allZeros) ? (afterDot - 1) : digitsStart // 0.000(1)234 ----> (0).0001234
, readEnd = (digitsEnd < afterDot) ? (afterDot ) : digitsEnd // 12(3)000 ----> 123000( )
/* Compute various sub-ranges */
const nSign = (signNeeded ? 1 : 0) // (-)123.456
, nBeforeDot = (nSign + (afterDot - readStart)) // (-123).456
, nAfterDot = Math.max (readEnd - afterDot, 0) // -123.(456)
, actualLength = (readEnd - readStart) // -(123.456)
, desiredLength = (paddingMode === NO_PADDING)
? (actualLength) // -(123.456)
: (precisionEnd - readStart) // -(123.456 )
, pad = Math.max (desiredLength - actualLength, 0) // -123.456( )
, padStart = (nBeforeDot + 1 + nAfterDot) // -123.456( )
, padEnd = (padStart + pad) // -123.456 ( )
, isInteger = (nAfterDot + pad) === 0 // -123
/* Fill the output buffer with characters */
const out = new Uint8Array (nBeforeDot + (isInteger ? 0 : 1) + nAfterDot + pad)
// ------------------------------------------------------------------------------------------ // ---------------------
if (signNeeded) out[0] = MINUS // - minus sign
for (i = nSign, j = readStart; i < nBeforeDot; i++, j++) out[i] = chars[j] // 123 before dot
if (!isInteger) out[nBeforeDot] = DOT // . dot
for (i = nBeforeDot + 1, j = afterDot; i < padStart; i++, j++) out[i] = chars[j] // 456 after dot
for (i = padStart; i < padEnd; i++) out[i] = ZERO // 000 padding
/* Build a string from the output buffer */
return String.fromCharCode (...out)
}
// toWei / fromWei

// Converts an integer wei amount to its decimal unit by shifting the
// exponent down by `decimals`. Returns `undefined` unchanged.
function fromWei (amount, decimals = 18) {
    if (amount === undefined) {
        return amount
    }
    // wei must be whole numbers, hence Math.floor before the exponent shift
    const [ mantissa, exponent ] = Math.floor (amount).toExponential ().split ('e')
    const shifted = parseInt (exponent, 10) - decimals
    return parseFloat (mantissa + 'e' + shifted)
}
// Converts a decimal unit amount to wei by shifting the exponent up by
// `decimals`; the result is returned as a plain decimal string.
function toWei (amount, decimals = 18) {
    if (amount === undefined) {
        return amount
    }
    const [ mantissa, exponent ] = parseFloat (amount).toExponential ().split ('e')
    const shifted = parseInt (exponent, 10) + decimals
    // wei must be whole numbers
    return numberToString (Math.floor (parseFloat (mantissa + 'e' + shifted)))
}
/* ------------------------------------------------------------------------ */
module.exports = {
    // wei conversion helpers
    toWei,
    fromWei,
    // number formatting
    numberToString,
    precisionFromString,
    decimalToPrecision,
    truncate_to_string,
    truncate,
    // named constants (also available bundled in precisionConstants)
    precisionConstants,
    ROUND,
    TRUNCATE,
    ROUND_UP,
    ROUND_DOWN,
    DECIMAL_PLACES,
    SIGNIFICANT_DIGITS,
    TICK_SIZE,
    NO_PADDING,
    PAD_WITH_ZERO,
}
/* ------------------------------------------------------------------------ */
|
//
// Generated by class-dump 3.5 (64 bit).
//
// class-dump is Copyright (C) 1997-1998, 2000-2001, 2004-2013 by Steve Nygard.
//
#import "VKResponse.h"
@class VKCountries;
// Response wrapper for the VK API method `database.getCountriesById`.
// This header was produced by class-dump — regenerate it instead of editing.
@interface database_getCountriesById_res : VKResponse
{
    VKCountries *_response; // backing ivar for the `response` property
}

@property(retain, nonatomic) VKCountries *response; // @synthesize response=_response;
- (void).cxx_destruct;

@end
|
import { expect } from "chai"
import { toLegendState } from "./stacked-legend"
describe("Stacked Legend", () => {
  describe("toLegendState helper", () => {
    // fresh palette per call so test cases cannot share mutable state
    const colors = () => [
      "#27aeef",
      "#ea5545",
      "#87bc45",
      "#b33dc6",
      "#f46a9b",
      "#ede15b"
    ]

    it("should nominal", () => {
      const scales = [
        {
          type: "ordinal",
          domain: ["en", "pt", "es", "in", "und", "ja"],
          range: colors()
        }
      ]
      expect(toLegendState(scales)).to.deep.equal({
        type: "nominal",
        title: "Legend",
        open: true,
        domain: ["en", "pt", "es", "in", "und", "ja"],
        position: "bottom-left",
        range: colors()
      })
    })

    it("should gradient", () => {
      const scales = [
        {
          type: "quantitative",
          domain: [0, 100],
          range: colors(),
          legend: {
            title: "My Legend",
            locked: true
          }
        }
      ]
      expect(toLegendState(scales)).to.deep.equal({
        type: "gradient",
        title: "My Legend",
        locked: true,
        open: true,
        domain: [0, 100],
        position: "bottom-left",
        range: colors()
      })
    })

    it("should undefined", () => {})
    it("should stacked", () => {})
  })
})
|
import pandas as pd
import matplotlib.pyplot as plt
from data import games
# Plot yearly attendance as a bar chart, with a dashed line marking the mean.
attendance = games.loc[(games['type'] == 'info') & (games['multi2'] == 'attendance'), ['year', 'multi3']]
attendance.columns = ['year', 'attendance']

# 'multi3' arrives as strings; convert so plotting and mean() work on numbers
attendance.loc[:, 'attendance'] = pd.to_numeric(attendance.loc[:, 'attendance'])

attendance.plot(x='year', y='attendance', figsize=(15, 7), kind='bar')

plt.xlabel('Year')
plt.ylabel('Attendance')
plt.axhline(y=attendance['attendance'].mean(), label='Mean', linestyle='--', color='green')
plt.legend()  # bug fix: without this call the 'Mean' label is never rendered

plt.show()
|
# Copyright 2017 The Abseil Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import flags
from absl.flags import _helpers
from absl.testing import absltest
FLAGS = flags.FLAGS  # module-level alias to the global flag registry (unused by the visible tests)
class FlagsUnitTest(absltest.TestCase):
"""Flags formatting Unit Test."""
def test_get_help_width(self):
"""Verify that get_help_width() reflects _help_width."""
default_help_width = _helpers._DEFAULT_HELP_WIDTH # Save.
self.assertEqual(80, _helpers._DEFAULT_HELP_WIDTH)
self.assertEqual(_helpers._DEFAULT_HELP_WIDTH, flags.get_help_width())
_helpers._DEFAULT_HELP_WIDTH = 10
self.assertEqual(_helpers._DEFAULT_HELP_WIDTH, flags.get_help_width())
_helpers._DEFAULT_HELP_WIDTH = default_help_width # restore
    def test_text_wrap(self):
        """Test that wrapping works as expected.

        Also tests that it is using global flags._help_width by default.
        """
        default_help_width = _helpers._DEFAULT_HELP_WIDTH
        _helpers._DEFAULT_HELP_WIDTH = 10

        # Generate a string with length 40, no spaces
        text = ''
        expect = []
        for n in range(4):
            line = str(n)
            line += '123456789'
            text += line
            expect.append(line)

        # Verify we still break
        wrapped = flags.text_wrap(text).split('\n')
        self.assertEqual(4, len(wrapped))
        self.assertEqual(expect, wrapped)

        # An explicit width overrides the global default
        wrapped = flags.text_wrap(text, 80).split('\n')
        self.assertEqual(1, len(wrapped))
        self.assertEqual([text], wrapped)

        # Normal case, breaking at word boundaries and rewriting new lines
        input_value = 'a b c d e f g h'
        expect = {1: ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'],
                  2: ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'],
                  3: ['a b', 'c d', 'e f', 'g h'],
                  4: ['a b', 'c d', 'e f', 'g h'],
                  5: ['a b c', 'd e f', 'g h'],
                  6: ['a b c', 'd e f', 'g h'],
                  7: ['a b c d', 'e f g h'],
                  8: ['a b c d', 'e f g h'],
                  9: ['a b c d e', 'f g h'],
                  10: ['a b c d e', 'f g h'],
                  11: ['a b c d e f', 'g h'],
                  12: ['a b c d e f', 'g h'],
                  13: ['a b c d e f g', 'h'],
                  14: ['a b c d e f g', 'h'],
                  15: ['a b c d e f g h']}
        for width, exp in expect.items():
            self.assertEqual(exp, flags.text_wrap(input_value, width).split('\n'))

        # We turn lines with only whitespace into empty lines
        # We strip from the right up to the first new line
        self.assertEqual('', flags.text_wrap(' '))
        self.assertEqual('\n', flags.text_wrap(' \n '))
        self.assertEqual('\n', flags.text_wrap('\n\n'))
        self.assertEqual('\n\n', flags.text_wrap('\n\n\n'))
        self.assertEqual('\n', flags.text_wrap('\n '))
        self.assertEqual('a\n\nb', flags.text_wrap('a\n \nb'))
        self.assertEqual('a\n\n\nb', flags.text_wrap('a\n \n \nb'))
        self.assertEqual('a\nb', flags.text_wrap(' a\nb '))
        self.assertEqual('\na\nb', flags.text_wrap('\na\nb\n'))
        self.assertEqual('\na\nb\n', flags.text_wrap(' \na\nb\n '))
        self.assertEqual('\na\nb\n', flags.text_wrap(' \na\nb\n\n'))

        # Double newline.
        self.assertEqual('a\n\nb', flags.text_wrap(' a\n\n b'))

        # We respect prefix
        self.assertEqual(' a\n b\n c', flags.text_wrap('a\nb\nc', 80, ' '))
        self.assertEqual('a\n b\n c', flags.text_wrap('a\nb\nc', 80, ' ', ''))

        # tabs
        self.assertEqual('a\n b c',
                         flags.text_wrap('a\nb\tc', 80, ' ', ''))
        self.assertEqual('a\n bb c',
                         flags.text_wrap('a\nbb\tc', 80, ' ', ''))
        self.assertEqual('a\n bbb c',
                         flags.text_wrap('a\nbbb\tc', 80, ' ', ''))
        self.assertEqual('a\n bbbb c',
                         flags.text_wrap('a\nbbbb\tc', 80, ' ', ''))
        self.assertEqual('a\n b\n c\n d',
                         flags.text_wrap('a\nb\tc\td', 3, ' ', ''))
        self.assertEqual('a\n b\n c\n d',
                         flags.text_wrap('a\nb\tc\td', 4, ' ', ''))
        self.assertEqual('a\n b\n c\n d',
                         flags.text_wrap('a\nb\tc\td', 5, ' ', ''))
        self.assertEqual('a\n b c\n d',
                         flags.text_wrap('a\nb\tc\td', 6, ' ', ''))
        self.assertEqual('a\n b c\n d',
                         flags.text_wrap('a\nb\tc\td', 7, ' ', ''))
        self.assertEqual('a\n b c\n d',
                         flags.text_wrap('a\nb\tc\td', 8, ' ', ''))
        self.assertEqual('a\n b c\n d',
                         flags.text_wrap('a\nb\tc\td', 9, ' ', ''))
        self.assertEqual('a\n b c d',
                         flags.text_wrap('a\nb\tc\td', 10, ' ', ''))

        # multiple tabs
        self.assertEqual('a c',
                         flags.text_wrap('a\t\tc', 80, ' ', ''))

        _helpers._DEFAULT_HELP_WIDTH = default_help_width  # restore
def test_doc_to_help(self):
    """doc_to_help joins wrapped lines and normalizes blank-only lines."""
    cases = [
        ('', ' '),
        ('', ' \n '),
        ('a\n\nb', 'a\n \nb'),
        ('a\n\n\nb', 'a\n \n \nb'),
        ('a b', ' a\nb '),
        ('a b', '\na\nb\n'),
        ('a\n\nb', '\na\n\nb\n'),
        ('a b', ' \na\nb\n '),
        # Different first line, one line empty - erm double new line.
        ('a b c\n\nd', 'a\n b\n c\n\n d'),
        ('a b\n c d', 'a\n b\n \tc\n d'),
        ('a b\n c\n d', 'a\n b\n \tc\n \td'),
    ]
    for expected, source in cases:
        self.assertEqual(expected, flags.doc_to_help(source))
def test_doc_to_help_flag_values(self):
    # !!!!!!!!!!!!!!!!!!!!
    # The following doc string is taken as is directly from flags.py:FlagValues
    # The intention of this test is to verify 'live' performance
    # !!!!!!!!!!!!!!!!!!!!
    # NOTE(review): this docstring is TEST DATA — the test reads it back via
    # __doc__ and feeds it to doc_to_help, so its exact text and indentation
    # matter. The indentation of this copy appears mangled; the line-index
    # assertions below should be re-verified against the pristine source.
    """Used as a registry for 'Flag' objects.
A 'FlagValues' can then scan command line arguments, passing flag
arguments through to the 'Flag' objects that it owns. It also
provides easy access to the flag values. Typically only one
'FlagValues' object is needed by an application: flags.FLAGS
This class is heavily overloaded:
'Flag' objects are registered via __setitem__:
FLAGS['longname'] = x # register a new flag
The .value member of the registered 'Flag' objects can be accessed as
members of this 'FlagValues' object, through __getattr__. Both the
long and short name of the original 'Flag' objects can be used to
access its value:
FLAGS.longname # parsed flag value
FLAGS.x # parsed flag value (short name)
Command line arguments are scanned and passed to the registered 'Flag'
objects through the __call__ method. Unparsed arguments, including
argv[0] (e.g. the program name) are returned.
argv = FLAGS(sys.argv) # scan command line arguments
The original registered Flag objects can be retrieved through the use
"""
    # Convert the docstring above and check the overall shape of the output.
    doc = flags.doc_to_help(self.test_doc_to_help_flag_values.__doc__)
    # Test the general outline of the converted docs
    lines = doc.splitlines()
    self.assertEqual(17, len(lines))
    # Blank lines in the converted output mark paragraph boundaries.
    empty_lines = [index for index in range(len(lines)) if not lines[index]]
    self.assertEqual([1, 3, 5, 8, 12, 15], empty_lines)
    # test that some starting prefix is kept
    flags_lines = [index for index in range(len(lines))
                   if lines[index].startswith(' FLAGS')]
    self.assertEqual([7, 10, 11], flags_lines)
    # but other, especially common space has been removed
    space_lines = [index for index in range(len(lines))
                   if lines[index] and lines[index][0].isspace()]
    self.assertEqual([7, 10, 11, 14], space_lines)
    # No right space was kept
    rspace_lines = [index for index in range(len(lines))
                    if lines[index] != lines[index].rstrip()]
    self.assertEqual([], rspace_lines)
    # test double spaces are kept
    self.assertEqual(True, lines[2].endswith('application: flags.FLAGS'))
def test_text_wrap_raises_on_excessive_indent(self):
    """Ensure an indent longer than line length raises."""
    with self.assertRaises(ValueError):
        flags.text_wrap('dummy', length=10, indent=' ' * 10)
def test_text_wrap_raises_on_excessive_first_line(self):
    """Ensure a first line indent longer than line length raises."""
    with self.assertRaises(ValueError):
        flags.text_wrap('dummy', length=80, firstline_indent=' ' * 80)
if __name__ == '__main__':
    # Run the absltest test runner when this file is executed as a script.
    absltest.main()
|
//01-MODULOS INDIVIDUALES MODULO CONTROLADOR })();
var controladorPresupuesto = (function() {
  // Constructor for a single expense entry.
  var Gasto = function(id, descripcion, valor) {
    this.id = id;
    this.descripcion = descripcion;
    this.valor = valor;
  };
  // Constructor for a single income entry.
  var Ingreso = function(id, descripcion, valor) {
    this.id = id;
    this.descripcion = descripcion;
    this.valor = valor;
  };
  // Sum the `valor` of every item of the given type ('income' | 'expenses')
  // and cache the result in data.totales[type].
  var calcularTotal = function(type) {
    var sum = 0;
    data.todoslosItems[type].forEach(function(act) {
      // e.g. [200, 400, 100] -> 0 + 200 -> 200 + 400 -> 600 + 100
      sum = sum + act.valor;
    });
    data.totales[type] = sum;
  };
  // Private state of the budget module.
  var data = {
    todoslosItems: {
      income: [],
      expenses: []
    },
    totales: {
      income: 0,
      expenses: 0
    },
    presup: 0,
    // -1 means "no percentage available" (no income recorded yet).
    porcent: -1
  };
  // Public API of the budget module.
  return {
    // Create and store a new income/expense item; returns the created item
    // so the caller (UI layer) can render it.
    agreItem: function(ty, des, val) {
      var nuevoItem, ID;
      // New ID = last item's ID + 1, so IDs stay unique even after deletes
      // leave gaps in the array.
      if (data.todoslosItems[ty].length > 0) {
        ID = data.todoslosItems[ty][data.todoslosItems[ty].length - 1].id + 1;
      } else {
        ID = 0;
      }
      // Build the right item type from the submitted info.
      if (ty === "income") {
        nuevoItem = new Ingreso(ID, des, val);
      } else if (ty === "expenses") {
        nuevoItem = new Gasto(ID, des, val);
      }
      data.todoslosItems[ty].push(nuevoItem);
      return nuevoItem;
    },
    // Remove the item with the given id from the given type's list.
    borrarItem: function(type, id) {
      var ids, index;
      // map() builds an array of ids so the item can be located even when
      // deletions have left gaps (id is not the array index).
      ids = data.todoslosItems[type].map(function(current) {
        // BUG FIX: was `current.ids` (nonexistent property, always undefined).
        return current.id;
      });
      index = ids.indexOf(id);
      // BUG FIX: was `index !== 1`; indexOf signals "not found" with -1.
      if (index !== -1) {
        data.todoslosItems[type].splice(index, 1);
      }
    },
    // Recompute totals, the budget (income - expenses) and the percentage
    // of income spent.
    calculoPresupuesto: function() {
      calcularTotal("expenses");
      calcularTotal("income");
      data.presup = data.totales.income - data.totales.expenses;
      // Percentage only makes sense once there is some income.
      if (data.totales.income > 0) {
        data.porcent = Math.round(
          (data.totales.expenses / data.totales.income) * 100
        );
      } else {
        data.porcent = -1;
      }
    },
    // Snapshot of the computed figures for the UI layer.
    tomarPresupuesto: function() {
      return {
        presupuesto: data.presup,
        totaling: data.totales.income,
        totalgast: data.totales.expenses,
        porcentajes: data.porcent
      };
    },
    // Debug helper: run controladorPresupuesto.testing() in the console.
    testing: function() {
      console.log(data);
    }
  };
})();
//02-MODULO CONNTROLADOR INTERFAS USUARIO UI })();
var controladorUI = (function() {
  // Central lookup of every CSS selector this module touches, so markup
  // changes only need to be reflected here.
  var DOMclasshtml = {
    entradaTipo: ".add__type",
    entradaDescripcion: ".add__description",
    entradaDinero: ".add__value",
    entradaboton: ".add__btn",
    contenedorIngreso: ".income__list",
    contenedorGasto: ".expenses__list",
    presupuestoEtiqueta: ".budget__value",
    ingresoEtiqueta: ".budget__income--value",
    gastoEtiqueta: ".budget__expenses--value",
    porcentajeEtiqueta: ".budget__expenses--percentage",
    contenedor: ".container"
  };
  return {
    // Read the current form state: type, description and amount.
    tomarinfoentrada: function() {
      return {
        tipo: document.querySelector(DOMclasshtml.entradaTipo).value, // "income" or "expenses"
        descripcion: document.querySelector(DOMclasshtml.entradaDescripcion)
          .value, // free-text description
        // The raw input value is a string; parseFloat turns it into a number.
        dinero: parseFloat(
          document.querySelector(DOMclasshtml.entradaDinero).value
        )
      };
    },
    // Render a new income/expense row into the matching list container.
    agregarListaItem: function(obj, type) {
      // BUG FIX: `element` was assigned without declaration (implicit
      // global; a ReferenceError in strict mode). Declare it locally.
      var html, newhtml, element;
      // 1. Pick the template and target container for the item type.
      if (type === "income") {
        element = DOMclasshtml.contenedorIngreso;
        html =
          '<div class="item clearfix" id="income-%id%"><div class="item__description">%descripcion%</div><div class="right clearfix"><div class="item__value">%valor%</div><div class="item__delete"><button class="item__delete--btn"><i class="ion-ios-close-outline"></i></button></div></div></div>';
      } else if (type === "expenses") {
        element = DOMclasshtml.contenedorGasto;
        html =
          '<div class="item clearfix" id="expense-%id%"><div class="item__description">%descripcion%</div><div class="right clearfix"><div class="item__value">%valor%</div><div class="item__percentage">21%</div><div class="item__delete"><button class="item__delete--btn"><i class="ion-ios-close-outline"></i></button></div></div></div>';
      }
      // 2. Substitute the placeholders with the item's data. Only the first
      //    replace works on `html`; the later ones must chain on `newhtml`
      //    so earlier substitutions are not lost.
      newhtml = html.replace("%id%", obj.id);
      newhtml = newhtml.replace("%descripcion%", obj.descripcion);
      newhtml = newhtml.replace("%valor%", obj.valor);
      // 3. Append the markup at the end of the chosen container.
      document.querySelector(element).insertAdjacentHTML("beforeend", newhtml);
    },
    // Remove a rendered row by its DOM id.
    borrarListaItem: function(selectorID) {
      // Vanilla-JS removal, see:
      // https://blog.garstasio.com/you-dont-need-jquery/dom-manipulation/
      document
        .getElementById(selectorID)
        .parentNode.removeChild(document.getElementById(selectorID));
    },
    // Clear the description/amount inputs and refocus the first one.
    limpiadorDeCampos: function() {
      var campos, camposArr;
      campos = document.querySelectorAll(
        DOMclasshtml.entradaDescripcion + "," + DOMclasshtml.entradaDinero
      );
      // NodeList -> real Array so forEach is available everywhere.
      camposArr = Array.prototype.slice.call(campos);
      camposArr.forEach(function(current) {
        current.value = "";
      });
      camposArr[0].focus();
    },
    // Write the computed figures into the header labels.
    mostrarPresupuesto: function(objeto) {
      document.querySelector(DOMclasshtml.presupuestoEtiqueta).textContent =
        objeto.presupuesto;
      document.querySelector(DOMclasshtml.ingresoEtiqueta).textContent =
        objeto.totaling;
      document.querySelector(DOMclasshtml.gastoEtiqueta).textContent =
        objeto.totalgast;
      // -1 (or 0) means "no meaningful percentage"; show a placeholder.
      if (objeto.porcentajes > 0) {
        document.querySelector(DOMclasshtml.porcentajeEtiqueta).textContent =
          objeto.porcentajes + "%";
      } else {
        document.querySelector(DOMclasshtml.porcentajeEtiqueta).textContent =
          "---";
      }
    },
    // Expose the selector map so other modules can query the same elements.
    tomarDOM: function() {
      return DOMclasshtml;
    }
  };
})();
//03-MODULO CONTROLADOR APP PRINCIPAL
// Main app controller: wires the budget module and the UI module together.
// `contPresupuesto`/`contUI` are the injected module references; the bodies
// below now use them consistently (they previously bypassed the injection
// and referenced the globals directly).
var controladorApp = (function(contPresupuesto, contUI) {
  // Register all DOM event listeners (add button, Enter key, delete clicks).
  var configEventListener = function() {
    var DOM = contUI.tomarDOM(); // call — we need the selector map itself
    document
      .querySelector(DOM.entradaboton)
      .addEventListener("click", controlAddItem);
    // Enter (keyCode 13) also submits; `which` covers older browsers.
    document.addEventListener("keypress", function(evento) {
      if (evento.keyCode === 13 || evento.which === 13) {
        controlAddItem();
      }
    });
    // Event delegation: one listener on the container handles every
    // per-item delete button.
    document
      .querySelector(DOM.contenedor)
      .addEventListener("click", controlBorrarItem);
  };
  // Recompute the budget and refresh the header display.
  var actualizacionPresupuesto = function() {
    // 01. Recompute totals/budget/percentage.
    contPresupuesto.calculoPresupuesto();
    // 02. Read the updated figures.
    var presupuesto = contPresupuesto.tomarPresupuesto();
    // 03. Render them.
    contUI.mostrarPresupuesto(presupuesto);
  };
  // Handler for the "+" button / Enter key.
  var controlAddItem = function() {
    var entrada, nuevoItem;
    // 01. Read the form.
    entrada = contUI.tomarinfoentrada();
    // BUG FIX: the description check compared against a single space (" "),
    // so empty descriptions passed validation. Require non-blank text.
    if (
      entrada.descripcion.trim() !== "" &&
      !isNaN(entrada.dinero) &&
      entrada.dinero > 0
    ) {
      // 02. Store the item in the budget module.
      nuevoItem = contPresupuesto.agreItem(
        entrada.tipo,
        entrada.descripcion,
        entrada.dinero
      );
      // 03. Render the new row.
      contUI.agregarListaItem(nuevoItem, entrada.tipo);
      // 04. Reset the form.
      contUI.limpiadorDeCampos();
      // 05. Recompute and redisplay the budget.
      actualizacionPresupuesto();
    }
  };
  // Handler (delegated) for a click on an item's delete button.
  var controlBorrarItem = function(evento) {
    var itemID, splitID, type, ID;
    // Bubble up from the clicked <i> to the row div ("income-3"/"expense-2").
    itemID = evento.target.parentNode.parentNode.parentNode.parentNode.id;
    if (itemID) {
      splitID = itemID.split("-");
      // BUG FIX: expense rows use the singular id prefix "expense-", but the
      // data store keys are "income"/"expenses"; map the prefix to the key.
      type = splitID[0] === "expense" ? "expenses" : splitID[0];
      ID = parseInt(splitID[1], 10); // always pass the radix
      // 1. Remove from the data structure.
      contPresupuesto.borrarItem(type, ID);
      // 2. Remove from the UI.
      contUI.borrarListaItem(itemID);
      // 3. Recompute and redisplay the budget.
      actualizacionPresupuesto();
    }
  };
  // Public bootstrap: zero out the display and attach the listeners.
  return {
    init: function() {
      console.log("La aplicación se inicio");
      contUI.mostrarPresupuesto({
        presupuesto: 0,
        totaling: 0,
        totalgast: 0,
        porcentajes: -1
      });
      configEventListener();
    }
  };
})(controladorPresupuesto, controladorUI);
// Boot the application.
controladorApp.init();
|
import numpy as np
from random import shuffle
import scipy.sparse
def softmax_loss_naive(theta, X, y, reg):
    """
    Softmax loss function, naive implementation (with loops).

    Inputs:
    - theta: d x K parameter matrix. Each column is a coefficient vector for class k
    - X: m x d array of data. Data are d-dimensional rows.
    - y: 1-dimensional array of length m with labels 0...K-1, for K classes
    - reg: (float) regularization strength

    Returns a tuple of:
    - loss as single float
    - gradient with respect to parameter matrix theta, an array of same size as theta
    """
    J = 0.0
    grad = np.zeros_like(theta)
    m, dim = X.shape
    num_classes = theta.shape[1]

    # Class scores; subtract the per-row max before exponentiating so exp()
    # never overflows (softmax is invariant to a per-row shift).
    scores = np.dot(X, theta)
    scores = scores - np.max(scores, axis=1).reshape(-1, 1)
    exp_scores = np.exp(scores)
    P = exp_scores / np.sum(exp_scores, axis=1).reshape(-1, 1)

    # Data term: average negative log-likelihood of the true class.
    for i in range(m):
        for j in range(num_classes):
            if y[i] == j:
                J += np.log(P[i, j])
    J = -J / m

    # Regularization term: (reg / 2m) * ||theta||^2, accumulated elementwise.
    for i in range(dim):
        for j in range(num_classes):
            J += reg / 2 / m * np.square(theta[i, j])

    # Gradient: column i accumulates X_j * (1{y_j == i} - P[j, i]) over all
    # examples, is averaged and negated, then gets the regularization term.
    for i in range(num_classes):
        for j in range(m):
            indicator = 1 if y[j] == i else 0
            grad[:, i] += X[j, :] * (indicator - P[j, i])
        grad[:, i] = -grad[:, i] / m
        grad[:, i] += reg / m * theta[:, i]

    return J, grad
def softmax_loss_vectorized(theta, X, y, reg):
    """
    Softmax loss function, vectorized version.

    Inputs and outputs are the same as softmax_loss_naive.
    """
    m, dim = X.shape
    num_classes = theta.shape[1]

    # Shift each row of the scores by its max for numeric stability.
    scores = np.dot(X, theta)
    scores = scores - np.max(scores, axis=1).reshape(-1, 1)
    exp_scores = np.exp(scores)
    P = exp_scores / np.sum(exp_scores, axis=1).reshape(-1, 1)

    # One-hot indicator of the true class for every example.
    # (np.arange replaces the original np.linspace hack for integer indices.)
    I = np.zeros((m, num_classes))
    I[np.arange(m), y] = 1.0

    # Dividing by m directly (instead of multiplying by -1/m) also keeps the
    # scaling correct under Python 2 integer division.
    J = -np.sum(I * np.log(P)) / m + reg / 2 / m * np.sum(np.square(theta))
    grad = -np.dot(X.T, I - P) / m + reg / m * theta
    return J, grad
|
class Hello:
    # WARNING: constructing this class never returns — __init__ loops forever.
    def __init__(self):
        # Intentionally(?) infinite: prints "Hello!" until interrupted.
        while True:
            print("Hello!")
|
// Auto-generated Nuxt.js hydration payload: serializes the server-rendered
// state (theme, reader settings, route) into window.__NUXT__ so the client
// bundle can take over without refetching. Generated code — do not edit.
window.__NUXT__=(function(a,b,c,d,e){return {staticAssetsBase:"https:\u002F\u002Fwww.baca-quran.id\u002Fstatic\u002F1627814429",layout:"default",error:b,state:{notification:{show:a,title:c,message:c},isShowSidebar:a,isSupportWebShare:a,headerTitle:"Baca Qur'an",page:"home",lastReadVerse:b,settingActiveTheme:{name:"dark",bgColor:"#071e3d",fgColor:"#fff"},settingShowTranslation:a,settingShowTafsir:a,settingShowMuqaddimah:d,surahFavorite:[]},serverRendered:d,routePath:"\u002Famp\u002F7\u002F85",config:{_app:{basePath:e,assetsPath:e,cdnURL:"https:\u002F\u002Fwww.baca-quran.id\u002F"}}}}(false,null,"",true,"\u002F"));
|
"""
Funds API For Digital Portals
Search for mutual funds and ETFs using one single consolidated API, including a criteria-based screener. The API provides also base data, key figures, and holdings. A separate endpoint returns the possible values and value range for the parameters that the endpoint /fund/notation/screener/search accepts: Application developers can request the values and value range only for a restricted set of notations that match predefined parameters. This functionality may be used to pre-fill the values and value ranges of the parameters of the /fund/notation/screener/search endpoint so that performing a search always leads to a non-empty set of notations. This API is fully integrated with the corresponding Quotes API, allowing access to detailed price and performance information of instruments, as well as basic security identifier cross-reference. For direct access to price histories, please refer to the Time Series API for Digital Portals. Similar criteria based screener APIs exist for equity instruments and securitized derivatives: See the Stocks API and the Securitized Derivatives API for details. # noqa: E501
The version of the OpenAPI document: 2
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from fds.sdk.FundsAPIforDigitalPortals.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from fds.sdk.FundsAPIforDigitalPortals.exceptions import ApiAttributeError
def lazy_import():
    """Import sibling model classes at call time and publish them in globals().

    Deferred (rather than imported at module top) so that model modules which
    reference each other are only imported once every module exists — the
    standard OpenAPI-generator pattern for breaking circular imports.
    """
    from fds.sdk.FundsAPIforDigitalPortals.model.fund_notation_screener_search_data_fund import FundNotationScreenerSearchDataFund
    from fds.sdk.FundsAPIforDigitalPortals.model.fund_notation_screener_search_data_performance import FundNotationScreenerSearchDataPerformance
    from fds.sdk.FundsAPIforDigitalPortals.model.fund_notation_screener_search_data_share_class import FundNotationScreenerSearchDataShareClass
    from fds.sdk.FundsAPIforDigitalPortals.model.fund_notation_screener_search_data_validation import FundNotationScreenerSearchDataValidation
    globals()['FundNotationScreenerSearchDataFund'] = FundNotationScreenerSearchDataFund
    globals()['FundNotationScreenerSearchDataPerformance'] = FundNotationScreenerSearchDataPerformance
    globals()['FundNotationScreenerSearchDataShareClass'] = FundNotationScreenerSearchDataShareClass
    globals()['FundNotationScreenerSearchDataValidation'] = FundNotationScreenerSearchDataValidation
class FundNotationScreenerSearchData(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """

    # No enum-restricted attributes on this model.
    allowed_values = {
    }

    # No length/range/regex constraints on this model.
    validations = {
    }

    @cached_property
    def additional_properties_type():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        """
        lazy_import()
        return (bool, date, datetime, dict, float, int, list, str, none_type,)  # noqa: E501

    # Instances of this model may not be serialized as JSON null.
    _nullable = False

    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded

        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        lazy_import()
        return {
            'validation': (FundNotationScreenerSearchDataValidation,),  # noqa: E501
            'fund': (FundNotationScreenerSearchDataFund,),  # noqa: E501
            'share_class': (FundNotationScreenerSearchDataShareClass,),  # noqa: E501
            'performance': (FundNotationScreenerSearchDataPerformance,),  # noqa: E501
        }

    @cached_property
    def discriminator():
        # This model defines no discriminator property.
        return None

    # Maps pythonic attribute names to their JSON keys in the API payload.
    attribute_map = {
        'validation': 'validation',  # noqa: E501
        'fund': 'fund',  # noqa: E501
        'share_class': 'shareClass',  # noqa: E501
        'performance': 'performance',  # noqa: E501
    }

    # No attributes are marked read-only in the OpenAPI document.
    read_only_vars = {
    }

    # Plain (non-composed) schema: no allOf/oneOf/anyOf parts.
    _composed_schemas = {}

    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, *args, **kwargs):  # noqa: E501
        """FundNotationScreenerSearchData - a model defined in OpenAPI

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            validation (FundNotationScreenerSearchDataValidation): [optional]  # noqa: E501
            fund (FundNotationScreenerSearchDataFund): [optional]  # noqa: E501
            share_class (FundNotationScreenerSearchDataShareClass): [optional]  # noqa: E501
            performance (FundNotationScreenerSearchDataPerformance): [optional]  # noqa: E501
        """
        # Pop the framework-internal keyword arguments before treating the
        # remainder as model attributes.
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        self = super(OpenApiModel, cls).__new__(cls)

        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
        return self

    # Internal attributes set directly on the instance (everything else goes
    # through the model's __setattr__ machinery).
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])

    @convert_js_args_to_python_args
    def __init__(self, *args, **kwargs):  # noqa: E501
        """FundNotationScreenerSearchData - a model defined in OpenAPI

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            validation (FundNotationScreenerSearchDataValidation): [optional]  # noqa: E501
            fund (FundNotationScreenerSearchDataFund): [optional]  # noqa: E501
            share_class (FundNotationScreenerSearchDataShareClass): [optional]  # noqa: E501
            performance (FundNotationScreenerSearchDataPerformance): [optional]  # noqa: E501
        """
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
            # NOTE(review): generated code checks the read-only status only
            # AFTER the attribute has already been set; this mirrors upstream
            # OpenAPI-generator behavior and is left unchanged.
            if var_name in self.read_only_vars:
                raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
                                        f"class with read only attributes.")
|
# MMGeneration config: DCGAN architecture at 64x64 trained with a
# least-squares (LSGAN) objective on unconditional LSUN-Bedroom images.
_base_ = [
    '../_base_/models/dcgan_64x64.py',
    '../_base_/datasets/unconditional_imgs_64x64.py',
    '../_base_/default_runtime.py'
]

# Override the base model: single-channel score map from the discriminator
# and the least-squares GAN loss.
model = dict(
    discriminator=dict(output_scale=4, out_channels=1),
    gan_loss=dict(type='GANLoss', gan_type='lsgan'))

# define dataset
# you must set `samples_per_gpu` and `imgs_root`
data = dict(
    samples_per_gpu=128, train=dict(imgs_root='./data/lsun/bedroom_train'))

# Identical Adam settings for generator and discriminator.
optimizer = dict(
    generator=dict(type='Adam', lr=0.0001, betas=(0.5, 0.99)),
    discriminator=dict(type='Adam', lr=0.0001, betas=(0.5, 0.99)))

# adjust running config
lr_config = None  # constant learning rate: no scheduler hook
checkpoint_config = dict(interval=10000, by_epoch=False, max_keep_ckpts=20)
custom_hooks = [
    # Dump a grid of generated samples every 10k iterations.
    dict(
        type='VisualizeUnconditionalSamples',
        output_dir='training_samples',
        interval=10000)
]

# In-training evaluation: FID on 50k generated images every 10k iterations.
evaluation = dict(
    type='GenerativeEvalHook',
    interval=10000,
    metrics=dict(
        type='FID', num_images=50000, inception_pkl=None, bgr2rgb=True),
    sample_kwargs=dict(sample_model='orig'))

total_iters = 100000

# use ddp wrapper for faster training
use_ddp_wrapper = True
find_unused_parameters = False

runner = dict(
    type='DynamicIterBasedRunner',
    is_dynamic_ddp=False,  # Note that this flag should be False.
    pass_training_status=True)

# Offline evaluation metric definitions.
metrics = dict(
    ms_ssim10k=dict(type='MS_SSIM', num_images=10000),
    swd16k=dict(type='SWD', num_images=16384, image_shape=(3, 64, 64)),
    fid50k=dict(type='FID', num_images=50000, inception_pkl=None))
|
from __future__ import print_function
from caffe import layers as L, params as P, to_proto
from caffe.proto import caffe_pb2
import caffe
import numpy as np
import matplotlib.pyplot as plt
import time
import datetime
from PIL import Image
import sys
sys.setrecursionlimit(150000)
# helper function for common structures
def log():
    """Print the current experiment configuration (module globals) to stdout."""
    for label, value in (('device: ', device),
                         ('stages: ', stages),
                         ('deathRate: ', deathRate),
                         ('niter: ', niter),
                         ('lr: ', lr),
                         ('real: ', real)):
        print(label, value)
def conv_factory(bottom, ks, nout, stride=1, pad=0):
    """Convolution -> BatchNorm -> Scale stack (no activation).

    BatchNorm's three internal blobs are frozen (lr_mult=0) because the
    learnable affine part lives in the Scale layer.
    """
    convolution = L.Convolution(
        bottom,
        kernel_size=ks,
        stride=stride,
        num_output=nout,
        pad=pad,
        bias_term=True,
        weight_filler=dict(type='msra'),
        bias_filler=dict(type='constant'))
    normalized = L.BatchNorm(
        convolution,
        in_place=True,
        param=[dict(lr_mult=0, decay_mult=0),
               dict(lr_mult=0, decay_mult=0),
               dict(lr_mult=0, decay_mult=0)])
    return L.Scale(normalized, bias_term=True, in_place=True)
def conv_factory_relu(bottom, ks, nout, stride=1, pad=0):
    """Convolution -> BatchNorm -> Scale followed by an in-place ReLU.

    Builds exactly the same layer stack as conv_factory, with a ReLU on top.
    """
    return L.ReLU(conv_factory(bottom, ks, nout, stride, pad), in_place=True)
#written by me
def residual_factory1(bottom, num_filter):
    """Plain two-conv residual block with an identity shortcut (deterministic sum)."""
    branch = conv_factory_relu(bottom, 3, num_filter, 1, 1)
    branch = conv_factory(branch, 3, num_filter, 1, 1)
    merged = L.Eltwise(bottom, branch, operation=P.Eltwise.SUM)
    return L.ReLU(merged, in_place=True)
def residual_factory2(bottom, num_filter):
    """Two-conv residual block merged by the custom 'RandAdd' python layer
    (module resnet_oc) instead of a plain elementwise sum — the stochastic-
    depth variant of residual_factory1."""
    branch = conv_factory_relu(bottom, 3, num_filter, 1, 1)
    branch = conv_factory(branch, 3, num_filter, 1, 1)
    merged = L.Python(bottom, branch, module='resnet_oc', ntop=1, layer='RandAdd')
    return L.ReLU(merged, in_place=True)
#written by me
def residual_factory_padding1(bottom, num_filter, stride, batch_size, feature_size):
    """Downsampling residual block with a deterministic (Eltwise) merge.

    Main branch: strided conv pair producing num_filter channels. Shortcut:
    average-pool the input and concatenate a zero Input blob to match the
    doubled channel count, then sum both branches.

    Args:
        bottom: input blob (assumed to carry num_filter // 2 channels).
        num_filter: output channel count.
        stride: stride of the first convolution (2 when downsampling).
        batch_size: batch dimension for the padding Input blob.
        feature_size: spatial size of the output feature map.
    """
    conv1 = conv_factory_relu(bottom, ks=3, nout=num_filter, stride=stride, pad=1)
    conv2 = conv_factory(conv1, ks=3, nout=num_filter, stride=1, pad=1)
    pool1 = L.Pooling(bottom, pool=P.Pooling.AVE, kernel_size=2, stride=2)
    # BUG FIX: floor division keeps the blob dimension an int under Python 3
    # (plain `/` yields a float, which is not a valid shape dim). The stray
    # "hey 1" debug prints were also removed.
    padding = L.Input(input_param=dict(
        shape=dict(dim=[batch_size, num_filter // 2, feature_size, feature_size])))
    concate = L.Concat(pool1, padding, axis=1)
    addition = L.Eltwise(concate, conv2, operation=P.Eltwise.SUM)
    relu = L.ReLU(addition, in_place=True)
    return relu
def residual_factory_padding2(bottom, num_filter, stride, batch_size, feature_size):
    """Down-sampling residual block using the stochastic 'RandAdd' python
    layer for the skip/branch merge (stochastic-depth variant of
    residual_factory_padding1).

    :param feature_size: spatial size of the OUTPUT feature map (after the
        stride-2 down-sampling).
    """
    conv1 = conv_factory_relu(bottom, ks=3, nout=num_filter, stride=stride, pad=1)
    conv2 = conv_factory(conv1, ks=3, nout=num_filter, stride=1, pad=1)
    pool1 = L.Pooling(bottom, pool=P.Pooling.AVE, kernel_size=2, stride=2)
    # Floor division keeps the blob dims integral under Python 3; see
    # residual_factory_padding1 for details.
    padding = L.Input(input_param=dict(shape=dict(
        dim=[batch_size, num_filter // 2, int(feature_size), int(feature_size)])))
    concate = L.Concat(pool1, padding, axis=1)
    addition = L.Python(concate, conv2, module='resnet_oc', ntop=1, layer='RandAdd')
    relu = L.ReLU(addition, in_place=True)
    return relu
def resnet(leveldb, batch_size=128, stages=[2, 2, 2, 2], first_output=16):
    """Build a CIFAR-style stochastic-depth ResNet and return its prototxt.

    :param leveldb: path to the LEVELDB data source.
    :param batch_size: minibatch size for the Data layer.
    :param stages: blocks per stage; stages[0] is unused and stages[1:] drive
        the three CIFAR stages (e.g. [2, N+1, N, N] gives 3N residual blocks).
    :param first_output: filter count of the first stage; doubled at every
        stage transition.

    .. note:: relies on the module-level global ``real`` to choose between
        the deterministic Eltwise blocks and the stochastic RandAdd blocks,
        and on ``residual_factory_proj`` in a branch that is never taken.
    """
    feature_size = 32
    data, label = L.Data(source=leveldb, backend=P.Data.LEVELDB, batch_size=batch_size,
                         ntop=2, transform_param=dict(crop_size=feature_size, mirror=True))
    residual = conv_factory_relu(data, 3, first_output, stride=1, pad=1)
    st = 0
    for i in stages[1:]:
        st += 1
        for j in range(i):
            if j == i - 1:
                # Stage transition at the last block of the stage:
                # double the filters, halve the feature map.
                first_output *= 2
                # Floor division keeps feature_size an int so the padding
                # factories emit valid integer blob dimensions (true
                # division would make it a float under Python 3).
                feature_size //= 2
                if i == 0:  # never called
                    residual = residual_factory_proj(residual, first_output, 1)
                # bottleneck layer, but not at the last stage
                elif st != 3:
                    if real:
                        residual = residual_factory_padding1(
                            residual, num_filter=first_output, stride=2,
                            batch_size=batch_size, feature_size=feature_size)
                    else:
                        residual = residual_factory_padding2(
                            residual, num_filter=first_output, stride=2,
                            batch_size=batch_size, feature_size=feature_size)
            else:
                # Regular (non-transition) residual block.
                if real:
                    residual = residual_factory1(residual, first_output)
                else:
                    residual = residual_factory2(residual, first_output)
    glb_pool = L.Pooling(residual, pool=P.Pooling.AVE, global_pooling=True)
    fc = L.InnerProduct(glb_pool, num_output=10, bias_term=True,
                        weight_filler=dict(type='msra'))
    loss = L.SoftmaxWithLoss(fc, label)
    return to_proto(loss)
def make_net(stages, device):
    """Generate the train and test network prototxt files.

    ``device`` is accepted for interface compatibility but not used here.
    """
    targets = (
        ('examples/python_stoch_dep/residual_train.prototxt',
         '/scratch/pas282/caffe/examples/cifar10/cifar10_train_leveldb_padding3',
         128),
        ('examples/python_stoch_dep/residual_test.prototxt',
         '/scratch/pas282/caffe/examples/cifar10/cifar10_test_leveldb_padding3',
         100),
    )
    for proto_path, source_path, bs in targets:
        with open(proto_path, 'w') as f:
            net = resnet(source_path, stages=stages, batch_size=bs)
            print(str(net), file=f)
def make_solver(niter=20000, lr=0.1):
    """Write the Nesterov-momentum solver prototxt used for training."""
    s = caffe_pb2.SolverParameter()
    # Scalar solver fields (order of assignment is irrelevant for protobuf).
    scalar_fields = {
        'random_seed': 0xCAFFE,
        'train_net': 'examples/python_stoch_dep/residual_train.prototxt',
        'test_interval': 10000,
        'max_iter': niter,
        'type': 'Nesterov',
        'base_lr': lr,
        'momentum': 0.9,
        'weight_decay': 1e-4,
        'lr_policy': 'multistep',
        'gamma': 0.1,
        'solver_mode': caffe_pb2.SolverParameter.GPU,
    }
    for field, value in scalar_fields.items():
        setattr(s, field, value)
    s.test_net.append('examples/python_stoch_dep/residual_test.prototxt')
    s.test_iter.append(100)
    # Learning-rate drops at 50% and 75% of training.
    for frac in (0.5, 0.75):
        s.stepvalue.append(int(frac * s.max_iter))
    solver_path = 'examples/python_stoch_dep/solver.prototxt'
    with open(solver_path, 'w') as f:
        f.write(str(s))
def sample_gates():
    """Bernoulli-sample the survival gate of every RandAdd layer.

    Reads the module-level globals ``addtables`` (layer indices) and
    ``solver``. A layer survives with probability 1 - deathRate.
    """
    for idx in addtables:
        layer = solver.net.layers[idx]
        # Equivalent to: False when rand < deathRate, True otherwise.
        layer.gate = bool(np.random.rand(1)[0] >= layer.deathRate)
def show_gates():
    """Print the (gate, deathRate) pair of every RandAdd layer, for debugging.

    Reads the module-level globals ``addtables`` and ``solver``.
    """
    status = []
    for idx in addtables:
        layer = solver.net.layers[idx]
        status.extend([layer.gate, layer.deathRate])
    print(status)
# if __name__ == '__main__':
if True:
    # Experiment configuration. These live at module level on purpose: the
    # helper functions above (resnet, sample_gates, ...) read `real`,
    # `deathRate`, `solver`, etc. as globals.
    device = 1          # GPU id (forwarded to make_net; unused there)
    niter = 200000      # total solver iterations
    N=18                # blocks per stage -> 3N = 54 residual blocks (ResNet-110)
    stages = [2, N+1, N, N]   # stages[0] is unused by resnet()
    deathRate = 0       # stochastic-depth drop probability (0 disables dropping)
    lr = 0.1            # NOTE(review): make_solver is called without lr, so its default is used
    real = True         # True -> deterministic Eltwise merge; False -> RandAdd python layer
    make_net(stages, device)
    make_solver(niter=niter)
# TRAINING THE NET
# execfile("examples/resnet_cifar/generate_final_proto.py")
# date = time.strftime('%Y_%m_%d_%H',time.localtime(time.time()))
#
# caffe.set_device(device)
# caffe.set_mode_gpu()
# solver = None
# solver = caffe.get_solver('examples/resnet_cifar/solver.prototxt')
#
# # to keep the same init with torch code
# std = 1./np.sqrt(solver.net.params['InnerProduct1'][0].shape[1])
# # solver.net.params['InnerProduct1'][0].data[...] = np.random.uniform(-std, std, solver.net.params['InnerProduct1'][0].shape)
# # solver.net.params['InnerProduct1'][1].data[...] = np.random.uniform(-std, std, solver.net.params['InnerProduct1'][1].shape)
#
#
# addtables = []
# for i in range(len(solver.net.layers)):
# if type(solver.net.layers[i]).__name__ == 'RandAdd':
# addtables.append(i)
# for i in range(len(addtables)):
# solver.net.layers[addtables[i]].deathRate = float(i+1)/len(addtables) * deathRate
# solver.net.layers[addtables[i]].train = True
# solver.test_nets[0].layers[addtables[i]].deathRate = float(i+1)/len(addtables) * deathRate
# solver.test_nets[0].layers[addtables[i]].train = False
#
#
#
# batch_size = 128
# iter_per_epoch = int(np.ceil(50000/batch_size))
#
# train_loss = np.zeros(int(np.ceil(niter / iter_per_epoch)) + 1)
# test_error = np.zeros(int(np.ceil(niter / iter_per_epoch)) + 1)
# loss = 0
#
# time_last = datetime.datetime.now()
# sample_gates()
#
# solver.step(1)
# log()
# print ('Iteration\tEpoch\tTest Accuracy\tTraining Loss\tTime')
# for it in range(1, niter):
#
# if it % iter_per_epoch == 0:
# time_now = datetime.datetime.now()
# delta_time = (time_now - time_last).seconds
# time_last = time_now
#
# epoch = it / iter_per_epoch
# correct = 0
#
# for test_it in range(100):
# solver.test_nets[0].forward()
# correct += sum(solver.test_nets[0].blobs['InnerProduct1'].data.argmax(1)
# == solver.test_nets[0].blobs['Data2'].data)
# test_error[epoch] = 1 - correct / 1e4
# train_loss[epoch] = loss / iter_per_epoch
# loss = 0
# print('%d\t\t%d\t\t%0.2f\t\t%0.5f\t\t%ds'% (it, epoch, test_error[epoch]*100, train_loss[epoch], delta_time))
# np.savetxt('examples/resnet_cifar/results/%s_%d_%d_%d_%d_%.2f_%d_%.1f' % (date, niter, stages[1], stages[2], stages[3], lr, niter, deathRate),
# np.column_stack((test_error, train_loss)))
#
# sample_gates()
# solver.step(1)
# loss += solver.net.blobs['SoftmaxWithLoss1'].data
class RandAdd(caffe.Layer):
    """Stochastic-depth merge layer.

    bottom[0] is the identity/skip blob, bottom[1] the residual branch.
    During training the branch is included only when `gate` is True; the
    gate is resampled externally before each step (see sample_gates() in
    the training script). At test time the branch is scaled by its
    expected survival probability (1 - deathRate).
    """
    def setup(self, bottom, top):
        # Expect exactly two inputs: skip connection and residual branch.
        assert len(bottom) == 2
        self.train = False      # set True on the training net by the driver script
        self.gate = False       # resampled before every training step
        self.deathRate = 0      # probability of dropping the residual branch
    def reshape(self, bottom, top):
        # Output mirrors the shape of the skip connection.
        top[0].reshape(*bottom[0].data.shape)
    def forward(self, bottom, top):
        # bottom[0] is skip connection
        if self.train:
            if self.gate:
                top[0].data[...] = bottom[0].data + bottom[1].data
            else:
                # Branch dropped: pass the identity straight through.
                top[0].data[...] = bottom[0].data
        else:
            # Test time: branch weighted by its expected survival rate.
            top[0].data[...] = bottom[0].data + bottom[1].data * (1- self.deathRate)
            # print('test')
    def backward(self, top, propagate_down, bottom):
        if self.train:
            if self.gate:
                bottom[0].diff[...] = top[0].diff
                bottom[1].diff[...] = top[0].diff
            else:
                bottom[0].diff[...] = top[0].diff
                # Dropped branch gets zero gradient. bottom[0] and bottom[1]
                # are summed in forward, so their shapes match and using
                # bottom[0].diff.shape here is safe.
                bottom[1].diff[...] = np.zeros(bottom[0].diff.shape)
        else:
            print("No backward during testing!")
|
import logging
import math
import cmath
import os
from functools import reduce
from six import string_types
import numpy as np
# Ditto imports
from ditto.readers.abstract_reader import AbstractReader
from ditto.store import Store
from ditto.models.position import Position
from ditto.models.node import Node
from ditto.models.line import Line
from ditto.models.load import Load
from ditto.models.phase_load import PhaseLoad
from ditto.models.regulator import Regulator
from ditto.models.wire import Wire
from ditto.models.capacitor import Capacitor
from ditto.models.phase_capacitor import PhaseCapacitor
from ditto.models.powertransformer import PowerTransformer
from ditto.models.power_source import PowerSource
from ditto.models.winding import Winding
from ditto.models.phase_winding import PhaseWinding
from ditto.models.feeder_metadata import Feeder_metadata
from ditto.models.photovoltaic import Photovoltaic
from ditto.models.storage import Storage
from ditto.models.phase_storage import PhaseStorage
from ditto.models.base import Unicode
from ditto.modify.system_structure import system_structure_modifier
logger = logging.getLogger(__name__)
class Reader(AbstractReader):
"""
CYME-->DiTTo Reader class
Author: Nicolas Gensollen. October 2017
.. note::
Different versions of CYME might have different header names for the same object.
The reader class has a mapping between the objects and the header names with the default mapping being for CYME version XXX (see table below).
When using another version of CYME, make sure to modify this mapping to have something consistent:
>>> my_reader.update_header_mapping(modifications)
Here, modification is a dictionary {object: header} of updates to apply to the default mapping.
**Default header mapping:**
+-------------------------------------------+--------------------------------------------+
| Object | Header |
+===========================================+============================================+
| NODE PARSER |
+-------------------------------------------+--------------------------------------------+
| 'node' | '[NODE]' |
+-------------------------------------------+--------------------------------------------+
| LINE PARSER |
+-------------------------------------------+--------------------------------------------+
| 'overhead_unbalanced_line_settings' | '[OVERHEADLINEUNBALANCED SETTING]' |
+-------------------------------------------+--------------------------------------------+
| 'overhead_line_settings' | '[OVERHEADLINE SETTING]' |
+-------------------------------------------+--------------------------------------------+
| 'overhead_byphase_settings' | '[OVERHEAD BYPHASE SETTING]' |
+-------------------------------------------+--------------------------------------------+
| 'underground_line_settings' | '[UNDERGROUNDLINE SETTING]' |
+-------------------------------------------+--------------------------------------------+
| 'switch_settings' | '[SWITCH SETTING]' |
+-------------------------------------------+--------------------------------------------+
| 'fuse_settings' | '[FUSE SETTING]' |
+-------------------------------------------+--------------------------------------------+
| 'recloser_settings' | '[RECLOSER SETTING]' |
+-------------------------------------------+--------------------------------------------+
| 'section' | '[SECTION]' |
+-------------------------------------------+--------------------------------------------+
| 'line' | '[LINE]' |
+-------------------------------------------+--------------------------------------------+
| 'unbalanced_line' | '[LINE UNBALANCED]' |
+-------------------------------------------+--------------------------------------------+
| 'spacing_table' | '[SPACING TABLE FOR LINE]' |
+-------------------------------------------+--------------------------------------------+
| 'concentric_neutral_cable' | '[CONCENTRIC NEUTRAL CABLE]' |
+-------------------------------------------+--------------------------------------------+
| 'conductor' | '[CONDUCTOR]' |
+-------------------------------------------+--------------------------------------------+
| CAPACITOR PARSER |
+-------------------------------------------+--------------------------------------------+
| 'serie_capacitor_settings' | '[SERIES CAPACITOR SETTING]' |
+-------------------------------------------+--------------------------------------------+
| 'shunt_capacitor_settings' | '[SHUNT CAPACITOR SETTING]' |
+-------------------------------------------+--------------------------------------------+
| 'serie_capacitor' | '[SERIES CAPACITOR]' |
+-------------------------------------------+--------------------------------------------+
| 'shunt_capacitor' | '[SHUNT CAPACITOR]' |
+-------------------------------------------+--------------------------------------------+
| TRANSFORMER PARSER |
+-------------------------------------------+--------------------------------------------+
| 'auto_transformer_settings' | '[AUTO TRANSFORMER SETTING]' |
+-------------------------------------------+--------------------------------------------+
| 'grounding_transformer_settings' | '[GROUNDINGTRANSFORMER SETTINGS]' |
+-------------------------------------------+--------------------------------------------+
| 'three_winding_auto_transformer_settings' | '[THREE WINDING AUTO TRANSFORMER SETTING]' |
+-------------------------------------------+--------------------------------------------+
| 'three_winding_transformer_settings' | '[THREE WINDING TRANSFORMER SETTING]' |
+-------------------------------------------+--------------------------------------------+
| 'transformer_settings' | '[TRANSFORMER SETTING]' |
+-------------------------------------------+--------------------------------------------+
| 'auto_transformer' | '[AUTO TRANSFORMER]' |
+-------------------------------------------+--------------------------------------------+
| 'grounding_transformer' | '[GROUNDING TRANSFORMER]' |
+-------------------------------------------+--------------------------------------------+
| 'three_winding_auto_transformer' | '[THREE WINDING AUTO TRANSFORMER]' |
+-------------------------------------------+--------------------------------------------+
| 'three_winding_transformer' | '[THREE WINDING TRANSFORMER]' |
+-------------------------------------------+--------------------------------------------+
| 'transformer' | '[TRANSFORMER]' |
+-------------------------------------------+--------------------------------------------+
| 'phase_shifter_transformer' | '[PHASE SHIFTER TRANSFORMER]' |
+-------------------------------------------+--------------------------------------------+
| REGULATOR PARSER |
+-------------------------------------------+--------------------------------------------+
| 'regulator_settings' | '[REGULATOR SETTING]' |
+-------------------------------------------+--------------------------------------------+
| 'regulator' | '[REGULATOR]' |
+-------------------------------------------+--------------------------------------------+
| LOAD PARSER |
+-------------------------------------------+--------------------------------------------+
| 'customer_loads' | '[CUSTOMER LOADS]' |
+-------------------------------------------+--------------------------------------------+
| 'customer_class' | '[CUSTOMER CLASS]' |
+-------------------------------------------+--------------------------------------------+
| 'loads' | '[LOADS]' |
+-------------------------------------------+--------------------------------------------+
| DISTRIBUTED GENERATION PARSER |
+-------------------------------------------+--------------------------------------------+
| 'converter' | '[CONVERTER]' |
+-------------------------------------------+--------------------------------------------+
| 'converter_control_settings' | '[CONVERTER CONTROL SETTING]' |
+-------------------------------------------+--------------------------------------------+
| 'photovoltaic_settings' | '[PHOTOVOLTAIC SETTINGS]' |
| | '[ELECTRONIC CONVERTER GENERATOR SETTING]' |
+-------------------------------------------+--------------------------------------------+
| 'long_term_dynamics_curve_ext' | '[LONG TERM DYNAMICS CURVE EXT]' |
+-------------------------------------------+--------------------------------------------+
| 'dggenerationmodel' | '[DGGENERATIONMODEL]' |
+-------------------------------------------+--------------------------------------------+
| 'bess_settings' | '[BESS SETTINGS]' |
+-------------------------------------------+--------------------------------------------+
| 'bess' | '[BESS]' |
+-------------------------------------------+--------------------------------------------+
"""
register_names = ["cyme", "Cyme", "CYME"]
def __init__(self, **kwargs):
"""
CYME-->DiTTo class constructor
"""
# Call super
super(Reader, self).__init__(**kwargs)
# Setting the file names and path
#
# Set the path to the CYME data files
if "data_folder_path" in kwargs:
self.data_folder_path = kwargs["data_folder_path"]
# Default is current directory
else:
self.data_folder_path = "."
# Set the name of the network file
if "network_filename" in kwargs:
self.network_filename = kwargs["network_filename"]
else:
self.network_filename = "network.txt"
# Set the name of the equipment file
if "equipment_filename" in kwargs:
self.equipment_filename = kwargs["equipment_filename"]
else:
self.equipment_filename = "equipment.txt"
# Set the name of the load file
if "load_filename" in kwargs:
self.load_filename = kwargs["load_filename"]
else:
self.load_filename = "load.txt"
# Set the Network Type to be None. This is set in the parse_sections() function
self.network_type = None
# Header_mapping.
#
# Modify this structure if the headers of your CYME version are not the default one.
# Modification done by the 'update_header_mapping' method
#
self.header_mapping = { # NODES
"node": ["[NODE]"],
# LINES
"overhead_unbalanced_line_settings": ["[OVERHEADLINEUNBALANCED SETTING]"],
"overhead_line_settings": ["[OVERHEADLINE SETTING]"],
"overhead_byphase_settings": ["[OVERHEAD BYPHASE SETTING]"],
"underground_line_settings": ["[UNDERGROUNDLINE SETTING]"],
"switch": ["[SWITCH]"],
"switch_settings": ["[SWITCH SETTING]"],
"sectionalizer": ["[SECTIONALIZER]"],
"sectionalizer_settings": ["[SECTIONALIZER SETTING]"],
"fuse": ["[FUSE]"],
"fuse_settings": ["[FUSE SETTING]"],
"recloser": ["[RECLOSER]"],
"recloser_settings": ["[RECLOSER SETTING]"],
"breaker": ["[BREAKER]"],
"breaker_settings": ["[BREAKER SETTING]"],
"section": ["[SECTION]"],
"line": ["[LINE]"],
"unbalanced_line": ["[LINE UNBALANCED]"],
"spacing_table": ["[SPACING TABLE FOR LINE]"],
"conductor": ["[CONDUCTOR]"],
"cable": ["[CABLE]"],
"concentric_neutral_cable": [
"[CABLE CONCENTRIC NEUTRAL]",
"[CONCENTRIC NEUTRAL CABLE]",
],
"network_protector": ["[NETWORKPROTECTOR]"],
"network_protector_settings": ["[NETWORKPROTECTOR SETTING]"],
# CAPACITORS
"serie_capacitor_settings": ["[SERIES CAPACITOR SETTING]"],
"shunt_capacitor_settings": ["[SHUNT CAPACITOR SETTING]"],
"serie_capacitor": ["[SERIES CAPACITOR]"],
"shunt_capacitor": ["[SHUNT CAPACITOR]"],
# TRANSFORMERS
"auto_transformer_settings": ["[AUTO TRANSFORMER SETTING]"],
"grounding_transformer_settings": ["[GROUNDINGTRANSFORMER SETTINGS]"],
"three_winding_auto_transformer_settings": [
"[THREE WINDING AUTO TRANSFORMER SETTING]"
],
"three_winding_transformer_settings": [
"[THREE WINDING TRANSFORMER SETTING]"
],
"transformer_settings": ["[TRANSFORMER SETTING]"],
"phase_shifter_transformer_settings": [
"[PHASE SHIFTER TRANSFORMER SETTING]"
],
"auto_transformer": ["[AUTO TRANSFORMER]"],
"grounding_transformer": ["[GROUNDING TRANSFORMER]"],
"three_winding_auto_transformer": ["[THREE WINDING AUTO TRANSFORMER]"],
"three_winding_transformer": ["[THREE WINDING TRANSFORMER]"],
"transformer": ["[TRANSFORMER]"],
"phase_shifter_transformer": ["[PHASE SHIFTER TRANSFORMER]"],
# REGULATORS
"regulator_settings": ["[REGULATOR SETTING]"],
"regulator": ["[REGULATOR]"],
# LOADS
"customer_loads": ["[CUSTOMER LOADS]"],
"customer_class": ["[CUSTOMER CLASS]"],
"loads": ["[LOADS]"],
"source": ["[SOURCE]"],
"headnodes": ["[HEADNODES]"],
"source_equivalent": ["[SOURCE EQUIVALENT]"],
# DISTRIBUTED GENERATION
"converter": ["[CONVERTER]"],
"converter_control_settings": ["[CONVERTER CONTROL SETTING]"],
"photovoltaic_settings": [
"[PHOTOVOLTAIC SETTINGS]",
"[ELECTRONIC CONVERTER GENERATOR SETTING]",
],
"long_term_dynamics_curve_ext": ["[LONG TERM DYNAMICS CURVE EXT]"],
"dggenerationmodel": ["[DGGENERATIONMODEL]"],
"bess_settings": ["[BESS SETTINGS]"],
"bess": ["[BESS]"],
# SUBSTATIONS
"substation": ["[SUBSTATION]"],
"subnetwork_connections": ["[SUBNETWORK CONNECTIONS]"],
}
def update_header_mapping(self, update):
"""
This method changes the default object<->header mapping.
This can be useful when using a different version of CYME for example.
**Usage:**
>>> my_reader.update_header_mapping(modifications)
:param update: New object<->header mapping
:type update: dict
"""
# Check that the update is a Python dict
if not isinstance(update, dict):
raise ValueError(
"update_header_mapping expects a dictionary. A {type} instance was provided".format(
type(update)
)
)
# Instanciate new header mapping
new_mapping = {k: [] for k in self.header_mapping.keys()}
# Loop over the default header mapping and update as requested
for key, value in self.header_mapping.items():
if key in update and update[key] not in value:
new_mapping[key].append(update[key])
else:
new_mapping[key].append(value)
# Basic safety check
if len(new_mapping) != len(self.header_mapping):
raise ValueError("Error in the update header mapping process.")
# Replace the old mapping by the new one
self.header_mapping = new_mapping
def get_file_content(self, filename):
"""
Open the requested file and returns the content.
For convinience, filename can be either the full file path or:
-'network': Will get the content of the network file given in the constructor
-'equipment': Will get the content of the equipment file given in the constructor
-'load': Will get the content of the load file given in the constructor
"""
# Shortcut mapping
if filename == "network":
filename = os.path.join(self.data_folder_path, self.network_filename)
elif filename == "equipment":
filename = os.path.join(self.data_folder_path, self.equipment_filename)
elif filename == "load":
filename = os.path.join(self.data_folder_path, self.load_filename)
# Open the file and get the content
try:
with open(filename, "r") as f:
content_ = f.readlines()
except:
logger.warning("Unable to open file {name}".format(name=filename))
content_ = []
pass
self.content = iter(content_)
def phase_mapping(self, CYME_value):
"""
Maps the CYME phase value format to a list of ABC phases:
+------------+--------------+
| CYME value | Return value |
+============+==============+
| 0 | [None] |
+------------+--------------+
| 1 | ['A'] |
+------------+--------------+
| 2 | ['B'] |
+------------+--------------+
| 3 | ['C'] |
+------------+--------------+
| 4 | ['A','B'] |
+------------+--------------+
| 5 | ['A','C'] |
+------------+--------------+
| 6 | ['B','C'] |
+------------+--------------+
| 7 | ['A','B','C']|
+------------+--------------+
.. note::
If the value provided is not an integer in [0,7], the function assumes that it receives a string like 'ABC'. In this case, it splits the string in a list of phases ['A','B','C'].
"""
if CYME_value == 0:
return [None]
elif CYME_value == 1:
return ["A"]
elif CYME_value == 2:
return ["B"]
elif CYME_value == 3:
return ["C"]
elif CYME_value == 4:
return ["A", "B"]
elif CYME_value == 5:
return ["A", "C"]
elif CYME_value == 6:
return ["B", "C"]
elif CYME_value == 7:
return ["A", "B", "C"]
else:
return list(CYME_value)
def phase_to_num(self, phase):
"""
Maps phase in 'A', 'B', 'C' format in 1, 2, 3 format.
**Mapping:**
+--------+-------+
| letter | digit |
+========+=======+
| 'A' | 1 |
+--------+-------+
| 'B' | 2 |
+--------+-------+
| 'C' | 3 |
+--------+-------+
"""
if phase == "A" or phase == "a":
return "1"
elif phase == "B" or phase == "b":
return "2"
elif phase == "C" or phase == "c":
return "3"
else:
return phase
def load_value_type_mapping(self, load_type, value1, value2):
"""
CYME customer loads provide two values v1 and v2 as well as a load value type:
This function takes these as inputs and outputs P and Q of the load.
:param load_type: CYME load type
:type load_type: int or str (see table below)
:param value1: Value 1
:type value1: float
:param value2: Value 2
:type value2: float
:returns: P and Q
:rtype: KW and KVAR
**Mapping:**
+-----------+------------+-----------------+------------------------------------------+
| type code | type value | P | Q |
+===========+============+=================+==========================================+
| 0 | KW_KVAR | :math:`v_1` | :math:`v_2` |
+-----------+------------+-----------------+------------------------------------------+
| 1 | KVA_PF | :math:`v_1 v_2` | :math:`v_1 \\sqrt{1-v_2^2}` |
+-----------+------------+-----------------+------------------------------------------+
| 2 | KW_PF | :math:`v_1` | :math:`\\frac{v_1}{v_2} \\sqrt{1-v_2^2}` |
+-----------+------------+-----------------+------------------------------------------+
| 3 | AMP_PF | ?? | ?? |
+-----------+------------+-----------------+------------------------------------------+
"""
if not isinstance(value1, float):
try:
value1 = float(value1)
except:
raise ValueError(
"Value1={} could not be converted to float in load_value_type_mapping.".format(
value1
)
)
if not isinstance(value2, float):
try:
value2 = float(value2)
except:
raise ValueError(
"Value2={} could not be converted to float in load_value_type_mapping.".format(
value2
)
)
if isinstance(load_type, string_types):
if load_type == "0" or load_type.lower() == "kw_kvar":
return value1, value2
if load_type == "1" or load_type.lower() == "kva_pf":
return value1 * value2, value1 * np.sqrt(1 - value2 ** 2)
if load_type == "2" or load_type.lower() == "kw_pf":
return value1, value1 / value2 * np.sqrt(1 - value2 ** 2)
if load_type == "3" or load_type.lower() == "amp_pf":
raise NotImplementedError("AMP_PF load type not implemented yet.")
elif isinstance(load_type, int):
if load_type == 0:
return value1, value2
if load_type == 1:
return value1 * value2, value1 * np.sqrt(1 - value2 ** 2)
if load_type == 2:
return value1, value1 / value2 * np.sqrt(1 - value2 ** 2)
if load_type == 3:
raise NotImplementedError("AMP_PF load type not implemented yet.")
else:
raise ValueError(
"load_value_type_mapping expects a string or integer for load_type. {} was provided.".format(
type(load_type)
)
)
def capacitors_connection_mapping(self, conn):
"""
Maps the capacitors connection in CYME (CAP_CONN) to DiTTo connection_type.
:param conn: Connection in CYME
:type conn: integer or string
:returns: Connection in DiTTo
:rtype: str
**Mapping:**
+---------------+-----------------------+
| CYME CAP_CONN | DiTTo connection_type |
+===============+=======================+
| 0 or 'Y' | 'Y' |
+---------------+-----------------------+
| 1 or 'YNG' | 'Y' |
+---------------+-----------------------+
| 2 or 'D' | 'D' |
+---------------+-----------------------+
"""
if not isinstance(conn, (string_types, int)):
raise ValueError(
"capacitors_connection_mapping only accepts int or string. {} was provided.".format(
type(conn)
)
)
if conn == 0 or conn == "0" or conn == "Y":
return "Y"
elif conn == 1 or conn == "1" or conn == "YNG":
return "Y"
elif conn == 2 or conn == "2" or conn == "D":
return "D"
else:
return conn
def connection_configuration_mapping(self, value):
"""
Map the connection configuration from CYME to DiTTo.
**Mapping:**
+----------+----------------+------------+
| Value | CYME | DiTTo |
+==========+================+============+
| 0 or '0' | 'Yg' | 'Y' |
+----------+----------------+------------+
| 1 or '1' | 'Y' | 'Y' |
+----------+----------------+------------+
| 2 or '2' | 'Delta' | 'D' |
+----------+----------------+------------+
| 3 or '3' | 'Open Delta' | 'D' |
+----------+----------------+------------+
| 4 or '4' | 'Closed Delta' | 'D' |
+----------+----------------+------------+
| 5 or '5' | 'Zg' | 'Z' |
+----------+----------------+------------+
| 6 or '6' | 'CT' | NOT MAPPED |
+----------+----------------+------------+
| 7 or '7' | 'Dg' | NOT MAPPED |
+----------+----------------+------------+
"""
if isinstance(value, int):
if value in [0, 1]:
return "Y"
if value in [2, 3, 4]:
return "D"
if value == 5:
return "Z"
if value in [6, 7]:
raise NotImplementedError(
"Connection {} not implemented.".format(value)
)
elif isinstance(value, string_types):
if (
value == "0"
or value.lower() == "yg"
or value == "1"
or value.lower() == "y"
):
return "Y"
if (
value == "2"
or value.lower() == "delta"
or value == "3"
or value.lower() == "open delta"
or value == "4"
or value.lower() == "closed delta"
):
return "D"
if value == "5" or value.lower() == "zg":
return "Z"
if (
value == "6"
or value.lower() == "ct"
or value == "7"
or value.lower() == "dg"
):
raise NotImplementedError(
"Connection {} not implemented.".format(value)
)
else:
raise ValueError(
"connection_configuration_mapping expects an integer or a string. {} was provided.".format(
type(value)
)
)
def transformer_connection_configuration_mapping(self, value, winding):
"""
Map the connection configuration for transformer (2 windings) objects from CYME to DiTTo.
:param value: CYME value (either string or id)
:type value: int or str
:param winding: Number of the winding (0 or 1)
:type winding: int
:returns: DiTTo connection configuration for the requested winding
:rtype: str
**Mapping:**
+----------+----------------+------------+
| Value | CYME | DiTTo |
+----------+----------------+-----+------+
| | | 1st | 2nd |
+==========+================+=====+======+
| 0 or '0' | 'Y_Y' | 'Y' | 'Y' |
+----------+----------------+-----+------+
| 1 or '1' | 'D_Y' | 'D' | 'Y' |
+----------+----------------+-----+------+
| 2 or '2' | 'Y_D' | 'Y' | 'D' |
+----------+----------------+-----+------+
| 3 or '3' | 'YNG_YNG' | 'Y' | 'Y' |
+----------+----------------+-----+------+
| 4 or '4' | 'D_D' | 'D' | 'D' |
+----------+----------------+-----+------+
| 5 or '5' | 'DO_DO' | 'D' | 'D' |
+----------+----------------+-----+------+
| 6 or '6' | 'YO_DO' | 'Y' | 'D' |
+----------+----------------+-----+------+
| 7 or '7' | 'D_YNG' | 'D' | 'Y' |
+----------+----------------+-----+------+
| 8 or '8' | 'YNG_D' | 'Y' | 'D' |
+----------+----------------+-----+------+
| 9 or '9' | 'Y_YNG' | 'Y' | 'Y' |
+----------+----------------+-----+------+
|10 or '10'| 'YNG_Y' | 'Y' | 'Y' |
+----------+----------------+-----+------+
|11 or '11'| 'Yg_Zg' | 'Y' | 'Z' |
+----------+----------------+-----+------+
|12 or '12'| 'D_Zg' | 'D' | 'Z' |
+----------+----------------+-----+------+
"""
if winding not in [0, 1]:
raise ValueError(
"transformer_connection_configuration_mapping expects an integer 0 or 1 for winding arg. {} was provided.".format(
winding
)
)
res = (None, None)
if isinstance(value, int):
if value == 0 or value == 3 or value == 9 or value == 10:
res = ("Y", "Y")
if value == 1 or value == 7:
res = ("D", "Y")
if value == 2 or value == 6 or value == 8:
res = ("Y", "D")
if value == 4 or value == 5:
res = ("D", "D")
if value == 11:
res = ("Y", "Z")
if value == 12:
res = ("D", "Z")
elif isinstance(value, string_types):
if value == "0" or value.lower() == "y_y":
res = ("Y", "Y")
if value == "1" or value.lower() == "d_y":
res = ("D", "Y")
if value == "2" or value.lower() == "y_d":
res = ("Y", "D")
if value == "3" or value.lower() == "yng_yng":
res = ("Y", "Y")
if value == "4" or value.lower() == "d_d":
res = ("D", "D")
if value == "5" or value.lower() == "do_do":
res = ("D", "D")
if value == "6" or value.lower() == "yo_do":
res = ("Y", "D")
if value == "7" or value.lower() == "d_yng":
res = ("D", "Y")
if value == "8" or value.lower() == "yng_d":
res = ("Y", "D")
if value == "9" or value.lower() == "y_yng":
res = ("Y", "Y")
if value == "10" or value.lower() == "yng_y":
res = ("Y", "Y")
if value == "11" or value.lower() == "yg_zg":
res = ("Y", "Z")
if value == "12" or value.lower() == "d_zg":
res = ("D", "Z")
else:
raise ValueError(
"transformer_connection_configuration_mapping expects an integer or a string. {} was provided.".format(
type(value)
)
)
return res[winding]
def check_object_in_line(self, line, obj):
"""
Check if the header corresponding to object is in the given line.
:param line: Text line from CYME ASCII file
:type line: str
:param obj: Object of interest that exists in the mapping
:type obj: str
:returns: True if the header is in line. False otherwise.
:rtype: bool
"""
# Safety checks
if not isinstance(line, string_types):
raise ValueError(
"check_object_in_line expects a string for both line and object. A {type} instance was provided for line.".format(
type=type(line)
)
)
if not isinstance(obj, string_types):
raise ValueError(
"check_object_in_line expects a string for both line and object. A {type} instance was provided for object.".format(
type=type(obj)
)
)
if not obj in self.header_mapping:
raise ValueError(
"{obj} is not a valid object name for the object<->header mapping.{mapp}".format(
obj=obj, mapp=self.header_mapping
)
)
return np.any([x in line for x in self.header_mapping[obj]])
def parser_helper(self, line, obj_list, attribute_list, mapping, *args):
    """
    .. warning:: This is a helper function for the parsers. Do not use directly.

    Takes as input the list of objects we want to parse as well as the list of attributes we want to extract.
    Also takes the default positions of the attributes (mapping).
    The function returns a dict keyed by record ID (first CSV column), where each
    value is a dictionary containing the values of the desired attributes of a CYME object.

    :param line: Current text line from the CYME ASCII file
    :param obj_list: Object names whose headers may appear on this line
    :param attribute_list: Attribute names to extract (list or numpy array of str)
    :param mapping: Default attribute->column positions, overridden when a FORMAT line follows the header
    :param args: Optional; if args[0] is a dict, its key/values are merged into every parsed record
    :returns: dict of record-ID -> dict of attribute values
    """
    if isinstance(attribute_list, list):
        attribute_list = np.array(attribute_list)
    if not isinstance(attribute_list, np.ndarray):
        raise ValueError("Could not cast attribute list to Numpy array.")
    # Optional extra key/value pairs copied into every parsed record
    if args and isinstance(args[0], dict):
        additional_information = args[0]
    else:
        additional_information = {}
    result = {}
    # Check the presence of headers in the given line
    checks = [self.check_object_in_line(line, obj) for obj in obj_list]
    # If we have at least one
    if any(checks):
        # Get the next line (advances the shared self.content iterator)
        next_line = next(self.content)
        # If the next line provides the format, then grab it
        if "format" in next_line.lower():
            try:
                # Rebuild the attribute->column mapping from the FORMAT line
                mapping = {}
                arg_list = next_line.split("=")[1]
                arg_list = arg_list.split(",")
                # Put everything in lower case
                arg_list = map(lambda x: x.lower().strip("\r\n"), arg_list)
                arg_list = map(lambda x: x.strip("\n"), arg_list)
                arg_list = map(lambda x: x.strip("\r"), arg_list)
                # We want the attributes in the attribute list
                for idx, arg in enumerate(arg_list):
                    temp = np.argwhere(arg == attribute_list).flatten()
                    if len(temp) == 1:
                        idx2 = temp[0]
                        mapping[attribute_list[idx2]] = idx
            except:
                # Best-effort: if the FORMAT line is malformed, mapping may be
                # left empty and the per-attribute lookups below silently skip.
                pass
            next_line = next(self.content)
        # At this point, we should have the mapping for the parameters of interest
        # while next_line[0] not in ['[','',' ','\n','\r\n']:
        while len(next_line) > 2:
            # Lines containing '=' are assignments (e.g. Feeder=...), not records
            if "=" not in next_line.lower():
                data = next_line.split(",")
                ID = data[0].strip()
                if len(data) > 1:
                    # Disambiguate duplicate IDs by appending '*'
                    while ID in result:
                        ID += "*"
                    result[ID] = {}
                    for k in attribute_list:
                        try:
                            result[ID][k] = data[mapping[k]]
                        except:
                            # Attribute absent from this record: skip silently
                            pass
                    result[ID].update(additional_information)
            try:
                next_line = next(self.content)
            except StopIteration:
                # End of file reached while consuming records
                break
    return result
def parse(self, model, **kwargs):
    """
    Parse the CYME model to DiTTo.

    :param model: DiTTo model
    :type model: DiTTo model
    :param verbose: Set the verbose mode. Optional. Default=True
    :type verbose: bool
    """
    verbose_option = kwargs.get("verbose")
    # Only honor the flag when it is an actual boolean
    self.verbose = verbose_option if isinstance(verbose_option, bool) else False
    if self.verbose:
        logger.info("Parsing the header...")
    self.parse_header()
    logger.info("Parsing the sections...")
    self.parse_sections(model)
    logger.info("Parsing the sources...")
    self.parse_sources(model)
    # Delegate the common parsing work to the abstract reader
    super(Reader, self).parse(model, **kwargs)
    # self.network_type is set inside parse_sections()
    if self.network_type == "substation":
        logger.info("Parsing the subnetwork connections...")
        self.parse_subnetwork_connections(model)
    else:
        logger.info("Parsing the Headnodes...")
        self.parse_head_nodes(model)
    model.set_names()
    modifier = system_structure_modifier(model)
    modifier.set_nominal_voltages_recur()
    modifier.set_nominal_voltages_recur_line()
def parse_header(self):
    """
    Parse the information available in the header.

    Here, we are interested in the version of CYME used in the provided files,
    as well as the unit system used. Since the reader was developed using the
    documentation for CYME v.8.0, give a warning if the version is different.
    The user is then responsible to check the differences between the two versions.

    :raises ValueError: if neither [SI] nor [IMPERIAL] is found in the file.
    """
    cyme_version = None
    self.use_SI = None
    # Open any file. For example the network file
    self.get_file_content("network")
    for line in self.content:
        if "cyme_version" in line.lower():
            try:
                cyme_version = line.split("=")[1].strip()
            except:
                pass
            if cyme_version is not None:
                logger.info("---| Cyme_version={v} |---".format(v=cyme_version))
                if "." in cyme_version:
                    # Bug fix: the original compared the string parts against the
                    # integers 8 and 0 (always unequal) with "and", so the warning
                    # fired for EVERY dotted version, including 8.0. It also left
                    # a/b unbound (NameError) when split() failed. Compare strings
                    # and warn only when the version is actually not 8.0.
                    major = minor = None
                    try:
                        major, minor = cyme_version.split(".")
                    except:
                        pass
                    if major is not None and (major != "8" or minor != "0"):
                        logger.warning(
                            "Warning. The current CYME--->DiTTo reader was developed with documentation of CYME 8.0. Your version is {}. You might want to check the differences between the two.".format(
                                cyme_version
                            )
                        )
        if "[si]" in line.lower():
            self.use_SI = True
            logger.debug("Unit system used = S.I")
        if "[imperial]" in line.lower():
            self.use_SI = False
            logger.debug("Unit system used = Imperial")
    self.cyme_version = cyme_version
    # Without a unit system we cannot interpret any physical quantity
    if self.use_SI is None:
        raise ValueError(
            "Could not find [SI] or [IMPERIAL] unit system information. Unable to parse."
        )
def parse_subnetwork_connections(self, model):
    """Parse the subnetwork connections.

    These specify the interconnection points for a substation; each matching
    node in the model gets flagged with ``is_substation_connection = 1``.
    """
    model.set_names()
    self.get_file_content("network")
    # Default column position for the node id field
    field_positions = {"nodeid": 1}
    self.subnetwork_connections = {}
    for line in self.content:
        parsed = self.parser_helper(
            line,
            ["subnetwork_connections"],
            ["nodeid"],
            field_positions,
        )
        self.subnetwork_connections.update(parsed)
    # Flag every referenced node as a substation connection point
    for connection in self.subnetwork_connections.values():
        model[connection["nodeid"]].is_substation_connection = 1
def parse_head_nodes(self, model):
    """Parse the [HEADNODES] objects and build Feeder_metadata DiTTo objects
    which define the feeder names and feeder headnodes."""
    # Open the network file
    self.get_file_content("network")
    # Default positions: head node name and feeder (network) name
    field_positions = {
        "nodeid": 0,
        "networkid": 1,
    }
    headnodes = {}
    for line in self.content:
        parsed = self.parser_helper(
            line, ["headnodes"], ["nodeid", "networkid"], field_positions
        )
        headnodes.update(parsed)
    # One Feeder_metadata object per headnode record
    for headnode in headnodes.values():
        metadata = Feeder_metadata(model)
        metadata.name = headnode["networkid"].strip().lower()
        metadata.headnode = headnode["nodeid"].strip().lower()
def parse_sources(self, model):
    """Parse the sources.

    Reads [SOURCE] and [SOURCE EQUIVALENT] records from the network file and
    [SUBSTATION] records from the equipment file, then builds one DiTTo
    PowerSource per source. If no explicit sources exist, falls back to the
    default source-equivalent records.

    :param model: DiTTo model
    :type model: DiTTo model
    """
    # Open the network file
    self.get_file_content("network")
    # Default column positions (used when no FORMAT line overrides them)
    mapp = {"sourceid": 0, "nodeid": 2, "networkid": 3, "desiredvoltage": 4}
    mapp_source_equivalent = {
        "nodeid": 0,
        "voltage": 1,
        "operatingangle1": 2,
        "operatingangle2": 3,
        "operatingangle3": 4,
        "positivesequenceresistance": 5,
        "positivesequencereactance": 6,
        "zerosequenceresistance": 7,
        "zerosequencereactance": 8,
        "configuration": 9,
    }
    mapp_sub = {"id": 0, "mva": 1, "kvll": 6, "conn": 14}
    sources = {}
    subs = {}
    source_equivalents = {}
    for line in self.content:
        sources.update(
            self.parser_helper(
                line,
                ["source"],
                ["sourceid", "nodeid", "networkid", "desiredvoltage"],
                mapp,
            )
        )
        # NOTE(review): "zerosequencereactance"/"zerosequenceresistance" appear
        # in swapped order below relative to the mapping dict; harmless since
        # parser_helper matches attributes by name, not list position.
        # "basemva" and "loadmodelname" have no default positions and rely on
        # a FORMAT line being present in the file.
        source_equivalents.update(
            self.parser_helper(
                line,
                ["source_equivalent"],
                [
                    "nodeid",
                    "voltage",
                    "operatingangle1",
                    "operatingangle2",
                    "operatingangle3",
                    "positivesequenceresistance",
                    "positivesequencereactance",
                    "zerosequencereactance",
                    "zerosequenceresistance",
                    "configuration",
                    "basemva",
                    "loadmodelname",
                ],
                mapp_source_equivalent,
            )
        )
    # Substation equipment data lives in the equipment file
    self.get_file_content("equipment")
    for line in self.content:
        subs.update(
            self.parser_helper(
                line, ["substation"], ["id", "mva", "kvll", "conn"], mapp_sub
            )
        )
    if len(sources.items()) == 0:
        # No explicit sources: build PowerSources from the source equivalents
        for sid, source_equivalent_data in source_equivalents.items():
            if source_equivalent_data["loadmodelname"].lower() != "default":
                continue  # Want to only use the default source equivalent configuration
            # Locate the section attached to this source node.
            # NOTE(review): if no section matches, _from/phases remain unbound
            # (NameError below) or keep stale values from a prior iteration —
            # TODO confirm whether the input files guarantee a match.
            for k, v in self.section_phase_mapping.items():
                if v["fromnodeid"] == source_equivalent_data["nodeid"]:
                    sectionID = k
                    _from = v["fromnodeid"]
                    _to = v["tonodeid"]
                    phases = list(v["phase"])
                if (
                    v["tonodeid"] == source_equivalent_data["nodeid"]
                ):  # In case the edge is connected backwards
                    sectionID = k
                    _from = v["tonodeid"]
                    _to = v["fromnodeid"]
                    phases = list(v["phase"])
            try:
                api_source = PowerSource(model)
            except:
                pass
            api_source.name = _from + "_src"
            # Voltage given in kV in the file; DiTTo stores volts
            try:
                api_source.nominal_voltage = (
                    float(source_equivalent_data["voltage"]) * 10 ** 3
                )
            except:
                pass
            try:
                api_source.phases = phases
            except:
                pass
            api_source.is_sourcebus = 1
            try:
                api_source.rated_power = 10 ** 3 * float(
                    source_equivalent_data["mva"]
                )  # Modified from source cases where substations can be used.
            except:
                pass
            # TODO: connection_type
            try:
                api_source.phase_angle = source_equivalent_data["operatingangle1"]
            except:
                pass
            # try:
            api_source.positive_sequence_impedance = complex(
                float(source_equivalent_data["positivesequenceresistance"]),
                float(source_equivalent_data["positivesequencereactance"]),
            )
            # except:
            # pass
            try:
                api_source.zero_sequence_impedance = complex(
                    source_equivalent_data["zerosequenceresistance"],
                    source_equivalent_data["zerosequencereactance"],
                )
            except:
                pass
            try:
                api_source.connecting_element = _from
            except:
                pass
    else:
        # Explicit sources exist; enrich them with source-equivalent data
        for sid, sdata in sources.items():
            source_equivalent_data = None
            if "nodeid" in sdata and sdata["nodeid"] in source_equivalents:
                source_equivalent_data = source_equivalents[sdata["nodeid"]]
            if sid in subs:
                # Find the section
                for k, v in self.section_phase_mapping.items():
                    if v["fromnodeid"] == sdata["nodeid"]:
                        sectionID = k
                        _from = v["fromnodeid"]
                        _to = v["tonodeid"]
                        phases = list(v["phase"])
                try:
                    api_source = PowerSource(model)
                except:
                    pass
                api_source.name = _from + "_src"
                try:
                    if "desiredvoltage" in sdata:
                        api_source.nominal_voltage = (
                            float(sdata["desiredvoltage"]) * 10 ** 3
                        )
                    else:
                        api_source.nominal_voltage = (
                            float(source_equivalent_data["voltage"]) * 10 ** 3
                        )
                except:
                    pass
                try:
                    api_source.phases = phases
                except:
                    pass
                api_source.is_sourcebus = 1
                try:
                    api_source.rated_power = 10 ** 3 * float(subs[sid]["mva"])
                except:
                    pass
                # TODO: connection_type
                try:
                    api_source.phase_angle = source_equivalent_data[
                        "operatingangle1"
                    ]
                except:
                    pass
                # try:
                api_source.positive_sequence_impedance = complex(
                    float(source_equivalent_data["positivesequenceresistance"]),
                    float(source_equivalent_data["positivesequencereactance"]),
                )
                # except:
                # pass
                try:
                    api_source.zero_sequence_impedance = complex(
                        source_equivalent_data["zerosequenceresistance"],
                        source_equivalent_data["zerosequencereactance"],
                    )
                except:
                    pass
                try:
                    api_source.connecting_element = _from
                except:
                    pass
                # Commented-out substation transformer construction kept for
                # reference; it was disabled before this review.
                # try:
                #    api_transformer=PowerTransformer(model)
                # except:
                #    pass
                # try:
                #    api_transformer.is_substation=1
                # except:
                #    pass
                # try:
                #    api_transformer.name=sid
                # except:
                #    pass
                # try:
                #    api_transformer.rated_power=10**3*float(subs[sid]['mva'])
                # except:
                #    pass
                # try:
                #    api_transformer.from_element=_from
                # except:
                #    pass
                # try:
                #    api_transformer.to_element=_to
                # except:
                #    pass
                # for w in range(2):
                #    try:
                #        api_winding=Winding(model)
                #    except:
                #        pass
                #    try:
                #        api_winding.connection_type=self.transformer_connection_configuration_mapping(subs[sid]['conn'])
                #    except:
                #        pass
                #    try:
                #        api_winding.nominal_voltage=10**3*float(subs[sid]['kvll'])
                #    except:
                #        pass
                #    try:
                #        api_winding.rated_power=10**6*float(subs[sid]['mva'])
                #    except:
                #        pass
                #    for p in phases:
                #        try:
                #            api_phase_winding=PhaseWinding(model)
                #        except:
                #            pass
                #        try:
                #            api_phase_winding.phase=self.phase_mapping(p)
                #        except:
                #            pass
                #        api_winding.phase_windings.append(api_phase_winding)
                #    api_transformer.windings.append(api_winding)
def parse_nodes(self, model):
    """
    Parse the nodes from CYME to DiTTo.

    :param model: DiTTo model
    :type model: DiTTo model
    :returns: 1 on completion
    """
    self._nodes = []
    # Open the network file
    self.get_file_content("network")
    # Default mapp (positions if all fields are present in the format)
    default_positions = {"nodeid": 0, "ratedvoltage": 48, "coordx": 2, "coordy": 3}
    nodes = {}
    for line in self.content:
        parsed = self.parser_helper(
            line,
            ["node"],
            ["nodeid", "coordx", "coordy", "ratedvoltage"],
            default_positions,
        )
        nodes.update(parsed)
    for node_id, node_data in nodes.items():
        # Create a new DiTTo node object
        try:
            api_node = Node(model)
        except:
            raise ValueError("Could not instanciate DiTTo Node object.")
        # Set the name
        try:
            api_node.name = node_id
        except:
            pass
        # Set the coordinates (best-effort; skipped if not parseable)
        try:
            position = Position(model)
            position.long = float(node_data["coordx"])
            position.lat = float(node_data["coordy"])
            position.elevation = 0
            api_node.positions.append(position)
        except:
            pass
        # Set the nominal voltage
        try:
            api_node.nominal_voltage = float(node_data["ratedvoltage"])
        except:
            pass
        # Add the node to the list
        self._nodes.append(api_node)
    return 1
def configure_wire(
    self,
    model,
    conductor_data,
    spacing_data,
    phase,
    is_switch,
    is_fuse,
    is_open,
    is_network_protector,
    is_breaker,
    is_recloser,
    is_sectionalizer,
):
    """Helper function that creates a DiTTo wire object and configures it.

    :param model: DiTTo model the Wire is attached to
    :param conductor_data: dict of conductor equipment attributes; missing keys are skipped best-effort
    :param spacing_data: dict of spacing-table attributes; missing keys are skipped best-effort
    :param phase: Phase of the wire ("A", "B", "C", "N", or "N2")
    :param is_switch: Flag copied onto the wire (likewise for the other is_* flags)
    :returns: the configured Wire object
    """
    # Instanciate the wire DiTTo object
    api_wire = Wire(model)
    # Set the phase of the wire
    try:
        api_wire.phase = phase
    except:
        pass
    # Fallback nameclass: the equipment id
    try:
        api_wire.nameclass = conductor_data["id"]
    except:
        pass
    # Set the flags
    api_wire.is_switch = is_switch
    api_wire.is_open = is_open
    api_wire.is_fuse = is_fuse
    api_wire.is_network_protector = is_network_protector
    api_wire.is_breaker = is_breaker
    api_wire.is_recloser = is_recloser
    api_wire.is_sectionalizer = is_sectionalizer
    # Set the diameter of the wire
    try:
        api_wire.diameter = float(conductor_data["diameter"])
    except:
        pass
    # Set the nameclass (overrides the "id" fallback above when present).
    # Bug fix: the original wrote "api.wire.nameclass" — a NameError silently
    # swallowed by the bare except, so this assignment never happened.
    try:
        api_wire.nameclass = conductor_data["nameclass"]
    except:
        pass
    # Set the GMR of the wire
    try:
        api_wire.gmr = float(conductor_data["gmr"])
    except:
        pass
    # Set the ampacity of the wire
    try:
        api_wire.ampacity = float(conductor_data["amps"])
    except:
        pass
    # Set the interupting current of the wire if it is a network protector,
    # a fuse, a sectionalizer, a breaker, or a recloser
    if (
        is_network_protector
        or is_fuse
        or is_sectionalizer
        or is_breaker
        or is_recloser
    ):
        try:
            api_wire.interrupting_rating = float(
                conductor_data["interruptingrating"]
            )
        except:
            pass
    # Set the emergency ampacity of the wire
    try:
        api_wire.emergency_ampacity = float(conductor_data["withstandrating"])
    except:
        pass
    # Set the X spacing (phase -> spacing-table column)
    x_map = {
        "A": "posofcond1_x",
        "B": "posofcond2_x",
        "C": "posofcond3_x",
        "N": "posofneutralcond_x",
        "N2": "posofneutralcond_n2_x",
    }
    try:
        api_wire.X = spacing_data[x_map[phase]]
    except:
        pass
    # Set the Y spacing.
    # Bug fix: the original referenced an undefined name "spacing" instead of
    # "spacing_data"; the NameError was swallowed and Y was never set.
    y_map = {
        "A": "posofcond1_y",
        "B": "posofcond2_y",
        "C": "posofcond3_y",
        "N": "posofneutralcond_y",
        "N2": "posofneutralcond_n2_y",
    }
    try:
        api_wire.Y = spacing_data[y_map[phase]]
    except:
        pass
    return api_wire
def parse_sections(self, model):
    """
    This function is responsible for parsing the sections. It is expecting the following structure:

    ...
    [SECTION]
    FORMAT_section=sectionid,fromnodeid,tonodeid,phase
    FORMAT_Feeder=networkid,headnodeid
    Feeder=feeder_1,head_feeder_1
    section_1_feeder_1,node_1,node_2,ABC
    ...
    Feeder=feeder_2,head_feeder_2
    section_1_feeder_2,node_1,node_2,ABC
    ...

    **What is done in this function:**

    - We need to create a clear and fast mapping between feeders and sectionids
    - Same thing, mapping between sectionids and nodes/phases
    - Since we will be using these structures a lot in the reader, we need something fast that does not involve looping like crazy

    **Data structures:**

    1) feeder_section_mapping: dictionary where keys are network_ids and values are lists of section id_s
    2) section_feeder_mapping: dictionary where keys are section ids and values are network_ids
        (to perform the opposite query as 1) without having to look in every list of section ids until we find the good one...)
    3) section_phase_mapping: dictionary where keys are section ids and values are tuples (node_1, node_2, phase)

    .. warning:: This should be called prior to any other parser because the other parsers rely on these 3 data structures.
    """
    self.feeder_section_mapping = {}
    self.section_feeder_mapping = {}
    self.section_phase_mapping = {}
    self.network_data = {}
    format_section = None
    format_feeder = None
    _netID = None
    job_is_done = False
    # Open the network file
    self.get_file_content("network")
    # Loop over the network file
    for line in self.content:
        # This will stop reading the file if we have already worked on the sections
        if job_is_done:
            break
        # Find the section section
        if "[SECTION]" in line:
            job_is_done = True
            line = next(self.content)
            # Until we meet the next section header, work...
            # NOTE(review): the or-chain below is always True (a character
            # cannot equal all four values at once), so the effective stop
            # condition is only len(line) > 2. Left unchanged because "fixing"
            # it to "and" would alter parsing behavior — TODO confirm intent.
            while len(line) > 2 and (
                line[0] != "["
                or line[0] != " "
                or line[0] != "\n"
                or line[0] != "\t\n"
            ):
                # First, we grab the format used to define sections
                if "format_section" in line.lower():
                    format_section = list(
                        map(
                            lambda x: x.strip(),
                            map(lambda x: x.lower(), line.split("=")[1].split(",")),
                        )
                    )
                # Then, we grab the format used to define feeders
                elif (
                    "format_feeder" in line.lower()
                    or "format_substation" in line.lower()
                    or "format_generalnetwork" in line.lower()
                ):
                    format_feeder = list(
                        map(
                            lambda x: x.strip(),
                            map(lambda x: x.lower(), line.split("=")[1].split(",")),
                        )
                    )
                # If we have a new feeder declaration
                # NOTE(review): the "substation=" test is duplicated below;
                # harmless but redundant.
                elif len(line) >= 7 and (
                    line[:7].lower() == "feeder="
                    or line[:11].lower() == "substation="
                    or line[:11].lower() == "substation="
                    or line[:15].lower() == "generalnetwork="
                ):
                    if (
                        line[:7].lower() == "feeder="
                        or line[:15].lower() == "generalnetwork="
                    ):
                        self.network_type = "feeder"
                    if line[:11].lower() == "substation=":
                        self.network_type = "substation"
                    # We should have a format for sections and feeders,
                    # otherwise, raise an error...
                    if format_section is None:
                        raise ValueError("No format for sections.")
                    if format_feeder is None:
                        raise ValueError("No format for feeders.")
                    # Get the feeder data (everything after the '=' symbol)
                    feeder_data = line.split("=")[1].split(",")
                    # Check that the data obtained have the same length as the format provided
                    # otherwise, raise an error...
                    if len(feeder_data) != len(format_feeder):
                        raise ValueError(
                            "Feeder/substation data length {a} does not match feeder format length {b}.".format(
                                a=len(feeder_data), b=len(format_feeder)
                            )
                        )
                    # Check that we have a networkid in the format
                    # otherwise, raise an error...
                    if "networkid" not in format_feeder:
                        raise ValueError(
                            "Cannot find the networkid in format: "
                            + str(format_feeder)
                        )
                    # Check that we have a sectionid in the format
                    # otherwise, raise an error...
                    if "sectionid" not in format_section:
                        raise ValueError(
                            "Cannot find the sectionid in format: "
                            + str(format_section)
                        )
                    # We should be able to get the networkid from the feeder data.
                    _netID = feeder_data[format_feeder.index("networkid")].lower()
                    # First, we store all the feeder data in the network_data structure
                    self.network_data[_netID] = {}
                    for key, value in zip(format_feeder, feeder_data):
                        self.network_data[_netID][key] = value
                    # Then, we create a new entry in feeder_section_mapping
                    self.feeder_section_mapping[_netID] = []
                # Otherwise, we should have a new section...
                else:
                    # If we have no networkid at this point, raise an error
                    # Note: If CYME allows sections to be define without
                    # a network, remove this safety check
                    #
                    if _netID is None:
                        raise ValueError(
                            "No network ID available when reading line \n" + line
                        )
                    # Extract the data for this section
                    section_data = list(map(lambda x: x.strip(), line.split(",")))
                    # Check length coherence...
                    if len(section_data) != len(format_section):
                        raise ValueError(
                            "Section data length {a} does not match section format length {b}.".format(
                                a=len(section_data), b=len(format_section)
                            )
                        )
                    # Grab the sectionid
                    _sectionID = section_data[
                        format_section.index("sectionid")
                    ].lower()
                    # Create a new entry in section_phase_mapping
                    self.section_phase_mapping[_sectionID] = {}
                    # Populate this new entry
                    for key, value in zip(format_section, section_data):
                        self.section_phase_mapping[_sectionID][key] = value
                    # And finally, add a new entry to section_feeder_mapping
                    self.section_feeder_mapping[_sectionID] = _netID
                # Finally, move on to next line
                line = next(self.content)
def parse_lines(self, model):
"""
Parse the lines from CYME to DiTTo.
:param model: DiTTo model
:type model: DiTTo model
"""
# Default mapp (positions if all fields are present in the format)
# These numbers come from the CYME documentation (position of the fields)
mapp_overhead = {
"sectionid": 0,
"linecableid": 5,
"length": 6,
"coordx": 8,
"coordy": 9,
}
mapp_overhead_byphase = {
"sectionid": 0,
"devicenumber": 1,
"condid_a": 5,
"condid_b": 6,
"condid_c": 7,
"condid_n1": 8,
"condid_n2": 9,
"spacingid": 10,
"length": 11,
"coordx": 14,
"coordy": 15,
}
mapp_underground = {
"sectionid": 0,
"linecableid": 5,
"length": 6,
"amps": 8,
"coordx": 14,
"coordy": 15,
}
mapp_switch = {
"sectionid": 0,
"eqid": 2,
"coordx": 7,
"coordy": 8,
"closedphase": 9,
}
mapp_sectionalizer = {"sectionid": 0, "eqid": 2, "coordx": 7, "coordy": 8}
mapp_line = {
"id": 0,
"phasecondid": 1,
"neutralcondid": 2,
"spacingid": 3,
"amps": 11,
"r1": 5,
"r0": 6,
"x1": 7,
"x0": 8,
"b1": 9,
"b0": 10,
}
mapp_section = {"sectionid": 0, "fromnodeid": 1, "tonodeid": 2, "phase": 3}
mapp_line_unbalanced = {
"id": 0,
"condid_a": 1,
"condid_b": 2,
"condid_c": 3,
"condid_n1": 4,
"condid_n2": 5,
"spacingid": 6,
"ra": 8,
"rb": 9,
"rc": 10,
"xa": 11,
"xb": 12,
"xc": 13,
"ba": 14,
"bb": 15,
"bc": 16,
"mutualresistanceab": 36,
"mutualresistancebc": 37,
"mutualresistanceca": 38,
"mutualreactanceab": 39,
"mutualreactancebc": 40,
"mutualreactanceca": 41,
}
mapp_spacing = {
"id": 0,
"posofcond1_x": 5,
"posofcond1_y": 6,
"posofcond2_x": 7,
"posofcond2_y": 8,
"posofcond3_x": 9,
"posofcond3_y": 10,
"posofneutralcond_x": 11,
"posofneutralcond_y": 12,
"posofneutralcond_n2_x": 13,
"posofneutralcond_n2_y": 14,
}
mapp_conductor = {
"id": 0,
"diameter": 1,
"gmr": 2,
"amps": 5,
"withstandrating": 15,
}
mapp_cable = {"id": 0, "r1": 1, "r0": 2, "x1": 3, "x0": 4, "amps": 7}
mapp_concentric_neutral_cable = {
"id": 0,
"r1": 1,
"r0": 2,
"x1": 3,
"x0": 4,
"amps": 5,
"phasecondid": 19,
"neutralcondid": 20,
}
mapp_network_protectors = {
"id": 0,
"amps": 1,
"kvll": 6,
"interruptingrating": 8,
}
mapp_sectionalizers = {"id": 0, "amps": 1, "kvll": 6, "interruptingrating": 20}
mapp_switch_eq = {"id": 0, "amps": 1, "kvll": 6}
# Instanciate the lists for storing objects
self.overhead_lines = []
self.underground_lines = []
self.sections = []
# self.lines=[]
self.lines_unbalanced = []
# self.spacings=[]
# self.conductors=[]
self.overhead_by_phase = []
self.balanced_lines = {}
self.unbalanced_lines = {}
self.settings = {}
self.spacings = {}
self.conductors = {}
self.concentric_neutral_cable = {}
self.cables = {}
self.network_protectors = {}
self.breakers = {}
self.fuses = {}
self.reclosers = {}
self.sectionalizers = {}
self.switches = {}
# Instanciate the list in which we store the DiTTo line objects
self._lines = []
self.section_phase = {}
mapp_closed_phase = {
0: "none",
1: "A",
2: "B",
3: "C",
4: "AB",
5: "AC",
6: "BC",
7: "ABC",
"0": "none",
"1": "A",
"2": "B",
"3": "C",
"4": "AB",
"5": "AC",
"6": "BC",
"7": "ABC",
"none": "none",
"NONE": "none",
"A": "A",
"B": "B",
"C": "C",
"AB": "AB",
"AC": "AC",
"BC": "BC",
"ABC": "ABC",
}
#####################################################
# #
# NETWORK FILE #
# #
#####################################################
#
# Open the network file
self.get_file_content("network")
# Loop over the network file
for line in self.content:
#########################################
# #
# OVERHEAD UNBALANCED LINES #
# #
#########################################
#
self.settings = self.update_dict(
self.settings,
self.parser_helper(
line,
["overhead_unbalanced_line_settings"],
["sectionid", "coordx", "coordy", "linecableid", "length"],
mapp_overhead,
{"type": "overhead_unbalanced"},
),
)
#########################################
# #
# OVERHEAD BALANCED LINES #
# #
#########################################
#
self.settings = self.update_dict(
self.settings,
self.parser_helper(
line,
["overhead_line_settings"],
["sectionid", "coordx", "coordy", "linecableid", "length"],
mapp_overhead,
{"type": "overhead_balanced"},
),
)
#########################################
# #
# OVERHEAD BY PHASE SETTINGS #
# #
#########################################
#
self.settings = self.update_dict(
self.settings,
self.parser_helper(
line,
["overhead_byphase_settings"],
[
"sectionid",
"devicenumber",
"condid_a",
"condid_b",
"condid_c",
"condid_n",
"condid_n1",
"condid_n2",
"spacingid",
"length",
"coordx",
"coordy",
],
mapp_overhead_byphase,
{"type": "overhead_unbalanced"},
),
)
#########################################
# #
# UNDERGROUND LINES #
# #
#########################################
#
self.settings = self.update_dict(
self.settings,
self.parser_helper(
line,
["underground_line_settings"],
["sectionid", "coordx", "coordy", "linecableid", "length", "amps"],
mapp_underground,
{"type": "underground"},
),
)
#########################################
# #
# SWITCH. #
# #
#########################################
#
self.settings = self.update_dict(
self.settings,
self.parser_helper(
line,
["switch_settings"],
["sectionid", "coordx", "coordy", "eqid", "closedphase"],
mapp_switch,
{"type": "switch"},
),
)
#########################################
# #
# SECTIONALIZER. #
# #
#########################################
#
self.settings = self.update_dict(
self.settings,
self.parser_helper(
line,
["sectionalizer_settings"],
["sectionid", "coordx", "coordy", "eqid"],
mapp_sectionalizer,
{"type": "sectionalizer"},
),
)
#########################################
# #
# FUSES. #
# #
#########################################
#
self.settings = self.update_dict(
self.settings,
self.parser_helper(
line,
["fuse_settings"],
["sectionid", "coordx", "coordy", "eqid"],
mapp_switch, # Same as switches
{"type": "fuse"},
),
)
#########################################
# #
# RECLOSERS. #
# #
#########################################
#
self.settings = self.update_dict(
self.settings,
self.parser_helper(
line,
["recloser_settings"],
["sectionid", "coordx", "coordy", "eqid"],
mapp_switch, # Same as switches
{"type": "recloser"},
),
)
#########################################
# #
# BREAKER. #
# #
#########################################
#
self.settings = self.update_dict(
self.settings,
self.parser_helper(
line,
["breaker_settings"],
["sectionid", "coordx", "coordy", "eqid", "closedphase"],
mapp_switch, # Same as switches
{"type": "breaker"},
),
)
#########################################
# #
# NETWORK PROTECTORS. #
# #
#########################################
#
self.settings = self.update_dict(
self.settings,
self.parser_helper(
line,
["network_protector_settings"],
["sectionid", "coordx", "coordy", "eqid", "closedphase"],
mapp_switch, # Same as switches
{"type": "network_protector"},
),
)
#########################################
# #
# SECTIONS. #
# #
#########################################
#
self.settings = self.update_dict(
self.settings,
self.parser_helper(
line,
["section"],
["sectionid", "fromnodeid", "tonodeid", "phase"],
mapp_section,
),
)
#####################################################
# #
# EQUIPMENT FILE #
# #
#####################################################
#
# Open the equipment file
self.get_file_content("equipment")
# Loop over the equipment file
for line in self.content:
#########################################
# #
# LINES. #
# #
#########################################
#
self.balanced_lines.update(
self.parser_helper(
line,
["line"],
[
"id",
"phasecondid",
"neutralcondid",
"spacingid",
"amps",
"r1",
"r0",
"x1",
"x0",
"b1",
"b0",
],
mapp_line,
{"type": "balanced_line"},
)
)
#########################################
# #
# UNBALANCED LINES. #
# #
#########################################
#
self.unbalanced_lines.update(
self.parser_helper(
line,
["unbalanced_line"],
[
"id",
"condid_a",
"condid_b",
"condid_c",
"condid_n",
"condid_n1",
"condid_n2",
"spacingid",
"ra",
"rb",
"rc",
"xa",
"xb",
"xc",
"ba",
"bb",
"bc",
"mutualresistanceab",
"mutualresistancebc",
"mutualresistanceca",
"mutualreactanceab",
"mutualreactancebc",
"mutualreactanceca",
],
mapp_line_unbalanced,
{"type": "unbalanced_line"},
)
)
#########################################
# #
# SPACING TABLE #
# #
#########################################
#
self.spacings.update(
self.parser_helper(
line,
["spacing_table"],
[
"id",
"posofcond1_x",
"posofcond1_y",
"posofcond2_x",
"posofcond2_y",
"posofcond3_x",
"posofcond3_y",
"posofneutralcond_x",
"posofneutralcond_y",
"posofneutralcond_n2_x",
"posofneutralcond_n2_y",
],
mapp_spacing,
)
)
#########################################
# #
# CONDUCTOR #
# #
#########################################
#
self.conductors.update(
self.parser_helper(
line,
["conductor"],
["id", "diameter", "gmr", "r25", "amps", "withstandrating"],
mapp_conductor,
)
)
#########################################
# #
# CONCENTRIC NEUTRAL CABLE #
# #
#########################################
#
self.concentric_neutral_cable.update(
self.parser_helper(
line,
["concentric_neutral_cable"],
[
"id",
"r1",
"r0",
"x1",
"x0",
"amps",
"phasecondid",
"neutralcondid",
],
mapp_concentric_neutral_cable,
)
)
#########################################
# #
# CABLE #
# #
#########################################
#
self.cables.update(
self.parser_helper(
line,
["cable"],
["id", "r1", "r0", "x1", "x0", "amps"],
mapp_concentric_neutral_cable,
)
)
#########################################
# #
# SWITCHES #
# #
#########################################
#
self.switches.update(
self.parser_helper(
line, ["switch"], ["id", "amps", "kvll"], mapp_switch_eq
)
)
#########################################
# #
# FUSES #
# #
#########################################
#
self.fuses.update(
self.parser_helper(
line,
["fuse"],
["id", "amps", "kvll", "interruptingrating"],
mapp_network_protectors, # Same as network protectors
)
)
#########################################
# #
# RECLOSERS #
# #
#########################################
#
self.reclosers.update(
self.parser_helper(
line,
["recloser"],
["id", "amps", "kvll", "interruptingrating"],
mapp_network_protectors, # Same as network protectors
)
)
#########################################
# #
# SECTIONALIZERS #
# #
#########################################
#
self.sectionalizers.update(
self.parser_helper(
line,
["sectionalizer"],
["id", "amps", "kvll", "interruptingrating"],
mapp_sectionalizers,
)
)
#########################################
# #
# BREAKERS #
# #
#########################################
#
self.breakers.update(
self.parser_helper(
line,
["breaker"],
["id", "amps", "kvll", "interruptingrating"],
mapp_network_protectors, # Same as network protectors
)
)
#########################################
# #
# NETWORK PROTECTORS #
# #
#########################################
#
self.network_protectors.update(
self.parser_helper(
line,
["network_protector"],
["id", "amps", "kvll", "interruptingrating"],
mapp_network_protectors,
)
)
#####################################################
# #
# JOIN LISTS AND CREATE DITTO OBJECTS #
# #
#####################################################
#
# At this point, we should have all the line data in multiple list of dictionaries.
# We have then to put everything back together using the foreign keys
#
# Loop over the sections
for sectionID, settings in self.settings.items():
sectionID = sectionID.strip("*").lower()
# TODO: CLEAN THIS...
if (
"load" in settings["fromnodeid"].lower()
or "load" in settings["tonodeid"].lower()
):
continue
new_line = {}
# Set the name
try:
new_line["name"] = sectionID
except:
pass
# Set the from_element (info is in the section)
try:
new_line["from_element"] = self.section_phase_mapping[sectionID][
"fromnodeid"
]
except:
pass
# Set the to_element (info is in the section)
try:
new_line["to_element"] = self.section_phase_mapping[sectionID][
"tonodeid"
]
except:
pass
try:
phases = list(self.section_phase_mapping[sectionID]["phase"])
except:
pass
# Set the length
try:
new_line["length"] = float(settings["length"])
except:
pass
new_line["feeder_name"] = self.section_feeder_mapping[sectionID]
# Set the position
try:
position = Position(model)
position.long = float(settings["coordx"])
position.lat = float(settings["coordy"])
position.elevation = 0
new_line["position"] = position
except:
pass
# Set the line type
new_line["is_switch"] = 0
new_line["is_fuse"] = 0
new_line["is_recloser"] = 0
new_line["is_breaker"] = 0
new_line["is_sectionalizer"] = 0
new_line["is_network_protector"] = 0
# Set the nameclass of the line as the equipment ID
if "eqid" in settings:
new_line["nameclass"] = settings["eqid"]
if "type" in settings:
# Overhead lines
if "overhead" in settings["type"]:
new_line["line_type"] = "overhead"
# Underground lines
elif "underground" in settings["type"]:
new_line["line_type"] = "underground"
# Switch
elif "switch" in settings["type"]:
new_line["is_switch"] = 1
new_line["wires"] = []
total_closed = 0
# Get and map the closed phases
if "closedphase" in settings:
closedphase = mapp_closed_phase[settings["closedphase"]]
else:
closedphase = (
"ABC"
) # If no info, then everything is closed by default...
# Get the sectionalizer equipment data
if "eqid" in settings and settings["eqid"] in self.switches:
switch_data = self.switches[settings["eqid"]]
else:
switch_data = {}
# Pass the nameclass to the wires
if "nameclass" in new_line:
switch_data["nameclass"] = new_line["nameclass"]
try:
new_line["nominal_voltage"] = float(switch_data["kvll"]) * 1000
except:
pass
# Create the wires
for p in phases + ["N"]:
if p in closedphase and closedphase.lower() != "none":
total_closed += 1
api_wire = self.configure_wire(
model,
switch_data,
{},
p,
True,
False,
False,
False,
False,
False,
False,
)
elif p == "N" and total_closed >= 1:
api_wire = self.configure_wire(
model,
switch_data,
{},
p,
True,
False,
False,
False,
False,
False,
False,
)
else:
api_wire = self.configure_wire(
model,
switch_data,
{},
p,
True,
False,
True,
False,
False,
False,
False,
)
new_line["wires"].append(api_wire)
api_line = Line(model)
for k, v in new_line.items():
setattr(api_line, k, v)
continue
# Sectionalizer
elif "sectionalizer" in settings["type"]:
new_line["is_sectionalizer"] = 1
new_line["wires"] = []
total_closed = 0
# Get and map the closed phases
if "closedphase" in settings:
closedphase = mapp_closed_phase[settings["closedphase"]]
else:
closedphase = (
"ABC"
) # If no info, then everything is closed by default...
# Get the sectionalizer equipment data
if "eqid" in settings and settings["eqid"] in self.sectionalizers:
sectionalizer_data = self.sectionalizers[settings["eqid"]]
else:
sectionalizer_data = {}
# Pass the nameclass to the wires
if "nameclass" in new_line:
sectionalizer_data["nameclass"] = new_line["nameclass"]
try:
new_line["nominal_voltage"] = (
float(sectionalizer_data["kvll"]) * 1000
)
except:
pass
# Create the wires
for p in phases + ["N"]:
if p in closedphase and closedphase.lower() != "none":
total_closed += 1
api_wire = self.configure_wire(
model,
sectionalizer_data,
{},
p,
False,
False,
False,
False,
False,
False,
True,
)
elif p == "N" and total_closed >= 1:
api_wire = self.configure_wire(
model,
sectionalizer_data,
{},
p,
False,
False,
False,
False,
False,
False,
True,
)
else:
api_wire = self.configure_wire(
model,
sectionalizer_data,
{},
p,
False,
False,
True,
False,
False,
False,
True,
)
new_line["wires"].append(api_wire)
api_line = Line(model)
for k, v in new_line.items():
setattr(api_line, k, v)
continue
# Fuse
elif "fuse" in settings["type"]:
new_line["is_fuse"] = 1
new_line["wires"] = []
total_closed = 0
# Get and map the closed phases
if "closedphase" in settings:
closedphase = mapp_closed_phase[settings["closedphase"]]
else:
closedphase = (
"ABC"
) # If no info, then everything is closed by default...
# Get the fuse equipment data
if "eqid" in settings and settings["eqid"] in self.fuses:
fuse_data = self.fuses[settings["eqid"]]
else:
fuse_data = {}
# Pass the nameclass to the wires
if "nameclass" in new_line:
fuse_data["nameclass"] = new_line["nameclass"]
try:
new_line["nominal_voltage"] = float(fuse_data["kvll"]) * 1000
except:
pass
# Create the wires
for p in phases + ["N"]:
if p in closedphase and closedphase.lower() != "none":
total_closed += 1
api_wire = self.configure_wire(
model,
fuse_data,
{},
p,
False,
True,
False,
False,
False,
False,
False,
)
elif p == "N" and total_closed >= 1:
api_wire = self.configure_wire(
model,
fuse_data,
{},
p,
False,
True,
False,
False,
False,
False,
False,
)
else:
api_wire = self.configure_wire(
model,
fuse_data,
{},
p,
False,
True,
True,
False,
False,
False,
False,
)
new_line["wires"].append(api_wire)
api_line = Line(model)
for k, v in new_line.items():
setattr(api_line, k, v)
continue
# recloser
elif "recloser" in settings["type"]:
new_line["is_recloser"] = 1
new_line["wires"] = []
total_closed = 0
# Get and map the closed phases
if "closedphase" in settings:
closedphase = mapp_closed_phase[settings["closedphase"]]
else:
closedphase = (
"ABC"
) # If no info, then everything is closed by default...
# Get the recloser equipment data
if "eqid" in settings and settings["eqid"] in self.reclosers:
recloser_data = self.reclosers[settings["eqid"]]
else:
recloser_data = {}
# Pass the nameclass to the wires
if "nameclass" in new_line:
recloser_data["nameclass"] = new_line["nameclass"]
try:
new_line["nominal_voltage"] = (
float(recloser_data["kvll"]) * 1000
)
except:
pass
# Create the wires
for p in phases + ["N"]:
if p in closedphase and closedphase.lower() != "none":
total_closed += 1
api_wire = self.configure_wire(
model,
recloser_data,
{},
p,
False,
False,
False,
False,
False,
True,
False,
)
elif p == "N" and total_closed >= 1:
api_wire = self.configure_wire(
model,
recloser_data,
{},
p,
False,
False,
False,
False,
False,
True,
False,
)
else:
api_wire = self.configure_wire(
model,
recloser_data,
{},
p,
False,
False,
True,
False,
False,
True,
False,
)
new_line["wires"].append(api_wire)
api_line = Line(model)
for k, v in new_line.items():
setattr(api_line, k, v)
continue
# breaker
elif "breaker" in settings["type"]:
new_line["is_breaker"] = 1
new_line["wires"] = []
total_closed = 0
# Get and map the closed phases
if "closedphase" in settings:
closedphase = mapp_closed_phase[settings["closedphase"]]
else:
closedphase = (
"ABC"
) # If no info, then everything is closed by default...
# Get the breaker equipment data
if "eqid" in settings and settings["eqid"] in self.breakers:
breaker_data = self.breakers[settings["eqid"]]
else:
breaker_data = {}
# Pass the nameclass to the wires
if "nameclass" in new_line:
breaker_data["nameclass"] = new_line["nameclass"]
try:
new_line["nominal_voltage"] = float(breaker_data["kvll"]) * 1000
except:
pass
# Create the wires
for p in phases + ["N"]:
if p in closedphase and closedphase.lower() != "none":
total_closed += 1
api_wire = self.configure_wire(
model,
breaker_data,
{},
p,
False,
False,
False,
False,
True,
False,
False,
)
elif p == "N" and total_closed >= 1:
api_wire = self.configure_wire(
model,
breaker_data,
{},
p,
False,
False,
False,
False,
True,
False,
False,
)
else:
api_wire = self.configure_wire(
model,
breaker_data,
{},
p,
False,
False,
True,
False,
True,
False,
False,
)
new_line["wires"].append(api_wire)
api_line = Line(model)
for k, v in new_line.items():
setattr(api_line, k, v)
continue
# Network Protectors
elif "network_protector" in settings["type"]:
new_line["is_network_protector"] = 1
new_line["wires"] = []
total_closed = 0
# Get and map the closed phases
if "closedphase" in settings:
closedphase = mapp_closed_phase[settings["closedphase"]]
else:
closedphase = (
"ABC"
) # If no info, then everything is closed by default...
# Get the network protector equipment data
if (
"eqid" in settings
and settings["eqid"] in self.network_protectors
):
network_protector_data = self.network_protectors[
settings["eqid"]
]
else:
network_protector_data = {}
# Pass the nameclass to the wires
if "nameclass" in new_line:
network_protector_data["nameclass"] = new_line["nameclass"]
try:
new_line["nominal_voltage"] = (
float(network_protector_data["kvll"]) * 1000
)
except:
pass
# Create the wires
for p in phases + ["N"]:
if p in closedphase and closedphase.lower() != "none":
total_closed += 1
api_wire = self.configure_wire(
model,
network_protector_data,
{},
p,
False,
False,
False,
True,
False,
False,
False,
)
elif p == "N" and total_closed >= 1:
api_wire = self.configure_wire(
model,
network_protector_data,
{},
p,
False,
False,
False,
True,
False,
False,
False,
)
else:
api_wire = self.configure_wire(
model,
network_protector_data,
{},
p,
False,
False,
True,
True,
False,
False,
False,
)
new_line["wires"].append(api_wire)
# Create the line object
api_line = Line(model)
for k, v in new_line.items():
setattr(api_line, k, v)
continue
line_data = None
# If we have a linecableid for the current section
if "linecableid" in settings:
# And if we have line data with the matching ID
if settings["linecableid"] in self.balanced_lines:
# Cache the line data
line_data = self.balanced_lines[settings["linecableid"]]
if settings["linecableid"] in self.unbalanced_lines:
# Cache the line data
line_data = self.unbalanced_lines[settings["linecableid"]]
if settings["linecableid"] in self.concentric_neutral_cable:
# Cache the line data
line_data = self.concentric_neutral_cable[settings["linecableid"]]
line_data["type"] = "balanced_line"
if settings["linecableid"] in self.cables:
logger.debug("cables {}".format(sectionID))
line_data = self.cables[settings["linecableid"]]
line_data["type"] = "balanced_line"
# We might have a device number instead if we are dealing with BY PHASE settings
#
# TODO: Decide if I should remove this or not...
#
elif "devicenumber" in settings:
# if self.balanced_lines.has_key(settings['devicenumber']):
# #Cache the line data
# line_data=self.balanced_lines[settings['devicenumber']]
# elif self.unbalanced_lines.has_key(settings['devicenumber']):
# #Cache the line data
# line_data=self.unbalanced_lines[settings['devicenumber']]
if settings["devicenumber"] in self.concentric_neutral_cable:
line_data = self.concentric_neutral_cable[settings["devicenumber"]]
line_data["type"] = "balanced_line"
elif (
"condid_a" in settings
and "condid_b" in settings
and "condid_c" in settings
and "spacingid" in settings
):
if "condid_n" in settings or "condid_n1" in settings:
line_data = {"type": "unbalanced_spacing_conf"}
if line_data is None:
if not "phase" in settings.keys():
logger.warning("WARNING:: Skipping Line {} !".format(sectionID))
continue
else:
impedance_matrix = None
# We now face two different case:
#
# Case 1: The line is balanced
#
if line_data["type"] == "balanced_line":
# In this case, we build the impedance matrix from Z+ and Z0 in the following way:
# __________________________
# | Z0+2*Z+ Z0-Z+ Z0-Z+ |
# Z= 1/3 | Z0-Z+ Z0+2*Z+ Z0-Z+ |
# | Z0-Z+ Z0-Z+ Z0+2*Z+ |
# --------------------------
try:
coeff = 10 ** -3
# One phase line
if len(phases) == 1:
impedance_matrix = [
[
1.
/ 3.0
* coeff
* complex(
float(line_data["r0"]), float(line_data["x0"])
)
]
]
# Two phase line
elif len(phases) == 2:
a = (
1.
/ 3.0
* coeff
* complex(
2 * float(line_data["r1"]) + float(line_data["r0"]),
2 * float(line_data["x1"]) + float(line_data["x0"]),
)
)
b = (
1.
/ 3.0
* coeff
* complex(
float(line_data["r0"]) - float(line_data["r1"]),
float(line_data["x0"]) - float(line_data["x1"]),
)
)
impedance_matrix = [[a, b], [b, a]]
# Three phase line
else:
a = (
1.
/ 3.0
* coeff
* complex(
2 * float(line_data["r1"]) + float(line_data["r0"]),
2 * float(line_data["x1"]) + float(line_data["x0"]),
)
)
b = (
1.
/ 3.0
* coeff
* complex(
float(line_data["r0"]) - float(line_data["r1"]),
float(line_data["x0"]) - float(line_data["x1"]),
)
)
impedance_matrix = [[a, b, b], [b, a, b], [b, b, a]]
except:
pass
# In the balanced case, we should have two conductor IDs: One for the phases and one for the neutral
# Handle the Phase conductors first:
if (
"phasecondid" in line_data
and line_data["phasecondid"] in self.conductors
):
conductor_data = self.conductors[line_data["phasecondid"]]
else:
conductor_data = {}
# In addition, we might have some information on the spacings
if (
"spacingid" in line_data
and line_data["spacingid"] in self.spacings
):
spacing_data = self.spacings[line_data["spacingid"]]
else:
spacing_data = {}
if conductor_data == {} and "linecableid" in line_data:
conductor_data = self.conductors[line_data["linecableid"]]
# Loop over the phases and create the wires
new_line["wires"] = []
for phase in phases:
api_wire = self.configure_wire(
model,
conductor_data,
spacing_data,
phase,
False,
False,
False,
False,
False,
False,
False,
)
new_line["wires"].append(api_wire)
# Handle the neutral conductor
if (
"neutralcondid" in line_data
and line_data["neutralcondid"] in self.conductors
):
conductor_data = self.conductors[line_data["neutralcondid"]]
else:
conductor_data = {}
# In addition, we might have some information on the spacings
if (
"spacingid" in line_data
and line_data["spacingid"] in self.spacings
):
spacing_data = self.spacings[line_data["spacingid"]]
else:
spacing_data = {}
api_wire = self.configure_wire(
model,
conductor_data,
spacing_data,
"N",
False,
False,
False,
False,
False,
False,
False,
)
new_line["wires"].append(api_wire)
# Case 2: The line is unbalanced
#
elif line_data["type"] == "unbalanced_line":
coeff = 10 ** -3
# In this case, we should have all the information for the impedance matrix (mutual terms)
#
try:
# One phase line
if len(phases) == 1:
p = phases[0].lower()
impedance_matrix = [
[
coeff
* complex(
float(line_data["r{}".format(p)]),
coeff * float(line_data["x{}".format(p)]),
)
]
]
# Two phase line
elif len(phases) == 2:
p1 = phases[0].lower()
p2 = phases[1].lower()
p1, p2 = sorted([p1, p2])
if p1 == "a" and p2 == "c":
impedance_matrix = [
[
coeff
* complex(
float(line_data["ra"]),
float(line_data["xa"]),
),
coeff
* complex(
float(line_data["mutualresistanceca"]),
float(line_data["mutualreactanceca"]),
),
],
[
coeff
* complex(
float(line_data["mutualresistanceca"]),
float(line_data["mutualreactanceca"]),
),
coeff
* complex(
float(line_data["rc"]),
float(line_data["xc"]),
),
],
]
else:
impedance_matrix = [
[
coeff
* complex(
float(line_data["r{}".format(p1)]),
float(line_data["x{}".format(p1)]),
),
coeff
* complex(
float(
line_data[
"mutualresistance{p1}{p2}".format(
p1=p1, p2=p2
)
]
),
float(
line_data[
"mutualreactance{p1}{p2}".format(
p1=p1, p2=p2
)
]
),
),
],
[
coeff
* complex(
float(
line_data[
"mutualresistance{p1}{p2}".format(
p1=p1, p2=p2
)
]
),
float(
line_data[
"mutualreactance{p1}{p2}".format(
p1=p1, p2=p2
)
]
),
),
coeff
* complex(
float(line_data["r{}".format(p2)]),
float(line_data["x{}".format(p2)]),
),
],
]
# Three phase line
else:
impedance_matrix = [
[
coeff
* complex(
float(line_data["ra"]), float(line_data["xa"])
),
coeff
* complex(
float(line_data["mutualresistanceab"]),
float(line_data["mutualreactanceab"]),
),
coeff
* complex(
float(line_data["mutualresistanceca"]),
float(line_data["mutualreactanceca"]),
),
],
[
coeff
* complex(
float(line_data["mutualresistanceab"]),
float(line_data["mutualreactanceab"]),
),
coeff
* complex(
float(line_data["rb"]), float(line_data["xb"])
),
coeff
* complex(
float(line_data["mutualresistancebc"]),
float(line_data["mutualreactancebc"]),
),
],
[
coeff
* complex(
float(line_data["mutualresistanceca"]),
float(line_data["mutualreactanceca"]),
),
coeff
* complex(
float(line_data["mutualresistancebc"]),
float(line_data["mutualreactancebc"]),
),
coeff
* complex(
float(line_data["rc"]), float(line_data["xc"])
),
],
]
except:
pass
# In the unbalanced case, we should have conductor IDs for the phases and neutral
# Handle the Phase conductors first:
# Loop over the phases and create the wires
new_line["wires"] = []
for phase in phases:
if (
"condid_{}".format(phase.lower()) in line_data
and line_data["condid_{}".format(phase.lower())].lower()
!= "none"
and line_data["condid_{}".format(phase.lower())]
in self.conductors
):
conductor_data = self.conductors[
line_data["condid_{}".format(phase.lower())]
]
else:
conductor_data = {}
# In addition, we might have some information on the spacings
if (
"spacingid" in line_data
and line_data["spacingid"].lower() != "none"
and line_data["spacingid"] in self.spacings
):
spacing_data = self.spacings[line_data["spacingid"]]
else:
spacing_data = {}
api_wire = self.configure_wire(
model,
conductor_data,
spacing_data,
phase,
False,
False,
False,
False,
False,
False,
False,
)
new_line["wires"].append(api_wire)
# Handle the neutral conductors
# We might have one or two neutral conductors
# If we have valid condid_n1 and condid_n2 ==> create 2 wires
# If we have only condid_n1 or condid_n alone ==> create 1 wire only
#
# In addition, we might have some information on the spacings
if (
"spacingid" in line_data
and line_data["spacingid"].lower() != "none"
and line_data["spacingid"] in self.spacings
):
spacing_data = self.spacings[line_data["spacingid"]]
else:
spacing_data = {}
if (
"condid_n1" in line_data
and line_data["condid_n1"].lower() != "none"
and line_data["condid_n1"] in self.conductors
and "condid_n2" in line_data
and line_data["condid_n2"].lower() != "none"
and line_data["condid_n2"] in self.conductors
):
conductor_n1_data = self.conductors[line_data["condid_n1"]]
conductor_n2_data = self.conductors[line_data["condid_n2"]]
api_wire_n1 = self.configure_wire(
model,
conductor_n1_data,
spacing_data,
"N1",
False,
False,
False,
False,
False,
False,
False,
)
api_wire_n2 = self.configure_wire(
model,
conductor_n2_data,
spacing_data,
"N2",
False,
False,
False,
False,
False,
False,
False,
)
new_line["wires"].append(api_wire_n1)
new_line["wires"].append(api_wire_n2)
elif (
"condid_n" in line_data
and line_data["condid_n"].lower() != "none"
and line_data["condid_n"] in self.conductors
):
conductor_data = self.conductors[line_data["condid_n"]]
api_wire = self.configure_wire(
model,
conductor_data,
spacing_data,
"N",
False,
False,
False,
False,
False,
False,
False,
)
new_line["wires"].append(api_wire)
else:
if (
"condid_n1" in line_data
and line_data["condid_n1"].lower() != "none"
and line_data["condid_n1"] in self.conductors
):
conductor_data = self.conductors[line_data["condid_n1"]]
api_wire = self.configure_wire(
model,
conductor_data,
spacing_data,
"N",
False,
False,
False,
False,
False,
False,
False,
)
new_line["wires"].append(api_wire)
elif line_data["type"] == "unbalanced_spacing_conf":
# IMPEDANCE MATRIX FROM SPACINGS
#
# First, we have to get the wires' positions:
if settings["spacingid"] in self.spacings:
# Get the spacing data
spacing_data = self.spacings[settings["spacingid"]]
pos = []
for i, p in enumerate(phases):
pos.append([None, None])
for j, k in enumerate(["x", "y"]):
if (
"posofcond{i}_{k}".format(i=i + 1, k=k)
in spacing_data
):
try:
pos[-1][j] = float(
spacing_data[
"posofcond{i}_{k}".format(i=i + 1, k=k)
]
)
except:
pass
pos.append([None, None])
if (
"posofneutralcond_x" in spacing_data
and "posofneutralcond_y" in spacing_data
):
try:
pos[-1][0] = float(spacing_data["posofneutralcond_x"])
pos[-1][1] = float(spacing_data["posofneutralcond_y"])
except:
pass
pos.append([None, None])
if (
"posofneutralcond_n2_x" in spacing_data
and spacing_data["posofneutralcond_n2_x"] != ""
and "posofneutralcond_n2_y" in spacing_data
and spacing_data["posofneutralcond_n2_y"] != ""
):
try:
pos[-1][0] = float(
spacing_data["posofneutralcond_n2_x"]
)
pos[-1][1] = float(
spacing_data["posofneutralcond_n2_y"]
)
except:
pass
valid_cond = []
ph_list = ["a", "b", "c", "n1", "n2"]
for idd, po in enumerate(pos):
if po != [None, None]:
valid_cond.append(idd)
distance_matrix = []
for i, ii in enumerate(valid_cond):
distance_matrix.append([])
for j, jj in enumerate(valid_cond):
distance_matrix[-1].append(
3.28084 * self.distance(pos[ii], pos[jj])
) # 0.0328084
distance_matrix = np.array(distance_matrix)
gmr_list = []
resistance_list = []
perform_kron_reduction = True
# Get GMR and resistance of valid conductor
for idx, p in enumerate(phases):
if (
"condid_{}".format(p.lower()) in settings
and settings["condid_{}".format(p.lower())]
in self.conductors
):
gmr_list.append(
0.0328084
* float(
self.conductors[
settings["condid_{}".format(p.lower())]
]["gmr"]
)
)
resistance_list.append(
1.0
/ 0.621371
* float(
self.conductors[
settings["condid_{}".format(p.lower())]
]["r25"]
)
)
else:
logger.warning(
"Could not find conductor {name}. Using DEFAULT...".format(
name="condid_{}".format(p.lower())
)
)
gmr_list.append(
0.0328084 * float(self.conductors["DEFAULT"]["gmr"])
)
resistance_list.append(
1.0
/ 0.621371
* float(self.conductors["DEFAULT"]["r25"])
)
# gmr_list.append(None)
# resistance_list.append(None)
if "condid_n" in settings:
if settings["condid_n"] in self.conductors:
gmr_list.append(
0.0328084
* float(
self.conductors[settings["condid_n"]]["gmr"]
)
)
resistance_list.append(
1.0
/ 0.621371
* float(
self.conductors[settings["condid_n"]]["r25"]
)
)
else:
logger.warning(
"Could not find neutral conductor {name}. Using DEFAULT...".format(
name=settings["condid_n"]
)
)
gmr_list.append(
0.0328084 * float(self.conductors["DEFAULT"]["gmr"])
)
resistance_list.append(
1.0
/ 0.621371
* float(self.conductors["DEFAULT"]["r25"])
)
elif (
"condid_n1" in settings
and settings["condid_n1"] is not None
and settings["condid_n1"].lower() != "none"
):
if settings["condid_n1"] in self.conductors:
gmr_list.append(
0.0328084
* float(
self.conductors[settings["condid_n1"]]["gmr"]
)
)
resistance_list.append(
1.0
/ 0.621371
* float(
self.conductors[settings["condid_n1"]]["r25"]
)
)
else:
logger.warning(
"Could not find neutral conductor {name}. Using DEFAULT...".format(
name=settings["condid_n1"]
)
)
gmr_list.append(
0.0328084 * float(self.conductors["DEFAULT"]["gmr"])
)
resistance_list.append(
1.0
/ 0.621371
* float(self.conductors["DEFAULT"]["r25"])
)
else:
gmr_list.append(None)
resistance_list.append(None)
perform_kron_reduction = False
gmr_list = np.array(gmr_list)
resistance_list = np.array(resistance_list)
idx_to_remove = np.argwhere(gmr_list == None).flatten()
idx_to_keep = [
idx
for idx in range(len(distance_matrix))
if idx not in idx_to_remove
]
try:
distance_matrix = distance_matrix[idx_to_keep, :][
:, idx_to_keep
]
except IndexError:
# It can happen that a one phase line is defined with a spacing table where no position are defined.
# This is uncommon but raises an IndexError here.
# To avoid that, use a dummy distance matrix
distance_matrix = np.array([[1]])
pass
primitive_imp_matrix = self.get_primitive_impedance_matrix(
distance_matrix, gmr_list, resistance_list
)
if perform_kron_reduction:
phase_imp_matrix = (
1.0
/ 1609.34
* self.kron_reduction(primitive_imp_matrix)
)
else:
phase_imp_matrix = 1.0 / 1609.34 * primitive_imp_matrix
impedance_matrix = phase_imp_matrix.tolist()
new_line["wires"] = []
for phase in phases:
if (
"condid_{}".format(phase.lower()) in settings
and settings["condid_{}".format(phase.lower())]
in self.conductors
):
conductor_data = self.conductors[
settings["condid_{}".format(phase.lower())]
]
else:
conductor_data = {}
# In addition, we might have some information on the spacings
if (
"spacingid" in settings
and settings["spacingid"] in self.spacings
):
spacing_data = self.spacings[settings["spacingid"]]
else:
spacing_data = {}
api_wire = self.configure_wire(
model,
conductor_data,
spacing_data,
phase,
False,
False,
False,
False,
False,
False,
False,
)
new_line["wires"].append(api_wire)
# Handle the neutral conductors
if (
"condid_n" in settings
and settings["condid_n"] is not None
and settings["condid_n"] != ""
and settings["condid_n"] != "NONE"
and settings["condid_n"] in self.conductors
):
conductor_data = self.conductors[settings["condid_n"]]
elif (
"condid_n1" in settings
and settings["condid_n1"] is not None
and settings["condid_n1"] != ""
and settings["condid_n1"] != "NONE"
and settings["condid_n1"] in self.conductors
):
conductor_data = self.conductors[settings["condid_n1"]]
else:
conductor_data = {}
# In addition, we might have some information on the spacings
if (
"spacingid" in settings
and settings["spacingid"] in self.spacings
):
spacing_data = self.spacings[settings["spacingid"]]
else:
spacing_data = {}
if len(conductor_data) != 0:
api_wire = self.configure_wire(
model,
conductor_data,
spacing_data,
"N",
False,
False,
False,
False,
False,
False,
False,
)
new_line["wires"].append(api_wire)
try:
new_line["impedance_matrix"] = impedance_matrix
except:
pass
api_line = Line(model)
for k, v in new_line.items():
setattr(api_line, k, v)
# Append the line DiTTo object to the list of DiTTo lines
self._lines.append(api_line)
return 1
def parse_capacitors(self, model):
    """Parse the capacitors from CYME to DiTTo.

    Reads serie- and shunt-capacitor settings from the network file and the
    matching equipment definitions from the equipment file, then builds one
    DiTTo ``Capacitor`` (with per-phase ``PhaseCapacitor`` children) per
    section and appends it to ``self._capacitors``.

    :param model: DiTTo model the new objects are attached to.
    :returns: 1 on completion.
    :raises ValueError: if a Capacitor/PhaseCapacitor object cannot be
        instanciated, or if the monitored (PT) phase is not in the
        section's phase list.

    NOTE(review): most attribute assignments are wrapped in bare
    ``try/except: pass`` on purpose — missing or malformed fields in the
    CYME export are silently skipped (best-effort parsing, consistent with
    the rest of this reader).
    """
    # Instanciate the list in which we store the DiTTo capacitor objects
    self._capacitors = []
    # Column-index mappings from field name to position in the CYME record
    mapp_serie_capacitor_settings = {
        "sectionid": 0,
        "eqid": 2,
        "coordx": 7,
        "coordy": 8,
    }
    mapp_shunt_capacitor_settings = {
        "sectionid": 0,
        "shuntcapacitorid": 39,
        "connection": 6,
        "fixedkvara": 7,
        "fixedkvarb": 8,
        "fixedkvarc": 9,
        "switchedkvara": 13,
        "switchedkvarb": 14,
        "switchedkvarc": 15,
        "kv": 24,
        "controllingphase": 35,
    }
    mapp_serie_capacitor = {"id": 0, "reactance": 6}
    mapp_shunt_capacitor = {"id": 0, "kvar": 1, "kv": 2, "type": 6}
    self.settings = {}
    self.capacitors = {}
    #####################################################
    #                                                   #
    #                   NETWORK FILE                    #
    #                                                   #
    #####################################################
    #
    # Open the network file
    self.get_file_content("network")
    # Loop over the network file
    for line in self.content:
        #########################################
        #                                       #
        #           SERIE CAPACITOR             #
        #                                       #
        #########################################
        #
        self.settings.update(
            self.parser_helper(
                line,
                ["serie_capacitor_settings"],
                ["sectionid", "eqid", "coordx", "coordy"],
                mapp_serie_capacitor_settings,
                {"type": "serie"},
            )
        )
        #########################################
        #                                       #
        #           SHUNT CAPACITOR             #
        #                                       #
        #########################################
        #
        self.settings.update(
            self.parser_helper(
                line,
                ["shunt_capacitor_settings"],
                [
                    "sectionid",
                    "shuntcapacitorid",
                    "connection",
                    "fixedkvara",
                    "fixedkvarb",
                    "fixedkvarc",
                    "switchedkvara",
                    "switchedkvarb",
                    "switchedkvarc",
                    "kv",
                    "controllingphase",
                ],
                mapp_shunt_capacitor_settings,
                {"type": "shunt"},
            )
        )
    #####################################################
    #                                                   #
    #                  EQUIPMENT FILE                   #
    #                                                   #
    #####################################################
    #
    # Open the equipment file
    self.get_file_content("equipment")
    # Loop over the equipment file
    for line in self.content:
        #########################################
        #                                       #
        #           SERIE CAPACITOR             #
        #                                       #
        #########################################
        #
        self.capacitors.update(
            self.parser_helper(
                line, ["serie_capacitor"], ["id", "reactance"], mapp_serie_capacitor
            )
        )
        #########################################
        #                                       #
        #           SHUNT CAPACITOR             #
        #                                       #
        #########################################
        #
        self.capacitors.update(
            self.parser_helper(
                line,
                ["shunt_capacitor"],
                ["id", "kvar", "kv", "type"],
                mapp_shunt_capacitor,
            )
        )
    # Join settings with equipment data and build the DiTTo objects
    for sectionID, settings in self.settings.items():
        sectionID = sectionID.strip("*").lower()
        # Instanciate Capacitor DiTTo objects
        try:
            api_capacitor = Capacitor(model)
        except:
            # BUGFIX: the original formatted this message with an undefined
            # name `scap`, which raised NameError instead of the intended
            # ValueError. Use the in-scope sectionID instead.
            raise ValueError(
                "Unable to instanciate capacitor {id}".format(id=sectionID)
            )
        # Set the name
        try:
            api_capacitor.name = "Cap_" + sectionID
        except:
            pass
        # Set the connecting element (info is in the section)
        try:
            api_capacitor.connecting_element = self.section_phase_mapping[
                sectionID
            ]["fromnodeid"]
        except:
            pass
        # PT phase
        # (Only works with shunt capacitors)
        try:
            api_capacitor.pt_phase = self.phase_mapping(
                settings["controllingphase"]
            )
        except:
            pass
        api_capacitor.feeder_name = self.section_feeder_mapping[sectionID]
        # Connection_type
        # (Only works with shunt capacitors)
        try:
            api_capacitor.connection_type = self.capacitors_connection_mapping(
                settings["connection"]
            )
        except:
            pass
        # Position
        try:
            position = Position(model)
            position.long = float(settings["coordx"])
            position.lat = float(settings["coordy"])
            position.elevation = 0
            api_capacitor.position.append(position)
        except:
            pass
        # Get the device number (serie capacitors use eqid,
        # shunt capacitors use shuntcapacitorid)
        if "eqid" in settings:
            dev_num = settings["eqid"]
        elif "shuntcapacitorid" in settings:
            dev_num = settings["shuntcapacitorid"]
        else:
            dev_num = None
        capacitor_data = None
        if dev_num is not None:
            if dev_num in self.capacitors:
                capacitor_data = self.capacitors[dev_num]
            # Reactance
            try:
                api_capacitor.reactance = float(capacitor_data["reactance"])
            except:
                pass
            # KV
            try:
                api_capacitor.nominal_voltage = (
                    float(capacitor_data["kv"]) * 10 ** 3
                )  # DiTTo in volt
            except:
                pass
        # Map the phases to DiTTo phase format
        phases = self.section_phase_mapping[sectionID]["phase"]
        # Rated KV
        #
        # Note: Rated KV is line-to-neutral for Wye-grounded configuration,
        # and line-to-line for delta configuration
        #
        # If the capacitor is one phase, we have a line-to-neutral,
        # and line-to-line if it is 3 phase
        #
        if "kv" in settings:
            try:
                if api_capacitor.connection_type == "Y" or len(phases) == 1:
                    api_capacitor.nominal_voltage = (
                        float(settings["kv"]) * 10 ** 3
                    )  # DiTTo in volt
                # Deliberately not an elif: a 3-phase delta overrides the
                # line-to-neutral value set just above.
                if api_capacitor.connection_type == "D" or len(phases) == 3:
                    api_capacitor.nominal_voltage = (
                        float(settings["kv"]) * 10 ** 3 * math.sqrt(3)
                    )  # DiTTo in volt
            except:
                pass
        # Sanity check: the monitored phase must belong to the section
        if (
            api_capacitor.pt_phase is not None
            and api_capacitor.pt_phase not in phases
        ):
            # BUGFIX: same undefined-name issue as above (`scap`); use
            # sectionID so the intended ValueError is actually raised.
            raise ValueError(
                "Capacitor {name} is monitoring phase {p} which is not in the section {id} phase list {lis}.".format(
                    name=api_capacitor.name,
                    p=api_capacitor.pt_phase,
                    id=sectionID,
                    lis=phases,
                )
            )
        # For each phase...
        for p in phases:
            # Instanciate a PhaseCapacitor DiTTo object
            try:
                api_phaseCapacitor = PhaseCapacitor(model)
            except:
                raise ValueError(
                    "Unable to instanciate PhaseCapacitor DiTTo object."
                )
            # Set the phase
            try:
                api_phaseCapacitor.phase = p
            except:
                pass
            # Set var value: prefer fixed kvar, then switched kvar, then the
            # equipment-level kvar as a fallback
            if (
                "fixedkvara" in settings
                and "fixedkvarb" in settings
                and "fixedkvarc" in settings
                and max(
                    float(settings["fixedkvara"]),
                    max(
                        float(settings["fixedkvarb"]), float(settings["fixedkvarc"])
                    ),
                )
                > 0
            ):
                try:
                    if p == "A":
                        api_phaseCapacitor.var = (
                            float(settings["fixedkvara"]) * 10 ** 3
                        )  # Ditto in var
                    if p == "B":
                        api_phaseCapacitor.var = (
                            float(settings["fixedkvarb"]) * 10 ** 3
                        )  # Ditto in var
                    if p == "C":
                        api_phaseCapacitor.var = (
                            float(settings["fixedkvarc"]) * 10 ** 3
                        )  # Ditto in var
                except:
                    pass
            elif (
                "switchedkvara" in settings
                and "switchedkvarb" in settings
                and "switchedkvarc" in settings
                and max(
                    float(settings["switchedkvara"]),
                    max(
                        float(settings["switchedkvarb"]),
                        float(settings["switchedkvarc"]),
                    ),
                )
                > 0
            ):
                try:
                    if p == "A":
                        api_phaseCapacitor.var = (
                            float(settings["switchedkvara"]) * 10 ** 3
                        )  # Ditto in var
                    if p == "B":
                        api_phaseCapacitor.var = (
                            float(settings["switchedkvarb"]) * 10 ** 3
                        )  # Ditto in var
                    if p == "C":
                        api_phaseCapacitor.var = (
                            float(settings["switchedkvarc"]) * 10 ** 3
                        )  # Ditto in var
                except:
                    pass
            elif capacitor_data is not None:
                try:
                    api_phaseCapacitor.var = (
                        float(capacitor_data["kvar"]) * 10 ** 3
                    )  # DiTTo in var
                except:
                    pass
            # Append the phase capacitor object to the capacitor
            api_capacitor.phase_capacitors.append(api_phaseCapacitor)
        self._capacitors.append(api_capacitor)
    return 1
    def parse_transformers(self, model):
        """Parse the transformers from CYME to DiTTo. Since substation transformer can have LTCs attached, when parsing a transformer, we may also create a regulator. LTCs are represented as regulators.

        :param model: DiTTo model the PowerTransformer (and optional Regulator)
            objects are attached to.
        :returns: 1 on success.

        Flow: (1) scan the network file and collect per-section transformer
        settings into ``self.settings``; (2) scan the equipment file and collect
        equipment definitions keyed by equipment id; (3) join the two on
        section/equipment id and build the DiTTo objects, branching on the
        ``type`` tag stored with each settings record.
        """
        # Instanciate the list in which we store the DiTTo transformer objects
        self._transformers = []
        # The mapp_* dictionaries map CYME attribute names to zero-based column
        # positions inside the corresponding CYME record (consumed by
        # self.parser_helper).
        # NOTE(review): the column positions are CYME-version dependent —
        # confirm they match the export version being parsed.
        mapp_auto_transformer_settings = {
            "sectionid": 0,
            "eqid": 2,
            "coordx": 7,
            "coordy": 8,
            "connection_configuration": 9,
            "tap": 25,
        }
        mapp_auto_transformer = {
            "id": 0,
            "kva": 3,
            "connection_configuration": 18,
            "noloadlosses": 32,
            "isltc": 21,
            "taps": 22,
            "lowerbandwidth": 23,
            "upperbandwidth": 24,
        }
        mapp_grounding_transformer_settings = {
            "sectionid": 0,
            "equipmentid": 6,
            "connectionconfiguration": 10,
            "phase": 13,
        }
        mapp_grounding_transformer = {
            "id": 0,
            "connectionconfiguration": 7,
            "ratedvoltage": 5,
            "ratedcapacity": 6,
        }
        mapp_three_winding_auto_transformer_settings = {
            "sectionid": 0,
            "eqid": 2,
            "coordx": 7,
            "coordy": 8,
            "primaryfixedtapsetting": 10,
            "secondaryfixedtapsetting": 11,
            "tertiaryfixedtapsetting": 12,
            "primarybasevoltage": 13,
            "secondarybasevoltage": 14,
            "tertiarybasevoltage": 15,
        }
        mapp_three_winding_auto_transformer = {
            "id": 0,
            "primaryratedcapacity": 1,
            "primaryvoltage": 6,
            "secondaryratedcapacity": 22,
            "secondaryvoltage": 27,
            "tertiaryratedcapacity": 30,
            "tertiaryvoltage": 35,
            "noloadlosses": 50,
        }
        mapp_three_winding_transformer_settings = {
            "sectionid": 0,
            "eqid": 2,
            "coordx": 7,
            "coordy": 8,
            "primaryfixedtapsetting": 10,
            "secondaryfixedtapsetting": 11,
            "tertiaryfixedtapsetting": 12,
            "primarybasevoltage": 13,
            "secondarybasevoltage": 14,
            "tertiarybasevoltage": 15,
        }
        mapp_three_winding_transformer = {
            "id": 0,
            "primaryratedcapacity": 1,
            "primaryvoltage": 6,
            "secondaryratedcapacity": 24,
            "secondaryvoltage": 29,
            "tertiaryratedcapacity": 33,
            "tertiaryvoltage": 38,
            "noloadlosses": 53,
        }
        mapp_transformer_settings = {
            "sectionid": 0,
            "eqid": 2,
            "coordx": 7,
            "coordy": 8,
            "conn": 9,
            "primtap": 10,
            "secondarytap": 11,
            "primarybasevoltage": 17,
            "secondarybasevoltage": 18,
            "setpoint": 21,
            "maxbuck": 29,
            "maxboost": 30,
            "ct": 31,
            "pt": 32,
            "phaseon": 37,
        }
        mapp_transformer = {
            "id": 0,
            "type": 1,
            "kva": 3,
            "kvllprim": 5,
            "kvllsec": 6,
            "z1": 7,
            "z0": 8,
            "xr": 12,
            "xr0": 13,
            "conn": 18,
            "noloadlosses": 34,
            "isltc": 23,
            "taps": 24,
            "lowerbandwidth": 25,
            "upperbandwidth": 26,
            "phaseshift": 41,
        }
        mapp_phase_shifter_transformer_settings = {
            "sectionid": 0,
            "eqid": 2,
            "coordx": 10,
            "coordy": 11,
        }
        # Accumulators: settings are keyed by section id, equipment records by
        # equipment id.
        self.auto_transformers = {}
        self.grounding_transformers = {}
        self.three_winding_auto_transformers = {}
        self.three_winding_transformers = {}
        self.settings = {}
        self.transformers = {}
        #####################################################
        #                                                   #
        #                   NETWORK FILE                    #
        #                                                   #
        #####################################################
        #
        # Open the network file
        self.get_file_content("network")
        # Loop over the network file
        for line in self.content:
            #########################################
            #                                       #
            #            AUTO TRANSFORMER           #
            #                                       #
            #########################################
            #
            self.settings.update(
                self.parser_helper(
                    line,
                    ["auto_transformer_settings"],
                    [
                        "sectionid",
                        "eqid",
                        "coordx",
                        "coordy",
                        "connection_configuration",
                        "tap",
                    ],
                    mapp_auto_transformer_settings,
                    {"type": "auto_transformer"},
                )
            )
            #########################################
            #                                       #
            #        GROUNDING TRANSFORMER          #
            #                                       #
            #########################################
            #
            self.settings.update(
                self.parser_helper(
                    line,
                    ["grounding_transformer_settings"],
                    ["sectionid", "equipmentid", "connectionconfiguration", "phase"],
                    mapp_grounding_transformer_settings,
                    {"type": "grounding_transformer"},
                )
            )
            #########################################
            #                                       #
            #    THREE WINDING AUTO TRANSFORMER     #
            #                                       #
            #########################################
            #
            self.settings.update(
                self.parser_helper(
                    line,
                    ["three_winding_auto_transformer_settings"],
                    [
                        "sectionid",
                        "eqid",
                        "coordx",
                        "coordy",
                        "primaryfixedtapsetting",
                        "secondaryfixedtapsetting",
                        "tertiaryfixedtapsetting",
                        "primarybasevoltage",
                        "secondarybasevoltage",
                        "tertiarybasevoltage",
                    ],
                    mapp_three_winding_auto_transformer_settings,
                    {"type": "three_winding_auto_transformer"},
                )
            )
            #########################################
            #                                       #
            #      THREE WINDING TRANSFORMER        #
            #                                       #
            #########################################
            #
            self.settings.update(
                self.parser_helper(
                    line,
                    ["three_winding_transformer_settings"],
                    [
                        "sectionid",
                        "eqid",
                        "coordx",
                        "coordy",
                        "primaryfixedtapsetting",
                        "secondaryfixedtapsetting",
                        "tertiaryfixedtapsetting",
                        "primarybasevoltage",
                        "secondarybasevoltage",
                        "tertiarybasevoltage",
                    ],
                    mapp_three_winding_transformer_settings,
                    {"type": "three_winding_transformer"},
                )
            )
            #########################################
            #                                       #
            #             TRANSFORMER               #
            #                                       #
            #########################################
            #
            # NOTE(review): several requested names below (e.g.
            # "primaryfixedtapsetting", "tertiarybasevoltage") have no entry in
            # mapp_transformer_settings (which defines "primtap"/"secondarytap"
            # instead) — those fields are probably never extracted here.
            # Confirm against parser_helper's handling of unmapped names.
            self.settings.update(
                self.parser_helper(
                    line,
                    ["transformer_settings"],
                    [
                        "sectionid",
                        "eqid",
                        "coordx",
                        "coordy",
                        "primaryfixedtapsetting",
                        "secondaryfixedtapsetting",
                        "tertiaryfixedtapsetting",
                        "primarybasevoltage",
                        "secondarybasevoltage",
                        "tertiarybasevoltage",
                        "setpoint",
                        "maxbuck",
                        "maxboost",
                        "ct",
                        "pt",
                    ],
                    mapp_transformer_settings,
                    {"type": "transformer"},
                )
            )
            #########################################
            #                                       #
            #      PHASE SHIFTER TRANSFORMER        #
            #                                       #
            #########################################
            #
            self.settings.update(
                self.parser_helper(
                    line,
                    ["phase_shifter_transformer_settings"],
                    ["sectionid", "eqid", "coordx", "coordy"],
                    mapp_phase_shifter_transformer_settings,
                    {"type": "phase_shifter_transformer"},
                )
            )
        #####################################################
        #                                                   #
        #                   EQUIPMENT FILE                  #
        #                                                   #
        #####################################################
        #
        # Open the equipment file
        self.get_file_content("equipment")
        # Loop over the equipment file
        for line in self.content:
            #########################################
            #                                       #
            #            AUTO TRANSFORMER           #
            #                                       #
            #########################################
            #
            self.auto_transformers.update(
                self.parser_helper(
                    line,
                    ["auto_transformer"],
                    [
                        "id",
                        "kva",
                        "connection_configuration",
                        "noloadlosses",
                        "isltc",
                        "taps",
                        "lowerbandwidth",
                        "upperbandwidth",
                    ],
                    mapp_auto_transformer,
                )
            )
            #########################################
            #                                       #
            #        GROUNDING TRANSFORMER          #
            #                                       #
            #########################################
            #
            # NOTE(review): "connection_configuration" below does not match the
            # "connectionconfiguration" key defined in
            # mapp_grounding_transformer above, so the connection field is
            # likely never extracted for grounding transformers — confirm.
            self.grounding_transformers.update(
                self.parser_helper(
                    line,
                    ["grounding_transformer"],
                    ["id", "ratedcapacity", "ratedvoltage", "connection_configuration"],
                    mapp_grounding_transformer,
                )
            )
            #########################################
            #                                       #
            #    THREE WINDING AUTO TRANSFORMER     #
            #                                       #
            #########################################
            #
            # LTC controls not yet supported for three-winding transformers
            self.three_winding_auto_transformers.update(
                self.parser_helper(
                    line,
                    ["three_winding_auto_transformer"],
                    [
                        "id",
                        "primaryratedcapacity",
                        "primaryvoltage",
                        "secondaryratedcapacity",
                        "secondaryvoltage",
                        "tertiaryratedcapacity",
                        "tertiaryvoltage",
                        "noloadlosses",
                    ],
                    mapp_three_winding_auto_transformer,
                )
            )
            #########################################
            #                                       #
            #      THREE WINDING TRANSFORMER        #
            #                                       #
            #########################################
            #
            # LTC controls not yet supported for three-winding transformers
            self.three_winding_transformers.update(
                self.parser_helper(
                    line,
                    ["three_winding_transformer"],
                    [
                        "id",
                        "primaryratedcapacity",
                        "primaryvoltage",
                        "secondaryratedcapacity",
                        "secondaryvoltage",
                        "tertiaryratedcapacity",
                        "tertiaryvoltage",
                        "noloadlosses",
                    ],
                    mapp_three_winding_transformer,
                )
            )
            #########################################
            #                                       #
            #             TRANSFORMER               #
            #                                       #
            #########################################
            #
            self.transformers.update(
                self.parser_helper(
                    line,
                    ["transformer"],
                    [
                        "id",
                        "type",
                        "kva",
                        "kvllprim",
                        "kvllsec",
                        "z1",
                        "z0",
                        "xr",
                        "xr0",
                        "conn",
                        "noloadlosses",
                        "phaseshift",
                        "isltc",
                        "taps",
                        "lowerbandwidth",
                        "upperbandwidth",
                    ],
                    mapp_transformer,
                )
            )
        # Join: one PowerTransformer per settings record (i.e. per section).
        for sectionID, settings in self.settings.items():
            # Section ids may carry a trailing '*' marker and are matched
            # case-insensitively.
            sectionID = sectionID.strip("*").lower()
            # Instanciate a PowerTransformer DiTTo object
            try:
                api_transformer = PowerTransformer(model)
            except:
                raise ValueError("Unable to instanciate PowerTransformer DiTTo object.")
            # Set the name
            try:
                api_transformer.name = "Trans_" + settings["sectionid"]
            except:
                pass
            api_transformer.feeder_name = self.section_feeder_mapping[sectionID]
            try:
                phases = self.section_phase_mapping[sectionID]["phase"]
            except:
                raise ValueError("Empty phases for transformer {}.".format(sectionID))
            # Set from_element
            try:
                api_transformer.from_element = self.section_phase_mapping[sectionID][
                    "fromnodeid"
                ]
            except:
                pass
            # Set to_element
            try:
                api_transformer.to_element = self.section_phase_mapping[sectionID][
                    "tonodeid"
                ]
            except:
                pass
            # Set the position
            try:
                position = Position(model)
                position.long = float(settings["coordx"])
                position.lat = float(settings["coordy"])
                position.elevation = 0
                api_transformer.positions.append(position)
            except:
                pass
            # Handle the three winding transformers
            if settings["type"] in [
                "three_winding_transformer",
                "three_winding_auto_transformer",
            ]:
                # Here we know that we have three windings...
                for w in range(3):
                    # Instanciate a DiTTo Winding object
                    try:
                        api_winding = Winding(model)
                    except:
                        raise ValueError("Unable to instanciate Winding DiTTo object.")
                    # Set the base voltage
                    # We assume that 1st winding is primary, 2nd secondary, and third tertiary
                    try:
                        if w == 0:
                            api_winding.nominal_voltage = (
                                float(settings["primarybasevoltage"]) * 10 ** 3
                            )  # DiTTo in volt
                        if w == 1:
                            api_winding.nominal_voltage = (
                                float(settings["secondarybasevoltage"]) * 10 ** 3
                            )  # DiTTo in volt
                        if w == 2:
                            api_winding.nominal_voltage = (
                                float(settings["tertiarybasevoltage"]) * 10 ** 3
                            )  # DiTTo in volt
                    except:
                        pass
                    # Set the rated power
                    # NOTE(review): the *ratedcapacity keys are defined only in
                    # the equipment mappings (mapp_three_winding_*), not in the
                    # settings mappings, so these lookups on `settings` likely
                    # always KeyError and are swallowed by the bare except —
                    # rated_power is then never set for three-winding units.
                    # Confirm and consider looking the values up in
                    # self.three_winding_(auto_)transformers via settings["eqid"].
                    try:
                        if w == 0:
                            api_winding.rated_power = (
                                float(settings["primaryratedcapacity"]) * 10 ** 3
                            )  # DiTTo in volt ampere
                        if w == 1:
                            api_winding.rated_power = (
                                float(settings["secondaryratedcapacity"]) * 10 ** 3
                            )  # DiTTo in volt ampere
                        if w == 2:
                            api_winding.rated_power = (
                                float(settings["tertiaryratedcapacity"]) * 10 ** 3
                            )  # DiTTo in volt ampere
                    except:
                        pass
                    # Create the phase windings
                    for p in phases:
                        # Instanciate a PhaseWinding DiTTo object
                        try:
                            api_phase_winding = PhaseWinding(model)
                        except:
                            raise ValueError(
                                "Unable to instanciate PhaseWinding DiTTo object."
                            )
                        # Set the phase
                        try:
                            api_phase_winding.phase = p
                        except:
                            pass
                        # Set the tap position
                        try:
                            if w == 0:
                                api_phase_winding.tap_position = int(
                                    settings["primaryfixedtapsetting"]
                                )
                            if w == 1:
                                api_phase_winding.tap_position = int(
                                    settings["secondaryfixedtapsetting"]
                                )
                            if w == 2:
                                api_phase_winding.tap_position = int(
                                    settings["tertiaryfixedtapsetting"]
                                )
                        except:
                            pass
                        # Add the phase winding object to the winding
                        api_winding.phase_windings.append(api_phase_winding)
                    # Add the winding object to the transformer
                    api_transformer.windings.append(api_winding)
            # Handle two windings transformers
            if settings["type"] == "transformer":
                # Equipment record for this section's transformer; fall back to
                # the "DEFAULT" equipment entry when the eqid is unknown.
                if settings["eqid"] in self.transformers:
                    transformer_data = self.transformers[settings["eqid"]]
                else:
                    transformer_data = self.transformers["DEFAULT"]
                # Resistance
                #
                # Note: Imported from Julietta's code
                #
                # Build sequence impedances from Z1/Z0 and the X/R ratios,
                # transform to phase quantities (Zabc), and use the (0, 0)
                # element as the per-unit impedance for winding resistance
                # (R_perc, split over two windings) and reactance (xhl).
                # NOTE(review): R0 uses XR (not XR0), and X1/X0 use the
                # resistance-style formula Z/sqrt(1+XR0^2) instead of
                # Z*XR/sqrt(1+XR^2) — this looks inherited and possibly wrong;
                # confirm against the original derivation.
                Z1 = float(transformer_data["z1"])
                Z0 = float(transformer_data["z0"])
                XR = float(transformer_data["xr"])
                XR0 = float(transformer_data["xr0"])
                R1 = Z1 / math.sqrt(1 + XR * XR)
                R0 = Z0 / math.sqrt(1 + XR * XR)
                X1 = Z1 / math.sqrt(1 + XR0 * XR0)
                X0 = Z0 / math.sqrt(1 + XR0 * XR0)
                complex1 = complex(R0, X0)
                complex2 = complex(R1, X1)
                matrix = np.matrix(
                    [[complex1, 0, 0], [0, complex2, 0], [0, 0, complex2]]
                )
                # Fortescue (symmetrical components) transformation matrix.
                a = 1 * cmath.exp(2 * math.pi * 1j / 3)
                T = np.matrix([[1., 1., 1.], [1., a * a, a], [1., a, a * a]])
                T_inv = T.I
                Zabc = T * matrix * T_inv
                Z_perc = Zabc.item((0, 0))
                R_perc = Z_perc.real / 2.0
                xhl = Z_perc.imag
                # Check if it's an LTC
                #
                if "isltc" in transformer_data and transformer_data["isltc"]:
                    # LTCs are modeled as a Regulator attached to this
                    # transformer.
                    # Instanciate a Regulator DiTTo object
                    try:
                        api_regulator = Regulator(model)
                    except:
                        raise ValueError(
                            "Unable to instanciate Regulator DiTTo object."
                        )
                    try:
                        api_regulator.name = "Reg_" + settings["sectionid"]
                    except:
                        pass
                    api_regulator.feeder_name = self.section_feeder_mapping[sectionID]
                    try:
                        api_regulator.connected_transformer = api_transformer.name
                    except:
                        raise ValueError("Unable to connect LTC to transformer")
                    taps = float(transformer_data["taps"])
                    lowerbandwidth = float(transformer_data["lowerbandwidth"])
                    upperbandwidth = float(transformer_data["upperbandwidth"])
                    minreg_range = int(float(settings["maxbuck"]))
                    maxreg_range = int(float(settings["maxboost"]))
                    setpoint = float(settings["setpoint"])
                    ct = int(float(settings["ct"]))
                    pt = int(float(settings["pt"]))
                    center_bandwidth = upperbandwidth - lowerbandwidth
                    api_regulator.ltc = 1
                    # NOTE(review): highstep receives maxbuck and lowstep
                    # receives maxboost — this looks swapped relative to the
                    # usual convention (boost = high); confirm against the
                    # DiTTo Regulator attribute definitions.
                    api_regulator.highstep = minreg_range
                    api_regulator.lowstep = maxreg_range
                    api_regulator.pt_ratio = pt
                    api_regulator.ct_ratio = ct
                    api_regulator.setpoint = setpoint
                    api_regulator.center_bandwidth = center_bandwidth
                    # NOTE(review): this is the SUM of the two bandwidths, not
                    # the average the original comment claimed; since upper and
                    # lower are typically equal this is twice either value —
                    # confirm intended semantics.
                    api_regulator.bandwidth = (
                        upperbandwidth + lowerbandwidth
                    )  # ie. use the average bandwidth. The upper and lower are typically the same
                    # TODO: Add unit checking. These units are in percentages. Need to be updated to be in Volts for consistency (BUG in cyme writer too)
                    # TODO: Decide whether or not to put parameters in for the regulator range, and what units they should be.
                try:
                    api_transformer.reactances = [float(xhl)]
                except:
                    pass
                # Here we know that we have two windings...
                for w in range(2):
                    # Instanciate a Winding DiTTo object
                    try:
                        api_winding = Winding(model)
                    except:
                        raise ValueError("Unable to instanciate Winding DiTTo object.")
                    # Set the rated power
                    try:
                        if w == 0:
                            api_winding.rated_power = (
                                float(transformer_data["kva"]) * 10 ** 3
                            )  # DiTTo in volt ampere
                        if w == 1:
                            api_winding.rated_power = (
                                float(transformer_data["kva"]) * 10 ** 3
                            )  # DiTTo in volt ampere
                    except:
                        pass
                    # Set the nominal voltage
                    try:
                        if w == 0:
                            api_winding.nominal_voltage = (
                                float(transformer_data["kvllprim"]) * 10 ** 3
                            )  # DiTTo in volt
                        if w == 1:
                            api_winding.nominal_voltage = (
                                float(transformer_data["kvllsec"]) * 10 ** 3
                            )  # DiTTo in volt
                    except:
                        pass
                    # Connection configuration
                    try:
                        api_winding.connection_type = self.transformer_connection_configuration_mapping(
                            transformer_data["conn"], w
                        )
                    except:
                        pass
                    # Resistance
                    try:
                        api_winding.resistance = R_perc
                    except:
                        pass
                    # For each phase...
                    for p in phases:
                        # Instanciate a PhaseWinding DiTTo object
                        try:
                            api_phase_winding = PhaseWinding(model)
                        except:
                            raise ValueError(
                                "Unable to instanciate PhaseWinding DiTTo object."
                            )
                        # Set the phase
                        try:
                            api_phase_winding.phase = p
                        except:
                            pass
                        # Add the phase winding object to the winding
                        api_winding.phase_windings.append(api_phase_winding)
                    # Add the winding object to the transformer
                    api_transformer.windings.append(api_winding)
            # Handle Grounding transformers
            if settings["type"] == "grounding_transformer":
                if settings["equipmentid"] in self.grounding_transformers:
                    transformer_data = self.grounding_transformers[
                        settings["equipmentid"]
                    ]
                else:
                    transformer_data = {}
                # Here we know that we have two windings...
                for w in range(2):
                    # Instanciate a Winding DiTTo object
                    try:
                        api_winding = Winding(model)
                    except:
                        raise ValueError("Unable to instanciate Winding DiTTo object.")
                    # Set the rated power
                    try:
                        if w == 0:
                            api_winding.rated_power = (
                                float(transformer_data["ratedcapacity"]) * 10 ** 3
                            )  # DiTTo in volt ampere
                        if w == 1:
                            api_winding.rated_power = (
                                float(transformer_data["ratedcapacity"]) * 10 ** 3
                            )  # DiTTo in volt ampere
                    except:
                        pass
                    # Set the nominal voltage
                    try:
                        if w == 0:
                            api_winding.nominal_voltage = (
                                float(transformer_data["ratedvoltage"]) * 10 ** 3
                            )  # DiTTo in volt
                        if w == 1:
                            api_winding.nominal_voltage = (
                                float(transformer_data["ratedvoltage"]) * 10 ** 3
                            )  # DiTTo in volt
                    except:
                        pass
                    # Set the connection configuration
                    # NOTE(review): grounding-transformer records are mapped
                    # with key "connectionconfiguration" (and the equipment scan
                    # requested "connection_configuration"), so "conn" is likely
                    # never present here and this assignment silently never
                    # happens — confirm.
                    try:
                        api_winding.connection_type = self.connection_configuration_mapping(
                            transformer_data["conn"]
                        )
                    except:
                        pass
                    # For each phase...
                    for p in phases:
                        # Instanciate a PhaseWinding DiTTo object
                        try:
                            api_phase_winding = PhaseWinding(model)
                        except:
                            raise ValueError(
                                "Unable to instanciate PhaseWinding DiTTo object."
                            )
                        # Set the phase
                        try:
                            api_phase_winding.phase = p
                        except:
                            pass
                        # Add the phase winding object to the winding
                        api_winding.phase_windings.append(api_phase_winding)
                    # Add the winding object to the transformer
                    api_transformer.windings.append(api_winding)
            # Add the transformer object to the list of transformers
            self._transformers.append(api_transformer)
        return 1
def parse_regulators(self, model):
"""Parse the regulators from CYME to DiTTo.
.. note::
In CYME a regulator does not have to be associated with a transformer (as it is the case for OpenDSS for example).
In addition, a regulator can monitor multiple phases.
The parser should create the transformers and create separate regulator objects for different phases.
"""
# Instanciate the list in which we store the DiTTo regulator objects
self._regulators = []
mapp_regulators = {
"id": 0,
"type": 1,
"kva": 2,
"kva_1": 3,
"kva_2": 4,
"kva_3": 5,
"kva_4": 6,
"kvln": 7,
"forwardbandwidth": 11,
"bandwidth": 11, # For old CYME version 'forwardbandwidth' is just 'bandwidth'
"ct": 13,
"pt": 14,
}
mapp_regulator_settings = {
"sectionid": 0,
"eqid": 2,
"coordx": 7,
"coordy": 8,
"phaseon": 9,
"ct": 12,
"pt": 13,
"vseta": 16,
"vsetb": 17,
"vsetc": 18,
"bandwidtha": 25,
"bandwidthb": 26,
"bandwidthc": 27,
"tapa": 28,
"tapb": 29,
"tapc": 30,
"conn": 31,
}
self.settings = {}
self.regulators = {}
#####################################################
# #
# NETWORK FILE #
# #
#####################################################
#
# Open the network file
self.get_file_content("network")
# Loop over the network file
for line in self.content:
self.settings.update(
self.parser_helper(
line,
["regulator_settings"],
[
"sectionid",
"eqid",
"coordx",
"coordy",
"phaseon",
"ct",
"pt",
"vseta",
"vsetb",
"vsetc",
"bandwidtha",
"bandwidthb",
"bandwidthc",
"tapa",
"tapb",
"tapc",
"conn",
],
mapp_regulator_settings,
)
)
#####################################################
# #
# EQUIPMENT FILE #
# #
#####################################################
#
# Open the network file
self.get_file_content("equipment")
# Loop over the network file
for line in self.content:
self.regulators.update(
self.parser_helper(
line,
["regulator"],
[
"id",
"type",
"kva",
"kva_1",
"kva_2",
"kva_3",
"kva_4",
"kvln",
"forwardbandwidth",
"bandwidth",
"ct",
"pt",
],
mapp_regulators,
)
)
for sectionID, settings in self.settings.items():
sectionID = sectionID.strip("*").lower()
try:
phases = self.section_phase_mapping[sectionID]["phase"]
except:
raise ValueError("No phase for section {}".format(sectionID))
try:
phases_on = self.phase_mapping(settings["phaseon"])
except:
raise ValueError(
"Unable to get phases for regulator {}".format(sectionID)
)
if "eqid" in settings and settings["eqid"] in self.regulators:
regulator_data = self.regulators[settings["eqid"]]
else:
regulator_data = {}
for p in phases_on:
if p not in phases:
logger.warning(
"Regulator {id} monitors phase {p} which is not in the section phases {pp}".format(
id=sectionID, p=p, pp=phases
)
)
# Instanciate a Regulator DiTTo object
try:
api_regulator = Regulator(model)
except:
raise ValueError("Unable to instanciate Regulator DiTTo object.")
try:
api_regulator.name = "Reg_" + sectionID + "_" + p
except:
pass
api_regulator.feeder_name = self.section_feeder_mapping[sectionID]
try:
api_regulator.from_element = self.section_phase_mapping[sectionID][
"fromnodeid"
]
except:
pass
try:
api_regulator.to_element = self.section_phase_mapping[sectionID][
"tonodeid"
]
except:
pass
try:
api_regulator.pt_phase = p
except:
pass
try:
position = Position(model)
position.long = float(reg_set["coordx"])
position.lat = float(reg_set["coordy"])
position.elevation = 0
api_regulator.positions.append(position)
except:
pass
try:
api_regulator.pt_ratio = float(settings["pt"])
except:
pass
try:
api_regulator.ct_prim = float(settings["ct"])
except:
pass
try:
if p == "A":
api_regulator.bandcenter = float(settings["vseta"])
if p == "B":
api_regulator.bandcenter = float(settings["vsetb"])
if p == "C":
api_regulator.bandcenter = float(settings["vsetc"])
except:
pass
try:
if (
p == "A"
and "bandwidtha" in settings
and settings["bandwidtha"] is not None
):
api_regulator.bandwidth = float(settings["bandwidtha"])
elif "forwardbandwidth" in regulator_data:
api_regulator.bandwidth = float(
regulator_data["forwardbandwidth"]
)
else:
api_regulator.bandwidth = float(
regulator_data["bandwidth"]
) # For old CYME versions
if (
p == "B"
and "bandwidthb" in settings
and settings["bandwidthb"] is not None
):
api_regulator.bandwidth = float(settings["bandwidthb"])
elif "forwardbandwidth" in regulator_data:
api_regulator.bandwidth = float(
regulator_data["forwardbandwidth"]
)
else:
api_regulator.bandwidth = float(
regulator_data["bandwidth"]
) # For old CYME versions
if (
p == "C"
and "bandwidthc" in settings
and settings["bandwidthc"] is not None
):
api_regulator.bandwidth = float(settings["bandwidthc"])
elif "forwardbandwidth" in regulator_data:
api_regulator.bandwidth = float(
regulator_data["forwardbandwidth"]
)
else:
api_regulator.bandwidth = float(
regulator_data["bandwidth"]
) # For old CYME versions
except:
pass
for w in range(2):
# Instanciate a Winding DiTTo object
try:
api_winding = Winding(model)
except:
raise ValueError("Unable to instanciate Winding DiTTo object.")
# Set the rated power
try:
api_winding.rated_power = (
float(regulator_data["kva"]) * 10 ** 3
) # DiTTo in volt ampere
except:
pass
# Set the connection type
try:
api_winding.connection_type = self.connection_configuration_mapping(
settings["conn"]
)
except:
pass
# Set the nominal voltage
try:
api_winding.nominal_voltage = float(regulator_data["kvln"])
except:
pass
# Instanciate a PhaseWinding DiTTo object
try:
api_phase_winding = PhaseWinding(model)
except:
raise ValueError("Unable to instanciate PhaseWinding object.")
# Set the phase
try:
api_phase_winding.phase = p
except:
pass
# Append the phaseWinding object to the winding
api_winding.phase_windings.append(api_phase_winding)
# api_transformer.windings.append(api_winding)
# Add the winding object to the regulator
api_regulator.windings.append(api_winding)
self._regulators.append(api_regulator)
return 1
    def parse_loads(self, model):
        """Parse the loads from CYME to DiTTo.

        :param model: DiTTo model the Load objects are attached to.
        :returns: 1 on success.

        Flow: scan the load file for [LOADS], [CUSTOMER LOADS] and
        [CUSTOMER CLASS] records, then build one DiTTo Load per section from
        the customer-load records, merging ("fusion") records for sections
        that appear more than once.
        """
        # Instanciate the list in which we store the DiTTo load objects
        # (actually a dict keyed by section id, so duplicates can be merged).
        self._loads = {}
        # The mapp_* dictionaries map CYME attribute names to zero-based
        # column positions inside the corresponding CYME record.
        mapp_loads = {"sectionid": 0, "devicenumber": 1, "loadtype": 4, "connection": 5}
        mapp_customer_loads = {
            "sectionid": 0,
            "devicenumber": 1,
            "loadtype": 2,
            "customernumber": 3,
            "customertype": 4,
            "loadmodelid": 8,
            "valuetype": 11,
            "loadphase": 12,
            "value1": 13,
            "value2": 14,
            "connectedkva": 15,
            "numberofcustomer": 17,
        }
        mapp_customer_class = {
            "id": 0,
            "constantpower": 4,
            "constantcurrent": 5,
            "constantimpedance": 6,
            "powerfactor": 8,
            "constantimpedancezp": 17,
            "constantimpedancezq": 18,
            "constantcurrentip": 19,
            "constantcurrentiq": 20,
            "constantpowerpp": 21,
            "constantpowerpq": 22,
        }
        self.loads = {}
        self.customer_loads = {}
        self.customer_class = {}
        #####################################################
        #                                                   #
        #                     LOAD FILE                     #
        #                                                   #
        #####################################################
        #
        # Open the load file
        self.get_file_content("load")
        # Loop over the load file
        for line in self.content:
            #########################################
            #                                       #
            #                 LOADS                 #
            #                                       #
            #########################################
            #
            self.loads.update(
                self.parser_helper(
                    line,
                    ["loads"],
                    ["sectionid", "devicenumber", "loadtype", "connection"],
                    mapp_loads,
                )
            )
            #########################################
            #                                       #
            #             CUSTOMER LOADS            #
            #                                       #
            #########################################
            #
            self.customer_loads.update(
                self.parser_helper(
                    line,
                    ["customer_loads"],
                    [
                        "sectionid",
                        "devicenumber",
                        "loadtype",
                        "customernumber",
                        "customertype",
                        "loadmodelid",
                        "valuetype",
                        "loadphase",
                        "value1",
                        "value2",
                        "connectedkva",
                        "numberofcustomer",
                    ],
                    mapp_customer_loads,
                )
            )
            #########################################
            #                                       #
            #             CUSTOMER CLASS            #
            #                                       #
            #########################################
            #
            self.customer_class.update(
                self.parser_helper(
                    line,
                    ["customer_class"],
                    [
                        "id",
                        "constantpower",
                        "constantcurrent",
                        "constantimpedance",
                        "powerfactor",
                        "constantimpedancezp",
                        "constantimpedancezq",
                        "constantcurrentip",
                        "constantcurrentiq",
                        "constantpowerpp",
                        "constantpowerpq",
                    ],
                    mapp_customer_class,
                )
            )
        # Sections appearing more than once are marked with a trailing '*' by
        # the parser; their per-phase loads get merged ("fusion") below.
        duplicate_loads = set()
        for sectionID in self.customer_loads.keys():
            if sectionID.endswith("*"):
                duplicate_loads.add(sectionID.lower().strip("*"))
        for sectionID, settings in self.customer_loads.items():
            sectionID = sectionID.strip("*").lower()
            # Matching [LOADS] record, if any (provides connection info).
            if sectionID in self.loads:
                load_data = self.loads[sectionID]
            else:
                load_data = {}
            # NOTE(review): float() here is not guarded — a malformed
            # connectedkva field would raise; confirm whether that is intended.
            if "connectedkva" in settings:
                connectedkva = float(settings["connectedkva"])
            else:
                connectedkva = None
            # Decode (p, q) in kW/kvar according to the record's value type:
            # 0 = P & Q, 1 = KVA & PF, 2 = P & PF, 3 = AMP & PF (unsupported).
            # NOTE(review): `value_type`, `p` and `q` are only assigned when
            # the corresponding keys are present, so for a record missing
            # "valuetype"/"value1"/"value2" the values carry over from the
            # PREVIOUS loop iteration (or raise NameError on the first one) —
            # confirm and consider resetting them at the top of the loop.
            if "valuetype" in settings:
                value_type = int(settings["valuetype"])
            if "value1" in settings and "value2" in settings:
                if (
                    float(settings["value1"]) == 0.0
                    and float(settings["value2"]) == 0.0
                ):
                    p = 0
                    q = 0
                elif value_type == 0:  # P and Q are given
                    try:
                        p, q = float(settings["value1"]), float(settings["value2"])
                    except:
                        logger.warning(
                            "WARNING:: Skipping load on section {}".format(sectionID)
                        )
                        continue
                elif value_type == 1:  # KVA and PF are given
                    try:
                        kva, PF = (
                            float(settings["value1"]),
                            float(settings["value2"]) * 0.01,
                        )
                        # A zero KVA falls back to the transformer connected KVA.
                        if kva == 0 and "connectedkva" in settings:
                            kva = float(settings["connectedkva"])
                        p = kva * PF
                        q = math.sqrt(kva ** 2 - p ** 2)
                    except:
                        logger.warning(
                            "WARNING:: Skipping load on section {}".format(sectionID)
                        )
                        continue
                elif value_type == 2:  # P and PF are given
                    try:
                        p, PF = float(settings["value1"]), float(settings["value2"])
                        # PF may be given as a fraction (0-1) or a percentage
                        # (1-100); normalize before computing Q.
                        if 0 <= PF <= 1:
                            q = p * math.sqrt((1 - PF ** 2) / PF ** 2)
                        elif 1 < PF <= 100:
                            PF /= 100.0
                            q = p * math.sqrt((1 - PF ** 2) / PF ** 2)
                        else:
                            logger.warning("problem with PF")
                            logger.warning(PF)
                    except:
                        logger.warning("Skipping load on section {}".format(sectionID))
                        continue
                elif value_type == 3:  # AMP and PF are given
                    # TODO
                    logger.warning(
                        "WARNING:: Skipping load on section {}".format(sectionID)
                    )
                    continue
            if p >= 0 or q >= 0:
                if "loadphase" in settings:
                    phases = settings["loadphase"]
                else:
                    phases = []
                # For duplicated sections, reuse (and extend) the Load object
                # created by an earlier record; otherwise create a fresh one.
                if sectionID in duplicate_loads:
                    fusion = True
                    if sectionID in self._loads:
                        api_load = self._loads[sectionID]
                    elif p != 0:
                        api_load = Load(model)
                else:
                    fusion = False
                    api_load = Load(model)
                # Skip zero-power duplicates entirely (they would only add
                # empty phase loads to the merged object).
                if fusion and p == 0:
                    # logger.warning(
                    #    "WARNING:: Skipping duplicate load on section {} with p=0".format(sectionID)
                    # )
                    continue
                # Name: "Load_<section>_<phases>"; merged duplicates get the
                # new phases appended to the existing name.
                try:
                    if fusion and sectionID in self._loads:
                        api_load.name += "_" + reduce(
                            lambda x, y: x + "_" + y, phases
                        )
                    else:
                        api_load.name = (
                            "Load_"
                            + sectionID
                            + "_"
                            + reduce(lambda x, y: x + "_" + y, phases)
                        )
                except:
                    pass
                # Transformer connected KVA: set on new loads, accumulated on
                # merged duplicates.
                try:
                    if not (fusion and sectionID in self._loads):
                        if connectedkva is not None:
                            api_load.transformer_connected_kva = (
                                connectedkva * 10 ** 3
                            )  # DiTTo in var
                    elif connectedkva is not None:
                        if api_load.transformer_connected_kva is None:
                            api_load.transformer_connected_kva = (
                                connectedkva * 10 ** 3
                            )  # DiTTo in var
                        else:
                            api_load.transformer_connected_kva += (
                                connectedkva * 10 ** 3
                            )  # DiTTo in var
                except:
                    pass
                try:
                    if not (fusion and sectionID in self._loads):
                        api_load.connection_type = self.connection_configuration_mapping(
                            load_data["connection"]
                        )
                except:
                    pass
                # ZIP coefficients come from the customer class matching this
                # record's loadtype.
                if not (fusion and sectionID in self._loads):
                    if (
                        "loadtype" in settings
                        and settings["loadtype"] in self.customer_class
                    ):
                        load_type_data = self.customer_class[settings["loadtype"]]
                    else:
                        load_type_data = {}
                try:
                    if not (fusion and sectionID in self._loads):
                        api_load.connecting_element = self.section_phase_mapping[
                            sectionID
                        ]["fromnodeid"]
                except:
                    pass
                # NOTE(review): unlike most assignments here, these two are not
                # wrapped in try/except — a missing section/feeder mapping or
                # "numberofcustomer" field would raise; confirm this is intended.
                api_load.feeder_name = self.section_feeder_mapping[sectionID]
                api_load.num_users = float(settings["numberofcustomer"])
                # Build one PhaseLoad per phase of this record.
                for ph in phases:
                    try:
                        api_phase_load = PhaseLoad(model)
                    except:
                        raise ValueError(
                            "Unable to instanciate PhaseLoad DiTTo object."
                        )
                    try:
                        api_phase_load.phase = ph
                    except:
                        pass
                    # NOTE(review): the full per-record (p, q) is assigned to
                    # EVERY phase load rather than split across phases —
                    # confirm CYME reports per-phase values here.
                    try:
                        api_phase_load.p, api_phase_load.q = (
                            10 ** 3 * p,
                            10 ** 3 * q,
                        )
                    except:
                        pass
                    # ZIP load parameters (CYME stores percentages; DiTTo
                    # expects fractions).
                    try:
                        api_phase_load.ppercentcurrent = (
                            float(load_type_data["constantcurrentip"]) / 100.0
                        )
                        api_phase_load.qpercentcurrent = (
                            float(load_type_data["constantcurrentiq"]) / 100.0
                        )
                        api_phase_load.ppercentpower = (
                            float(load_type_data["constantpowerpp"]) / 100.0
                        )
                        api_phase_load.qpercentpower = (
                            float(load_type_data["constantpowerpq"]) / 100.0
                        )
                        api_phase_load.ppercentimpedance = (
                            float(load_type_data["constantimpedancezp"]) / 100.0
                        )
                        api_phase_load.qpercentimpedance = (
                            float(load_type_data["constantimpedancezq"]) / 100.0
                        )
                        # api_phase_load.use_zip=1
                        # api_phase_load.model=8
                    except:
                        pass
                    # CYME store phase loads with P=0 and Q=0.
                    # Do not add them to DiTTo (otherwise it will make the validation
                    # on the number of objects fail since we will have many more loads than there actually are...)
                    # if api_phase_load.p!=0 or api_phase_load.q!=0:
                    api_load.phase_loads.append(api_phase_load)
                self._loads[sectionID] = api_load
        return 1
def parse_dg(self, model):
    """Parse the Distributed Generation from CYME to DiTTo.

    DG may be represented as ECGs or PVs. This reads the objects
    [CONVERTER], [CONVERTER CONTROL SETTING], [LONG TERM DYNAMICS CURVE EXT],
    [DGGENERATIONMODEL] and, when PV is included, [PHOTOVOLTAIC SETTINGS],
    plus [BESS]/[BESS SETTINGS] for battery storage.

    :param model: DiTTo model to populate with Photovoltaic/Storage objects.
    """
    self._dgs = []
    self.converter = {}
    self.converter_settings = {}
    self.long_term_dynamics = {}
    self.photovoltaic_settings = {}
    self.bess = {}
    self.bess_settings = {}
    self.dg_generation = {}
    # Column-index maps: field name -> column position in the CYME section.
    mapp_converter = {
        "devicenumber": 0,
        "devicetype": 1,
        "converterrating": 2,
        "activepowerrating": 3,
        "reactivepowerrating": 4,
        "minimumpowerfactor": 5,
        "powerfalllimit": 23,
        "powerriselimit": 24,
        "risefallunit": 25,
    }
    mapp_converter_settings = {
        "devicenumber": 0,
        "devicetype": 1,
        "controlindex": 2,
        "timetriggerindex": 3,
        "controltype": 4,
        "fixedvarinjection": 5,
        "injectionreference": 6,
        "convertercontrolid": 7,
        "powerreference": 8,
        "powerfactor": 9,
    }
    mapp_photovoltaic_settings = {
        "sectionid": 0,
        "location": 1,
        "devicenumber": 2,
        "equipmentid": 6,
        "eqphase": 7,
        "ambienttemperature": 11,
    }
    mapp_bess = {
        "id": 0,
        "ratedstorageenergy": 1,
        "maxchargingpower": 2,
        "maxdischargingpower": 3,
        "chargeefficiency": 4,
        "dischargeefficiency": 5,
    }
    mapp_bess_settings = {
        "sectionid": 0,
        "devicenumber": 2,
        "equipmentid": 6,
        "phase": 7,
        "maximumsoc": 10,
        "minimumsoc": 11,
        "initialsoc": 16,
    }
    mapp_long_term_dynamics = {
        "devicenumber": 0,
        "devicetype": 1,
        "adjustmentsettings": 2,
        "powercurvemodel": 3,
    }
    mapp_dg_generation_model = {
        "devicenumber": 0,
        "devicetype": 1,
        "loadmodelname": 2,
        "activegeneration": 3,
        "powerfactor": 4,
    }
    #####################################################
    #                                                   #
    #                   NETWORK FILE                    #
    #                                                   #
    #####################################################
    #
    # Open the network file
    self.get_file_content("network")
    # Loop over the network file
    for line in self.content:
        #########################################
        #                                       #
        #              CONVERTER                #
        #                                       #
        #########################################
        self.converter.update(
            self.parser_helper(
                line,
                ["converter"],
                [
                    "devicenumber",
                    "devicetype",
                    "converterrating",
                    "activepowerrating",
                    "reactivepowerrating",
                    "minimumpowerfactor",
                    "powerfalllimit",
                    "powerriselimit",
                    "risefallunit",
                ],
                mapp_converter,
                {"type": "converter"},
            )
        )
        #########################################
        #                                       #
        #      CONVERTER CONTROL SETTINGS       #
        #                                       #
        #########################################
        self.converter_settings.update(
            self.parser_helper(
                line,
                ["converter_control_settings"],
                [
                    "devicenumber",
                    "devicetype",
                    "controltype",
                    "fixedvarinjection",
                    "injectionreference",
                    "convertercontrolid",
                    "powerreference",
                    "powerfactor",
                ],
                mapp_converter_settings,
                {"type": "converter_settings"},
            )
        )
        #########################################
        #                                       #
        #        PHOTOVOLTAIC SETTINGS          #
        #                                       #
        #########################################
        self.photovoltaic_settings.update(
            self.parser_helper(
                line,
                ["photovoltaic_settings"],
                ["sectionid", "devicenumber", "eqphase", "ambienttemperature"],
                mapp_photovoltaic_settings,
                {"type": "photovoltaic_settings"},
            )
        )
        #########################################
        #                                       #
        #            BESS SETTINGS              #
        #                                       #
        #########################################
        self.bess_settings.update(
            self.parser_helper(
                line,
                ["bess_settings"],
                [
                    "sectionid",
                    "devicenumber",
                    "equipmentid",
                    "phase",
                    "maximumsoc",
                    "minimumsoc",
                    "initialsoc",
                ],
                mapp_bess_settings,
                {"type": "bess_settings"},
            )
        )
        #########################################
        #                                       #
        #     LONG TERM DYNAMICS CURVE EXT      #
        #                                       #
        #########################################
        self.long_term_dynamics.update(
            self.parser_helper(
                line,
                ["long_term_dynamics_curve_ext"],
                [
                    "devicenumber",
                    "devicetype",
                    "adjustmentsettings",
                    "powercurvemodel",
                ],
                mapp_long_term_dynamics,
                {"type": "long_term_dynamics"},
            )
        )
        #########################################
        #                                       #
        #          DGGENERATIONMODEL            #
        #                                       #
        #########################################
        self.dg_generation.update(
            self.parser_helper(
                line,
                ["dggenerationmodel"],
                [
                    "devicenumber",
                    "devicetype",
                    "activegeneration",
                    "powerfactor",
                    "loadmodelname",
                ],
                mapp_dg_generation_model,
                {"type": "dg_generation_model"},
            )
        )
    #####################################################
    #                                                   #
    #                  EQUIPMENT FILE                   #
    #                                                   #
    #####################################################
    #
    # Open the equipment file
    self.get_file_content("equipment")
    # Loop over the equipment file
    for line in self.content:
        #########################################
        #                                       #
        #                 BESS                  #
        #                                       #
        #########################################
        #
        self.bess.update(
            self.parser_helper(
                line,
                ["bess"],
                [
                    "id",
                    "ratedstorageenergy",
                    "maxchargingpower",
                    "maxdischargingpower",
                    "chargeefficiency",
                    "dischargeefficiency",
                ],
                mapp_bess,
            )
        )
    # Build the DiTTo objects, keyed by lower-cased device number so the
    # converter/control loops below can look them up.
    api_photovoltaics = {}
    api_bessi = {}
    for sectionID, settings in self.photovoltaic_settings.items():
        try:
            api_photovoltaic = Photovoltaic(model)
        except:
            raise ValueError(
                "Unable to instanciate photovoltaic {id}".format(id=sectionID)
            )
        try:
            api_photovoltaic.name = "PV_" + settings["devicenumber"].lower()
            api_photovoltaic.feeder_name = self.section_feeder_mapping[
                sectionID.lower()
            ]
            api_photovoltaics[settings["devicenumber"].lower()] = api_photovoltaic
        except:
            raise ValueError(
                "Unable to set photovoltaic name for {id}".format(id=sectionID)
            )
        try:
            api_photovoltaic.temperature = float(
                settings["ambienttemperature"]
            )  # Not included in ECG SETTINGS
        except:
            pass
        try:
            api_photovoltaic.phases = [
                Unicode(k) for k in list(settings["eqphase"])
            ]
        except:
            pass
        try:
            api_photovoltaic.connecting_element = self.section_phase_mapping[
                sectionID.lower()
            ]["fromnodeid"]
        except:
            pass
    for sectionID, settings in self.bess_settings.items():
        try:
            api_bess = Storage(model)
        except:
            raise ValueError("Unable to instanciate bess {id}".format(id=sectionID))
        try:
            api_bess.name = "BESS_" + settings["devicenumber"].lower()
            api_bess.feeder_name = self.section_feeder_mapping[sectionID.lower()]
            api_bessi[settings["devicenumber"].lower()] = api_bess
        except:
            raise ValueError(
                "Unable to set bess name for {id}".format(id=sectionID)
            )
        # One PhaseStorage per connected phase; default to three-phase.
        phase_storages = []
        if "phase" in settings:
            phases = self.phase_mapping(settings["phase"])
        else:
            phases = ["A", "B", "C"]
        for phase in phases:
            phase_storage = PhaseStorage(model)
            phase_storage.phase = phase
            phase_storages.append(phase_storage)
        api_bess.phase_storages = phase_storages
        if "equipmentid" in settings:
            dev_num = settings["equipmentid"]
        else:
            dev_num = None
        if dev_num is not None and dev_num in self.bess:
            bess_data = self.bess[dev_num]
            try:
                api_bess.rated_kWh = float(bess_data["ratedstorageenergy"])
            except:
                pass
            try:
                # BUGFIX: the parser map stores this field under
                # "chargeefficiency" (see mapp_bess above); the old key
                # "chargingefficiency" always raised KeyError, which the bare
                # except silently swallowed, so charge efficiency was never set.
                api_bess.chargeefficiency = float(bess_data["chargeefficiency"])
            except:
                pass
            try:
                api_bess.dischargeefficiency = float(
                    bess_data["dischargeefficiency"]
                )
            except:
                pass
            try:
                # Rated power is the binding (smaller) of the charge/discharge
                # limits, converted kW -> W, split evenly across phases.
                charging = float("inf")
                discharging = float("inf")
                if "maxchargingpower" in bess_data:
                    charging = float(bess_data["maxchargingpower"])
                if "maxdischargingpower" in bess_data:
                    discharging = float(bess_data["maxdischargingpower"])
                power = min(charging, discharging) * 1000
                if power < float("inf"):
                    average_power = power / float(len(phase_storages))
                    for ps in phase_storages:
                        ps.p = average_power
            except:
                pass
        try:
            api_bess.reserve = float(settings["maximumsoc"])
        except:
            pass
        try:
            # initialsoc is a percentage of the rated capacity.
            api_bess.stored_kWh = (
                float(settings["initialsoc"]) * api_bess.rated_kWh / 100.0
            )
        except:
            pass
        try:
            api_bess.connecting_element = self.section_phase_mapping[
                sectionID.lower()
            ]["fromnodeid"]
        except:
            pass
    for deviceID, settings in self.dg_generation.items():
        deviceID = deviceID.strip(
            "*"
        ).lower()  # TODO: Deal with multiple configurations for the same location
        # NOTE(review): unlike the converter loops below, this lookup is not
        # guarded, so a DGGENERATIONMODEL entry for a non-PV device (e.g. a
        # BESS) raises KeyError here — confirm whether that can occur.
        api_photovoltaic = api_photovoltaics[deviceID]
        # Use the default setting if available
        if (
            "loadmodelname" in settings
            and settings["loadmodelname"].lower() == "default"
        ):
            try:
                api_photovoltaic.active_rating = (
                    float(settings["activegeneration"]) * 1000
                )
            except:
                pass
            try:
                api_photovoltaic.power_factor = (
                    float(settings["powerfactor"]) / 100.0
                )
            except:
                pass
    for deviceID, settings in self.converter.items():
        deviceID = deviceID.strip(
            "*"
        ).lower()  # TODO: Deal with multiple configurations for the same location
        if deviceID in api_photovoltaics:
            api_photovoltaic = api_photovoltaics[deviceID]
            try:
                api_photovoltaic.rated_power = (
                    float(settings["activepowerrating"]) * 1000
                )
            except:
                pass
            try:
                api_photovoltaic.reactive_rating = (
                    float(settings["reactivepowerrating"]) * 1000
                )
            except:
                pass
            try:
                api_photovoltaic.min_powerfactor = (
                    float(settings["minimumpowerfactor"]) / 100.0
                )
            except:
                pass
            try:
                api_photovoltaic.fall_limit = float(settings["powerfalllimit"])
            except:
                pass
            try:
                api_photovoltaic.rise_limit = float(settings["powerriselimit"])
            except:
                pass
        # TODO: check the units being used
        elif deviceID in api_bessi:
            api_bess = api_bessi[deviceID]
            try:
                api_bess.rated_power = float(settings["activepowerrating"]) * 1000
            except:
                pass
            try:
                api_bess.reactive_rating = (
                    float(settings["reactivepowerrating"]) * 1000
                )
            except:
                pass
            try:
                api_bess.min_powerfactor = (
                    float(settings["minimumpowerfactor"]) / 100.0
                )
            except:
                pass
    for deviceID, settings in self.converter_settings.items():
        deviceID = deviceID.strip(
            "*"
        ).lower()  # TODO: Deal with multiple configurations for the same location
        if deviceID in api_photovoltaics:
            api_photovoltaic = api_photovoltaics[deviceID]
            try:
                # CYME control-type codes -> DiTTo control type names.
                control_type = str(settings["controltype"])
                if control_type == "1":
                    api_photovoltaic.control_type = "voltvar_vars_over_watts"
                if control_type == "0":
                    api_photovoltaic.control_type = "voltvar_watts_over_vars"
                if control_type == "2":
                    api_photovoltaic.control_type = "voltvar_fixedvars"
                if control_type == "3":
                    api_photovoltaic.control_type = "voltvar_novars"
                if control_type == "5":
                    api_photovoltaic.control_type = "voltwatt"
                if control_type == "6":
                    api_photovoltaic.control_type = "watt_powerfactor"
                if control_type == "10":
                    api_photovoltaic.control_type = "powerfactor"
            except:
                pass
            try:
                api_photovoltaic.var_injection = float(
                    settings["fixedvarinjection"]
                )
            except:
                pass
            try:
                # Attach the control curve to the attribute matching the
                # control type selected above.
                curve = float(settings["convertercontrolid"])
                if (
                    api_photovoltaic.control_type == "voltvar_watts_over_vars"
                    or api_photovoltaic.control_type == "voltvar_vars_over_watts"
                ):
                    api_photovoltaic.voltvar_curve = curve
                if api_photovoltaic.control_type == "voltwatt":
                    api_photovoltaic.voltwatt_curve = curve
                if api_photovoltaic.control_type == "watt_powerfactor":
                    api_photovoltaic.watt_powerfactor_curve = curve
            except:
                pass
            try:
                pf = float(settings["powerfactor"]) / 100.0
                api_photovoltaic.power_factor = pf
            except:
                pass
|
/* Vivien suggested a non affine bounded domain...
*
* Here is a case with 8 points in 2-D, same as rotation01 but with
* assignments intead of updates.
*
* The case should be trivial for ASPIC because the number of states
* is bounded and small.
*
* The case is much harder for a transformer-based approach, because
* the transformations are not as easy to combine as the states. So
* transformer lists should be used and should be small because only 9
* different paths are possible when transformers are computed in
* context. Either no assignment is performed, or a sequence of
* assignments starts at any of the eight steps.
*
* The trick here is that body(s0)==s0 and hence s0 is the loop
* invariant and then all preconditions are known.
*
* More generally, if the number of reachable states by the loop body
* is bounded and small, the loop invariant should be easy to compute.
*/
/* Benchmark case for invariant inference (see comment above): the sequential
 * ifs cascade within a single iteration, walking the full 8-state cycle
 * (1,0)->(2,0)->(3,1)->(3,2)->(2,3)->(1,3)->(0,2)->(0,1)->(1,0), so the loop
 * body maps the initial state back onto itself (body(s0)==s0).
 * Do not restructure: the exact assignment form is what the analyzer tests. */
void rotation02()
{
  /* initial state s0 = (1,0) */
  int x = 1, y = 0;
  while(1) {
    if(x==1&&y==0)
      x=2;
    if(x==2&&y==0)
      x=3, y=1;
    if(x==3&&y==1)
      y=2;
    if(x==3&&y==2)
      x=2, y=3;
    if(x==2&&y==3)
      x=1;
    if(x==1&&y==3)
      x=0,y=2;
    if(x==0&&y==2)
      y=1;
    if(x==0&&y==1)
      x=1,y=0;
  }
}
|
import arviz
import numpy as np
import pandas
import seaborn as sns
import torch
import torch.distributions as dist
from matplotlib import pyplot
import test_stan
import generate_data
sns.set()
# np.random.seed(1)
def user_simulator_typezero(action, W, a, educability=0.6):
    """Simulate a type-zero user's response to a teacher action.

    An integer ``action`` is an educate action: the outcome is a 0-d tensor
    sampled from Bernoulli(``educability``). Otherwise ``action`` is a
    feature tensor and the outcome is an int 0/1 sampled from a Bernoulli
    with logits ``a + action @ W``.
    """
    if isinstance(action, int):
        # Educate action.
        print("Educate!")
        return dist.Bernoulli(educability).sample()
    # Recommendation action: logistic response on the action features.
    logits = a + action @ W
    response = dist.Bernoulli(logits=logits).sample()
    return int(response.item())
def user_simulator_typeone(action, W, a, educability=0.6):
    """Simulate a type-one user's response to a teacher action.

    Behaviourally identical to :func:`user_simulator_typezero` (the two
    types differ only in the weight vector ``W`` the caller passes in).
    """
    if isinstance(action, int):
        # Educate action.
        print("Educate!")
        return dist.Bernoulli(educability).sample()
    # Recommendation action: logistic response on the action features.
    logits = a + action @ W
    response = dist.Bernoulli(logits=logits).sample()
    return int(response.item())
def user_simulator_switching(action, W, a, educability=0.1, user_type=0, forgetting=0.0):
    """Simulate a user that can switch type when educated.

    ``W[0]`` holds the type-zero user weights, ``W[1]`` the type-one weights.
    An integer ``action`` is an educate action and returns the (possibly
    updated) user type: type-zero users switch with probability
    ``educability``; type-one users stay type one (probability 1.0).
    Otherwise returns an int 0/1 sampled from a Bernoulli with logits
    ``a + action @ W[user_type]``.  ``forgetting`` is currently unused.
    """
    if isinstance(action, int):
        # Educate action: sample the new user type.
        switch_prob = [educability, 1.0][user_type]
        return int(dist.Bernoulli(switch_prob).sample().item())
    # Recommendation action: respond with the current type's weights.
    logits = a + action @ W[user_type]
    response = dist.Bernoulli(logits=logits).sample()
    return int(response.item())
def test_user_typezero():
    """Run a 100-step teaching loop against a simulated type-zero user and
    fit the Stan model on the collected (action, outcome) history."""
    # FIXME(review): `generate_data` is imported as a module at the top of the
    # file, so calling it directly raises TypeError — presumably this should be
    # generate_data.generate_data(...); confirm against generate_data.py.
    training_X, training_y, test_X, test_y, _, _ = generate_data(n_noncollinear=50, n_collinear=100, n=100)
    # Absolute correlation matrix over [covariates | target]; last column is
    # the covariate-target correlation used to build action features.
    corr_mat = np.abs(np.corrcoef(torch.transpose(torch.cat((training_X, training_y.unsqueeze(dim=1)), dim=1), 0, 1)))
    W_typezero = [5.0, 0.0]
    W_typeone = [5.0, -5.0]
    n_covars = training_X.shape[1]
    data_dict = {"N": 0, "x": [], "y": [], "beta": [W_typezero, W_typeone]}
    # xi[i] tracks whether the user currently accepts covariate i.
    aux_data_dict = {"xi": torch.zeros(n_covars + 1, dtype=torch.bool)}
    n_iterations = 100
    teacher_actions = list(np.random.choice(n_covars, n_iterations))
    model_file = None
    for i in range(n_iterations):
        act_in = teacher_actions[i]
        if act_in != -1:
            # Feature 2: max |correlation| between the recommended covariate
            # and the covariates already selected (mask excludes itself).
            mask = aux_data_dict["xi"].numpy().copy()
            mask[act_in] = False
            masked = corr_mat[act_in, mask]
            if masked.size != 0:
                max_cross_corr = np.max(masked)
            else:
                max_cross_corr = 0.0
            # Feature 1: |correlation| of the covariate with the target.
            action = torch.tensor([corr_mat[act_in, -1], max_cross_corr])
            outcome = user_simulator_typezero(action, torch.tensor(W_typezero, dtype=torch.double), a=1.0)
            if outcome == 1.0:
                aux_data_dict["xi"][act_in] = True
            else:
                aux_data_dict["xi"][act_in] = False
            data_dict["x"].append(action.tolist())
            data_dict["y"].append(outcome)
            data_dict["N"] += 1
    # Fit once on the full interaction history and inspect the trace.
    fit, model_file = test_stan.fit_model_w_education(data_dict, model_file)
    arviz.plot_trace(fit)
    pyplot.show()
def test_user_typeone():
    """Same loop as test_user_typezero but with a simulated type-one user
    (weights W_typeone) and only 20 iterations."""
    # FIXME(review): `generate_data` is imported as a module — calling it
    # directly raises TypeError; presumably generate_data.generate_data(...).
    training_X, training_y, test_X, test_y, _, _ = generate_data(n_noncollinear=50, n_collinear=100, n=100)
    # Absolute correlation matrix over [covariates | target].
    corr_mat = np.abs(np.corrcoef(torch.transpose(torch.cat((training_X, training_y.unsqueeze(dim=1)), dim=1), 0, 1)))
    W_typezero = [5.0, 0.0]
    W_typeone = [5.0, -5.0]
    n_covars = training_X.shape[1]
    data_dict = {"N": 0, "x": [], "y": [], "beta": [W_typezero, W_typeone]}
    # xi[i] tracks whether the user currently accepts covariate i.
    aux_data_dict = {"xi": torch.zeros(n_covars + 1, dtype=torch.bool)}
    n_iterations = 20
    teacher_actions = list(np.random.choice(n_covars, n_iterations))
    model_file = None
    for i in range(n_iterations):
        act_in = teacher_actions[i]
        if act_in != -1:
            # Max |correlation| with the already-selected covariates.
            mask = aux_data_dict["xi"].numpy().copy()
            mask[act_in] = False
            masked = corr_mat[act_in, mask]
            if masked.size != 0:
                max_cross_corr = np.max(masked)
            else:
                max_cross_corr = 0.0
            action = torch.tensor([corr_mat[act_in, -1], max_cross_corr])
            outcome = user_simulator_typeone(action, torch.tensor(W_typeone, dtype=torch.double), a=1.0)
            if outcome == 1.0:
                aux_data_dict["xi"][act_in] = True
            else:
                aux_data_dict["xi"][act_in] = False
            data_dict["x"].append(action.tolist())
            data_dict["y"].append(outcome)
            data_dict["N"] += 1
    # Fit once on the full interaction history and inspect the trace.
    fit, model_file = test_stan.fit_model_w_education(data_dict, model_file)
    arviz.plot_trace(fit)
    pyplot.show()
def test_user_switching(educability=0.01):
    """Teaching loop against a switching user: each step is either an educate
    action (may flip the user from type zero to type one) or a recommendation,
    then the Stan switching model is fit on the full history and plotted."""
    # FIXME(review): `generate_data` is imported as a module — calling it
    # directly raises TypeError; presumably generate_data.generate_data(...).
    training_X, training_y, test_X, test_y, _, _ = generate_data(n_noncollinear=50, n_collinear=100, n=100)
    # Absolute correlation matrix over [covariates | target].
    corr_mat = np.abs(np.corrcoef(torch.transpose(torch.cat((training_X, training_y.unsqueeze(dim=1)), dim=1), 0, 1)))
    sns.heatmap(corr_mat)
    pyplot.show()
    W_typezero = [5.0, 0.0]
    W_typeone = [5.0, -5.0]
    n_covars = training_X.shape[1]
    data_dict = {"N": 0, "x": [], "y": [], "beta": [W_typezero, W_typeone], "educability": educability,
                 "forgetting": 0.0}
    # xi[i] tracks whether the user currently accepts covariate i.
    aux_data_dict = {"xi": torch.zeros(n_covars + 1, dtype=torch.bool)}
    n_iterations = 100
    recommend_actions = list(np.random.choice(n_covars, n_iterations))
    # 0 -> educate, 1 -> recommend; first step forced to be a recommendation.
    educate_or_recommend = list(np.random.choice(2, n_iterations, p=(0.5, 0.5)))
    educate_or_recommend[0] = 1
    model_file = None
    user_type = 0
    change_point = 0
    for i in range(n_iterations):
        #print("Step: {}".format(i))
        if educate_or_recommend[i] == 0:
            act_in = -1
        else:
            act_in = recommend_actions[i]
        if act_in != -1:
            # Recommendation step: build the two action features as in the
            # other tests and record the user's accept/reject outcome.
            mask = aux_data_dict["xi"].numpy().copy()
            mask[act_in] = False
            masked = corr_mat[act_in, mask]
            if masked.size != 0:
                max_cross_corr = np.max(masked)
            else:
                max_cross_corr = 0.0
            action = torch.tensor([corr_mat[act_in, -1], max_cross_corr])
            outcome = user_simulator_switching(action, torch.tensor([W_typezero, W_typeone], dtype=torch.double), a=1.0,
                                               educability=data_dict["educability"], user_type=user_type)
            if outcome == 1:
                aux_data_dict["xi"][act_in] = True
            else:
                aux_data_dict["xi"][act_in] = False
            data_dict["x"].append(action.tolist())
            data_dict["y"].append(outcome)
        else:
            # Educate step: the simulator returns the (possibly new) user
            # type; a sentinel action/outcome pair is appended to the data.
            _user_type = 0 + user_type
            user_type = user_simulator_switching(act_in, torch.tensor([W_typezero, W_typeone], dtype=torch.double),
                                                 a=1.0, educability=data_dict["educability"], user_type=user_type)
            action = [-1.0, -1.0]
            outcome = 0
            data_dict["x"].append(action)
            data_dict["y"].append(outcome)
            if user_type == 1 and _user_type == 0:
                print("State Changed to Type 1 at iteration: {}".format(i))
                change_point += i
        data_dict["N"] += 1
    # Fit once on the full interaction history.
    fit, model_file = test_stan.fit_model_w_education(data_dict, model_file)
    # if i % 100 ==0:
    s = fit.summary()
    print(fit)
    arviz.plot_trace(fit)
    pyplot.show()
    summary = pandas.DataFrame(s['summary'], columns=s['summary_colnames'], index=s['summary_rownames'])
    print(summary.iloc[2:6, :])
    strt = 6 + (3 * n_iterations)
    endn = strt + n_iterations
    print(summary.iloc[strt, :])
    print(summary.iloc[endn, :])
    # NOTE(review): the 307:407 slice is hard-coded; it only matches the
    # computed strt/endn layout when n_iterations == 100 — confirm intent.
    pyplot.plot(list(summary.iloc[307:407, 0]))
    pyplot.axvline(x=change_point, ymin=0, ymax=1, color='r', linestyle='--')
    pyplot.scatter(x=np.arange(n_iterations), y=np.zeros(n_iterations), c=educate_or_recommend, s=1.5, marker="x",
                   cmap="bone")
    pyplot.savefig("interaction_alpha_e{}_test.png".format(educability), dpi=300)
|
/**
* PHP Email Form Validation - v2.0
* URL: https://bootstrapmade.com/php-email-form/
* Author: BootstrapMade.com
*/
!(function($) {
  "use strict";

  /**
   * Validate and AJAX-submit every form with class .php-email-form.
   * Inputs/textareas opt into validation via a `data-rule` attribute
   * ("required", "minlen:N", "email", "checked", "regexp:EXPR"); the error
   * text shown on failure comes from `data-msg`.
   */
  $('form.php-email-form').submit(function(e) {
    e.preventDefault();

    var f = $(this).find('.form-group'),
      ferror = false,
      emailExp = /^[^\s()<>@,;:\/]+@\w[\w\.-]+\.[a-z]{2,}$/i;

    f.children('input').each(function() { // run all inputs
      var i = $(this); // current input
      var rule = i.attr('data-rule');

      if (rule !== undefined) {
        var ierror = false; // error flag for current input
        // Rules may carry an argument after a colon, e.g. "minlen:4".
        var pos = rule.indexOf(':', 0);
        if (pos >= 0) {
          var exp = rule.substr(pos + 1, rule.length);
          rule = rule.substr(0, pos);
        } else {
          // No ":" argument: substr(0, ...) leaves `rule` unchanged and
          // `exp` stays undefined (minlen/regexp then cannot match).
          rule = rule.substr(pos + 1, rule.length);
        }

        switch (rule) {
          case 'required':
            if (i.val() === '') {
              ferror = ierror = true;
            }
            break;

          case 'minlen':
            if (i.val().length < parseInt(exp, 10)) {
              ferror = ierror = true;
            }
            break;

          case 'email':
            if (!emailExp.test(i.val())) {
              ferror = ierror = true;
            }
            break;

          case 'checked':
            if (!i.is(':checked')) {
              ferror = ierror = true;
            }
            break;

          case 'regexp':
            exp = new RegExp(exp);
            if (!exp.test(i.val())) {
              ferror = ierror = true;
            }
            break;
        }
        // Show the per-field message (data-msg or a generic fallback).
        i.next('.validate').html((ierror ? (i.attr('data-msg') !== undefined ? i.attr('data-msg') : 'wrong Input') : '')).show('blind');
      }
    });

    f.children('textarea').each(function() { // run all textareas
      var i = $(this); // current input
      var rule = i.attr('data-rule');

      if (rule !== undefined) {
        var ierror = false; // error flag for current input
        var pos = rule.indexOf(':', 0);
        if (pos >= 0) {
          var exp = rule.substr(pos + 1, rule.length);
          rule = rule.substr(0, pos);
        } else {
          rule = rule.substr(pos + 1, rule.length);
        }

        switch (rule) {
          case 'required':
            if (i.val() === '') {
              ferror = ierror = true;
            }
            break;

          case 'minlen':
            if (i.val().length < parseInt(exp, 10)) {
              ferror = ierror = true;
            }
            break;
        }
        i.next('.validate').html((ierror ? (i.attr('data-msg') !== undefined ? i.attr('data-msg') : 'wrong Input') : '')).show('blind');
      }
    });

    if (ferror) return false;

    var this_form = $(this);
    var action = $(this).attr('action');
    if (!action) {
      this_form.find('.loading').slideUp();
      this_form.find('.error-message').slideDown().html('The form action property is not set!');
      return false;
    }

    $('.error-message').slideUp();
    $(".response").css('display', 'block');

    // `recptchaKey` is a global set elsewhere on the page; an empty string
    // disables reCAPTCHA. NOTE(review): confirm it is always defined.
    if (recptchaKey !== '') {
      var recaptcha_site_key = recptchaKey;
      grecaptcha.ready(function() {
        grecaptcha.execute(recaptcha_site_key, { action: 'php_email_form_submit' }).then(function(token) {
          php_email_form_submit(this_form, action, this_form.serialize() + '&recaptcha-response=' + token);
        });
      });
    } else {
      php_email_form_submit(this_form, action, this_form.serialize());
    }
    return true;
  });

  /**
   * POST the serialized form data to `action` and update the page.
   * Server replies: 'OK' (message sent), 'subscribed' (newsletter opt-in);
   * anything else is shown as an error message.
   */
  function php_email_form_submit(this_form, action, data) {
    $.ajax({
      type: "POST",
      url: action,
      data: data,
      timeout: 40000
    }).done(function(msg) {
      console.log(msg);
      if (msg === 'OK') {
        $('.response').css('display', 'none');
        $('#thanks').css('display', 'block');
      } else if (msg === 'subscribed') {
        // BUGFIX: was `msg='subscribed'` (assignment, always truthy), which
        // made this branch swallow every non-'OK' reply and left the error
        // branch below unreachable.
        $('.response').css('display', 'none');
        $("#subscribed").html('Thank you for subscribing to our blog!');
      } else {
        $("#pleaseWait").css('display', 'none');
        this_form.find('.loading').slideUp();
        if (!msg) {
          msg = 'Form submission failed and no error message returned from: ' + action + '<br>';
        }
        this_form.find('.error-message').slideDown().html(msg);
      }
    }).fail(function(data) {
      $("#pleaseWait").css('display', 'none');
      console.log(data);
      var error_msg = "Form submission failed!<br>";
      if (data.statusText || data.status) {
        error_msg += 'Status:';
        if (data.statusText) {
          error_msg += ' ' + data.statusText;
        }
        if (data.status) {
          error_msg += ' ' + data.status;
        }
        error_msg += '<br>';
      }
      if (data.responseText) {
        error_msg += data.responseText;
      }
      this_form.find('.loading').slideUp();
      this_form.find('.error-message').slideDown().html(error_msg);
    });
  }
})(jQuery);
|
# Copyright (C) 2015-2022 by Vd.
# This file is part of Rocketgram, the modern Telegram bot framework.
# Rocketgram is released under the MIT License (see LICENSE).
import re
# Characters that must be backslash-escaped in (legacy) Markdown / MarkdownV2.
MD_RE = re.compile(r"([*_`\[])")
MD2_RE = re.compile(r"([_*\[\]()~`>#+\-=|{}.!\\])")


def html(text: str) -> str:
    """Helper function to escape html symbols.

    ``&`` is replaced first so the entities produced for ``<``/``>`` are not
    themselves re-escaped.
    """
    escaped = text
    for char, entity in (("&", "&amp;"), ("<", "&lt;"), (">", "&gt;")):
        escaped = escaped.replace(char, entity)
    return escaped


def markdown(text: str) -> str:
    """Helper function to escape markdown symbols"""
    return MD_RE.sub(lambda match: "\\" + match.group(1), text)


def markdown2(text: str) -> str:
    """Helper function to escape markdown2 symbols"""
    return MD2_RE.sub(lambda match: "\\" + match.group(1), text)
|
'use strict';

Object.defineProperty(exports, "__esModule", {
  value: true
});

// Babel ES5 helper: defines prototype/static members of a transpiled class.
var _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }();

// Babel ES5 helper: throws if a transpiled class is invoked without `new`.
function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } }
// Reconnect: wraps a service "connect" callback with automatic retry-on-error
// logic. NOTE: this is compiled (Babel) ES5 output — prefer editing the
// original ES6 source rather than this file.
var Reconnect = function () {
  // options.repeatDelay: milliseconds to wait before retrying after an error
  // (defaults to 500 only when no options object is passed at all).
  function Reconnect() {
    var options = arguments.length <= 0 || arguments[0] === undefined ? { repeatDelay: 500 } : arguments[0];

    _classCallCheck(this, Reconnect);

    this.repeatDelay = options.repeatDelay;
    // status: 'disconnected' | 'connecting' | 'connected' | 'errored'
    this.status = 'disconnected';
    this.reconnectCount = 0;
    // Pending reconnect timer id, or false when no retry is scheduled.
    this.timeoutId = false;
  }

  _createClass(Reconnect, [{
    key: 'handleConnect',
    // Store the connect factory and immediately attempt a connection.
    value: function handleConnect(connect) {
      this.serviceConnect = connect;
      this.tryConnect();
    }
  }, {
    key: 'tryConnect',
    // Invoke the connect factory with success/error callbacks.
    value: function tryConnect() {
      var _this = this;

      this.status = 'connecting';
      this.service = this.serviceConnect(function () {
        _this.status = 'connected';
      }, this.handleError.bind(this));
    }
  }, {
    key: 'isConnected',
    value: function isConnected() {
      return this.status === 'connected';
    }
  }, {
    key: 'on',
    // Only the 'error' event is supported; other types are ignored.
    value: function on(type, callback) {
      if (typeof callback !== "function") {
        throw new Error('2nd parameter must be a function');
      }
      if (type === 'error') {
        this.onError = callback;
      }
    }
  }, {
    key: 'handleError',
    // Notify the error listener and schedule a single delayed reconnect;
    // further errors while a retry is pending are only logged.
    value: function handleError(err) {
      var _this2 = this;

      if (this.timeoutId) {
        console.error('Service errored (' + this.reconnectCount + ' times), already waiting for reconnect.');
        return;
      }
      console.error('Service errored (' + this.reconnectCount + ' times), waiting ' + this.repeatDelay + 'ms to reconnect.');
      this.status = 'errored';
      if (typeof this.onError === "function") {
        this.onError(err);
      }
      this.timeoutId = setTimeout(function () {
        // If something else already restored the connection, skip the retry.
        if (_this2.status !== 'errored') {
          console.warn('Service has recovered.');
          return;
        }
        _this2.reconnectCount += 1;
        _this2.tryConnect();
        _this2.timeoutId = false;
      }, this.repeatDelay);
    }
  }]);

  return Reconnect;
}();

exports.default = Reconnect;
|
// Root Vue instance mounted on #app. The created/computed/methods sections
// are intentionally empty placeholders.
new Vue({
  el: '#app',
  created() {
  },
  data: {
    // NOTE(review): presumably bound in the #app template (visibility toggle
    // and a dynamic class/value) — confirm against the markup.
    show:false,
    dynamic:"dynamic"
  },
  computed:{
  },
  methods: {
  }
});
|
// Copyright (c) 2014-2016 The Dash Developers
// Copyright (c) 2016-2017 The PIVX developers
// Copyright (c) 2018-2019 The Ion developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#ifndef OBFUSCATIONCONFIG_H
#define OBFUSCATIONCONFIG_H

#include <QDialog>

namespace Ui
{
class ObfuscationConfig;
}

class WalletModel;

/** Dialog for choosing the wallet's obfuscation configuration: preset
    Basic/High/Max levels that each apply an amount of coins and a number of
    rounds via configure(). (Previous comment was copy-pasted from the
    passphrase dialog.)
 */
class ObfuscationConfig : public QDialog
{
    Q_OBJECT

public:
    ObfuscationConfig(QWidget* parent = 0);
    ~ObfuscationConfig();

    void setModel(WalletModel* model);

private:
    Ui::ObfuscationConfig* ui;      // Qt Designer form
    WalletModel* model;             // wallet backend, set via setModel()
    // Apply the chosen preset (enabled flag, coin amount, mixing rounds).
    void configure(bool enabled, int coins, int rounds);

private slots:
    // Handlers for the three preset buttons.
    void clickBasic();
    void clickHigh();
    void clickMax();
};

#endif // OBFUSCATIONCONFIG_H
|
import unittest
import warnings
import numpy as np
from pydrake.solvers import mathematicalprogram as mp
from pydrake.solvers.clp import ClpSolver
class TestClpSolver(unittest.TestCase):
    """Smoke tests for Drake's CLP solver bindings."""

    def _make_prog(self):
        # Build a small LP with a known optimum; returns (prog, vars, expected x).
        prog = mp.MathematicalProgram()
        x = prog.NewContinuousVariables(4, "x")
        # Costs are additive: total objective is -3*x0 - x1 - 5*x2 - x3 + 2.
        prog.AddLinearCost(-3*x[0] - 2*x[1])
        prog.AddLinearCost(x[1] - 5 * x[2] - x[3] + 2)
        prog.AddLinearConstraint(3*x[0] + x[1] + 2*x[2] == 30)
        prog.AddLinearConstraint(2*x[0] + x[1] + 3 * x[2] + x[3] >= 15)
        prog.AddLinearConstraint(2 * x[1] + 3 * x[3] <= 25)
        # Matrix-form constraint: -100 <= x0 + 2*x2 <= 40.
        prog.AddLinearConstraint(
            np.array([[1, 2]]), [-100], [40], [x[0], x[2]])
        prog.AddBoundingBoxConstraint(0, np.inf, x)
        prog.AddLinearConstraint(x[1] <= 10)
        x_expected = np.array([0, 0, 15., 25./3])
        return prog, x, x_expected

    def test_clp_solver(self):
        prog, x, x_expected = self._make_prog()
        solver = ClpSolver()
        self.assertTrue(solver.available())
        self.assertEqual(solver.solver_id().name(), "CLP")
        self.assertEqual(solver.SolverName(), "CLP")
        self.assertEqual(solver.solver_type(), mp.SolverType.kClp)
        result = solver.Solve(prog, None, None)
        self.assertTrue(result.is_success())
        self.assertTrue(np.allclose(result.GetSolution(x), x_expected))
        # CLP status 0 means the problem solved to optimality.
        self.assertEqual(result.get_solver_details().status, 0)
        self.assertAlmostEqual(result.get_optimal_cost(), -244./3)

    def unavailable(self):
        """Per the BUILD file, this test is only run when CLP is disabled."""
        solver = ClpSolver()
        self.assertFalse(solver.available())
|
# Auto generated by generator.py. Delete this line if you make modification.
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor
# XPath selectors used by the generated crawler to extract product fields.
# Empty strings mean the field is not extracted for this site.
XPATH = {
    'name' : "//h1[@id='rptZone_ctl03_ctl00_h1Title']",
    'price' : "//div[@class='right']/p[@class='pCode']|//div[@class='right']/p[3]",
    'category' : "",
    'description' : "//div[@class='details']/p|//div[@class='right']/p[@class='des']",
    'images' : "//img[@id='rptZone_ctl03_ctl00_imgThumb']/@src",
    # NOTE(review): canonical URLs conventionally live on <link rel="canonical">,
    # not <meta> — confirm this selector matches the site's markup.
    'canonical' : "//meta[@rel='canonical']/@href",
    'base_url' : "",
    'brand' : ""
}
name = 'mothergardenvn.com'  # spider name
allowed_domains = ['mothergardenvn.com']
start_urls = ['http://mothergardenvn.com/trang-chu/']
tracking_url = ''
sitemap_urls = ['']
sitemap_rules = [('', 'parse_item')]
sitemap_follow = []
# Crawl rules: *.html URLs are treated as product pages (parse_item); other
# slash-separated paths are listing pages followed for more links (parse).
rules = [
    Rule(LinkExtractor(allow=['/[a-zA-Z0-9-]+\.html$']), 'parse_item'),
    Rule(LinkExtractor(allow=['/[a-zA-Z0-9-/]+$']), 'parse'),
    #Rule(LinkExtractor(), 'parse_item_and_links'),
]
|
# Copyright 2020 InterDigital Communications, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
from pathlib import Path
from setuptools import find_packages, setup
from torch.utils.cpp_extension import BuildExtension, CppExtension
# Package metadata used throughout this setup script.
cwd = Path(__file__).resolve().parent

package_name = "compressai"
version = "1.1.0"
# Falls back to "unknown" when not building from a git checkout.
git_hash = "unknown"

try:
    git_hash = (
        subprocess.check_output(["git", "rev-parse", "HEAD"], cwd=cwd).decode().strip()
    )
except (FileNotFoundError, subprocess.CalledProcessError):
    # git missing or not a repository: keep the "unknown" placeholder.
    pass
pass
def write_version_file():
    """Generate ``<package>/version.py`` recording the release and git revision.

    Uses the module-level ``cwd``, ``package_name``, ``version`` and
    ``git_hash`` values computed above.
    """
    version_path = cwd / package_name / "version.py"
    contents = f'__version__ = "{version}"\n' + f'git_version = "{git_hash}"\n'
    with version_path.open("w") as f:
        f.write(contents)


write_version_file()
def get_extensions():
    """Build the list of C++ extension modules (rANS entropy coder and ops)."""
    ext_dirs = cwd / package_name / "cpp_exts"
    ext_modules = []

    # Add rANS module
    rans_lib_dir = cwd / "third_party/ryg_rans"
    rans_ext_dir = ext_dirs / "rans"

    # DEBUG_BUILD=1 disables optimization and keeps asserts; default is -O3.
    extra_compile_args = ["-std=c++17"]
    if os.getenv("DEBUG_BUILD", None):
        extra_compile_args += ["-O0", "-g", "-UNDEBUG"]
    else:
        extra_compile_args += ["-O3"]
    ext_modules.append(
        CppExtension(
            name=f"{package_name}.ans",
            sources=[str(s) for s in rans_ext_dir.glob("*.cpp")],
            language="c++",
            include_dirs=[rans_lib_dir, rans_ext_dir],
            extra_compile_args=extra_compile_args,
        )
    )

    # Add ops
    ops_ext_dir = ext_dirs / "ops"
    ext_modules.append(
        CppExtension(
            name=f"{package_name}._CXX",
            sources=[str(s) for s in ops_ext_dir.glob("*.cpp")],
            language="c++",
            extra_compile_args=extra_compile_args,
        )
    )

    return ext_modules
# Requirements for running the test suite.
TEST_REQUIRES = ["pytest>=6.0.1", "pytest-cov>=2.10.1"]

# Development requirements: testing plus lint/format/docs tooling.
DEV_REQUIRES = TEST_REQUIRES + [
    "pylint>=2.6.0",
    "black>=20.8b1",
    "isort>=5.4.2",
    "sphinx>=3.0.3",
]


def get_extra_requirements():
    """Return the setuptools ``extras_require`` mapping.

    The "all" extra is the union of every other extra's requirements.
    """
    extras = {
        "test": TEST_REQUIRES,
        "dev": DEV_REQUIRES,
        "tutorials": ["jupyter", "ipywidgets"],
    }
    all_reqs = set()
    for reqs in extras.values():
        all_reqs.update(reqs)
    extras["all"] = all_reqs
    return extras
# Package definition: metadata, runtime dependencies, extras and the C++
# extension modules built above.
setup(
    name=package_name,
    version=version,
    description="A PyTorch library and evaluation platform for end-to-end compression research",
    url="https://github.com/InterDigitalInc/CompressAI",
    author="InterDigital AI Lab",
    author_email="compressai@interdigital.com",
    packages=find_packages(exclude=("tests",)),
    zip_safe=False,
    python_requires=">=3.6, <3.9",
    install_requires=[
        "numpy",
        "scipy",
        "matplotlib",
        "torch>=1.4.0",
        "torchvision>=0.5.0",
        "pytorch-msssim==0.2.0",
    ],
    extras_require=get_extra_requirements(),
    license="Apache-2",
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Intended Audience :: Developers",
        "Intended Audience :: Science/Research",
        "License :: OSI Approved :: Apache Software License",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Topic :: Scientific/Engineering :: Artificial Intelligence",
    ],
    ext_modules=get_extensions(),
    cmdclass={
        # Use PyTorch's BuildExtension so torch include paths/flags are wired in.
        "build_ext": BuildExtension,
    },
)
|
//
// UserAvatarController.h
// iOSPrinciple_AVFoundation
//
// Created by WhatsXie on 2018/6/13.
// Copyright © 2018年 WhatsXie. All rights reserved.
//
#import <UIKit/UIKit.h>

// View controller for the user-avatar demo screen. No public API beyond
// UIViewController; presumably AVFoundation capture logic lives in the .m
// file (part of the iOSPrinciple_AVFoundation sample) — confirm there.
@interface UserAvatarController : UIViewController

@end
|
// Barrel module: re-export the feature's default exports under stable names.
import SendButton from './Send';
import RespondForm from './RespondForm';

export { SendButton, RespondForm };
|
// Machine-generated browser usage statistics (caniuse-style): each top-level
// single-letter key is a browser-agent id, mapping version -> global usage
// share in percent. NOTE(review): the letter->browser mapping (D, C, E, ...)
// is defined by the generator's agent table — confirm there before relying
// on it. Do not edit by hand.
module.exports={D:{"4":0,"5":0,"6":0,"7":0,"8":0,"9":0,"10":0,"11":0,"12":0,"13":0,"14":0,"15":0,"16":0,"17":0,"18":0,"19":0,"20":0,"21":0,"22":0,"23":0,"24":0,"25":0.00543,"26":0,"27":0,"28":0,"29":0.01086,"30":0,"31":0,"32":0,"33":0,"34":0,"35":0,"36":0,"37":0.00543,"38":0.00543,"39":0.00543,"40":0.00543,"41":0.00543,"42":0,"43":0.01629,"44":0,"45":0,"46":0.00543,"47":0.03801,"48":0,"49":0.12489,"50":0,"51":0,"52":0.00543,"53":0.00543,"54":0,"55":0.01086,"56":0.01629,"57":0.02715,"58":0.05973,"59":0.02172,"60":0.01086,"61":0,"62":0.01086,"63":0.1629,"64":0.01086,"65":0.1086,"66":0.03801,"67":0.0543,"68":0.04887,"69":0.07059,"70":0.09774,"71":0.19548,"72":10.03464,"73":3.19827,"74":0.01629,"75":0,"76":0},C:{"2":0,"3":0,"4":0,"5":0,"6":0,"7":0,"8":0,"9":0,"10":0,"11":0,"12":0.01086,"13":0,"14":0,"15":0,"16":0,"17":0,"18":0,"19":0,"20":0,"21":0,"22":0,"23":0,"24":0,"25":0,"26":0,"27":0,"28":0.00543,"29":0,"30":0,"31":0,"32":0,"33":0,"34":0,"35":0.01629,"36":0.00543,"37":0,"38":0,"39":0.00543,"40":0,"41":0.00543,"42":0,"43":0.02172,"44":0.00543,"45":0.00543,"46":0,"47":0.01629,"48":0.02715,"49":0.00543,"50":0,"51":0,"52":0.0543,"53":0,"54":0,"55":0,"56":0.00543,"57":0.01086,"58":0.11403,"59":0,"60":0.07059,"61":0.03801,"62":0.01629,"63":0.02172,"64":0.04344,"65":2.03625,"66":0.56472,"67":0.02172,"68":0,"3.5":0,"3.6":0},F:{"9":0,"11":0,"12":0,"15":0,"16":0,"17":0,"18":0,"19":0,"20":0.00543,"21":0,"22":0,"23":0,"24":0,"25":0,"26":0,"27":0,"28":0.03258,"29":0,"30":0,"31":0,"32":0,"33":0,"34":0,"35":0,"36":0,"37":0.00543,"38":0.01086,"39":0,"40":0.00543,"41":0,"42":0,"43":0,"44":0,"45":0,"46":0,"47":0,"48":0,"49":0.01629,"50":0,"51":0.00543,"52":0,"53":0.00543,"54":0,"55":0,"56":0.01086,"57":0.01086,"58":1.12944,"9.5-9.6":0,"10.0-10.1":0,"10.5":0,"10.6":0,"11.1":0,"11.5":0,"11.6":0,"12.1":0.01629},E:{"4":0,"5":0,"6":0,"7":0,"8":0,"9":0.01629,"10":0.03801,"11":0.04344,"12":0.69504,_:"0","3.1":0,"3.2":0,"5.1":0,"6.1":0.00543,"7.1":0,"9.1":0.02172,"10.1":0.03258,"11.1":0.11946,"12.1":0.02715},G:{"8":0.095700814814815,"3.2":0.0028567407407407,"4.0-4.1":0.0014283703703704,"4.2-4.3":0.0014283703703704,"5.0-5.1":0.007856037037037,"6.0-6.1":0.0035709259259259,"7.0-7.1":0.014283703703704,"8.1-8.4":0.030709962962963,"9.0-9.2":0.019283,"9.3":0.13926611111111,"10.0-10.2":0.077132,"10.3":0.18640233333333,"11.0-11.2":0.2363952962963,"11.3-11.4":0.50778566666667,"12.0-12.1":5.7299077407407,"12.2":0.080702925925926},I:{"3":0.00181997965412,"4":0.25843711088505,_:"67","2.1":0.01091987792472,"2.2":0.023659735503561,"2.3":0.01637981688708,"4.1":0.14013843336724,"4.2-4.3":0.52233416073245,"4.4":0,"4.4.3-4.4.4":0.81535088504578},A:{"6":0,"7":0,"8":0.065442077922078,"9":0.054535064935065,"10":0.043628051948052,"11":3.6156748051948,"5.5":0},B:{"12":0.08688,"13":0.08688,"14":0.13032,"15":0.1086,"16":0.20091,"17":2.03082,"18":0.24435},K:{_:"0 10 11 12 11.1 11.5 12.1"},P:{"4":1.4485120696721,"5.0-5.4":0.06163881147541,"6.2-6.4":0.14382389344262,"7.2-7.4":0.42119854508197,"8.2":2.6915614344262},N:{"10":0.05027,"11":0.20108},J:{"7":0.010968,"10":0.043872},R:{_:"0"},M:{"0":0.27877},O:{"0":0.95056},Q:{"1.2":0.02742},H:{"0":4.7722185738832},L:{"0":46.527},S:{"2.5":0}};
|
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
import numpy as np
import time
import weakref
from typing import Dict, List, Optional
import torch
from torch.nn.parallel import DataParallel, DistributedDataParallel
import detectron2.utils.comm as comm
from detectron2.utils.events import EventStorage, get_event_storage
__all__ = ["HookBase", "TrainerBase", "SimpleTrainer", "AMPTrainer"]
class HookBase:
    """
    Base class for hooks that can be registered with :class:`TrainerBase`.

    Each hook may implement any of four callbacks, invoked by the trainer as:

    ::
        hook.before_train()
        for iter in range(start_iter, max_iter):
            hook.before_step()
            trainer.run_step()
            hook.after_step()
        iter += 1
        hook.after_train()

    Notes:
        1. Inside a hook method, ``self.trainer`` gives access to more context
           (e.g. model, current iteration, or config if using
           :class:`DefaultTrainer`).
        2. A hook doing real work in :meth:`before_step` can usually be
           written in :meth:`after_step` instead. The convention is that
           :meth:`before_step` takes negligible time, so that hooks which do
           care about the distinction (e.g. timers) behave correctly.

    Attributes:
        trainer (TrainerBase): A weak reference to the trainer object. Set by
            the trainer when the hook is registered.
    """

    def before_train(self):
        """Called once, before the first iteration."""

    def after_train(self):
        """Called once, after the last iteration."""

    def before_step(self):
        """Called before each iteration."""

    def after_step(self):
        """Called after each iteration."""
class TrainerBase:
    """
    Base class for iterative trainer with hooks.

    The only assumption we made here is: the training runs in a loop.
    A subclass can implement what the loop is.
    We made no assumptions about the existence of dataloader, optimizer, model, etc.

    Attributes:
        iter(int): the current iteration.
        start_iter(int): The iteration to start with.
            By convention the minimum possible value is 0.
        max_iter(int): The iteration to end training.
        storage(EventStorage): An EventStorage that's opened during the course of training.
    """

    def __init__(self) -> None:
        self._hooks: List[HookBase] = []
        # Annotation-only declarations: the concrete values are set in train().
        self.iter: int
        self.start_iter: int
        self.max_iter: int
        self.storage: EventStorage

    def register_hooks(self, hooks: List[Optional[HookBase]]) -> None:
        """
        Register hooks to the trainer. The hooks are executed in the order
        they are registered.

        Args:
            hooks (list[Optional[HookBase]]): list of hooks
        """
        hooks = [h for h in hooks if h is not None]
        for h in hooks:
            assert isinstance(h, HookBase)
            # To avoid circular reference, hooks and trainer cannot own each other.
            # This normally does not matter, but will cause memory leak if the
            # involved objects contain __del__:
            # See http://engineering.hearsaysocial.com/2013/06/16/circular-references-in-python/
            h.trainer = weakref.proxy(self)
        self._hooks.extend(hooks)

    def train(self, start_iter: int, max_iter: int):
        """
        Run the main training loop from ``start_iter`` to ``max_iter``.

        Args:
            start_iter, max_iter (int): See docs above
        """
        logger = logging.getLogger(__name__)
        logger.info("Starting training from iteration {}".format(start_iter))

        self.iter = self.start_iter = start_iter
        self.max_iter = max_iter

        with EventStorage(start_iter) as self.storage:
            try:
                self.before_train()
                for self.iter in range(start_iter, max_iter):
                    self.before_step()
                    self.run_step()
                    self.after_step()
                # self.iter == max_iter can be used by `after_train` to
                # tell whether the training successfully finished or failed
                # due to exceptions.
                self.iter += 1
            except Exception:
                logger.exception("Exception during training:")
                raise
            finally:
                # Runs even on failure, so hooks get a chance to clean up.
                self.after_train()

    def before_train(self):
        # Dispatch to every registered hook, in registration order.
        for h in self._hooks:
            h.before_train()

    def after_train(self):
        # Sync storage to the final iteration before hooks read it.
        self.storage.iter = self.iter
        for h in self._hooks:
            h.after_train()

    def before_step(self):
        # Maintain the invariant that storage.iter == trainer.iter
        # for the entire execution of each step
        self.storage.iter = self.iter
        for h in self._hooks:
            h.before_step()

    def after_step(self):
        for h in self._hooks:
            h.after_step()

    def run_step(self):
        # Subclasses implement one iteration of training here.
        raise NotImplementedError
class SimpleTrainer(TrainerBase):
    """
    A simple trainer for the most common type of task:
    single-cost single-optimizer single-data-source iterative optimization,
    optionally using data-parallelism.
    It assumes that every step, you:

    1. Compute the loss with a data from the data_loader.
    2. Compute the gradients with the above loss.
    3. Update the model with the optimizer.

    All other tasks during training (checkpointing, logging, evaluation, LR schedule)
    are maintained by hooks, which can be registered by :meth:`TrainerBase.register_hooks`.

    If you want to do anything fancier than this,
    either subclass TrainerBase and implement your own `run_step`,
    or write your own training loop.
    """

    def __init__(self, model, data_loader, optimizer):
        """
        Args:
            model: a torch Module. Takes a data from data_loader and returns a
                dict of losses.
            data_loader: an iterable. Contains data to be used to call model.
            optimizer: a torch optimizer.
        """
        super().__init__()

        """
        We set the model to training mode in the trainer.
        However it's valid to train a model that's in eval mode.
        If you want your model (or a submodule of it) to behave
        like evaluation during training, you can overwrite its train() method.
        """
        model.train()

        self.model = model
        self.data_loader = data_loader
        # Cache one iterator; run_step() calls next() on it once per step,
        # so epoch handling is the data_loader's responsibility.
        self._data_loader_iter = iter(data_loader)
        self.optimizer = optimizer

    def run_step(self):
        """
        Implement the standard training logic described above.
        """
        assert self.model.training, "[SimpleTrainer] model was changed to eval mode!"
        start = time.perf_counter()
        """
        If you want to do something with the data, you can wrap the dataloader.
        """
        data = next(self._data_loader_iter)
        # Time spent waiting for data, logged separately from compute time.
        data_time = time.perf_counter() - start

        """
        If you want to do something with the losses, you can wrap the model.
        """
        loss_dict = self.model(data)
        losses = sum(loss_dict.values())

        """
        If you need to accumulate gradients or do something similar, you can
        wrap the optimizer with your custom `zero_grad()` method.
        """
        self.optimizer.zero_grad()
        losses.backward()

        self._write_metrics(loss_dict, data_time)

        """
        If you need gradient clipping/scaling or other processing, you can
        wrap the optimizer with your custom `step()` method. But it is
        suboptimal as explained in https://arxiv.org/abs/2006.15704 Sec 3.2.4
        """
        self.optimizer.step()

    def _write_metrics(
        self,
        loss_dict: Dict[str, torch.Tensor],
        data_time: float,
        prefix: str = "",
    ):
        """
        Gather per-worker metrics and log them on the main process.

        Args:
            loss_dict (dict): dict of scalar losses
            data_time (float): time taken by the dataloader iteration
            prefix (str): prepended to the "total_loss" scalar name
        """
        # Move loss tensors to CPU floats before the cross-worker gather.
        metrics_dict = {k: v.detach().cpu().item() for k, v in loss_dict.items()}
        metrics_dict["data_time"] = data_time

        # Gather metrics among all workers for logging
        # This assumes we do DDP-style training, which is currently the only
        # supported method in detectron2.
        all_metrics_dict = comm.gather(metrics_dict)

        if comm.is_main_process():
            storage = get_event_storage()

            # data_time among workers can have high variance. The actual latency
            # caused by data_time is the maximum among workers.
            data_time = np.max([x.pop("data_time") for x in all_metrics_dict])
            storage.put_scalar("data_time", data_time)

            # average the rest metrics
            metrics_dict = {
                k: np.mean([x[k] for x in all_metrics_dict]) for k in all_metrics_dict[0].keys()
            }
            total_losses_reduced = sum(metrics_dict.values())
            if not np.isfinite(total_losses_reduced):
                raise FloatingPointError(
                    f"Loss became infinite or NaN at iteration={self.iter}!\n"
                    f"loss_dict = {metrics_dict}"
                )

            storage.put_scalar("{}total_loss".format(prefix), total_losses_reduced)
            # Only log individual losses when there is more than the total.
            if len(metrics_dict) > 1:
                storage.put_scalars(**metrics_dict)
class AMPTrainer(SimpleTrainer):
    """
    Like :class:`SimpleTrainer`, but uses PyTorch's native automatic mixed precision
    in the training loop.
    """

    def __init__(self, model, data_loader, optimizer, grad_scaler=None):
        """
        Args:
            model, data_loader, optimizer: same as in :class:`SimpleTrainer`.
            grad_scaler: torch GradScaler to automatically scale gradients.
        """
        unsupported = "AMPTrainer does not support single-process multi-device training!"
        if isinstance(model, DistributedDataParallel):
            assert not (model.device_ids and len(model.device_ids) > 1), unsupported
        assert not isinstance(model, DataParallel), unsupported

        super().__init__(model, data_loader, optimizer)

        if grad_scaler is None:
            # Lazy import: only needed when the caller did not supply a scaler.
            from torch.cuda.amp import GradScaler

            grad_scaler = GradScaler()
        self.grad_scaler = grad_scaler

    def run_step(self):
        """
        Implement the AMP training logic.
        """
        assert self.model.training, "[AMPTrainer] model was changed to eval mode!"
        assert torch.cuda.is_available(), "[AMPTrainer] CUDA is required for AMP training!"
        from torch.cuda.amp import autocast

        start = time.perf_counter()
        data = next(self._data_loader_iter)
        data_time = time.perf_counter() - start

        # Forward pass under autocast so eligible ops run in reduced precision.
        with autocast():
            loss_dict = self.model(data)
            losses = sum(loss_dict.values())

        self.optimizer.zero_grad()
        # Scale the loss before backward (GradScaler's documented usage, to
        # avoid fp16 gradient underflow).
        self.grad_scaler.scale(losses).backward()

        self._write_metrics(loss_dict, data_time)

        # step() unscales gradients and skips the update on inf/NaN gradients.
        self.grad_scaler.step(self.optimizer)
        self.grad_scaler.update()
|
"""Custom Pydantic Fields/Types."""
# Standard Library
import re
from typing import TypeVar
# Third Party
from pydantic import StrictInt, StrictFloat, constr
IntFloat = TypeVar("IntFloat", StrictInt, StrictFloat)
SupportedDriver = constr(regex=r"(scrapli|netmiko|hyperglass_agent)")
class StrictBytes(bytes):
    """Custom data type for a strict byte string.

    Used for validating the encoded JWT request payload.
    """

    @classmethod
    def __get_validators__(cls):
        """Yield Pydantic validator function.

        See: https://pydantic-docs.helpmanual.io/usage/types/#custom-data-types

        Yields:
            {function} -- Validator
        """
        yield cls.validate

    @classmethod
    def validate(cls, value):
        """Validate type.

        Arguments:
            value {Any} -- Pre-validated input

        Raises:
            TypeError: Raised if value is not bytes

        Returns:
            {object} -- Instantiated class
        """
        if not isinstance(value, bytes):
            raise TypeError("bytes required")
        # BUG FIX: previously returned `cls()` — an *empty* byte string —
        # silently discarding the validated payload. Preserve the value.
        return cls(value)

    def __repr__(self):
        """Return representation of object.

        Returns:
            {str} -- Representation
        """
        return f"StrictBytes({super().__repr__()})"
class AnyUri(str):
    """String subtype for an HTTP URI path, e.g. ``/example``."""

    @classmethod
    def __get_validators__(cls):
        """Pydantic custom field method: yield the validator callable."""
        yield cls.validate

    @classmethod
    def validate(cls, value):
        """Ensure URI string contains a leading forward-slash."""
        if not isinstance(value, str):
            raise TypeError("AnyUri type must be a string")
        matched = re.fullmatch(r"^(\/.*)$", value)
        if matched is None:
            raise ValueError(
                "Invalid format. A URI must begin with a forward slash, e.g. '/example'"
            )
        return cls(matched.group())

    def __repr__(self):
        """Stringify custom field representation."""
        return f"AnyUri({super().__repr__()})"
|
import React, {Component} from 'react';
class NotFound extends Component {
render() {
return (
<h1>404</h1>
);
}
}
export default NotFound;
|
'use strict'

const { Order } = require('bfx-api-node-models')
const { args: { apiKey, apiSecret }, debug } = require('../util/setup')
const WSv2 = require('../../lib/transports/ws2')

// Three EXCHANGE LIMIT orders used to demonstrate multi-op cancellation.
const oA = new Order({
  symbol: 'tBTCUSD',
  price: 200,
  amount: 1,
  type: 'EXCHANGE LIMIT'
})

const oB = new Order({
  symbol: 'tETHUSD',
  price: 50,
  amount: 1,
  type: 'EXCHANGE LIMIT'
})

const oC = new Order({
  symbol: 'tETHBTC',
  price: 1,
  amount: 1,
  type: 'EXCHANGE LIMIT'
})

/**
 * Submits orders A, B and C, then cancels all three in a single multi-op
 * message ('oc' for A, 'oc_multi' for B and C). The socket is closed once
 * every order has emitted its 'close' event.
 */
async function execute () {
  const ws = new WSv2({
    apiKey,
    apiSecret,
    transform: true
  })

  // FIX: the original used '|' (bitwise OR) instead of '||', which coerced
  // the message/error to a number and mangled the log output.
  ws.on('error', e => debug('WSv2 error: %s', e.message || e))

  await ws.open()
  await ws.auth()

  oA.registerListeners(ws)
  oB.registerListeners(ws)
  oC.registerListeners(ws)

  let oAClosed = false
  let oBClosed = false
  let oCClosed = false

  oA.on('close', async () => {
    debug('order A cancelled: %s', oA.status)
    oAClosed = true
    if (oBClosed && oCClosed) return ws.close()
  })

  oB.on('close', async () => {
    debug('order B cancelled: %s', oB.status)
    oBClosed = true
    if (oAClosed && oCClosed) return ws.close()
  })

  oC.on('close', async () => {
    debug('order C cancelled: %s', oC.status)
    oCClosed = true
    if (oAClosed && oBClosed) return ws.close()
  })

  await oA.submit()
  debug('created order A')
  await oB.submit()
  debug('created order B')
  await oC.submit()
  debug('created order C')

  // FIX: await the multi-op submission so send failures reject execute()
  // instead of being silently dropped.
  await ws.submitOrderMultiOp([
    ['oc', { id: oA.id }],
    ['oc_multi', { id: [oB.id, oC.id] }]
  ])

  debug('sent ox_multi to cancel order A and orders [B, C]')
}

// FIX: the original left execute() as a floating promise; rejections were
// unhandled. Log them via debug instead.
execute().catch((err) => debug('execute failed: %s', err.stack || err))
|
"""
Given a parent directory, return all the paths to
image class paths
"""
import sys
import glob
if __name__ == "__main__":
parent_dir = sys.argv[1]
all_dirs = glob.glob(parent_dir + "/*/*/*")
for i in all_dirs:
print(i)
|
/***************************************************************************//**
* @file
* @brief EFR32ZG13P_RTC_COMP register and bit field definitions
*******************************************************************************
* # License
* <b>Copyright 2020 Silicon Laboratories Inc. www.silabs.com</b>
*******************************************************************************
*
* SPDX-License-Identifier: Zlib
*
* The licensor of this software is Silicon Laboratories Inc.
*
* This software is provided 'as-is', without any express or implied
* warranty. In no event will the authors be held liable for any damages
* arising from the use of this software.
*
* Permission is granted to anyone to use this software for any purpose,
* including commercial applications, and to alter it and redistribute it
* freely, subject to the following restrictions:
*
* 1. The origin of this software must not be misrepresented; you must not
* claim that you wrote the original software. If you use this software
* in a product, an acknowledgment in the product documentation would be
* appreciated but is not required.
* 2. Altered source versions must be plainly marked as such, and must not be
* misrepresented as being the original software.
* 3. This notice may not be removed or altered from any source distribution.
*
******************************************************************************/
#ifdef __cplusplus
extern "C" {
#endif
#if defined(__ICCARM__)
#pragma system_include /* Treat file as system include file. */
#elif defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050)
#pragma clang system_header /* Treat file as system include file. */
#endif
/***************************************************************************//**
* @addtogroup Parts
* @{
******************************************************************************/
/***************************************************************************//**
* @brief RTC_COMP RTC COMP Register
* @ingroup EFR32ZG13P_RTC
******************************************************************************/
typedef struct {
  __IOM uint32_t COMP; /**< Compare Value Register X (read-write, per __IOM) */
} RTC_COMP_TypeDef;    /**< Single RTC comparator-channel register block. */
/** @} End of group Parts */
#ifdef __cplusplus
}
#endif
|
# coding: utf-8
"""
Strava API v3
The [Swagger Playground](https://developers.strava.com/playground) is the easiest way to familiarize yourself with the Strava API by submitting HTTP requests and observing the responses before you write any client code. It will show what a response will look like with different endpoints depending on the authorization scope you receive from your athletes. To use the Playground, go to https://www.strava.com/settings/api and change your “Authorization Callback Domain” to developers.strava.com. Please note, we only support Swagger 2.0. There is a known issue where you can only select one scope at a time. For more information, please check the section “client code” at https://developers.strava.com/docs. # noqa: E501
OpenAPI spec version: 3.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from swagger_client.configuration import Configuration
class ExplorerResponse(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'segments': 'list[ExplorerSegment]'
    }

    attribute_map = {
        'segments': 'segments'
    }

    def __init__(self, segments=None, _configuration=None):  # noqa: E501
        """ExplorerResponse - a model defined in Swagger"""  # noqa: E501
        if _configuration is None:
            _configuration = Configuration()
        self._configuration = _configuration

        self._segments = None
        self.discriminator = None

        if segments is not None:
            self.segments = segments

    @property
    def segments(self):
        """Gets the segments of this ExplorerResponse.  # noqa: E501

        The set of segments matching an explorer request  # noqa: E501

        :return: The segments of this ExplorerResponse.  # noqa: E501
        :rtype: list[ExplorerSegment]
        """
        return self._segments

    @segments.setter
    def segments(self, segments):
        """Sets the segments of this ExplorerResponse.

        The set of segments matching an explorer request  # noqa: E501

        :param segments: The segments of this ExplorerResponse.  # noqa: E501
        :type: list[ExplorerSegment]
        """
        self._segments = segments

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Convert each swagger attribute, recursing into nested models
        # (anything exposing to_dict), lists, and dicts of models.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(ExplorerResponse, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ExplorerResponse):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, ExplorerResponse):
            return True

        return self.to_dict() != other.to_dict()
|
# Copyright 2020-present, Apstra, Inc. All rights reserved.
#
# This source code is licensed under End User License Agreement found in the
# LICENSE file at http://www.apstra.com/eula
import json

from aos.client import AosClient
from scripts.utils import deserialize_fixture, render_jinja_template
import urllib3

# AOS instances commonly use self-signed certificates; suppress the
# InsecureRequestWarning that every HTTPS request would otherwise emit.
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

# You will need to update the connection details below with your
# specific AOS instance
AOS_IP = "<aos-IP>"
AOS_PORT = 443
AOS_USER = "admin"
AOS_PW = "aos-aos"

# Login
aos = AosClient(protocol="https", host=AOS_IP, port=AOS_PORT)
aos.auth.login(AOS_USER, AOS_PW)

# Find Blueprint and Default Routing-Zone by Name
bp_name = "apstra-pod1"
bp = aos.blueprint.get_id_by_name(label=bp_name)
default_rz = aos.blueprint.find_sz_by_name(bp.id, "default")

# Connectivity-template id; also used as the policy name when assigning
# interfaces further down.
ct_id = "external-router-peering"

# Create Connectivity-Template
context = {"default_rz_id": default_rz.id}
ct_template = "ext_rtr_ct_default.jinja"
ext_rtr_ct = json.loads(render_jinja_template(ct_template, context))
aos.blueprint.create_connectivity_template_from_json(bp.id, data=ext_rtr_ct)

# Assign interfaces to CT
ct_intfs = aos.blueprint.get_endpoint_policy_app_points(bp.id, ct_id)
rlink_interfaces = list()

# here we assume the external generic system interfaces were tagged
# with "Router". The below logic uses this tag to identify the interfaces
# to assign.
def find_tags(d, intf, tag):
    """Depth-first walk over an application-point tree.

    Appends to ``intf`` the id of every leaf node (``children_count`` == 0)
    whose tag list is exactly ``[tag]``, then returns ``intf``.
    """
    wanted = [tag]
    for node in d:
        is_matching_leaf = node["children_count"] == 0 and node["tags"] == wanted
        if is_matching_leaf:
            intf.append(node["id"])
        # Recurse unconditionally; leaf nodes simply have empty child lists.
        find_tags(node["children"], intf, tag)
    return intf
find_tags(ct_intfs["application_points"]["children"], rlink_interfaces, "Router")
data = {"application_points": []}
for intf_id in rlink_interfaces:
data["application_points"].append(
{
"id": intf_id,
"policies": [{"policy": "external-router-peering", "used": True}],
}
)
# Create the Connectivity-Template
aos.blueprint.update_connectivity_template(bp.id, data)
|
from setuptools import setup, find_packages


def main():
    """Configure and run setuptools packaging for `lieu`.

    Packages live under ``lib/`` (see ``package_dir``); the
    ``dedupe_geojson`` CLI is installed as a script.
    """
    setup(
        name='lieu',
        version='1.1.1',
        install_requires=[
            'six',
            'postal>=1.1.6',
            'rocksdb',
            'python-geohash',
            'phonenumbers',
            'mrjob',
        ],
        package_dir={'': 'lib'},
        packages=find_packages('lib'),
        scripts=['scripts/dedupe_geojson'],
        zip_safe=False,
        url='https://github.com/openvenues/lieu',
        description='Dedupe addresses and venues around the world with libpostal',
        license='MIT License',
        maintainer='mapzen.com',
        maintainer_email='pelias@mapzen.com',
        classifiers=[
            'Intended Audience :: Developers',
            'Intended Audience :: Information Technology',
            'License :: OSI Approved :: MIT License',
            'Programming Language :: Python :: 2',
            'Programming Language :: Python :: 2.7',
            'Programming Language :: Python :: 3',
            'Programming Language :: Python :: 3.4',
            'Programming Language :: Python :: 3.5',
            'Operating System :: MacOS :: MacOS X',
            'Operating System :: POSIX :: Linux',
            'Topic :: Text Processing :: Linguistic',
            'Topic :: Scientific/Engineering :: GIS',
            'Topic :: Software Development :: Libraries :: Python Modules'
        ],
    )


if __name__ == '__main__':
    main()
|
import json
import logging
from collections import defaultdict
import requests
from requests.exceptions import ConnectionError
from pythonapm.metrics import METRIC_TYPE
from . import Surfacer
logger = logging.getLogger(__name__)
class RequestScopedHTTPSurfacer(Surfacer):
    """Surfacer that buffers metrics in memory and flushes them to an HTTP
    endpoint as a single JSON POST.

    Counters keep only their most recent value; every other metric type
    keeps all observations in arrival order (see :meth:`record`).
    """

    def __init__(self,
                 http_host='localhost',
                 http_port='',
                 http_path='/',
                 post_fn=None):
        """
        Args:
            http_host: hostname of the metrics endpoint.
            http_port: port as a string; empty string means no explicit port.
            http_path: URL path to POST metrics to.
            post_fn: callable with the signature of ``requests.post``;
                defaults to ``requests.post`` (injectable for testing).
        """
        self.http_url = self._build_url(http_host, http_port, http_path)
        self.metrics = defaultdict(list)
        self.post_fn = post_fn or requests.post

    def _build_url(self, host, port, path):
        # BUG FIX: the previous format string ('http://{}{}{}') omitted the
        # ':' separator before the port, producing e.g.
        # 'http://localhost8080/' for a non-empty port. An empty port still
        # yields 'http://<host><path>' exactly as before.
        netloc = '{}:{}'.format(host, port) if port else host
        return 'http://{}{}'.format(netloc, path)

    def clear(self):
        """Drop all buffered metrics."""
        logger.debug('initializing surfacer')
        self.metrics = defaultdict(list)

    def flush(self):
        """POST buffered metrics as JSON; failures are logged, not raised."""
        to_flush = {'metrics': dict(self.metrics)}
        logger.debug('flushing metrics: {}'.format(json.dumps(to_flush)))
        try:
            response = self.post_fn(self.http_url, json=to_flush)
        except ConnectionError as e:
            # Deliberately best-effort: a metrics outage must not break the
            # surrounding request.
            logger.error('error submitting metrics: {}'.format(e))
        else:
            if not response.ok:
                logger.error('error submitting metrics: {}'.format(response))

    def record(self, metric):
        """
        Records a metric. If the metric is a count it will take the
        last count.

        If the metric is a histogram or a gauge it will keep track of all
        requests in the order they are seen.

        :param metric:
        :return:
        """
        if metric.mtype == METRIC_TYPE.COUNTER:
            # always replace with the most current metric
            self.metrics[metric.name] = [metric.dict()]
        else:
            self.metrics[metric.name].append(metric.dict())
|
import React from "react";
import ReactDOM from "react-dom";
import App from "./App";
import { MoralisProvider } from "react-moralis";
import "./index.css";
import { MoralisDappProvider } from "./providers/MoralisDappProvider/MoralisDappProvider";
/** Get your free Moralis Account https://moralis.io/ */
const APP_ID = process.env.REACT_APP_MORALIS_APPLICATION_ID;
const SERVER_URL = process.env.REACT_APP_MORALIS_SERVER_URL;
const Application = () => {
const isServerInfo = (APP_ID && SERVER_URL) ? true : false;
if (isServerInfo)
return (
<MoralisProvider appId={APP_ID} serverUrl={SERVER_URL}>
<MoralisDappProvider>
<App isServerInfo />
</MoralisDappProvider>
</MoralisProvider>
);
else {
return (
<div style={{ display: "flex", justifyContent: "center" }}>
Coming soon ...
</div>
);
}
};
ReactDOM.render(
// <React.StrictMode>
<Application />,
// </React.StrictMode>,
document.getElementById("root")
);
|
/* v3_skey.c */
/*
* Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL project
* 1999.
*/
/* ====================================================================
* Copyright (c) 1999 The OpenSSL Project. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* 3. All advertising materials mentioning features or use of this
* software must display the following acknowledgment:
* "This product includes software developed by the OpenSSL Project
* for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)"
*
* 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
* endorse or promote products derived from this software without
* prior written permission. For written permission, please contact
* licensing@OpenSSL.org.
*
* 5. Products derived from this software may not be called "OpenSSL"
* nor may "OpenSSL" appear in their names without prior written
* permission of the OpenSSL Project.
*
* 6. Redistributions of any form whatsoever must retain the following
* acknowledgment:
* "This product includes software developed by the OpenSSL Project
* for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)"
*
* THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
* EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
* ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
* ====================================================================
*
* This product includes cryptographic software written by Eric Young
* (eay@cryptsoft.com). This product includes software written by Tim
* Hudson (tjh@cryptsoft.com). */
#include <stdio.h>
#include <string.h>
#include <openssl/digest.h>
#include <openssl/err.h>
#include <openssl/obj.h>
#include <openssl/x509v3.h>
/* Forward declaration: parser for the subjectKeyIdentifier value that also
 * understands the special string "hash" (implementation below). */
static ASN1_OCTET_STRING *s2i_skey_id(X509V3_EXT_METHOD *method,
                                      X509V3_CTX *ctx, char *str);

/* Extension method table for subjectKeyIdentifier: the value is a plain
 * OCTET STRING, rendered as hex on output (i2s) and parsed from hex or the
 * keyword "hash" on input (s2i). */
const X509V3_EXT_METHOD v3_skey_id = {
    NID_subject_key_identifier, 0, ASN1_ITEM_ref(ASN1_OCTET_STRING),
    0, 0, 0, 0,
    (X509V3_EXT_I2S)i2s_ASN1_OCTET_STRING,
    (X509V3_EXT_S2I)s2i_skey_id,
    0, 0, 0, 0,
    NULL
};
/* Render the octet string's bytes as a newly allocated hex string via
 * hex_to_string(). `method` is unused. */
char *i2s_ASN1_OCTET_STRING(X509V3_EXT_METHOD *method, ASN1_OCTET_STRING *oct)
{
    return hex_to_string(oct->data, oct->length);
}
/* Parse a hex string into a freshly allocated ASN1_OCTET_STRING.
 * Returns NULL on allocation or parse failure. `method` and `ctx` are
 * unused. */
ASN1_OCTET_STRING *s2i_ASN1_OCTET_STRING(X509V3_EXT_METHOD *method,
                                         X509V3_CTX *ctx, char *str)
{
    ASN1_OCTET_STRING *oct;
    long length;

    if (!(oct = M_ASN1_OCTET_STRING_new())) {
        OPENSSL_PUT_ERROR(X509V3, ERR_R_MALLOC_FAILURE);
        return NULL;
    }

    /* string_to_hex allocates the byte buffer and reports its length. */
    if (!(oct->data = string_to_hex(str, &length))) {
        /* oct->data is still NULL here, so freeing oct leaks nothing. */
        M_ASN1_OCTET_STRING_free(oct);
        return NULL;
    }

    oct->length = length;

    return oct;
}
/* Parse the subjectKeyIdentifier extension value. The keyword "hash" means:
 * compute the SHA-1 digest of the subject public key taken from the request
 * or certificate in `ctx`; any other string is treated as literal hex. */
static ASN1_OCTET_STRING *s2i_skey_id(X509V3_EXT_METHOD *method,
                                      X509V3_CTX *ctx, char *str)
{
    ASN1_OCTET_STRING *oct;
    ASN1_BIT_STRING *pk;
    unsigned char pkey_dig[EVP_MAX_MD_SIZE];
    unsigned int diglen;

    /* Non-"hash" values fall through to plain hex parsing. */
    if (strcmp(str, "hash"))
        return s2i_ASN1_OCTET_STRING(method, ctx, str);

    if (!(oct = M_ASN1_OCTET_STRING_new())) {
        OPENSSL_PUT_ERROR(X509V3, ERR_R_MALLOC_FAILURE);
        return NULL;
    }

    /* CTX_TEST means syntax checking only: return the empty string. */
    if (ctx && (ctx->flags == CTX_TEST))
        return oct;

    if (!ctx || (!ctx->subject_req && !ctx->subject_cert)) {
        OPENSSL_PUT_ERROR(X509V3, X509V3_R_NO_PUBLIC_KEY);
        goto err;
    }

    /* Prefer the CSR's public key; otherwise use the certificate's. */
    if (ctx->subject_req)
        pk = ctx->subject_req->req_info->pubkey->public_key;
    else
        pk = ctx->subject_cert->cert_info->key->public_key;

    if (!pk) {
        OPENSSL_PUT_ERROR(X509V3, X509V3_R_NO_PUBLIC_KEY);
        goto err;
    }

    if (!EVP_Digest
        (pk->data, pk->length, pkey_dig, &diglen, EVP_sha1(), NULL))
        goto err;

    if (!M_ASN1_OCTET_STRING_set(oct, pkey_dig, diglen)) {
        OPENSSL_PUT_ERROR(X509V3, ERR_R_MALLOC_FAILURE);
        goto err;
    }

    return oct;

 err:
    /* Single cleanup path for every failure after allocation. */
    M_ASN1_OCTET_STRING_free(oct);
    return NULL;
}
|
def timer(f):
    """Decorator that prints the wall-clock time each call to ``f`` takes.

    NOTE: Python 2 code (print statement, %-string formatting).
    """
    import functools

    @functools.wraps(f)
    def f2(*args, **kwargs):
        import time
        import inspect
        t0 = time.time()
        result = f(*args, **kwargs)
        t1 = time.time()
        # inspect.stack()[1][4] is the source-code line of the *call site*;
        # taking the text before '(' recovers the called expression's name.
        # NOTE(review): fragile — relies on source availability and on the
        # call fitting on one line; confirm before reusing elsewhere.
        fname = inspect.stack()[1][4][0].split('(')[0].strip()
        print 'time for %s = %.2f' % (fname, t1-t0)
        return result
    return f2
|
/*
BootstrapWebPage.h
Created by John Romkey - https://romkey.com/
December 6 2017
MIT License
*/
#ifndef BOOTSTRAP_WEB_PAGE_H
#define BOOTSTRAP_WEB_PAGE_H
#include <Arduino.h>
#include <BootstrapWebSite.h>
/*
 * BootstrapWebPage accumulates headings, paragraphs, links, lists and
 * raw HTML for a single Bootstrap-styled page, then renders the whole
 * page (with chrome supplied by the owning BootstrapWebSite) via
 * getHTML().
 */
class BootstrapWebPage {
 public:
  BootstrapWebPage(BootstrapWebSite* webSite);
  BootstrapWebPage(BootstrapWebSite* webSite, String title);

  // Append an <hN> heading; the single-argument overload uses level 1.
  void addHeading(String heading, int level);
  void addHeading(String heading) { addHeading(heading, 1); };

  void addParagraph(String text);

  // Build the <a> markup for a link without appending it to the page.
  static String createLink(String url, String content);
  // Append a link to the page body.
  void addLink(String url, String content);
  // Append raw, pre-formatted HTML verbatim.
  void addContent(String content);

  // addList overloads: the 1..9-item forms all delegate to the
  // 10-item form, padding unused slots with empty strings (no
  // variadic String support on this toolchain).
  void addList(String item) { addList(item, "", "", "", "", "", "", "", "", ""); };
  void addList(String item1, String item2) { addList(item1, item2, "", "", "", "", "", "", "", ""); };
  void addList(String item1, String item2, String item3) { addList(item1, item2, item3, "", "", "", "", "", "", ""); };
  void addList(String item1, String item2, String item3, String item4) { addList(item1, item2, item3, item4, "", "", "", "", "", ""); };
  void addList(String item1, String item2, String item3, String item4, String item5) { addList(item1, item2, item3, item4, item5, "", "", "", "", ""); };
  void addList(String item1, String item2, String item3, String item4, String item5, String item6) { addList(item1, item2, item3, item4, item5, item6, "", "", "", ""); };
  void addList(String item1, String item2, String item3, String item4, String item5, String item6, String item7) { addList(item1, item2, item3, item4, item5, item6, item7, "", "", ""); };
  void addList(String item1, String item2, String item3, String item4, String item5, String item6, String item7, String item8) { addList(item1, item2, item3, item4, item5, item6, item7, item8, "", ""); };
  void addList(String item1, String item2, String item3, String item4, String item5, String item6, String item7, String item8, String item9) { addList(item1, item2, item3, item4, item5, item6, item7, item8, item9, ""); };
  void addList(String item1, String item2, String item3, String item4, String item5, String item6, String item7, String item8, String item9, String item10);

  // Wrap a single item in list-item markup.
  String listItem(String item);
  // Render the accumulated content as a complete HTML page.
  String getHTML(void);

 private:
  BootstrapWebSite *_site;  // owning site; supplies shared page chrome
  String _title;            // page title
  String _content;          // accumulated body markup
};
#endif // BOOTSTRAP_WEB_PAGE_H
|
from typing import Dict
from pytest import fixture
from blurr.core.errors import InvalidTypeError, RequiredAttributeError, SpecNotFoundError
from blurr.core.field_simple import IntegerFieldSchema
from blurr.core.schema_loader import SchemaLoader
from blurr.core.transformer_streaming import StreamingTransformerSchema
from blurr.core.type import Type
from blurr.core.validator import ATTRIBUTE_TYPE
from blurr.store.memory_store import MemoryStore
@fixture
def nested_schema_spec_bad_type() -> Dict:
    """Nested schema spec with two deliberate faults: an unknown
    top-level Type and an aggregate missing its Type attribute.
    Used to exercise error collection in the SchemaLoader tests."""
    return {
        'Name': 'test',
        'Type': 'Blurr:Unknown',
        'Ignored': 2,
        'Aggregates': [{
            'Name': 'test_group',
            'Fields': [{
                "Name": "country",
                "Type": "string",
                "Value": "source.country"
            }, {
                "Name": "events",
                "Type": "integer",
                "Value": "test_group.events+1"
            }]
        }]
    }
@fixture
def nested_schema_spec() -> Dict:
    """Valid streaming-transformer spec with one identity aggregate
    containing a string field and an integer field."""
    return {
        'Name': 'test',
        'Type': Type.BLURR_TRANSFORM_STREAMING,
        "Version": "2018-03-01",
        "Time": "parser.parse(source.event_time)",
        "Identity": "source.user_id",
        'Ignored': 2,
        'Aggregates': [{
            'Name': 'test_group',
            'Type': Type.BLURR_AGGREGATE_IDENTITY,
            'Fields': [{
                "Type": "string",
                "Name": "country",
                "Value": "source.country"
            }, {
                "Type": "integer",
                "Name": "events",
                "Value": "test_group.events+1"
            }]
        }]
    }
@fixture
def schema_loader(nested_schema_spec) -> SchemaLoader:
    """A SchemaLoader pre-populated with the valid nested schema spec."""
    schema_loader = SchemaLoader()
    schema_loader.add_schema_spec(nested_schema_spec)
    return schema_loader
def test_add_invalid_schema() -> None:
    """Non-dict specs and dicts without Name/Type are rejected (None)."""
    schema_loader = SchemaLoader()
    assert schema_loader.add_schema_spec('') is None
    assert schema_loader.add_schema_spec(['test']) is None
    assert schema_loader.add_schema_spec({'test': 1}) is None


def test_add_valid_simple_schema() -> None:
    """A minimal Name/Type spec is stored and retrievable by its name."""
    schema_loader = SchemaLoader()
    assert schema_loader.add_schema_spec({'Name': 'test', 'Type': 'test_type'}) == 'test'
    assert schema_loader.get_schema_spec('test') == {'Name': 'test', 'Type': 'test_type'}


def test_add_valid_simple_schema_with_parent() -> None:
    """add_schema_spec returns the bare name, but the spec is keyed
    under the fully-qualified 'parent.child' name."""
    schema_loader = SchemaLoader()
    assert schema_loader.add_schema_spec({'Name': 'test', 'Type': 'test_type'}, 'parent') == 'test'
    assert schema_loader.get_schema_spec('parent.test') == {'Name': 'test', 'Type': 'test_type'}


def test_add_valid_nested_schema(nested_schema_spec_bad_type: Dict) -> None:
    """Nested aggregates and fields are indexed under dotted FQNs,
    even when the spec carries type errors."""
    schema_loader = SchemaLoader()
    assert schema_loader.add_schema_spec(nested_schema_spec_bad_type) == 'test'
    assert schema_loader.get_schema_spec('test.test_group') == nested_schema_spec_bad_type[
        'Aggregates'][0]
    assert schema_loader.get_schema_spec('test.test_group.country') == nested_schema_spec_bad_type[
        'Aggregates'][0]['Fields'][0]
    assert schema_loader.get_schema_spec('test.test_group.events') == nested_schema_spec_bad_type[
        'Aggregates'][0]['Fields'][1]
def test_get_schema_object_error(nested_schema_spec_bad_type: Dict) -> None:
    """Adding the faulty spec records two validation errors; asking for
    the schema object afterwards fails and appends a TYPE_NOT_LOADED
    error (the counts below depend on this exact call order)."""
    schema_loader = SchemaLoader()
    fqn = schema_loader.add_schema_spec(nested_schema_spec_bad_type)
    errors = schema_loader.get_errors()
    assert len(schema_loader.get_errors('test', True)) == 2
    assert InvalidTypeError('test', nested_schema_spec_bad_type, ATTRIBUTE_TYPE,
                            InvalidTypeError.Reason.TYPE_NOT_DEFINED) in errors
    assert RequiredAttributeError('test.test_group', nested_schema_spec_bad_type['Aggregates'],
                                  ATTRIBUTE_TYPE) in errors
    schema = schema_loader.get_schema_object('test')
    errors = schema_loader.get_errors()
    assert len(schema_loader.get_errors('test', True)) == 3
    assert schema is None
    assert InvalidTypeError(fqn, nested_schema_spec_bad_type, ATTRIBUTE_TYPE,
                            InvalidTypeError.Reason.TYPE_NOT_LOADED) in errors


def test_get_schema_object(schema_loader: SchemaLoader) -> None:
    """Schema objects are constructed per type and cached: mutating the
    returned instance is visible on subsequent lookups."""
    assert isinstance(schema_loader.get_schema_object('test'), StreamingTransformerSchema) is True
    field_schema = schema_loader.get_schema_object('test.test_group.events')
    assert isinstance(field_schema, IntegerFieldSchema) is True
    # Assert that the same object is returned and a new one is not created.
    assert field_schema.when is None
    field_schema.when = 'True'
    assert schema_loader.get_schema_object('test.test_group.events').when == 'True'


def test_get_nested_schema_object(schema_loader: SchemaLoader):
    """Lookup by (parent FQN, child name) resolves to the same schema."""
    assert isinstance(
        schema_loader.get_nested_schema_object('test.test_group', 'events'),
        IntegerFieldSchema) is True
def test_get_fully_qualified_name() -> None:
    """FQNs are parent and child joined with a dot."""
    assert SchemaLoader.get_fully_qualified_name('parent', 'child') == 'parent.child'


def test_get_schemas_of_type(schema_loader: SchemaLoader, nested_schema_spec: Dict) -> None:
    """Only the single integer field in the fixture matches Type.INTEGER."""
    assert schema_loader.get_schema_specs_of_type(Type.INTEGER) == {
        'test.test_group.events': nested_schema_spec['Aggregates'][0]['Fields'][1]
    }


def test_get_transformer_name() -> None:
    """The transformer name is the first segment of a dotted FQN."""
    assert SchemaLoader.get_transformer_name('test.child1.child2') == 'test'
def test_get_store_error_not_declared(schema_loader: SchemaLoader):
    """Requesting a store FQN absent from the spec records SpecNotFoundError."""
    schema_loader.get_store('test.memstore')
    error = schema_loader.get_errors('test.memstore', False)[0]
    assert isinstance(error, SpecNotFoundError)


def test_get_store_error_wrong_type(schema_loader: SchemaLoader) -> None:
    """Requesting a non-store FQN as a store records an InvalidTypeError
    with INCORRECT_BASE / expected base STORE."""
    schema_loader.get_store('test')
    error = next(
        x for x in schema_loader.get_errors('test', False) if isinstance(x, InvalidTypeError))
    assert isinstance(error, InvalidTypeError)
    assert error.reason == InvalidTypeError.Reason.INCORRECT_BASE
    assert error.expected_base_type == InvalidTypeError.BaseTypes.STORE


def test_get_store_success(nested_schema_spec: Dict) -> None:
    """A declared memory store is instantiated on demand."""
    nested_schema_spec['Store'] = {'Name': 'memstore', 'Type': Type.BLURR_STORE_MEMORY}
    schema_loader = SchemaLoader()
    schema_loader.add_schema_spec(nested_schema_spec)
    assert isinstance(schema_loader.get_store('test.memstore'), MemoryStore)


def test_get_all_stores(nested_schema_spec: Dict) -> None:
    """get_all_stores lists only stores that have been instantiated."""
    nested_schema_spec['Store'] = {'Name': 'memstore', 'Type': Type.BLURR_STORE_MEMORY}
    schema_loader = SchemaLoader()
    schema_loader.add_schema_spec(nested_schema_spec)
    # No store instantiated yet.
    assert schema_loader.get_all_stores() == []
    assert isinstance(schema_loader.get_store('test.memstore'), MemoryStore)
    stores = schema_loader.get_all_stores()
    assert len(stores) == 1
    assert isinstance(stores[0], MemoryStore)
|
import module3 from '../src/module-3';
// Placeholder spec: an `it` with no callback registers as *pending*,
// so the test runner reports that module-3 still needs real coverage
// without failing the suite.
describe('module-3', () => {
  it('needs tests');
});
|
import serial, time

# Open the USB serial device at 9600 baud. The 0.5 s timeout means the
# read calls below return whatever arrived instead of blocking forever.
ser = serial.Serial ('/dev/ttyUSB0',9600, timeout=.5)

# Poll loop (Python 2 script — print statement, str writes):
# send "L" (command meaning depends on the attached firmware — TODO
# confirm), read up to 10 bytes plus one full line, print both, then
# wait 3 seconds before the next poll.
while True:
    ser.write("L")
    readText = ser.read(10)
    line = ser.readline()
    print readText, "Line: ",line
    time.sleep(3)
|
// @flow
export { default as StatelessAvatar } from './StatelessAvatar';
|
# -*- coding: utf-8 -*-
'''
fantastic Add-on
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import debrid
class source:
    """Scraper for rls-movies.com (Python 2 Kodi add-on convention:
    each provider module exposes a class literally named `source`).

    Results are debrid-only release links harvested from the site's
    RSS search feed.
    """

    def __init__(self):
        self.priority = 1
        self.language = ['en']
        self.domains = ['rls-movies.com']
        self.base_link = 'http://www.rls-movies.com'
        # %s is the url-encoded search query.
        self.search_link = '/search/%s/feed/rss2/'

    def movie(self, imdb, title, localtitle, aliases, year):
        """Return a urlencoded query describing the movie, or None."""
        try:
            url = {'imdb': imdb, 'title': title, 'year': year}
            url = urllib.urlencode(url)
            return url
        except:
            return

    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        """Return a urlencoded query describing the show, or None."""
        try:
            url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year}
            url = urllib.urlencode(url)
            return url
        except:
            return

    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        """Extend a tvshow() query with episode details, or None."""
        try:
            if url == None: return
            url = urlparse.parse_qs(url)
            url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
            url['title'], url['premiered'], url['season'], url['episode'] = title, premiered, season, episode
            url = urllib.urlencode(url)
            return url
        except:
            return

    def sources(self, url, hostDict, hostprDict):
        """Search the RSS feed for releases matching the title/year (or
        SxxEyy), filter out subs/extras/archives, classify quality and
        size, and return a list of debrid-only source dicts. Returns an
        empty list when debrid is unavailable or nothing matches."""
        try:
            sources = []
            if url == None: return sources
            # Site only yields premium/debrid links; bail out otherwise.
            if debrid.status() == False: raise Exception()
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            # hdlr is the token that must appear in the release name:
            # "SxxEyy" for episodes, the year for movies.
            hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
            query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)
            r = client.request(url)
            posts = client.parseDOM(r, 'item')
            hostDict = hostprDict + hostDict
            items = []
            # Collect (release name, link, size string) per enclosure.
            for post in posts:
                try:
                    t = client.parseDOM(post, 'title')[0]
                    u = client.parseDOM(post, 'enclosure', ret='url', attrs={'type': 'video.+?'})
                    s = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) (?:GiB|MiB))', post)
                    s = s[0] if s else '0'
                    items += [(t, i, s) for i in u]
                except:
                    pass
            for item in items:
                try:
                    name = item[0]
                    name = client.replaceHTMLCodes(name)
                    # Strip year/episode markers and trailing tags, then
                    # require a normalized-title match.
                    t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name)
                    if not cleantitle.get(t) == cleantitle.get(title): raise Exception()
                    y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', name)[-1].upper()
                    if not y == hdlr: raise Exception()
                    # fmt = the tag tokens after the year/episode marker.
                    fmt = re.sub('(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*)(\.|\)|\]|\s)', '', name.upper())
                    fmt = re.split('\.|\(|\)|\[|\]|\s|\-', fmt)
                    fmt = [i.lower() for i in fmt]
                    if any(i.endswith(('subs', 'sub', 'dubbed', 'dub')) for i in fmt): raise Exception()
                    if any(i in ['extras'] for i in fmt): raise Exception()
                    if '1080p' in fmt: quality = '1080p'
                    elif '720p' in fmt: quality = 'HD'
                    else: quality = 'SD'
                    if any(i in ['dvdscr', 'r5', 'r6'] for i in fmt): quality = 'SCR'
                    elif any(i in ['camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'telesync', 'ts'] for i in fmt): quality = 'CAM'
                    info = []
                    if '3d' in fmt: info.append('3D')
                    try:
                        # Normalize the size to GB for display.
                        size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) (?:GiB|MiB))', item[2])[-1]
                        div = 1 if size.endswith('GiB') else 1024
                        size = float(re.sub('[^0-9|/.|/,]', '', size))/div
                        size = '%.2f GB' % size
                        info.append(size)
                    except:
                        pass
                    if any(i in ['hevc', 'h265', 'x265'] for i in fmt): info.append('HEVC')
                    info = ' | '.join(info)
                    url = item[1]
                    # Skip archives and disc images outright.
                    if any(x in url for x in ['.rar', '.zip', '.iso']): raise Exception()
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')
                    host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                    if not host in hostDict: raise Exception()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')
                    sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True})
                except:
                    pass
            # Prefer non-CAM releases when any exist.
            check = [i for i in sources if not i['quality'] == 'CAM']
            if check: sources = check
            return sources
        except:
            return sources

    def resolve(self, url):
        """Links are direct to the hoster; nothing to resolve."""
        return url
|
import React from 'react'
import './Footer.css'
export default function Header(){
return(
<div className="footer-class">
<ul class="foot-top-nav foot-nav-ul">
<li class="foot-li"><a href="/" class="footer-link" data-track="footer-about">About</a></li>
<li class="foot-li"><a href="/" class="footer-link" data-track="footer-jobs">Jobs</a></li>
<li class="foot-li"><a href="/" class="footer-link" data-track="footer-blog">Blog</a></li>
<li class="foot-li"><a href="/" class="footer-link" data-track="footer-developers">Developers</a></li>
<li class="foot-li"><a href="/" class="footer-link" data-track="footer-guidelines">Guidelines</a></li>
<li class="foot-li"><a href="/" class="footer-link" data-track="footer-privacy">Privacy</a></li>
<li class="foot-li"><a href="/" class="footer-link" data-track="footer-terms">Terms</a></li>
<li class="foot-li"><a href="/" class="footer-link" data-track="footer-help">Help</a></li>
<li class="foot-li"><a href="/" class="footer-link" data-track="footer-abuse">Report abuse</a></li>
<li class="foot-li"><a href="/" class="footer-link" data-track="">Help forum</a></li>
</ul>
</div>
);
}
|
/*
* Driver for Analog Devices ADV748X video decoder and HDMI receiver
*
* Copyright (C) 2017 Renesas Electronics Corp.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* Authors:
* Koji Matsuoka <koji.matsuoka.xm@renesas.com>
* Niklas Söderlund <niklas.soderlund@ragnatech.se>
* Kieran Bingham <kieran.bingham@ideasonboard.com>
*
* The ADV748x range of receivers have the following configurations:
*
* Analog HDMI MHL 4-Lane 1-Lane
* In In CSI CSI
* ADV7480 X X X
* ADV7481 X X X X X
* ADV7482 X X X X
*/
#include <linux/i2c.h>
#ifndef _ADV748X_H_
#define _ADV748X_H_
/*
 * Register map pages of the ADV748x. Each real page corresponds to an
 * entry in adv748x_state's i2c_clients[]/regmap[] arrays (indexed up
 * to ADV748X_PAGE_MAX).
 */
enum adv748x_page {
	ADV748X_PAGE_IO,
	ADV748X_PAGE_DPLL,
	ADV748X_PAGE_CP,
	ADV748X_PAGE_HDMI,
	ADV748X_PAGE_EDID,
	ADV748X_PAGE_REPEATER,
	ADV748X_PAGE_INFOFRAME,
	ADV748X_PAGE_CBUS,
	ADV748X_PAGE_CEC,
	ADV748X_PAGE_SDP,
	ADV748X_PAGE_TXB,
	ADV748X_PAGE_TXA,
	ADV748X_PAGE_MAX,

	/* Fake pages for register sequences */
	ADV748X_PAGE_WAIT,		/* Wait x msec */
	ADV748X_PAGE_EOR,		/* End Mark */
};
/**
 * enum adv748x_ports - Device tree port number definitions
 *
 * The ADV748X ports define the mapping between subdevices
 * and the device tree specification
 */
enum adv748x_ports {
	ADV748X_PORT_AIN0 = 0,		/* analog inputs */
	ADV748X_PORT_AIN1 = 1,
	ADV748X_PORT_AIN2 = 2,
	ADV748X_PORT_AIN3 = 3,
	ADV748X_PORT_AIN4 = 4,
	ADV748X_PORT_AIN5 = 5,
	ADV748X_PORT_AIN6 = 6,
	ADV748X_PORT_AIN7 = 7,
	ADV748X_PORT_HDMI = 8,
	ADV748X_PORT_TTL = 9,
	ADV748X_PORT_TXA = 10,		/* CSI-2 transmitters */
	ADV748X_PORT_TXB = 11,
	ADV748X_PORT_MAX = 12,
};
/* Media pads of a CSI-2 transmitter subdevice: one sink, one source. */
enum adv748x_csi2_pads {
	ADV748X_CSI2_SINK,
	ADV748X_CSI2_SOURCE,
	ADV748X_CSI2_NR_PADS,
};

/* CSI2 transmitters can have 2 internal connections, HDMI/AFE */
#define ADV748X_CSI2_MAX_SUBDEVS 2
/*
 * Per-transmitter (TXA/TXB) context: the owning device state, the
 * active format, the register page used to address this TX, and its
 * media/V4L2 subdevice plumbing (including the pixel rate control).
 */
struct adv748x_csi2 {
	struct adv748x_state *state;
	struct v4l2_mbus_framefmt format;
	unsigned int page;		/* ADV748X_PAGE_TXA or _TXB */

	struct media_pad pads[ADV748X_CSI2_NR_PADS];
	struct v4l2_ctrl_handler ctrl_hdl;
	struct v4l2_ctrl *pixel_rate;
	struct v4l2_subdev sd;
};

#define notifier_to_csi2(n) container_of(n, struct adv748x_csi2, notifier)
#define adv748x_sd_to_csi2(sd) container_of(sd, struct adv748x_csi2, sd)
/* Media pads of the HDMI receiver subdevice. */
enum adv748x_hdmi_pads {
	ADV748X_HDMI_SINK,
	ADV748X_HDMI_SOURCE,
	ADV748X_HDMI_NR_PADS,
};

/*
 * HDMI receiver context: pads, controls, subdevice, the negotiated
 * format/timings/aspect ratio, and the EDID exposed to the source
 * (raw data, presence mask and block count).
 */
struct adv748x_hdmi {
	struct media_pad pads[ADV748X_HDMI_NR_PADS];
	struct v4l2_ctrl_handler ctrl_hdl;
	struct v4l2_subdev sd;
	struct v4l2_mbus_framefmt format;

	struct v4l2_dv_timings timings;
	struct v4l2_fract aspect_ratio;

	struct {
		u8 edid[512];
		u32 present;
		unsigned int blocks;
	} edid;
};

#define adv748x_ctrl_to_hdmi(ctrl) \
	container_of(ctrl->handler, struct adv748x_hdmi, ctrl_hdl)
#define adv748x_sd_to_hdmi(sd) container_of(sd, struct adv748x_hdmi, sd)
/* Media pads of the analog front end: eight analog sinks, one source. */
enum adv748x_afe_pads {
	ADV748X_AFE_SINK_AIN0,
	ADV748X_AFE_SINK_AIN1,
	ADV748X_AFE_SINK_AIN2,
	ADV748X_AFE_SINK_AIN3,
	ADV748X_AFE_SINK_AIN4,
	ADV748X_AFE_SINK_AIN5,
	ADV748X_AFE_SINK_AIN6,
	ADV748X_AFE_SINK_AIN7,
	ADV748X_AFE_SOURCE,
	ADV748X_AFE_NR_PADS,
};

/*
 * Analog front end context: pads, controls, subdevice, the active
 * format, streaming state, the detected/selected video standard and
 * the currently selected analog input.
 */
struct adv748x_afe {
	struct media_pad pads[ADV748X_AFE_NR_PADS];
	struct v4l2_ctrl_handler ctrl_hdl;
	struct v4l2_subdev sd;
	struct v4l2_mbus_framefmt format;

	bool streaming;
	v4l2_std_id curr_norm;
	unsigned int input;
};

#define adv748x_ctrl_to_afe(ctrl) \
	container_of(ctrl->handler, struct adv748x_afe, ctrl_hdl)
#define adv748x_sd_to_afe(sd) container_of(sd, struct adv748x_afe, sd)
/**
 * struct adv748x_state - State of ADV748X
 * @dev:		(OF) device
 * @client:		I2C client
 * @mutex:		protect global state
 *
 * @endpoints:		parsed device node endpoints for each port
 *
 * @i2c_addresses:	I2C Page addresses
 * @i2c_clients:	I2C clients for the page accesses
 * @regmap:		regmap configuration pages.
 *
 * @hdmi:		state of HDMI receiver context
 * @afe:		state of AFE receiver context
 * @txa:		state of TXA transmitter context
 * @txb:		state of TXB transmitter context
 */
struct adv748x_state {
	struct device *dev;
	struct i2c_client *client;
	struct mutex mutex;

	struct device_node *endpoints[ADV748X_PORT_MAX];

	struct i2c_client *i2c_clients[ADV748X_PAGE_MAX];
	struct regmap *regmap[ADV748X_PAGE_MAX];

	struct adv748x_hdmi hdmi;
	struct adv748x_afe afe;
	struct adv748x_csi2 txa;
	struct adv748x_csi2 txb;
};

/* Map a subcomponent context back to the owning device state. */
#define adv748x_hdmi_to_state(h) container_of(h, struct adv748x_state, hdmi)
#define adv748x_afe_to_state(a) container_of(a, struct adv748x_state, afe)

/* Logging helpers keyed off the device in @a (an adv748x_state *). */
#define adv_err(a, fmt, arg...)	dev_err(a->dev, fmt, ##arg)
#define adv_info(a, fmt, arg...) dev_info(a->dev, fmt, ##arg)
#define adv_dbg(a, fmt, arg...)	dev_dbg(a->dev, fmt, ##arg)
/* Register Mappings */

/* IO Map */
#define ADV748X_IO_PD 0x00	/* power down controls */
#define ADV748X_IO_PD_RX_EN BIT(6)

#define ADV748X_IO_REG_04 0x04
#define ADV748X_IO_REG_04_FORCE_FR BIT(0)	/* Force CP free-run */

#define ADV748X_IO_DATAPATH 0x03	/* datapath cntrl */
#define ADV748X_IO_DATAPATH_VFREQ_M 0x70
#define ADV748X_IO_DATAPATH_VFREQ_SHIFT 4

#define ADV748X_IO_VID_STD 0x05

#define ADV748X_IO_10 0x10	/* io_reg_10 */
#define ADV748X_IO_10_CSI4_EN BIT(7)
#define ADV748X_IO_10_CSI1_EN BIT(6)
#define ADV748X_IO_10_PIX_OUT_EN BIT(5)

#define ADV748X_IO_CHIP_REV_ID_1 0xdf
#define ADV748X_IO_CHIP_REV_ID_2 0xe0

#define ADV748X_IO_SLAVE_ADDR_BASE 0xf2

/* HDMI RX Map */
#define ADV748X_HDMI_LW1 0x07	/* line width_1 */
#define ADV748X_HDMI_LW1_VERT_FILTER BIT(7)
#define ADV748X_HDMI_LW1_DE_REGEN BIT(5)
#define ADV748X_HDMI_LW1_WIDTH_MASK 0x1fff

#define ADV748X_HDMI_F0H1 0x09	/* field0 height_1 */
#define ADV748X_HDMI_F0H1_HEIGHT_MASK 0x1fff

#define ADV748X_HDMI_F1H1 0x0b	/* field1 height_1 */
#define ADV748X_HDMI_F1H1_INTERLACED BIT(5)

#define ADV748X_HDMI_HFRONT_PORCH 0x20	/* hsync_front_porch_1 */
#define ADV748X_HDMI_HFRONT_PORCH_MASK 0x1fff

#define ADV748X_HDMI_HSYNC_WIDTH 0x22	/* hsync_pulse_width_1 */
#define ADV748X_HDMI_HSYNC_WIDTH_MASK 0x1fff

#define ADV748X_HDMI_HBACK_PORCH 0x24	/* hsync_back_porch_1 */
#define ADV748X_HDMI_HBACK_PORCH_MASK 0x1fff

#define ADV748X_HDMI_VFRONT_PORCH 0x2a	/* field0_vs_front_porch_1 */
#define ADV748X_HDMI_VFRONT_PORCH_MASK 0x3fff

#define ADV748X_HDMI_VSYNC_WIDTH 0x2e	/* field0_vs_pulse_width_1 */
#define ADV748X_HDMI_VSYNC_WIDTH_MASK 0x3fff

#define ADV748X_HDMI_VBACK_PORCH 0x32	/* field0_vs_back_porch_1 */
#define ADV748X_HDMI_VBACK_PORCH_MASK 0x3fff

#define ADV748X_HDMI_TMDS_1 0x51	/* hdmi_reg_51 */
#define ADV748X_HDMI_TMDS_2 0x52	/* hdmi_reg_52 */

/* HDMI RX Repeater Map */
#define ADV748X_REPEATER_EDID_SZ 0x70	/* primary_edid_size */
#define ADV748X_REPEATER_EDID_SZ_SHIFT 4

#define ADV748X_REPEATER_EDID_CTL 0x74	/* hdcp edid controls */
#define ADV748X_REPEATER_EDID_CTL_EN BIT(0)	/* man_edid_a_enable */

/* SDP Main Map */
#define ADV748X_SDP_INSEL 0x00	/* user_map_rw_reg_00 */

#define ADV748X_SDP_VID_SEL 0x02	/* user_map_rw_reg_02 */
#define ADV748X_SDP_VID_SEL_MASK 0xf0
#define ADV748X_SDP_VID_SEL_SHIFT 4

/* Contrast - Unsigned*/
#define ADV748X_SDP_CON 0x08	/* user_map_rw_reg_08 */
#define ADV748X_SDP_CON_MIN 0
#define ADV748X_SDP_CON_DEF 128
#define ADV748X_SDP_CON_MAX 255

/* Brightness - Signed */
#define ADV748X_SDP_BRI 0x0a	/* user_map_rw_reg_0a */
#define ADV748X_SDP_BRI_MIN -128
#define ADV748X_SDP_BRI_DEF 0
#define ADV748X_SDP_BRI_MAX 127

/* Hue - Signed, inverted*/
#define ADV748X_SDP_HUE 0x0b	/* user_map_rw_reg_0b */
#define ADV748X_SDP_HUE_MIN -127
#define ADV748X_SDP_HUE_DEF 0
#define ADV748X_SDP_HUE_MAX 128

/* Test Patterns / Default Values */
#define ADV748X_SDP_DEF 0x0c	/* user_map_rw_reg_0c */
#define ADV748X_SDP_DEF_VAL_EN BIT(0)	/* Force free run mode */
#define ADV748X_SDP_DEF_VAL_AUTO_EN BIT(1)	/* Free run when no signal */

#define ADV748X_SDP_MAP_SEL 0x0e	/* user_map_rw_reg_0e */
#define ADV748X_SDP_MAP_SEL_RO_MAIN 1

/* Free run pattern select */
#define ADV748X_SDP_FRP 0x14
#define ADV748X_SDP_FRP_MASK GENMASK(3, 1)

/* Saturation */
#define ADV748X_SDP_SD_SAT_U 0xe3	/* user_map_rw_reg_e3 */
#define ADV748X_SDP_SD_SAT_V 0xe4	/* user_map_rw_reg_e4 */
#define ADV748X_SDP_SAT_MIN 0
#define ADV748X_SDP_SAT_DEF 128
#define ADV748X_SDP_SAT_MAX 255

/* SDP RO Main Map */
#define ADV748X_SDP_RO_10 0x10
#define ADV748X_SDP_RO_10_IN_LOCK BIT(0)

/* CP Map */
#define ADV748X_CP_PAT_GEN 0x37	/* int_pat_gen_1 */
#define ADV748X_CP_PAT_GEN_EN BIT(7)

/* Contrast Control - Unsigned */
#define ADV748X_CP_CON 0x3a	/* contrast_cntrl */
#define ADV748X_CP_CON_MIN 0	/* Minimum contrast */
#define ADV748X_CP_CON_DEF 128	/* Default */
#define ADV748X_CP_CON_MAX 255	/* Maximum contrast */

/* Saturation Control - Unsigned */
#define ADV748X_CP_SAT 0x3b	/* saturation_cntrl */
#define ADV748X_CP_SAT_MIN 0	/* Minimum saturation */
#define ADV748X_CP_SAT_DEF 128	/* Default */
#define ADV748X_CP_SAT_MAX 255	/* Maximum saturation */

/* Brightness Control - Signed */
#define ADV748X_CP_BRI 0x3c	/* brightness_cntrl */
#define ADV748X_CP_BRI_MIN -128	/* Luma is -512d */
#define ADV748X_CP_BRI_DEF 0	/* Luma is 0 */
#define ADV748X_CP_BRI_MAX 127	/* Luma is 508d */

/* Hue Control */
#define ADV748X_CP_HUE 0x3d	/* hue_cntrl */
#define ADV748X_CP_HUE_MIN 0	/* -90 degree */
#define ADV748X_CP_HUE_DEF 0	/* -90 degree */
#define ADV748X_CP_HUE_MAX 255	/* +90 degree */

#define ADV748X_CP_VID_ADJ 0x3e	/* vid_adj_0 */
#define ADV748X_CP_VID_ADJ_ENABLE BIT(7)	/* Enable colour controls */

#define ADV748X_CP_DE_POS_HIGH 0x8b	/* de_pos_adj_6 */
#define ADV748X_CP_DE_POS_HIGH_SET BIT(6)
#define ADV748X_CP_DE_POS_END_LOW 0x8c	/* de_pos_adj_7 */
#define ADV748X_CP_DE_POS_START_LOW 0x8d	/* de_pos_adj_8 */

#define ADV748X_CP_VID_ADJ_2 0x91
#define ADV748X_CP_VID_ADJ_2_INTERLACED BIT(6)
#define ADV748X_CP_VID_ADJ_2_INTERLACED_3D BIT(4)

#define ADV748X_CP_CLMP_POS 0xc9	/* clmp_pos_cntrl_4 */
#define ADV748X_CP_CLMP_POS_DIS_AUTO BIT(0)	/* dis_auto_param_buff */

/* CSI : TXA/TXB Maps */
#define ADV748X_CSI_VC_REF 0x0d	/* csi_tx_top_reg_0d */
#define ADV748X_CSI_VC_REF_SHIFT 6

#define ADV748X_CSI_FS_AS_LS 0x1e	/* csi_tx_top_reg_1e */
#define ADV748X_CSI_FS_AS_LS_UNKNOWN BIT(6)	/* Undocumented bit */
/* Register handling (implemented in the core; return negative errno
 * on failure). */
int adv748x_read(struct adv748x_state *state, u8 addr, u8 reg);
int adv748x_write(struct adv748x_state *state, u8 page, u8 reg, u8 value);
int adv748x_write_block(struct adv748x_state *state, int client_page,
			unsigned int init_reg, const void *val,
			size_t val_len);

/* Per-page accessor shorthands; the *_clrset variants do a
 * read-modify-write with mask @m and value @v. */
#define io_read(s, r) adv748x_read(s, ADV748X_PAGE_IO, r)
#define io_write(s, r, v) adv748x_write(s, ADV748X_PAGE_IO, r, v)
#define io_clrset(s, r, m, v) io_write(s, r, (io_read(s, r) & ~m) | v)

#define hdmi_read(s, r) adv748x_read(s, ADV748X_PAGE_HDMI, r)
/* Combine two consecutive registers into one 16-bit value under @m. */
#define hdmi_read16(s, r, m) (((hdmi_read(s, r) << 8) | hdmi_read(s, r+1)) & m)
#define hdmi_write(s, r, v) adv748x_write(s, ADV748X_PAGE_HDMI, r, v)

#define repeater_read(s, r) adv748x_read(s, ADV748X_PAGE_REPEATER, r)
#define repeater_write(s, r, v) adv748x_write(s, ADV748X_PAGE_REPEATER, r, v)

#define sdp_read(s, r) adv748x_read(s, ADV748X_PAGE_SDP, r)
#define sdp_write(s, r, v) adv748x_write(s, ADV748X_PAGE_SDP, r, v)
#define sdp_clrset(s, r, m, v) sdp_write(s, r, (sdp_read(s, r) & ~m) | v)

#define cp_read(s, r) adv748x_read(s, ADV748X_PAGE_CP, r)
#define cp_write(s, r, v) adv748x_write(s, ADV748X_PAGE_CP, r, v)
#define cp_clrset(s, r, m, v) cp_write(s, r, (cp_read(s, r) & ~m) | v)

#define txa_read(s, r) adv748x_read(s, ADV748X_PAGE_TXA, r)
#define txb_read(s, r) adv748x_read(s, ADV748X_PAGE_TXB, r)

/* TX accessors resolve the page from the adv748x_csi2 context @t. */
#define tx_read(t, r) adv748x_read(t->state, t->page, r)
#define tx_write(t, r, v) adv748x_write(t->state, t->page, r, v)
/*
 * adv748x_get_remote_sd - resolve the subdevice on the far side of a
 * media pad's active link, or NULL if the pad is unconnected.
 */
static inline struct v4l2_subdev *adv748x_get_remote_sd(struct media_pad *pad)
{
	pad = media_entity_remote_pad(pad);
	if (!pad)
		return NULL;

	return media_entity_to_v4l2_subdev(pad->entity);
}
/* Core helpers: common subdevice setup and registration. */
void adv748x_subdev_init(struct v4l2_subdev *sd, struct adv748x_state *state,
			 const struct v4l2_subdev_ops *ops, u32 function,
			 const char *ident);

int adv748x_register_subdevs(struct adv748x_state *state,
			     struct v4l2_device *v4l2_dev);

/* Power the CSI-2 transmitters up/down. */
int adv748x_txa_power(struct adv748x_state *state, bool on);
int adv748x_txb_power(struct adv748x_state *state, bool on);

/* Per-block init/cleanup entry points. */
int adv748x_afe_init(struct adv748x_afe *afe);
void adv748x_afe_cleanup(struct adv748x_afe *afe);

int adv748x_csi2_init(struct adv748x_state *state, struct adv748x_csi2 *tx);
void adv748x_csi2_cleanup(struct adv748x_csi2 *tx);
int adv748x_csi2_set_pixelrate(struct v4l2_subdev *sd, s64 rate);

int adv748x_hdmi_init(struct adv748x_hdmi *hdmi);
void adv748x_hdmi_cleanup(struct adv748x_hdmi *hdmi);

#endif /* _ADV748X_H_ */
|
from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup

# ROS catkin boilerplate: generate_distutils_setup() derives the setup
# arguments (name, version, ...) from the package's package.xml; no
# overrides are supplied here.
d = generate_distutils_setup(
)

setup(**d)
|
/**
 * Run `callback` during browser idle time via requestIdleCallback,
 * falling back to a 32 ms timeout where the API is unsupported.
 */
export function rIC(callback) {
  if ('requestIdleCallback' in window) {
    window.requestIdleCallback(callback);
    return;
  }
  setTimeout(callback, 32);
}
/**
 * True when `el` both supports shadow DOM (`attachShadow`) and already
 * has an attached shadow root.
 */
export function hasShadowDom(el) {
  return Boolean(el.shadowRoot) && Boolean(el.attachShadow);
}
/**
 * Locate the `ion-label` belonging to the `ion-item` that wraps
 * `componentEl`, or null when the component is not inside an item.
 */
export function findItemLabel(componentEl) {
  const itemEl = componentEl.closest('ion-item');
  return itemEl ? itemEl.querySelector('ion-label') : null;
}
/**
 * Keep a hidden `<input class="aux-input">` inside `container` in sync
 * with the given name/value/disabled state so the custom control
 * participates in native form submission. The input is lazily created
 * on first use; nothing happens unless `always` is set or the
 * container uses shadow DOM (where an inner native input would be
 * invisible to the form).
 */
export function renderHiddenInput(always, container, name, value, disabled) {
  if (always || hasShadowDom(container)) {
    let input = container.querySelector('input.aux-input');
    if (!input) {
      input = container.ownerDocument.createElement('input');
      input.type = 'hidden';
      input.classList.add('aux-input');
      container.appendChild(input);
    }
    input.disabled = disabled;
    input.name = name;
    input.value = value || '';
  }
}
/**
 * Clamp `n` into [min, max]. Applies the upper bound first, then the
 * lower bound (so `min` wins if the bounds are inverted, matching the
 * original Math.max(min, Math.min(n, max)) behaviour).
 */
export function clamp(min, n, max) {
  const upperBounded = Math.min(n, max);
  return Math.max(min, upperBounded);
}
/**
 * Dev-time invariant check: when `actual` is falsy, log the message,
 * pause in the debugger (a no-op unless dev tools are attached) and
 * throw. Does nothing when `actual` is truthy.
 */
export function assert(actual, reason) {
  if (!actual) {
    const message = 'ASSERT: ' + reason;
    console.error(message);
    debugger; // tslint:disable-line
    throw new Error(message);
  }
}
/**
 * Timestamp of an event, falling back to Date.now() when the event has
 * no (or a zero) timeStamp.
 */
export function now(ev) {
  const stamp = ev.timeStamp;
  return stamp ? stamp : Date.now();
}
/**
 * Normalise a mouse or touch event to a single {x, y} point.
 * Touch events win over mouse coordinates; anything unrecognisable
 * (including a missing event) yields the origin.
 */
export function pointerCoord(ev) {
  if (!ev) {
    return { x: 0, y: 0 };
  }
  const touches = ev.changedTouches;
  if (touches && touches.length > 0) {
    const { clientX, clientY } = touches[0];
    return { x: clientX, y: clientY };
  }
  if (ev.pageX !== undefined) {
    return { x: ev.pageX, y: ev.pageY };
  }
  return { x: 0, y: 0 };
}
/**
 * @hidden
 * Given a side, return if it should be on the end
 * based on the value of dir
 * @param side the side
 * @param isRTL whether the application dir is rtl
 */
export function isEndSide(side) {
  const isRTL = document.dir === 'rtl';
  if (side === 'start') {
    return isRTL;
  }
  if (side === 'end') {
    return !isRTL;
  }
  throw new Error(`"${side}" is not a valid value for [side]. Use "start" or "end" instead.`);
}
/**
 * Defer an event emitter's emissions to the next tick — simply a
 * zero-wait debounce.
 */
export function deferEvent(event) {
  return debounceEvent(event, 0);
}
/**
 * Wrap an event emitter so its `emit` is debounced by `wait` ms.
 * Re-wrapping an already-debounced emitter unwraps to the original
 * first (via `_original`), so debounces never stack.
 */
export function debounceEvent(event, wait) {
  const original = event._original || event;
  const debouncedEmit = debounce(original.emit.bind(original), wait);
  return {
    _original: event,
    emit: debouncedEmit
  };
}
/**
 * Debounce `func` by `wait` ms: each call cancels the previous pending
 * invocation, and the latest arguments are forwarded when the timer
 * finally fires.
 */
export function debounce(func, wait = 0) {
  let pending;
  return (...args) => {
    clearTimeout(pending);
    pending = setTimeout(func, wait, ...args);
  };
}
|
import { REQUEST_STATUS } from '~/util/constants'
/**
 * Aurelia value converter mapping a request status to the Bootstrap
 * list-group contextual class used to colour its badge. Unknown
 * statuses yield `undefined` (no class applied).
 */
export class ComputeBadgeValueConverter {
  toView(value) {
    if (value === REQUEST_STATUS.APPROVED) {
      return 'list-group-item-success';
    }
    if (value === REQUEST_STATUS.REJECTED) {
      return 'list-group-item-danger';
    }
    if (value === REQUEST_STATUS.PENDING) {
      return 'list-group-item-info';
    }
  }
}
|
import Controller from "./wrappers/Controller";
// Decorator factory: capture the root path, then wrap the decorated
// class in a Controller bound to that path.
export default (rootPath) => (wrapClass) => new Controller(wrapClass, rootPath);
|
from elasticsearch import Elasticsearch
import time
import json

# Demo script: create a 'customer' index, insert two documents, fetch
# one by id, then search by first name.
# NOTE(review): replace '<host:port>' with a real address before running.
es = Elasticsearch(['<host:port>'])
print(es)

# Create index — drop any existing one first so reruns start clean.
if es.indices.exists(index='customer'):
    es.indices.delete(index='customer')
# ignore=400 swallows "index already exists" style errors.
# NOTE(review): the 'customer' mapping type and doc_type= below are
# pre-ES7 conventions — confirm the target cluster version.
create_index = es.indices.create(
    index='customer',
    body={
        "settings" : {
            "index" : {
                "number_of_shards" : 5,
                "number_of_replicas" : 0
            }
        },
        "mappings": {
            "customer" : {
                "properties" : {
                    "username": { "type" : "text"},
                    "first_name" : { "type" : "text"},
                    "last_name" : { "type" : "text"},
                    "time_epoch" : { "type" : "long" }
                }
            }
        }
    },
    ignore=400)
print(create_index)

# Adding a document to the index
record1={
    "username":"john123",
    "first_name":"Henry",
    "last_name":"Gates",
    "time_epoch": int(time.time())
}
record2={
    "username":"billg",
    "first_name":"Bill",
    "last_name":"Gates",
    "time_epoch": int(time.time())
}
put_record_1 = es.index(index='customer',doc_type='customer',id=1,body=record1)
put_record_2 = es.index(index='customer',doc_type='customer',id=2,body=record2)
print(put_record_1,put_record_2)

# Get document by id
print("\n\n")
abc = es.get(index='customer',doc_type='customer',id=1)
print(abc['_source'])

# Search document with first_name. The sleep gives the index a moment
# to refresh so the just-written documents are searchable.
print("\n\n")
time.sleep(1)
res = es.search(index='customer', body={'query':{'match':{'first_name':'Bill'}}})
print("%d documents found" % res['hits']['total'])
for doc in res['hits']['hits']:
    print(json.dumps(doc['_source'], indent=4, sort_keys=True))
|
def test_seek_offset(self):
    """Tests the seek_offset function."""
    # NOTE: this is a code-generation template — ${library_name_suffix},
    # ${type_name} and ${python_module_name} are substituted by the test
    # generator; the file is not directly runnable Python.
    if not unittest.source:
        return
    ${library_name_suffix}_${type_name} = ${python_module_name}.${type_name}()
    ${library_name_suffix}_${type_name}.open(unittest.source)
    file_size = ${library_name_suffix}_${type_name}.get_size()
    # Absolute seek (SEEK_SET) positions at the exact offset.
    ${library_name_suffix}_${type_name}.seek_offset(16, os.SEEK_SET)
    offset = ${library_name_suffix}_${type_name}.get_offset()
    self.assertEqual(offset, 16)
    # Relative seek (SEEK_CUR) moves forward and backward from the current
    # position.
    ${library_name_suffix}_${type_name}.seek_offset(16, os.SEEK_CUR)
    offset = ${library_name_suffix}_${type_name}.get_offset()
    self.assertEqual(offset, 32)
    ${library_name_suffix}_${type_name}.seek_offset(-16, os.SEEK_CUR)
    offset = ${library_name_suffix}_${type_name}.get_offset()
    self.assertEqual(offset, 16)
    # End-relative seek (SEEK_END); seeking past the end is allowed and
    # yields an offset beyond file_size.
    ${library_name_suffix}_${type_name}.seek_offset(-16, os.SEEK_END)
    offset = ${library_name_suffix}_${type_name}.get_offset()
    self.assertEqual(offset, file_size - 16)
    ${library_name_suffix}_${type_name}.seek_offset(16, os.SEEK_END)
    offset = ${library_name_suffix}_${type_name}.get_offset()
    self.assertEqual(offset, file_size + 16)
    # Seeking to a negative resulting offset, or with an invalid whence,
    # must fail.
    # TODO: change IOError into ValueError
    with self.assertRaises(IOError):
        ${library_name_suffix}_${type_name}.seek_offset(-1, os.SEEK_SET)
    # TODO: change IOError into ValueError
    with self.assertRaises(IOError):
        ${library_name_suffix}_${type_name}.seek_offset(-32 - file_size, os.SEEK_CUR)
    # TODO: change IOError into ValueError
    with self.assertRaises(IOError):
        ${library_name_suffix}_${type_name}.seek_offset(-32 - file_size, os.SEEK_END)
    # TODO: change IOError into ValueError
    with self.assertRaises(IOError):
        ${library_name_suffix}_${type_name}.seek_offset(0, -1)
    ${library_name_suffix}_${type_name}.close()
    # Test the seek without open.
    with self.assertRaises(IOError):
        ${library_name_suffix}_${type_name}.seek_offset(16, os.SEEK_SET)
|
import {
MODE_ERASER, MODE_REDUCER, MODE_WRITE,
Actions, AUTO_SIZE, REDUCE_RATIO, MOVE_COUNT,
} from "./index";
import * as utils from "./utils";
import Drawer from "./drawer";
import PlotTable from "./plot_table";
import History from "./history";
// Coordinates the point store (PlotTable), canvas renderer (Drawer) and
// undo/redo log (History) in response to mouse/UI events. Three modes:
// MODE_WRITE (scatter points), MODE_ERASER (delete in a square brush),
// MODE_REDUCER (thin out a fraction of points under the brush).
export default class Manager {
  constructor(height, width, maxCount, fileName) {
    this.table = new PlotTable(height, width); // point storage / spatial lookup
    this.drawer = new Drawer();                // canvas + overlay rendering
    this.history = new History();              // undo/redo action log
    this.mode = MODE_WRITE;
    this.down = false;                         // is the mouse button held?
    this.pointer = { x: width / 2, y: height / 2 }; // last cursor position
    this.pointerSize = 25;                     // brush radius in pixels
    this.pointCount = 5;                       // points generated per stroke step
    this.maxCount = maxCount;                  // hard cap on stored points
    this.fileName = fileName;                  // name used for downloads
    this.moveCount = 0;                        // throttles generation while dragging
  }
  // Number of points currently stored.
  get count() {
    return this.table.size;
  }
  // Whether undo is available.
  get canPrev() {
    return this.history.canPrev;
  }
  // Whether redo is available.
  get canNext() {
    return this.history.canNext;
  }
  // Switch between write/eraser/reducer modes and redraw the brush cursor.
  changeMode(mode) {
    console.log("change mode to : " + mode);
    this.mode = mode;
    this.refreshPointer();
  }
  // Set points-per-step and derive a matching brush size (clamped to 100).
  updatePointCount(count) {
    this.pointCount = count;
    if (count <= 1) {
      this.pointerSize = 1;
    } else if (count <= 3) {
      this.pointerSize = count * 5;
    } else if (count <= 20) {
      this.pointerSize = count * 10;
    } else {
      this.pointCount = 100;
      this.pointerSize = 100;
    }
    this.refreshPointer();
  }
  updateCanvas(canvas) {
    this.drawer.updateCanvas(canvas);
  }
  updateOverlay(overlay) {
    this.drawer.updateOverlay(overlay);
    this.refreshPointer();
  }
  // Export the current canvas as a file download.
  download() {
    this.reload();
    const base64 = this.drawer.dataUrl();
    const blob = utils.Base64toBlob(base64);
    utils.saveBlob(blob, this.fileName);
  }
  // Begin a stroke: open a new history step and treat the press as the
  // first move.
  onMouseDown(e) {
    this.down = true;
    this.history.buildStep();
    console.log("onMouseDown");
    this.moveCount = 0;
    this.onMouseMove(e);
  }
  // Track the cursor; while the button is down, apply the active mode.
  // In write mode, generation only fires every MOVE_COUNT-th move event.
  onMouseMove(e) {
    const { x, y } = this.pointer = utils.getPosition(e);
    this.refreshPointer();
    if (this.down) {
      switch (this.mode) {
        case MODE_WRITE:
          if (this.moveCount == 0) {
            console.log("generate", x, y);
            this.generatePoints(x, y);
          }
          this.moveCount = ++this.moveCount % MOVE_COUNT;
          break;
        case MODE_ERASER:
          // NOTE: "erace" [sic] — existing method name, kept as-is.
          console.log("erace", x, y);
          this.erace(x, y);
          this.reload();
          break;
        case MODE_REDUCER:
          console.log("reduce", x, y);
          this.reduce(x, y);
          this.reload();
          break;
      }
    }
  }
  // End the stroke and redraw everything.
  onMouseUp(e) {
    console.log("onMouseUp");
    this.down = false;
    this.reload();
  }
  // Scatter pointCount random points inside the brush circle; only points
  // accepted by the table (e.g. not duplicates) are drawn and recorded.
  generatePoints(baseX, baseY) {
    for (let i = 0; i < this.pointCount; i++) {
      const { x, y } = utils.randomCircle(baseX, baseY, this.pointerSize);
      if (this.table.add(x, y)) {
        this.drawer.add(x, y);
        this.history.add({ type: Actions.ADD, x: x, y: y });
      }
    }
  }
  // Delete every point inside the square brush around (baseX, baseY).
  erace(baseX, baseY) {
    const half = this.pointerSize;
    const delPoints = this.table.select(baseX - half, baseX + half, baseY - half, baseY + half);
    delPoints.forEach((p) => {
      if (this.table.del(p.x, p.y)) {
        this.history.add({ type: Actions.DEL, x: p.x, y: p.y });
      }
    });
  }
  // Randomly delete REDUCE_RATIO of the points inside the square brush.
  reduce(baseX, baseY) {
    const half = this.pointerSize;
    const delPoints = this.table.select(baseX - half, baseX + half, baseY - half, baseY + half);
    for (let delCount = Math.ceil(delPoints.length * REDUCE_RATIO); delCount > 0; delCount--) {
      const i = Math.trunc(Math.random() * delPoints.length);
      if (this.table.del(delPoints[i].x, delPoints[i].y)) {
        this.history.add({ type: Actions.DEL, x: delPoints[i].x, y: delPoints[i].y });
      }
      delPoints.splice(i, 1);
    }
  }
  // Redraw the full point set from the table.
  reload() {
    this.drawer.drawAll(this.table.all());
  }
  // Redraw the brush cursor on the overlay at the current pointer position.
  refreshPointer() {
    const { x, y } = this.pointer;
    this.drawer.currentPointer(x, y, this.pointerSize, this.mode);
  }
  // Grow the point set (one history step) until maxCount is reached, then
  // trim any overshoot and redraw.
  autoFit() {
    this.history.buildStep();
    while (this.table.size > 0 && this.table.size < this.maxCount) {
      this.generateAuto();
    }
    this.removeToMax();
    this.reload();
  }
  // One growth pass: spawn a normally-distributed neighbor for every
  // existing point.
  generateAuto() {
    this.table.all().forEach((point) => {
      const { x, y } = utils.randomNorm(point.x, point.y, AUTO_SIZE);
      if (this.table.add(x, y)) {
        this.history.add({ type: Actions.ADD, x: x, y: y });
      }
    });
  }
  // Randomly delete points until the table is back under maxCount.
  // NOTE(review): the x >= 0 check presumably filters a sentinel returned by
  // removeIndex on failure — confirm against PlotTable.
  removeToMax() {
    while (this.table.size > this.maxCount) {
      const { x, y } = this.table.removeIndex(Math.trunc(Math.random() * this.table.size));
      if (x >= 0) {
        this.history.add({ type: Actions.DEL, "x": x, "y": y });
      }
    }
  }
  // Redo: replay the next history step's actions against the table.
  goNext() {
    const list = this.history.next();
    list.forEach((action) => {
      switch (action.type) {
        case Actions.ADD:
          this.table.add(action.x, action.y);
          break;
        case Actions.DEL:
          this.table.del(action.x, action.y);
          break;
      }
    });
    this.reload()
  }
  // Undo: apply the previous step's actions inverted, in reverse order.
  goPrev() {
    const list = this.history.prev();
    list.reverse().forEach((action) => {
      switch (action.type) {
        case Actions.ADD:
          this.table.del(action.x, action.y);
          break;
        case Actions.DEL:
          this.table.add(action.x, action.y);
          break;
      }
    });
    this.reload()
  }
}
|
/* @generated */
// prettier-ignore
if (Intl.RelativeTimeFormat && typeof Intl.RelativeTimeFormat.__addLocaleData === 'function') {
Intl.RelativeTimeFormat.__addLocaleData({"data":{"qu-EC":{"nu":["latn"],"year":{"0":"kunan wata","1":"hamuq wata","future":{"other":"+{0} y"},"past":{"other":"-{0} y"},"-1":"qayna wata"},"year-short":{"0":"kunan wata","1":"hamuq wata","future":{"other":"+{0} y"},"past":{"other":"-{0} y"},"-1":"qayna wata"},"year-narrow":{"0":"kunan wata","1":"hamuq wata","future":{"other":"+{0} y"},"past":{"other":"-{0} y"},"-1":"qayna wata"},"quarter":{"0":"kunan kimsa killa","1":"hamuq kimsa killa","future":{"other":"+{0} Q"},"past":{"other":"-{0} Q"},"-1":"qayna kimsa killa"},"quarter-short":{"0":"kunan kimsa killa","1":"hamuq kimsa killa","future":{"other":"+{0} Q"},"past":{"other":"-{0} Q"},"-1":"qayna kimsa killa"},"quarter-narrow":{"0":"kunan kimsa killa","1":"hamuq kimsa killa","future":{"other":"+{0} Q"},"past":{"other":"-{0} Q"},"-1":"qayna kimsa killa"},"month":{"0":"kunan killa","1":"hamuq killa","future":{"other":"+{0} m"},"past":{"other":"-{0} m"},"-1":"qayna killa"},"month-short":{"0":"kunan killa","1":"hamuq killa","future":{"other":"+{0} m"},"past":{"other":"-{0} m"},"-1":"qayna killa"},"month-narrow":{"0":"kunan killa","1":"hamuq killa","future":{"other":"+{0} m"},"past":{"other":"-{0} m"},"-1":"qayna killa"},"week":{"0":"kunan semana","1":"hamuq semana","future":{"other":"+{0} w"},"past":{"other":"-{0} w"},"-1":"qayna semana"},"week-short":{"0":"kunan semana","1":"hamuq semana","future":{"other":"+{0} w"},"past":{"other":"-{0} w"},"-1":"qayna semana"},"week-narrow":{"0":"kunan semana","1":"hamuq semana","future":{"other":"+{0} w"},"past":{"other":"-{0} w"},"-1":"qayna semana"},"day":{"0":"kunan punchaw","1":"paqarin","future":{"other":"+{0} d"},"past":{"other":"-{0} d"},"-1":"qayna punchaw"},"day-short":{"0":"kunan punchaw","1":"paqarin","future":{"other":"+{0} d"},"past":{"other":"-{0} d"},"-1":"qayna punchaw"},"day-narrow":{"0":"kunan punchaw","1":"paqarin","future":{"other":"+{0} d"},"past":{"other":"-{0} d"},"-1":"qayna punchaw"},"hour":{"0":"kay 
hora","future":{"other":"+{0} h"},"past":{"other":"-{0} h"}},"hour-short":{"0":"kay hora","future":{"other":"+{0} h"},"past":{"other":"-{0} h"}},"hour-narrow":{"0":"kay hora","future":{"other":"+{0} h"},"past":{"other":"-{0} h"}},"minute":{"0":"kay minuto","future":{"other":"+{0} min"},"past":{"other":"-{0} min"}},"minute-short":{"0":"kay minuto","future":{"other":"+{0} min"},"past":{"other":"-{0} min"}},"minute-narrow":{"0":"kay minuto","future":{"other":"+{0} min"},"past":{"other":"-{0} min"}},"second":{"0":"now","future":{"other":"+{0} s"},"past":{"other":"-{0} s"}},"second-short":{"0":"now","future":{"other":"+{0} s"},"past":{"other":"-{0} s"}},"second-narrow":{"0":"now","future":{"other":"+{0} s"},"past":{"other":"-{0} s"}}}},"availableLocales":["qu-EC"]}
)
}
|
// Copyright (c) 2019 GitHub, Inc.
// Use of this source code is governed by the MIT license that can be
// found in the LICENSE file.
#ifndef SHELL_BROWSER_NET_PROXYING_URL_LOADER_FACTORY_H_
#define SHELL_BROWSER_NET_PROXYING_URL_LOADER_FACTORY_H_
#include <map>
#include <memory>
#include <set>
#include <string>
#include <vector>
#include "base/optional.h"
#include "content/public/browser/content_browser_client.h"
#include "extensions/browser/api/web_request/web_request_info.h"
#include "mojo/public/cpp/bindings/binding.h"
#include "mojo/public/cpp/bindings/pending_receiver.h"
#include "mojo/public/cpp/bindings/pending_remote.h"
#include "mojo/public/cpp/bindings/receiver_set.h"
#include "mojo/public/cpp/bindings/remote.h"
#include "services/network/public/cpp/resource_request.h"
#include "services/network/public/mojom/network_context.mojom.h"
#include "services/network/public/mojom/url_loader.mojom.h"
#include "services/network/public/mojom/url_response_head.mojom.h"
#include "shell/browser/net/atom_url_loader_factory.h"
namespace electron {
// Defines the interface for WebRequest API, implemented by api::WebRequest.
class WebRequestAPI {
 public:
  virtual ~WebRequestAPI() {}

  // Reports the headers a listener removed and the headers it set/overrode,
  // plus a net error code for the stage.
  using BeforeSendHeadersCallback =
      base::OnceCallback<void(const std::set<std::string>& removed_headers,
                              const std::set<std::string>& set_headers,
                              int error_code)>;

  // True when at least one webRequest listener is registered.
  virtual bool HasListener() const = 0;
  // The On*() hooks below mirror the stages of the webRequest lifecycle.
  // Hooks returning int report a net error code; hooks taking a callback may
  // complete asynchronously by running the callback when the listener is
  // done. Out-parameters (new_url, headers, override_response_headers, ...)
  // receive modifications requested by listeners.
  virtual int OnBeforeRequest(extensions::WebRequestInfo* info,
                              const network::ResourceRequest& request,
                              net::CompletionOnceCallback callback,
                              GURL* new_url) = 0;
  virtual int OnBeforeSendHeaders(extensions::WebRequestInfo* info,
                                  const network::ResourceRequest& request,
                                  BeforeSendHeadersCallback callback,
                                  net::HttpRequestHeaders* headers) = 0;
  virtual int OnHeadersReceived(
      extensions::WebRequestInfo* info,
      const network::ResourceRequest& request,
      net::CompletionOnceCallback callback,
      const net::HttpResponseHeaders* original_response_headers,
      scoped_refptr<net::HttpResponseHeaders>* override_response_headers,
      GURL* allowed_unsafe_redirect_url) = 0;
  virtual void OnSendHeaders(extensions::WebRequestInfo* info,
                             const network::ResourceRequest& request,
                             const net::HttpRequestHeaders& headers) = 0;
  virtual void OnBeforeRedirect(extensions::WebRequestInfo* info,
                                const network::ResourceRequest& request,
                                const GURL& new_location) = 0;
  virtual void OnResponseStarted(extensions::WebRequestInfo* info,
                                 const network::ResourceRequest& request) = 0;
  virtual void OnErrorOccurred(extensions::WebRequestInfo* info,
                               const network::ResourceRequest& request,
                               int net_error) = 0;
  virtual void OnCompleted(extensions::WebRequestInfo* info,
                           const network::ResourceRequest& request,
                           int net_error) = 0;
  // Called when |info| is about to go away; listeners must drop any pointers
  // to it.
  virtual void OnRequestWillBeDestroyed(extensions::WebRequestInfo* info) = 0;
};
// This class is responsible for following tasks when NetworkService is enabled:
// 1. handling intercepted protocols;
// 2. implementing webRequest module;
//
// For the task #2, the code is referenced from the
// extensions::WebRequestProxyingURLLoaderFactory class.
class ProxyingURLLoaderFactory
    : public network::mojom::URLLoaderFactory,
      public network::mojom::TrustedURLLoaderHeaderClient {
 public:
  // Proxies a single in-flight request: sits between the renderer-facing
  // URLLoader/URLLoaderClient pipes and the real network loader so that
  // webRequest listeners can observe and modify each stage.
  class InProgressRequest : public network::mojom::URLLoader,
                            public network::mojom::URLLoaderClient,
                            public network::mojom::TrustedHeaderClient {
   public:
    InProgressRequest(
        ProxyingURLLoaderFactory* factory,
        int64_t web_request_id,
        int32_t routing_id,
        int32_t network_service_request_id,
        uint32_t options,
        const network::ResourceRequest& request,
        const net::MutableNetworkTrafficAnnotationTag& traffic_annotation,
        network::mojom::URLLoaderRequest loader_request,
        mojo::PendingRemote<network::mojom::URLLoaderClient> client);
    ~InProgressRequest() override;

    // (Re)starts the proxied request; also used after a redirect.
    void Restart();

    // network::mojom::URLLoader:
    void FollowRedirect(const std::vector<std::string>& removed_headers,
                        const net::HttpRequestHeaders& modified_headers,
                        const base::Optional<GURL>& new_url) override;
    void SetPriority(net::RequestPriority priority,
                     int32_t intra_priority_value) override;
    void PauseReadingBodyFromNet() override;
    void ResumeReadingBodyFromNet() override;

    // network::mojom::URLLoaderClient:
    void OnReceiveResponse(network::mojom::URLResponseHeadPtr head) override;
    void OnReceiveRedirect(const net::RedirectInfo& redirect_info,
                           network::mojom::URLResponseHeadPtr head) override;
    void OnUploadProgress(int64_t current_position,
                          int64_t total_size,
                          OnUploadProgressCallback callback) override;
    void OnReceiveCachedMetadata(mojo_base::BigBuffer data) override;
    void OnTransferSizeUpdated(int32_t transfer_size_diff) override;
    void OnStartLoadingResponseBody(
        mojo::ScopedDataPipeConsumerHandle body) override;
    void OnComplete(const network::URLLoaderCompletionStatus& status) override;

    // Binds this request as the TrustedHeaderClient for its loader.
    void OnLoaderCreated(
        mojo::PendingReceiver<network::mojom::TrustedHeaderClient> receiver);

    // network::mojom::TrustedHeaderClient:
    void OnBeforeSendHeaders(const net::HttpRequestHeaders& headers,
                             OnBeforeSendHeadersCallback callback) override;
    void OnHeadersReceived(const std::string& headers,
                           const net::IPEndPoint& endpoint,
                           OnHeadersReceivedCallback callback) override;

   private:
    // These two methods combined form the implementation of Restart().
    void UpdateRequestInfo();
    void RestartInternal();

    // Continuation steps of the webRequest pipeline; each is invoked (often
    // asynchronously) with the net error code of the previous stage.
    void ContinueToBeforeSendHeaders(int error_code);
    void ContinueToSendHeaders(const std::set<std::string>& removed_headers,
                               const std::set<std::string>& set_headers,
                               int error_code);
    void ContinueToStartRequest(int error_code);
    void ContinueToHandleOverrideHeaders(int error_code);
    void ContinueToResponseStarted(int error_code);
    void ContinueToBeforeRedirect(const net::RedirectInfo& redirect_info,
                                  int error_code);
    void HandleBeforeRequestRedirect();
    void HandleResponseOrRedirectHeaders(
        net::CompletionOnceCallback continuation);
    void OnRequestError(const network::URLLoaderCompletionStatus& status);

    ProxyingURLLoaderFactory* factory_;  // Owns this request; not owned here.
    network::ResourceRequest request_;
    const base::Optional<url::Origin> original_initiator_;
    const uint64_t request_id_;  // Internally generated webRequest id.
    const int32_t routing_id_;
    const int32_t network_service_request_id_;
    const uint32_t options_;
    const net::MutableNetworkTrafficAnnotationTag traffic_annotation_;
    mojo::Binding<network::mojom::URLLoader> proxied_loader_binding_;
    mojo::Remote<network::mojom::URLLoaderClient> target_client_;
    base::Optional<extensions::WebRequestInfo> info_;
    network::mojom::URLResponseHeadPtr current_response_;
    scoped_refptr<net::HttpResponseHeaders> override_headers_;
    GURL redirect_url_;
    mojo::Receiver<network::mojom::URLLoaderClient> proxied_client_receiver_{
        this};
    network::mojom::URLLoaderPtr target_loader_;
    bool request_completed_ = false;

    // If |has_any_extra_headers_listeners_| is set to true, the request will be
    // sent with the network::mojom::kURLLoadOptionUseHeaderClient option, and
    // we expect events to come through the
    // network::mojom::TrustedURLLoaderHeaderClient binding on the factory. This
    // is only set to true if there is a listener that needs to view or modify
    // headers set in the network process.
    bool has_any_extra_headers_listeners_ = false;
    bool current_request_uses_header_client_ = false;
    OnBeforeSendHeadersCallback on_before_send_headers_callback_;
    OnHeadersReceivedCallback on_headers_received_callback_;
    mojo::Receiver<network::mojom::TrustedHeaderClient> header_client_receiver_{
        this};

    // If |has_any_extra_headers_listeners_| is set to false and a redirect is
    // in progress, this stores the parameters to FollowRedirect that came from
    // the client. That way we can combine it with any other changes that
    // extensions made to headers in their callbacks.
    struct FollowRedirectParams {
      FollowRedirectParams();
      ~FollowRedirectParams();
      std::vector<std::string> removed_headers;
      net::HttpRequestHeaders modified_headers;
      base::Optional<GURL> new_url;
      DISALLOW_COPY_AND_ASSIGN(FollowRedirectParams);
    };
    std::unique_ptr<FollowRedirectParams> pending_follow_redirect_params_;

    base::WeakPtrFactory<InProgressRequest> weak_factory_{this};
    DISALLOW_COPY_AND_ASSIGN(InProgressRequest);
  };

  ProxyingURLLoaderFactory(
      WebRequestAPI* web_request_api,
      const HandlersMap& intercepted_handlers,
      content::BrowserContext* browser_context,
      int render_process_id,
      std::unique_ptr<extensions::ExtensionNavigationUIData> navigation_ui_data,
      base::Optional<int64_t> navigation_id,
      network::mojom::URLLoaderFactoryRequest loader_request,
      mojo::PendingRemote<network::mojom::URLLoaderFactory>
          target_factory_remote,
      mojo::PendingReceiver<network::mojom::TrustedURLLoaderHeaderClient>
          header_client_receiver,
      content::ContentBrowserClient::URLLoaderFactoryType loader_factory_type);
  ~ProxyingURLLoaderFactory() override;

  // network::mojom::URLLoaderFactory:
  void CreateLoaderAndStart(
      mojo::PendingReceiver<network::mojom::URLLoader> loader,
      int32_t routing_id,
      int32_t request_id,
      uint32_t options,
      const network::ResourceRequest& request,
      mojo::PendingRemote<network::mojom::URLLoaderClient> client,
      const net::MutableNetworkTrafficAnnotationTag& traffic_annotation)
      override;
  void Clone(mojo::PendingReceiver<network::mojom::URLLoaderFactory>
                 loader_receiver) override;

  // network::mojom::TrustedURLLoaderHeaderClient:
  void OnLoaderCreated(
      int32_t request_id,
      mojo::PendingReceiver<network::mojom::TrustedHeaderClient> receiver)
      override;
  void OnLoaderForCorsPreflightCreated(
      const network::ResourceRequest& request,
      mojo::PendingReceiver<network::mojom::TrustedHeaderClient> receiver)
      override {}

  WebRequestAPI* web_request_api() { return web_request_api_; }

  bool IsForServiceWorkerScript() const;

 private:
  void OnTargetFactoryError();
  void OnProxyBindingError();
  // Drops the bookkeeping for a finished request and possibly deletes the
  // factory once nothing is pending.
  void RemoveRequest(int32_t network_service_request_id, uint64_t request_id);
  void MaybeDeleteThis();
  bool ShouldIgnoreConnectionsLimit(const network::ResourceRequest& request);

  // Passed from api::WebRequest.
  WebRequestAPI* web_request_api_;

  // This is passed from api::Protocol.
  //
  // The Protocol instance lives through the lifetime of BrowserContenxt,
  // which is guarenteed to cover the lifetime of URLLoaderFactory, so the
  // reference is guarenteed to be valid.
  //
  // In this way we can avoid using code from api namespace in this file.
  const HandlersMap& intercepted_handlers_;

  content::BrowserContext* const browser_context_;
  const int render_process_id_;
  std::unique_ptr<extensions::ExtensionNavigationUIData> navigation_ui_data_;
  base::Optional<int64_t> navigation_id_;
  mojo::ReceiverSet<network::mojom::URLLoaderFactory> proxy_receivers_;
  mojo::Remote<network::mojom::URLLoaderFactory> target_factory_;
  mojo::Receiver<network::mojom::TrustedURLLoaderHeaderClient>
      url_loader_header_client_receiver_{this};
  const content::ContentBrowserClient::URLLoaderFactoryType
      loader_factory_type_;

  // Mapping from our own internally generated request ID to an
  // InProgressRequest instance.
  std::map<uint64_t, std::unique_ptr<InProgressRequest>> requests_;

  // A mapping from the network stack's notion of request ID to our own
  // internally generated request ID for the same request.
  std::map<int32_t, uint64_t> network_request_id_to_web_request_id_;

  std::vector<std::string> ignore_connections_limit_domains_;

  DISALLOW_COPY_AND_ASSIGN(ProxyingURLLoaderFactory);
};
} // namespace electron
#endif // SHELL_BROWSER_NET_PROXYING_URL_LOADER_FACTORY_H_
|
import processRadioSchedule, {
getLink,
getProgramState,
} from './processRadioSchedule';
import persianRadioScheduleData from '#data/persian/bbc_persian_radio/schedule.json';
// getLink: live programmes link to the service's live radio stream; any
// other state links to the episode's page by pid.
describe('getLink', () => {
  let program;
  let service;
  beforeAll(() => {
    service = 'persian';
    // Base the fixture on real schedule data, overriding serviceId and the
    // episode pid so the expected URLs are deterministic.
    program = {
      ...persianRadioScheduleData.schedules[0],
      serviceId: 'bbc_dari_radio',
      episode: { pid: 'p07zbtbf' },
    };
  });
  it('should return liveradio link when state is live', () => {
    expect(getLink('live', program, service)).toBe(
      '/persian/bbc_dari_radio/liveradio',
    );
  });
  it('should return program link when state is not live', () => {
    expect(getLink('anyotherstate', program, service)).toBe(
      '/persian/bbc_dari_radio/p07zbtbf',
    );
  });
});
// getProgramState: classifies a programme relative to "now" —
// live (start <= now < end), onDemand (already ended), next (not started).
describe('getProgramState', () => {
  it('should return `live` when currentTime is greater than startTime but less than endTime', () => {
    const currentTime = Date.now();
    const startTime = currentTime - 1000;
    const endTime = currentTime + 1000;
    expect(getProgramState(currentTime, startTime, endTime)).toBe('live');
  });
  it('should return `onDemand` when currentTime is greater than endTime', () => {
    const currentTime = Date.now();
    const startTime = currentTime - 1000;
    const endTime = currentTime - 500;
    expect(getProgramState(currentTime, startTime, endTime)).toBe('onDemand');
  });
  it('should return `next` when startTime is greater than currentTime', () => {
    const currentTime = Date.now();
    const startTime = currentTime + 1000;
    const endTime = currentTime + 2000;
    expect(getProgramState(currentTime, startTime, endTime)).toBe('next');
  });
});
// processRadioSchedule: transforms raw schedule JSON into at most four
// display-ready programme objects, newest first; incomplete data yields
// undefined.
describe('processRadioSchedule', () => {
  let programs;
  const service = 'persian';
  describe('Complete schedule data', () => {
    beforeAll(() => {
      programs = processRadioSchedule(
        persianRadioScheduleData,
        service,
        Date.now(),
      );
    });
    it('should return an array of four programs', () => {
      expect(programs).toHaveLength(4);
    });
    it('should return the programs ordered by start time, newest first', () => {
      expect(programs[0].startTime).toBeGreaterThan(programs[1].startTime);
      expect(programs[1].startTime).toBeGreaterThan(programs[2].startTime);
      expect(programs[2].startTime).toBeGreaterThan(programs[3].startTime);
    });
    it('should return a program that has the right fields', () => {
      programs.forEach(program => {
        expect(program).toHaveProperty('id');
        expect(program).toHaveProperty('state');
        expect(program).toHaveProperty('startTime');
        expect(program).toHaveProperty('link');
        expect(program).toHaveProperty('brandTitle');
        expect(program).toHaveProperty('summary');
        expect(program).toHaveProperty('duration');
      });
    });
  });
  describe('Incomplete schedule data', () => {
    // Reduce the number of schedules to less than 4
    // NOTE(review): splice mutates the shared imported fixture in place, so
    // this block must run after 'Complete schedule data' — consider cloning
    // the fixture instead.
    beforeEach(() => {
      persianRadioScheduleData.schedules.splice(
        3,
        persianRadioScheduleData.schedules.length - 1,
      );
      programs = processRadioSchedule(
        persianRadioScheduleData,
        service,
        Date.now(),
      );
    });
    it('should return undefined when schedule data is incomplete', () => {
      expect(programs).toBeUndefined();
    });
  });
});
|
import React from 'react'
const Main = ({ children }) => (
<div className="container">
{children}
<style jsx>
{`
.container {
width: 87vw;
margin: 0 auto;
margin-top: 16px;
display: grid;
grid-template-columns: repeat(6, 0.1666667fr);
grid-template-rows: auto;
grid-gap: 10px;
grid-column-gap: 10px;
grid-row-gap: 16px;
}
`}
</style>
</div>
)
export default Main
|
const defaultTheme = require("tailwindcss/defaultTheme");
const plugin = require("tailwindcss/plugin");
const Color = require("color");
// Tailwind CSS configuration: custom palette, dark-mode variants and the
// official forms plugin.
module.exports = {
  // Paths scanned for class names when purging unused styles.
  purge: [
    "./vendor/laravel/framework/src/Illuminate/Pagination/resources/views/*.blade.php",
    "./storage/framework/views/*.php",
    "./resources/views/**/*.blade.php",
  ],
  theme: {
    // Used by the tailwindcss-multi-theme style of plugins to generate
    // "dark:" variants — NOTE(review): confirm the consuming plugin, since
    // it is not listed in `plugins` below.
    themeVariants: ["dark"],
    // NOTE(review): capitalized "Forms" is unusual — @tailwindcss/forms does
    // not read this key; verify which plugin consumes it.
    Forms: (theme) => ({
      default: {
        "input, textarea": {
          "&::placeholder": {
            color: theme("colors.gray.400"),
          },
        },
      },
    }),
    colors: {
      transparent: 'transparent',
      white: '#ffffff',
      black: '#000000',
      // NOTE(review): "redd" looks like a typo-named custom red; confirm it
      // is referenced in templates before renaming.
      redd: '#D11A2A',
      gray: {
        '50': '#f9fafb',
        '100': '#f4f5f7',
        '200': '#e5e7eb',
        '300': '#d5d6d7',
        '400': '#9e9e9e',
        '500': '#707275',
        '600': '#4c4f52',
        '700': '#24262d',
        '800': '#1a1c23',
        '900': '#121317',
      },
      'cool-gray': {
        '50': '#fbfdfe',
        '100': '#f1f5f9',
        '200': '#e2e8f0',
        '300': '#cfd8e3',
        '400': '#97a6ba',
        '500': '#64748b',
        '600': '#475569',
        '700': '#364152',
        '800': '#27303f',
        '900': '#1a202e',
      },
      red: {
        '50': '#fdf2f2',
        '100': '#fde8e8',
        '200': '#fbd5d5',
        '300': '#f8b4b4',
        '400': '#f98080',
        '500': '#f05252',
        '600': '#e02424',
        '700': '#c81e1e',
        '800': '#9b1c1c',
        '900': '#771d1d',
      },
      orange: {
        '50': '#fff8f1',
        '100': '#feecdc',
        '200': '#fcd9bd',
        '300': '#fdba8c',
        '400': '#ff8a4c',
        '500': '#ff5a1f',
        '600': '#d03801',
        '700': '#b43403',
        '800': '#8a2c0d',
        '900': '#771d1d',
      },
      yellow: {
        '50': '#fdfdea',
        '100': '#fdf6b2',
        '200': '#fce96a',
        '300': '#faca15',
        '400': '#e3a008',
        '500': '#c27803',
        '600': '#9f580a',
        '700': '#8e4b10',
        '800': '#723b13',
        '900': '#633112',
      },
      green: {
        '50': '#f3faf7',
        '100': '#def7ec',
        '200': '#bcf0da',
        '300': '#84e1bc',
        '400': '#31c48d',
        '500': '#0e9f6e',
        '600': '#057a55',
        '700': '#046c4e',
        '800': '#03543f',
        '900': '#014737',
      },
      teal: {
        '50': '#edfafa',
        '100': '#d5f5f6',
        '200': '#afecef',
        '300': '#7edce2',
        '400': '#16bdca',
        '500': '#0694a2',
        '600': '#047481',
        '700': '#036672',
        '800': '#05505c',
        '900': '#014451',
      },
      blue: {
        '50': '#ebf5ff',
        '100': '#e1effe',
        '200': '#c3ddfd',
        '300': '#a4cafe',
        '400': '#76a9fa',
        '500': '#3f83f8',
        '600': '#1c64f2',
        '700': '#1a56db',
        '800': '#1e429f',
        '900': '#233876',
      },
      indigo: {
        '50': '#f0f5ff',
        '100': '#e5edff',
        '200': '#cddbfe',
        '300': '#b4c6fc',
        '400': '#8da2fb',
        '500': '#6875f5',
        '600': '#5850ec',
        '700': '#5145cd',
        '800': '#42389d',
        '900': '#362f78',
      },
      purple: {
        '50': '#f6f5ff',
        '100': '#edebfe',
        '200': '#dcd7fe',
        '300': '#cabffd',
        '400': '#ac94fa',
        '500': '#9061f9',
        '600': '#7e3af2',
        '700': '#6c2bd9',
        '800': '#5521b5',
        '900': '#4a1d96',
      },
      pink: {
        '50': '#fdf2f8',
        '100': '#fce8f3',
        '200': '#fad1e8',
        '300': '#f8b4d9',
        '400': '#f17eb8',
        '500': '#e74694',
        '600': '#d61f69',
        '700': '#bf125d',
        '800': '#99154b',
        '900': '#751a3d',
      },
    },
    extend: {
      maxHeight: {
        0: "0",
        xl: "36rem",
      },
      fontFamily: {
        // Inter first, then the default sans stack as fallback.
        sans: ["Inter", ...defaultTheme.fontFamily.sans],
      },
    },
  },
  // Extra state variants generated per utility.
  variants: {
    backgroundColor: [
      "hover",
      "focus",
      "active",
      "odd",
    ],
    display: ["responsive", "dark"],
    textColor: [
      "focus-within",
      "hover",
      "active",
    ],
    placeholderColor: ["focus"],
    borderColor: ["focus", "hover"],
    boxShadow: ["focus"],
  },
  plugins: [
    require("@tailwindcss/forms")
  ],
};
|
from django.conf.urls import url
from . import views
# URL routes for the message-board app.
# Fix: the board-related patterns were missing the '^' anchor, so e.g.
# 'xyzboard/' (any URL *ending* in the suffix) would also resolve to them;
# they are now anchored like the auth routes.
urlpatterns = [
    url(r'^login/$', views.login_view, name='login'),
    url(r'^logout/$', views.logout_view, name='logout'),
    url(r'^register/$', views.register_view, name='register'),
    url(r'^board/$', views.board, name='board'),
    url(r'^board/post/$', views.add_post, name='add_post'),
    # NOTE(review): '\D+|\w+|\W+' matches almost any non-empty string —
    # presumably a username; confirm and tighten if possible.
    url(r'^board/messages/(?P<user>\D+|\w+|\W+)/$', views.user_view, name='user_view'),
    url(r'^board/get_comments/$', views.get_comments, name='get_comments'),
    url(r'^board/delete/$', views.remove_message, name='remove_message'),
    url(r'^$', views.index, name='index'),
]
|
import datetime
import re
def regex_proc(reg, txt, postproc=None):
    """Search ``txt`` for ``reg`` (case-insensitive) and return capture group 1.

    When ``postproc`` is given, the captured string is passed through it
    first. Returns None when the pattern does not match.
    """
    match = re.search(reg, txt, flags=re.IGNORECASE)
    if match is None:
        return None
    value = match.group(1)
    if postproc is None:
        return value
    return postproc(value)
def _liter_pp(v):
    # Convert a litre quantity (numeric string) to millilitres.
    return _tofloat(v) * 1000
def _cl_pp(v):
    # Convert a centilitre quantity (numeric string) to millilitres.
    return _tofloat(v) * 10
def _tofloat(v):
    # Thin wrapper so the regex postprocessor tables read uniformly.
    return float(v)
def size(txt):
    """Parse a bottle size from free text and return it in millilitres.

    Tries ml, cl and litre patterns in order; each postprocessor converts
    the captured number to millilitres. Returns a float or None when no
    size pattern matches.

    Fixes: the litre patterns used ``(\\d+\\.\\d)+`` — a *repeated* capturing
    group, which only keeps the final repetition and so truncated values
    such as '1.75 litre' to 1.7 — now ``(\\d+\\.\\d+)``. ``(:?\\s+)?`` was a
    typo for the non-capturing ``(?:\\s+)?``.
    """
    regexs = [
        (r'(\d+)(?:\s+)?ml', _tofloat),
        (r'(\d+)(?:\s+)?cl', _cl_pp),
        (r'(\d+\.\d+)(?:\s+)?litre', _liter_pp),
        (r'([\d.]+)(?:\s+)?litre', _liter_pp),
    ]
    for r, pp in regexs:
        res = regex_proc(r, txt, postproc=pp)
        if res is not None:
            return res
    return None
def abv(txt):
    """Parse alcohol-by-volume from free text; returns a float or None.

    Fix: the decimal patterns used ``(\\d+\\.\\d)+`` (a repeated capturing
    group) which failed on values with more than one decimal digit — e.g.
    '4.55%' fell through to the integer pattern and wrongly returned 55.0.
    Now ``(\\d+\\.\\d+)``.
    """
    regexs = [
        (r'(\d+\.\d+)%', _tofloat),
        (r'(\d+)%', _tofloat),
        (r'(\d+\.\d+)', _tofloat),
    ]
    for r, pp in regexs:
        res = regex_proc(r, txt, postproc=pp)
        if res is not None:
            return res
    return None
def cask_no(txt):
    """Extract a cask identifier (digits/dashes) from free text, or None."""
    # Most specific first: explicit "Cask #", then "Cask No.", then a bare
    # hash-prefixed number.
    patterns = (
        r'Cask\s+?#([-\d]+)',
        r'Cask\s+No\.?#?([-\d]+)',
        r'#([-\d]+)',
    )
    for pattern in patterns:
        value = regex_proc(pattern, txt)
        if value is not None:
            return value
    return None
def make_distillery_parser(distilleries):
    """Build a parser returning the first known distillery mentioned in a text.

    Parenthesised suffixes such as "(Islay)" are stripped from the supplied
    names before matching. The returned callable yields the cleaned name,
    or None when no known distillery appears in the text.
    """
    cleaned = [re.sub(r'\(\w+\)', '', name).strip() for name in distilleries]

    def distillery_parser(txt):
        for name in cleaned:
            if name in txt:
                return name
        return None

    return distillery_parser
def _make_check_year(nowtime):
def check_year(val):
year = int(val)
if 1800 <= year <= nowtime.year:
return year
return None
return check_year
def age(txt, nowtime=None):
    """Parse an age statement ("12 Years Old", "15 Years") as an int, or None.

    ``nowtime`` is accepted for interface symmetry with vintage(); the
    current implementation does not use it beyond defaulting.
    """
    nowtime = nowtime or datetime.datetime.utcnow()
    patterns = (
        (r'(\d{1,3})\s+Years?\s+Old', lambda v: int(v)),
        (r'(\d{1,3})\s+Years', lambda v: int(v)),
    )
    for pattern, convert in patterns:
        value = regex_proc(pattern, txt, postproc=convert)
        if value is not None:
            return value
    return None
def _to_1900(v):
    # Map a two-digit year string ("95") into the 1900s -> 1995.
    # NOTE(review): assumes abbreviated vintages are 20th century; a "'05"
    # vintage becomes 1905 — confirm that is intended.
    return 1900 + int(v)
def vintage(txt, nowtime=None):
    """Parse a vintage year from free text, or None.

    Accepts either an apostrophe-prefixed two-digit year ("'95", mapped to
    the 1900s) or a plausible four-digit year between 1800 and the current
    year (per ``nowtime``, defaulting to UTC now).
    """
    nowtime = nowtime or datetime.datetime.utcnow()
    patterns = (
        (r"(?:'|’)(\d{2})", _to_1900),
        (r'(\d{4})', _make_check_year(nowtime)),
    )
    for pattern, convert in patterns:
        value = regex_proc(pattern, txt, postproc=convert)
        if value is not None:
            return value
    return None
def currency(txt):
    """Guess the currency mentioned in ``txt``: 'USD', 'JPY', or None."""
    lowered = txt.lower()
    if '$' in txt or 'usd' in lowered:
        return 'USD'
    if u'円' in txt or u'¥' in txt:
        return 'JPY'
    return None
def take_first_nonempty(fn):
    """Wrap ``fn`` so that, applied across an iterable, the first truthy
    result wins.

    The wrapper returns None when ``fn`` produces nothing truthy (note:
    falsy-but-valid results such as 0 or '' are deliberately skipped).
    """
    def _inner(vals):
        for candidate in vals:
            result = fn(candidate)
            if result:
                return result
        return None
    return _inner
|
# from django.forms import ModelForm
from django.conf import settings
from django.contrib.gis import forms
from django.contrib.gis.geos import GEOSGeometry
from django.forms import ValidationError
from django.contrib.auth.models import User
from django.contrib.gis.forms.widgets import OSMWidget
from djcelery.models import IntervalSchedule, PeriodicTask
from .models import GISLayerMaster, GISLayer, SpatialReport, SpatialReportItem
class GISLayerMasterChoiceFieldLabelMixin(object):
    """
    Overrides the regular gis layer selector labels so we can see the app/type prefixes on layers.
    I would much rather NOT hard code the model types (eg developmentgislayer, ecosystemsgislayer, etc.). Please
    find a better way.
    """
    def label_from_instance(self, obj):
        # Probe each multi-table-inheritance child accessor in turn; the
        # first one that exists supplies the (prefixed) label via its
        # __str__. For MTI children the child's DoesNotExist subclasses the
        # parent's, so obj.DoesNotExist catches the miss.
        try:
            return str(obj.developmentgislayer)
        except obj.DoesNotExist:
            pass
        try:
            return str(obj.ecosystemsgislayer)
        except obj.DoesNotExist:
            pass
        try:
            return str(obj.heritagegislayer)
        except obj.DoesNotExist:
            pass
        # No child row exists — fall back to the master's own string.
        return str(obj)
class GISLayerMasterModelChoiceField(GISLayerMasterChoiceFieldLabelMixin, forms.ModelChoiceField):
    """
    Use this field on a layer select field form where you want to see the layer name prefixed with the DEV, HER, ECO stuff.
    """
    # Behavior comes entirely from the label mixin; no extra members needed.
    pass
class GISLayerMasterModelMultipleChoiceField(GISLayerMasterChoiceFieldLabelMixin, forms.ModelMultipleChoiceField):
    """
    Use this field on a layer multi-select field form where you want to see the layer name prefixed with the DEV, HER, ECO stuff.
    """
    # Behavior comes entirely from the label mixin; no extra members needed.
    pass
# The default widget doesn't work!
class GeoinfoOSMWidget(OSMWidget):
    # Swap in our own OpenLayers/OSM template; the stock OSMWidget template
    # does not render correctly in this project.
    template_name = 'geoinfo/geoinfo-openlayers-osm.html'
class GISLayerForm(forms.ModelForm):
"""
The top-level GIS layer form. Should be used by other apps for their
own GIS layer stuff - Development, GISLayerAdmin.
When subclassing, see https://docs.djangoproject.com/en/1.8/topics/forms/modelforms/#form-inheritance
"""
def __init__(self, *args, **kwargs):
    # 'user' is a custom kwarg that ModelForm does not understand — pop it
    # before delegating to the parent constructor; it becomes the default
    # author on save().
    user = kwargs.pop('user', None)
    super(GISLayerForm, self).__init__(*args, **kwargs)
    self.user_instance = user
    default_interval = None
    obj = kwargs.get('instance', None)
    if obj is not None:
        try:
            # Look up the periodic refresh task for this layer (matched by
            # task name + serialized pk args) so the form can pre-select its
            # current interval.
            ptask = PeriodicTask.objects.get(task="geoinfo.tasks.reload_layer_features", args=str([obj.pk]))
            default_interval = ptask.interval
        except PeriodicTask.DoesNotExist:
            pass
    # Extra (non-model) field: how often to reload the layer's features;
    # empty means never refresh.
    self.fields['refresh_interval'] = forms.ModelChoiceField(
        required=False,
        queryset=IntervalSchedule.objects.all(),
        initial=default_interval,
        empty_label="Never"
    )
def save(self, commit=True):
obj = super(GISLayerForm, self).save(commit=False)
if not obj.author:
obj.author = self.user_instance
try:
# Identify any periodic tasks that refer to this layer... There should only be one, or none.
ptask = PeriodicTask.objects.get(task="geoinfo.tasks.reload_layer_features", args=str([obj.pk]))
# A task exists... lets just attempt to update it.
if self.cleaned_data['refresh_interval']:
# Lets only update the task if the interval field has changed.
# We get the initial directly from the field because we put it there. It won't be in the modelform
if self.fields['refresh_interval'].initial != self.cleaned_data['refresh_interval']:
ptask.interval = self.cleaned_data['refresh_interval']
ptask.save()
else:
ptask.delete()
except PeriodicTask.DoesNotExist:
# No task exists, lets create one!
if self.cleaned_data['refresh_interval']:
PeriodicTask.objects.create(
name="Update features for gislayer: {}".format(str(obj)),
task="geoinfo.tasks.reload_layer_features",
interval=self.cleaned_data['refresh_interval'],
args=str([obj.pk])
)
if commit:
obj.save()
return obj
def clean_wkt(self):
wkt = self.cleaned_data['wkt']
try:
g = GEOSGeometry(wkt)
return wkt
except ValueError as err:
# raise forms.ValidationError("Invalid well-known-text.")
return None
def clean_wfs_password(self):
pw = self.cleaned_data['wfs_password']
if self.instance and self.instance.wfs_password and not pw:
return self.instance.wfs_password
return pw
# Add validation for input type:
def clean(self):
cleaned_data = super(GISLayerForm, self).clean()
input_type = cleaned_data.get('input_type')
if input_type == 'wkt':
if cleaned_data.get('wkt', None) is None:
self.add_error('input_type', "Valid well-known-text is required if input type \"WKT\" is selected.")
self.add_error('wkt', "Invalid well-known-text.")
# raise forms.ValidationError("Valid well-known-text is required if input type \"WKT\" is selected.")
# TODO Actually validate shapefile input.
if input_type == 'file':
if cleaned_data.get('file', None) is None:
self.add_error('input_type', "A valid shapefile is required if input type \"file\" is selected.")
self.add_error('file', "Invalid file.")
# raise forms.ValidationError("A valid shapefile is required if input type \"file\" is selected.")
# TODO Actually validate draw input.
if input_type == 'map':
if cleaned_data.get('draw', None) is None:
self.add_error('input_type', "A valid shape is required on the map if input type \"draw on map\" is selected.")
self.add_error('draw', "Invalid map drawing. ")
# raise forms.ValidationError("A valid shape is required on the map if input type \"draw on map\" is selected.")
return cleaned_data
class Meta:
model = GISLayer
# We need to be explicit about which fields we want so that they can
# be added to by any GISLayerForm subclasses. __all__ messed that up.
fields = (
'name',
'input_type',
'wkt',
'draw',
'feature_titles_template',
'file',
'geomark',
'wfs_geojson',
'wfs_username',
'wfs_password',
'notes',
'author',
'reload_features',
'polygon_style',
'polyline_style',
'point_style'
)
widgets = {
'draw': GeoinfoOSMWidget(attrs={
'default_lon': getattr(settings, 'OPENLAYERS_DRAW_ON_MAP_LON', -126),
'default_lat': getattr(settings, 'OPENLAYERS_DRAW_ON_MAP_LAT', 54.9),
'default_zoom': getattr(settings, 'OPENLAYERS_DRAW_ON_MAP_ZOOM', 4)
}),
'wfs_password': forms.PasswordInput()
}
# This is just a straight inheritance from the GISLayerForm to preserve
# other code and provides a place for further admin-page mods.
class GISLayerAdminForm(GISLayerForm):
    # No admin-specific behaviour yet; exists purely as an extension point.
    pass
class SpatialReportForm(forms.ModelForm):
    """Create/edit a SpatialReport; layers are selected with app-prefixed labels."""
    # Multi-select over every master layer, using the DEV/HER/ECO-prefixed labels.
    report_on = GISLayerMasterModelMultipleChoiceField(queryset=GISLayerMaster.objects.all())
    class Meta:
        model = SpatialReport
        fields = (
            'name',
            'distance_cap',
            'report_on'
        )
class SpatialReportItemForm(forms.ModelForm):
    """
    Form for a single SpatialReportItem. The parent report is fixed at
    construction time via the required ``report_id`` kwarg and may not be
    changed through the form.
    """
    layer = GISLayerMasterModelChoiceField(queryset=GISLayerMaster.objects.all())

    def __init__(self, *args, **kwargs):
        spatialreport_id = kwargs.pop('report_id', None)
        if spatialreport_id is None:
            raise TypeError('report_id is a required kwarg of SpatialReportItemForm')
        super(SpatialReportItemForm, self).__init__(*args, **kwargs)
        # Lock the report selector down to the single report this item belongs to.
        self.fields['report'].queryset = SpatialReport.objects.filter(id=spatialreport_id)
        self.fields['report'].initial = SpatialReport.objects.get(id=spatialreport_id)
        self.spatialreport_instance = self.fields['report'].initial

    def clean_report(self):
        """Reject any attempt to re-point the item at a different report."""
        data = self.cleaned_data['report']
        if data != self.spatialreport_instance:
            # BUG FIX: ValidationError's second positional argument is the
            # error *code*, so the report name used to be silently dropped
            # from the message. Interpolate it via params instead.
            raise forms.ValidationError(
                "Spatial Report must be set to: %(report)s",
                code='invalid',
                params={'report': str(self.spatialreport_instance)},
            )
        return data

    class Meta:
        model = SpatialReportItem
        fields = (
            'report',
            'distance_cap',
            'layer'
        )
class GeneralSpatialReportForm(forms.Form):
    """Ad-hoc (non-model) spatial report form: a name, a distance cap and a set of layers."""
    name = forms.CharField(required=True)
    distance_cap = forms.CharField(required=True, help_text=SpatialReport._meta.get_field('distance_cap').help_text)
    layers = forms.MultipleChoiceField(choices=[], required=False)

    def __init__(self, *args, **kwargs):
        super(GeneralSpatialReportForm, self).__init__(*args, **kwargs)
        # Choices are recomputed per instantiation so newly added layers show up.
        self.fields['layers'].choices = self.get_item_choices()

    def get_item_choices(self):
        """Build (pk, label) choice pairs for every GISLayer."""
        choices = []
        for layer in GISLayer.objects.all():
            choices.append((layer.pk, str(layer)))
        return choices
|
const menuIcon = document.getElementById("menu-icon");
const slideoutMenu = document.getElementById("slideout-menu");
const searchIcon = document.getElementById("search-icon");
const searchBox = document.getElementById("searchbox");

// Toggle the search box between its visible (top: 72px) and hidden
// (top: 24px) positions, disabling pointer events while hidden.
searchIcon.addEventListener("click", () => {
  const isOpen = searchBox.style.top === "72px";
  searchBox.style.top = isOpen ? "24px" : "72px";
  searchBox.style.pointerEvents = isOpen ? "none" : "auto";
});

// Fade the slide-out menu in/out, enabling pointer events only while visible.
menuIcon.addEventListener("click", () => {
  const isVisible = slideoutMenu.style.opacity === "1";
  slideoutMenu.style.opacity = isVisible ? "0" : "1";
  slideoutMenu.style.pointerEvents = isVisible ? "none" : "auto";
});
|
import os
import re
import sys
import time
import copy
import thread
import socket
import threading
import logging
import inspect
import argparse
import telnetlib
import redis
import random
import redis
import json
import glob
import commands
from collections import defaultdict
from argparse import RawTextHelpFormatter
from string import Template
PWD = os.path.dirname(os.path.realpath(__file__))
WORKDIR = os.path.join(PWD, '../')
def getenv(key, default):
    """Return the value of environment variable ``key``, or ``default`` when unset."""
    # os.environ.get performs the membership test and lookup in one step.
    return os.environ.get(key, default)
# Where test output goes: the TEST_LOGFILE env var, defaulting to t.log.
logfile = getenv('TEST_LOGFILE', 't.log')
if logfile == '-':
    # '-' means "log to stderr" (basicConfig without a filename).
    logging.basicConfig(format="%(asctime)-15s [%(threadName)s] [%(levelname)s] %(message)s", level=logging.DEBUG)
else:
    logging.basicConfig(filename=logfile, format="%(asctime)-15s [%(threadName)s] [%(levelname)s] %(message)s", level=logging.DEBUG)
logging.info("test running!!!!!!")
def strstr(s1, s2):
    """True when ``s2`` occurs anywhere inside ``s1`` (C strstr semantics)."""
    return s2 in s1
def lets_sleep(SLEEP_TIME = 0.1):
    # Small fixed pause used between test steps to let async state settle.
    time.sleep(SLEEP_TIME)
def TT(template, args):  # todo: modify all
    """Substitute ``$name`` placeholders in ``template`` from the ``args`` mapping."""
    tpl = Template(template)
    return tpl.substitute(args)
def nothrow(ExceptionToCheck=Exception, logger=None):
    """
    Decorator factory: swallow ``ExceptionToCheck`` raised by the wrapped
    function, logging it when ``logger`` is given (otherwise printing it),
    and return None instead. Other exception types still propagate.
    """
    def deco_retry(f):
        def f_retry(*args, **kwargs):
            try:
                return f(*args, **kwargs)
            # FIX: the old ``except ExceptionToCheck, e`` form is a syntax
            # error on Python 3; ``as`` works on Python 2.6+ and 3.
            except ExceptionToCheck as e:
                if logger:
                    logger.info(e)
                else:
                    print(str(e))
        return f_retry  # true decorator
    return deco_retry
@nothrow(Exception)
def test_nothrow():
    # Smoke test: the raised exception must be swallowed by @nothrow.
    raise Exception('exception: xx')
def json_encode(j):
    # Pretty-print ``j`` as indented JSON.
    # NOTE(review): MyEncoder is not defined in this module — presumably
    # provided elsewhere at runtime; calling this raises NameError otherwise.
    # TODO confirm where MyEncoder comes from.
    return json.dumps(j, indent=4, cls=MyEncoder)
def json_decode(j):
    """Parse a JSON document string into the corresponding Python value."""
    decoded = json.loads(j)
    return decoded
# commands does not work on windows..
def system(cmd, log_fun=logging.info):
    """Run ``cmd`` through the shell and return its combined output (POSIX only)."""
    # Log the command first so failures can be reproduced by hand.
    if log_fun: log_fun(cmd)
    r = commands.getoutput(cmd)
    return r
def shorten(s, l=80):
    """Clip ``s`` to at most ``l`` characters, marking truncation with '...'."""
    if len(s) > l:
        return s[:l - 3] + '...'
    return s
def assert_true(a):
    """Assert that ``a`` is truthy."""
    # FIX: corrected typo in the failure message ('except' -> 'expected').
    assert a, 'assert fail: expected true, got %s' % a
def assert_equal(a, b):
    # The failure message shortens both reprs so huge payloads stay readable.
    assert a == b, 'assert fail: %s vs %s' % (shorten(str(a)), shorten(str(b)))
def assert_raises(exception_cls, callable, *args, **kwargs):
    """Call ``callable``; return the raised ``exception_cls`` instance.

    Fails (AssertionError) when nothing is raised, or when an unrelated
    exception type comes out instead.
    """
    try:
        callable(*args, **kwargs)
    except exception_cls as expected:
        return expected
    except Exception as unexpected:
        assert False, 'assert_raises %s but raised: %s' % (exception_cls, unexpected)
    assert False, 'assert_raises %s but nothing raise' % (exception_cls)
def assert_fail(err_response, callable, *args, **kwargs):
    """Assert that ``callable`` raises and its message matches the ``err_response`` regex."""
    try:
        callable(*args, **kwargs)
    except Exception as caught:
        #assert strstr(str(e), err_response), 'assert "%s" but got "%s"' % (err_response, e)
        message = str(caught)
        assert re.search(err_response, message), 'assert "%s" but got "%s"' % (err_response, caught)
        return
    assert False, 'assert_fail %s but nothing raise' % (err_response)
if __name__ == "__main__":
    # Running this module directly exercises only the @nothrow smoke test.
    test_nothrow()
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
|
"""Fixtures for test case
@Author: NguyenKhacThanh
"""
import os
import json
import pytest
from wipm import create_app
@pytest.fixture(scope="class")
def inject_client(request):
    """Attach a Flask test client to the requesting test class (created once per class)."""
    if hasattr(request.cls, "client"):
        yield
        return
    app = create_app()
    app.config["TESTING"] = True
    app.config["DEBUG"] = False
    request.cls.client = app.test_client()
    yield
@pytest.fixture(scope="class")
def inject_data(request):
    """Create a dataset sized to the sample payload and expose it on the class.

    Sets ``cls.payload`` (the sample data) and ``cls.id_dataset``; the dataset
    is deleted again after the test class finishes.
    """
    client = request.cls.client
    with open(os.path.join("tests", "datafiles", "push_dataset.json"), "r") as f:
        payload = json.load(f)
    setattr(request.cls, "payload", payload)
    # Create an (empty) dataset declaring the input arity of the payload.
    response = client.post(
        "/api/v1/dataset",
        json={"number_of_input": len(payload["inputs"][0])}
    )
    id_dataset = response.get_json().get("id")
    setattr(request.cls, "id_dataset", id_dataset)
    yield
    # Teardown: remove the dataset created above.
    client.delete(f"/api/v1/dataset/{id_dataset}")
@pytest.fixture(scope="class")
def inject_params_model_regression(request):
    """Load regression-model parameters from the datafiles as ``cls.params``."""
    params_path = os.path.join("tests", "datafiles", "params_regression_model.json")
    with open(params_path, "r") as f:
        request.cls.params = json.load(f)
    yield
@pytest.fixture(scope="class")
def inject_push_data(request):
    """Push the class payload into the dataset created by ``inject_data``."""
    client = request.cls.client
    # The response is intentionally ignored; this fixture only seeds data.
    # (Removed the unused ``res`` local and the redundant bare ``return``.)
    client.put(
        f"/api/v1/dataset/{request.cls.id_dataset}",
        json=request.cls.payload
    )
@pytest.fixture(scope="class")
def inject_func_call_create_model(request):
    """Expose a ``create_model(type_model)`` helper on the test class.

    Every model created through the helper is tracked and deleted again in
    the fixture's teardown.
    """
    client = request.cls.client
    with open(os.path.join("tests", "datafiles", "params_regression_model.json"), "r") as f:
        params = json.load(f)
    # BUG FIX: the original kept a single ``id_model`` local that the inner
    # function shadowed, so teardown always deleted ``.../None``. Track every
    # created id in a shared list instead.
    created_ids = []

    def create_model(self, type_model):
        url = f"/api/v1/regression/{type_model}"
        response = client.post(url, json=params[type_model])
        id_model = response.get_json()["id"]
        created_ids.append(id_model)
        return id_model

    setattr(request.cls, "create_model", create_model)
    yield
    # BUG FIX: the teardown URL was misspelled ("regressiopn").
    for id_model in created_ids:
        client.delete(f"/api/v1/regression/{id_model}")
|
#pragma once
#include <stdio.h>
#include <string.h>
#include <assert.h>
#ifdef _WINDOWS
#include <WinSock2.h>
#include <Mswsock.h>
#include <io.h>
#else //_WINDOWS
#include <errno.h>
#include <unistd.h>
#include <fcntl.h>
#include <arpa/inet.h>
#include <netinet/tcp.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/epoll.h>
#endif //_WINDOWS
#include <string>
#include <vector>
#include <list>
#include <map>
#include <algorithm>
#include <unordered_map>
#include <unordered_set>
using namespace std;
#include "CommonTypes.h"
#include "Log.h"
|
/*
 * These are the register definitions for the PIC18F4455 MCU.
*
* This file is part of the GNU PIC library for SDCC, originally
* created by Molnar Karoly <molnarkaroly@users.sf.net> 2016.
*
* This file is generated automatically by the cinc2h.pl, 2016-04-13 17:23:46 UTC.
*
* SDCC is licensed under the GNU Public license (GPL) v2. Note that
* this license covers the code to the compiler and other executables,
* but explicitly does not cover any code or objects generated by sdcc.
*
* For pic device libraries and header files which are derived from
* Microchip header (.inc) and linker script (.lkr) files Microchip
* requires that "The header files should state that they are only to be
* used with authentic Microchip devices" which makes them incompatible
* with the GPL. Pic device libraries and header files are located at
* non-free/lib and non-free/include directories respectively.
* Sdcc should be run with the --use-non-free command line option in
* order to include non-free header files and libraries.
*
* See http://sdcc.sourceforge.net/ for the latest information on sdcc.
*/
#include <pic18f4455.h>
//==============================================================================
__at(0x0F62) __sfr SPPDATA;
__at(0x0F63) __sfr SPPCFG;
__at(0x0F63) volatile __SPPCFGbits_t SPPCFGbits;
__at(0x0F64) __sfr SPPEPS;
__at(0x0F64) volatile __SPPEPSbits_t SPPEPSbits;
__at(0x0F65) __sfr SPPCON;
__at(0x0F65) volatile __SPPCONbits_t SPPCONbits;
__at(0x0F66) __sfr UFRM;
__at(0x0F66) __sfr UFRML;
__at(0x0F66) volatile __UFRMLbits_t UFRMLbits;
__at(0x0F67) __sfr UFRMH;
__at(0x0F67) volatile __UFRMHbits_t UFRMHbits;
__at(0x0F68) __sfr UIR;
__at(0x0F68) volatile __UIRbits_t UIRbits;
__at(0x0F69) __sfr UIE;
__at(0x0F69) volatile __UIEbits_t UIEbits;
__at(0x0F6A) __sfr UEIR;
__at(0x0F6A) volatile __UEIRbits_t UEIRbits;
__at(0x0F6B) __sfr UEIE;
__at(0x0F6B) volatile __UEIEbits_t UEIEbits;
__at(0x0F6C) __sfr USTAT;
__at(0x0F6C) volatile __USTATbits_t USTATbits;
__at(0x0F6D) __sfr UCON;
__at(0x0F6D) volatile __UCONbits_t UCONbits;
__at(0x0F6E) __sfr UADDR;
__at(0x0F6E) volatile __UADDRbits_t UADDRbits;
__at(0x0F6F) __sfr UCFG;
__at(0x0F6F) volatile __UCFGbits_t UCFGbits;
__at(0x0F70) __sfr UEP0;
__at(0x0F70) volatile __UEP0bits_t UEP0bits;
__at(0x0F71) __sfr UEP1;
__at(0x0F71) volatile __UEP1bits_t UEP1bits;
__at(0x0F72) __sfr UEP2;
__at(0x0F72) volatile __UEP2bits_t UEP2bits;
__at(0x0F73) __sfr UEP3;
__at(0x0F73) volatile __UEP3bits_t UEP3bits;
__at(0x0F74) __sfr UEP4;
__at(0x0F74) volatile __UEP4bits_t UEP4bits;
__at(0x0F75) __sfr UEP5;
__at(0x0F75) volatile __UEP5bits_t UEP5bits;
__at(0x0F76) __sfr UEP6;
__at(0x0F76) volatile __UEP6bits_t UEP6bits;
__at(0x0F77) __sfr UEP7;
__at(0x0F77) volatile __UEP7bits_t UEP7bits;
__at(0x0F78) __sfr UEP8;
__at(0x0F78) volatile __UEP8bits_t UEP8bits;
__at(0x0F79) __sfr UEP9;
__at(0x0F79) volatile __UEP9bits_t UEP9bits;
__at(0x0F7A) __sfr UEP10;
__at(0x0F7A) volatile __UEP10bits_t UEP10bits;
__at(0x0F7B) __sfr UEP11;
__at(0x0F7B) volatile __UEP11bits_t UEP11bits;
__at(0x0F7C) __sfr UEP12;
__at(0x0F7C) volatile __UEP12bits_t UEP12bits;
__at(0x0F7D) __sfr UEP13;
__at(0x0F7D) volatile __UEP13bits_t UEP13bits;
__at(0x0F7E) __sfr UEP14;
__at(0x0F7E) volatile __UEP14bits_t UEP14bits;
__at(0x0F7F) __sfr UEP15;
__at(0x0F7F) volatile __UEP15bits_t UEP15bits;
__at(0x0F80) __sfr PORTA;
__at(0x0F80) volatile __PORTAbits_t PORTAbits;
__at(0x0F81) __sfr PORTB;
__at(0x0F81) volatile __PORTBbits_t PORTBbits;
__at(0x0F82) __sfr PORTC;
__at(0x0F82) volatile __PORTCbits_t PORTCbits;
__at(0x0F83) __sfr PORTD;
__at(0x0F83) volatile __PORTDbits_t PORTDbits;
__at(0x0F84) __sfr PORTE;
__at(0x0F84) volatile __PORTEbits_t PORTEbits;
__at(0x0F89) __sfr LATA;
__at(0x0F89) volatile __LATAbits_t LATAbits;
__at(0x0F8A) __sfr LATB;
__at(0x0F8A) volatile __LATBbits_t LATBbits;
__at(0x0F8B) __sfr LATC;
__at(0x0F8B) volatile __LATCbits_t LATCbits;
__at(0x0F8C) __sfr LATD;
__at(0x0F8C) volatile __LATDbits_t LATDbits;
__at(0x0F8D) __sfr LATE;
__at(0x0F8D) volatile __LATEbits_t LATEbits;
__at(0x0F92) __sfr DDRA;
__at(0x0F92) volatile __DDRAbits_t DDRAbits;
__at(0x0F92) __sfr TRISA;
__at(0x0F92) volatile __TRISAbits_t TRISAbits;
__at(0x0F93) __sfr DDRB;
__at(0x0F93) volatile __DDRBbits_t DDRBbits;
__at(0x0F93) __sfr TRISB;
__at(0x0F93) volatile __TRISBbits_t TRISBbits;
__at(0x0F94) __sfr DDRC;
__at(0x0F94) volatile __DDRCbits_t DDRCbits;
__at(0x0F94) __sfr TRISC;
__at(0x0F94) volatile __TRISCbits_t TRISCbits;
__at(0x0F95) __sfr DDRD;
__at(0x0F95) volatile __DDRDbits_t DDRDbits;
__at(0x0F95) __sfr TRISD;
__at(0x0F95) volatile __TRISDbits_t TRISDbits;
__at(0x0F96) __sfr DDRE;
__at(0x0F96) volatile __DDREbits_t DDREbits;
__at(0x0F96) __sfr TRISE;
__at(0x0F96) volatile __TRISEbits_t TRISEbits;
__at(0x0F9B) __sfr OSCTUNE;
__at(0x0F9B) volatile __OSCTUNEbits_t OSCTUNEbits;
__at(0x0F9D) __sfr PIE1;
__at(0x0F9D) volatile __PIE1bits_t PIE1bits;
__at(0x0F9E) __sfr PIR1;
__at(0x0F9E) volatile __PIR1bits_t PIR1bits;
__at(0x0F9F) __sfr IPR1;
__at(0x0F9F) volatile __IPR1bits_t IPR1bits;
__at(0x0FA0) __sfr PIE2;
__at(0x0FA0) volatile __PIE2bits_t PIE2bits;
__at(0x0FA1) __sfr PIR2;
__at(0x0FA1) volatile __PIR2bits_t PIR2bits;
__at(0x0FA2) __sfr IPR2;
__at(0x0FA2) volatile __IPR2bits_t IPR2bits;
__at(0x0FA6) __sfr EECON1;
__at(0x0FA6) volatile __EECON1bits_t EECON1bits;
__at(0x0FA7) __sfr EECON2;
__at(0x0FA8) __sfr EEDATA;
__at(0x0FA9) __sfr EEADR;
__at(0x0FAB) __sfr RCSTA;
__at(0x0FAB) volatile __RCSTAbits_t RCSTAbits;
__at(0x0FAC) __sfr TXSTA;
__at(0x0FAC) volatile __TXSTAbits_t TXSTAbits;
__at(0x0FAD) __sfr TXREG;
__at(0x0FAE) __sfr RCREG;
__at(0x0FAF) __sfr SPBRG;
__at(0x0FB0) __sfr SPBRGH;
__at(0x0FB1) __sfr T3CON;
__at(0x0FB1) volatile __T3CONbits_t T3CONbits;
__at(0x0FB2) __sfr TMR3;
__at(0x0FB2) __sfr TMR3L;
__at(0x0FB3) __sfr TMR3H;
__at(0x0FB4) __sfr CMCON;
__at(0x0FB4) volatile __CMCONbits_t CMCONbits;
__at(0x0FB5) __sfr CVRCON;
__at(0x0FB5) volatile __CVRCONbits_t CVRCONbits;
__at(0x0FB6) __sfr CCP1AS;
__at(0x0FB6) volatile __CCP1ASbits_t CCP1ASbits;
__at(0x0FB6) __sfr ECCP1AS;
__at(0x0FB6) volatile __ECCP1ASbits_t ECCP1ASbits;
__at(0x0FB7) __sfr CCP1DEL;
__at(0x0FB7) volatile __CCP1DELbits_t CCP1DELbits;
__at(0x0FB7) __sfr ECCP1DEL;
__at(0x0FB7) volatile __ECCP1DELbits_t ECCP1DELbits;
__at(0x0FB8) __sfr BAUDCON;
__at(0x0FB8) volatile __BAUDCONbits_t BAUDCONbits;
__at(0x0FB8) __sfr BAUDCTL;
__at(0x0FB8) volatile __BAUDCTLbits_t BAUDCTLbits;
__at(0x0FBA) __sfr CCP2CON;
__at(0x0FBA) volatile __CCP2CONbits_t CCP2CONbits;
__at(0x0FBB) __sfr CCPR2;
__at(0x0FBB) __sfr CCPR2L;
__at(0x0FBC) __sfr CCPR2H;
__at(0x0FBD) __sfr CCP1CON;
__at(0x0FBD) volatile __CCP1CONbits_t CCP1CONbits;
__at(0x0FBD) __sfr ECCP1CON;
__at(0x0FBD) volatile __ECCP1CONbits_t ECCP1CONbits;
__at(0x0FBE) __sfr CCPR1;
__at(0x0FBE) __sfr CCPR1L;
__at(0x0FBF) __sfr CCPR1H;
__at(0x0FC0) __sfr ADCON2;
__at(0x0FC0) volatile __ADCON2bits_t ADCON2bits;
__at(0x0FC1) __sfr ADCON1;
__at(0x0FC1) volatile __ADCON1bits_t ADCON1bits;
__at(0x0FC2) __sfr ADCON0;
__at(0x0FC2) volatile __ADCON0bits_t ADCON0bits;
__at(0x0FC3) __sfr ADRES;
__at(0x0FC3) __sfr ADRESL;
__at(0x0FC4) __sfr ADRESH;
__at(0x0FC5) __sfr SSPCON2;
__at(0x0FC5) volatile __SSPCON2bits_t SSPCON2bits;
__at(0x0FC6) __sfr SSPCON1;
__at(0x0FC6) volatile __SSPCON1bits_t SSPCON1bits;
__at(0x0FC7) __sfr SSPSTAT;
__at(0x0FC7) volatile __SSPSTATbits_t SSPSTATbits;
__at(0x0FC8) __sfr SSPADD;
__at(0x0FC9) __sfr SSPBUF;
__at(0x0FCA) __sfr T2CON;
__at(0x0FCA) volatile __T2CONbits_t T2CONbits;
__at(0x0FCB) __sfr PR2;
__at(0x0FCC) __sfr TMR2;
__at(0x0FCD) __sfr T1CON;
__at(0x0FCD) volatile __T1CONbits_t T1CONbits;
__at(0x0FCE) __sfr TMR1;
__at(0x0FCE) __sfr TMR1L;
__at(0x0FCF) __sfr TMR1H;
__at(0x0FD0) __sfr RCON;
__at(0x0FD0) volatile __RCONbits_t RCONbits;
__at(0x0FD1) __sfr WDTCON;
__at(0x0FD1) volatile __WDTCONbits_t WDTCONbits;
__at(0x0FD2) __sfr HLVDCON;
__at(0x0FD2) volatile __HLVDCONbits_t HLVDCONbits;
__at(0x0FD2) __sfr LVDCON;
__at(0x0FD2) volatile __LVDCONbits_t LVDCONbits;
__at(0x0FD3) __sfr OSCCON;
__at(0x0FD3) volatile __OSCCONbits_t OSCCONbits;
__at(0x0FD5) __sfr T0CON;
__at(0x0FD5) volatile __T0CONbits_t T0CONbits;
__at(0x0FD6) __sfr TMR0;
__at(0x0FD6) __sfr TMR0L;
__at(0x0FD7) __sfr TMR0H;
__at(0x0FD8) __sfr STATUS;
__at(0x0FD8) volatile __STATUSbits_t STATUSbits;
__at(0x0FD9) __sfr FSR2L;
__at(0x0FDA) __sfr FSR2H;
__at(0x0FDB) __sfr PLUSW2;
__at(0x0FDC) __sfr PREINC2;
__at(0x0FDD) __sfr POSTDEC2;
__at(0x0FDE) __sfr POSTINC2;
__at(0x0FDF) __sfr INDF2;
__at(0x0FE0) __sfr BSR;
__at(0x0FE1) __sfr FSR1L;
__at(0x0FE2) __sfr FSR1H;
__at(0x0FE3) __sfr PLUSW1;
__at(0x0FE4) __sfr PREINC1;
__at(0x0FE5) __sfr POSTDEC1;
__at(0x0FE6) __sfr POSTINC1;
__at(0x0FE7) __sfr INDF1;
__at(0x0FE8) __sfr WREG;
__at(0x0FE9) __sfr FSR0L;
__at(0x0FEA) __sfr FSR0H;
__at(0x0FEB) __sfr PLUSW0;
__at(0x0FEC) __sfr PREINC0;
__at(0x0FED) __sfr POSTDEC0;
__at(0x0FEE) __sfr POSTINC0;
__at(0x0FEF) __sfr INDF0;
__at(0x0FF0) __sfr INTCON3;
__at(0x0FF0) volatile __INTCON3bits_t INTCON3bits;
__at(0x0FF1) __sfr INTCON2;
__at(0x0FF1) volatile __INTCON2bits_t INTCON2bits;
__at(0x0FF2) __sfr INTCON;
__at(0x0FF2) volatile __INTCONbits_t INTCONbits;
__at(0x0FF3) __sfr PROD;
__at(0x0FF3) __sfr PRODL;
__at(0x0FF4) __sfr PRODH;
__at(0x0FF5) __sfr TABLAT;
__at(0x0FF6) __sfr TBLPTR;
__at(0x0FF6) __sfr TBLPTRL;
__at(0x0FF7) __sfr TBLPTRH;
__at(0x0FF8) __sfr TBLPTRU;
__at(0x0FF9) __sfr PC;
__at(0x0FF9) __sfr PCL;
__at(0x0FFA) __sfr PCLATH;
__at(0x0FFB) __sfr PCLATU;
__at(0x0FFC) __sfr STKPTR;
__at(0x0FFC) volatile __STKPTRbits_t STKPTRbits;
__at(0x0FFD) __sfr TOS;
__at(0x0FFD) __sfr TOSL;
__at(0x0FFE) __sfr TOSH;
__at(0x0FFF) __sfr TOSU;
|
(function() {
  'use strict';

  angular
    .module('antidote')
    .factory('DrugsService', DrugsService);

  /** @ngInject */
  function DrugsService($resource) {
    // Custom actions layered on top of the default $resource CRUD verbs.
    var customActions = {
      update: { method: 'PUT' },
      getReviews: { method: 'GET', url: '/api/drugs/:id/reviews' },
      postReview: { method: 'POST', url: '/api/drugs/:id/reviews' },
      query: { method: 'GET' },
      queryAutocomplete: { url: '/api/autocomplete/drugs', method: 'GET', isArray: true },
      getAlternatives: { method: 'GET', url: '/api/drugs/:id/alternatives' },
      voteOnReview: { method: 'POST', url: '/api/drug-reviews/:id/vote' }
    };

    return $resource('/api/drugs/:id', { id: '@id' }, customActions);
  }
})();
|
from __future__ import unicode_literals
from django.db import models
from django.db import transaction
from django.contrib.auth.models import (AbstractBaseUser, PermissionsMixin, BaseUserManager)
from django.conf import settings
from datetime import datetime as date_time, timedelta
import jwt
class UserManager(BaseUserManager):
    """
    Django requires that custom users define their own Manager class.
    Inheriting from ``BaseUserManager`` keeps Django's helper behaviour
    (e.g. email normalisation) while letting us control user creation.
    """

    def create_user(self, username, email, password=None):
        """Create, save and return a ``User`` with an email, username and password."""
        if username is None:
            raise TypeError('Users must have a username.')
        if email is None:
            raise TypeError('Users must have an email address.')
        new_user = self.model(username=username, email=self.normalize_email(email))
        new_user.set_password(password)
        new_user.save()
        return new_user

    def create_superuser(self, username, email, password):
        """Create and return a ``User`` flagged as staff and superuser."""
        if password is None:
            raise TypeError('Superusers must have a password.')
        superuser = self.create_user(username, email, password)
        superuser.is_superuser = True
        superuser.is_staff = True
        superuser.save()
        return superuser
class User(AbstractBaseUser, PermissionsMixin):
    """
    Implements a fully featured User model with admin-compliant permissions.
    """
    email = models.EmailField(unique=True, db_index=True, null=False)
    username = models.CharField(max_length=30, blank=True, null=False, unique=True)
    date_joined = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    objects = UserManager()
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = ['username']

    def save(self, *args, **kwargs):
        """Save the user and return ``self`` so calls can be chained."""
        super(User, self).save(*args, **kwargs)
        return self

    def __str__(self):
        """
        Returns a string representation of this `User`.
        This string is used when a `User` is printed in the console.
        """
        return '%d : %s' % (self.id, self.username)

    @property
    def token(self):
        """
        This method allows us to get the token by calling 'user.token'
        """
        return self.generate_jwt_token()

    def generate_jwt_token(self):
        """Generate a JWT carrying the user's email/username, valid for 7 days."""
        user_details = {'email': self.email,
                        'username': self.username}
        token = jwt.encode(
            {
                'user_data': user_details,
                'exp': date_time.now() + timedelta(days=7)
            }, settings.SECRET_KEY, algorithm='HS256'
        )
        # BUG FIX: PyJWT >= 2.0 returns ``str`` from encode(), on which
        # ``.decode('utf-8')`` raised AttributeError. Only decode when the
        # library returned bytes (PyJWT 1.x behaviour).
        if isinstance(token, bytes):
            return token.decode('utf-8')
        return token
|
"""empty message
Revision ID: ed87b85aedc7
Revises: None
Create Date: 2019-06-25 13:30:30.898811
"""
# revision identifiers, used by Alembic.
revision = 'ed87b85aedc7'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Tables are created parents-first: user, then task/job (FKs -> user),
    # then history (FK -> job).
    op.create_table('user',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('user_type', sa.Integer(), nullable=True),
        sa.Column('name', sa.String(length=100), nullable=True),
        sa.Column('key', sa.String(length=20), nullable=True),
        sa.Column('username', sa.String(length=50), nullable=True),
        sa.Column('password', sa.String(length=120), nullable=False),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('name'),
        sa.UniqueConstraint('username')
    )
    op.create_table('task',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('name', sa.String(length=100), nullable=True),
        sa.Column('total', sa.Integer(), nullable=True),
        sa.Column('path', sa.String(length=100), nullable=True),
        sa.Column('mode', sa.String(length=100), nullable=True),
        sa.Column('owner_id', sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(['owner_id'], ['user.id'], ),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('name'),
        sa.UniqueConstraint('path')
    )
    op.create_table('job',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('user_id', sa.Integer(), nullable=False),
        sa.Column('task_id', sa.Integer(), nullable=False),
        sa.Column('status', sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(['task_id'], ['task.id'], ),
        sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_table('history',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('job_id', sa.Integer(), nullable=False),
        sa.Column('sentence_id', sa.Integer(), nullable=False),
        sa.Column('changes', sa.String(length=1024), nullable=True),
        sa.ForeignKeyConstraint(['job_id'], ['job.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Drop in reverse dependency order so FK constraints are never violated.
    op.drop_table('history')
    op.drop_table('job')
    op.drop_table('task')
    op.drop_table('user')
    # ### end Alembic commands ###
|
//
// Generated by class-dump 3.5 (64 bit) (Debug version compiled Mar 29 2017 23:22:24).
//
// class-dump is Copyright (C) 1997-1998, 2000-2001, 2004-2015 by Steve Nygard.
//
#import <objc/NSObject.h>
@class MMTimer, NSDate, NSMutableData, NSString, NSURLConnection, ProtobufCGIWrap;
@protocol NotifyFromPrtlDelegate;
@interface UrlChannel : NSObject
{
unsigned int m_uiRetryCount;
unsigned int m_uiChannelStatus;
unsigned int m_uiRetStatusCode;
unsigned int m_uiRetContentLength;
unsigned long long m_ui64StartTime;
unsigned long long m_ui64ConnectStartTime;
unsigned long long m_ui64SendStartTime;
unsigned long long m_ui64ReceiveStartTime;
unsigned int m_uiMaxRetryCount;
ProtobufCGIWrap *m_wrapInfo;
id <NotifyFromPrtlDelegate> m_delNotifyFromPrtl;
NSURLConnection *m_ucChannel;
MMTimer *m_tmCheck;
NSDate *m_tLastRecvData;
NSMutableData *m_dtResponseData;
NSString *m_nsIP;
}
@property(copy, nonatomic) NSString *m_nsIP; // @synthesize m_nsIP;
@property(retain, nonatomic) NSMutableData *m_dtResponseData; // @synthesize m_dtResponseData;
@property(nonatomic) unsigned int m_uiMaxRetryCount; // @synthesize m_uiMaxRetryCount;
@property(retain, nonatomic) NSDate *m_tLastRecvData; // @synthesize m_tLastRecvData;
@property(retain, nonatomic) MMTimer *m_tmCheck; // @synthesize m_tmCheck;
@property(retain, nonatomic) NSURLConnection *m_ucChannel; // @synthesize m_ucChannel;
@property(nonatomic) __weak id <NotifyFromPrtlDelegate> m_delNotifyFromPrtl; // @synthesize m_delNotifyFromPrtl;
@property(retain, nonatomic) ProtobufCGIWrap *m_wrapInfo; // @synthesize m_wrapInfo;
- (void).cxx_destruct;
- (void)onReceiveDataLength:(id)arg1;
- (void)connectionDidFinishLoading:(id)arg1;
- (void)connection:(id)arg1 didFailWithError:(id)arg2;
- (void)connection:(id)arg1 didReceiveData:(id)arg2;
- (void)connection:(id)arg1 didReceiveResponse:(id)arg2;
- (void)connection:(id)arg1 didSendBodyData:(long long)arg2 totalBytesWritten:(long long)arg3 totalBytesExpectedToWrite:(long long)arg4;
- (void)connection:(id)arg1 didReceiveAuthenticationChallenge:(id)arg2;
- (_Bool)connection:(id)arg1 canAuthenticateAgainstProtectionSpace:(id)arg2;
- (id)connection:(id)arg1 willSendRequest:(id)arg2 redirectResponse:(id)arg3;
- (void)CheckTimeOut;
- (void)Connect;
- (void)ResetChannel;
- (void)TryReportFailIP;
- (void)Stop;
- (_Bool)Start;
- (void)InitPrtl:(id)arg1;
- (void)dealloc;
- (id)init;
@end
|
import React from 'react';
import { makeStyles } from '@material-ui/core/styles';
import RestoreIcon from '@material-ui/icons/Restore';
import Paper from '@material-ui/core/Paper';
import Table from '@material-ui/core/Table';
import TableBody from '@material-ui/core/TableBody';
import TableCell from '@material-ui/core/TableCell';
import TableContainer from '@material-ui/core/TableContainer';
import TableHead from '@material-ui/core/TableHead';
import TableRow from '@material-ui/core/TableRow';
import IconButton from '@material-ui/core/IconButton';
import { formatDate, getLabel } from '../utils';
const useStyles = makeStyles((theme) => ({
  // Main table sizing.
  table: {
    minWidth: 650,
  },
  // Header cells: grey background with black text.
  tableHeaderCell: {
    backgroundColor: theme.palette.grey[400],
    color: theme.palette.common.black,
  },
  // Zebra-striped, clickable rows.
  // NOTE(review): ConfigsTable below also references classes.tableCell and
  // classes.button, which are not defined here, so those classNames resolve
  // to undefined — confirm whether rules for them were intended.
  tableRow: {
    '&:nth-of-type(odd)': {
      backgroundColor: theme.palette.action.hover,
    },
    cursor: 'pointer',
  },
}));
export default function ConfigsTable(props) {
const classes = useStyles();
const { configs, onRestoreClick } = props;
return (
<TableContainer component={Paper}>
<Table className={classes.table} size="small">
<TableHead>
<TableRow>
<TableCell className={classes.tableHeaderCell}>Region</TableCell>
<TableCell className={classes.tableHeaderCell}>Instance Type</TableCell>
<TableCell className={classes.tableHeaderCell}>Operating System</TableCell>
<TableCell className={classes.tableHeaderCell}>Expires at</TableCell>
<TableCell className={classes.tableHeaderCell} />
</TableRow>
</TableHead>
<TableBody>
{configs.map((config, index) => (
<TableRow
key={index}
className={classes.tableRow}
onClick={(e) => onRestoreClick(e, config)}
>
<TableCell className={classes.tableCell}>
{ getLabel('regions', config.region) }
</TableCell>
<TableCell className={classes.tableCell}>
{ getLabel('instanceTypes', config.instanceType) }
</TableCell>
<TableCell className={classes.tableCell}>{config.operatingSystem}</TableCell>
<TableCell className={classes.tableCell}>{formatDate(config.expiry)}</TableCell>
<TableCell className={classes.tableCell} align="center">
<IconButton
variant="contained"
color="primary"
className={classes.button}
onClick={(e) => onRestoreClick(e, config)}
>
<RestoreIcon fontSize="inherit" />
</IconButton>
</TableCell>
</TableRow>
))}
</TableBody>
</Table>
</TableContainer>
);
}
|
import Route from '@ember/routing/route';
import $ from 'jquery';
import RSVP from 'rsvp';
export default Route.extend({
  /**
   * Fetch news stories and basic info for the researched symbol in parallel.
   * @returns {Promise<{newsStories: *, basicInfo: *}>}
   */
  model() {
    let { symbol } = this.paramsFor('dashboard.research');
    const newsStoriesURL = '/api/getStories?symbol=' + symbol;
    const basicInfoURL = '/api/basicInfo?symbol=' + symbol;

    // BUG FIX: the jQuery ajax option is `type` (alias `method`), not
    // `types`; the misspelled key was silently ignored.
    const newsStoriesAPI = $.ajax({
      url: newsStoriesURL,
      type: 'GET',
      dataType: 'jsonp'
    });
    const basicInfoAPI = $.ajax({
      url: basicInfoURL,
      type: 'GET',
      dataType: 'jsonp',
    });

    return RSVP.hash({
      newsStories: newsStoriesAPI,
      basicInfo: basicInfoAPI
    });
  }
});
|
from django.shortcuts import render
# Create your views here.
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
from apps.user.models import WxUserProfile, UserProfile
from apps.user.serializers import WxLoginSerializer
from apps.utils.wxChecker import checkdata
class WxLoginView(APIView):
    """Login via a WeChat QR-code scan."""

    def post(self, request):
        """
        Handle a WeChat scan-to-login request.

        Validates the (code, encrypteddata, iv) triple, resolves the WeChat
        openid via ``checkdata``, then either refreshes the stored session
        cookie for an existing user or creates the linked
        UserProfile/WxUserProfile pair for a new one.
        """
        wx_data = WxLoginSerializer(data=request.data)
        # Guard clause: reject invalid payloads up front.
        if not wx_data.is_valid():
            # BUG FIX: serializer.error_messages is the *template* dict of
            # default messages; the actual validation failures live in
            # ``serializer.errors``.
            return Response(status=status.HTTP_400_BAD_REQUEST, data=wx_data.errors)
        code = wx_data.validated_data.get('code')
        encrypteddata = wx_data.validated_data.get('encrypteddata')
        iv = wx_data.validated_data.get('iv')
        # Verify the payload with WeChat and decrypt the user info.
        res = checkdata(code, encrypteddata, iv)
        errorinfo = res.get('error', None)
        if errorinfo:
            print(errorinfo, "错误的信息!")
            return Response(status=status.HTTP_400_BAD_REQUEST, data=errorinfo)
        openid = res['openId']
        existing = WxUserProfile.objects.filter(openid=openid).first()
        if existing:
            # Known user: just refresh the stored session cookie.
            existing.cookie = res["cookie"]
            existing.save()
        else:
            # TODO (yxc): wrap the two creations in a transaction so a
            # failure cannot leave an orphaned UserProfile behind.
            # First create the common user...
            new_common_user = UserProfile.objects.create(
                username=res["nickName"],
            )
            # ...then the WeChat-specific profile linked to it.
            new_user = WxUserProfile.objects.create(
                openid=openid,
                cookie=res['cookie'],
                nickname=res['nickName'],
                city=res['city'],
                province=res['province'],
                gender=res['gender'],
                country=res['country'],
                avatar_url=res['avatarUrl'],
                # NOTE(review): 'uesr' looks like a typo for 'user', but it
                # must match the actual model field name — verify against
                # the WxUserProfile model before renaming.
                uesr=new_common_user
            )
            # Use the openid as the initial system password.
            new_user.set_password(raw_password=openid)
            new_user.save()
            res["role"] = "null"
            res["uid"] = "null"
        return Response(status=status.HTTP_201_CREATED, data=res)
|
# SPDX-License-Identifier: Apache-2.0
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import functools
class FunctionStaticVariable(object):
    """Decorator that caches a function's first result on the function itself.

    The decorated callable is invoked at most once; its return value is
    stored as an attribute (named by the decorator argument) on the original
    function object and handed back on every later call.
    """

    def __init__(self, *args, **kwargs):
        # Name of the attribute used to memoize the result.
        self.variable = args[0]

    def __call__(self, func):
        # Bind the wrapped function and the attribute name into a partial so
        # the returned object is callable just like the original.
        return functools.partial(FunctionStaticVariable.retrieve_attr, func, self.variable)

    @staticmethod
    def retrieve_attr(func, var_name, *args, **kwargs):
        # Compute and stash the value only on the very first invocation.
        if not hasattr(func, var_name):
            setattr(func, var_name, func(*args, **kwargs))
        return getattr(func, var_name)
# Decorator alias: ``@with_variable('name')`` caches the decorated function's
# first return value under attribute ``name`` (see FunctionStaticVariable).
with_variable = functools.partial(FunctionStaticVariable)
@with_variable('logger')
def k2o_logger():  # type: () -> logging.Logger
    """Return the shared 'keras2onnx' logger, attaching its handler once."""
    logger = logging.getLogger('keras2onnx')
    if logger.handlers:
        return logger
    handler = logging.StreamHandler()
    handler.setLevel(logging.WARNING)
    logger.addHandler(handler)
    logger.setLevel(logging.WARNING)
    return logger
def set_logger_level(lvl):
    """Set the keras2onnx logger and every attached handler to ``lvl``."""
    logger = k2o_logger()
    if logger.level == lvl:
        return
    logger.setLevel(lvl)
    for handler in logger.handlers:
        handler.setLevel(lvl)
@with_variable('batch_size')
def get_default_batch_size():
    """Return the symbolic batch-dimension placeholder used for graph inputs."""
    return 'N'
def count_dynamic_dim(shape):
    """Return how many entries of ``shape`` are dynamic.

    A dimension counts as static only when it is a non-negative int; anything
    else (None, strings such as 'N', negative ints) is dynamic.
    """
    static = sum(1 for dim in shape if isinstance(dim, int) and dim >= 0)
    return len(shape) - static
def get_producer():
    """Internal helper returning the package-level ``__producer__`` string."""
    from .. import __producer__ as producer
    return producer
def get_producer_version():
    """Internal helper returning the package-level ``__producer_version__``."""
    from .. import __producer_version__ as producer_version
    return producer_version
def get_domain():
    """Internal helper returning the package-level model ``__domain__``."""
    from .. import __domain__ as domain
    return domain
def get_model_version():
    """Internal helper returning the package-level ``__model_version__``."""
    from .. import __model_version__ as model_version
    return model_version
|
#ifndef __MODULEINPUT_H__
// NOTE(review): identifiers with a leading double underscore are reserved for
// the implementation in C++; consider renaming the guard to MODULEINPUT_H.
#define __MODULEINPUT_H__

#include "Module.h"
#include "Globals.h"

// Number of mouse buttons tracked in the mouse_buttons state array.
#define MAX_MOUSE_BUTTONS 5

// Per-key / per-button state reported by the accessors below.
enum KEY_STATE
{
	KEY_IDLE = 0,   // not pressed
	KEY_DOWN,       // pressed this frame
	KEY_REPEAT,     // held down
	KEY_UP          // released this frame
};

// Input module: exposes the current keyboard/mouse state through
// constant-time accessors. State is presumably refreshed once per frame in
// PreUpdate (body not visible here — confirm in ModuleInput.cpp).
class ModuleInput : public Module
{
public:
	ModuleInput(Application* app, bool start_enabled = true);
	~ModuleInput();

	bool Init();
	update_status PreUpdate(float dt);
	bool CleanUp();

	// Current state of the keyboard key with scancode `id`.
	KEY_STATE GetKey(int id) const
	{
		return keyboard[id];
	}

	// Current state of mouse button `id` (0..MAX_MOUSE_BUTTONS-1).
	KEY_STATE GetMouseButton(int id) const
	{
		return mouse_buttons[id];
	}

	// Absolute cursor position.
	int GetMouseX() const
	{
		return mouse_x;
	}

	int GetMouseY() const
	{
		return mouse_y;
	}

	// Wheel value; presumably the scroll delta — confirm against the updater.
	int GetMouseZ() const
	{
		return mouse_z;
	}

	// Cursor movement since the last update.
	int GetMouseXMotion() const
	{
		return mouse_x_motion;
	}

	int GetMouseYMotion() const
	{
		return mouse_y_motion;
	}

private:
	// Heap-allocated keyboard state array (owned; see Init/CleanUp).
	KEY_STATE* keyboard;
	KEY_STATE mouse_buttons[MAX_MOUSE_BUTTONS];
	int mouse_x;
	int mouse_y;
	int mouse_z;
	int mouse_x_motion;
	int mouse_y_motion;
};

#endif
|
import styled from 'styled-components';
// Contact-list panel. FIX inside the `img` rule: the border value was quoted
// ('5px solid transparent'), which is invalid CSS, so the whole declaration
// was silently dropped; the quotes are removed.
export const Container = styled.div`
  display: flex;
  flex-direction: row;
  background: #fff;
  width: 28%;
  position: absolute;
  left: 2%;
  top: 3%;
  height: 94%;
  box-shadow: 0px 0px 10px 2px rgba(0, 0, 0, 0.25);
  border-radius: 5px;
  overflow: auto;
  ul {
    margin: 15px;
    list-style: none;
    width: 100%;
    li {
      display: flex;
      align-items: center;
      justify-content: space-between;
      padding: 10px 0 10px 0;
      border-bottom: 0.5px solid #999;
      div {
        display: flex;
        img {
          border: 5px solid transparent;
          border-color: #9b65e6;
          border-radius: 50px;
          width: 48px;
          height: 48px;
        }
        div {
          display: flex;
          flex-direction: column;
          align-items: flex-start;
          margin-left: 10px;
          strong {
            font-weight: bold;
          }
          span {
            font-weight: normal;
            font-size: 12px;
            color: #999;
          }
        }
      }
      div {
        display: flex;
        align-items: center;
        i {
          cursor: pointer;
        }
        i.remove {
          color: #f00;
          margin-right: 10px;
        }
        i.show {
          color: #999;
        }
      }
    }
  }
`;
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
class Hepmc(CMakePackage):
    """The HepMC package is an object oriented, C++ event record for
    High Energy Physics Monte Carlo generators and simulation."""

    homepage = "https://hepmc.web.cern.ch/hepmc/"
    url = "https://hepmc.web.cern.ch/hepmc/releases/hepmc2.06.11.tgz"

    tags = ['hep']

    # HepMC2 release line; HepMC3 is a separate package.
    version('2.06.11', sha256='86b66ea0278f803cde5774de8bd187dd42c870367f1cbf6cdaec8dc7cf6afc10')
    version('2.06.10', sha256='5adedd9e3f7447e1e5fc01b72f745ab87da2c1611df89208bb3d7c6ea94c11a4')
    version('2.06.09', sha256='e0f8fddd38472c5615210894444686ac5d72df3be682f7d151b562b236d9b422')
    version('2.06.08', sha256='8be6c1793e0a045f07ddb88bb64b46de7e66a52e75fb72b3f82f9a3e3ba8a8ce')
    version('2.06.07', sha256='a0bdd6f36a3cc4cb59d6eb15cef9d46ce9b3739cae3324e81ebb2df6943e4594')
    version('2.06.06', sha256='8cdff26c10783ed4248220a84a43b7e1f9b59cc2c9a29bd634d024ca469db125')
    version('2.06.05', sha256='4c411077cc97522c03b74f973264b8d9fd2b6ccec0efc7ceced2645371c73618')

    # Units the library is built with; forwarded to CMake in cmake_args.
    variant('length', default='MM', values=('CM', 'MM'), multi=False,
            description='Unit of length')
    variant('momentum', default='GEV', values=('GEV', 'MEV'), multi=False,
            description='Unit of momentum')

    depends_on('cmake@2.8.9:', type='build')

    def cmake_args(self):
        # Turn the unit variants into CMake cache definitions.
        return [
            self.define_from_variant('momentum'),
            self.define_from_variant('length')
        ]

    def url_for_version(self, version):
        # Tarballs up to 2.06.08 live on the old LCG server under a different
        # naming scheme; newer releases moved to the HepMC CERN site.
        if version <= Version("2.06.08"):
            url = "http://lcgapp.cern.ch/project/simu/HepMC/download/HepMC-{0}.tar.gz"
        else:
            url = "https://hepmc.web.cern.ch/hepmc/releases/hepmc{0}.tgz"
        return url.format(version)
|
/**
* Copyright 2014 Google Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @fileoverview Receiver / Player sample
* <p>
* This sample demonstrates how to build your own Receiver for use with Google
* Cast. One of the goals of this sample is to be fully UX compliant.
* </p>
* <p>
* A receiver is typically an HTML5 application with a html, css, and JavaScript
* components. It demonstrates the following Cast Receiver API's:
* </p>
* <ul>
* <li>CastReceiverManager</li>
* <li>MediaManager</li>
* <li>Media Player Library</li>
* </ul>
* <p>
* It also demonstrates the following player functions:
* </p>
* <ul>
* <li>Branding Screen</li>
* <li>Playback Complete image</li>
* <li>Limited Animation</li>
* <li>Buffering Indicator</li>
* <li>Seeking</li>
* <li>Pause indicator</li>
* <li>Loading Indicator</li>
* </ul>
*
*/
'use strict';

/**
 * Creates the namespace, preserving any previously attached members.
 */
var sampleplayer = sampleplayer || {};
/**
* <p>
* Cast player constructor - This does the following:
* </p>
* <ol>
* <li>Bind a listener to visibilitychange</li>
* <li>Set the default state</li>
* <li>Bind event listeners for img & video tags<br />
* error, stalled, waiting, playing, pause, ended, timeupdate, seeking, &
* seeked</li>
* <li>Find and remember the various elements</li>
* <li>Create the MediaManager and bind to onLoad & onStop</li>
* </ol>
*
* @param {!Element} element the element to attach the player
* @struct
* @constructor
* @export
*/
sampleplayer.CastPlayer = function(element) {

  /**
   * The debug setting to control receiver, MPL and player logging.
   * @private {boolean}
   */
  this.debug_ = sampleplayer.DISABLE_DEBUG_;
  if (this.debug_) {
    cast.player.api.setLoggerLevel(cast.player.api.LoggerLevel.DEBUG);
    cast.receiver.logger.setLevelValue(cast.receiver.LoggerLevel.DEBUG);
  }

  /**
   * The DOM element the player is attached.
   * @private {!Element}
   */
  this.element_ = element;

  /**
   * The current type of the player.
   * @private {sampleplayer.Type}
   */
  this.type_;

  /**
   * Whether player is showing live content.
   * @private {boolean}
   */
  this.isLiveStream_ = false;

  this.setType_(sampleplayer.Type.UNKNOWN, false);

  /**
   * The current state of the player.
   * @private {sampleplayer.State}
   */
  this.state_;

  /**
   * Timestamp when state transition happened last time.
   * @private {number}
   */
  this.lastStateTransitionTime_ = 0;

  this.setState_(sampleplayer.State.LAUNCHING, false);

  /**
   * The id returned by setInterval for the screen burn timer
   * @private {number|undefined}
   */
  this.burnInPreventionIntervalId_;

  /**
   * The id returned by setTimeout for the idle timer
   * @private {number|undefined}
   */
  this.idleTimerId_;

  /**
   * The id of timer to handle seeking UI.
   * @private {number|undefined}
   */
  this.seekingTimerId_;

  /**
   * The id of timer to defer setting state.
   * @private {number|undefined}
   */
  this.setStateDelayTimerId_;

  /**
   * Current application state.
   * @private {string|undefined}
   */
  this.currentApplicationState_;

  // The UI elements below are required; getElementByClass_ throws when one
  // is missing from the attached DOM subtree.

  /**
   * The DOM element for the inner portion of the progress bar.
   * @private {!Element}
   */
  this.progressBarInnerElement_ = this.getElementByClass_(
      '.controls-progress-inner');

  /**
   * The DOM element for the thumb portion of the progress bar.
   * @private {!Element}
   */
  this.progressBarThumbElement_ = this.getElementByClass_(
      '.controls-progress-thumb');

  /**
   * The DOM element for the current time label.
   * @private {!Element}
   */
  this.curTimeElement_ = this.getElementByClass_('.controls-cur-time');

  /**
   * The DOM element for the total time label.
   * @private {!Element}
   */
  this.totalTimeElement_ = this.getElementByClass_('.controls-total-time');

  /**
   * The DOM element for the preview time label.
   * @private {!Element}
   */
  this.previewModeTimerElement_ = this.getElementByClass_('.preview-mode-timer-countdown');

  /**
   * Handler for buffering-related events for MediaElement.
   * @private {function()}
   */
  this.bufferingHandler_ = this.onBuffering_.bind(this);

  /**
   * Media player to play given manifest.
   * @private {cast.player.api.Player}
   */
  this.player_ = null;

  /**
   * Media player used to preload content.
   * @private {cast.player.api.Player}
   */
  this.preloadPlayer_ = null;

  /**
   * Text Tracks currently supported.
   * @private {?sampleplayer.TextTrackType}
   */
  this.textTrackType_ = null;

  /**
   * Whether player app should handle autoplay behavior.
   * @private {boolean}
   */
  this.playerAutoPlay_ = false;

  /**
   * Whether player app should display the preview mode UI.
   * @private {boolean}
   */
  this.displayPreviewMode_ = false;

  /**
   * Id of deferred play callback
   * @private {?number}
   */
  this.deferredPlayCallbackId_ = null;

  /**
   * Whether the player is ready to receive messages after a LOAD request.
   * @private {boolean}
   */
  this.playerReady_ = false;

  /**
   * Whether the player has received the metadata loaded event after a LOAD
   * request.
   * @private {boolean}
   */
  this.metadataLoaded_ = false;

  /**
   * The media element.
   * @private {HTMLMediaElement}
   */
  this.mediaElement_ = /** @type {HTMLMediaElement} */
      (this.element_.querySelector('video'));
  // Wire raw MediaElement events into the player's state handlers.
  this.mediaElement_.addEventListener('error', this.onError_.bind(this), false);
  this.mediaElement_.addEventListener('playing', this.onPlaying_.bind(this),
      false);
  this.mediaElement_.addEventListener('pause', this.onPause_.bind(this), false);
  this.mediaElement_.addEventListener('ended', this.onEnded_.bind(this), false);
  this.mediaElement_.addEventListener('abort', this.onAbort_.bind(this), false);
  this.mediaElement_.addEventListener('timeupdate', this.onProgress_.bind(this),
      false);
  this.mediaElement_.addEventListener('seeking', this.onSeekStart_.bind(this),
      false);
  this.mediaElement_.addEventListener('seeked', this.onSeekEnd_.bind(this),
      false);

  /**
   * The cast receiver manager.
   * @private {!cast.receiver.CastReceiverManager}
   */
  this.receiverManager_ = cast.receiver.CastReceiverManager.getInstance();
  this.receiverManager_.onReady = this.onReady_.bind(this);
  this.receiverManager_.onSenderDisconnected =
      this.onSenderDisconnected_.bind(this);
  this.receiverManager_.onVisibilityChanged =
      this.onVisibilityChanged_.bind(this);
  this.receiverManager_.setApplicationState(
      sampleplayer.getApplicationState_());

  /**
   * The remote media object.
   * @private {cast.receiver.MediaManager}
   */
  this.mediaManager_ = new cast.receiver.MediaManager(this.mediaElement_);

  // The MediaManager default callbacks are saved as *Orig_ before being
  // overridden so the overrides can delegate to the stock behavior.

  /**
   * The original load callback.
   * @private {?function(cast.receiver.MediaManager.Event)}
   */
  this.onLoadOrig_ =
      this.mediaManager_.onLoad.bind(this.mediaManager_);
  this.mediaManager_.onLoad = this.onLoad_.bind(this);

  /**
   * The original editTracksInfo callback
   * @private {?function(!cast.receiver.MediaManager.Event)}
   */
  this.onEditTracksInfoOrig_ =
      this.mediaManager_.onEditTracksInfo.bind(this.mediaManager_);
  this.mediaManager_.onEditTracksInfo = this.onEditTracksInfo_.bind(this);

  /**
   * The original metadataLoaded callback
   * @private {?function(!cast.receiver.MediaManager.LoadInfo)}
   */
  this.onMetadataLoadedOrig_ =
      this.mediaManager_.onMetadataLoaded.bind(this.mediaManager_);
  this.mediaManager_.onMetadataLoaded = this.onMetadataLoaded_.bind(this);

  /**
   * The original stop callback.
   * @private {?function(cast.receiver.MediaManager.Event)}
   */
  this.onStopOrig_ =
      this.mediaManager_.onStop.bind(this.mediaManager_);
  this.mediaManager_.onStop = this.onStop_.bind(this);

  /**
   * The original metadata error callback.
   * @private {?function(!cast.receiver.MediaManager.LoadInfo)}
   */
  this.onLoadMetadataErrorOrig_ =
      this.mediaManager_.onLoadMetadataError.bind(this.mediaManager_);
  this.mediaManager_.onLoadMetadataError = this.onLoadMetadataError_.bind(this);

  /**
   * The original error callback
   * @private {?function(!Object)}
   */
  this.onErrorOrig_ =
      this.mediaManager_.onError.bind(this.mediaManager_);
  this.mediaManager_.onError = this.onError_.bind(this);

  this.mediaManager_.customizedStatusCallback =
      this.customizedStatusCallback_.bind(this);

  this.mediaManager_.onPreload = this.onPreload_.bind(this);
  this.mediaManager_.onCancelPreload = this.onCancelPreload_.bind(this);
};
/**
 * The amount of time in a given state before the player goes idle.
 */
sampleplayer.IDLE_TIMEOUT = {
  LAUNCHING: 1000 * 60 * 5, // 5 minutes
  LOADING: 1000 * 60 * 5, // 5 minutes
  PAUSED: 1000 * 60 * 20, // 20 minutes
  DONE: 1000 * 60 * 5, // 5 minutes
  IDLE: 1000 * 60 * 5 // 5 minutes
};

/**
 * Describes the type of media being played.
 *
 * @enum {string}
 */
sampleplayer.Type = {
  AUDIO: 'audio',
  VIDEO: 'video',
  UNKNOWN: 'unknown'
};

/**
 * Describes the type of captions being used.
 *
 * @enum {string}
 */
sampleplayer.TextTrackType = {
  SIDE_LOADED_TTML: 'ttml',
  SIDE_LOADED_VTT: 'vtt',
  SIDE_LOADED_UNSUPPORTED: 'unsupported',
  EMBEDDED: 'embedded'
};

/**
 * Mime types of the supported side-loaded caption formats.
 *
 * @enum {string}
 */
sampleplayer.CaptionsMimeType = {
  TTML: 'application/ttml+xml',
  VTT: 'text/vtt'
};

/**
 * Describes the type of track.
 *
 * @enum {string}
 */
sampleplayer.TrackType = {
  AUDIO: 'audio',
  VIDEO: 'video',
  TEXT: 'text'
};

/**
 * Describes the state of the player.
 *
 * @enum {string}
 */
sampleplayer.State = {
  LAUNCHING: 'launching',
  LOADING: 'loading',
  BUFFERING: 'buffering',
  PLAYING: 'playing',
  PAUSED: 'paused',
  DONE: 'done',
  IDLE: 'idle'
};

/**
 * CORS Proxy URL to be used in streaming media with no CORS headers.
 * NOTE(review): served over plain http; a receiver page loaded over https
 * may block this as mixed content — confirm before relying on it.
 *
 * @type {string}
 */
sampleplayer.CORS_PROXY_URL = 'http://hoydaa-tv.appspot.com/proxy/';

/**
 * The amount of time (in ms) a screen should stay idle before burn in
 * prevention kicks in
 *
 * @type {number}
 */
sampleplayer.BURN_IN_TIMEOUT = 30 * 1000;

/**
 * The minimum duration (in ms) that media info is displayed.
 *
 * @const @private {number}
 */
sampleplayer.MEDIA_INFO_DURATION_ = 3 * 1000;

/**
 * Transition animation duration (in sec).
 *
 * @const @private {number}
 */
sampleplayer.TRANSITION_DURATION_ = 1.5;

/**
 * Const to enable debugging.
 *
 * @const @private {boolean}
 */
sampleplayer.ENABLE_DEBUG_ = true;

/**
 * Const to disable debugging.
 *
 * @const @private {boolean}
 */
sampleplayer.DISABLE_DEBUG_ = false;
/**
* Returns the element with the given class name
*
* @param {string} className The class name of the element to return.
* @return {!Element}
* @throws {Error} If given class cannot be found.
* @private
*/
sampleplayer.CastPlayer.prototype.getElementByClass_ = function(className) {
  // Scope the lookup to this player's root element, not the whole document.
  var match = this.element_.querySelector(className);
  if (!match) {
    throw Error('Cannot find element with class: ' + className);
  }
  return match;
};
/**
* Returns this player's media element.
*
* @return {HTMLMediaElement} The media element.
* @export
*/
sampleplayer.CastPlayer.prototype.getMediaElement = function() {
  // Simple accessor for the wrapped <video> element.
  var mediaElement = this.mediaElement_;
  return mediaElement;
};
/**
* Returns this player's media manager.
*
* @return {cast.receiver.MediaManager} The media manager.
* @export
*/
sampleplayer.CastPlayer.prototype.getMediaManager = function() {
  // Simple accessor for the Cast MediaManager instance.
  var mediaManager = this.mediaManager_;
  return mediaManager;
};
/**
* Returns this player's MPL player.
*
* @return {cast.player.api.Player} The current MPL player.
* @export
*/
sampleplayer.CastPlayer.prototype.getPlayer = function() {
  // Simple accessor for the current MPL player (null when not in use).
  var player = this.player_;
  return player;
};
/**
* Starts the player.
*
* @export
*/
sampleplayer.CastPlayer.prototype.start = function() {
  // Begin accepting sender connections.
  var receiverManager = this.receiverManager_;
  receiverManager.start();
};
/**
* Preloads the given data.
*
* @param {!cast.receiver.media.MediaInformation} mediaInformation The
* asset media information.
* @return {boolean} Whether the media can be preloaded.
* @export
*/
sampleplayer.CastPlayer.prototype.preload = function(mediaInformation) {
  this.log_('preload');
  var info = mediaInformation || {};
  // Formats that cannot be preloaded (mp4...) only get the preview-mode UI.
  if (sampleplayer.canDisplayPreview_(info)) {
    this.showPreviewMode_(mediaInformation);
    return true;
  }
  if (!sampleplayer.supportsPreload_(info)) {
    this.log_('preload: no supportsPreload_');
    return false;
  }
  // Discard any previously preloaded player before starting a new preload.
  if (this.preloadPlayer_) {
    this.preloadPlayer_.unload();
    this.preloadPlayer_ = null;
  }
  // Only videos are supported for now.
  var couldPreload = this.preloadVideo_(mediaInformation);
  if (couldPreload) {
    this.showPreviewMode_(mediaInformation);
  }
  this.log_('preload: couldPreload=' + couldPreload);
  return couldPreload;
};
/**
* Display preview mode metadata.
*
* @param {boolean} show whether player is showing preview mode metadata
* @export
*/
sampleplayer.CastPlayer.prototype.showPreviewModeMetadata = function(show) {
  // CSS keys off the 'preview-mode' attribute ("true"/"false").
  var flag = show.toString();
  this.element_.setAttribute('preview-mode', flag);
};
/**
* Show the preview mode UI.
*
* @param {!cast.receiver.media.MediaInformation} mediaInformation The
* asset media information.
* @private
*/
sampleplayer.CastPlayer.prototype.showPreviewMode_ = function(mediaInformation) {
  // Mark preview mode active, then populate and reveal the metadata UI.
  this.displayPreviewMode_ = true;
  this.loadPreviewModeMetadata_(mediaInformation);
  this.showPreviewModeMetadata(true);
};
/**
* Hide the preview mode UI.
*
* @private
*/
sampleplayer.CastPlayer.prototype.hidePreviewMode_ = function() {
  // Hide the metadata UI first, then clear the mode flag.
  this.showPreviewModeMetadata(false);
  this.displayPreviewMode_ = false;
};
/**
* Preloads some video content.
*
* @param {!cast.receiver.media.MediaInformation} mediaInformation The
* asset media information.
* @return {boolean} Whether the video can be preloaded.
* @private
*/
sampleplayer.CastPlayer.prototype.preloadVideo_ = function(mediaInformation) {
  this.log_('preloadVideo_');
  var self = this;
  // Without a known streaming protocol the content cannot be preloaded.
  var protocolFunc = sampleplayer.getProtocolFunction_(mediaInformation);
  if (!protocolFunc) {
    this.log_('No protocol found for preload');
    return false;
  }
  var url = mediaInformation.contentId;
  var customData = mediaInformation.customData || {};
  if (customData.corsProxy) {
    // Route the stream through the CORS proxy when the sender requested it.
    url = sampleplayer.getProxiedUrl_(url);
  }
  var host = new cast.player.api.Host({
    'url': url,
    'mediaElement': self.mediaElement_
  });
  // On preload failure, discard the preload player and the preview UI.
  host.onError = function() {
    self.preloadPlayer_.unload();
    self.preloadPlayer_ = null;
    self.showPreviewModeMetadata(false);
    self.displayPreviewMode_ = false;
    self.log_('Error during preload');
  };
  self.preloadPlayer_ = new cast.player.api.Player(host);
  self.preloadPlayer_.preload(protocolFunc(host));
  return true;
};
/**
* Loads the given data.
*
* @param {!cast.receiver.MediaManager.LoadInfo} info The load request info.
* @export
*/
sampleplayer.CastPlayer.prototype.load = function(info) {
  this.log_('onLoad_');
  // A new load cancels any pending idle teardown.
  clearTimeout(this.idleTimerId_);
  var self = this;
  var media = info.message.media || {};
  var contentType = media.contentType;
  var playerType = sampleplayer.getType_(media);
  var isLiveStream = media.streamType === cast.receiver.media.StreamType.LIVE;
  if (!media.contentId) {
    this.log_('Load failed: no content');
    self.onLoadMetadataError_(info);
  } else if (playerType === sampleplayer.Type.UNKNOWN) {
    this.log_('Load failed: unknown content type: ' + contentType);
    self.onLoadMetadataError_(info);
  } else {
    this.log_('Loading: ' + playerType);
    self.resetMediaElement_();
    self.setType_(playerType, isLiveStream);
    var preloaded = false;
    switch (playerType) {
      case sampleplayer.Type.AUDIO:
        self.loadAudio_(info);
        break;
      case sampleplayer.Type.VIDEO:
        // loadVideo_ reports whether the content had already been preloaded.
        preloaded = self.loadVideo_(info);
        break;
    }
    self.playerReady_ = false;
    self.metadataLoaded_ = false;
    self.loadMetadata_(media);
    // Loading real content replaces any preview-mode UI.
    self.showPreviewModeMetadata(false);
    self.displayPreviewMode_ = false;
    sampleplayer.preload_(media, function() {
      self.log_('preloaded=' + preloaded);
      if (preloaded) {
        // Data is ready to play so transition directly to playing.
        self.setState_(sampleplayer.State.PLAYING, false);
        self.playerReady_ = true;
        self.maybeSendLoadCompleted_(info);
        // Don't display metadata again, since autoplay already did that.
        self.deferPlay_(0);
        self.playerAutoPlay_ = false;
      } else {
        sampleplayer.transition_(self.element_, sampleplayer.TRANSITION_DURATION_, function() {
          self.setState_(sampleplayer.State.LOADING, false);
          // Only send load completed after we reach this point so the media
          // manager state is still loading and the sender can't send any PLAY
          // messages
          self.playerReady_ = true;
          self.maybeSendLoadCompleted_(info);
          if (self.playerAutoPlay_) {
            // Make sure media info is displayed long enough before playback
            // starts.
            self.deferPlay_(sampleplayer.MEDIA_INFO_DURATION_);
            self.playerAutoPlay_ = false;
          }
        });
      }
    });
  }
};
/**
* Sends the load complete message to the sender if the two necessary conditions
* are met, the player is ready for messages and the loaded metadata event has
* been received.
* @param {!cast.receiver.MediaManager.LoadInfo} info The load request info.
* @private
*/
sampleplayer.CastPlayer.prototype.maybeSendLoadCompleted_ = function(info) {
  // Both preconditions must hold before the sender may be notified.
  if (!this.playerReady_) {
    this.log_('Deferring load response, player not ready');
    return;
  }
  if (!this.metadataLoaded_) {
    this.log_('Deferring load response, loadedmetadata event not received');
    return;
  }
  this.onMetadataLoadedOrig_(info);
  this.log_('Sent load response, player is ready and metadata loaded');
};
/**
* Resets the media element.
*
* @private
*/
sampleplayer.CastPlayer.prototype.resetMediaElement_ = function() {
  this.log_('resetMediaElement_');
  // Tear down any active MPL player and forget the caption type.
  var player = this.player_;
  if (player) {
    player.unload();
    this.player_ = null;
  }
  this.textTrackType_ = null;
};
/**
* Loads the metadata for the given media.
*
* @param {!cast.receiver.media.MediaInformation} media The media.
* @private
*/
sampleplayer.CastPlayer.prototype.loadMetadata_ = function(media) {
  this.log_('loadMetadata_');
  // Audio-only Cast devices have no display; skip all UI updates.
  if (sampleplayer.isCastForAudioDevice_()) {
    return;
  }
  var metadata = media.metadata || {};
  sampleplayer.setInnerText_(
      this.element_.querySelector('.media-title'), metadata.title);
  sampleplayer.setInnerText_(
      this.element_.querySelector('.media-subtitle'), metadata.subtitle);
  var artwork = sampleplayer.getMediaImageUrl_(media);
  if (artwork) {
    sampleplayer.setBackgroundImage_(
        this.element_.querySelector('.media-artwork'), artwork);
  }
};
/**
* Loads the metadata for the given preview mode media.
*
* @param {!cast.receiver.media.MediaInformation} media The media.
* @private
*/
sampleplayer.CastPlayer.prototype.loadPreviewModeMetadata_ = function(media) {
  this.log_('loadPreviewModeMetadata_');
  // Audio-only Cast devices have no display; skip all UI updates.
  if (sampleplayer.isCastForAudioDevice_()) {
    return;
  }
  var metadata = media.metadata || {};
  sampleplayer.setInnerText_(
      this.element_.querySelector('.preview-mode-title'), metadata.title);
  sampleplayer.setInnerText_(
      this.element_.querySelector('.preview-mode-subtitle'), metadata.subtitle);
  var artwork = sampleplayer.getMediaImageUrl_(media);
  if (artwork) {
    sampleplayer.setBackgroundImage_(
        this.element_.querySelector('.preview-mode-artwork'), artwork);
  }
};
/**
* Lets player handle autoplay, instead of depending on underlying
* MediaElement to handle it. By this way, we can make sure that media playback
* starts after loading screen is displayed.
*
* @param {!cast.receiver.MediaManager.LoadInfo} info The load request info.
* @private
*/
sampleplayer.CastPlayer.prototype.letPlayerHandleAutoPlay_ = function(info) {
  this.log_('letPlayerHandleAutoPlay_: ' + info.message.autoplay);
  var requestedAutoplay = info.message.autoplay;
  // Suppress the MediaElement's own autoplay; the app drives playback start
  // itself so the loading screen is guaranteed to be shown first.
  info.message.autoplay = false;
  this.mediaElement_.autoplay = false;
  // Default to autoplay when the sender left it unspecified (null/undefined).
  this.playerAutoPlay_ = requestedAutoplay == null ? true : requestedAutoplay;
};
/**
* Loads some audio content.
*
* @param {!cast.receiver.MediaManager.LoadInfo} info The load request info.
* @private
*/
sampleplayer.CastPlayer.prototype.loadAudio_ = function(info) {
  this.log_('loadAudio_');
  // Audio goes through the default media manager pipeline; just take over
  // autoplay handling first.
  this.letPlayerHandleAutoPlay_(info);
  this.loadDefault_(info);
};
/**
* Loads some video content.
*
* @param {!cast.receiver.MediaManager.LoadInfo} info The load request info.
* @return {boolean} Whether the media was preloaded
* @private
*/
sampleplayer.CastPlayer.prototype.loadVideo_ = function(info) {
  this.log_('loadVideo_');
  var self = this;
  var url = info.message.media.contentId;
  // FIX: `protocolFunc` was declared twice (`var protocolFunc = null;`
  // immediately shadowed by a second `var`); a single declaration suffices.
  var protocolFunc = sampleplayer.getProtocolFunction_(info.message.media);
  var wasPreloaded = false;
  var customData = info.message.media.customData || {};
  var corsProxy = customData.corsProxy || false;
  if (corsProxy) {
    url = sampleplayer.getProxiedUrl_(url);
  }
  this.letPlayerHandleAutoPlay_(info);
  if (!protocolFunc) {
    this.log_('loadVideo_: using MediaElement');
    this.mediaElement_.addEventListener('stalled', this.bufferingHandler_,
        false);
    this.mediaElement_.addEventListener('waiting', this.bufferingHandler_,
        false);
  } else {
    this.log_('loadVideo_: using Media Player Library');
    // When MPL is used, buffering status should be detected by
    // getState()['underflow'].
    this.mediaElement_.removeEventListener('stalled', this.bufferingHandler_);
    this.mediaElement_.removeEventListener('waiting', this.bufferingHandler_);
    // Shared error path: unload player and trigger error event on the
    // media element.
    var loadErrorCallback = function() {
      if (self.player_) {
        self.resetMediaElement_();
        self.mediaElement_.dispatchEvent(new Event('error'));
      }
    };
    // If we have not preloaded, or the preloaded content does not match the
    // content that needs to be loaded, perform a full load.
    if (!this.preloadPlayer_ || (this.preloadPlayer_.getHost &&
        this.preloadPlayer_.getHost().url != url)) {
      if (this.preloadPlayer_) {
        this.preloadPlayer_.unload();
        this.preloadPlayer_ = null;
      }
      this.log_('Regular video load');
      var host = new cast.player.api.Host({
        'url': url,
        'mediaElement': this.mediaElement_
      });
      host.onError = loadErrorCallback;
      this.player_ = new cast.player.api.Player(host);
      this.player_.load(protocolFunc(host));
    } else {
      this.log_('Preloaded video load');
      this.player_ = this.preloadPlayer_;
      this.preloadPlayer_ = null;
      // Replace the "preload" error callback with the "load" error callback
      this.player_.getHost().onError = loadErrorCallback;
      this.player_.load();
      wasPreloaded = true;
    }
  }
  this.loadMediaManagerInfo_(info, !!protocolFunc);
  return wasPreloaded;
};
/**
* Loads media and tracks info into media manager.
*
* @param {!cast.receiver.MediaManager.LoadInfo} info The load request info.
* @param {boolean} loadOnlyTracksMetadata Only load the tracks metadata (if
* it is in the info provided).
* @private
*/
sampleplayer.CastPlayer.prototype.loadMediaManagerInfo_ =
    function(info, loadOnlyTracksMetadata) {
  if (!loadOnlyTracksMetadata) {
    // Media manager can load this media itself: its default onLoad API loads
    // the media plus tracks metadata, and processes cues for vtt tracks.
    this.loadDefault_(info);
    return;
  }
  // MPL-handled media bypasses the media manager's default onLoad, but the
  // tracks metadata must still be registered there (so tracks can be managed
  // and reported in status messages) when side-loaded tracks are provided.
  this.maybeLoadSideLoadedTracksMetadata_(info);
};
/**
* Sets the captions type based on the text tracks.
*
* @param {!cast.receiver.MediaManager.LoadInfo} info The load request info.
* @private
*/
sampleplayer.CastPlayer.prototype.readSideLoadedTextTrackType_ =
    function(info) {
  if (!info.message || !info.message.media || !info.message.media.tracks) {
    return;
  }
  for (var i = 0; i < info.message.media.tracks.length; i++) {
    // Remember the type chosen so far so mixed-type loads can be detected.
    var oldTextTrackType = this.textTrackType_;
    if (info.message.media.tracks[i].type !=
        cast.receiver.media.TrackType.TEXT) {
      continue;
    }
    if (this.isTtmlTrack_(info.message.media.tracks[i])) {
      this.textTrackType_ =
          sampleplayer.TextTrackType.SIDE_LOADED_TTML;
    } else if (this.isVttTrack_(info.message.media.tracks[i])) {
      this.textTrackType_ =
          sampleplayer.TextTrackType.SIDE_LOADED_VTT;
    } else {
      this.log_('Unsupported side loaded text track types');
      this.textTrackType_ =
          sampleplayer.TextTrackType.SIDE_LOADED_UNSUPPORTED;
      break;
    }
    // We do not support text tracks with different caption types for a single
    // piece of content
    if (oldTextTrackType && oldTextTrackType != this.textTrackType_) {
      this.log_('Load has inconsistent text track types');
      this.textTrackType_ =
          sampleplayer.TextTrackType.SIDE_LOADED_UNSUPPORTED;
      break;
    }
  }
};
/**
* If there is tracks information in the LoadInfo, it loads the side loaded
* tracks information in the media manager without loading media.
*
* @param {!cast.receiver.MediaManager.LoadInfo} info The load request info.
* @private
*/
sampleplayer.CastPlayer.prototype.maybeLoadSideLoadedTracksMetadata_ =
    function(info) {
  var message = info.message;
  // No side-loaded tracks means we are likely in an embedded-captions
  // scenario; that case is handled from the onMetadataLoaded_ callback.
  if (!message || !message.media) {
    return;
  }
  var tracks = message.media.tracks;
  if (!tracks || tracks.length == 0) {
    return;
  }
  var tracksInfo = /** @type {cast.receiver.media.TracksInfo} **/ ({
    tracks: tracks,
    activeTrackIds: message.activeTrackIds,
    textTrackStyle: message.media.textTrackStyle
  });
  this.mediaManager_.loadTracksInfo(tracksInfo);
};
/**
* Loads embedded tracks information without loading media.
* If there is embedded tracks information, it loads the tracks information
* in the media manager without loading media.
*
* @param {!cast.receiver.MediaManager.LoadInfo} info The load request info.
* @private
*/
sampleplayer.CastPlayer.prototype.maybeLoadEmbeddedTracksMetadata_ =
    function(info) {
  if (!info.message || !info.message.media) {
    return;
  }
  var tracksInfo = this.readInBandTracksInfo_();
  if (!tracksInfo) {
    return;
  }
  // In-band tracks found: remember the caption type and register the tracks
  // (carrying the sender-provided style) with the media manager.
  this.textTrackType_ = sampleplayer.TextTrackType.EMBEDDED;
  tracksInfo.textTrackStyle = info.message.media.textTrackStyle;
  this.mediaManager_.loadTracksInfo(tracksInfo);
};
/**
 * Processes ttml tracks and enables the active ones, lazily creating the
 * MPL player when needed (TTML rendering requires MPL).
 *
 * @param {!Array.<number>} activeTrackIds The active tracks.
 * @param {!Array.<cast.receiver.media.Track>} tracks The track definitions.
 * @private
 */
sampleplayer.CastPlayer.prototype.processTtmlCues_ =
    function(activeTrackIds, tracks) {
  if (activeTrackIds.length == 0) {
    return;
  }
  // If there is an active text track that is using ttml, apply it.
  for (var i = 0; i < tracks.length; i++) {
    var track = tracks[i];
    // Skip tracks that are inactive or not in TTML format. The manual
    // membership scan was replaced with Array.prototype.indexOf.
    if (activeTrackIds.indexOf(track.trackId) === -1 ||
        !this.isTtmlTrack_(track)) {
      continue;
    }
    if (!this.player_) {
      // We do not have a player; create one so TTML captions can be loaded.
      var host = new cast.player.api.Host({
        'url': '',
        'mediaElement': this.mediaElement_
      });
      this.protocol_ = null;
      this.player_ = new cast.player.api.Player(host);
    }
    this.player_.enableCaptions(
        true, cast.player.api.CaptionsType.TTML, track.trackContentId);
  }
};
/**
 * Determines whether the given track is a TTML caption track, based on its
 * file extension or declared MIME type.
 *
 * @param {cast.receiver.media.Track} track The track.
 * @return {boolean} Whether the track is in TTML format.
 * @private
 */
sampleplayer.CastPlayer.prototype.isTtmlTrack_ = function(track) {
  return this.isKnownTextTrack_(
      track,
      sampleplayer.TextTrackType.SIDE_LOADED_TTML,
      sampleplayer.CaptionsMimeType.TTML);
};
/**
 * Determines whether the given track is a WebVTT caption track, based on
 * its file extension or declared MIME type.
 *
 * @param {cast.receiver.media.Track} track The track.
 * @return {boolean} Whether the track is in VTT format.
 * @private
 */
sampleplayer.CastPlayer.prototype.isVttTrack_ = function(track) {
  return this.isKnownTextTrack_(
      track,
      sampleplayer.TextTrackType.SIDE_LOADED_VTT,
      sampleplayer.CaptionsMimeType.VTT);
};
/**
 * Checks if a track matches a known text track type, by comparing the URL
 * file extension and the declared MIME type.
 *
 * @param {cast.receiver.media.Track} track The track.
 * @param {!sampleplayer.TextTrackType} textTrackType The text track
 *     type expected.
 * @param {!string} mimeType The mimeType expected.
 * @return {boolean} Whether the track has the specified format.
 * @private
 */
sampleplayer.CastPlayer.prototype.isKnownTextTrack_ =
    function(track, textTrackType, mimeType) {
  if (!track) {
    return false;
  }
  // sampleplayer.TextTrackType values double as the required file extension.
  var expectedExtension = textTrackType;
  var contentId = track.trackContentId;
  var contentType = track.trackContentType;
  var extensionMatches = !!contentId &&
      sampleplayer.getExtension_(contentId) === expectedExtension;
  var mimeMatches = !!contentType && contentType.indexOf(mimeType) === 0;
  return extensionMatches || mimeMatches;
};
/**
 * Synchronizes the enabled state of each in-band stream with the set of
 * active track ids.
 *
 * @param {!Array.<number>} activeTrackIds The active tracks.
 * @private
 */
sampleplayer.CastPlayer.prototype.processInBandTracks_ =
    function(activeTrackIds) {
  var protocol = this.player_.getStreamingProtocol();
  var streamCount = protocol.getStreamCount();
  for (var i = 0; i < streamCount; i++) {
    // Track ids are 1-based while stream indices are 0-based.
    var shouldBeEnabled = activeTrackIds.indexOf(i + 1) !== -1;
    var isEnabled = protocol.isStreamEnabled(i);
    // Only toggle streams whose state actually changed.
    if (shouldBeEnabled !== isEnabled) {
      protocol.enableStream(i, shouldBeEnabled);
    }
  }
};
/**
 * Builds a TracksInfo structure describing the streams exposed by the
 * streaming protocol, if a player and protocol exist.
 *
 * @return {cast.receiver.media.TracksInfo} The tracks info, or null when
 *     there is no protocol or no recognizable streams.
 * @private
 */
sampleplayer.CastPlayer.prototype.readInBandTracksInfo_ = function() {
  var protocol = this.player_ ? this.player_.getStreamingProtocol() : null;
  if (!protocol) {
    return null;
  }
  var activeTrackIds = [];
  var tracks = [];
  var count = protocol.getStreamCount();
  for (var index = 0; index < count; index++) {
    // Track ids are 1-based while stream indices are 0-based.
    var trackId = index + 1;
    if (protocol.isStreamEnabled(index)) {
      activeTrackIds.push(trackId);
    }
    var streamInfo = protocol.getStreamInfo(index);
    var mimeType = streamInfo.mimeType;
    // Map the stream MIME type onto a receiver track type, if recognized.
    var trackType = null;
    if (mimeType.indexOf(sampleplayer.TrackType.TEXT) === 0 ||
        mimeType === sampleplayer.CaptionsMimeType.TTML) {
      trackType = cast.receiver.media.TrackType.TEXT;
    } else if (mimeType.indexOf(sampleplayer.TrackType.VIDEO) === 0) {
      trackType = cast.receiver.media.TrackType.VIDEO;
    } else if (mimeType.indexOf(sampleplayer.TrackType.AUDIO) === 0) {
      trackType = cast.receiver.media.TrackType.AUDIO;
    }
    if (trackType) {
      var track = new cast.receiver.media.Track(trackId, trackType);
      track.name = streamInfo.name;
      track.language = streamInfo.language;
      track.trackContentType = streamInfo.mimeType;
      tracks.push(track);
    }
  }
  if (tracks.length === 0) {
    return null;
  }
  return /** @type {cast.receiver.media.TracksInfo} **/ ({
    tracks: tracks,
    activeTrackIds: activeTrackIds
  });
};
/**
 * Delegates a load request to the original (default) media manager handler.
 *
 * @param {!cast.receiver.MediaManager.LoadInfo} info The load request info.
 * @private
 */
sampleplayer.CastPlayer.prototype.loadDefault_ = function(info) {
  var requestData =
      /** @type {!cast.receiver.MediaManager.RequestData} */ (info.message);
  this.onLoadOrig_(new cast.receiver.MediaManager.Event(
      cast.receiver.MediaManager.EventType.LOAD, requestData, info.senderId));
};
/**
 * (Re)arms the idle timer; after t milliseconds the receiver application
 * is stopped. Passing a falsy value only clears the pending timer.
 *
 * @param {number} t the time in milliseconds before the player goes idle
 * @private
 */
sampleplayer.CastPlayer.prototype.setIdleTimeout_ = function(t) {
  this.log_('setIdleTimeout_: ' + t);
  clearTimeout(this.idleTimerId_);
  if (!t) {
    return;
  }
  var self = this;
  this.idleTimerId_ = setTimeout(function() {
    self.receiverManager_.stop();
  }, t);
};
/**
 * Records the player type, reflects it on the root element, and starts or
 * stops the audio-mode burn-in prevention animation.
 *
 * @param {sampleplayer.Type} type The type of player.
 * @param {boolean} isLiveStream whether player is showing live content
 * @private
 */
sampleplayer.CastPlayer.prototype.setType_ = function(type, isLiveStream) {
  this.log_('setType_: ' + type);
  this.type_ = type;
  this.isLiveStream_ = isLiveStream;
  this.element_.setAttribute('type', type);
  this.element_.setAttribute('live', isLiveStream.toString());
  var overlay = this.getElementByClass_('.overlay');
  var watermark = this.getElementByClass_('.watermark');
  clearInterval(this.burnInPreventionIntervalId_);
  if (type != sampleplayer.Type.AUDIO) {
    overlay.removeAttribute('style');
    return;
  }
  // In 'audio' mode, periodically float the metadata overlay around the
  // screen to prevent screen burn.
  this.burnInPreventionIntervalId_ = setInterval(function() {
    overlay.style.marginBottom = Math.round(Math.random() * 100) + 'px';
    overlay.style.marginLeft = Math.round(Math.random() * 600) + 'px';
  }, sampleplayer.BURN_IN_TIMEOUT);
};
/**
 * Sets the state of the player.
 *
 * When opt_delay is given, the transition is deferred: the method schedules
 * itself to run again (without the delay) after opt_delay ms. When
 * opt_crossfade is set, the element is faded out, the state is applied, and
 * the element fades back in.
 *
 * @param {sampleplayer.State} state the new state of the player
 * @param {boolean=} opt_crossfade true if should cross fade between states
 * @param {number=} opt_delay the amount of time (in ms) to wait
 * @private
 */
sampleplayer.CastPlayer.prototype.setState_ = function(
    state, opt_crossfade, opt_delay) {
  this.log_('setState_: state=' + state + ', crossfade=' + opt_crossfade +
      ', delay=' + opt_delay);
  var self = this;
  // Timestamp of this request; compared below to detect whether a newer
  // setState_ call superseded a crossfaded one that is still in flight.
  self.lastStateTransitionTime_ = Date.now();
  // Any previously scheduled (delayed) state change is superseded.
  clearTimeout(self.delay_);
  if (opt_delay) {
    // Defer: re-invoke without the delay (but keeping the crossfade flag)
    // after opt_delay ms.
    var func = function() { self.setState_(state, opt_crossfade); };
    self.delay_ = setTimeout(func, opt_delay);
  } else {
    if (!opt_crossfade) {
      // Immediate transition: record the state, mirror it on the DOM
      // element, refresh the reported application state, and re-arm the
      // idle timer for the new state.
      self.state_ = state;
      self.element_.setAttribute('state', state);
      self.updateApplicationState_();
      self.setIdleTimeout_(sampleplayer.IDLE_TIMEOUT[state.toUpperCase()]);
    } else {
      var stateTransitionTime = self.lastStateTransitionTime_;
      sampleplayer.transition_(self.element_, sampleplayer.TRANSITION_DURATION_,
          function() {
            // In the case of a crossfade transition, the transition will be
            // completed even if setState is called during the transition. We
            // need to be sure that the requested state is ignored as the
            // latest setState call should take precedence.
            if (stateTransitionTime < self.lastStateTransitionTime_) {
              self.log_('discarded obsolete deferred state(' + state + ').');
              return;
            }
            self.setState_(state, false);
          });
    }
  }
};
/**
 * Recomputes the application state string and pushes it to the receiver
 * manager only when it differs from the last reported value.
 *
 * @private
 */
sampleplayer.CastPlayer.prototype.updateApplicationState_ = function() {
  this.log_('updateApplicationState_');
  if (!this.mediaManager_) {
    return;
  }
  var isIdle = this.state_ === sampleplayer.State.IDLE;
  var media = isIdle ? null : this.mediaManager_.getMediaInformation();
  var applicationState = sampleplayer.getApplicationState_(media);
  if (this.currentApplicationState_ != applicationState) {
    this.currentApplicationState_ = applicationState;
    this.receiverManager_.setApplicationState(applicationState);
  }
};
/**
 * Handler invoked once the player is ready; initializes the UI by showing
 * the idle screen.
 *
 * @private
 */
sampleplayer.CastPlayer.prototype.onReady_ = function() {
  this.log_('onReady');
  this.setState_(sampleplayer.State.IDLE, false);
};
/**
 * Handler for sender disconnections; shuts the receiver app down when the
 * last connected sender leaves by explicit request (tapping Disconnect).
 *
 * @param {cast.receiver.CastReceiverManager.SenderDisconnectedEvent} event
 * @private
 */
sampleplayer.CastPlayer.prototype.onSenderDisconnected_ = function(event) {
  this.log_('onSenderDisconnected');
  var noSendersLeft = this.receiverManager_.getSenders().length === 0;
  var requestedBySender = event.reason ===
      cast.receiver.system.DisconnectReason.REQUESTED_BY_SENDER;
  if (noSendersLeft && requestedBySender) {
    this.receiverManager_.stop();
  }
};
/**
 * Media error handler: fades out, transitions to IDLE, then defers to the
 * original media manager error handling.
 *
 * @see cast.receiver.MediaManager#onError
 * @param {!Object} error
 * @private
 */
sampleplayer.CastPlayer.prototype.onError_ = function(error) {
  this.log_('onError');
  var self = this;
  var onFadedOut = function() {
    self.setState_(sampleplayer.State.IDLE, true);
    self.onErrorOrig_(error);
  };
  sampleplayer.transition_(
      self.element_, sampleplayer.TRANSITION_DURATION_, onFadedOut);
};
/**
 * Buffering handler: if playback was in progress but the element no longer
 * has enough data buffered, switch to the BUFFERING state.
 *
 * @private
 */
sampleplayer.CastPlayer.prototype.onBuffering_ = function() {
  this.log_('onBuffering[readyState=' + this.mediaElement_.readyState + ']');
  var isPlaying = this.state_ === sampleplayer.State.PLAYING;
  var starved =
      this.mediaElement_.readyState < HTMLMediaElement.HAVE_ENOUGH_DATA;
  if (isPlaying && starved) {
    this.setState_(sampleplayer.State.BUFFERING, false);
  }
};
/**
 * Playback-started handler: cancels any deferred play and enters the
 * PLAYING state.
 *
 * @private
 */
sampleplayer.CastPlayer.prototype.onPlaying_ = function() {
  this.log_('onPlaying');
  this.cancelDeferredPlay_('media is already playing');
  // Crossfade into playback only when non-audio content leaves LOADING.
  var crossfade = this.state_ == sampleplayer.State.LOADING &&
      this.type_ != sampleplayer.Type.AUDIO;
  this.setState_(sampleplayer.State.PLAYING, crossfade);
};
/**
 * Pause handler. A pause caused by buffer underflow moves the player to
 * BUFFERING; a regular pause (while media is neither idle nor finished)
 * moves it to PAUSED. The progress UI is refreshed either way.
 *
 * @private
 */
sampleplayer.CastPlayer.prototype.onPause_ = function() {
  this.log_('onPause');
  this.cancelDeferredPlay_('media is paused');
  var isUnderflow = this.player_ && this.player_.getState()['underflow'];
  if (isUnderflow) {
    this.log_('isUnderflow');
    this.setState_(sampleplayer.State.BUFFERING, false);
    this.mediaManager_.broadcastStatus(/* includeMedia */ false);
  } else {
    var isIdle = this.state_ === sampleplayer.State.IDLE;
    var isDone =
        this.mediaElement_.currentTime === this.mediaElement_.duration;
    if (!isIdle && !isDone) {
      this.setState_(sampleplayer.State.PAUSED, false);
    }
  }
  this.updateProgress_();
};
/**
 * Adjusts the outgoing media status before it is sent to senders. While
 * this player is in its own BUFFERING state, a PAUSED player state is
 * reported as BUFFERING; for live streams a pause is reported as a
 * cancelled IDLE.
 *
 * @param {!cast.receiver.media.MediaStatus} mediaStatus Media status that is
 *     supposed to go to sender.
 * @return {cast.receiver.media.MediaStatus} MediaStatus that will be sent to
 *     sender.
 * @private
 */
sampleplayer.CastPlayer.prototype.customizedStatusCallback_ = function(
    mediaStatus) {
  this.log_('customizedStatusCallback_: playerState=' +
      mediaStatus.playerState + ', this.state_=' + this.state_);
  // TODO: remove this workaround once MediaManager detects buffering
  // immediately.
  var isPausedState =
      mediaStatus.playerState === cast.receiver.media.PlayerState.PAUSED;
  if (isPausedState && this.state_ === sampleplayer.State.BUFFERING) {
    mediaStatus.playerState = cast.receiver.media.PlayerState.BUFFERING;
  } else if (isPausedState && this.isLiveStream_) {
    mediaStatus.playerState = cast.receiver.media.PlayerState.IDLE;
    mediaStatus.idleReason = cast.receiver.media.IdleReason.CANCELLED;
  }
  return mediaStatus;
};
/**
 * STOP message handler: cancels deferred play, fades out, enters the IDLE
 * state and forwards the event to the original stop handler.
 *
 * @param {cast.receiver.MediaManager.Event} event The stop event.
 * @private
 */
sampleplayer.CastPlayer.prototype.onStop_ = function(event) {
  this.log_('onStop');
  this.cancelDeferredPlay_('media is stopped');
  var self = this;
  sampleplayer.transition_(
      self.element_, sampleplayer.TRANSITION_DURATION_, function() {
        self.setState_(sampleplayer.State.IDLE, false);
        self.onStopOrig_(event);
      });
};
/**
 * Media-ended handler: returns to the IDLE state with a crossfade and
 * dismisses the preview UI.
 *
 * @private
 */
sampleplayer.CastPlayer.prototype.onEnded_ = function() {
  this.log_('onEnded');
  this.setState_(sampleplayer.State.IDLE, true);
  this.hidePreviewMode_();
};
/**
 * Media-aborted handler: returns to the IDLE state with a crossfade and
 * dismisses the preview UI.
 *
 * @private
 */
sampleplayer.CastPlayer.prototype.onAbort_ = function() {
  this.log_('onAbort');
  this.setState_(sampleplayer.State.IDLE, true);
  this.hidePreviewMode_();
};
/**
 * Periodic playback-position callback. Leaves BUFFERING/LOADING for the
 * PLAYING state and refreshes the progress UI.
 *
 * @private
 */
sampleplayer.CastPlayer.prototype.onProgress_ = function() {
  var state = this.state_;
  if (state === sampleplayer.State.BUFFERING ||
      state === sampleplayer.State.LOADING) {
    this.setState_(sampleplayer.State.PLAYING, false);
  }
  this.updateProgress_();
};
/**
 * Refreshes the time labels, progress bar and (when visible) the preview
 * mode countdown. Skipped entirely on Cast for Audio devices, which have
 * no display.
 *
 * @private
 */
sampleplayer.CastPlayer.prototype.updateProgress_ = function() {
  if (sampleplayer.isCastForAudioDevice_()) {
    return;
  }
  var curTime = this.mediaElement_.currentTime;
  var totalTime = this.mediaElement_.duration;
  if (isNaN(curTime) || isNaN(totalTime)) {
    return;
  }
  var pct = 100 * (curTime / totalTime);
  this.curTimeElement_.innerText = sampleplayer.formatDuration_(curTime);
  this.totalTimeElement_.innerText = sampleplayer.formatDuration_(totalTime);
  this.progressBarInnerElement_.style.width = pct + '%';
  this.progressBarThumbElement_.style.left = pct + '%';
  if (this.displayPreviewMode_) {
    // Preview mode shows the remaining whole seconds.
    this.previewModeTimerElement_.innerText =
        "" + Math.round(totalTime - curTime);
  }
};
/**
 * Seek-start callback: marks the UI as seeking and cancels any pending
 * removal of that marker.
 *
 * @private
 */
sampleplayer.CastPlayer.prototype.onSeekStart_ = function() {
  this.log_('onSeekStart');
  clearTimeout(this.seekingTimeoutId_);
  this.element_.classList.add('seeking');
};
/**
 * Seek-end callback: keeps the 'seeking' marker on the UI for another
 * three seconds, then removes it.
 *
 * @private
 */
sampleplayer.CastPlayer.prototype.onSeekEnd_ = function() {
  this.log_('onSeekEnd');
  clearTimeout(this.seekingTimeoutId_);
  this.seekingTimeoutId_ =
      sampleplayer.addClassWithTimeout_(this.element_, 'seeking', 3000);
};
/**
 * Visibility handler (fired when HDMI input changes): pauses playback and
 * broadcasts the new status whenever the application stops being visible.
 *
 * @see cast.receiver.CastReceiverManager#onVisibilityChanged
 * @param {!cast.receiver.CastReceiverManager.VisibilityChangedEvent} event
 *     Event fired when visibility of application is changed.
 * @private
 */
sampleplayer.CastPlayer.prototype.onVisibilityChanged_ = function(event) {
  this.log_('onVisibilityChanged');
  if (event.isVisible) {
    return;
  }
  this.mediaElement_.pause();
  this.mediaManager_.broadcastStatus(false);
};
/**
 * PRELOAD message handler: delegates to preload() with the media from the
 * request data.
 *
 * @see castplayer.CastPlayer#load
 * @param {cast.receiver.MediaManager.Event} event The load event.
 * @return {boolean} Whether the item can be preloaded.
 * @private
 */
sampleplayer.CastPlayer.prototype.onPreload_ = function(event) {
  this.log_('onPreload_');
  var requestData =
      /** @type {!cast.receiver.MediaManager.LoadRequestData} */ (event.data);
  return this.preload(requestData.media);
};
/**
 * CANCEL_PRELOAD message handler: dismisses the preview UI.
 *
 * @see castplayer.CastPlayer#load
 * @param {cast.receiver.MediaManager.Event} event The load event.
 * @return {boolean} Always true.
 * @private
 */
sampleplayer.CastPlayer.prototype.onCancelPreload_ = function(event) {
  this.log_('onCancelPreload_');
  this.hidePreviewMode_();
  return true;
};
/**
 * LOAD message handler: drops any deferred playback and delegates to
 * load() with a LoadInfo built from the event.
 *
 * @see sampleplayer#load
 * @param {cast.receiver.MediaManager.Event} event The load event.
 * @private
 */
sampleplayer.CastPlayer.prototype.onLoad_ = function(event) {
  this.log_('onLoad_');
  this.cancelDeferredPlay_('new media is loaded');
  var requestData =
      /** @type {!cast.receiver.MediaManager.LoadRequestData} */ (event.data);
  this.load(
      new cast.receiver.MediaManager.LoadInfo(requestData, event.senderId));
};
/**
 * Called when we receive a EDIT_TRACKS_INFO message. Delegates to the
 * original handler first (which covers VTT), then toggles TTML or embedded
 * captions manually, since those are rendered through MPL rather than the
 * media manager.
 *
 * @param {!cast.receiver.MediaManager.Event} event The editTracksInfo event.
 * @private
 */
sampleplayer.CastPlayer.prototype.onEditTracksInfo_ = function(event) {
  this.log_('onEditTracksInfo');
  this.onEditTracksInfoOrig_(event);
  // If the captions are embedded or ttml we need to enable/disable tracks
  // as needed (vtt is processed by the media manager)
  if (!event.data || !event.data.activeTrackIds || !this.textTrackType_) {
    return;
  }
  var mediaInformation = this.mediaManager_.getMediaInformation() || {};
  var type = this.textTrackType_;
  if (type == sampleplayer.TextTrackType.SIDE_LOADED_TTML) {
    // The player_ may not have been created yet if the type of media did
    // not require MPL. It will be lazily created in processTtmlCues_
    if (this.player_) {
      // Turn previously enabled TTML captions off before re-applying the
      // currently active set below.
      this.player_.enableCaptions(false, cast.player.api.CaptionsType.TTML);
    }
    this.processTtmlCues_(event.data.activeTrackIds,
        mediaInformation.tracks || []);
  } else if (type == sampleplayer.TextTrackType.EMBEDDED) {
    // Captions are disabled while the in-band stream selection changes,
    // then re-enabled so the new selection takes effect.
    this.player_.enableCaptions(false);
    this.processInBandTracks_(event.data.activeTrackIds);
    this.player_.enableCaptions(true);
  }
};
/**
 * Called when metadata is loaded; at this point the track information is
 * available, so side loaded TTML or embedded captions can be provisioned
 * through MPL before load completion is reported to senders.
 *
 * @param {!cast.receiver.MediaManager.LoadInfo} info The load information.
 * @private
 */
sampleplayer.CastPlayer.prototype.onMetadataLoaded_ = function(info) {
  this.log_('onMetadataLoaded');
  this.onLoadSuccess_();
  // In the case of ttml and embedded captions we need to load the cues using
  // MPL.
  this.readSideLoadedTextTrackType_(info);
  if (this.textTrackType_ ==
      sampleplayer.TextTrackType.SIDE_LOADED_TTML &&
      info.message && info.message.activeTrackIds && info.message.media &&
      info.message.media.tracks) {
    // Side loaded TTML: feed the active cues to MPL.
    this.processTtmlCues_(
        info.message.activeTrackIds, info.message.media.tracks);
  } else if (!this.textTrackType_) {
    // If we do not have a textTrackType, check if the tracks are embedded
    this.maybeLoadEmbeddedTracksMetadata_(info);
  }
  // Only send load completed when we have completed the player LOADING state
  this.metadataLoaded_ = true;
  this.maybeSendLoadCompleted_(info);
};
/**
 * Load-failure handler: fades out, transitions to IDLE and forwards the
 * event to the original metadata error handler.
 *
 * @see cast.receiver.MediaManager#onLoadMetadataError
 * @param {!cast.receiver.MediaManager.LoadInfo} event The data
 *     associated with a LOAD event.
 * @private
 */
sampleplayer.CastPlayer.prototype.onLoadMetadataError_ = function(event) {
  this.log_('onLoadMetadataError_');
  var self = this;
  var onFadedOut = function() {
    self.setState_(sampleplayer.State.IDLE, true);
    self.onLoadMetadataErrorOrig_(event);
  };
  sampleplayer.transition_(
      self.element_, sampleplayer.TRANSITION_DURATION_, onFadedOut);
};
/**
 * Cancels a pending deferred playback, if one is scheduled.
 *
 * @param {string} cancelReason
 * @private
 */
sampleplayer.CastPlayer.prototype.cancelDeferredPlay_ = function(cancelReason) {
  if (!this.deferredPlayCallbackId_) {
    return;
  }
  this.log_('Cancelled deferred playback: ' + cancelReason);
  clearTimeout(this.deferredPlayCallbackId_);
  this.deferredPlayCallbackId_ = null;
};
/**
 * Defers playback start by the given timeout. When the timer fires, MPL
 * playback is started if a player exists; otherwise the media element is
 * played directly.
 *
 * @param {number} timeout In msec.
 * @private
 */
sampleplayer.CastPlayer.prototype.deferPlay_ = function(timeout) {
  // Fixed typo in the log message ('Defering' -> 'Deferring').
  this.log_('Deferring playback for ' + timeout + ' ms');
  var self = this;
  this.deferredPlayCallbackId_ = setTimeout(function() {
    self.deferredPlayCallbackId_ = null;
    if (self.player_) {
      self.log_('Playing when enough data');
      self.player_.playWhenHaveEnoughData();
    } else {
      self.log_('Playing');
      self.mediaElement_.play();
    }
  }, timeout);
};
/**
 * Successful-load handler: fills in the total time label and, when the
 * duration is unknown, clears the label and pins the progress bar to 100%.
 *
 * @private
 */
sampleplayer.CastPlayer.prototype.onLoadSuccess_ = function() {
  this.log_('onLoadSuccess');
  // We should have the total time at this point, so update the label and
  // the progress bar.
  var totalTime = this.mediaElement_.duration;
  if (isNaN(totalTime)) {
    this.totalTimeElement_.textContent = '';
    this.progressBarInnerElement_.style.width = '100%';
    this.progressBarThumbElement_.style.left = '100%';
  } else {
    this.totalTimeElement_.textContent =
        sampleplayer.formatDuration_(totalTime);
  }
};
/**
 * Extracts the first image url from the media metadata, if present.
 *
 * @param {!cast.receiver.media.MediaInformation} media The media.
 * @return {string|undefined} The image url.
 * @private
 */
sampleplayer.getMediaImageUrl_ = function(media) {
  var images = (media.metadata || {})['images'] || [];
  var first = images && images[0];
  return first && first['url'];
};
/**
 * Maps asset media information onto the MPL streaming protocol factory for
 * its adaptive streaming format (HLS, DASH or Smooth Streaming).
 *
 * @param {!cast.receiver.media.MediaInformation} mediaInformation The
 *     asset media information.
 * @return {?function(cast.player.api.Host):player.StreamingProtocol}
 *     The protocol function that corresponds to this media type, or null
 *     for non-adaptive content.
 * @private
 */
sampleplayer.getProtocolFunction_ = function(mediaInformation) {
  var type = mediaInformation.contentType || '';
  var path = sampleplayer.getPath_(mediaInformation.contentId) || '';
  var extension = sampleplayer.getExtension_(path);
  if (extension === 'm3u8' ||
      type === 'application/x-mpegurl' ||
      type === 'application/vnd.apple.mpegurl') {
    return cast.player.api.CreateHlsStreamingProtocol;
  }
  if (extension === 'mpd' || type === 'application/dash+xml') {
    return cast.player.api.CreateDashStreamingProtocol;
  }
  if (path.indexOf('.ism') > -1 || type === 'application/vnd.ms-sstr+xml') {
    return cast.player.api.CreateSmoothStreamingProtocol;
  }
  return null;
};
/**
 * Reports whether the media can be preloaded; only content with a known
 * adaptive streaming protocol qualifies.
 *
 * @param {!cast.receiver.media.MediaInformation} media The media information.
 * @return {boolean} whether the media can be preloaded.
 * @private
 */
sampleplayer.supportsPreload_ = function(media) {
  var protocolFunction = sampleplayer.getProtocolFunction_(media);
  return protocolFunction != null;
};
/**
 * Returns true if the preview UI should be shown for the type of media
 * although the media can not be preloaded. Applies to progressive
 * mp4/ogv/webm content.
 *
 * @param {!cast.receiver.media.MediaInformation} media The media information.
 * @return {boolean} whether the media can be previewed.
 * @private
 */
sampleplayer.canDisplayPreview_ = function(media) {
  var contentUrlPath = sampleplayer.getPath_(media.contentId || '');
  var extension = sampleplayer.getExtension_(contentUrlPath);
  // Progressive container formats supported by the preview UI; replaces
  // the original repeated if/else-if extension comparisons.
  var previewableExtensions = ['mp4', 'ogv', 'webm'];
  return previewableExtensions.indexOf(extension) !== -1;
};
/**
 * Returns the type of player to use for the given media.
 * By default this looks at the media's content type, but falls back
 * to the URL file extension if the content type is not conclusive.
 *
 * @param {!cast.receiver.media.MediaInformation} media The media.
 * @return {sampleplayer.Type} The player type.
 * @private
 */
sampleplayer.getType_ = function(media) {
  var contentType = media.contentType || '';
  var contentUrlPath = sampleplayer.getPath_(media.contentId || '');
  var extension = sampleplayer.getExtension_(contentUrlPath);
  if (contentType.indexOf('audio/') === 0) {
    return sampleplayer.Type.AUDIO;
  }
  if (contentType.indexOf('video/') === 0) {
    return sampleplayer.Type.VIDEO;
  }
  // Adaptive streaming MIME types (HLS, DASH, Smooth Streaming) are video.
  var videoContentTypes = [
    'application/x-mpegurl',
    'application/vnd.apple.mpegurl',
    'application/dash+xml',
    'application/vnd.ms-sstr+xml'
  ];
  for (var i = 0; i < videoContentTypes.length; i++) {
    if (contentType.indexOf(videoContentTypes[i]) === 0) {
      return sampleplayer.Type.VIDEO;
    }
  }
  // Content type was not conclusive; fall back to the file extension.
  if (['mp3', 'oga', 'wav'].indexOf(extension) !== -1) {
    return sampleplayer.Type.AUDIO;
  }
  if (['mp4', 'ogv', 'webm', 'm3u8', 'mpd'].indexOf(extension) !== -1) {
    return sampleplayer.Type.VIDEO;
  }
  // Bug fix: the original tested contentType.indexOf('.ism') != 0, which is
  // true for almost any content type and made UNKNOWN unreachable. Smooth
  // Streaming is detected from the URL path instead, consistent with
  // getProtocolFunction_.
  if (contentUrlPath.indexOf('.ism') > -1) {
    return sampleplayer.Type.VIDEO;
  }
  return sampleplayer.Type.UNKNOWN;
};
/**
 * Formats the given duration as HH:MM:SS, omitting the hour field when it
 * is zero.
 *
 * @param {number} dur the duration (in seconds)
 * @return {string} the time (in HH:MM:SS)
 * @private
 */
sampleplayer.formatDuration_ = function(dur) {
  dur = Math.floor(dur);
  function pad2(n) { return ('00' + Math.round(n)).slice(-2); }
  var hours = Math.floor(dur / 3600);
  var minutes = Math.floor(dur / 60) % 60;
  var seconds = dur % 60;
  var minutesAndSeconds = pad2(minutes) + ':' + pad2(seconds);
  return hours ? pad2(hours) + ':' + minutesAndSeconds : minutesAndSeconds;
};
/**
 * Adds the given className to the element and schedules its removal after
 * the specified amount of time.
 *
 * @param {!Element} element The element to add the given class.
 * @param {string} className The class name to add to the given element.
 * @param {number} timeout The amount of time (in ms) the class should be
 *     added to the given element.
 * @return {number} A numerical id, which can be used later with
 *     window.clearTimeout().
 * @private
 */
sampleplayer.addClassWithTimeout_ = function(element, className, timeout) {
  element.classList.add(className);
  var removeClass = function() {
    element.classList.remove(className);
  };
  return setTimeout(removeClass, timeout);
};
/**
 * Fades the element out, runs the given action, then fades back in. With a
 * non-positive duration, or on Cast for Audio devices, the action runs
 * immediately without a transition.
 *
 * @param {!Element} element The element to fade in/out.
 * @param {number} time The total amount of time (in seconds) to transition.
 * @param {function()} something The function that does something.
 * @private
 */
sampleplayer.transition_ = function(element, time, something) {
  var skipTransition = time <= 0 || sampleplayer.isCastForAudioDevice_();
  if (skipTransition) {
    // No transitions supported for Cast for Audio devices
    something();
    return;
  }
  var halfTime = time / 2.0;
  sampleplayer.fadeOut_(element, halfTime, function() {
    something();
    sampleplayer.fadeIn_(element, halfTime);
  });
};
/**
 * Preloads image assets referenced by the media metadata and invokes
 * doneFunc once every image has either loaded or failed. Cast for Audio
 * devices skip preloading entirely.
 *
 * @param {!cast.receiver.media.MediaInformation} media The media to load.
 * @param {function()} doneFunc The function to call when done.
 * @private
 */
sampleplayer.preload_ = function(media, doneFunc) {
  if (sampleplayer.isCastForAudioDevice_()) {
    // No preloading for Cast for Audio devices
    doneFunc();
    return;
  }
  // Try to preload image metadata.
  var imagesToPreload = [];
  var thumbnailUrl = sampleplayer.getMediaImageUrl_(media);
  if (thumbnailUrl) {
    imagesToPreload.push(thumbnailUrl);
  }
  if (imagesToPreload.length === 0) {
    doneFunc();
    return;
  }
  var remaining = imagesToPreload.length;
  var images = [];
  // Completion is reported once per image, success or failure alike.
  var onImageSettled = function() {
    remaining--;
    if (remaining === 0) {
      doneFunc();
    }
  };
  for (var i = 0; i < imagesToPreload.length; i++) {
    images[i] = new Image();
    images[i].src = imagesToPreload[i];
    images[i].onload = onImageSettled;
    images[i].onerror = onImageSettled;
  }
};
/**
 * Causes the given element to fade back to its default opacity.
 *
 * @param {!Element} element The element to fade in.
 * @param {number} time The amount of time (in seconds) to transition.
 * @param {function()=} opt_doneFunc The function to call when complete.
 * @private
 */
sampleplayer.fadeIn_ = function(element, time, opt_doneFunc) {
  // An empty string restores the stylesheet-defined opacity.
  sampleplayer.fadeTo_(element, '', time, opt_doneFunc);
};
/**
 * Causes the given element to fade out completely (opacity 0).
 *
 * @param {!Element} element The element to fade out.
 * @param {number} time The amount of time (in seconds) to transition.
 * @param {function()=} opt_doneFunc The function to call when complete.
 * @private
 */
sampleplayer.fadeOut_ = function(element, time, opt_doneFunc) {
  sampleplayer.fadeTo_(element, 0, time, opt_doneFunc);
};
/**
 * Transitions the element's opacity over the given time, invoking
 * opt_doneFunc once the webkit transition finishes.
 *
 * @param {!Element} element The element to fade in/out.
 * @param {string|number} opacity The opacity to transition to.
 * @param {number} time The amount of time (in seconds) to transition.
 * @param {function()=} opt_doneFunc The function to call when complete.
 * @private
 */
sampleplayer.fadeTo_ = function(element, opacity, time, opt_doneFunc) {
  // Removed the unused `self` and `id` locals from the original.
  var listener = function() {
    // Clean up the inline transition and this one-shot listener before
    // notifying the caller.
    element.style.webkitTransition = '';
    element.removeEventListener('webkitTransitionEnd', listener, false);
    if (opt_doneFunc) {
      opt_doneFunc();
    }
  };
  element.addEventListener('webkitTransitionEnd', listener, false);
  element.style.webkitTransition = 'opacity ' + time + 's';
  element.style.opacity = opacity;
};
/**
 * Utility function to get the lower-cased extension of a URL file path.
 *
 * @param {string} url the URL
 * @return {string} the extension or "" if none
 * @private
 */
sampleplayer.getExtension_ = function(url) {
  var parts = url.split('.');
  var hasNoDot = parts.length === 1;
  // A single leading dot (hidden files such as ".bashrc") does not count
  // as an extension either.
  var isDotPrefixedOnly = parts[0] === '' && parts.length === 2;
  if (hasNoDot || isDotPrefixedOnly) {
    return '';
  }
  return parts.pop().toLowerCase();
};
/**
 * Returns the human readable application state derived from the current
 * media metadata.
 *
 * @param {cast.receiver.media.MediaInformation=} opt_media The current media
 *     metadata
 * @return {string} The application state.
 * @private
 */
sampleplayer.getApplicationState_ = function(opt_media) {
  if (!opt_media) {
    return 'Ready To Cast';
  }
  var title = opt_media.metadata && opt_media.metadata.title;
  return title ? 'Now Casting: ' + title : 'Now Casting';
};
/**
 * Returns the path component of a URL, resolved via an anchor element.
 *
 * @param {string} url The URL
 * @return {string} The URL path.
 * @private
 */
sampleplayer.getPath_ = function(url) {
  var anchor = document.createElement('a');
  anchor.href = url;
  return anchor.pathname || '';
};
/**
 * Returns the proxied URL which lets the media be streamed even if the
 * CORS headers are not present on the origin server.
 *
 * @param {string} url The media URL.
 * @return {string} The URL routed through the CORS proxy, with its scheme
 *     and leading '//' stripped.
 * @private
 */
sampleplayer.getProxiedUrl_ = function(url) {
  return sampleplayer.CORS_PROXY_URL +
      url.replace(/^(?:[a-z]+:)?\/\//i,'');
};
/**
 * Logging utility; writes to the console only when debugging is enabled
 * and a non-empty message is supplied.
 *
 * @param {string} message to log
 * @private
 */
sampleplayer.CastPlayer.prototype.log_ = function(message) {
  if (!this.debug_ || !message) {
    return;
  }
  console.log(message);
};
/**
 * Sets the inner text for the given element, clearing it when no text is
 * supplied. No-op for a missing element.
 *
 * @param {Element} element The element.
 * @param {string=} opt_text The text.
 * @private
 */
sampleplayer.setInnerText_ = function(element, opt_text) {
  if (element) {
    element.innerText = opt_text || '';
  }
};
/**
 * Sets (or hides) the background image for the given element. No-op for a
 * missing element.
 *
 * @param {Element} element The element.
 * @param {string=} opt_url The image url.
 * @private
 */
sampleplayer.setBackgroundImage_ = function(element, opt_url) {
  if (!element) {
    return;
  }
  if (opt_url) {
    // Escape embedded double quotes so the url("...") value stays valid.
    element.style.backgroundImage =
        'url("' + opt_url.replace(/"/g, '\\"') + '")';
    element.style.display = '';
  } else {
    element.style.backgroundImage = 'none';
    element.style.display = 'none';
  }
};
/**
 * Called to determine if the receiver device is an audio-only (Cast for
 * Audio) device, i.e. one whose capabilities report no display support.
 *
 * @return {boolean} Whether the device is a Cast for Audio device.
 * @private
 */
sampleplayer.isCastForAudioDevice_ = function() {
  var receiverManager = window.cast.receiver.CastReceiverManager.getInstance();
  var capabilities =
      receiverManager && receiverManager.getDeviceCapabilities();
  if (capabilities) {
    return capabilities['display_supported'] === false;
  }
  return false;
};
|
#!/usr/bin/env python
"""
dbcol.py: DBCol is a struct describing an sqlite database table column
"""
from dbcol import *
# -----------------------------DATABASE-ROW-------------------------------#
class DBRow:
    """Helpers for pairing sqlite table columns (DBCol) with row values."""

    @staticmethod
    def dict(columns, values):
        """Map column names to their row values.

        Returns None when `values` is None; raises ValueError when the
        column and value counts differ.
        """
        if values is None:
            return None
        if len(columns) != len(values):
            raise ValueError('columns do not match values')
        return {col.name: value for col, value in zip(columns, values)}

    @staticmethod
    def sqlForRowInsert(table, columns, values):
        """Build an INSERT statement for `table`.

        When one fewer value than column is supplied, the first column is
        assumed to be an autoincrement key and NULL is inserted for it.
        Integer values and the literal string 'NULL' are emitted unquoted;
        every other value is double-quoted.

        NOTE(review): values are interpolated, not bound -- do not feed this
        untrusted input (SQL injection risk); prefer parameterized queries.
        """
        column_sql = ','.join('"{}"'.format(col.name) for col in columns)

        rendered = []
        # Allow for a first-column autoincrement key.
        if len(values) == (len(columns) - 1):
            rendered.append('NULL')
        for val in values:
            if isinstance(val, int):
                # Integer values do not need quotes.
                rendered.append(str(val))
            elif val == 'NULL':
                # Keep quotes off of NULL.
                rendered.append(val)
            else:
                # Wrap every other value with quotes.
                rendered.append('"{}"'.format(val))

        # BUG FIX: the original hand-rolled comma counter dropped the
        # separator between values whenever the implicit autoincrement NULL
        # was prepended (it compared against len(values) without accounting
        # for the extra NULL item); joining the rendered list fixes that.
        return 'INSERT INTO "{}" ({}) VALUES ({});'.format(
            table, column_sql, ','.join(rendered))
# ---------------------------------EXPORT---------------------------------#
__all__ = ['DBRow']
# ----------------------------------MAIN----------------------------------#
def main():
    """Smoke test: build a three-column table spec and exercise DBRow helpers."""
    f1 = DBCol('f1', 'INTEGER')
    f2 = DBCol('f2', 'TEXT')
    f3 = DBCol('f3', 'TEXT')
    cols = [f1, f2, f3]
    vals = [1, 'Test', 'Third']
    # BUG FIX: the bare `print expr` statements were Python-2-only syntax
    # errors under Python 3; the single-argument print() call form behaves
    # identically on both interpreters.
    print(DBRow.dict(cols, vals))
    print(DBRow.sqlForRowInsert('sample', cols, vals))


if __name__ == '__main__':
    main()
|
from .prez_model import PrezModel
# NOTE(review): the star import presumably pulls vocprez's names and any
# import-time side effects into the package namespace -- confirm intent,
# since none of those names are re-exported below.
from .vocprez import *

# Public API of this package when star-imported: only PrezModel by name.
__all__ = [
    "PrezModel",
]
|
// Supported CSS length units for configuration and output values.
export const UNITS = Object.freeze({
  REM: `rem`,
  EM: `em`,
  PX: `px`,
})

// Font size (px) used when converting px offsets to relative units.
// NOTE(review): presumably the browser-default root font size -- confirm
// against how callers use it.
export const FONT_SIZE_FOR_OFFSET = 16

// Prefixes used to namespace error messages emitted by this library.
export const ERROR_PREFIX = `[cssapi-baseline]`
export const CONFIGURE_PREFIX = `configure()`
export const API_PREFIX = `api()`

// Canonical names for configuration arguments.
export const CONFIGURATION_ARG_NAMES = Object.freeze({
  CONFIG: `config`,
})
|
from django.conf.urls import patterns, url, include
from django.contrib.auth.views import login, logout
from django.contrib import admin

from aws_policy_manager import urls as awsurls

# Populate the default admin site from each installed app's admin module.
admin.autodiscover()

# NOTE(review): `patterns('')` with dotted-string view names is legacy
# (Django <= 1.9) style; the strings are resolved to views at runtime.
urlpatterns = patterns('',
    url(r'^admin/', include(admin.site.urls)),
    # Landing page: summary of who can access what.
    url(r'^$', 'ssheepdog.views.view_access_summary'),
    url(r'^new_key/$', 'ssheepdog.views.generate_new_application_key'),
    url(r'^sync_keys/$', 'ssheepdog.views.sync_keys'),
    # Force a key sync for a single login record by primary key.
    url(r'^manual_sync/(?P<id>[0-9]+)/$', 'ssheepdog.views.manual_sync'),
    # Grant or revoke a user's access to a login; `action` is the verb.
    url(r'^(?P<action>permit|deny)/(?P<user_pk>[0-9]+)/(?P<login_pk>[0-9]+)/$', 'ssheepdog.views.change_access'),
    url(r'^user/(?P<id>[0-9]+)/$', 'ssheepdog.views.user_admin_view'),
    url(r'^login/(?P<id>[0-9]+)/$','ssheepdog.views.login_admin_view'),
    url(r'^openid/', include('django_openid_auth.urls')),
    url(r'^accounts/login/$', login, {}, name='login'),
    url(r'^accounts/logout/$', logout, {}, name='logout'),
    url(r'^awspolicies/', include(awsurls.aws_patterns, namespace='aws_urls')),
)
|
define(function (require) {
  // TODO: drop this file when developing.
  'use strict';

  var Backbone = require('backbone');

  /**
   * Backbone model for a single sample resource, persisted at /api/samples.
   */
  var Sample = Backbone.Model.extend({
    urlRoot: '/api/samples'
  });

  // Export the module's model namespace.
  return {
    Sample: Sample
  };
});
|
# (C) Copyright 2021 Hewlett Packard Enterprise Development LP.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# __author__ = "@netwookie"
# __credits__ = ["Rick Kauffman"]
# __license__ = "Apache2.0"
# __maintainer__ = "Rick Kauffman"
# __email__ = "rick.a.kauffman@hpe.com"
from pymongo import MongoClient
from cvprac.cvp_client import CvpClient
import urllib3
from st2common.runners.base_action import Action
class AristaBaseAction(Action):
    """StackStorm base action that opens an authenticated Arista CVP client.

    Reads connection settings from the pack config keys `cvp` (host),
    `cvp_user` and `cvp_word` (password).
    """

    def __init__(self, config):
        super(AristaBaseAction, self).__init__(config)
        self.client = self._get_client()

    def _get_client(self):
        """Create and connect a CvpClient from the pack configuration."""
        ipaddress = self.config['cvp']
        username = self.config['cvp_user']
        password = self.config['cvp_word']
        # Suppress TLS warnings: CVP appliances commonly present
        # self-signed certificates.
        urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
        client = CvpClient()
        # Connect to the CVP server.
        # BUG FIX: previously passed the undefined names `cvp`, `cvp_user`
        # and `cvp_word` (a NameError at runtime) instead of the local
        # variables read from self.config above.
        client.connect([ipaddress], username, password)
        return client
class MongoBaseAction(Action):
    """StackStorm base action exposing an authenticated local MongoDB client.

    Reads `dbuser` and `dbpass` from the pack configuration and connects to
    mongod on localhost:27017.
    """

    def __init__(self, config):
        super(MongoBaseAction, self).__init__(config=config)
        self.dbclient = self._get_db_client()

    def _get_db_client(self):
        """Build a MongoClient for the local mongod using pack credentials."""
        credentials = (self.config['dbuser'], self.config['dbpass'])
        uri = 'mongodb://%s:%s@localhost:27017/' % credentials
        return MongoClient(uri)
|
import React, { Component } from 'react'
export default class SearchResultList extends Component {
render() {
let list = this.props.results.map(item =>
<li><a href={item.data.url}><h3>{item.data.title}</h3><p>{item.data.ups}</p></a></li>)
return (
<div className="results">
<ul>{list}</ul>
</div>
)
}
}
|
#!/usr/bin/env python
from django.urls import path

from . import views

# URL namespace, enabling reverses like "comments:postcomment".
app_name = "comments"

urlpatterns = [
    # url(r'^po456stcomment/(?P<article_id>\d+)$', views.CommentPostView.as_view(), name='postcomment'),
    # POST target for submitting a comment on the given article.
    path('article/<int:article_id>/postcomment', views.CommentPostView.as_view(), name='postcomment'),
]
|
class Packer(object):
    """Pack/Unpack data for WIZNet devices"""

    # NOTE(review): relies on module-level `struct` and `logger` (their
    # imports are outside this view) -- confirm. The ''.join() in pack()
    # implies Python 2 semantics, where struct.pack returns str.

    def pack(self, s2e):
        """Serialize every field of `s2e` into one packed string.

        Each entry of `s2e._fields` is (name, typename, *extra args); the
        field's value is dispatched to the matching pack_<typename> method
        with the extra args appended.
        """
        output = []
        for field in s2e._fields:
            name = field[0]
            # Dispatch on the declared field type, e.g. "ip" -> pack_ip.
            packer = getattr(self, "pack_%s" % (field[1],))
            fieldvalue = getattr(s2e, name)
            packed = packer(fieldvalue, *field[2:])
            logger.debug("Packed %s %s -> `%s'" % (name, fieldvalue, packed))
            output.append(packed)
        return ''.join(output)

    def pack_ip(self, str_ip):
        """ip address should be in string form, e.g. "1.2.3.4"."""
        return struct.pack(">BBBB", *[ int(c) for c in str_ip.split(".") ])

    def pack_firmversion(self, version):
        # Firmware version "X.Y" packed as two big-endian unsigned bytes.
        return struct.pack(">BB", * [ int(c) for c in version.split(".") ])

    def pack_mac(self, str_mac):
        """mac address should be in colon-separated hex form, e.g. "00:11:22:FF:FF:FF"."""
        return struct.pack(">BBBBBB", *[ int(c, 16) for c in str_mac.split(":") ])

    def pack_short(self, value):
        # Unsigned 16-bit integer, big-endian.
        return struct.pack(">H", value)

    def pack_byte(self, value):
        # Single unsigned byte; value may be any int()-convertible.
        return struct.pack(">B", int(value))

    def pack_bool(self, value, inverted=False):
        """Pack a boolean as a single byte (0x01/0x00), optionally inverted."""
        fmt = ">B"
        if inverted:
            value = not value
        if value:
            intval = 0x01
        else:
            intval = 0x00
        return struct.pack(fmt, intval)

    def pack_str(self, mystr, length, *args):
        # Fixed-width string field: struct zero-pads (or truncates) to
        # exactly `length` bytes.
        fmt = "%(length)ss" % {"length": length }
        return struct.pack(fmt, mystr)

    def pack_dictvalues(self, value, dictvalues, *args):
        """Pack `value` as the wire byte that maps to it in `dictvalues`.

        `dictvalues` maps byte keys to meanings; this performs the reverse
        lookup and asserts the value is actually representable.
        """
        fmt = ">B"
        bytevalue = None
        for k, v in dictvalues.items():
            if v == value:
                bytevalue = k
                break
        assert(bytevalue is not None)
        return struct.pack(fmt, bytevalue)

    def pack_bytes(self, value, length):
        # `value` is a whitespace-separated string of hex octets, e.g. "0A FF".
        fmt = "B" * length
        return struct.pack(fmt, *[ int(x, 16) for x in value.split() ])
|
import EventEmitter from 'events'

/**
 * Event bus dedicated to file-upload notifications.
 *
 * Intentionally adds no behavior beyond giving upload events a distinct
 * emitter class identity.
 */
export default class FileUploadEmitter extends EventEmitter {}
|
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
"""pex support for interacting with interpreters."""
from __future__ import absolute_import
import hashlib
import json
import os
import platform
import re
import subprocess
import sys
from collections import OrderedDict
from textwrap import dedent
from pex import third_party
from pex.common import is_exe, safe_mkdtemp, safe_rmtree, temporary_dir
from pex.compatibility import string
from pex.executor import Executor
from pex.jobs import ErrorHandler, Job, Retain, SpawnedJob, execute_parallel
from pex.orderedset import OrderedSet
from pex.platforms import Platform
from pex.third_party.packaging import markers, tags
from pex.third_party.pkg_resources import Distribution, Requirement
from pex.tracer import TRACER
from pex.typing import TYPE_CHECKING, cast, overload
from pex.util import CacheHelper
from pex.variables import ENV
if TYPE_CHECKING:
from typing import (
Callable,
Dict,
Iterable,
Iterator,
List,
MutableMapping,
Optional,
Sequence,
Text,
Tuple,
Union,
)
PathFilter = Callable[[str], bool]
InterpreterIdentificationJobError = Tuple[str, Union[Job.Error, Exception]]
InterpreterOrJobError = Union["PythonInterpreter", InterpreterIdentificationJobError]
# N.B.: We convert InterpreterIdentificationJobErrors that result from spawning interpreter
# identification jobs to these end-user InterpreterIdentificationErrors for display.
InterpreterIdentificationError = Tuple[str, Text]
InterpreterOrError = Union["PythonInterpreter", InterpreterIdentificationError]
class PythonIdentity(object):
    """A fingerprint of a Python interpreter.

    Bundles the binary path, sys.prefix data, PEP 425 tags, version tuple and
    PEP 508 environment markers so an interpreter can be compared, matched
    against requirements and cached without re-spawning it.
    """

    class Error(Exception):
        """Base class for PythonIdentity errors."""
        pass

    class InvalidError(Error):
        """Raised when an encoded identity payload cannot be decoded."""
        pass

    class UnknownRequirement(Error):
        """Raised by `matches` when the requirement cannot be parsed."""
        pass

    # Maps PEP 425 python-tag abbreviations to interpreter display names.
    ABBR_TO_INTERPRETER_NAME = {
        "pp": "PyPy",
        "cp": "CPython",
    }

    @classmethod
    def get(cls, binary=None):
        # type: (Optional[str]) -> PythonIdentity
        """Return the identity of the current interpreter (or of `binary`)."""
        # N.B.: We should not need to look past `sys.executable` to learn the current interpreter's
        # executable path, but on OSX there has been a bug where the `sys.executable` reported is
        # _not_ the path of the current interpreter executable:
        #   https://bugs.python.org/issue22490#msg283859
        # That case is distinguished by the presence of a `__PYVENV_LAUNCHER__` environment
        # variable as detailed in the Python bug linked above.
        if binary and binary != sys.executable and "__PYVENV_LAUNCHER__" not in os.environ:
            # Here we assume sys.executable is accurate and binary is something like a pyenv shim.
            binary = sys.executable

        supported_tags = tuple(tags.sys_tags())
        # The first sys tag is the most specific, preferred tag.
        preferred_tag = supported_tags[0]
        return cls(
            binary=binary or sys.executable,
            prefix=sys.prefix,
            base_prefix=(
                # Old virtualenv (16 series and lower) sets `sys.real_prefix` in all cases.
                getattr(sys, "real_prefix", None)
                # Both pyvenv and virtualenv 20+ set `sys.base_prefix` as per
                # https://www.python.org/dev/peps/pep-0405/.
                or getattr(sys, "base_prefix", sys.prefix)
            ),
            python_tag=preferred_tag.interpreter,
            abi_tag=preferred_tag.abi,
            platform_tag=preferred_tag.platform,
            version=sys.version_info[:3],
            supported_tags=supported_tags,
            env_markers=markers.default_environment(),
        )

    @classmethod
    def decode(cls, encoded):
        """Inverse of `encode`: rebuild a PythonIdentity from its JSON string."""
        TRACER.log("creating PythonIdentity from encoded: %s" % encoded, V=9)
        values = json.loads(encoded)
        # `encode` always emits exactly 9 keys; anything else is corrupt or foreign.
        if len(values) != 9:
            raise cls.InvalidError("Invalid interpreter identity: %s" % encoded)

        supported_tags = values.pop("supported_tags")

        def iter_tags():
            # Tags were serialized as (interpreter, abi, platform) triples.
            for (interpreter, abi, platform) in supported_tags:
                yield tags.Tag(interpreter=interpreter, abi=abi, platform=platform)

        return cls(supported_tags=iter_tags(), **values)

    @classmethod
    def _find_interpreter_name(cls, python_tag):
        """Map a PEP 425 python tag (e.g. "cp38") to a display name."""
        for abbr, interpreter in cls.ABBR_TO_INTERPRETER_NAME.items():
            if python_tag.startswith(abbr):
                return interpreter
        raise ValueError("Unknown interpreter: {}".format(python_tag))

    def __init__(
        self,
        binary,  # type: str
        prefix,  # type: str
        base_prefix,  # type: str
        python_tag,  # type: str
        abi_tag,  # type: str
        platform_tag,  # type: str
        version,  # type: Iterable[int]
        supported_tags,  # type: Iterable[tags.Tag]
        env_markers,  # type: Dict[str, str]
    ):
        # type: (...) -> None
        # N.B.: We keep this mapping to support historical values for `distribution` and `requirement`
        # properties.
        self._interpreter_name = self._find_interpreter_name(python_tag)

        self._binary = binary
        self._prefix = prefix
        self._base_prefix = base_prefix
        self._python_tag = python_tag
        self._abi_tag = abi_tag
        self._platform_tag = platform_tag
        # Normalize potentially one-shot iterables into owned, immutable copies.
        self._version = tuple(version)
        self._supported_tags = tuple(supported_tags)
        self._env_markers = dict(env_markers)

    def encode(self):
        """Serialize this identity to a stable (sorted-key) JSON string."""
        values = dict(
            binary=self._binary,
            prefix=self._prefix,
            base_prefix=self._base_prefix,
            python_tag=self._python_tag,
            abi_tag=self._abi_tag,
            platform_tag=self._platform_tag,
            version=self._version,
            supported_tags=[
                (tag.interpreter, tag.abi, tag.platform) for tag in self._supported_tags
            ],
            env_markers=self._env_markers,
        )
        return json.dumps(values, sort_keys=True)

    @property
    def binary(self):
        # The interpreter executable path this identity was derived from.
        return self._binary

    @property
    def prefix(self):
        # type: () -> str
        # The interpreter's sys.prefix.
        return self._prefix

    @property
    def base_prefix(self):
        # type: () -> str
        # The interpreter's base (non-virtualenv) prefix.
        return self._base_prefix

    @property
    def python_tag(self):
        # PEP 425 python tag, e.g. "cp38".
        return self._python_tag

    @property
    def abi_tag(self):
        # PEP 425 ABI tag, e.g. "cp38m".
        return self._abi_tag

    @property
    def platform_tag(self):
        # PEP 425 platform tag, e.g. "linux_x86_64".
        return self._platform_tag

    @property
    def version(self):
        # type: () -> Tuple[int, int, int]
        """The interpreter version as a normalized tuple.

        Consistent with `sys.version_info`, the tuple corresponds to `<major>.<minor>.<micro>`.
        """
        return cast("Tuple[int, int, int]", self._version)

    @property
    def version_str(self):
        # type: () -> str
        # Dotted form of `version`, e.g. "3.8.2".
        return ".".join(map(str, self.version))

    @property
    def supported_tags(self):
        # type: () -> Tuple[tags.Tag, ...]
        # All PEP 425 tags this interpreter supports, most specific first.
        return self._supported_tags

    @property
    def env_markers(self):
        # PEP 508 environment markers; copied so callers cannot mutate ours.
        return dict(self._env_markers)

    @property
    def interpreter(self):
        # Display name, e.g. "CPython" or "PyPy".
        return self._interpreter_name

    @property
    def requirement(self):
        # A Requirement pinning this exact interpreter, e.g. CPython==3.8.2.
        return self.distribution.as_requirement()

    @property
    def distribution(self):
        # type: () -> Distribution
        # Model the interpreter itself as a distribution so requirement
        # matching machinery can be reused against it.
        return Distribution(project_name=self.interpreter, version=self.version_str)

    def iter_supported_platforms(self):
        # type: () -> Iterator[Platform]
        """All platforms supported by the associated interpreter ordered from most specific to
        least."""
        for tag in self._supported_tags:
            yield Platform.from_tag(tag)

    @classmethod
    def parse_requirement(cls, requirement, default_interpreter="CPython"):
        """Coerce a string or Requirement into a Requirement.

        Bare version specs (e.g. ">=3.6") are retried with
        `default_interpreter` prepended, yielding e.g. "CPython>=3.6".
        Raises ValueError for unparseable strings or unsupported types.
        """
        if isinstance(requirement, Requirement):
            return requirement
        elif isinstance(requirement, string):
            try:
                requirement = Requirement.parse(requirement)
            except ValueError:
                try:
                    requirement = Requirement.parse("%s%s" % (default_interpreter, requirement))
                except ValueError:
                    raise ValueError("Unknown requirement string: %s" % requirement)
            return requirement
        else:
            raise ValueError("Unknown requirement type: %r" % (requirement,))

    def matches(self, requirement):
        """Given a Requirement, check if this interpreter matches."""
        try:
            requirement = self.parse_requirement(requirement, self._interpreter_name)
        except ValueError as e:
            raise self.UnknownRequirement(str(e))
        return self.distribution in requirement

    def hashbang(self):
        # type: () -> str
        """Return a `#!/usr/bin/env ...` line selecting this interpreter family."""
        if self._interpreter_name == "PyPy":
            # PyPy 2 installs a plain `pypy` binary; PyPy 3+ installs `pypy3` etc.
            hashbang_string = "pypy" if self._version[0] == 2 else "pypy{}".format(self._version[0])
        else:
            hashbang_string = "python{}.{}".format(self._version[0], self._version[1])
        return "#!/usr/bin/env {}".format(hashbang_string)

    @property
    def python(self):
        # type: () -> str
        # return the python version in the format of the 'python' key for distributions
        # specifically, '2.7', '3.2', etc.
        return "%d.%d" % (self.version[0:2])

    def __str__(self):
        # type: () -> str
        # N.B.: Kept as distinct from __repr__ to support legacy str(identity) used by Pants v1 when
        # forming cache locations.
        return "{interpreter_name}-{major}.{minor}.{patch}".format(
            interpreter_name=self._interpreter_name,
            major=self._version[0],
            minor=self._version[1],
            patch=self._version[2],
        )

    def __repr__(self):
        # type: () -> str
        return (
            "{type}({binary!r}, {python_tag!r}, {abi_tag!r}, {platform_tag!r}, {version!r})".format(
                type=self.__class__.__name__,
                binary=self._binary,
                python_tag=self._python_tag,
                abi_tag=self._abi_tag,
                platform_tag=self._platform_tag,
                version=self._version,
            )
        )

    def _tup(self):
        # Equality/hash key: identifying fields only (prefixes and markers excluded).
        return self._binary, self._python_tag, self._abi_tag, self._platform_tag, self._version

    def __eq__(self, other):
        if type(other) is not type(self):
            return NotImplemented
        return self._tup() == other._tup()

    def __hash__(self):
        # type: () -> int
        return hash(self._tup())
class PythonInterpreter(object):
_REGEXEN = (
# NB: OSX ships python binaries named Python with a capital-P; so we allow for this.
re.compile(r"^Python$"),
re.compile(
r"""
^
(?:
python |
pypy
)
(?:
# Major version
[2-9]
(?:.
# Minor version
[0-9]
# Some distributions include a suffix on the interpreter name, similar to
# PEP-3149. For example, Gentoo has /usr/bin/python3.6m to indicate it was
# built with pymalloc
[a-z]?
)?
)?
$
""",
flags=re.VERBOSE,
),
)
_PYTHON_INTERPRETER_BY_NORMALIZED_PATH = {} # type: Dict
@staticmethod
def _get_pyvenv_cfg(path):
# type: (str) -> Optional[str]
# See: https://www.python.org/dev/peps/pep-0405/#specification
pyvenv_cfg_path = os.path.join(path, "pyvenv.cfg")
if os.path.isfile(pyvenv_cfg_path):
with open(pyvenv_cfg_path) as fp:
for line in fp:
name, _, value = line.partition("=")
if name.strip() == "home":
return pyvenv_cfg_path
return None
@classmethod
def _find_pyvenv_cfg(cls, maybe_venv_python_binary):
# type: (str) -> Optional[str]
# A pyvenv is identified by a pyvenv.cfg file with a home key in one of the two following
# directory layouts:
#
# 1. <venv dir>/
# bin/
# pyvenv.cfg
# python*
#
# 2. <venv dir>/
# pyvenv.cfg
# bin/
# python*
#
# In practice, we see layout 2 in the wild, but layout 1 is also allowed by the spec.
#
# See: # See: https://www.python.org/dev/peps/pep-0405/#specification
maybe_venv_bin_dir = os.path.dirname(maybe_venv_python_binary)
pyvenv_cfg = cls._get_pyvenv_cfg(maybe_venv_bin_dir)
if not pyvenv_cfg:
maybe_venv_dir = os.path.dirname(maybe_venv_bin_dir)
pyvenv_cfg = cls._get_pyvenv_cfg(maybe_venv_dir)
return pyvenv_cfg
@classmethod
def _resolve_pyvenv_canonical_python_binary(
cls,
real_binary, # type: str
maybe_venv_python_binary, # type: str
):
# type: (...) -> Optional[str]
maybe_venv_python_binary = os.path.abspath(maybe_venv_python_binary)
if not os.path.islink(maybe_venv_python_binary):
return None
pyvenv_cfg = cls._find_pyvenv_cfg(maybe_venv_python_binary)
if pyvenv_cfg is None:
return None
while os.path.islink(maybe_venv_python_binary):
resolved = os.readlink(maybe_venv_python_binary)
if not os.path.isabs(resolved):
resolved = os.path.abspath(
os.path.join(os.path.dirname(maybe_venv_python_binary), resolved)
)
if os.path.dirname(resolved) == os.path.dirname(maybe_venv_python_binary):
maybe_venv_python_binary = resolved
else:
# We've escaped the venv bin dir; so the last resolved link was the
# canonical venv Python binary.
#
# For example, for:
# ./venv/bin/
# python -> python3.8
# python3 -> python3.8
# python3.8 -> /usr/bin/python3.8
#
# We want to resolve each of ./venv/bin/python{,3{,.8}} to the canonical
# ./venv/bin/python3.8 which is the symlink that points to the home binary.
break
return maybe_venv_python_binary
@classmethod
def canonicalize_path(cls, path):
# type: (str) -> str
"""Canonicalize a potential Python interpreter path.
This will return a path-equivalent of the given `path` in canonical form for use in cache
keys.
N.B.: If the path is a venv symlink it will not be fully de-referenced in order to maintain
fidelity with the requested venv Python binary choice.
"""
real_binary = os.path.realpath(path)
# If the path is a PEP-405 venv interpreter symlink we do not want to resolve outside of the
# venv in order to stay faithful to the binary path choice.
return (
cls._resolve_pyvenv_canonical_python_binary(
real_binary=real_binary, maybe_venv_python_binary=path
)
or real_binary
)
class Error(Exception):
pass
class IdentificationError(Error):
pass
class InterpreterNotFound(Error):
pass
@staticmethod
def latest_release_of_min_compatible_version(interps):
# type: (Sequence[PythonInterpreter]) -> PythonInterpreter
"""Find the minimum major version, but use the most recent micro version within that minor
version.
That is, prefer 3.6.1 over 3.6.0, and prefer both over 3.7.*.
"""
assert interps, "No interpreters passed to `PythonInterpreter.safe_min()`"
return min(
interps, key=lambda interp: (interp.version[0], interp.version[1], -interp.version[2])
)
@classmethod
def get(cls):
return cls.from_binary(sys.executable)
@staticmethod
def _paths(paths=None):
# type: (Optional[Iterable[str]]) -> Iterable[str]
# NB: If `paths=[]`, we will not read $PATH.
return OrderedSet(paths if paths is not None else os.getenv("PATH", "").split(os.pathsep))
@classmethod
def iter(cls, paths=None):
# type: (Optional[Iterable[str]]) -> Iterator[PythonInterpreter]
"""Iterate all valid interpreters found in `paths`.
NB: The paths can either be directories to search for python binaries or the paths of python
binaries themselves.
:param paths: The paths to look for python interpreters; by default the `PATH`.
"""
return cls._filter(cls._find(cls._paths(paths=paths)))
@classmethod
def iter_candidates(cls, paths=None, path_filter=None):
# type: (Optional[Iterable[str]], Optional[PathFilter]) -> Iterator[InterpreterOrError]
"""Iterate all likely interpreters found in `paths`.
NB: The paths can either be directories to search for python binaries or the paths of python
binaries themselves.
:param paths: The paths to look for python interpreters; by default the `PATH`.
:param path_filter: An optional predicate to test whether a candidate interpreter's binary
path is acceptable.
:return: A heterogeneous iterator over valid interpreters and (python, error) invalid
python binary tuples.
"""
failed_interpreters = OrderedDict() # type: MutableMapping[str, Text]
def iter_interpreters():
# type: () -> Iterator[PythonInterpreter]
for candidate in cls._find(
cls._paths(paths=paths), path_filter=path_filter, error_handler=Retain()
):
if isinstance(candidate, cls):
yield candidate
else:
python, exception = cast("InterpreterIdentificationJobError", candidate)
if isinstance(exception, Job.Error) and exception.stderr:
# We spawned a subprocess to identify the interpreter but the interpreter
# could not run our identification code meaning the interpreter is either
# broken or old enough that it either can't parse our identification code
# or else provide stdlib modules we expect. The stderr should indicate the
# broken-ness appropriately.
failed_interpreters[python] = exception.stderr.strip()
else:
# We couldn't even spawn a subprocess to identify the interpreter. The
# likely OSError should help identify the underlying issue.
failed_interpreters[python] = repr(exception)
for interpreter in cls._filter(iter_interpreters()):
yield interpreter
for python, error in failed_interpreters.items():
yield python, error
@classmethod
def all(cls, paths=None):
# type: (Optional[Iterable[str]]) -> Iterable[PythonInterpreter]
return list(cls.iter(paths=paths))
@classmethod
def _create_isolated_cmd(cls, binary, args=None, pythonpath=None, env=None):
cmd = [binary]
# Don't add the user site directory to `sys.path`.
#
# Additionally, it would be nice to pass `-S` to disable adding site-packages but unfortunately
# some python distributions include portions of the standard library there.
cmd.append("-s")
env = cls._sanitized_environment(env=env)
pythonpath = list(pythonpath or ())
if pythonpath:
env["PYTHONPATH"] = os.pathsep.join(pythonpath)
else:
# Turn off reading of PYTHON* environment variables.
cmd.append("-E")
if args:
cmd.extend(args)
rendered_command = " ".join(cmd)
if pythonpath:
rendered_command = "PYTHONPATH={} {}".format(env["PYTHONPATH"], rendered_command)
TRACER.log("Executing: {}".format(rendered_command), V=3)
return cmd, env
@classmethod
def _execute(cls, binary, args=None, pythonpath=None, env=None, stdin_payload=None, **kwargs):
cmd, env = cls._create_isolated_cmd(binary, args=args, pythonpath=pythonpath, env=env)
stdout, stderr = Executor.execute(cmd, stdin_payload=stdin_payload, env=env, **kwargs)
return cmd, stdout, stderr
INTERP_INFO_FILE = "INTERP-INFO"
@classmethod
def _spawn_from_binary_external(cls, binary):
def create_interpreter(stdout, check_binary=False):
identity = stdout.decode("utf-8").strip()
if not identity:
raise cls.IdentificationError("Could not establish identity of {}.".format(binary))
interpreter = cls(PythonIdentity.decode(identity))
# We should not need to check this since binary == interpreter.binary should always be
# true, but historically this could be untrue as noted in `PythonIdentity.get`.
if check_binary and not os.path.exists(interpreter.binary):
raise cls.InterpreterNotFound(
"Cached interpreter for {} reports a binary of {}, which could not be found".format(
binary, interpreter.binary
)
)
return interpreter
# Part of the PythonInterpreter data are environment markers that depend on the current OS
# release. That data can change when the OS is upgraded but (some of) the installed interpreters
# remain the same. As such, include the OS in the hash structure for cached interpreters.
os_digest = hashlib.sha1()
for os_identifier in platform.release(), platform.version():
os_digest.update(os_identifier.encode("utf-8"))
os_hash = os_digest.hexdigest()
interpreter_cache_dir = os.path.join(ENV.PEX_ROOT, "interpreters")
os_cache_dir = os.path.join(interpreter_cache_dir, os_hash)
if os.path.isdir(interpreter_cache_dir) and not os.path.isdir(os_cache_dir):
with TRACER.timed("GCing interpreter cache from prior OS version"):
safe_rmtree(interpreter_cache_dir)
interpreter_hash = CacheHelper.hash(binary)
# Some distributions include more than one copy of the same interpreter via a hard link (e.g.:
# python3.7 is a hardlink to python3.7m). To ensure a deterministic INTERP-INFO file we must
# emit a separate INTERP-INFO for each link since INTERP-INFO contains the interpreter path and
# would otherwise be unstable.
#
# See cls._REGEXEN for a related affordance.
#
# N.B.: The path for --venv mode interpreters can be quite long; so we just used a fixed
# length hash of the interpreter binary path to ensure uniqueness and not run afoul of file
# name length limits.
path_id = hashlib.sha1(binary.encode("utf-8")).hexdigest()
cache_dir = os.path.join(os_cache_dir, interpreter_hash, path_id)
cache_file = os.path.join(cache_dir, cls.INTERP_INFO_FILE)
if os.path.isfile(cache_file):
try:
with open(cache_file, "rb") as fp:
return SpawnedJob.completed(create_interpreter(fp.read(), check_binary=True))
except (IOError, OSError, cls.Error, PythonIdentity.Error):
safe_rmtree(cache_dir)
return cls._spawn_from_binary_external(binary)
else:
pythonpath = third_party.expose(["pex"])
cmd, env = cls._create_isolated_cmd(
binary,
args=[
"-c",
dedent(
"""\
import os
import sys
from pex.common import atomic_directory, safe_open
from pex.interpreter import PythonIdentity
encoded_identity = PythonIdentity.get(binary={binary!r}).encode()
sys.stdout.write(encoded_identity)
with atomic_directory({cache_dir!r}, exclusive=False) as cache_dir:
if cache_dir:
with safe_open(os.path.join(cache_dir, {info_file!r}), 'w') as fp:
fp.write(encoded_identity)
""".format(
binary=binary, cache_dir=cache_dir, info_file=cls.INTERP_INFO_FILE
)
),
],
pythonpath=pythonpath,
)
# Ensure the `.` implicit PYTHONPATH entry contains no Pex code (of a different version)
# that might interfere with the behavior we expect in the script above.
cwd = safe_mkdtemp()
process = Executor.open_process(
cmd, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd
)
job = Job(command=cmd, process=process, finalizer=lambda: safe_rmtree(cwd))
return SpawnedJob.stdout(job, result_func=create_interpreter)
@classmethod
def _expand_path(cls, path):
if os.path.isfile(path):
return [path]
elif os.path.isdir(path):
return sorted(os.path.join(path, fn) for fn in os.listdir(path))
return []
@classmethod
def from_env(cls, hashbang):
"""Resolve a PythonInterpreter as /usr/bin/env would.
:param hashbang: A string, e.g. "python3.3" representing some binary on the $PATH.
:return: the first matching interpreter found or `None`.
:rtype: :class:`PythonInterpreter`
"""
def hashbang_matches(fn):
basefile = os.path.basename(fn)
return hashbang == basefile
for interpreter in cls._identify_interpreters(filter=hashbang_matches):
return interpreter
@classmethod
def _spawn_from_binary(cls, binary):
canonicalized_binary = cls.canonicalize_path(binary)
if not os.path.exists(canonicalized_binary):
raise cls.InterpreterNotFound(canonicalized_binary)
# N.B.: The cache is written as the last step in PythonInterpreter instance initialization.
cached_interpreter = cls._PYTHON_INTERPRETER_BY_NORMALIZED_PATH.get(canonicalized_binary)
if cached_interpreter is not None:
return SpawnedJob.completed(cached_interpreter)
if canonicalized_binary == cls.canonicalize_path(sys.executable):
current_interpreter = cls(PythonIdentity.get())
return SpawnedJob.completed(current_interpreter)
return cls._spawn_from_binary_external(canonicalized_binary)
@classmethod
def from_binary(cls, binary):
# type: (str) -> PythonInterpreter
"""Create an interpreter from the given `binary`.
:param binary: The path to the python interpreter binary.
:return: an interpreter created from the given `binary`.
"""
return cast(PythonInterpreter, cls._spawn_from_binary(binary).await_result())
@classmethod
def _matches_binary_name(cls, path):
# type: (str) -> bool
basefile = os.path.basename(path)
return any(matcher.match(basefile) is not None for matcher in cls._REGEXEN)
@overload
@classmethod
def _find(cls, paths):
# type: (Iterable[str]) -> Iterator[PythonInterpreter]
pass
@overload
@classmethod
def _find(
cls,
paths, # type: Iterable[str]
error_handler, # type: Retain
path_filter=None, # type: Optional[PathFilter]
):
# type: (...) -> Iterator[InterpreterOrJobError]
pass
@classmethod
def _find(
cls,
paths, # type: Iterable[str]
error_handler=None, # type: Optional[ErrorHandler]
path_filter=None, # type: Optional[PathFilter]
):
# type: (...) -> Union[Iterator[PythonInterpreter], Iterator[InterpreterOrJobError]]
"""Given a list of files or directories, try to detect python interpreters amongst them.
Returns an iterator over PythonInterpreter objects.
"""
return cls._identify_interpreters(
filter=path_filter or cls._matches_binary_name, paths=paths, error_handler=error_handler
)
@overload
@classmethod
def _identify_interpreters(
cls,
filter, # type: PathFilter
error_handler, # type: None
paths=None, # type: Optional[Iterable[str]]
):
# type: (...) -> Iterator[PythonInterpreter]
pass
@overload
@classmethod
def _identify_interpreters(
cls,
filter, # type: PathFilter
error_handler, # type: Retain
paths=None, # type: Optional[Iterable[str]]
):
# type: (...) -> Iterator[InterpreterOrJobError]
pass
@classmethod
def _identify_interpreters(
cls,
filter, # type: PathFilter
error_handler=None, # type: Optional[ErrorHandler]
paths=None, # type: Optional[Iterable[str]]
):
# type: (...) -> Union[Iterator[PythonInterpreter], Iterator[InterpreterOrJobError]]
def iter_candidates():
# type: () -> Iterator[str]
for path in cls._paths(paths=paths):
for fn in cls._expand_path(path):
if filter(fn):
yield fn
results = execute_parallel(
inputs=list(iter_candidates()),
spawn_func=cls._spawn_from_binary,
error_handler=error_handler,
)
return cast("Union[Iterator[PythonInterpreter], Iterator[InterpreterOrJobError]]", results)
@classmethod
def _filter(cls, pythons):
# type: (Iterable[PythonInterpreter]) -> Iterator[PythonInterpreter]
"""Filters duplicate python interpreters and versions we don't support.
Returns an iterator over PythonInterpreters.
"""
MAJOR, MINOR, SUBMINOR = range(3)
def version_filter(version):
# type: (Tuple[int, int, int]) -> bool
return (
version[MAJOR] == 2
and version[MINOR] >= 7
or version[MAJOR] == 3
and version[MINOR] >= 5
)
seen = set()
for interp in pythons:
version = interp.identity.version
identity = version, interp.identity.abi_tag
if identity not in seen and version_filter(version):
seen.add(identity)
yield interp
@classmethod
def _sanitized_environment(cls, env=None):
# N.B. This is merely a hack because sysconfig.py on the default OS X
# installation of 2.7 breaks.
env_copy = (env or os.environ).copy()
env_copy.pop("MACOSX_DEPLOYMENT_TARGET", None)
return env_copy
def __init__(self, identity):
# type: (PythonIdentity) -> None
"""Construct a PythonInterpreter.
You should probably use `PythonInterpreter.from_binary` instead.
"""
self._identity = identity
self._binary = self.canonicalize_path(self.identity.binary)
self._supported_platforms = None
self._PYTHON_INTERPRETER_BY_NORMALIZED_PATH[self._binary] = self
@property
def binary(self):
    # type: () -> str
    """The canonicalized path of this interpreter's executable."""
    return self._binary

@property
def is_venv(self):
    # type: () -> bool
    """Return `True` if this interpreter is homed in a virtual environment."""
    # A venv is detected by sys.prefix differing from sys.base_prefix.
    return self._identity.prefix != self._identity.base_prefix

@property
def prefix(self):
    # type: () -> str
    """Return the `sys.prefix` of this interpreter.

    For virtual environments, this will be the virtual environment directory itself.
    """
    return self._identity.prefix
class BaseInterpreterResolutionError(Exception):
    """Indicates the base interpreter for a virtual environment could not be resolved."""

def resolve_base_interpreter(self):
    # type: () -> PythonInterpreter
    """Finds the base system interpreter used to create a virtual environment.

    If this interpreter is not homed in a virtual environment, returns itself.

    :raises BaseInterpreterResolutionError: If no equivalent interpreter can be
        found under any ``base_prefix`` along the venv chain.
    """
    if not self.is_venv:
        return self
    # In the case of PyPy, the <base_prefix> dir might contain one of the following:
    #
    # 1. On a system with PyPy 2.7 series and one PyPy 3.x series
    #    bin/
    #      pypy
    #      pypy3
    #
    # 2. On a system with PyPy 2.7 series and more than one PyPy 3.x series
    #    bin/
    #      pypy
    #      pypy3
    #      pypy3.6
    #      pypy3.7
    #
    # In both cases, bin/pypy is a 2.7 series interpreter. In case 2 bin/pypy3 could be either
    # PyPy 3.6 series or PyPy 3.7 series. In order to ensure we pick the correct base executable
    # of a PyPy virtual environment, we always try to resolve the most specific basename first
    # to the least specific basename last and we also verify that, if the basename resolves, it
    # resolves to an equivalent interpreter. We employ the same strategy for CPython, but only
    # for uniformity in the algorithm. It appears to always be the case for CPython that
    # python<major>.<minor> is present in any given <prefix>/bin/ directory; so the algorithm
    # gets a hit on 1st try for CPython binaries incurring ~no extra overhead.
    version = self._identity.version
    abi_tag = self._identity.abi_tag
    prefix = "pypy" if self._identity.interpreter == "PyPy" else "python"
    # Most specific basename first: e.g. python3.8, then python3, then python.
    suffixes = ("{}.{}".format(version[0], version[1]), str(version[0]), "")
    candidate_binaries = tuple("{}{}".format(prefix, suffix) for suffix in suffixes)

    def iter_base_candidate_binary_paths(interpreter):
        # type: (PythonInterpreter) -> Iterator[str]
        # Yield existing executables under the venv's base_prefix bin dir.
        bin_dir = os.path.join(interpreter._identity.base_prefix, "bin")
        for candidate_binary in candidate_binaries:
            candidate_binary_path = os.path.join(bin_dir, candidate_binary)
            if is_exe(candidate_binary_path):
                yield candidate_binary_path

    def is_same_interpreter(interpreter):
        # type: (PythonInterpreter) -> bool
        # Equivalence check: same version and ABI tag as this venv interpreter.
        identity = interpreter._identity
        return identity.version == version and identity.abi_tag == abi_tag

    resolution_path = []  # type: List[str]
    base_interpreter = self
    # A venv can itself be created from another venv; walk the chain until we
    # reach a non-venv interpreter.
    while base_interpreter.is_venv:
        resolved = None  # type: Optional[PythonInterpreter]
        for candidate_path in iter_base_candidate_binary_paths(base_interpreter):
            resolved_interpreter = self.from_binary(candidate_path)
            if is_same_interpreter(resolved_interpreter):
                resolved = resolved_interpreter
                break
        if resolved is None:
            message = [
                "Failed to resolve the base interpreter for the virtual environment at "
                "{venv_dir}.".format(venv_dir=self._identity.prefix)
            ]
            if resolution_path:
                message.append(
                    "Resolved through {path}".format(
                        path=" -> ".join(binary for binary in resolution_path)
                    )
                )
            message.append(
                "Search of base_prefix {} found no equivalent interpreter for {}".format(
                    base_interpreter._identity.base_prefix, base_interpreter._binary
                )
            )
            raise self.BaseInterpreterResolutionError("\n".join(message))
        base_interpreter = resolved_interpreter
        resolution_path.append(base_interpreter.binary)
    return base_interpreter
@property
def identity(self):
    # type: () -> PythonIdentity
    """The identity (binary, version, ABI, supported platforms) of this interpreter."""
    return self._identity

@property
def python(self):
    # Delegates to the identity's `python` value.
    return self._identity.python

@property
def version(self):
    # Delegates to the identity's version tuple.
    return self._identity.version

@property
def version_string(self):
    # type: () -> str
    # Human-readable rendering of the identity.
    return str(self._identity)

@property
def platform(self):
    """The most specific platform of this interpreter.

    :rtype: :class:`Platform`
    """
    # The first platform yielded is the most specific one.
    return next(self._identity.iter_supported_platforms())

@property
def supported_platforms(self):
    """All platforms supported by this interpreter.

    :rtype: frozenset of :class:`Platform`
    """
    # Computed lazily and cached on first access.
    if self._supported_platforms is None:
        self._supported_platforms = frozenset(self._identity.iter_supported_platforms())
    return self._supported_platforms
def execute(self, args=None, stdin_payload=None, pythonpath=None, env=None, **kwargs):
    # Run this interpreter's binary in an isolated subprocess and return the
    # result of the underlying `_execute` helper.
    return self._execute(
        self.binary,
        args=args,
        stdin_payload=stdin_payload,
        pythonpath=pythonpath,
        env=env,
        **kwargs
    )

def open_process(self, args=None, pythonpath=None, env=None, **kwargs):
    # Launch this interpreter with an isolated command line, returning both the
    # exact command used and the live process handle.
    cmd, env = self._create_isolated_cmd(self.binary, args=args, pythonpath=pythonpath, env=env)
    process = Executor.open_process(cmd, env=env, **kwargs)
    return cmd, process
def __hash__(self):
    # Identity is fully determined by the canonicalized binary path.
    return hash(self._binary)

def __eq__(self, other):
    """Interpreters are equal iff they are the same type and share a binary path."""
    if type(self) is not type(other):
        return NotImplemented
    return other._binary == self._binary

def __repr__(self):
    return "{type}({binary!r}, {identity!r})".format(
        type=type(self).__name__, binary=self._binary, identity=self._identity
    )
def spawn_python_job(
    args, env=None, interpreter=None, expose=None, pythonpath=None, **subprocess_kwargs
):
    """Spawns a python job.

    :param args: The arguments to pass to the python interpreter.
    :type args: list of str
    :param env: The environment to spawn the python interpreter process in. Defaults to the ambient
                environment.
    :type env: dict of (str, str)
    :param interpreter: The interpreter to use to spawn the python job. Defaults to the current
                        interpreter.
    :type interpreter: :class:`PythonInterpreter`
    :param expose: The names of any vendored distributions to expose to the spawned python process.
                   These will be appended to `pythonpath` if passed.
    :type expose: list of str
    :param pythonpath: The PYTHONPATH to expose to the spawned python process. These will be
                       pre-pended to the `expose` path if passed.
    :type pythonpath: list of str
    :param subprocess_kwargs: Any additional :class:`subprocess.Popen` kwargs to pass through.
    :returns: A job handle to the spawned python process.
    :rtype: :class:`Job`
    """
    pythonpath = list(pythonpath or ())
    if not expose:
        subprocess_env = env
    else:
        subprocess_env = dict(env or os.environ)
        # In order to expose vendored distributions with their un-vendored import paths in-tact, we
        # need to set `__PEX_UNVENDORED__`. See: vendor.__main__.ImportRewriter._modify_import.
        subprocess_env["__PEX_UNVENDORED__"] = "1"
        pythonpath.extend(third_party.expose(expose))
    interpreter = interpreter or PythonInterpreter.get()
    cmd, process = interpreter.open_process(
        args=args, pythonpath=pythonpath, env=subprocess_env, **subprocess_kwargs
    )
    return Job(command=cmd, process=process)
|
// ax5.ui.calendar
(function () {
const UI = ax5.ui;
const U = ax5.util;
let CALENDAR;
UI.addClass({
className: "calendar"
}, (function () {
/**
* @class ax5calendar
* @classdesc
* @author tom@axisj.com
* @logs
* 2014-06-21 tom : 시작
* @example
* ```js
* ax5.info.months = ["1월","2월","3월","4월","5월","6월","7월","8월","9월","10월","11월","12월"];
* ax5.info.weekNames = [
* {label: "일"},
* {label: "월"},
* {label: "화"},
* {label: "수"},
* {label: "목"},
* {label: "금"},
* {label: "토"}
* ];
*
* var myCalendar = new ax5.ui.calendar({
* control: {
* left: '≪',
* yearTmpl: '%s',
* monthTmpl: '%s',
* right: '≫',
* yearFirst: true
* },
*
* dimensions: {
* itemPadding: 1,
* height: 200
* },
*
* target: document.getElementById("calendar-target"),
* displayDate: (new Date()),
* mode: "day",
* selectMode: "day",
*
* marker: (function () {
* var marker = {};
* marker[_c_date(today, {'return': 'yyyy-MM-dd', 'add': {d: -1}})] = true;
* marker[_c_date(today, {'return': 'yyyy-MM-dd', 'add': {d: 0}})] = true;
* marker[_c_date(today, {'return': 'yyyy-MM-dd', 'add': {d: 1}})] = true;
*
* return marker;
* })(),
* onClick: function () {
* console.log(myCalendar.getSelection());
* },
* onStateChanged: function () {
* console.log(this);
* }
* , multipleSelect: 2
* });
* ```
*/
return function () {
let self = this,
cfg,
selectableCount = 1;
this.instanceId = ax5.getGuid();
this.target = null;
this.selection = [];
this.selectionMap = {};
this.selectableMap = {};
this.markerMap = {};
this.printedDay = {
start: "", end: ""
};
this.config = {
clickEventName: "click",
theme: 'default',
startOfWeek: 0,
mode: 'day', // day|month|year,
dateFormat: 'yyyy-MM-dd',
displayDate: (new Date()),
animateTime: 100,
dimensions: {
controlHeight: '40',
controlButtonWidth: '40',
colHeadHeight: '30',
itemPadding: 2
},
lang: {
yearHeading: "Choose the year",
monthHeading: "Choose the month",
yearTmpl: "%s",
months: ax5.info.months || ['JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN', 'JUL', 'AUG', 'SEP', 'OCT', 'NOV', 'DEC'],
dayTmpl: "%s"
},
multipleSelect: false,
selectMode: 'day',
defaultMarkerTheme: 'holiday',
defaultPeriodTheme: 'period'
};
cfg = this.config;
const onStateChanged = function (opts, that) {
    // Notify a state change: prefer a handler supplied on `opts`, otherwise
    // fall back to the instance-level handler; either is invoked with the
    // state object as both `this` and its sole argument.
    const handler = (opts && opts.onStateChanged) ? opts.onStateChanged : this.onStateChanged;
    if (handler) {
        handler.call(that, that);
    }
    that = null;
};
const getFrame = function () {
    // Build the frame template data: a deep copy of cfg extended with the
    // computed control-bar CSS maps, then render through the frame template.
    let data = jQuery.extend(true, {}, cfg, {
        controlCSS: {},
        controlButtonCSS: {}
    });
    const controlHeight = U.cssNumber(cfg.dimensions.controlHeight);
    data.controlCSS["height"] = controlHeight;
    data.controlCSS["line-height"] = controlHeight;
    data.controlButtonCSS["height"] = controlHeight;
    data.controlButtonCSS["line-height"] = controlHeight;
    // N.B. buttons are square: width also derives from controlHeight.
    data.controlButtonCSS["width"] = controlHeight;
    data.controlCSS = U.css(data.controlCSS);
    data.controlButtonCSS = U.css(data.controlButtonCSS);
    try {
        return CALENDAR.tmpl.get.call(this, "frameTmpl", data);
    }
    finally {
        data = null;
    }
};
const setDisplay = function () {
    // Update the header ("control-display") to reflect cfg.displayDate for the
    // current mode, and bind clicks on the year/month labels to changeMode.
    var
        myDate = U.date(cfg.displayDate),
        yy = "",
        mm = "",
        yy1, yy2;
    if (cfg.control) {
        if (cfg.mode == "day" || cfg.mode == "d") {
            yy = (cfg.control.yearTmpl) ? cfg.control.yearTmpl.replace('%s', myDate.getFullYear()) : myDate.getFullYear();
            mm = (cfg.control.monthTmpl) ? cfg.control.monthTmpl.replace('%s', cfg.lang.months[myDate.getMonth()]) : cfg.lang.months[myDate.getMonth()];
            this.$["control-display"].html((function () {
                // cfg.control.yearFirst decides the label order.
                if (cfg.control.yearFirst) {
                    return '<span data-calendar-display="year">' + yy + '</span>' +
                        '<span data-calendar-display="month">' + mm + '</span>';
                }
                else {
                    return '<span data-calendar-display="month">' + mm + '</span>' +
                        '<span data-calendar-display="year">' + yy + '</span>';
                }
            })());
        }
        else if (cfg.mode == "month" || cfg.mode == "m") {
            yy = (cfg.control.yearTmpl) ? cfg.control.yearTmpl.replace('%s', myDate.getFullYear()) : myDate.getFullYear();
            this.$["control-display"].html('<span data-calendar-display="year">' + yy + '</span>');
        }
        else if (cfg.mode == "year" || cfg.mode == "y") {
            // Year mode shows the printed 20-year window: (year-10) ~ (year+9).
            yy1 = (cfg.control.yearTmpl) ? cfg.control.yearTmpl.replace('%s', myDate.getFullYear() - 10) : myDate.getFullYear() - 10;
            yy2 = (cfg.control.yearTmpl) ? cfg.control.yearTmpl.replace('%s', Number(myDate.getFullYear()) + 9) : Number(myDate.getFullYear()) + 9;
            this.$["control-display"].html(yy1 + ' ~ ' + yy2);
        }
        // Clicking the year/month label switches the calendar mode.
        this.$["control-display"].find('[data-calendar-display]').on(cfg.clickEventName, (function (e) {
            var target = U.findParentNode(e.target, function (target) {
                if (target.getAttribute("data-calendar-display")) {
                    return true;
                }
            }), mode;
            if (target) {
                mode = target.getAttribute("data-calendar-display");
                this.changeMode(mode);
            }
            target = null;
            mode = null;
        }).bind(this));
    }
    myDate = null;
    yy = null;
    mm = null;
    yy1 = null;
    yy2 = null;
    return this;
};
const printDay = function (nowDate) {
    // Render a 6x7 grid of days for the month containing nowDate into the
    // calendar body, then rebind cell click handlers and update the header.
    var
        dotDate = U.date(nowDate),
        monthStratDate = new Date(dotDate.getFullYear(), dotDate.getMonth(), 1, 12),
        _today = cfg.displayDate,
        tableStartDate = (function () {
            // First cell of the grid: back up from the 1st of the month to the
            // configured start-of-week day.
            var day = monthStratDate.getDay();
            if (day == 0) day = 7;
            day -= cfg.startOfWeek;
            try {
                return U.date(monthStratDate, {add: {d: -day}});
            }
            finally {
                day = null;
            }
        })(),
        loopDate,
        thisMonth = dotDate.getMonth(),
        itemStyles = {},
        i,
        k, _k,
        frameWidth = this.$["body"].width(),
        frameHeight = Math.floor(frameWidth * (6 / 7)), // 1week = 7days, 1month = 6weeks
        data,
        tmpl;
    if (cfg.dimensions.height) {
        // An explicit height wins over the width-derived aspect ratio.
        frameHeight = U.number(cfg.dimensions.height) - U.number(cfg.dimensions.colHeadHeight);
    }
    itemStyles['height'] = Math.floor(frameHeight / 6) - U.number(cfg.dimensions.itemPadding) * 2 + 'px';
    itemStyles['line-height'] = itemStyles['height'];
    itemStyles['padding'] = U.cssNumber(cfg.dimensions.itemPadding);
    data = {
        weekNames: [].concat(ax5.info.weekNames),
        list: []
    };
    if(cfg.startOfWeek) {
        // Rotate the weekday headers so the row starts on cfg.startOfWeek.
        data.weekNames = data.weekNames.concat(data.weekNames.slice(0, cfg.startOfWeek)).splice(cfg.startOfWeek);
    }
    data.weekNames.forEach(function (n) {
        n.colHeadHeight = U.cssNumber(cfg.dimensions.colHeadHeight);
    });
    loopDate = tableStartDate;
    i = 0;
    while (i < 6) {
        k = 0;
        while (k < 7) {
            // NOTE(review): _k is computed but not referenced below — verify
            // whether the day templates still need it.
            _k = (7 + (k - cfg.startOfWeek)) % 7;
            var
                thisDate = '' + U.date(loopDate, {"return": cfg.dateFormat}),
                _date = {
                    'row': i,
                    'col': k,
                    isStartOfWeek: (k == 0),
                    thisDate: '' + thisDate,
                    thisDataLabel: cfg.lang.dayTmpl.replace('%s', loopDate.getDate()),
                    itemStyles: U.css(itemStyles),
                    // CSS classes: live/disable/focus + weekend + marker theme
                    // + selection state.
                    addClass: (function () {
                        var classNames = "";
                        if (cfg.selectable) {
                            if (self.selectableMap[thisDate]) {
                                classNames += ( loopDate.getMonth() == thisMonth ) ? " live" : "";
                            }
                            else {
                                classNames += " disable";
                            }
                        }
                        else {
                            if(loopDate.getMonth() == thisMonth){
                                if(thisDate == U.date(_today, {"return": "yyyyMMdd"})){
                                    classNames += " focus";
                                }else{
                                    classNames += " live";
                                }
                                if(loopDate.getDay() == 0){
                                    classNames += " sunday";
                                }
                                if(loopDate.getDay() == 6){
                                    classNames += " saturday";
                                }
                            }
                        }
                        return classNames;
                    })()
                    + ' '
                    + (function () {
                        return (self.markerMap[thisDate]) ? self.markerMap[thisDate].theme || cfg.defaultMarkerTheme : '';
                    })()
                    + ' '
                    + (function () {
                        return (self.selectionMap[thisDate]) ? "selected-day" : '';
                    })()
                };
            data.list.push(_date);
            k++;
            loopDate = U.date(loopDate, {add: {d: 1}});
            thisDate = null;
            _date = null;
        }
        i++;
    }
    tmpl = CALENDAR.tmpl.get.call(this, "dayTmpl", data);
    this.$["body"].html(tmpl);
    // Delegate day-cell clicks to the shared onclick handler.
    this.$["body"].find('[data-calendar-item-date]').on(cfg.clickEventName, function (e) {
        e = e || window.event;
        onclick.call(self, e, 'date');
        U.stopEvent(e);
    });
    this.printedDay = {
        start: tableStartDate, end: loopDate
    };
    onStateChanged.call(this, null, {
        self: this,
        action: "printDay",
        printedDay: this.printedDay
    });
    setDisplay.call(this);
    dotDate = null;
    monthStratDate = null;
    _today = null;
    tableStartDate = null;
    loopDate = null;
    thisMonth = null;
    itemStyles = null;
    i = null;
    k = null;
    frameWidth = null;
    frameHeight = null;
    data = null;
    tmpl = null;
};
const printMonth = function (nowDate) {
    // Render a 4x3 grid of the 12 months into the calendar body, then rebind
    // cell click handlers and update the header.
    var
        dotDate = U.date(nowDate),
        nMonth = dotDate.getMonth(),
        itemStyles = {},
        i,
        k,
        m,
        tableStartMonth,
        frameWidth = this.$["body"].width(),
        frameHeight = Math.floor(frameWidth * (6 / 7)),
        data,
        tmpl;
    if (cfg.dimensions.height) {
        frameHeight = U.number(cfg.dimensions.height) - U.number(cfg.dimensions.colHeadHeight);
    }
    itemStyles['height'] = Math.floor(frameHeight / 4) - U.number(cfg.dimensions.itemPadding) * 2 + 'px';
    itemStyles['line-height'] = itemStyles['height'];
    itemStyles['padding'] = U.cssNumber(cfg.dimensions.itemPadding);
    data = {
        colHeadHeight: U.cssNumber(cfg.dimensions.colHeadHeight),
        colHeadLabel: cfg.lang.monthHeading,
        list: []
    };
    tableStartMonth = 0;
    m = 0;
    i = 0;
    while (i < 4) {
        k = 0;
        while (k < 3) {
            var
                _month = {
                    row: i,
                    col: k,
                    isStartOfRow: (k == 0),
                    thisMonth: dotDate.getFullYear() + '-' + U.setDigit(m + 1, 2) + '-' + U.setDigit(dotDate.getDate(), 2),
                    thisMonthLabel: cfg.lang.months[m],
                    itemStyles: U.css(itemStyles),
                    // CSS classes: live/disable + focus + marker theme.
                    addClass: (function () {
                        if (cfg.selectable) {
                            return (self.selectableMap[m]) ? 'live' : 'disable';
                        }
                        else {
                            return 'live';
                        }
                    })()
                    + ' '
                    + (function () {
                        return ( m == nMonth ) ? "focus" : "";
                    })()
                    + ' '
                    + (function () {
                        return (self.markerMap[m]) ? self.markerMap[m].theme || cfg.defaultMarkerTheme : '';
                    })()
                };
            data.list.push(_month);
            m++;
            k++;
            _month = null;
        }
        i++;
    }
    tmpl = CALENDAR.tmpl.get.call(this, "monthTmpl", data);
    this.$["body"].html(tmpl);
    // Delegate month-cell clicks to the shared onclick handler.
    this.$["body"].find('[data-calendar-item-month]').on(cfg.clickEventName, function (e) {
        e = e || window.event;
        onclick.call(self, e, 'month');
        U.stopEvent(e);
    });
    this.printedDay = {
        start: dotDate.getFullYear() + '-' + U.setDigit(tableStartMonth + 1, 2),
        end: dotDate.getFullYear() + '-' + U.setDigit(m, 2)
    };
    onStateChanged.call(this, null, {
        self: this,
        action: "printMonth",
        printedDay: this.printedDay
    });
    setDisplay.call(this);
    dotDate = null;
    nMonth = null;
    itemStyles = null;
    i = null;
    k = null;
    m = null;
    tableStartMonth = null;
    frameWidth = null;
    frameHeight = null;
    data = null;
    tmpl = null;
};
const printYear = function (nowDate) {
    // Render a 5x4 grid of years (nYear-10 .. nYear+9) into the calendar body,
    // then rebind cell click handlers and update the header.
    var
        dotDate = U.date(nowDate),
        nYear = dotDate.getFullYear(),
        itemStyles = {},
        i,
        k,
        y,
        tableStartYear,
        frameWidth = this.$["body"].width(),
        frameHeight = Math.floor(frameWidth * (6 / 7)),
        data,
        tmpl;
    if (cfg.dimensions.height) {
        frameHeight = U.number(cfg.dimensions.height) - U.number(cfg.dimensions.colHeadHeight);
    }
    itemStyles['height'] = Math.floor(frameHeight / 5) - U.number(cfg.dimensions.itemPadding) * 2 + 'px';
    itemStyles['line-height'] = itemStyles['height'];
    itemStyles['padding'] = U.cssNumber(cfg.dimensions.itemPadding);
    data = {
        colHeadHeight: U.cssNumber(cfg.dimensions.colHeadHeight),
        colHeadLabel: cfg.lang.yearHeading,
        list: []
    };
    tableStartYear = nYear - 10;
    y = nYear - 10;
    i = 0;
    while (i < 5) {
        k = 0;
        while (k < 4) {
            var
                _year = {
                    row: i,
                    col: k,
                    isStartOfRow: (k == 0),
                    thisYear: y + '-' + U.setDigit(dotDate.getMonth() + 1, 2) + '-' + U.setDigit(dotDate.getDate(), 2),
                    thisYearLabel: cfg.lang.yearTmpl.replace('%s', (y)),
                    itemStyles: U.css(itemStyles),
                    // CSS classes: live/disable + focus + marker theme.
                    addClass: (function () {
                        if (cfg.selectable) {
                            return (self.selectableMap[y]) ? 'live' : 'disable';
                        }
                        else {
                            return 'live';
                        }
                    })()
                    + ' '
                    + (function () {
                        return ( y == nYear ) ? "focus" : "";
                    })()
                    + ' '
                    + (function () {
                        // BUGFIX: marker themes come from markerMap — as in
                        // printDay/printMonth — not from selectableMap.
                        return (self.markerMap[y]) ? self.markerMap[y].theme || cfg.defaultMarkerTheme : '';
                    })()
                };
            data.list.push(_year);
            y++;
            k++;
            _year = null;
        }
        i++;
    }
    tmpl = CALENDAR.tmpl.get.call(this, "yearTmpl", data);
    this.$["body"].html(tmpl);
    this.$["body"].find('[data-calendar-item-year]').on(cfg.clickEventName, function (e) {
        e = (e || window.event);
        // BUGFIX: invoke the shared handler with the calendar instance (`self`)
        // like printDay/printMonth do; `this` here is the clicked DOM element,
        // which corrupted the context object passed to cfg.onClick.
        onclick.call(self, e, 'year');
        U.stopEvent(e);
    });
    this.printedDay = {
        start: tableStartYear, end: y - 1
    };
    onStateChanged.call(this, null, {
        self: this,
        action: "printYear",
        printedDay: this.printedDay
    });
    setDisplay.call(this);
    dotDate = null;
    nYear = null;
    itemStyles = null;
    i = null;
    k = null;
    y = null;
    tableStartYear = null;
    frameWidth = null;
    frameHeight = null;
    data = null;
    tmpl = null;
};
const onclick = function (e, mode, target, value) {
    // Shared click handler for date/month/year cells. `mode` selects the data
    // attribute to read; `target` and `value` are scratch parameters.
    var
        removed,
        dt,
        selectable;
    mode = mode || "date";
    target = U.findParentNode(e.target, function (target) {
        if (target.getAttribute("data-calendar-item-" + mode)) {
            return true;
        }
    });
    if (target) {
        value = target.getAttribute("data-calendar-item-" + mode);
        dt = U.date(value, {"return": cfg.dateFormat});
        selectable = true;
        // multipleSelect: false -> 1, true -> 2, a number -> that many.
        selectableCount = (cfg.multipleSelect) ? (U.isNumber(cfg.multipleSelect)) ? cfg.multipleSelect : 2 : 1;
        if (cfg.selectable) {
            if (!self.selectableMap[dt]) selectable = false;
        }
        if (mode == "date") {
            if (selectable) {
                if (self.selection.length >= selectableCount) {
                    // Evict the oldest selections so at most selectableCount-1
                    // remain before the new one is added.
                    removed = self.selection.splice(0, self.selection.length - (selectableCount - 1));
                    removed.forEach(function (d) {
                        self.$["body"].find('[data-calendar-item-date="' + U.date(d, {"return": cfg.dateFormat}) + '"]').removeClass("selected-day");
                    });
                }
                jQuery(target).addClass("selected-day");
                self.selection.push(value);
                if (self.onClick) {
                    self.onClick.call({
                        self: this, date: value, target: this.target, dateElement: target
                    });
                }
            }
        }
        else if (mode == "month") {
            if (cfg.selectMode == "month") {
                if (selectable) {
                    if (self.selection.length >= selectableCount) {
                        removed = self.selection.splice(0, self.selection.length - (selectableCount - 1));
                        removed.forEach(function (d) {
                            self.$["body"].find('[data-calendar-item-month="' + U.date(d, {"return": 'yyyy-MM-dd'}) + '"]').removeClass("selected-month");
                        });
                    }
                    jQuery(target).addClass("selected-month");
                    self.selection.push(value);
                    if (self.onClick) {
                        self.onClick.call({
                            self: this, date: value, target: this.target, dateElement: target
                        });
                    }
                }
            }
            else {
                // Not selecting months: a month click drills down to day view.
                self.changeMode("day", value);
            }
        }
        else if (mode == "year") {
            if (cfg.selectMode == "year") {
                if (selectable) {
                    if (self.selection.length >= selectableCount) {
                        removed = self.selection.splice(0, self.selection.length - (selectableCount - 1));
                        removed.forEach(function (d) {
                            self.$["body"].find('[data-calendar-item-year="' + U.date(d, {"return": 'yyyy-MM-dd'}) + '"]').removeClass("selected-year");
                        });
                    }
                    jQuery(target).addClass("selected-year");
                    self.selection.push(value);
                    if (self.onClick) {
                        self.onClick.call({
                            self: this, date: value, target: this.target, dateElement: target
                        });
                    }
                }
            }
            else {
                // Not selecting years: a year click drills down to month view.
                self.changeMode("month", value);
            }
        }
    }
    mode = null;
    target = null;
    value = null;
    removed = null;
    dt = null;
    selectable = null;
};
const move = function (e, target, value) {
    // Handle prev/next arrow clicks: shift the display date by one unit of the
    // current mode (month for day view, year for month view, decade for year
    // view) and repaint.
    target = U.findParentNode(e.target, function (el) {
        if (el.getAttribute("data-calendar-move")) {
            return true;
        }
    });
    if (target) {
        value = target.getAttribute("data-calendar-move");
        const direction = (value == "left") ? -1 : 1;
        if (cfg.mode == "day" || cfg.mode == "d") {
            cfg.displayDate = U.date(cfg.displayDate, {add: {m: direction}});
            printDay.call(this, cfg.displayDate);
        }
        else if (cfg.mode == "month" || cfg.mode == "m") {
            cfg.displayDate = U.date(cfg.displayDate, {add: {y: direction}});
            printMonth.call(this, cfg.displayDate);
        }
        else if (cfg.mode == "year" || cfg.mode == "y") {
            cfg.displayDate = U.date(cfg.displayDate, {add: {y: direction * 10}});
            printYear.call(this, cfg.displayDate);
        }
    }
    target = null;
    value = null;
};
const applyMarkerMap = function () {
    // Defer so marker theme classes are added after the day grid has rendered;
    // only applicable in day mode.
    setTimeout((function () {
        if (cfg.mode === "day" || cfg.mode === "d") {
            for (var k in this.markerMap) {
                this.$["body"].find('[data-calendar-item-date="' + k + '"]').addClass(this.markerMap[k].theme || cfg.defaultMarkerTheme);
            }
        }
    }).bind(this));
};
const applySelectionMap = function () {
    // Defer so the "selected-day" class is applied after the grid has rendered.
    setTimeout((function () {
        for (var k in this.selectionMap) {
            this.$["body"].find('[data-calendar-item-date="' + k + '"]').addClass("selected-day");
        }
    }).bind(this));
};
const applyPeriodMap = function () {
    // Defer so period labels/themes are applied after the day grid has
    // rendered; only applicable in day mode.
    setTimeout((function () {
        if (cfg.mode === "day" || cfg.mode === "d") {
            for (var k in this.periodMap) {
                if (this.periodMap[k].label) {
                    this.$["body"].find('[data-calendar-item-date="' + k + '"]').find(".addon-footer").html(this.periodMap[k].label);
                }
                this.$["body"].find('[data-calendar-item-date="' + k + '"]').addClass(this.periodMap[k].theme);
            }
        }
    }).bind(this));
};
const clearPeriodMap = function () {
    // Remove previously applied period labels and themes (day mode only).
    if (cfg.mode === "day" || cfg.mode === "d") {
        for (var k in this.periodMap) {
            this.$["body"].find('[data-calendar-item-date="' + k + '"]').find(".addon-footer").empty();
            this.$["body"].find('[data-calendar-item-date="' + k + '"]').removeClass(this.periodMap[k].theme);
        }
    }
};
/**
* Preferences of calendar UI
* @method ax5calendar.setConfig
* @param {Object} config - 클래스 속성값
* @param {Element|nodelist} config.target
* @param {String} [config.mode=day|month|year]
* @param {Function} [config.onClick]
* @returns {ax5calendar}
* @example
* ```js
* var myCalendar = new ax5.ui.calendar();
* myCalendar.setConfig({
* target: $("#target"),
* mode: "day"
* });
* ```
*/
//== class body start
this.init = function () {
    // Called after setConfig(): builds the frame DOM, wires events and kicks
    // off the initial render for the configured mode.
    this.onStateChanged = cfg.onStateChanged;
    this.onClick = cfg.onClick;
    if (!cfg.target) {
        console.log(ax5.info.getError("ax5calendar", "401", "setConfig"));
    }
    this.target = jQuery(cfg.target);
    cfg.displayDate = U.date(cfg.displayDate);
    this.target.html(getFrame.call(this));
    // Collect the rendered frame's parts.
    this.$ = {
        "root": this.target.find('[data-calendar-els="root"]'),
        "control": this.target.find('[data-calendar-els="control"]'),
        "control-display": this.target.find('[data-calendar-els="control-display"]'),
        "body": this.target.find('[data-calendar-els="body"]')
    };
    if (cfg.control) {
        // Prev/next arrow clicks.
        this.$["root"].on(cfg.clickEventName, '[data-calendar-move]', (function (e) {
            move.call(this, e || window.event);
        }).bind(this));
    }
    // collect selectionMap
    if (cfg.selection) {
        this.setSelection(cfg.selection, false);
    }
    // collect selectableMap
    if (cfg.selectable) {
        this.setSelectable(cfg.selectable, false);
    }
    // collect markerMap
    if (cfg.marker) {
        this.setMarker(cfg.marker, false);
    }
    // Defer the first paint so the container is laid out before measuring.
    setTimeout((function () {
        if (cfg.mode === "day" || cfg.mode === "d") {
            printDay.call(this, cfg.displayDate);
        }
        else if (cfg.mode === "month" || cfg.mode === "m") {
            printMonth.call(this, cfg.displayDate);
        }
        else if (cfg.mode === "year" || cfg.mode === "y") {
            printYear.call(this, cfg.displayDate);
        }
    }).bind(this));
};
/**
 * Switch the calendar between day/month/year views and re-render.
 * @method ax5calendar.changeMode
 * @param {String} mode - "day"|"d", "month"|"m" or "year"|"y"
 * @param {String} changeDate - optional new display date
 * @returns {ax5calendar}
 */
this.changeMode = function (mode, changeDate) {
    if (typeof changeDate != "undefined") cfg.displayDate = changeDate;
    if (mode) cfg.mode = mode;
    // Fade out, repaint for the (possibly new) mode, then fade back in.
    this.$["body"].removeClass("fadein").addClass("fadeout");
    setTimeout((function () {
        if (cfg.mode == "day" || cfg.mode == "d") {
            printDay.call(this, cfg.displayDate);
        }
        else if (cfg.mode == "month" || cfg.mode == "m") {
            printMonth.call(this, cfg.displayDate);
        }
        else if (cfg.mode == "year" || cfg.mode == "y") {
            printYear.call(this, cfg.displayDate);
        }
        this.$["body"].removeClass("fadeout").addClass("fadein");
    }).bind(this), cfg.animateTime);
    return this;
};
/**
 * Replace the current selection with the given array of dates.
 * @method ax5calendar.setSelection
 * @param {Array} selection
 * @returns {ax5calendar}
 * @example
 * ```
 *
 * ```
 */
this.setSelection = (function () {
    self.selectionMap = {};
    var processor = {
        'arr': function (v, map, count) {
            map = {};
            if (!U.isArray(v)) return map;
            // NOTE(review): splice mutates the caller's array, keeping only
            // the first `count` entries as the selection — confirm intended.
            self.selection = v = v.splice(0, count);
            v.forEach(function (n) {
                if (U.isDate(n))
                    n = U.date(n, {'return': cfg.dateFormat});
                map[n] = true;
            });
            return map;
        }
    };
    return function (selection, isPrint) {
        var
            result = {}
        ;
        // multipleSelect: false -> 1, true -> 2, a number -> that many.
        selectableCount = (cfg.multipleSelect) ? (U.isNumber(cfg.multipleSelect)) ? cfg.multipleSelect : 2 : 1;
        // N.B. assignment inside the condition: cfg.selection is updated here.
        if (cfg.selection = selection) {
            if (U.isArray(selection)) {
                result = processor.arr(selection, {}, selectableCount);
            } else {
                return this;
            }
        }
        this.selectionMap = jQuery.extend({}, result);
        // Apply the changes and re-render.
        if (isPrint !== false) applySelectionMap.call(this);
        result = null;
        return this;
    };
})();
/**
 * Return the current selection (raw values as clicked or passed in).
 * @method ax5calendar.getSelection
 */
this.getSelection = function () {
    return this.selection;
};
/**
 * Restrict which dates/months/years are selectable.
 * @method ax5calendar.setSelectable
 */
this.setSelectable = (function () {
    self.selectableMap = {};
    // Normalizers: each turns a user-supplied `selectable` spec into a lookup
    // map keyed by formatted date string (or raw month/year number).
    var processor = {
        'arr': function (v, map) {
            map = {};
            if (!U.isArray(v)) return map;
            v.forEach(function (n) {
                if (U.isDate(n))
                    n = U.date(n, {'return': cfg.dateFormat});
                map[n] = true;
            });
            return map;
        },
        'obj': function (v, map) {
            map = {};
            if (U.isArray(v)) return map;
            if (v.range) return map;
            for (var k in v) {
                map[k] = v[k];
            }
            return map;
        },
        'range': function (v, map) {
            map = {};
            if (U.isArray(v)) return map;
            if (!v.range) return map;
            v.range.forEach(function (n) {
                if (U.isDateFormat(n.from) && U.isDateFormat(n.to)) {
                    // Date range: mark every day from n.from to n.to inclusive.
                    for (var d = U.date(n.from); d <= U.date(n.to); d.setDate(d.getDate() + 1)) {
                        map[U.date(d, {"return": cfg.dateFormat})] = true;
                    }
                }
                else {
                    // Numeric range (month or year indices).
                    for (var i = n.from; i <= n.to; i++) {
                        map[i] = true;
                    }
                }
            });
            return map;
        }
    };
    return function (selectable, isPrint) {
        var
            key,
            result = {}
        ;
        // N.B. assignment inside the condition: cfg.selectable is updated here.
        if (cfg.selectable = selectable) {
            if (U.isArray(selectable)) {
                result = processor.arr(selectable);
            }
            else {
                // Dispatch on which processor key the spec carries.
                for (key in processor) {
                    if (selectable[key]) {
                        result = processor[key](selectable);
                        break;
                    }
                }
                if (Object.keys(result).length === 0) {
                    result = processor.obj(selectable);
                }
            }
        }
        this.selectableMap = result;
        // Apply the changes and re-render.
        if (isPrint !== false) this.changeMode();
        return this;
    };
})();
/**
 * Attach marker themes/labels to dates (e.g. holidays).
 * @method ax5calendar.setMarker
 */
this.setMarker = (function () {
    self.markerMap = {};
    // Normalizers: each turns a user-supplied `marker` spec into a lookup map.
    var processor = {
        'obj': function (v, map) {
            map = {};
            if (U.isArray(v)) return map;
            if (v.range) return map;
            for (var k in v) {
                map[k] = v[k];
            }
            v = null;
            return map;
        },
        'range': function (v, map) {
            map = {};
            if (U.isArray(v)) return map;
            if (!v.range) return map;
            v.range.forEach(function (n) {
                if (U.isDateFormat(n.from) && U.isDateFormat(n.to)) {
                    // Date range: mark every day from n.from to n.to inclusive.
                    for (var d = U.date(n.from); d <= U.date(n.to); d.setDate(d.getDate() + 1)) {
                        map[U.date(d, {"return": cfg.dateFormat})] = {theme: n.theme, label: n.label};
                    }
                }
                else {
                    // Numeric range (month or year indices).
                    for (var i = n.from; i <= n.to; i++) {
                        map[i] = {theme: n.theme, label: n.label};
                    }
                }
            });
            v = null;
            return map;
        }
    };
    return function (marker, isApply) {
        var
            key,
            result = {}
        ;
        // N.B. assignment inside the condition: cfg.marker is updated here.
        if (cfg.marker = marker) {
            for (key in processor) {
                if (marker[key]) {
                    result = processor[key](marker);
                    break;
                }
            }
            if (Object.keys(result).length === 0) {
                result = processor.obj(marker);
            }
        }
        this.markerMap = result;
        // Apply the changes and re-render.
        if (isApply !== false) applyMarkerMap.call(this);
        return this;
    };
})();
/**
 * Highlight one or more date ranges (periods) with a theme and endpoint labels.
 * @method ax5calendar.setPeriod
 */
this.setPeriod = (function () {
    self.periodMap = {};
    var processor = {
        'range': function (v, map) {
            map = {};
            if (U.isArray(v)) return map;
            if (!v.range) return map;
            v.range.forEach(function (n) {
                if (U.isDateFormat(n.from) && U.isDateFormat(n.to)) {
                    // Walk the range; the endpoints additionally carry their
                    // from/to labels.
                    for (var d = new Date(U.date(n.from)); d <= U.date(n.to); d.setDate(d.getDate() + 1)) {
                        if (d.getTime() == U.date(n.from).getTime()) {
                            map[U.date(d, {"return": cfg.dateFormat})] = {theme: n.theme || cfg.defaultPeriodTheme, label: n.fromLabel};
                        } else if (d.getTime() == U.date(n.to).getTime()) {
                            map[U.date(d, {"return": cfg.dateFormat})] = {theme: n.theme || cfg.defaultPeriodTheme, label: n.toLabel};
                        } else {
                            map[U.date(d, {"return": cfg.dateFormat})] = {theme: n.theme || cfg.defaultPeriodTheme};
                        }
                    }
                }
            });
            v = null;
            return map;
        }
    };
    return function (period, isApply) {
        var
            key,
            result = {}
        ;
        // Clear the previous period's classes/labels before applying new ones.
        if (isApply !== false) {
            clearPeriodMap.call(this);
        }
        // N.B. assignment inside the condition: cfg.period is updated here.
        if (cfg.period = period) {
            result = processor.range(period);
        }
        this.periodMap = result;
        // Apply the changes and re-render.
        if (isApply !== false) {
            applyPeriodMap.call(this);
        }
        return this;
    };
})();
// Constructor body: register this instance globally and apply any passed
// configuration object.
this.main = (function () {
    UI.calendar_instance = UI.calendar_instance || [];
    UI.calendar_instance.push(this);
    if (arguments && U.isObject(arguments[0])) {
        this.setConfig(arguments[0]);
    }
}).apply(this, arguments);
};
})());
CALENDAR = ax5.ui.calendar;
})();
|
/* permutation/gsl_permute_vector_double.h
*
* Copyright (C) 1996, 1997, 1998, 1999, 2000, 2007 Brian Gough
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or (at
* your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef __GSL_PERMUTE_VECTOR_DOUBLE_H__
#define __GSL_PERMUTE_VECTOR_DOUBLE_H__
#include <stdlib.h>
#include <gsl/gsl_errno.h>
#include <gsl/gsl_permutation.h>
#include <gsl/gsl_vector_double.h>
#undef __BEGIN_DECLS
#undef __END_DECLS
/* Wrap declarations in extern "C" when compiled as C++. */
#ifdef __cplusplus
# define __BEGIN_DECLS extern "C" {
# define __END_DECLS }
#else
# define __BEGIN_DECLS /* empty */
# define __END_DECLS /* empty */
#endif
__BEGIN_DECLS
/* Apply the permutation p to the elements of vector v, in place. */
int gsl_permute_vector (const gsl_permutation * p, gsl_vector * v);
/* Apply the inverse of permutation p to the elements of vector v, in place. */
int gsl_permute_vector_inverse (const gsl_permutation * p, gsl_vector * v);
__END_DECLS
#endif /* __GSL_PERMUTE_VECTOR_DOUBLE_H__ */
|
import sys
import time
from collections import defaultdict
import numpy as np
import psutil
import ray
import scipy.signal
import tensorflow as tf
# Let TensorFlow grow GPU memory on demand instead of reserving it all
# up front; the session is created eagerly so the option takes effect
# for the whole process.
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True
session = tf.compat.v1.Session(config=config)
# Number of timed repetitions for each benchmark below.
num_trials = 5
# Count the number of physical CPUs (hyperthreads excluded).
num_cpus = psutil.cpu_count(logical=False)
print('Using {} cores.'.format(num_cpus))
# One Ray worker per physical core.
ray.init(num_cpus=num_cpus)
################################################
###### Benchmark 1: numerical computation ######
################################################
@ray.remote
def f(image, random_filter):
    """Convolve `image` with `random_filter`, then downsample the result 5x
    in both dimensions (keeps the task's return value small)."""
    filtered = scipy.signal.convolve2d(image, random_filter)
    return filtered[::5, ::5]
# One random 4x4 filter per parallel task.
filters = [np.random.normal(size=(4, 4)) for _ in range(num_cpus)]
def run_benchmark():
    """Fan a single 3000x3000 image out to num_cpus convolution tasks."""
    image = np.zeros((3000, 3000))
    # Put the image in the object store once so all tasks share it
    # instead of each task receiving its own copy.
    image_id = ray.put(image)
    ray.get([f.remote(image_id, filters[i]) for i in range(num_cpus)])
# Run it a couple times to warm up the Ray object store because the initial
# memory accesses are slower.  (Fix: a plain loop instead of a list
# comprehension built only for its side effects and then discarded.)
for _ in range(5):
    run_benchmark()
durations1 = []
for _ in range(num_trials):
    start_time = time.time()
    run_benchmark()
    duration1 = time.time() - start_time
    durations1.append(duration1)
    print('Numerical computation workload took {} seconds.'.format(duration1))
###############################################
###### Benchmark 2: stateful computation ######
###############################################
@ray.remote
class StreamingPrefixCount(object):
    """Actor that tracks word-prefix frequencies across streamed documents."""

    def __init__(self):
        self.prefix_count = defaultdict(int)
        self.popular_prefixes = set()

    def add_document(self, document):
        """Count every proper prefix of every word; remember the popular ones."""
        for word in document:
            for end in range(1, len(word)):
                prefix = word[:end]
                count = self.prefix_count[prefix] + 1
                self.prefix_count[prefix] = count
                if count > 3:
                    self.popular_prefixes.add(prefix)

    def get_popular(self):
        """Return the set of prefixes seen more than three times."""
        return self.popular_prefixes
durations2 = []
for _ in range(num_trials):
    # Fresh actors each trial so no prefix counts carry over between trials.
    streaming_actors = [StreamingPrefixCount.remote() for _ in range(num_cpus)]
    start_time = time.time()
    for i in range(num_cpus * 10):
        document = [np.random.bytes(20) for _ in range(10000)]
        # Round-robin the documents over the actors (fire-and-forget;
        # the ray.get below synchronizes).
        streaming_actors[i % num_cpus].add_document.remote(document)
    # Aggregate all of the results.
    results = ray.get([actor.get_popular.remote() for actor in streaming_actors])
    popular_prefixes = set()
    for prefixes in results:
        popular_prefixes |= prefixes
    duration2 = time.time() - start_time
    durations2.append(duration2)
    print('Stateful computation workload took {} seconds.'.format(duration2))
###################################################
###### Benchmark 3: expensive initialization ######
###################################################
# Train a small MNIST classifier once and save it to disk; the actors in
# benchmark 3 each reload it during initialization (the "expensive" part).
mnist = tf.keras.datasets.mnist.load_data()
x_train, y_train = mnist[0]
x_train = x_train / 255.0
model = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(input_shape=(28, 28)),
    tf.keras.layers.Dense(512, activation=tf.nn.relu),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(10, activation=tf.nn.softmax)
])
model.compile(
    optimizer='adam',
    loss='sparse_categorical_crossentropy',
    metrics=['accuracy'])
# Train the model.
model.fit(x_train, y_train, epochs=1)
# Save the model to disk.
filename = '/tmp/model'
model.save(filename)
@ray.remote
class Model(object):
    """Actor that holds a loaded Keras model and repeatedly scores test data."""

    def __init__(self, i):
        # Pin the actor to core `i` on Linux so the TensorFlow thread pools
        # of different actors do not contend with one another.
        if sys.platform == 'linux':
            psutil.Process().cpu_affinity([i])
        # Load the trained model from disk plus the MNIST test split.
        self.model = tf.keras.models.load_model(filename)
        data = tf.keras.datasets.mnist.load_data()
        self.x_test = data[1][0] / 255.0

    def evaluate_next_batch(self):
        # Note that we reuse the same data over and over, but in a
        # real application, the data would be different each time.
        return self.model.predict(self.x_test)

    def ping(self):
        # No-op used only to confirm the actor finished initializing.
        pass
actors = [Model.remote(i) for i in range(num_cpus)]
# Make sure the actors have started (ping resolves only after __init__).
ray.get([actor.ping.remote() for actor in actors])
durations3 = []
for _ in range(num_trials):
    start_time = time.time()
    # Parallelize the evaluation of some test data.
    for j in range(10):
        results = ray.get([actor.evaluate_next_batch.remote() for actor in actors])
    duration3 = time.time() - start_time
    durations3.append(duration3)
    print('Expensive initialization workload took {} seconds.'.format(duration3))
print('Used {} cores.'.format(num_cpus))
# Final summary: mean +/- standard deviation per benchmark.
print("""
Results:
- Numerical computation: {} +/- {}
- Stateful computation: {} +/- {}
- Expensive initialization: {} +/- {}
""".format(np.mean(durations1), np.std(durations1),
           np.mean(durations2), np.std(durations2),
           np.mean(durations3), np.std(durations3)))
|
import datetime
from django.contrib.auth.models import AbstractUser
from django.contrib.postgres.fields import ArrayField
from django.contrib.gis.db import models
from django.contrib.gis.geos import Point
class User(AbstractUser):
    """Auth user extended with an account type (student or tutor)."""
    STUDENT = 'student'
    TUTOR = 'tutor'
    TYPES = [
        (STUDENT, 'Student'),
        (TUTOR, 'Tutor'),
    ]
    type = models.CharField(max_length=20, choices=TYPES)

    @property
    def profile(self):
        """The Student/Tutor profile matching `type`, or None for other types."""
        related_name = {self.STUDENT: 'student', self.TUTOR: 'tutor'}.get(self.type)
        if related_name is None:
            return None
        return getattr(self, related_name)
class ProfilePicture(models.Model):
    # One picture per user; the raw image bytes are stored in the database.
    user = models.OneToOneField('accounts.User', on_delete=models.CASCADE, related_name='picture')
    image = models.BinaryField()
class Profile(models.Model):
    """Abstract base for Student/Tutor profiles.

    `date_of_birth` is optional (blank=True, null=True), so the
    `date_of_birth_datetime` property/setter tolerate None instead of
    raising TypeError/AttributeError as before.
    """
    user = models.OneToOneField('accounts.User', on_delete=models.CASCADE, related_name='%(class)s')
    locations = models.ManyToManyField('accounts.Location', related_name='%(class)ss')
    date_of_birth = models.DateField(blank=True, null=True)
    gender = models.CharField(max_length=50, blank=True)

    @property
    def date_of_birth_datetime(self):
        """date_of_birth at midnight as a datetime, or None when unset."""
        if self.date_of_birth is None:
            return None
        return datetime.datetime.combine(self.date_of_birth, datetime.datetime.min.time())

    @date_of_birth_datetime.setter
    def date_of_birth_datetime(self, date_of_birth):
        # Accept None to clear the (nullable) field.
        self.date_of_birth = None if date_of_birth is None else date_of_birth.date()

    class Meta:
        abstract = True

    def __str__(self):
        return f"{self.user}"
class Student(Profile):
    # Tutors this student works with (reverse side: Tutor.students).
    tutors = models.ManyToManyField('accounts.Tutor', related_name='students')
    def __str__(self):
        return f"{self.user}"
class Tutor(Profile):
    """Tutor profile: hourly rate, availability and taught subjects.

    `subjects` is stored as [subject, level] pairs; `subject_dicts`
    exposes the same data as a list of dicts.
    """
    HIGH_SCHOOL = "HIGH_SCHOOL"
    BACHELOR = "BACHELOR"
    MASTER = "MASTER"
    PHD = "PHD"
    LEVEL_CHOICES = [
        (HIGH_SCHOOL, "High school"),
        (BACHELOR, "Bachelor"),
        (MASTER, "Master"),
        (PHD, "PhD"),
    ]
    hourly_rate = models.DecimalField(max_digits=10, decimal_places=2, blank=True, null=True)
    subjects = ArrayField(
        ArrayField(
            models.CharField(max_length=100),
            size=2,
            default=list,
        ),
        default=list,
    )
    available = models.BooleanField(blank=True, default=True)

    @property
    def subject_dicts(self):
        """The stored [subject, level] pairs re-shaped as dicts."""
        return [{'subject': pair[0], 'level': pair[1]} for pair in self.subjects]

    @subject_dicts.setter
    def subject_dicts(self, subject_dicts):
        self.subjects = [[entry['subject'], entry['level']] for entry in subject_dicts]

    def __str__(self):
        return f"{self.user}"
class Location(models.Model):
    """A geocoded address; `location` is derived from latitude/longitude on save."""
    address = models.CharField(max_length=255)
    google_id = models.CharField(max_length=255)
    latitude = models.FloatField()
    longitude = models.FloatField()
    location = models.PointField(geography=True)

    def save(self, *args, **kwargs):
        # GEOS points are (x, y) == (longitude, latitude); srid 4326 is WGS84.
        point = Point(self.longitude, self.latitude, srid=4326)
        self.location = point
        super().save(*args, **kwargs)

    def __str__(self):
        return f"{self.address}"
|
import networkx as nx, math, sys, time
# Module-wide switch for very chatty debug printing.
verbosed_flag = False
class AnytimeSCANGraph(nx.Graph):
'''Specific Graph Class for Anytime SCAN algorithm
attributes:
untouched_nodes: set, initial value includes all nodes
unprocessed_nodes: set, includes all nodes from which it has not expanded, initial value is empty set
processed_nodes: set, includes all nodes from which it has expanded, initial value is empty set
core_nodes: set, it includes all (processed | unprocessed) core nodes, initial value is empty set. it's used
for obtaining clustering results easily
core_subgraph: nx.Graph, is the subgraph obtained by core_nodes and it only includes the edges with
SS >= epsilon between core nodes
cluster_num: int, current cluster numbers
degree_map: dict, key is the degree and value is the set of nodes with corresponding degree value
degree here does not count the node itself. This dict is used for active learning;
it only includes the unprocessed nodes
'''
    def __init__(self, filename = None):
        '''
        Initialize the undirected, unweighted graph from an edge list file.
        :param filename: edge list file; each line represents one edge in the
                         format "node1 node2".  When None, an empty graph is
                         built and no per-node state is initialized.
        '''
        super(AnytimeSCANGraph, self).__init__()
        if filename == None: return
        fgraph = open(filename)
        for line in fgraph:
            tokens = line.strip().split()
            self.add_edge(tokens[0], tokens[1])
            # SS (structural similarity) is computed lazily; -1 means "unknown".
            # NOTE(review): Graph.edge/Graph.node is the networkx 1.x API.
            self.edge[tokens[0]][tokens[1]]['SS'] = -1
        print('num of nodes: %d; num of edges: %d.' % (self.number_of_nodes(), self.number_of_edges()))
        self.untouched_nodes = set()
        self.unprocessed_nodes = set()
        self.processed_nodes = set()
        self.core_nodes = set()
        '''cluster_num is not used in this version'''
        self.cluster_num = 0
        '''degree_map is used for active learning, which only records the degree distribution of unprocessed nodes'''
        self.degree_map = dict()
        self.core_subgraph = None
        '''low and high bound the degree window used for active learning'''
        self.low = 9999999
        self.high = 0
        '''candidate_nodes_AL is the core candidate set for active learning (built lazily on first pick)'''
        self.candidate_nodes_AL = None
        '''candidate_nodes_AL_non_core is the non-core candidate set for active learning'''
        self.candidate_nodes_AL_non_core = None
        '''initialize the status of nodes'''
        for node in self.nodes():
            self.node[node]['category'] = 'noise'
            self.node[node]['clusterID'] = -1
            self.node[node]['numStrongEdges'] = 0
            self.untouched_nodes.add(node)
            # Bucket nodes by degree (degree does not count the node itself).
            self.degree_map.setdefault(self.degree(node), set()).add(node)
    def anytime_scan(self, dataset_name, epsilon, mu, threshold = 100, isHeuristic = True, isRandom_AL = False, true_label_filename = None):
        '''
        Perform anytime SCAN: an initial one-hop clustering pass followed by
        up to `threshold` active-learning picks.  Quality metrics are appended
        to a tab-separated results file after the initial pass, every 1000
        picks, and once more at the end.
        :param dataset_name: used to build the output file names
        :param epsilon and mu: the two SCAN parameters (SS threshold / core size)
        :param threshold: stop condition for active anytime SCAN. It can be the num of active learning picks or running time.
                          Now we only implement the version counting active-learning picks.
        :param isHeuristic: if True (default), apply the heuristic in the initial clustering
        :param isRandom_AL: if False (default), use the high-low strategy in active learning; otherwise the random strategy
        :param true_label_filename: file storing the true label of each node.
                                    If None (default), ARI and NMI are not calculated.
        :return: None
        NOTE(review): time.clock() was removed in Python 3.8; together with the
        Graph.edge/Graph.node API this targets an old Python/networkx stack --
        confirm before upgrading.
        '''
        print('Entering anytime_scan()...')
        print('dataset: %s; isHeuristic: %s; isRandom_AL: %s; epsilon: %f; mu: %d; theshold: %d.' % (dataset_name, isHeuristic, isRandom_AL, epsilon, mu, threshold))
        # Results file: one row per reporting point (see header line below).
        filename = '%s_results_%s_%s_%.2f_%d_%d.txt' % (dataset_name, isHeuristic, isRandom_AL, epsilon, mu, threshold)
        fout = open(filename, 'w')
        line = 'Iter\tARI\tNMI\tModu\tnumSS\tnumCore\ttime\ttotal_time\tlow\thigh\n'
        fout.write(line)
        true_cluster_dict = None
        if true_label_filename != None:
            true_cluster_dict = self.get_true_cluster(true_label_filename)
        # --- initial clustering (timed separately from the reporting work) ---
        start = time.clock()
        if isHeuristic:
            self.get_initial_clustering_heuristic(epsilon, mu)
        else:
            self.get_initial_clustering(epsilon, mu)
        end = time.clock()
        total_time = float(end- start)
        print('current clusers after initial:')
        start = time.clock()
        initial_clusters, num_SS, num_Core = self.output_current_clustering_result(epsilon)
        end = time.clock()
        print('time for initial clustering: %f.' % (total_time + end - start, ))
        # ARI/NMI stay at -1 when no ground-truth labels are given.
        ARI = -1
        NMI = -1
        if true_label_filename != None:
            ARI, NMI = self.calculate_metrics_current(true_cluster_dict, initial_clusters)
            print('ARI: %f; NMI: %f.' % (ARI, NMI))
        modularity = self.calculate_modularity_current(initial_clusters)
        print('current modularity: %f.' % modularity)
        line = '%d\t%.4f\t%.4f\t%.4f\t%d\t%d\t%.4f\t%.4f\t%d\t%d\n' % (0, ARI, NMI, modularity, num_SS, num_Core, total_time, (total_time + end - start), self.low, self.high)
        fout.write(line)
        # --- active-learning loop: one pick + one-hop expansion per iteration ---
        counter = 0
        while counter < threshold and len(self.unprocessed_nodes) > 0:
            '''Pick an unprocessed node by active learning and expand from it'''
            '''Perform the active learning based on the selected strategy'''
            start = time.clock()
            if not isRandom_AL:
                next_seed = self.active_learning(mu)
                if next_seed == None:
                    print('No seed can be selected in active learning. done!!!')
                    break
            # NOTE(review): when isRandom_AL is True nothing assigns next_seed
            # here (an active_learning_random() call appears to be missing in
            # this version, see the comment below) -- confirm.
            print('picked seed:',next_seed)
            if not isRandom_AL:
                self.expand_onehop_from_node(next_seed, epsilon, mu)
            '''move the seed from unprocessed to processed'''
            '''active_learning_random() will remove seed from unprocessed, so we need check if unprocessed node have the seed before remove it'''
            if next_seed in self.unprocessed_nodes:
                self.unprocessed_nodes.remove(next_seed)
            self.processed_nodes.add(next_seed)
            counter += 1
            end = time.clock()
            total_time += (end - start)
            print('in active learning %d' % (counter,))
            # Periodic progress report every 1000 picks.
            if counter % 1000 == 0:
                print('current clusers:')
                start = time.clock()
                clusters, num_SS, num_Core = self.output_current_clustering_result(epsilon)
                end = time.clock()
                print('time neded: %f.' % (total_time + end - start,))
                if true_label_filename != None:
                    ARI, NMI = self.calculate_metrics_current(true_cluster_dict, clusters)
                    print('ARI: %f; NMI: %f.' % (ARI, NMI))
                modularity = self.calculate_modularity_current(clusters)
                print('current modularity: %f.' % modularity)
                line = '%d\t%.4f\t%.4f\t%.4f\t%d\t%d\t%.4f\t%.4f\t%d\t%d\n' % (
                    counter, ARI, NMI, modularity, num_SS, num_Core, total_time, (total_time + end - start), self.low,
                    self.high)
                fout.write(line)
                # Ground truth fully recovered: nothing left to learn.
                if int(NMI) == 1:
                    break
        # --- final report ---
        print('Anytime SCAN done!!!')
        print('Num of active learning: %d' % (counter,))
        print('final clusers:')
        start = time.clock()
        clusters, num_SS, num_Core = self.output_final_clustering_result(epsilon)
        end = time.clock()
        print('time neded: %f.' % (total_time + end - start,))
        '''
        total_num = 0
        for key in clusters:
            print(str(key), ':', sorted(clusters[key]))
            total_num += len(clusters[key])
        print('total_num in clustering results: %d' % (total_num,))
        '''
        if true_label_filename != None:
            ARI, NMI = self.calculate_metrics_final(true_cluster_dict)
            print('ARI: %f; NMI: %f.' % (ARI, NMI))
        modularity = self.calculate_modularity_final()
        print('final modularity: %f.' % modularity)
        print('finihed!')
        line = '%d\t%.4f\t%.4f\t%.4f\t%d\t%d\t%.4f\t%.4f\t%d\t%d\n' % (counter, ARI, NMI, modularity, num_SS, num_Core, total_time, (total_time + end - start), self.low, self.high)
        fout.write(line)
        fout.close()
        '''output the core / noise'''
        filename = 'node_info_%s_results_%s_%s_%.2f_%d_%d.txt' % (dataset_name, isHeuristic, isRandom_AL, epsilon, mu, threshold)
        self.print_nodes(filename)
    def original_scan(self, dataset_name, epsilon, mu, true_label_filename = None):
        '''
        Run the original (non-anytime) SCAN algorithm over the whole graph.
        :param dataset_name: dataset name, used to name the result file
        :param epsilon: SS threshold
        :param mu: minimal number of strong edges for a node to be a core
        :param true_label_filename: ground-truth label file, used for computing ARI and NMI, if applicable
        :return: None
        '''
        print('Entering original_scan()...')
        print('dataset: %s; epsilon: %.4f; mu: %d.' % (dataset_name, epsilon, mu))
        '''unprocessed nodes: nodes which have not been expanded from
        processed nodes: nodes which have been expanded from
        '''
        self.unprocessed_nodes |= self.untouched_nodes
        self.untouched_nodes.clear()
        print('num unprocessed: %d; num processed: %d.' % (len(self.unprocessed_nodes), len(self.processed_nodes)))
        self.cluster_num = 0
        num_of_cores = 0
        seed_pool = set()
        start = time.clock()
        # Outer loop: start a BFS-style expansion from each not-yet-visited node.
        while(len(self.unprocessed_nodes) > 0):
            seed = self.unprocessed_nodes.pop()
            seed_pool.add(seed)
            '''a flag indicating if a new cluster is constructed'''
            has_new_cluster = False
            while(len(seed_pool) > 0):
                expand_seed = seed_pool.pop()
                self.processed_nodes.add(expand_seed)
                if expand_seed in self.unprocessed_nodes:
                    self.unprocessed_nodes.remove(expand_seed)
                if verbosed_flag:
                    print('seed: '+ expand_seed)
                adj_list = nx.all_neighbors(self, expand_seed)
                '''compute / obtain the edges' SS for neighbors'''
                for one_neighbor in adj_list:
                    '''
                    if self.edge[expand_seed][one_neighbor]['SS'] == -1:
                        ss = self.calculate_SS_scan(expand_seed, one_neighbor)
                        self.edge[expand_seed][one_neighbor]['SS'] = ss
                    else:
                        ss = self.edge[expand_seed][one_neighbor]['SS']
                    '''
                    # SS is recomputed unconditionally here (the cached variant
                    # above is disabled in this version).
                    ss = self.calculate_SS_scan(expand_seed, one_neighbor)
                    self.edge[expand_seed][one_neighbor]['SS'] = ss
                    if ss >= epsilon:
                        self.node[expand_seed]['numStrongEdges'] += 1
                '''expand seed is a core'''
                if self.node[expand_seed]['numStrongEdges'] + 1 >= mu:
                    has_new_cluster = True
                    self.node[expand_seed]['category'] = 'core'
                    self.node[expand_seed]['clusterID'] = self.cluster_num
                    num_of_cores += 1
                    if verbosed_flag:
                        print('new core: ' + expand_seed)
                    '''add neighbors in the seed pool'''
                    adj_list = nx.all_neighbors(self, expand_seed)
                    for one_neighbor in adj_list:
                        '''add all un-expanded neighbors with SS >= epsilon into the seed pool'''
                        if one_neighbor not in self.processed_nodes and self.edge[expand_seed][one_neighbor]['SS'] >= epsilon:
                            seed_pool.add(one_neighbor)
                            self.node[one_neighbor]['clusterID'] = self.cluster_num
                            if verbosed_flag:
                                print('add neighbor: ' + one_neighbor)
            if has_new_cluster:
                self.cluster_num += 1
            '''end for expanding from one seed'''
        '''end for all expanding'''
        final_clusters = self.get_final_clustering_result_scan()
        print('final clusters:')
        print('num of clusters: %d; num of cores: %d.' % (self.cluster_num, num_of_cores))
        end = time.clock()
        print('time needed: %.4f.' % (end - start, ))
        if true_label_filename != None:
            true_cluster_dict = self.get_true_cluster(true_label_filename)
            ARI, NMI = self.calculate_metrics_current(true_cluster_dict, final_clusters)
            print('ARI: %f; NMI: %f.' % (ARI, NMI))
        modularity = self.calculate_modularity_current(final_clusters)
        print('current modularity: %f.' % modularity)
        '''output the core / noise'''
        filename = 'scan_results_node_info_%s_%.2f_%d.txt' % (dataset_name, epsilon, mu)
        self.print_nodes(filename)
    def expand_onehop_from_node(self, next_seed, epsilon, mu):
        '''
        Expand one hop from next_seed (obtained by active learning): compute
        the SS of every edge to an unprocessed neighbor, update the strong-edge
        counters on both endpoints, and promote nodes reaching mu strong edges
        (counting themselves) to cores.  A newly found core may widen the
        [low, high] degree window used by active learning, in which case the
        degree buckets entering the window are merged into the candidate sets.
        :param next_seed: the seed node to expand from
        :param epsilon: SS threshold
        :param mu: minimal number of strong edges
        :return: None
        NOTE(review): the candidate-set unions below assume candidate_nodes_AL
        has been initialized by active_learning(); if a core is first found
        before that (e.g. the 2nd phase of the heuristic initial clustering)
        the sets are still None -- confirm this path cannot be reached there.
        '''
        adj_list = nx.all_neighbors(self, next_seed)
        for one_neighbor in adj_list:
            '''only calculate the edge with neighbors in unprocessed nodes'''
            if one_neighbor in self.unprocessed_nodes:
                ss = self.calculate_SS(next_seed, one_neighbor, epsilon)
                self.edge[next_seed][one_neighbor]['SS'] = ss
                if ss >= epsilon:
                    self.node[next_seed]['numStrongEdges'] += 1
                    self.node[one_neighbor]['numStrongEdges'] += 1
                    if self.node[one_neighbor]['numStrongEdges'] + 1 >= mu and self.node[one_neighbor]['category'] != 'core':
                        self.node[one_neighbor]['category'] = 'core'
                        self.core_nodes.add(one_neighbor)
                        '''check if the new core degree will modify the low and high'''
                        # Window grows downwards: pull buckets [deg, old_low) in.
                        if self.degree(one_neighbor) < self.low:
                            for key in range(self.degree(one_neighbor), self.low):
                                if key in self.degree_map:
                                    self.candidate_nodes_AL |= (self.degree_map[key] & self.core_nodes)
                                    self.candidate_nodes_AL_non_core |= (self.degree_map[key] - self.core_nodes)
                            self.low = self.degree(one_neighbor)
                        # Window grows upwards: pull buckets (old_high, deg] in.
                        if self.degree(one_neighbor) > self.high:
                            for key in range(self.high + 1, self.degree(one_neighbor) + 1):
                                if key in self.degree_map:
                                    self.candidate_nodes_AL |= (self.degree_map[key] & self.core_nodes)
                                    self.candidate_nodes_AL_non_core |= (self.degree_map[key] - self.core_nodes)
                            self.high = self.degree(one_neighbor)
        '''End of for loop'''
        if self.node[next_seed]['numStrongEdges'] + 1 >= mu:
            self.node[next_seed]['category'] = 'core'
            self.core_nodes.add(next_seed)
    def update_core_subgraph(self, new_core, epsilon):
        '''
        Insert new_core into the current core_subgraph, together with its
        edges to other cores whose SS >= epsilon.
        :param new_core: the core node to insert
        :param epsilon: SS threshold
        :return: None
        '''
        self.core_subgraph.add_node(new_core)
        adj_list = set(nx.all_neighbors(self, new_core))
        common_cores = adj_list.intersection(self.core_nodes)
        if verbosed_flag:
            print('new core: ', new_core)
            print('common cores:', common_cores)
        for core in common_cores:
            # NOTE(review): only edges to *unprocessed* core neighbors are
            # inserted -- presumably processed cores already contributed their
            # edges when they were expanded; confirm against the callers.
            if self[new_core][core]['SS'] >= epsilon and core in self.unprocessed_nodes:
                self.core_subgraph.add_edge(new_core, core)
                if verbosed_flag: print('insert edge: %s, %s' % (new_core, core))
def active_learning(self, mu):
'''
In the firs time pick of active learning, construct the set of candidate_nodes_AL_non_core and core candidate_nodes_AL with degree in [low, high]
First active learning strategy: non-core nodes in the candidate set
Second active learning strategy: core candidate set
:return: the picked node
'''
if verbosed_flag: print('Enering active_learning')
'''In the 1st active learning, initialize the self.candidate_nodes_AL and self.candidate_nodes_AL_non_core'''
if self.candidate_nodes_AL == None:
for core in self.core_nodes:
if self.degree(core) < self.low: self.low = self.degree(core)
if self.degree(core) > self.high: self.high = self.degree(core)
self.candidate_nodes_AL = set()
for key in range(self.low, self.high + 1):
if key in self.degree_map:
self.candidate_nodes_AL |= self.degree_map[key]
self.candidate_nodes_AL_non_core = self.candidate_nodes_AL - self.core_nodes
self.candidate_nodes_AL -= self.candidate_nodes_AL_non_core
print('core degree: low: %d; high: %d' % (self.low, self.high))
if self.low > self.high:
print('low > high, stop the programe')
return None
if len(self.candidate_nodes_AL_non_core) > 0:
print('perform step 1 in active learning')
return self.candidate_nodes_AL_non_core.pop()
if len( self.candidate_nodes_AL) > 0:
print('perform step 2 in active learning')
return self.candidate_nodes_AL.pop()
'''need expand the candidate degrees'''
while len(self.candidate_nodes_AL_non_core) == 0 and len( self.candidate_nodes_AL) == 0:
if self.low >= mu:
if (self.low - 1) in self.degree_map:
self.candidate_nodes_AL |= (self.degree_map[self.low - 1] & self.core_nodes)
self.candidate_nodes_AL_non_core |= (self.degree_map[self.low - 1] - self.core_nodes)
self.low -= 1
if (self.high + 1) <= max(self.degree_map):
if (self.high + 1) in self.degree_map:
self.candidate_nodes_AL |= (self.degree_map[self.high + 1] & self.core_nodes)
self.candidate_nodes_AL_non_core |= (self.degree_map[self.high + 1] - self.core_nodes)
self.high += 1
else:
break
if len(self.candidate_nodes_AL_non_core) > 0:
print('perform step 3 in active learning')
return self.candidate_nodes_AL_non_core.pop()
if len( self.candidate_nodes_AL) > 0:
print('perform step 4 in active learning')
return self.candidate_nodes_AL.pop()
return None
    def get_initial_clustering_heuristic(self, epsilon, mu):
        '''
        Calculate the initial clustering in anytime SCAN with a degree
        heuristic:
        First, build a candidate set of nodes with degree >= (mu - 1) (lower
        degrees can never satisfy the core condition).
        Then, repeatedly pick a node from the untouched candidates and expand
        one hop from it.
        Finally, if no core was found at all, keep expanding from backup
        candidates until the first core appears (or the backups run out).
        :param epsilon: SS threshold
        :param mu: minimal number of strong edges
        :return: None
        '''
        if verbosed_flag: print('Enering get_initial_clustering_heuristic')
        sorted_degree_list = sorted(self.degree_map)
        candidate_low = mu - 1
        candidate_high = sorted_degree_list[len(sorted_degree_list) - 1]
        print('candidate_low: %d; candidate_high: %d' % (candidate_low, candidate_high))
        candidate_nodes_initial = set()
        candidate_nodes_bak = set()
        for key in range(candidate_low, candidate_high+1):
            if key in self.degree_map:
                candidate_nodes_initial |= self.degree_map[key]
                candidate_nodes_bak |= self.degree_map[key]
        '''count is for debug use'''
        count =0
        while len(candidate_nodes_initial) > 0:
            expand_seed = candidate_nodes_initial.pop()
            self.untouched_nodes.remove(expand_seed)
            self.expand_onehop_from_node_initial_heuristic(candidate_nodes_initial, expand_seed, epsilon, mu)
            '''move the seed node to processed_nodes and remove it from degree_map'''
            self.processed_nodes.add(expand_seed)
            self.degree_map[self.degree(expand_seed)].remove(expand_seed)
            '''remove the empty degree/nodes pair'''
            if len(self.degree_map[self.degree(expand_seed)]) ==0:
                del self.degree_map[self.degree(expand_seed)]
            if verbosed_flag:
                print('counter:',count,'picked node:',expand_seed)
                self.print_nodes()
                self.print_edges()
            count += 1
        # Any node never reached during the sweep goes straight to unprocessed.
        if len(self.untouched_nodes) > 0:
            self.unprocessed_nodes |= self.untouched_nodes
            self.untouched_nodes.clear()
            print('mark here: untoched node set is not empty after initial')
        print('num of picks: %d' % (count,))
        if len(self.core_nodes) > 0:
            print('Have already learned core points in heurisic initial clustering')
            return
        # Second phase: no core found yet -- keep expanding from backup
        # candidates until the first core appears.
        count = 0
        candidate_nodes_bak -= self.processed_nodes
        print('num of candidate_nodes_bak: %d; num of processed: %d.' % (len(candidate_nodes_bak), len(self.processed_nodes)))
        while len(self.core_nodes) == 0:
            if len(candidate_nodes_bak) > 0:
                expand_seed = candidate_nodes_bak.pop()
            else:
                print('no core at all.')
                break
            self.expand_onehop_from_node(expand_seed, epsilon, mu)
            '''move the seed from unprocessed to processed and remove it from degree_map'''
            if expand_seed in self.unprocessed_nodes:
                self.unprocessed_nodes.remove(expand_seed)
            self.processed_nodes.add(expand_seed)
            self.degree_map[self.degree(expand_seed)].remove(expand_seed)
            '''remove the empty degree/nodes pair'''
            if len(self.degree_map[self.degree(expand_seed)]) == 0:
                del self.degree_map[self.degree(expand_seed)]
            count += 1
        print('After iterations. num of candidate_nodes_bak: %d; num of processed: %d.' % (len(candidate_nodes_bak), len(self.processed_nodes)))
        candidate_nodes_bak.clear()
        print('%d iterations in 2nd step in heuristic initial clustering.' % (count, ))
        if verbosed_flag: print('Outing get_initial_clustering_heuristic')
def get_initial_clustering(self, epsilon, mu):
'''
To calcluate the initial clustering in anytime SCAN:
repeatedly pick node randomly from untouched_nodes, expand one-hop to get initial clustering.
:param epsilon: SS threshold
:param mu: minimal number of strong edges
:return: None
'''
if verbosed_flag: print('Entering get_initial_clustering')
'''count is for debug use'''
count =0
while len(self.untouched_nodes) > 0:
expand_seed = self.untouched_nodes.pop()
self.expand_onehop_from_node_initial(expand_seed, epsilon, mu)
'''move the seed node to processed_nodes and remove it from degree_map'''
self.processed_nodes.add(expand_seed)
self.degree_map[self.degree(expand_seed)].remove(expand_seed)
'''remove the empty degre/nodes pair'''
if len(self.degree_map[self.degree(expand_seed)]) ==0:
del self.degree_map[self.degree(expand_seed)]
if verbosed_flag:
print('counter:',count,'picked node:',expand_seed)
count += 1
print('num of picks: %d' % (count,))
if verbosed_flag: print('Outing get_initial_clustering')
def expand_onehop_from_node_initial(self, expand_seed, epsilon, mu):
'''
This function is only called by get_initial_clustering, since it may update the untouched_nodes.
The similar function called in active learning is defined in another function.
if the neighbor is in untouched_node, then move the neighbor from untocuhed from unprocessed and calculate the edge's SS
if the neighbor is in unproessed_node, it means the neighbor has already been visited by another seed, only calculate the edge's SS
:param expand_seed:
:param epsilon:
:param mu:
:return: None
'''
adj_list = nx.all_neighbors(self, expand_seed)
for one_neighbor in adj_list:
if one_neighbor in self.untouched_nodes:
self.untouched_nodes.remove(one_neighbor)
self.unprocessed_nodes.add(one_neighbor)
'''the one_neighbor has already in the unprocessed_nodes'''
ss = self.calculate_SS(expand_seed, one_neighbor, epsilon)
self.edge[expand_seed][one_neighbor]['SS'] = ss
if ss >= epsilon:
self.node[expand_seed]['numStrongEdges'] += 1
self.node[one_neighbor]['numStrongEdges'] += 1
if self.node[one_neighbor]['numStrongEdges'] + 1 >= mu:
self.node[one_neighbor]['category'] = 'core'
self.core_nodes.add(one_neighbor)
'''End of for loop'''
if self.node[expand_seed]['numStrongEdges'] + 1 >= mu:
self.node[expand_seed]['category'] = 'core'
self.core_nodes.add(expand_seed)
def expand_onehop_from_node_initial_heuristic(self, candidate_nodes_initial, expand_seed, epsilon, mu):
'''
This function is only called by get_initial_clustering_heuristic, since it may update the untouched_nodes and candidate_nodes_initial.
The similar function called in active learning is defined in another function.
if the neighbor is in untouched_node, then move the neighbor from untocuhed from unprocessed and calculate the edge's SS
if the neighbor is in the candidate_nodes_initial, also remove the neighbor from candidate_nodes_initial since it is not untouched any more
if the neighbor is in unproessed_node, it means the neighbor has already been visited by another seed, only calculate the edge's SS
:param candidate_nodes_initial:
:param expand_seed:
:param epsilon:
:param mu:
:return: None
'''
adj_list = nx.all_neighbors(self, expand_seed)
for one_neighbor in adj_list:
if one_neighbor in self.untouched_nodes:
self.untouched_nodes.remove(one_neighbor)
self.unprocessed_nodes.add(one_neighbor)
if one_neighbor in candidate_nodes_initial:
candidate_nodes_initial.remove(one_neighbor)
'''the one_neighbor has already in the unprocessed_nodes'''
ss = self.calculate_SS(expand_seed, one_neighbor, epsilon)
self.edge[expand_seed][one_neighbor]['SS'] = ss
if ss >= epsilon:
self.node[expand_seed]['numStrongEdges'] += 1
self.node[one_neighbor]['numStrongEdges'] += 1
if self.node[one_neighbor]['numStrongEdges'] + 1 >= mu:
self.node[one_neighbor]['category'] = 'core'
self.core_nodes.add(one_neighbor)
'''End of for loop'''
if self.node[expand_seed]['numStrongEdges'] + 1 >= mu:
self.node[expand_seed]['category'] = 'core'
self.core_nodes.add(expand_seed)
def get_initial_core_subgraph(self, epsilon):
'''
After get_initial_clusterng, construct the core node subgraph, which includes only the core nodes and the edges with SS >= epsilon
:param epsilon:
:return: None
'''
self.core_subgraph = self.subgraph(list(self.core_nodes))
for edge in self.core_subgraph.edges():
if self.core_subgraph[edge[0]][edge[1]]['SS'] < epsilon:
self.core_subgraph.remove_edge(edge[0],edge[1])
if verbosed_flag: print('remove edge: %s, %s' % (edge[0], edge[1]))
def get_final_clustering_result_scan(self):
'''
get the final clustering results from scan alrithm
hubs (ID = -1) and outliers (ID = -2)
:return: dict as the clusters
'''
clusters = dict()
'''set to store the nodes with clusterID'''
nodes_with_IDs = set()
for node in self.nodes():
if self.node[node]['clusterID'] >= 0:
if self.node[node]['clusterID'] in clusters:
one_cluster = clusters.get(self.node[node]['clusterID'])
else:
one_cluster = set()
one_cluster.add(node)
clusters[self.node[node]['clusterID']] = one_cluster
nodes_with_IDs.add(node)
print('num of nodes with cIDs: %d.' % (len(nodes_with_IDs), ))
noise_set = set(self.nodes()) - nodes_with_IDs
hubs = set()
outliers = set()
for node in noise_set:
neighbors = nx.all_neighbors(self, node)
cID_set = set()
for one_neighbor in neighbors:
if self.node[one_neighbor]['clusterID'] >= 0:
cID_set.add(self.node[one_neighbor]['clusterID'])
'''check if the num of IDs in neighbors >= 2'''
if len(cID_set) > 1:
break
if len(cID_set) > 1:
hubs.add(node)
self.node[node]['clusterID'] = -1
else:
outliers.add(node)
self.node[node]['clusterID'] = -2
clusters[-1] = hubs
clusters[-2] = outliers
print('num of hubs: %d; num of outlires: %d.' % (len(hubs), len(outliers)))
return clusters
def output_current_clustering_result(self, epsilon):
    '''
    Calculate the current clustering result in the graph.
    Step 1: (a). Compute the connected components of core_subgraph and assign a
                 clusterID to each component; each cluster is a clusterID / nodes pair.
            (b). Based on the edges whose SS has already been computed, attach the
                 clusterID to the border nodes of each core.
    Step 2: remaining nodes without a cluster ID are divided further into
            hubs (ID = -1) and outliers (ID = -2).
    NOTE(review): every clusterID is reset to -1 before returning, so this is a
    non-destructive snapshot of the clustering at the current point of the run.
    :param epsilon: similarity threshold for the core subgraph and border attachment
    :return: tuple (clusters dict, num of SS values computed so far, num of core nodes)
    '''
    self.get_initial_core_subgraph(epsilon)
    ccs = nx.connected_components(self.core_subgraph)
    clusterID = 0
    clusters = dict()
    '''set to store the nodes with clusterID'''
    nodes_with_IDs = set()
    '''total is used to count the num of nodes in all clusters, which is used to check if there is any overlapping in the clusters'''
    total = 0
    for one_cc in ccs:
        one_cluster = set()
        for node in one_cc:
            self.node[node]['clusterID'] = clusterID
            one_cluster.add(node)
            neighbors = nx.all_neighbors(self, node)
            for one_neighbor in neighbors:
                # border attachment: neighbors over strong (SS >= epsilon) edges join the cluster
                if self.edge[node][one_neighbor]['SS'] >= epsilon:
                    self.node[one_neighbor]['clusterID'] = clusterID
                    one_cluster.add(one_neighbor)
        clusters[clusterID] = one_cluster
        nodes_with_IDs |= one_cluster
        total += len(one_cluster)
        clusterID += 1
    '''the following is processing hubs and outliers'''
    noise_set = set(self.nodes()) - nodes_with_IDs
    hubs = set()
    outliers = set()
    for node in noise_set:
        neighbors = nx.all_neighbors(self, node)
        cID_set = set()
        for one_neighbor in neighbors:
            if self.node[one_neighbor]['clusterID'] >= 0:
                cID_set.add(self.node[one_neighbor]['clusterID'])
            '''check if the num of IDs in neighbors >= 2'''
            if len(cID_set) > 1:
                break
        # a noise node adjacent to two or more distinct clusters is a hub
        if len(cID_set) > 1:
            hubs.add(node)
            self.node[node]['clusterID'] = -1
        else:
            outliers.add(node)
            self.node[node]['clusterID'] = -2
    clusters[-1] = hubs
    total += len(hubs)
    clusters[-2] = outliers
    total += len(outliers)
    '''check if there is overlapping in clusters'''
    if total > self.number_of_nodes():
        print('Have overlappings. num of IDs: %d. num of nodes: %d.' % (total, self.number_of_nodes()))
    else:
        print('No overlappings')
    '''count the number of SSs the have been calculated'''
    edge_SSs = nx.get_edge_attributes(self, 'SS')
    # 'total' is reused here with a new meaning: number of SS values computed so far
    total =0
    for key in edge_SSs:
        if edge_SSs[key] > 0:
            total += 1
    print('Num of SSs have been calculated: %d' % (total,))
    print('Num of cores now: %d' % (len(self.core_nodes),))
    '''recover the clusterID as the initial value'''
    for node in self.nodes():
        self.node[node]['clusterID'] = -1
    return (clusters, total, len(self.core_nodes))
def output_final_clustering_result(self, epsilon):
    '''
    Calculate the final clustering result in the graph.
    The only difference between this function and output_current_clustering_result
    is that here we go one hop further from the unprocessed core nodes: any edge
    whose SS is still the sentinel -1 gets its SS computed on demand.
    NOTE(review): unlike output_current_clustering_result, this function does NOT
    reset the clusterIDs to -1 before returning.
    :param epsilon: similarity threshold for the core subgraph and border attachment
    :return: tuple (clusters dict, num of SS values computed, num of core nodes)
    '''
    self.get_initial_core_subgraph(epsilon)
    ccs = nx.connected_components(self.core_subgraph)
    clusterID = 0
    clusters = dict()
    '''set to store the nodes with clusterID'''
    nodes_with_IDs = set()
    '''total is used to count the num of nodes in all clusters, which is used to check if there is any overlapping in the clusters'''
    total = 0
    # count tracks the number of connected components seen (not otherwise used)
    count = 0
    for one_cc in ccs:
        one_cluster = set()
        count += 1
        for node in one_cc:
            self.node[node]['clusterID'] = clusterID
            one_cluster.add(node)
            neighbors = nx.all_neighbors(self, node)
            for one_neighbor in neighbors:
                '''if the edge <node, one_neighbor> has no SS, first calculate SS'''
                if self.edge[node][one_neighbor]['SS'] == -1:
                    self.edge[node][one_neighbor]['SS'] = self.calculate_SS(node, one_neighbor,epsilon)
                if self.edge[node][one_neighbor]['SS'] >= epsilon:
                    self.node[one_neighbor]['clusterID'] = clusterID
                    one_cluster.add(one_neighbor)
        clusters[clusterID] = one_cluster
        nodes_with_IDs |= one_cluster
        total += len(one_cluster)
        clusterID += 1
    '''the following is processing hubs and outliers'''
    noise_set = set(self.nodes()) - nodes_with_IDs
    hubs = set()
    outliers = set()
    for node in noise_set:
        neighbors = nx.all_neighbors(self, node)
        cID_set = set()
        for one_neighbor in neighbors:
            if self.node[one_neighbor]['clusterID'] >= 0:
                cID_set.add(self.node[one_neighbor]['clusterID'])
            '''check if the num of IDs in neighbors >= 2'''
            if len(cID_set) > 1:
                break
        # a noise node adjacent to two or more distinct clusters is a hub
        if len(cID_set) > 1:
            hubs.add(node)
            self.node[node]['clusterID'] =-1
        else:
            outliers.add(node)
            self.node[node]['clusterID'] = -2
    clusters[-1] = hubs
    total += len(hubs)
    clusters[-2] = outliers
    total += len(outliers)
    '''check if there is overlapping in clusters'''
    if total > self.number_of_nodes():
        print('Have overlappings. num of IDs: %d. num of nodes: %d.' % (total, self.number_of_nodes()))
    else:
        print('No overlappings')
    '''count the number of SSs the have been calculated'''
    edge_SSs = nx.get_edge_attributes(self, 'SS')
    # 'total' is reused here with a new meaning: number of SS values computed
    total = 0
    for key in edge_SSs:
        if edge_SSs[key] > 0:
            total += 1
    print('Num of SSs have been calculated: %d' % (total,))
    print('Num of cores now: %d' % (len(self.core_nodes),))
    return (clusters, total, len(self.core_nodes))
def calculate_SS(self, node1, node2, epsilon):
    '''
    Calculate the cosine structural similarity (SS) between node1 and node2.
    Uses an upper-bound prune to speed things up: the SS can never exceed
    (min_degree + 1) / denominator, so when that bound is already below epsilon
    we return 0 without enumerating common neighbors.
    :param node1:
    :param node2:
    :param epsilon: pruning threshold
    :return: SS of edge (node1, node2), or 0 when the upper bound is < epsilon
    '''
    # Query each degree once instead of four times.
    deg1 = self.degree(node1)
    deg2 = self.degree(node2)
    small_degree = min(deg1, deg2)
    denominator = math.sqrt((deg1 + 1) * (deg2 + 1))
    # The +1 / +2.0 terms account for the closed neighborhoods (each node
    # counts itself and its neighbor as shared).
    ss_upper_bound = (small_degree + 1) / denominator
    if ss_upper_bound < epsilon:
        return 0
    commons = len(list(nx.common_neighbors(self, node1, node2))) + 2.0
    return commons / denominator
def calculate_SS_scan(self, node1, node2):
    '''
    Cosine structural similarity between node1 and node2, computed directly
    (no upper-bound pruning); used by the original SCAN algorithm.
    :param node1:
    :param node2:
    :return: SS of edge (node1, node2)
    '''
    shared = len(list(nx.common_neighbors(self, node1, node2))) + 2.0
    norm = math.sqrt((self.degree(node1) + 1) * (self.degree(node2) + 1))
    return shared / norm
def print_nodes(self):
    '''
    Output the information of all nodes (category, clusterID, numStrongEdges)
    and the four bookkeeping sets to stdout.
    NOTE(review): this method is shadowed by the later def print_nodes(self, filename)
    in the same class body, so as written it is unreachable dead code.
    :return: None
    '''
    print('print nodes information:')
    for node in self.nodes():
        print(node, ':',self.node[node]['category'],self.node[node]['clusterID'],self.node[node]['numStrongEdges'])
    print('untouched:')
    print(self.untouched_nodes)
    print('unprocessed:')
    print(self.unprocessed_nodes)
    print('processed:')
    print(self.processed_nodes)
    print('core:')
    print(self.core_nodes)
def print_nodes(self, filename):
    '''
    Write node information (nodename, category, clusterID, numStrongEdges) to
    filename as a tab-separated table, and write each core node with its
    clusterID to filename + '_core.txt'.
    NOTE(review): this definition shadows the earlier no-argument print_nodes.
    :param filename: output path for the full node table
    :return: None
    '''
    # with-statements guarantee both files are closed even if a write fails
    # (the original left the handles open on any exception).
    with open(filename, 'w') as fout, open(filename + '_core.txt', 'w') as fcore_info:
        fout.write('nodename\tcategory\tclusterID\tnumStrongEdges\n')
        for node in self.nodes():
            line = '%s\t%s\t%d\t%d\n' % (node, self.node[node]['category'], self.node[node]['clusterID'], self.node[node]['numStrongEdges'])
            fout.write(line)
            if self.node[node]['category'] == 'core':
                fcore_info.write('%s\t%d\n' % (node, self.node[node]['clusterID']))
def print_edges(self):
    '''
    Print the stored SS value of every edge in the graph to stdout.
    :return: None
    '''
    print('print edges information:')
    ss_by_edge = nx.get_edge_attributes(self, 'SS')
    for e in self.edges():
        print(e, "'s SS:", ss_by_edge[e])
def get_true_cluster(self, ground_truth_filename):
    '''
    Load the ground-truth labels. Each non-blank line in the file has the form
    "node_name clusterID" (whitespace separated).
    :param ground_truth_filename: path of the file holding the true node labels
    :return: dict mapping node name (str) -> true cluster ID (int)
    '''
    label_truth_dict = dict()
    # with-statement closes the file deterministically
    # (the original never closed the handle).
    with open(ground_truth_filename) as truth_file:
        for line in truth_file:
            # skip blank lines
            if not line.strip():
                continue
            tokens = line.strip().split()
            label_truth_dict[tokens[0]] = int(tokens[1])
    return label_truth_dict
def calculate_metrics_current(self, label_truth_dict, pred_clusters):
    '''
    Compare the current predicted clusters with the ground truth and compute
    ARI and NMI.
    Ground-truth file format (already parsed into label_truth_dict):
        node_name_1 clusterID_1
        node_name_2 clusterID_2
        ... ...
    pred_clusters is a dict: <clusterID, set of nodes>. For overlapping nodes
    only one clusterID (the last one seen) is used.
    :param label_truth_dict: dict of true labels keyed by node name
    :param pred_clusters: dict of predicted clusters
    :return: tuple (ARI, NMI)
    '''
    from sklearn import metrics
    # invert pred_clusters into node -> clusterID
    label_pred_dict = dict()
    for cid, members in pred_clusters.items():
        for member in members:
            label_pred_dict[member] = cid
    ''' only for core points
    if len(label_truth_dict) != len(label_pred_dict):
        print('ERROR! Num of nodes in truth does not equal to num of nodes in pred')
        sys.exit(2)
    '''
    # build the two label lists in the same (sorted) node order
    ordered_nodes = sorted(label_truth_dict)
    truth_labels = [label_truth_dict[n] for n in ordered_nodes]
    pred_labels = [label_pred_dict[n] for n in ordered_nodes]
    ARI = metrics.adjusted_rand_score(truth_labels, pred_labels)
    NMI = metrics.normalized_mutual_info_score(truth_labels, pred_labels)
    return (ARI, NMI)
def calculate_metrics_final(self, label_truth_dict):
    '''
    Compare the final clusters (stored in node['clusterID']) with the ground
    truth and compute ARI and NMI.
    Ground-truth file format (already parsed into label_truth_dict):
        node_name_1 clusterID_1
        node_name_2 clusterID_2
        ... ...
    For overlapping cases, only one clusterID per node is used.
    :param label_truth_dict: dict of true labels keyed by node name
    :return: tuple (ARI, NMI)
    '''
    from sklearn import metrics
    ''' only for core points
    if len(label_truth_dict) != self.number_of_nodes():
        print('ERROR! Num of nodes in truth does not equal to num of nodes in pred')
        sys.exit(2)
    '''
    # build the two label lists in the same (sorted) node order
    ordered_nodes = sorted(label_truth_dict)
    truth_labels = [label_truth_dict[n] for n in ordered_nodes]
    pred_labels = [self.node[n]['clusterID'] for n in ordered_nodes]
    ARI = metrics.adjusted_rand_score(truth_labels, pred_labels)
    NMI = metrics.normalized_mutual_info_score(truth_labels, pred_labels)
    return (ARI, NMI)
def calculate_modularity_current(self, pred_clusters):
    '''
    Calculate the modularity of the current clustering.
    pred_clusters is a dict: <clusterID, set of nodes>; for overlapping cases
    only one clusterID per node is used. Hubs (-1) and outliers (-2) each get
    their own singleton community so they do not collapse into one partition.
    :param pred_clusters: dict of predicted clusters
    :return: float, modularity
    '''
    import community
    label_pred_dict = dict()
    # next free community id, handed out one per hub/outlier
    cID = len(pred_clusters)
    for key, members in pred_clusters.items():
        if key in (-1, -2):
            for member in members:
                label_pred_dict[member] = cID
                cID += 1
        else:
            for member in members:
                label_pred_dict[member] = key
    if self.number_of_nodes() != len(label_pred_dict):
        print('ERROR! Num of nodes in Graph does not equal to num of nodes in pred')
        sys.exit(2)
    return community.modularity(label_pred_dict, nx.Graph(self))
def calculate_modularity_final(self):
    '''
    Calculate the modularity of the final clustering stored in node['clusterID'].
    For overlapping cases, only one clusterID per node is used. Hubs (-1) and
    outliers (-2) each get their own singleton community.
    :return: float, modularity
    '''
    import community
    # start singleton community ids above any possible clusterID
    cID = self.number_of_nodes()
    label_pred_dict = dict()
    for node in self.nodes():
        label = self.node[node]['clusterID']
        if label in (-1, -2):
            label_pred_dict[node] = cID
            cID += 1
        else:
            label_pred_dict[node] = label
    if self.number_of_nodes() != len(label_pred_dict):
        print('ERROR! Num of nodes in Graph does not equal to num of nodes in pred')
        sys.exit(2)
    return community.modularity(label_pred_dict, nx.Graph(self))
import AnytimeSCAN, ast, time
# Command-line driver: dispatches to anytime SCAN or the original SCAN
# depending on sys.argv[2], converting the optional positional arguments.
if __name__ == '__main__':
    if len(sys.argv) < 3:
        print('ERROR!!! Required input: <filename, anytimescan | scan>')
        sys.exit(2)
    my_graph = AnytimeSCAN.AnytimeSCANGraph(sys.argv[1])
    '''for anytime scan'''
    if sys.argv[2] == 'anytimescan':
        if len(sys.argv) < 5:
            print(
                'ERROR!!! Required input: <filename, anytimescan, epsilon(float), mu(int), num_of_AL(int, defaul = 100), isHeuristic(boolean, defaul = True), isRandom_AL(boolean, defaul = False), true_label_filename(string, default = None)>')
            sys.exit(2)
        print('starting anytime_scan')
        # Each branch forwards one more optional argument; extra argv entries
        # beyond the ninth are silently ignored.
        if len(sys.argv) == 5:
            my_graph.anytime_scan(sys.argv[1], float(sys.argv[3]), int(sys.argv[4]))
        elif len(sys.argv) == 6:
            my_graph.anytime_scan(sys.argv[1], float(sys.argv[3]), int(sys.argv[4]), int(sys.argv[5]))
        elif len(sys.argv) == 7:
            my_graph.anytime_scan(sys.argv[1], float(sys.argv[3]), int(sys.argv[4]), int(sys.argv[5]),
                                  ast.literal_eval(sys.argv[6]))
        elif len(sys.argv) == 8:
            my_graph.anytime_scan(sys.argv[1], float(sys.argv[3]), int(sys.argv[4]), int(sys.argv[5]),
                                  ast.literal_eval(sys.argv[6]), ast.literal_eval(sys.argv[7]))
        elif len(sys.argv) >= 9:
            my_graph.anytime_scan(sys.argv[1], float(sys.argv[3]), int(sys.argv[4]), int(sys.argv[5]),
                                  ast.literal_eval(sys.argv[6]), ast.literal_eval(sys.argv[7]), sys.argv[8])
        print('finished anytime_scan')
    '''for original scan'''
    if sys.argv[2] == 'scan':
        if len(sys.argv) < 5:
            print(
                'ERROR!!! Required input: <filename, scan, epsilon(float), mu(int), true_label_filename(string, default = None)>')
            sys.exit(2)
        print('starting scan')
        # NOTE(review): with 7 or more argv entries neither branch below fires,
        # so 'starting scan' / 'finished scan' print but nothing runs — likely
        # an oversight worth confirming with the author.
        if len(sys.argv) == 5:
            my_graph.original_scan(sys.argv[1], float(sys.argv[3]), int(sys.argv[4]))
        elif len(sys.argv) == 6:
            my_graph.original_scan(sys.argv[1], float(sys.argv[3]), int(sys.argv[4]), sys.argv[5])
        print('finished scan')
|
// Basic JavaScript: Concatenating Strings with the Plus Equals Operator
//
// Exercise: build myStr over several lines by concatenating the two strings
// "This is the first sentence. " and "This is the second sentence."
// with the += operator — assign the first string, then append the second.

// Only change code below this line
let myStr = "This is the first sentence. ";
myStr += "This is the second sentence.";
console.log(myStr);
|
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.default = (function (uid, money, message) {
if (message === void 0) { message = ''; }
var data = JSON.stringify({
g: uid,
c: money,
m: message
});
return "+$" + data;
});
|