input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
# -*- fundamental -*-
#
# Tests for parsing inflection tables
#
# Copyright (c) 2021 <NAME>. See file LICENSE and https://ylonen.org
import unittest
import json
from wikitextprocessor import Wtp
from wiktextract import WiktionaryConfig
from wiktextract.inflection import parse_inflection_section
class InflTests(unittest.TestCase):
def setUp(self):
    """Create a fresh wikitext context and config before each test."""
    # A very large diff limit so that full expected/actual form lists
    # are printed when an assertion fails.
    self.maxDiff = 100000
    self.config = WiktionaryConfig()
    self.ctx = Wtp()
    # Default page/section; individual tests re-point these via xinfl().
    self.ctx.start_page("testpage")
    self.ctx.start_section("English")
def xinfl(self, word, lang, pos, section, text):
    """Runs a single inflection table parsing test, and returns ``data``."""
    # Point the shared context at the word/language/POS under test.
    self.ctx.start_page(word)
    self.ctx.start_section(lang)
    self.ctx.start_subsection(pos)
    parsed = self.ctx.parse(text)
    results = {}
    parse_inflection_section(self.config, self.ctx, results, word, lang,
                             pos, section, parsed)
    return results
def test_Portuguese_verb1(self):
ret = self.xinfl("viajar", "Portuguese", "verb", "Conjugation", """
<div class="NavFrame" style="clear%3Aboth%3B+white-space%3A+nowrap">
<div class="NavHead"> Conjugation of the [[Appendix:Portuguese verbs|Portuguese ''-ar'' verb]] ''viajar''</div>
<div class="NavContent" align="left">
{| class="inflection-table" style="background%3A%23F6F6F6%3B+text-align%3A+left%3B+border%3A+1px+solid+%23999999%3B" cellpadding="3" cellspacing="0"
|-
| style="border%3A+1px+solid+%23999999%3B" colspan="7" | '''Notes''':<sup class="plainlinks">[//wiki.local/w/index.php?action=edit&title=Module%3Apt-conj%2Fdata%2F-ar [edit]]</sup>
* This is a regular verb of the '''-ar''' group.
*
* Verbs with this conjugation include: <i class="Latn+mention" lang="pt">[[amar#Portuguese|amar]]</i>, <i class="Latn+mention" lang="pt">[[cantar#Portuguese|cantar]]</i>, <i class="Latn+mention" lang="pt">[[gritar#Portuguese|gritar]]</i>, <i class="Latn+mention" lang="pt">[[marchar#Portuguese|marchar]]</i>, <i class="Latn+mention" lang="pt">[[mostrar#Portuguese|mostrar]]</i>, <i class="Latn+mention" lang="pt">[[nadar#Portuguese|nadar]]</i>, <i class="Latn+mention" lang="pt">[[parar#Portuguese|parar]]</i>, <i class="Latn+mention" lang="pt">[[participar#Portuguese|participar]]</i>, <i class="Latn+mention" lang="pt">[[retirar#Portuguese|retirar]]</i>, <i class="Latn+mention" lang="pt">[[separar#Portuguese|separar]]</i>, <i class="Latn+mention" lang="pt">[[viajar#Portuguese|viajar]]</i>.
|-
! style="border%3A+1px+solid+%23999999%3B+background%3A%23B0B0B0" rowspan="2" |
! style="border%3A+1px+solid+%23999999%3B+background%3A%23D0D0D0" colspan="3" | Singular
! style="border%3A+1px+solid+%23999999%3B+background%3A%23D0D0D0" colspan="3" | Plural
|-
! style="border%3A+1px+solid+%23999999%3B+background%3A%23D0D0D0%3B+width%3A12.5%25" | First-person<br>([[eu#Portuguese|eu]])
! style="border%3A+1px+solid+%23999999%3B+background%3A%23D0D0D0%3B+width%3A12.5%25" | Second-person<br>([[tu#Portuguese|tu]])
! style="border%3A+1px+solid+%23999999%3B+background%3A%23D0D0D0%3B+width%3A12.5%25" | Third-person<br>([[ele#Portuguese|ele]] / [[ela#Portuguese|ela]] / [[você#Portuguese|você]])
! style="border%3A+1px+solid+%23999999%3B+background%3A%23D0D0D0%3B+width%3A12.5%25" | First-person<br>([[nós#Portuguese|nós]])
! style="border%3A+1px+solid+%23999999%3B+background%3A%23D0D0D0%3B+width%3A12.5%25" | Second-person<br>([[vós#Portuguese|vós]])
! style="border%3A+1px+solid+%23999999%3B+background%3A%23D0D0D0%3B+width%3A12.5%25" | Third-person<br>([[eles#Portuguese|eles]] / [[elas#Portuguese|elas]] / [[vocês#Portuguese|vocês]])
|-
! style="border%3A+1px+solid+%23999999%3B+background%3A%23c498ff" colspan="7" | ''<span title="infinitivo">Infinitive</span>''
|-
! style="border%3A+1px+solid+%23999999%3B+background%3A%23a478df" | '''<span title="infinitivo+impessoal">Impersonal</span>'''
| style="border%3A+1px+solid+%23999999%3B+vertical-align%3A+top%3B" colspan="6" | <span class="Latn" lang="pt">[[viajar#Portuguese|viajar]]</span>
|-
! style="border%3A+1px+solid+%23999999%3B+background%3A%23a478df" | '''<span title="infinitivo+pessoal">Personal</span>'''
| style="border%3A+1px+solid+%23999999%3B+vertical-align%3A+top%3B" | <span class="Latn" lang="pt">[[viajar#Portuguese|viajar]]</span>
| style="border%3A+1px+solid+%23999999%3B+vertical-align%3A+top%3B" | <span class="Latn" lang="pt">[[viajares#Portuguese|viajares]]</span>
| style="border%3A+1px+solid+%23999999%3B+vertical-align%3A+top%3B" | <span class="Latn" lang="pt">[[viajar#Portuguese|viajar]]</span>
| style="border%3A+1px+solid+%23999999%3B+vertical-align%3A+top%3B" | <span class="Latn" lang="pt">[[viajarmos#Portuguese|viajarmos]]</span>
| style="border%3A+1px+solid+%23999999%3B+vertical-align%3A+top%3B" | <span class="Latn" lang="pt">[[viajardes#Portuguese|viajardes]]</span>
| style="border%3A+1px+solid+%23999999%3B+vertical-align%3A+top%3B" | <span class="Latn" lang="pt">[[viajarem#Portuguese|viajarem]]</span>
|-
! style="border%3A+1px+solid+%23999999%3B+background%3A%2398ffc4" colspan="7" | ''<span title="ger%C3%BAndio">Gerund</span>''
|-
| style="border%3A+1px+solid+%23999999%3B+background%3A%2378dfa4" |
| style="border%3A+1px+solid+%23999999%3B+vertical-align%3A+top%3B" colspan="6" | <span class="Latn" lang="pt">[[viajando#Portuguese|viajando]]</span>
|-
! style="border%3A+1px+solid+%23999999%3B+background%3A%23ffc498" colspan="7" | ''<span title="partic%C3%ADpio+passado">Past participle</span>''
|-
! style="border%3A+1px+solid+%23999999%3B+background%3A%23dfa478" | Masculine
| style="border%3A+1px+solid+%23999999%3B+vertical-align%3A+top%3B" colspan="3" | <span class="Latn" lang="pt">[[viajado#Portuguese|viajado]]</span>
| style="border%3A+1px+solid+%23999999%3B+vertical-align%3A+top%3B" colspan="3" | <span class="Latn" lang="pt">[[viajados#Portuguese|viajados]]</span>
|-
! style="border%3A+1px+solid+%23999999%3B+background%3A%23dfa478" | Feminine
| style="border%3A+1px+solid+%23999999%3B+vertical-align%3A+top%3B" colspan="3" | <span class="Latn" lang="pt">[[viajada#Portuguese|viajada]]</span>
| style="border%3A+1px+solid+%23999999%3B+vertical-align%3A+top%3B" colspan="3" | <span class="Latn" lang="pt">[[viajadas#Portuguese|viajadas]]</span>
|-
! style="border%3A+1px+solid+%23999999%3B+background%3A%23d0dff4" colspan="7" | ''<span title="indicativo">Indicative</span>''
|-
! style="border%3A+1px+solid+%23999999%3B+background%3A%23b0bfd4" | <span title="presente">Present</span>
| style="border%3A+1px+solid+%23999999%3B+vertical-align%3A+top%3B" | <span class="Latn" lang="pt">[[viajo#Portuguese|viajo]]</span>
| style="border%3A+1px+solid+%23999999%3B+vertical-align%3A+top%3B" | <span class="Latn" lang="pt">[[viajas#Portuguese|viajas]]</span>
| style="border%3A+1px+solid+%23999999%3B+vertical-align%3A+top%3B" | <span class="Latn" lang="pt">[[viaja#Portuguese|viaja]]</span>
| style="border%3A+1px+solid+%23999999%3B+vertical-align%3A+top%3B" | <span class="Latn" lang="pt">[[viajamos#Portuguese|viajamos]]</span>
| style="border%3A+1px+solid+%23999999%3B+vertical-align%3A+top%3B" | <span class="Latn" lang="pt">[[viajais#Portuguese|viajais]]</span>
| style="border%3A+1px+solid+%23999999%3B+vertical-align%3A+top%3B" | <span class="Latn" lang="pt">[[viajam#Portuguese|viajam]]</span>
|-
! style="border%3A+1px+solid+%23999999%3B+background%3A%23b0bfd4" | <span title="pret%C3%A9rito+imperfeito">Imperfect</span>
| style="border%3A+1px+solid+%23999999%3B+vertical-align%3A+top%3B" | <span class="Latn" lang="pt">[[viajava#Portuguese|viajava]]</span>
| style="border%3A+1px+solid+%23999999%3B+vertical-align%3A+top%3B" | <span class="Latn" lang="pt">[[viajavas#Portuguese|viajavas]]</span>
| style="border%3A+1px+solid+%23999999%3B+vertical-align%3A+top%3B" | <span class="Latn" lang="pt">[[viajava#Portuguese|viajava]]</span>
| style="border%3A+1px+solid+%23999999%3B+vertical-align%3A+top%3B" | <span class="Latn" lang="pt">[[viajávamos#Portuguese|viajávamos]]</span>
| style="border%3A+1px+solid+%23999999%3B+vertical-align%3A+top%3B" | <span class="Latn" lang="pt">[[viajáveis#Portuguese|viajáveis]]</span>
| style="border%3A+1px+solid+%23999999%3B+vertical-align%3A+top%3B" | <span class="Latn" lang="pt">[[viajavam#Portuguese|viajavam]]</span>
|-
! style="border%3A+1px+solid+%23999999%3B+background%3A%23b0bfd4" | <span title="pret%C3%A9rito+perfeito">Preterite</span>
| style="border%3A+1px+solid+%23999999%3B+vertical-align%3A+top%3B" | <span class="Latn" lang="pt">[[viajei#Portuguese|viajei]]</span>
| style="border%3A+1px+solid+%23999999%3B+vertical-align%3A+top%3B" | <span class="Latn" lang="pt">[[viajaste#Portuguese|viajaste]]</span>
| style="border%3A+1px+solid+%23999999%3B+vertical-align%3A+top%3B" | <span class="Latn" lang="pt">[[viajou#Portuguese|viajou]]</span>
| style="border%3A+1px+solid+%23999999%3B+vertical-align%3A+top%3B" | <span class="Latn" lang="pt">[[viajamos#Portuguese|viajamos]]</span><br><span class="Latn" lang="pt">[[viajámos#Portuguese|viajámos]]</span>
| style="border%3A+1px+solid+%23999999%3B+vertical-align%3A+top%3B" | <span class="Latn" lang="pt">[[viajastes#Portuguese|viajastes]]</span>
| style="border%3A+1px+solid+%23999999%3B+vertical-align%3A+top%3B" | <span class="Latn" lang="pt">[[viajaram#Portuguese|viajaram]]</span>
|-
! style="border%3A+1px+solid+%23999999%3B+background%3A%23b0bfd4" | <span title="pret%C3%A9rito+mais-que-perfeito+simples">Pluperfect</span>
| style="border%3A+1px+solid+%23999999%3B+vertical-align%3A+top%3B" | <span class="Latn" lang="pt">[[viajara#Portuguese|viajara]]</span>
| style="border%3A+1px+solid+%23999999%3B+vertical-align%3A+top%3B" | <span class="Latn" lang="pt">[[viajaras#Portuguese|viajaras]]</span>
| style="border%3A+1px+solid+%23999999%3B+vertical-align%3A+top%3B" | <span class="Latn" lang="pt">[[viajara#Portuguese|viajara]]</span>
| style="border%3A+1px+solid+%23999999%3B+vertical-align%3A+top%3B" | <span class="Latn" lang="pt">[[viajáramos#Portuguese|viajáramos]]</span>
| style="border%3A+1px+solid+%23999999%3B+vertical-align%3A+top%3B" | <span class="Latn" lang="pt">[[viajáreis#Portuguese|viajáreis]]</span>
| style="border%3A+1px+solid+%23999999%3B+vertical-align%3A+top%3B" | <span class="Latn" lang="pt">[[viajaram#Portuguese|viajaram]]</span>
|-
! style="border%3A+1px+solid+%23999999%3B+background%3A%23b0bfd4" | <span title="futuro+do+presente">Future</span>
| style="border%3A+1px+solid+%23999999%3B+vertical-align%3A+top%3B" | <span class="Latn" lang="pt">[[viajarei#Portuguese|viajarei]]</span>
| style="border%3A+1px+solid+%23999999%3B+vertical-align%3A+top%3B" | <span class="Latn" lang="pt">[[viajarás#Portuguese|viajarás]]</span>
| style="border%3A+1px+solid+%23999999%3B+vertical-align%3A+top%3B" | <span class="Latn" lang="pt">[[viajará#Portuguese|viajará]]</span>
| style="border%3A+1px+solid+%23999999%3B+vertical-align%3A+top%3B" | <span class="Latn" lang="pt">[[viajaremos#Portuguese|viajaremos]]</span>
| style="border%3A+1px+solid+%23999999%3B+vertical-align%3A+top%3B" | <span class="Latn" lang="pt">[[viajareis#Portuguese|viajareis]]</span>
| style="border%3A+1px+solid+%23999999%3B+vertical-align%3A+top%3B" | <span class="Latn" lang="pt">[[viajarão#Portuguese|viajarão]]</span>
|-
! style="border%3A+1px+solid+%23999999%3B+background%3A%23ffffaa" colspan="7" | ''<span title="condicional+%2F+futuro+do+pret%C3%A9rito">Conditional</span>''
|-
! style="border%3A+1px+solid+%23999999%3B+background%3A%23ddddaa" |
| style="border%3A+1px+solid+%23999999%3B+vertical-align%3A+top%3B" | <span class="Latn" lang="pt">[[viajaria#Portuguese|viajaria]]</span>
| style="border%3A+1px+solid+%23999999%3B+vertical-align%3A+top%3B" | <span class="Latn" lang="pt">[[viajarias#Portuguese|viajarias]]</span>
| style="border%3A+1px+solid+%23999999%3B+vertical-align%3A+top%3B" | <span class="Latn" lang="pt">[[viajaria#Portuguese|viajaria]]</span>
| style="border%3A+1px+solid+%23999999%3B+vertical-align%3A+top%3B" | <span class="Latn" lang="pt">[[viajaríamos#Portuguese|viajaríamos]]</span>
| style="border%3A+1px+solid+%23999999%3B+vertical-align%3A+top%3B" | <span class="Latn" lang="pt">[[viajaríeis#Portuguese|viajaríeis]]</span>
| style="border%3A+1px+solid+%23999999%3B+vertical-align%3A+top%3B" | <span class="Latn" lang="pt">[[viajariam#Portuguese|viajariam]]</span>
|-
! style="border%3A+1px+solid+%23999999%3B+background%3A%23d0f4d0" colspan="7" | ''<span title="conjuntivo+%28pt%29+%2F+subjuntivo+%28br%29">Subjunctive</span>''
|-
! style="border%3A+1px+solid+%23999999%3B+background%3A%23b0d4b0" | <span title="+presente+do+conjuntivo+%28pt%29+%2F+subjuntivo+%28br%29">Present</span>
| style="border%3A+1px+solid+%23999999%3B+vertical-align%3A+top%3B" | <span class="Latn" lang="pt">[[viaje#Portuguese|viaje]]</span>
| style="border%3A+1px+solid+%23999999%3B+vertical-align%3A+top%3B" | <span class="Latn" lang="pt">[[viajes#Portuguese|viajes]]</span>
| style="border%3A+1px+solid+%23999999%3B+vertical-align%3A+top%3B" | <span class="Latn" lang="pt">[[viaje#Portuguese|viaje]]</span>
| style="border%3A+1px+solid+%23999999%3B+vertical-align%3A+top%3B" | <span class="Latn" lang="pt">[[viajemos#Portuguese|viajemos]]</span>
| style="border%3A+1px+solid+%23999999%3B+vertical-align%3A+top%3B" | <span class="Latn" lang="pt">[[viajeis#Portuguese|viajeis]]</span>
| style="border%3A+1px+solid+%23999999%3B+vertical-align%3A+top%3B" | <span class="Latn" lang="pt">[[viajem#Portuguese|viajem]]</span>
|-
! style="border%3A+1px+solid+%23999999%3B+background%3A%23b0d4b0" | <span title="pret%C3%A9rito+imperfeito+do+conjuntivo+%28pt%29+%2F+subjuntivo+%28br%29">Imperfect</span>
| style="border%3A+1px+solid+%23999999%3B+vertical-align%3A+top%3B" | <span class="Latn" lang="pt">[[viajasse#Portuguese|viajasse]]</span>
| style="border%3A+1px+solid+%23999999%3B+vertical-align%3A+top%3B" | <span class="Latn" lang="pt">[[viajasses#Portuguese|viajasses]]</span>
| style="border%3A+1px+solid+%23999999%3B+vertical-align%3A+top%3B" | <span class="Latn" lang="pt">[[viajasse#Portuguese|viajasse]]</span>
| style="border%3A+1px+solid+%23999999%3B+vertical-align%3A+top%3B" | <span class="Latn" lang="pt">[[viajássemos#Portuguese|viajássemos]]</span>
| style="border%3A+1px+solid+%23999999%3B+vertical-align%3A+top%3B" | <span class="Latn" lang="pt">[[viajásseis#Portuguese|viajásseis]]</span>
| style="border%3A+1px+solid+%23999999%3B+vertical-align%3A+top%3B" | <span class="Latn" lang="pt">[[viajassem#Portuguese|viajassem]]</span>
|-
! style="border%3A+1px+solid+%23999999%3B+background%3A%23b0d4b0" | <span title="futuro+do+conjuntivo+%28pt%29+%2F+subjuntivo+%28br%29">Future</span>
| style="border%3A+1px+solid+%23999999%3B+vertical-align%3A+top%3B" | <span class="Latn" lang="pt">[[viajar#Portuguese|viajar]]</span>
| style="border%3A+1px+solid+%23999999%3B+vertical-align%3A+top%3B" | <span class="Latn" lang="pt">[[viajares#Portuguese|viajares]]</span>
| style="border%3A+1px+solid+%23999999%3B+vertical-align%3A+top%3B" | <span class="Latn" lang="pt">[[viajar#Portuguese|viajar]]</span>
| style="border%3A+1px+solid+%23999999%3B+vertical-align%3A+top%3B" | <span class="Latn" lang="pt">[[viajarmos#Portuguese|viajarmos]]</span>
| style="border%3A+1px+solid+%23999999%3B+vertical-align%3A+top%3B" | <span class="Latn" lang="pt">[[viajardes#Portuguese|viajardes]]</span>
| style="border%3A+1px+solid+%23999999%3B+vertical-align%3A+top%3B" | <span class="Latn" lang="pt">[[viajarem#Portuguese|viajarem]]</span>
|-
! style="border%3A+1px+solid+%23999999%3B+background%3A%23f4e4d0" colspan="7" | ''<span title="imperativo">Imperative</span>''
|-
! style="border%3A+1px+solid+%23999999%3B+background%3A%23d4c4b0" | <span title="imperativo+afirmativo">Affirmative</span>
| style="border%3A+1px+solid+%23999999%3B+vertical-align%3A+top%3B" | -
| style="border%3A+1px+solid+%23999999%3B+vertical-align%3A+top%3B" | <span class="Latn" lang="pt">[[viaja#Portuguese|viaja]]</span>
| style="border%3A+1px+solid+%23999999%3B+vertical-align%3A+top%3B" | <span class="Latn" lang="pt">[[viaje#Portuguese|viaje]]</span>
| style="border%3A+1px+solid+%23999999%3B+vertical-align%3A+top%3B" | <span class="Latn" lang="pt">[[viajemos#Portuguese|viajemos]]</span>
| style="border%3A+1px+solid+%23999999%3B+vertical-align%3A+top%3B" | <span class="Latn" lang="pt">[[viajai#Portuguese|viajai]]</span>
| style="border%3A+1px+solid+%23999999%3B+vertical-align%3A+top%3B" | <span class="Latn" lang="pt">[[viajem#Portuguese|viajem]]</span>
|-
! style="border%3A+1px+solid+%23999999%3B+background%3A%23d4c4b0" | <span title="imperativo+negativo">Negative</span> ([[não#Portuguese|não]])
| style="border%3A+1px+solid+%23999999%3B+vertical-align%3A+top%3B" | -
| style="border%3A+1px+solid+%23999999%3B+vertical-align%3A+top%3B" | <span class="Latn" lang="pt">[[viajes#Portuguese|viajes]]</span>
| style="border%3A+1px+solid+%23999999%3B+vertical-align%3A+top%3B" | <span class="Latn" lang="pt">[[viaje#Portuguese|viaje]]</span>
| style="border%3A+1px+solid+%23999999%3B+vertical-align%3A+top%3B" | <span class="Latn" lang="pt">[[viajemos#Portuguese|viajemos]]</span>
| style="border%3A+1px+solid+%23999999%3B+vertical-align%3A+top%3B" | <span class="Latn" lang="pt">[[viajeis#Portuguese|viajeis]]</span>
| style="border%3A+1px+solid+%23999999%3B+vertical-align%3A+top%3B" | <span class="Latn" lang="pt">[[viajem#Portuguese|viajem]]</span>
|}
</div>
</div>
""")
expected = {
"forms": [
{
"form": "-ar verb",
"source": "Conjugation title",
"tags": [
"class"
]
},
{
"form": "viajar",
"source": "Conjugation",
"tags": [
"impersonal",
"infinitive"
]
},
{
"form": "viajar",
"source": "Conjugation",
"tags": [
"first-person",
"infinitive",
"singular"
]
},
{
"form": "viajares",
"source": "Conjugation",
"tags": [
"infinitive",
"second-person",
"singular"
]
},
{
"form": "viajar",
"source": "Conjugation",
"tags": [
"infinitive",
"singular",
"third-person"
]
},
{
"form": "viajarmos",
"source": "Conjugation",
"tags": [
"first-person",
"infinitive",
"plural"
]
},
{
"form": "viajardes",
"source": "Conjugation",
"tags": [
"infinitive",
"plural",
"second-person"
]
},
{
"form": "viajarem",
"source": "Conjugation",
"tags": [
"infinitive",
"plural",
"third-person"
]
},
{
"form": "viajando",
"source": "Conjugation",
"tags": [
"gerund"
]
},
{
"form": "viajado",
"source": "Conjugation",
"tags": [
"masculine",
"participle",
"past",
"singular"
]
},
{
"form": "viajados",
"source": "Conjugation",
"tags": [
"masculine",
"participle",
"past",
"plural"
]
},
{
"form": "viajada",
"source": "Conjugation",
"tags": [
"feminine",
"participle",
"past",
"singular"
]
},
{
"form": "viajadas",
"source": "Conjugation",
"tags": [
"feminine",
"participle",
"past",
"plural"
]
},
{
"form": "viajo",
"source": "Conjugation",
"tags": [
"first-person",
"indicative",
"present",
"singular"
]
},
{
"form": "viajas",
"source": "Conjugation",
"tags": [
"indicative",
"present",
"second-person",
"singular"
]
},
{
"form": "viaja",
"source": "Conjugation",
"tags": [
"indicative",
"present",
"singular",
"third-person"
]
},
{
"form": "viajamos",
"source": "Conjugation",
"tags": [
"first-person",
"indicative",
"plural",
"present"
]
},
{
"form": "viajais",
"source": "Conjugation",
"tags": [
"indicative",
"plural",
"present",
"second-person"
]
},
{
"form": "viajam",
"source": "Conjugation",
"tags": [
"indicative",
"plural",
"present",
"third-person"
]
},
{
"form": "viajava",
"source": "Conjugation",
"tags": [
"first-person",
"imperfect",
"indicative",
"singular"
]
},
{
"form": "viajavas",
"source": "Conjugation",
"tags": [
"imperfect",
"indicative",
"second-person",
"singular"
]
},
{
"form": "viajava",
"source": "Conjugation",
"tags": [
"imperfect",
"indicative",
"singular",
"third-person"
]
},
{
"form": "viajávamos",
"source": "Conjugation",
"tags": [
"first-person",
"imperfect",
"indicative",
"plural"
]
},
{
"form": "viajáveis",
"source": "Conjugation",
"tags": [
"imperfect",
"indicative",
"plural",
"second-person"
]
},
{
"form": "viajavam",
"source": "Conjugation",
"tags": [
"imperfect",
"indicative",
"plural",
"third-person"
]
},
{
"form": "viajei",
"source": "Conjugation",
"tags": [
"first-person",
"indicative",
"preterite",
"singular"
]
},
{
"form": "viajaste",
"source": "Conjugation",
"tags": [
"indicative",
"preterite",
"second-person",
"singular"
]
},
{
"form": "viajou",
"source": "Conjugation",
"tags": [
"indicative",
"preterite",
"singular",
"third-person"
]
},
{
"form": "viajamos",
"source": "Conjugation",
"tags": [
"first-person",
"indicative",
"plural",
"preterite"
]
},
{
"form": "viajámos",
"source": "Conjugation",
"tags": [
"first-person",
"indicative",
"plural",
"preterite"
]
},
{
"form": "viajastes",
"source": "Conjugation",
"tags": [
"indicative",
"plural",
"preterite",
"second-person"
]
},
{
"form": "viajaram",
"source": "Conjugation",
"tags": [
"indicative",
"plural",
"preterite",
"third-person"
]
},
{
"form": "viajara",
"source": "Conjugation",
"tags": [
"first-person",
"indicative",
"pluperfect",
"singular"
]
},
{
"form": "viajaras",
"source": "Conjugation",
"tags": [
"indicative",
"pluperfect",
"second-person",
"singular"
]
},
{
"form": "viajara",
"source": "Conjugation",
"tags": [
"indicative",
"pluperfect",
"singular",
"third-person"
]
},
{
"form": "viajáramos",
"source": "Conjugation",
"tags": [
"first-person",
"indicative",
"pluperfect",
"plural"
]
},
{
"form": "viajáreis",
"source": "Conjugation",
"tags": [
"indicative",
"pluperfect",
"plural",
"second-person"
]
},
{
"form": "viajaram",
"source": "Conjugation",
"tags": [
"indicative",
"pluperfect",
"plural",
"third-person"
]
},
{
"form": "viajarei",
"source": "Conjugation",
"tags": [
"first-person",
"future",
"indicative",
"singular"
]
},
{
"form": "viajarás",
"source": "Conjugation",
"tags": [
"future",
"indicative",
"second-person",
"singular"
]
},
{
"form": "viajará",
"source": "Conjugation",
"tags": [
"future",
"indicative",
"singular",
"third-person"
]
},
{
"form": "viajaremos",
"source": "Conjugation",
"tags": [
"first-person",
"future",
"indicative",
"plural"
]
},
{
"form": "viajareis",
"source": "Conjugation",
"tags": [
"future",
"indicative",
"plural",
"second-person"
]
},
{
"form": "viajarão",
"source": "Conjugation",
"tags": [
"future",
"indicative",
"plural",
"third-person"
]
},
{
"form": "viajaria",
"source": "Conjugation",
"tags": [
"conditional",
"first-person",
"singular"
]
},
{
"form": "viajarias",
"source": "Conjugation",
"tags": [
"conditional",
"second-person",
"singular"
]
},
{
"form": "viajaria",
"source": "Conjugation",
"tags": | |
medulla.",
{"entities": [(101, 113, LABEL), (180, 213, LABEL), (227, 234, LABEL), (53, 71, FUNC)]}),
###TESTED TO HERE
("Some NTS neurones are glucose sensitive, while others express POMC, leptin receptors or the MC4R.",
{"entities": [(5, 8, LABEL), (22, 29, PHYS), (62, 66, NT), (68, 84, PHYS), (92, 96, PHYS)]}),
("The NTS also sends a dense projection to the LHN, reinforcening its contribution in the control of body energy (Williams et al. 2001)",
{"entities": [(4, 7, LABEL), (45, 48, LABEL), (112, 120, PER), (128, 132, "DATE"), (88, 110, FUNC), ]}),
("Notably, we demonstrated that RAB39B is an abundant protein in dopaminergic neurons in the SNpc, the neuronal subtype selectively lost in PD.",
{"entities": [(30, 36, PHYS), (63, 75, NT), (91, 95, LABEL)]}),
("MC4R is localized in the thalamus, hypothalamus, and hippocampus among other brain and peripheral sites [20, 21].",
{"entities": [(0, 4, PHYS), (25, 33, LABEL), (35, 47, LABEL), (53, 64, LABEL)]}),
("SIM1 homozygous knockout mice fail to properly form at least the paraventricular (PVH), supraoptic (SON), and anterior periventricular (aPV) hypothalamic nuclei and die perinatally [24].",
{"entities": [(0, 4, PHYS), (65, 80, LABEL), (82, 85, LABEL), (88, 98, LABEL), (100, 103, LABEL), (110, 134, LABEL), (136, 139, LABEL)]}),
("We next explored the contribution of BRS3 in SIM1-expressing neurons, which are predominantly in the paraventricular nucleus of the hypothalamus (PVH).",
{"entities": [(37, 41, PHYS), (45, 49, PHYS), (101, 144, LABEL)]}),
("BRS3 deletion in SIM1-expressing neurons impaired insulin tolerance and increased insulin levels.",
{"entities": [(0, 4, PHYS), (17, 21, PHYS), (41, 67, PHYS), (72, 96, FUNC)]}),
("Glutamate excitotoxicity may develop during numerous events; as a secondary injury after traumatic injury (Park et al., 2008).",
{"entities": [(0, 9, NT)]}),
("The release of glutamate is followed by the activation of its postsynaptic receptors.",
{"entities": [(15, 24, NT), (44, 84, FUNC)]}),
("The experimental rats received one single injection of TGF-β1 (R&D Systems, United States) on day seven after 3-AP infusion. TGF-β1 was unilaterally injected into the lateral ventricle of rats mounted on a stereotaxic frame.",
{"entities": [(167, 185, LABEL), (55, 61, PHYS), (125, 131, PHYS)]}),
("Membranous organelles make intimate contacts with each other and serve as platforms for multiple molecular processes executed by signalling proteins.",
{"entities": [(27, 60, FUNC), (88, 148, FUNC)]}),
("GLP-1 mediates its effects by binding to its receptor, the GLP-1 receptor (GLP-1R), which is a G protein–coupled receptor that is abundantly present in the pancreatic beta cells.",
{"entities": [(0, 5, NT), (59, 73, PHYS), (75, 81, PHYS), (156, 177, LABEL)]}),
("Projections of the medial cerebellar nucleus to the ventrolateral periaqueductal gray.",
{"entities": [(19, 44, LABEL), (52, 85, LABEL)]}),
("First, we injected the mCbN with viruses expressing a channelrhodopsin-eYFP (ChR2-eYFP) fusion protein.",
{"entities": [(23, 27, LABEL)]}),
("Middle, Confocal image of the mCbN after injection of AAV-hSyn-ChR2-eYFP virus",
{"entities": [(30, 34, LABEL)]}),
("Previous tracing studies have demonstrated that the mCbN indeed projects to the vlPAG (Gonzalo-Ruiz et al., 1990).",
{"entities": [(52, 56, LABEL), (80, 85, LABEL), (87, 99, PER), (108, 112, "DATE")]}),
("Right, Confocal image of virally labeled mCbN axons in the vlPAG.",
{"entities": [(41, 45, LABEL), (59, 64, LABEL)]}),
("Example injection site of CTb-GFP in the vlPAG.",
{"entities": [(41, 46, LABEL)]}),
("Low magnification image of mCbN labeled axons in vlPAG.",
{"entities": [(27, 31, LABEL), (49, 54, LABEL)]}),
("Even the vlPAG, however, is heterogeneous, as pharmacological activation of the vlPAG elicits freezing, bradycardia, and anti-nociception (Bandler et al., 2000).",
{"entities": [(9, 14, LABEL), (80, 85, LABEL), (139, 146, PER), (155, 159, "DATE"), (94, 137, FUNC)]}),
("Daily food intake (FI) is a function of meal size (MZ) and meal number.",
{"entities": [(0, 17, FUNC), (19, 21, FUNC)]}),
("Feeding patterns via changes in the concentration of hypothalamic dopamine [32], among other neuromediators.",
{"entities": [(0, 16, FUNC), (53, 65, LABEL), (66, 74, NT)]}),
("The beta toxin gene (cpb) is encoded on a large plasmid and codes for a small polypeptide protoxin.",
{"entities": [(60, 98, FUNC)]}),
("Also, an intracerebral injection of the neurotoxin 6-hydroxydopamine (OHDA) into the medial forebrain bundle (MFB) has been shown to result in a parkinsonian rat model [28, 29].",
{"entities": [(51, 58, PHYS), (70, 74, PHYS), (85, 108, LABEL), (110, 113, LABEL)]}),
("The level of DA was measured in the left hemisphere striatum, prefrontal cortex, and dorsal hippocampus using a dopamine ELISA kit according to the manufacturer's instructions.",
{"entities": [(13, 15, NT), (52, 60, LABEL), (62, 79, LABEL), (85, 103, LABEL)]}),
("Serotonin levels were measured in the left hemisphere striatum, prefrontal cortex, and hippocampus of all rats on PND 76.",
{"entities": [(0, 9, NT), (54, 62, LABEL), (64, 81, LABEL), (87, 98, LABEL)]}),
("Besides, given the hippocampus-dependent learning and memory, the long-lasting and elevated plasma corticosterone levels impaired learning and memory resulting in poor cognitive performance of maternally separated rats in the MWM test.",
{"entities": [(19, 30, LABEL), (41, 60, FUNC), (99, 113, PHYS), (130, 149, FUNC)]}),
("The DRN contains a large number of 5-HT neurons that project to the forebrain and midbrain, including reward-related brain areas, such as the VTA, nucleus accumbens (NAc), and lateral hypothalamus (LH).",
{"entities": [(4, 7, LABEL), (35, 39, NT), (68, 77, LABEL), (82, 91, LABEL), (102, 116, FUNC), (142, 145, LABEL), (147, 164, LABEL), (166, 169, LABEL), (176, 196, LABEL), (198, 200, LABEL)]}),
("Similar to an antidepressive effect, rewarding potency of DRN 5-HT neurons has been studied by using several lines of genetically-modified mice.",
{"entities": [(14, 35, FUNC), (58, 61, LABEL), (62, 66, NT)]}),
("We examined the rewarding potency of optogenetic manipulation of DRN 5-HT neurons using an adeno-associated virus (AAV) expressing optogenetic actuators",
{"entities": [(65, 68, LABEL), (69, 74, NT)]}),
("We found that the optogenetic activation of DRN 5-HT neurons and 5-HT projections from the DRN to VTA strongly reinforced nose-poke self-stimulation behavior and induced conditioned place preference.",
{"entities": [(44, 47, LABEL), (48, 52, NT), (65, 69, NT), (91, 94, LABEL), (98, 101, LABEL), (111, 157, FUNC), (170, 198, FUNC)]}),
("Optogenetic Activation of 5-HT Neuron Terminals in the VTA is Responsible for Reinforcement of Nose-Poke Behavior but not in the LH, CeA, NAc, or Ventral Pallidum (VP).",
{"entities": [(26, 30, NT), (55, 58, LABEL), (78, 113, FUNC), (129, 131, LABEL), (133, 136, LABEL), (138, 141, LABEL), (146, 162, LABEL), (164, 166, LABEL)]}),
("In this study, we examined the involvement of the VTA, LH, NAc, CeA, and VP.",
{"entities": [(50, 53, LABEL), (55, 57, LABEL), (59, 62, LABEL), (64, 67, LABEL), (73, 75, LABEL)]}),
("In summary, our data provide direct evidence that selective stimulation of DRN 5-HT neurons projecting to the VTA was sufficient for reinforcing effect and conditioned place preference.",
{"entities": [(75, 78, LABEL), (79, 83, NT), (110, 113, LABEL), (133, 151, FUNC), (156, 184, FUNC)]}),
("Adiponectin (ADPN) is a plasma protein that belongs to the complement 1q family and it is secreted by adipose tissue [1,2].",
{"entities": []}),
("However, ADPN was found to modulate glucose metabolism in hippocampal neurons, increasing glucose uptake, glycolysis, and ATP production rates [27].",
{"entities": [(27, 54, FUNC), (58, 69, LABEL)]}),
("In both humans and laboratory animals, kappa opioid receptor (KOR) activation produces depressive and anxiety-like effects.",
{"entities": [(39, 51, NT), (87, 122, FUNC)]}),
("The brain stem contains the midbrain, pons, and medulla in the caudal fossa.",
{"entities": [(4, 14, LABEL), (28, 36, LABEL), (38, 42, LABEL), (48, 55, LABEL), (63, 75, LABEL)]}),
("Therefore, we planned to inject retrograde tracer cholera toxin B subunit (CB) into the CSF-contacting nucleus.",
{"entities": [(88, 110, LABEL)]}),
("The generic category of diffuse gliomas (DG) includes two distinct tumor entities: tumors derived from oligodendrocyte precursor cells – OPCs (OligoPDTs) versus those derived from stem cells (SCDTs).",
{"entities": []}),
("In particular, | |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Gce classes and methods to manage Compute Engine resources."""
__author__ = '<EMAIL> (<NAME>)'
import logging
import os
import lib_path
from apiclient import discovery
from apiclient import errors as api_errors
from apiclient import http
import httplib2
import oauth2client.client as client
try:
import simplejson as json
except ImportError:
import json
import gce_exception as error
API = 'compute'
GCE_URL = 'https://www.googleapis.com/%s' % API
GOOGLE_PROJECT = 'google'
class GceProject(object):
  """Gce classes and methods to work with Compute Engine.

  Attributes:
    settings: Dictionary of settings as set in the settings.json file.
    gce_url: The string URL of the Compute Engine API endpoint.
    project_id: A string name for the Compute Engine project.
    zone_name: A string name for the default zone.
    service: An apiclient.discovery.Resource object for Compute Engine.
  """

  def __init__(
      self, credentials, project_id=None, zone_name=None, settings=None):
    """Initializes the GceProject class.

    Sets default values for class attributes. See the instance resource for
    more information:

    https://developers.google.com/compute/docs/reference/v1beta14/instances

    Args:
      credentials: An oauth2client.client.Credentials object.
      project_id: A string name for the Compute Engine project. Defaults to
        the 'project' entry of the settings file.
      zone_name: The string name of the zone. Defaults to the compute zone
        entry of the settings file.
      settings: A dictionary of GCE settings. These settings will override
        any settings in the settings.json file. See the settings.json file for
        key names.
    """
    settings_file = os.path.join(
        os.path.dirname(__file__), '../../settings.json')
    # Use a context manager so the settings file handle is always closed
    # (the previous revision leaked the open file object).
    with open(settings_file, 'r') as f:
      self.settings = json.loads(f.read())
    if settings:
      self.settings.update(settings)

    api_version = self.settings['compute']['api_version']
    self.gce_url = '%s/%s' % (GCE_URL, api_version)

    auth_http = self._auth_http(credentials)
    self.service = discovery.build(API, api_version, http=auth_http)

    # Fall back to the settings file for any identifier not given explicitly.
    self.project_id = project_id or self.settings['project']
    self.zone_name = zone_name or self.settings['compute']['zone']

  def list_instances(self, zone_name=None, **args):
    """Lists all instances for a project and zone with an optional filter.

    Args represent any optional parameters for the list instances request.
    See the API documentation:

    https://developers.google.com/compute/docs/reference/v1beta14/instances/list

    Args:
      zone_name: The zone in which to query.

    Returns:
      A list of Instance objects.
    """
    return self._list(Instance, zone_name=zone_name, **args)

  def list_firewalls(self, **args):
    """Lists all firewalls for a project.

    Args represent any optional parameters for the list firewalls request.
    See the API documentation:

    https://developers.google.com/compute/docs/reference/v1beta14/firewalls/list

    Returns:
      A list of Firewall objects.
    """
    return self._list(Firewall, **args)

  def list_images(self, **args):
    """Lists all images for a project.

    Args represent any optional parameters for the list images request.
    See the API documentation:

    https://developers.google.com/compute/docs/reference/v1beta14/images/list

    Returns:
      A list of Image objects.
    """
    return self._list(Image, **args)

  def insert(self, resource):
    """Insert a resource into the GCE project.

    Args:
      resource: A GceResource object.

    Raises:
      GceError: Raised when API call fails.
      GceTokenError: Raised when the access token fails to refresh.
    """
    resource.gce_project = self
    # _run_request already raises GceError/GceTokenError; the previous
    # try/except here only re-raised, so it was removed.
    self._run_request(self._insert_request(resource))

  def bulk_insert(self, resources):
    """Insert multiple resources using a batch request.

    Args:
      resources: A list of GceResource objects.

    Raises:
      GceError: Raised when API call fails.
      GceTokenError: Raised when the access token fails to refresh.
    """
    self._run_batch(resources, self._insert_request)

  def bulk_delete(self, resources):
    """Delete resources using a batch request.

    Args:
      resources: A list of GceResource objects.

    Raises:
      GceError: Raised when API call fails.
      GceTokenError: Raised when the access token fails to refresh.
    """
    self._run_batch(resources, self._delete_request)

  def _run_batch(self, resources, make_request):
    """Build and run one batch request covering all given resources.

    Shared implementation of bulk_insert and bulk_delete.

    Args:
      resources: A list of GceResource objects.
      make_request: A callable mapping a GceResource to an HTTP request
        (e.g. self._insert_request or self._delete_request).

    Raises:
      GceError: Raised when API call fails.
      GceTokenError: Raised when the access token fails to refresh.
    """
    batch = http.BatchHttpRequest()
    for resource in resources:
      resource.gce_project = self
      batch.add(make_request(resource), callback=self._batch_response)
    self._run_request(batch)

  def _list(self, resource_class, zone_name=None, **args):
    """Get a list of all project resources of type resource_class.

    Follows list pagination until list_next() returns None.

    Args:
      resource_class: A class of type GceResource.
      zone_name: A string zone to apply to the request, if applicable.

    Returns:
      A list of resource_class objects.

    Raises:
      GceError: Raised when API call fails.
      GceTokenError: Raised when the access token fails to refresh.
    """
    resources = []
    resource = resource_class()
    resource.gce_project = self
    request = self._list_request(resource, zone_name=zone_name, **args)
    while request:
      results = self._run_request(request)
      for result in results.get('items', []):
        new_resource = resource_class()
        new_resource.from_json(result)
        resources.append(new_resource)
      # list_next() combines the parameters of the previous request with
      # the page token found in `results`; an equivalent fresh request is
      # passed here, preserving the original behavior.
      request = resource.service_resource().list_next(
          self._list_request(resource, zone_name=zone_name, **args),
          results)
    return resources

  def _insert_request(self, resource):
    """Construct an insert request for the resource.

    Args:
      resource: A GceResource object.

    Returns:
      The insert method of the apiclient.discovery.Resource object.
    """
    resource.set_defaults()
    params = {'project': self.project_id, 'body': resource.json}
    if resource.scope == 'zonal':
      params['zone'] = self.zone_name
    return resource.service_resource().insert(**params)

  def _list_request(self, resource, zone_name=None, **args):
    """Construct a list request for the resource.

    Args:
      resource: A GceResource object.
      zone_name: The string zone name. Only applicable for zonal resources.

    Returns:
      The list method of the apiclient.discovery.Resource object.
    """
    params = {'project': self.project_id}
    if args:
      params.update(args)
    if resource.scope == 'zonal':
      # Default to the project's zone when the caller gave none.
      if not zone_name:
        zone_name = self.zone_name
      params['zone'] = zone_name
    return resource.service_resource().list(**params)

  def _delete_request(self, resource):
    """Construct a delete request for the resource.

    Args:
      resource: A GceResource object.

    Returns:
      The delete method of the apiclient.discovery.Resource object.
    """
    resource.set_defaults()
    params = {'project': self.project_id, resource.type: resource.name}
    if resource.scope == 'zonal':
      params['zone'] = self.zone_name
    return resource.service_resource().delete(**params)

  def _run_request(self, request):
    """Run API request and handle any errors.

    Args:
      request: An apiclient.http.HttpRequest object.

    Returns:
      Dictionary results of the API call.

    Raises:
      GceError: Raised if API call fails.
      GceTokenError: Raised if there's a failure refreshing the access token.
    """
    result = {}
    try:
      result = request.execute()
    except httplib2.HttpLib2Error as e:
      logging.error(e)
      raise error.GceError('Transport Error occurred')
    except client.AccessTokenRefreshError as e:
      logging.error(e)
      raise error.GceTokenError('Access Token refresh error')
    except api_errors.BatchError as e:
      logging.error(e)
      logging.error('BatchError: %s %s' % (e.resp.status, e.content))
      # Per-request failures inside a batch are reported via the callback;
      # only a non-200 status on the batch itself is treated as fatal.
      if e.resp.status != 200:
        raise error.GceError(
            'Batch Error: %s %s' % (e.resp.status, e.resp.reason))
    except api_errors.HttpError as e:
      logging.error(e)
      raise error.GceError(
          'HttpError: %s %s' % (e.resp.status, e.resp.reason))
    return result

  def _batch_response(self, request_id, response, exception):
    """Log information about the batch request response.

    Args:
      request_id: The string request id.
      response: A deserialized response object; may be None on failure.
      exception: An apiclient.errors.HttpError exception object if an error
          occurred while processing the request.
    """
    if exception is not None:
      logging.error(exception)
      # Use lazy %-style logging args: `response` may be None or a dict,
      # so the previous string concatenation could raise TypeError here.
      logging.error('API Request Error! %s', response)

  def _auth_http(self, credentials):
    """Authorize an instance of httplib2.Http using credentials.

    Args:
      credentials: An oauth2client.client.Credentials object.

    Returns:
      An authorized instance of httplib2.Http.
    """
    # Named `transport` so it does not shadow the module-level
    # `apiclient.http` import.
    transport = httplib2.Http()
    return credentials.authorize(transport)
class GceResource(object):
  """A GCE resource belonging to a GCE project.

  Attributes:
    type: The string name of the resource type (ex: instance, firewall).
    scope: The string name of the scope (ex: zonal, global).
  """

  def __init__(self, type, scope):
    """Initializes the GceResource class.

    Args:
      type: The string name of the resource type (ex: instance, firewall).
      scope: The string name of the scope (ex: zonal, global).
    """
    self.type = type
    self.scope = scope

  @property
  def url(self):
    """Generate the fully-qualified URL of the resource.

    Returns:
      The string fully-qualified URL.
    """
    # Images can belong to a different project (e.g. Google-provided public
    # images), so they carry their own project id attribute.
    if self.type == 'image':
      owner_project = self.project_id
    else:
      owner_project = self.gce_project.project_id

    project_prefix = '%s/projects/%s' % (
        self.gce_project.gce_url, owner_project)
    if self.scope == 'zonal':
      return '%s/zones/%s/%ss/%s' % (
          project_prefix, self.zone.name, self.type, self.name)
    if self.scope == 'global':
      return '%s/global/%ss/%s' % (project_prefix, self.type, self.name)

  def set_defaults(self):
    """Set any defaults."""
    if self.name:
      return

    # An unset name falls back to the project's settings; machine types use
    # the snake_case settings key.
    compute_settings = self.gce_project.settings['compute']
    settings_key = 'machine_type' if self.type == 'machineType' else self.type
    self.name = compute_settings[settings_key]
class Instance(GceResource):
"""A class representing a GCE Instance resource.
Attributes:
name: The string name of the instance.
zone: An object of type Zone representing the instance's zone.
description: A string description of the instance.
tags: A list of string tags for the instance.
image: An object of type Image representing the instance's image.
machine_type: An object of type MachineType representing the instance's
machine type.
network_interfaces: A list of dictionaries representing the instance's
network interfaces.
disks: A list of dictionaries representing the instance's disks.
metadata: A list of dictionaries representing the instance's metadata.
service_accounts: A list of dictionaries representing the instance's
service accounts.
"""
def __init__(self,
name=None,
zone_name=None,
description=None,
tags=None,
image_name=None,
image_project_id=GOOGLE_PROJECT,
machine_type_name=None,
network_interfaces=None,
disks=None,
metadata=None,
service_accounts=None):
"""Initializes the Instance class.
Args:
name: The string name of the instance.
zone_name: The string name of the zone.
description: The string description of | |
"Discarding 3' end of reads"
logger.info(msg)
map_df.loc[m_positive, 'end'] = map_df.loc[m_positive, 'start'] + 1
map_df.loc[m_negative, 'start'] = map_df.loc[m_negative, 'end'] - 1
# now sort everything
msg = "Sorting reads by coordinates"
logger.info(msg)
map_df = map_df.sort_values(['seqname', 'start'])
# and we only want the BED6 fields
map_df = map_df[bed_utils.bed6_field_names]
return map_df
###
# This function smoothes the profiles, frame-by-frame
###
# Default lowess parameters: fraction of the signal used for each local
# regression, and number of robustifying reweighting iterations.
default_fraction = 0.2
default_reweighting_iterations = 0

def smooth_profile(profile, reweighting_iterations=default_reweighting_iterations,
        fraction=default_fraction):
    """ This function smoothes the given ORF profile using the frame-specific
        approach. It assumes the profile is a dense numpy array and that any
        filtering due to differences of counts in reading frames, lengths, etc.,
        has already been performed.

        Please see the statsmodels.api.nonparametric.lowess documentation for
        more information about reweighting_iterations and fraction.

        Args:
            profile (np.array of numbers): an array containing the observed
                ORF profile. In principle, this could already be normalized.

            reweighting_iterations (int): the number of reweighting iterations

            fraction (float): the percentage of the signal to use for smoothing

        Returns:
            np.array: the smoothed profile

        Imports:
            statsmodels.api.nonparametric.lowess
    """
    import statsmodels.api as sm
    lowess = sm.nonparametric.lowess
    import numpy as np

    # NOTE(review): zeros_like preserves profile's dtype; an integer profile
    # would truncate the smoothed float values — confirm callers pass floats.
    smoothed_profile = np.zeros_like(profile)

    # Smooth each reading frame independently. The x values are built from
    # the length of each frame's own signal: frames 1 and 2 can be one
    # element shorter than frame 0 when len(profile) is not a multiple of 3,
    # and the previous version reused the frame-0 x values for all frames,
    # which fails with a shape mismatch in that case.
    for frame in range(3):
        endog = profile[frame::3]
        exog = np.arange(len(endog))

        smoothed_profile[frame::3] = lowess(endog, exog, is_sorted=True,
            return_sorted=False, it=reweighting_iterations, frac=fraction)

    return smoothed_profile
###
#
# This function extracts all ORFs which count as "translated", according to the
# values in the config file.
#
###
# Default thresholds for selecting "translated" ORFs.
default_min_profile = None
default_min_bf_mean = 5
default_max_bf_var = None
default_min_bf_likelihood = None
default_min_length = 0
default_chisq_alpha = 0.01

def get_base_filter(bf, min_profile=default_min_profile, min_length=default_min_length):
    """ This function extracts the ORFs from the BF dataframe which meet the
        minimum requirements to be considered for prediction. Namely, these
        requirements are:

            * The sum across all reading frames exceeds the specified minimum
            * The length exceeds the specified minimum length
            * The number of reads in the first reading frame exceeds the number in
              either of the other two reading frames (though not necessarily the
              other two reading frames combined)

        Args:
            bf (pd.DataFrame): a data frame containing the relevant ORF information

            min_profile (int): the minimum sum across all reading frames to
                consider an ORF as translated; if None, any strictly positive
                profile sum passes this filter
                (the docstring previously called this parameter "min_signal",
                which does not match the actual parameter name)

            min_length (int): the minimum length of ORF to consider

        Returns:
            boolean mask: a mask of the input data frame indicating all ORFs which
                meet the filtering criteria
    """
    # all thresholds are strict (>) to match the original behavior
    if min_profile is None:
        m_profile = bf['profile_sum'] > 0
    else:
        m_profile = bf['profile_sum'] > min_profile

    m_length = bf['orf_len'] > min_length

    # the "frame filter": frame 1 must dominate each of the other frames
    m_x1_gt_x2 = bf['x_1_sum'] > bf['x_2_sum']
    m_x1_gt_x3 = bf['x_1_sum'] > bf['x_3_sum']

    m_base = m_profile & m_length & m_x1_gt_x2 & m_x1_gt_x3
    return m_base
def get_bf_filter(bf, min_bf_mean=default_min_bf_mean,
        max_bf_var=default_max_bf_var,
        min_bf_likelihood=default_min_bf_likelihood):
    """ Select ORFs whose Bayes factor estimates pass the configured
        thresholds. Length and profile sums are not considered here, so this
        mask must be combined with the get_base_filter mask to obtain the
        true set of predicted ORFs.

        Args:
            bf (pd.DataFrame) : a data frame containing the relevant ORF information

            min_bf_mean (float) : if max_bf_var is not None, then this is taken
                as a hard threshold on the estimated Bayes factor mean. If
                min_bf_likelihood is given, then this is taken as the boundary
                value; that is, an ORF is "translated" if:

                    [P(bf > min_bf_mean)] > min_bf_likelihood

                If both max_bf_var and min_bf_likelihood are None, then this is
                taken as a hard threshold on the mean for selecting translated ORFs.

                If both max_bf_var and min_bf_likelihood are given, then both
                filters will be applied and the result will be the intersection.

            max_bf_var (float) : if given, then this is taken as a hard threshold
                on the estimated Bayes factor variance

            min_bf_likelihood (float) : if given, then this is taken a threshold
                on the likelihood of translation (see min_bf_mean description
                for more details)

        Returns:
            boolean mask: a mask of the input data frame indicating all ORFs which
                meet the filtering criteria

        Imports:
            numpy
            scipy.stats
    """
    import numpy as np
    import scipy.stats

    # each mask defaults to "pass everything"; the relevant masks are
    # replaced below depending on which thresholds were supplied
    mean_mask = True
    var_mask = True
    likelihood_mask = True

    if max_bf_var is not None:
        # hard thresholds on both the estimated mean and variance
        mean_mask = bf['bayes_factor_mean'] > min_bf_mean
        var_mask = bf['bayes_factor_var'] < max_bf_var

    if min_bf_likelihood is not None:
        # likelihood that the true BF exceeds min_bf_mean, i.e.
        # 1 - cdf(min_bf_mean) under Normal(estimated mean, estimated var);
        # scipy parameterizes the normal with the std, hence sqrt(var)
        loc = bf['bayes_factor_mean']
        scale = np.sqrt(bf['bayes_factor_var'])
        likelihood = 1 - scipy.stats.norm.cdf(min_bf_mean, loc, scale)

        nans = np.isnan(likelihood)
        num_nans = sum(nans)
        num_predictions = len(likelihood)

        msg = "Num nans: {}, num predictions: {}".format(num_nans, num_predictions)
        logger.debug(msg)

        if num_nans != num_predictions:
            msg = "Maximum likelihood: {}".format(max(likelihood[~nans]))
            logger.debug(msg)

        likelihood_mask = likelihood > min_bf_likelihood

    if (max_bf_var is None) and (min_bf_likelihood is None):
        # neither optional filter given: hard threshold on the mean only
        mean_mask = bf['bayes_factor_mean'] > min_bf_mean

    return mean_mask & var_mask & likelihood_mask
def get_predicted_orfs(bf, min_signal=default_min_profile,
min_length=default_min_length,
min_bf_mean=default_min_bf_mean,
max_bf_var=default_max_bf_var,
min_bf_likelihood=default_min_bf_likelihood,
chisq_alpha=default_chisq_alpha,
select_longest_by_stop=True,
use_chi_square=False):
""" This function applies a set of filters to ORFs to select those which
are predicted as "translated." This function selects translated ORFs
based on the Bayes factor estimates or the chi-square p-values. ORFs
must pass all of the relevant features to be selected as "translated."
Optionally, among all ORFs which share a stop codon, only the longest
"translated" ORF is selected.
Furthermore, for both BF and chi-square predictions, only ORFs which
have more reads in the first reading frame than either of the other two
will be selected as translated. (This is called the 'frame filter'
below.)
Args:
bf (pd.DataFrame) : a data frame containing the relevant ORF information
min_signal (int) : the minimum sum across all reading frames to consider
an ORF as translated
min_length (int) : the minimum length of ORF to consider
min_bf_mean (float) : if max_bf_var is not None, then this is taken
as a hard threshold on the estimated Bayes factor mean. If
min_bf_likelihood is given, then this is taken as the boundary
value; that is, an ORF is "translated" if:
[P(bf > min_bf_mean)] > min_bf_likelihood
If both max_bf_var and min_bf_likelihood are None, then this is
taken as a hard threshold on the mean for selecting translated ORFs.
If both max_bf_var and min_bf_likelihood are given, then both
filters will be applied and the result will be the intersection.
max_bf_var (float) : if given, then this is taken as a hard threshold
on the estimated Bayes factor variance
min_bf_likelihood (float) : if given, then this is taken a threshold
on the likelihood of translation (see min_bf_mean description
for more details)
chisq_alpha (float) : the significance value for selecting translated
ORFs according to the chi-square test. This value is
Bonferroni-corrected based on the number of ORFs which meet the
length, profile and frame filters.
select_longest_by_stop (bool): if True, then the selected ORFs will
be merged based on stop codons: only the longest translated ORF
at each stop codon will be returned. Otherwise, all ORFs will
be returned.
use_chi_square (bool): if True, then the selection is made based on
the chi-square p-values only (Rp-chi), otherwise it is based on the Bayes
factor estimates (Rp-Bp).
Returns:
all_orfs (pd.DataFrame) : all (longest) ORFs which meet the profile,
length, frame filters
predicted_orfs (pd.DataFrame) : all (longest) ORFs which meet the
profile, length, frame Bayes factor (min_bf_mean, max_bf_var, min_bf_likelihood)
or chisq_alpha filters
Imports:
bio_utils.bio
numpy
scipy.stats
"""
import bio_utils.bio as bio
import bio_utils.bed_utils as bed_utils
import numpy as np
import scipy.stats
msg = "Finding all ORFs with signal"
logger.info(msg)
m_base = get_base_filter(bf, min_signal, min_length)
all_orfs = bf[m_base]
# create the selected ORFs based on either Bayes factor or chisq_alpha
if use_chi_square:
M = | |
o00oOOO0Ooo , True )
if 88 - 88: OoO0O00 . I1Ii111 / I11i
IiiI1iii1iIiiI += o00oOOO0Ooo + " since " + IiiIiiIIII
if 47 - 47: OoO0O00 + I1ii11iIi11i . ooOoO0o
if ( lisp . lisp_rloc_probing ) :
IiiIiIIi1 = II1iiiiI1 . print_rloc_probe_rtt ( )
if ( IiiIiIIi1 != "none" ) : IiiI1iii1iIiiI += "<br>rtt: {}, hops: {}" . format ( IiiIiIIi1 , II1iiiiI1 . print_rloc_probe_hops ( ) )
if 40 - 40: iII111i . OoOoOO00 * O0
if 6 - 6: I1IiiI - II111iiii . I1IiiI + I11i . OOooOOo
if 74 - 74: i1IIi
if ( lisp . lisp_rloc_probing and II1iiiiI1 . rloc_next_hop != None ) :
i111iiIIII , Ii11ii1 = II1iiiiI1 . rloc_next_hop
IiiI1iii1iIiiI += "<br>{}nh {}({}) " . format ( lisp . lisp_space ( 2 ) , Ii11ii1 , i111iiIIII )
if 1 - 1: iIii1I11I1II1 % oO0o . iIii1I11I1II1
if 10 - 10: iII111i + OoO0O00
II1iiiiI1 = II1iiiiI1 . next_rloc
if ( II1iiiiI1 == None ) : break
IiiI1iii1iIiiI += "<br>"
if 6 - 6: OoO0O00
if 99 - 99: o0oOOo0O0Ooo * OOooOOo % oO0o * oO0o + OoooooooOO
if ( OOOOooO0 == "encapsulate" ) :
Ii1iiIi1I11i = lisp . LISP_DATA_PORT
if ( lisp . lisp_i_am_rtr and Oo0o0ooOoO . translated_port != 0 ) :
Ii1iiIi1I11i = Oo0o0ooOoO . translated_port
if 82 - 82: I11i / OoOoOO00 - OOooOOo / ooOoO0o
if 50 - 50: OOooOOo + OoO0O00 . i11iIiiIii + I1ii11iIi11i + i11iIiiIii
Ii1I1i = Oo0o0ooOoO . rloc . print_address_no_iid ( ) + ":" + str ( Ii1iiIi1I11i )
if ( lisp . lisp_crypto_keys_by_rloc_encap . has_key ( Ii1I1i ) ) :
oOOOooOo0O = lisp . lisp_crypto_keys_by_rloc_encap [ Ii1I1i ] [ 1 ]
if ( oOOOooOo0O != None and oOOOooOo0O . shared_key != None ) :
OOOOooO0 = "encap-crypto-" + oOOOooOo0O . cipher_suite_string
if 31 - 31: oO0o * I1Ii111 . OoOoOO00 * I11i
if 28 - 28: IiII + I1IiiI - Oo0Ooo % OOooOOo . I11i + I1IiiI
if 72 - 72: Ii1I / Oo0Ooo / oO0o * OoOoOO00 + OOooOOo
if 58 - 58: o0oOOo0O0Ooo % I1IiiI . I1IiiI * OoO0O00 - IiII . OoooooooOO
output += lisp_table_row ( OOOO0oo0 , Oo000o + "<br>" + i1Iii , O0oo0ooo0 , i11iI11I1I ,
ii1111I , IiiI1iii1iIiiI + "<br>" + OOOOooO0 ,
str ( Oo0o0ooOoO . priority ) + "/" + str ( Oo0o0ooOoO . weight ) + "<br>" + str ( Oo0o0ooOoO . mpriority ) + "/" + str ( Oo0o0ooOoO . mweight ) )
if 10 - 10: I1Ii111
if 48 - 48: iII111i * i1IIi % OoooooooOO * Ii1I * OoO0O00
if ( OOOO0oo0 != "" ) : OOOO0oo0 = ""
if ( Oo000o != "" ) : Oo000o , i1Iii , i11iI11I1I = ( "" , "" , "" )
if 7 - 7: iII111i . Ii1I . iII111i - I1Ii111
return ( [ True , output ] )
if 33 - 33: ooOoO0o + OoooooooOO - OoO0O00 / i1IIi / OoooooooOO
if 82 - 82: I1ii11iIi11i / OOooOOo - iII111i / Oo0Ooo * OoO0O00
if 55 - 55: OoooooooOO
if 73 - 73: OoOoOO00 - I1ii11iIi11i % Oo0Ooo + I1ii11iIi11i - O0 . OoO0O00
if 38 - 38: O0
if 79 - 79: i1IIi . oO0o
if 34 - 34: I1Ii111 * II111iiii
if 71 - 71: IiII
def lisp_walk_map_cache(mc, output):
    """Render one map-cache entry, including any per-source sub-entries.

    Returns the same [keep-walking, output] pair lisp_display_map_cache
    returns, so this function can be used as a map-cache walk callback.
    """

    # Entry with no group: display it directly.
    if (mc.group.is_null()): return (lisp_display_map_cache(mc, output))

    # Group entry with no per-source state: nothing further to display.
    if (mc.source_cache == None): return ([True, output])

    # Walk the per-source cache of this group entry, displaying each one.
    output = mc.source_cache.walk_cache(lisp_display_map_cache, output)
    return ([True, output])
if 35 - 35: II111iiii / OoOoOO00 - O0 . II111iiii
if 55 - 55: Oo0Ooo % i1IIi * I11i
if 95 - 95: OOooOOo / II111iiii - o0oOOo0O0Ooo % I1Ii111 . I11i
if 63 - 63: iIii1I11I1II1 / ooOoO0o
if 24 - 24: Oo0Ooo / iIii1I11I1II1 % OOooOOo * OoOoOO00 - iIii1I11I1II1
if 50 - 50: II111iiii
if 39 - 39: II111iiii . OoOoOO00 - Oo0Ooo * i1IIi . OoooooooOO
def lisp_show_myrlocs(output):
    """Append an HTML summary line of this system's local RLOCs to output."""
    if (lisp.lisp_myrlocs[2] == None):
        output += "No local RLOCs found"
    else:
        device = lisp.lisp_print_cour(lisp.lisp_myrlocs[2])

        # IPv4 RLOC, with the IPv4 netstat routing table attached via
        # lisp_span.
        addr4 = lisp.lisp_myrlocs[0].print_address_no_iid() if lisp.lisp_myrlocs[0] != None else "not found"
        addr4 = lisp.lisp_print_cour(addr4)
        flag = "-f inet" if lisp.lisp_is_macos() else "-4"
        routes = commands.getoutput("netstat -rn {}".format(flag))
        addr4 = lisp.lisp_span(addr4, routes)

        # IPv6 RLOC, with the IPv6 netstat routing table attached via
        # lisp_span.
        addr6 = lisp.lisp_myrlocs[1].print_address_no_iid() if lisp.lisp_myrlocs[1] != None else "not found"
        addr6 = lisp.lisp_print_cour(addr6)
        flag = "-f inet6" if lisp.lisp_is_macos() else "-6"
        routes = commands.getoutput("netstat -rn {}".format(flag))
        addr6 = lisp.lisp_span(addr6, routes)

        template = ("<i>Local RLOCs found on interface </i>{}<i>, " +
            "IPv4: </i>{}<i>, IPv6: </i>{}")
        output += lisp.lisp_print_sans(template).format(device, addr4, addr6)

    output += "<br>"
    return (output)
if 72 - 72: O0 + o0oOOo0O0Ooo + I1IiiI / Oo0Ooo
if 83 - 83: IiII - I1IiiI . Ii1I
if 34 - 34: OoOoOO00 - oO0o * OoooooooOO
if 5 - 5: i11iIiiIii * iII111i - Ii1I - I1ii11iIi11i - i1IIi + iII111i
if 4 - 4: ooOoO0o + O0 . i1IIi * I1ii11iIi11i - o0oOOo0O0Ooo
if 42 - 42: o0oOOo0O0Ooo * OoOoOO00 . OoO0O00 - iII111i / II111iiii
if 25 - 25: Oo0Ooo % OoOoOO00
def lisp_display_nat_info ( output , dc , dodns ) :
o00O = len ( lisp . lisp_nat_state_info )
if ( o00O == 0 ) : return ( output )
if 36 - 36: OOooOOo * OoO0O00 - I1ii11iIi11i + iII111i
I1I1i1 = "{} entries in the NAT-traversal port table" . format ( o00O )
IIIiiiiiI1I = lisp . lisp_span ( "NAT-Traversed xTR Information:" , I1I1i1 )
if 64 - 64: IiII * iIii1I11I1II1 . I1ii11iIi11i / I11i * iIii1I11I1II1
if ( dodns ) :
output += lisp_table_header ( IIIiiiiiI1I , "xTR Hostname" ,
"Translated<br>Address" , "Translated<br>{} Port" . format ( dc ) ,
"Last<br>Info-Request" , "NAT DNS Name" )
else :
output += lisp_table_header ( IIIiiiiiI1I , "xTR Hostname" ,
"Translated<br>Address" , "Translated<br>{} Port" . format ( dc ) ,
"Last<br>Info-Request" )
if 4 - 4: ooOoO0o % IiII . I1Ii111
if 91 - 91: I1ii11iIi11i + iIii1I11I1II1 % IiII
for O0o0OOOO0 | |
# mysql/reflection.py
# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import re
from .enumerated import _EnumeratedValues
from .enumerated import SET
from .types import DATETIME
from .types import TIME
from .types import TIMESTAMP
from ... import log
from ... import types as sqltypes
from ... import util
class ReflectedState(object):
    """Stores raw information about a SHOW CREATE TABLE statement."""

    def __init__(self):
        # table identity and raw table-level options
        self.table_name = None
        self.table_options = {}

        # parsed column dictionaries, in declaration order
        self.columns = []

        # indexes and constraints collected while parsing
        self.keys = []
        self.fk_constraints = []
        self.ck_constraints = []
@log.class_logger
class MySQLTableDefinitionParser(object):
"""Parses the results of a SHOW CREATE TABLE statement."""
    def __init__(self, dialect, preparer):
        """Create a parser bound to a dialect and identifier preparer.

        :param dialect: the MySQL dialect in use; supplies ``ischema_names``
          for column type lookup and the name used to prefix reflected
          table options.
        :param preparer: the identifier preparer; supplies the quote
          characters used when building the parsing regexes.
        """
        self.dialect = dialect
        self.preparer = preparer
        # compile all parsing regexes once, up front
        self._prep_regexes()
    def parse(self, show_create, charset):
        """Parse SHOW CREATE TABLE output into a ReflectedState.

        Dispatches each line to the appropriate ``_parse_*`` helper based
        on its leading characters.

        :param show_create: the full SHOW CREATE TABLE output
        :param charset: the connection charset, stored on the state
        """
        state = ReflectedState()
        state.charset = charset
        for line in re.split(r"\r?\n", show_create):
            # column definitions start with an indented quoted identifier
            if line.startswith(" " + self.preparer.initial_quote):
                self._parse_column(line, state)
            # a regular table options line
            elif line.startswith(") "):
                self._parse_table_options(line, state)
            # an ANSI-mode table options line
            elif line == ")":
                pass
            elif line.startswith("CREATE "):
                self._parse_table_name(line, state)
            # Not present in real reflection, but may be if
            # loading from a file.
            elif not line:
                pass
            else:
                # anything else: KEY / CONSTRAINT / PARTITION lines
                type_, spec = self._parse_constraints(line)
                if type_ is None:
                    util.warn("Unknown schema content: %r" % line)
                elif type_ == "key":
                    state.keys.append(spec)
                elif type_ == "fk_constraint":
                    state.fk_constraints.append(spec)
                elif type_ == "ck_constraint":
                    state.ck_constraints.append(spec)
                else:
                    # recognized but unreflected content (e.g. partitions)
                    pass
        return state
    def _parse_constraints(self, line):
        """Parse a KEY or CONSTRAINT line.

        :param line: A line of SHOW CREATE TABLE output

        :return: a two-tuple ``(type, spec)``, where ``type`` is one of
          ``"key"``, ``"fk_constraint"``, ``"ck_constraint"``,
          ``"partition"``, or ``None`` when the line was not recognized.
        """
        # KEY
        m = self._re_key.match(line)
        if m:
            spec = m.groupdict()
            # convert columns into name, length pairs
            # NOTE: we may want to consider SHOW INDEX as the
            # format of indexes in MySQL becomes more complex
            spec["columns"] = self._parse_keyexprs(spec["columns"])
            if spec["version_sql"]:
                # a versioned-comment suffix may carry the WITH PARSER clause
                m2 = self._re_key_version_sql.match(spec["version_sql"])
                if m2 and m2.groupdict()["parser"]:
                    spec["parser"] = m2.groupdict()["parser"]
            if spec["parser"]:
                # unquote the parser identifier
                spec["parser"] = self.preparer.unformat_identifiers(
                    spec["parser"]
                )[0]
            return "key", spec
        # FOREIGN KEY CONSTRAINT
        m = self._re_fk_constraint.match(line)
        if m:
            spec = m.groupdict()
            spec["table"] = self.preparer.unformat_identifiers(spec["table"])
            # keep only the column names from the (name, length, dir) tuples
            spec["local"] = [c[0] for c in self._parse_keyexprs(spec["local"])]
            spec["foreign"] = [
                c[0] for c in self._parse_keyexprs(spec["foreign"])
            ]
            return "fk_constraint", spec
        # CHECK constraint
        m = self._re_ck_constraint.match(line)
        if m:
            spec = m.groupdict()
            return "ck_constraint", spec
        # PARTITION and SUBPARTITION
        m = self._re_partition.match(line)
        if m:
            # Punt!
            return "partition", line
        # No match.
        return (None, line)
def _parse_table_name(self, line, state):
"""Extract the table name.
:param line: The first line of SHOW CREATE TABLE
"""
regex, cleanup = self._pr_name
m = regex.match(line)
if m:
state.table_name = cleanup(m.group("name"))
    def _parse_table_options(self, line, state):
        """Build a dictionary of all reflected table-level options.

        Options are stored on the state keyed as
        ``"<dialect name>_<option>"``.

        :param line: The final line of SHOW CREATE TABLE output.
        """
        options = {}
        if not line or line == ")":
            pass
        else:
            rest_of_line = line[:]
            for regex, cleanup in self._pr_options:
                m = regex.search(rest_of_line)
                if not m:
                    continue
                directive, value = m.group("directive"), m.group("val")
                if cleanup:
                    value = cleanup(value)
                options[directive.lower()] = value
                # remove the matched text so subsequent patterns cannot
                # re-match inside an already-consumed option
                rest_of_line = regex.sub("", rest_of_line)
        # drop options that vary per-instance and are not useful to reflect
        for nope in ("auto_increment", "data directory", "index directory"):
            options.pop(nope, None)
        for opt, val in options.items():
            state.table_options["%s_%s" % (self.dialect.name, opt)] = val
    def _parse_column(self, line, state):
        """Extract column details.

        Falls back to a 'minimal support' variant if full parse fails.

        Appends a dictionary of column attributes to ``state.columns``;
        warns and returns early when the line cannot be parsed at all.

        :param line: Any column-bearing line from SHOW CREATE TABLE
        """
        spec = None
        m = self._re_column.match(line)
        if m:
            spec = m.groupdict()
            spec["full"] = True
        else:
            # relaxed pattern: extracts name/type only
            m = self._re_column_loose.match(line)
            if m:
                spec = m.groupdict()
                spec["full"] = False
        if not spec:
            util.warn("Unknown column definition %r" % line)
            return
        if not spec["full"]:
            util.warn("Incomplete reflection of column definition %r" % line)
        name, type_, args = spec["name"], spec["coltype"], spec["arg"]
        try:
            col_type = self.dialect.ischema_names[type_]
        except KeyError:
            util.warn(
                "Did not recognize type '%s' of column '%s'" % (type_, name)
            )
            col_type = sqltypes.NullType
        # Column type positional arguments eg. varchar(32)
        if args is None or args == "":
            type_args = []
        elif args[0] == "'" and args[-1] == "'":
            # quoted argument list, e.g. enum('a','b')
            type_args = self._re_csv_str.findall(args)
        else:
            type_args = [int(v) for v in self._re_csv_int.findall(args)]
        # Column type keyword options
        type_kw = {}
        if issubclass(col_type, (DATETIME, TIME, TIMESTAMP)):
            if type_args:
                # first positional arg is fractional seconds precision,
                # e.g. DATETIME(6)
                type_kw["fsp"] = type_args.pop(0)
        for kw in ("unsigned", "zerofill"):
            if spec.get(kw, False):
                type_kw[kw] = True
        for kw in ("charset", "collate"):
            if spec.get(kw, False):
                type_kw[kw] = spec[kw]
        if issubclass(col_type, _EnumeratedValues):
            type_args = _EnumeratedValues._strip_values(type_args)
            if issubclass(col_type, SET) and "" in type_args:
                # a SET whose values include '' is flagged for bitwise
                # retrieval
                type_kw["retrieve_as_bitwise"] = True
        type_instance = col_type(*type_args, **type_kw)
        col_kw = {}
        # NOT NULL
        col_kw["nullable"] = True
        # this can be "NULL" in the case of TIMESTAMP
        if spec.get("notnull", False) == "NOT NULL":
            col_kw["nullable"] = False
        # AUTO_INCREMENT
        if spec.get("autoincr", False):
            col_kw["autoincrement"] = True
        elif issubclass(col_type, sqltypes.Integer):
            col_kw["autoincrement"] = False
        # DEFAULT
        default = spec.get("default", None)
        if default == "NULL":
            # eliminates the need to deal with this later.
            default = None
        comment = spec.get("comment", None)
        if comment is not None:
            # unescape backslashes and doubled single quotes
            comment = comment.replace("\\\\", "\\").replace("''", "'")
        # generated (computed) column, if present
        sqltext = spec.get("generated")
        if sqltext is not None:
            computed = dict(sqltext=sqltext)
            persisted = spec.get("persistence")
            if persisted is not None:
                computed["persisted"] = persisted == "STORED"
            col_kw["computed"] = computed
        col_d = dict(
            name=name, type=type_instance, default=default, comment=comment
        )
        col_d.update(col_kw)
        state.columns.append(col_d)
def _describe_to_create(self, table_name, columns):
"""Re-format DESCRIBE output as a SHOW CREATE TABLE string.
DESCRIBE is a much simpler reflection and is sufficient for
reflecting views for runtime use. This method formats DDL
for columns only- keys are omitted.
:param columns: A sequence of DESCRIBE or SHOW COLUMNS 6-tuples.
SHOW FULL COLUMNS FROM rows must be rearranged for use with
this function.
"""
buffer = []
for row in columns:
(name, col_type, nullable, default, extra) = [
row[i] for i in (0, 1, 2, 4, 5)
]
line = [" "]
line.append(self.preparer.quote_identifier(name))
line.append(col_type)
if not nullable:
line.append("NOT NULL")
if default:
if "auto_increment" in default:
pass
elif col_type.startswith("timestamp") and default.startswith(
"C"
):
line.append("DEFAULT")
line.append(default)
elif default == "NULL":
line.append("DEFAULT")
line.append(default)
else:
line.append("DEFAULT")
line.append("'%s'" % default.replace("'", "''"))
if extra:
line.append(extra)
buffer.append(" ".join(line))
return "".join(
[
(
"CREATE TABLE %s (\n"
% self.preparer.quote_identifier(table_name)
),
",\n".join(buffer),
"\n) ",
]
)
def _parse_keyexprs(self, identifiers):
"""Unpack '"col"(2),"col" ASC'-ish strings into components."""
return self._re_keyexprs.findall(identifiers)
def _prep_regexes(self):
"""Pre-compile regular expressions."""
self._re_columns = []
self._pr_options = []
_final = self.preparer.final_quote
quotes = dict(
zip(
("iq", "fq", "esc_fq"),
[
re.escape(s)
for s in (
self.preparer.initial_quote,
_final,
self.preparer._escape_identifier(_final),
)
],
)
)
self._pr_name = _pr_compile(
r"^CREATE (?:\w+ +)?TABLE +"
r"%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +\($" % quotes,
self.preparer._unescape_identifier,
)
# `col`,`col2`(32),`col3`(15) DESC
#
self._re_keyexprs = _re_compile(
r"(?:"
r"(?:%(iq)s((?:%(esc_fq)s|[^%(fq)s])+)%(fq)s)"
r"(?:\((\d+)\))?(?: +(ASC|DESC))?(?=\,|$))+" % quotes
)
# 'foo' or 'foo','bar' or 'fo,o','ba''a''r'
self._re_csv_str = _re_compile(r"\x27(?:\x27\x27|[^\x27])*\x27")
# 123 or 123,456
self._re_csv_int = _re_compile(r"\d+")
# `colname` <type> [type opts]
# (NOT NULL | NULL)
# DEFAULT ('value' | CURRENT_TIMESTAMP...)
# COMMENT 'comment'
# COLUMN_FORMAT (FIXED|DYNAMIC|DEFAULT)
# STORAGE (DISK|MEMORY)
self._re_column = _re_compile(
r" "
r"%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +"
r"(?P<coltype>\w+)"
r"(?:\((?P<arg>(?:\d+|\d+,\d+|"
r"(?:'(?:''|[^'])*',?)+))\))?"
r"(?: +(?P<unsigned>UNSIGNED))?"
r"(?: +(?P<zerofill>ZEROFILL))?"
r"(?: +CHARACTER SET +(?P<charset>[\w_]+))?"
r"(?: +COLLATE +(?P<collate>[\w_]+))?"
r"(?: +(?P<notnull>(?:NOT )?NULL))?"
r"(?: +DEFAULT +(?P<default>"
r"(?:NULL|'(?:''|[^'])*'|[\-\w\.\(\)]+"
r"(?: +ON UPDATE [\-\w\.\(\)]+)?)"
r"))?"
r"(?: +(?:GENERATED ALWAYS)? ?AS +(?P<generated>\("
r".*\))? ?(?P<persistence>VIRTUAL|STORED)?)?"
r"(?: +(?P<autoincr>AUTO_INCREMENT))?"
r"(?: +COMMENT +'(?P<comment>(?:''|[^'])*)')?"
r"(?: +COLUMN_FORMAT +(?P<colfmt>\w+))?"
r"(?: +STORAGE +(?P<storage>\w+))?"
r"(?: +(?P<extra>.*))?"
r",?$" % quotes
)
# Fallback, try to parse as little as possible
self._re_column_loose = _re_compile(
r" "
r"%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +"
r"(?P<coltype>\w+)"
r"(?:\((?P<arg>(?:\d+|\d+,\d+|\x27(?:\x27\x27|[^\x27])+\x27))\))?"
r".*?(?P<notnull>(?:NOT )NULL)?" % quotes
)
# (PRIMARY|UNIQUE|FULLTEXT|SPATIAL) INDEX `name` (USING (BTREE|HASH))?
# (`col` (ASC|DESC)?, `col` (ASC|DESC)?)
# KEY_BLOCK_SIZE size | WITH PARSER name /*!50100 WITH PARSER name */
self._re_key = _re_compile(
r" "
r"(?:(?P<type>\S+) )?KEY"
r"(?: +%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s)?"
r"(?: +USING +(?P<using_pre>\S+))?"
r" +\((?P<columns>.+?)\)"
r"(?: +USING +(?P<using_post>\S+))?"
r"(?: +KEY_BLOCK_SIZE *[ =]? *(?P<keyblock>\S+))?"
r"(?: +WITH PARSER +(?P<parser>\S+))?"
r"(?: +COMMENT +(?P<comment>(\x27\x27|\x27([^\x27])*?\x27)+))?"
r"(?: +/\*(?P<version_sql>.+)\*/ +)?"
r",?$" % quotes
)
# https://forums.mysql.com/read.php?20,567102,567111#msg-567111
# It means if the MySQL version >= \d+, execute what's in the comment
self._re_key_version_sql = _re_compile(
r"\!\d+ " r"(?: *WITH PARSER +(?P<parser>\S+) *)?"
)
# CONSTRAINT `name` FOREIGN KEY (`local_col`)
# REFERENCES `remote` (`remote_col`)
# MATCH FULL | MATCH PARTIAL | MATCH SIMPLE
# ON DELETE CASCADE ON UPDATE RESTRICT
#
# unique constraints come back as KEYs
kw = quotes.copy()
kw["on"] = "RESTRICT|CASCADE|SET NULL|NOACTION"
self._re_fk_constraint = _re_compile(
r" "
r"CONSTRAINT +"
r"%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +"
r"FOREIGN KEY +"
r"\((?P<local>[^\)]+?)\) REFERENCES +"
r"(?P<table>%(iq)s[^%(fq)s]+%(fq)s"
r"(?:\.%(iq)s[^%(fq)s]+%(fq)s)?) +"
r"\((?P<foreign>[^\)]+?)\)"
r"(?: +(?P<match>MATCH \w+))?"
r"(?: +ON DELETE (?P<ondelete>%(on)s))?"
| |
<gh_stars>0
# Copyright 2019 The TensorNetwork Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Text, Union, Optional, Sequence, Tuple
from tensornetwork.tensor import Tensor
from tensornetwork import ncon_interface
def _check_backends(tensors: Sequence[Tensor], fname: str) -> Tuple[bool, str]:
""" Checks that each of tensors has the same backend, returning True and an
empty string if so, or False and an error string if not.
Args:
tensors: The list of tensors whose backends to check.
fname: The name of the calling function, which will go into the errstring.
Returns:
(flag, errstr): Whether all backends agree, and an error message if not.
"""
backend_names = [tensor.backend.name for tensor in tensors]
backends_check = [backend_names[0] == name for name in backend_names[1:]]
all_backends_same = all(backends_check)
errstr = ""
if not all_backends_same:
errstr = "All Tensors fed to " + fname + "must have the same backend."
errstr += "Backends were: \n"
errstr += str([name + "\n" for name in backend_names])
return all_backends_same, errstr
def tensordot(a: Tensor, b: Tensor,
              axes: Union[int, Sequence[Sequence[int]]]) -> Tensor:
    """Do a tensordot (contraction) of Tensors `a` and `b` over the given
    axes. The behaviour of this function largely matches that of
    np.tensordot.

    Args:
        a: A Tensor.
        b: Another Tensor.
        axes: Two lists of integers. These values are the contraction
            axes. A single integer may also be supplied, in which case both
            tensors are contracted over this axis.
    Raises:
        ValueError, if a and b have different backends.
    Returns:
        The result of the tensordot, a Tensor.
    """
    if a.backend.name != b.backend.name:
        # Bug fix: the original message was missing the space before "and",
        # yielding e.g. "numpyand jax.".
        errstr = "Tried to Tensordot Tensors with differing backends \n"
        errstr += a.backend.name + " and " + b.backend.name + "."
        raise ValueError(errstr)
    out_array = a.backend.tensordot(a.array, b.array, axes)
    out_tensor = Tensor(out_array, backend=a.backend)
    return out_tensor
def reshape(tensor: Tensor, new_shape: Sequence[int]) -> Tensor:
    """Reshape *tensor* to ``new_shape``.

    Args:
        tensor: Tensor to reshape.
        new_shape: The new shape.
    Returns:
        The reshaped Tensor.
    """
    reshaped = tensor.reshape(new_shape)
    return reshaped
def transpose(tensor: Tensor, perm: Optional[Sequence[int]] = None) -> Tensor:
    """Return a new `Tensor` transposed according to the permutation set
    by `perm`. By default the axes are reversed.

    Args:
        tensor: The Tensor to transpose.
        perm: The permutation. If None (default) the index order is
            reversed.
    Returns:
        The transposed `Tensor`.
    """
    transposed = tensor.transpose(perm=perm)
    return transposed
def take_slice(tensor: Tensor, start_indices: Tuple[int, ...],
               slice_sizes: Tuple[int, ...]) -> Tensor:
    """Obtains a slice of a Tensor based on start_indices and slice_sizes.

    Args:
        tensor: A Tensor.
        start_indices: Tuple of integers denoting start indices of slice.
        slice_sizes: Tuple of integers denoting size of slice along each
            axis.
    Returns:
        The slice, a Tensor.
    """
    backend = tensor.backend
    sliced_array = backend.slice(tensor.array, start_indices, slice_sizes)
    return Tensor(sliced_array, backend=backend)
def shape(tensor: Tensor) -> Tuple[int, ...]:
    """Get the shape of a Tensor as a tuple of integers.

    Args:
        tensor: A Tensor.
    Returns:
        The shape of the input Tensor.
    """
    tensor_shape = tensor.shape
    return tensor_shape
def sqrt(tensor: Tensor) -> Tensor:
    """Take the square root (element wise) of a given Tensor."""
    backend = tensor.backend
    return Tensor(backend.sqrt(tensor.array), backend=backend)
def outer(tensor1: Tensor, tensor2: Tensor) -> Tensor:
    """Calculate the outer product of the two given Tensors."""
    # Both operands must live on the same backend.
    same_backend, errstr = _check_backends([tensor1, tensor2], "outer")
    if not same_backend:
        raise ValueError(errstr)
    backend = tensor1.backend
    out_data = backend.outer_product(tensor1.array, tensor2.array)
    return Tensor(out_data, backend=backend)
def einsum(expression: Text, *tensors: Tensor, optimize: bool) -> Tensor:
    """Calculate sum of products of Tensors according to expression."""
    # All operands must live on the same backend.
    same_backend, errstr = _check_backends(tensors, "einsum")
    if not same_backend:
        raise ValueError(errstr)
    backend = tensors[0].backend
    arrays = [t.array for t in tensors]
    result_data = backend.einsum(expression, *arrays, optimize=optimize)
    return Tensor(result_data, backend=backend)
def conj(tensor: Tensor) -> Tensor:
    """Return the complex conjugate of `tensor`.

    Args:
        tensor: A Tensor.
    Returns:
        The complex conjugated Tensor.
    """
    conjugated = tensor.conj()
    return conjugated
def hconj(tensor: Tensor, perm: Optional[Sequence[int]] = None) -> Tensor:
    """The Hermitian conjugated tensor; e.g. the complex conjugate
    transposed by the permutation set by `perm`. By default the axes are
    reversed.

    Args:
        tensor: The Tensor to conjugate.
        perm: The permutation. If None (default) the index order is
            reversed.
    Returns:
        The Hermitian conjugated `Tensor`.
    """
    result = tensor.hconj(perm=perm)
    return result
def sin(tensor: Tensor) -> Tensor:
    """Return the elementwise sine of `tensor`.

    Args:
        tensor: A Tensor.
    Returns:
        Tensor
    """
    backend = tensor.backend
    return Tensor(backend.sin(tensor.array), backend=backend)
def cos(tensor: Tensor) -> Tensor:
    """Return the elementwise cosine of `tensor`.

    Args:
        tensor: A Tensor.
    Returns:
        Tensor
    """
    backend = tensor.backend
    return Tensor(backend.cos(tensor.array), backend=backend)
def exp(tensor: Tensor) -> Tensor:
    """Return the elementwise exponential of `tensor`.

    Args:
        tensor: A Tensor.
    Returns:
        Tensor
    """
    backend = tensor.backend
    return Tensor(backend.exp(tensor.array), backend=backend)
def log(tensor: Tensor) -> Tensor:
    """Return the elementwise natural logarithm of `tensor`.

    Args:
        tensor: A Tensor.
    Returns:
        Tensor
    """
    backend = tensor.backend
    return Tensor(backend.log(tensor.array), backend=backend)
def diagonal(tensor: Tensor, offset: int = 0, axis1: int = -2,
             axis2: int = -1) -> Tensor:
    """Extracts the offset'th diagonal from the matrix slice of tensor
    indexed by (axis1, axis2).

    Args:
        tensor: A Tensor.
        offset: Offset of the diagonal from the main diagonal.
        axis1, axis2: Indices of the matrix slice to extract from.
    Returns:
        out: A 1D Tensor storing the elements of the selected diagonal.
    """
    backend = tensor.backend
    diag = backend.diagonal(
        tensor.array, offset=offset, axis1=axis1, axis2=axis2)
    return Tensor(diag, backend=backend)
def diagflat(tensor: Tensor, k: int = 0) -> Tensor:
    """Flattens tensor and places its elements at the k'th diagonal of a
    new (tensor.size + k, tensor.size + k) `Tensor` of zeros.

    Args:
        tensor: A Tensor.
        k: The elements of tensor will be stored at this diagonal.
    Returns:
        out: A (tensor.size + k, tensor.size + k) `Tensor` with the
            elements of tensor on its kth diagonal.
    """
    backend = tensor.backend
    flat_diag = backend.diagflat(tensor.array, k=k)
    return Tensor(flat_diag, backend=backend)
def trace(tensor: Tensor, offset: int = 0, axis1: int = -2,
          axis2: int = -1) -> Tensor:
    """Calculate the sum along diagonal entries of the given Tensor. The
    entries of the offset'th diagonal of the matrix slice of tensor indexed
    by (axis1, axis2) are summed.

    Args:
        tensor: A Tensor.
        offset: Offset of the diagonal from the main diagonal.
        axis1, axis2: Indices of the matrix slice to extract from.
    Returns:
        out: The trace.
    """
    backend = tensor.backend
    summed = backend.trace(
        tensor.array, offset=offset, axis1=axis1, axis2=axis2)
    return Tensor(summed, backend=backend)
def ncon(
tensors: Sequence[Tensor],
network_structure: Sequence[Sequence[Union[str, int]]],
con_order: Optional[Sequence] = None,
out_order: Optional[Sequence] = None,
check_network: bool = True,
) -> Tensor:
r"""Contracts a list of tn.Tensor according to a tensor network
specification.
The network is provided as a list of lists, one for each
tensor, specifying the labels for the edges connected to that tensor.
Labels can be any numbers or strings. Negative number-type labels
and string-type labels with a prepended hyphen ('-') are open labels
and remain uncontracted.
Positive number-type labels and string-type labels with no prepended
hyphen ('-') are closed labels and are contracted.
Any open label appearing more than once is treated as an open
batch label. Any closed label appearing more than once is treated as
a closed batch label.
Upon finishing the contraction, all open batch labels will have been
collapsed into a single dimension, and all closed batch labels will
have been summed over.
If `out_order = None`, output labels are ordered according to descending
number ordering and ascending ASCII ordering, with number labels always
appearing before string labels. Example:
network_structure = [[-1, 1, '-rick', '2',-2], [-2, '2', 1, '-morty']]
results in an output order of [-1, -2, '-morty', '-rick'].
If `out_order` is given, the indices of the resulting tensor will be
transposed into this order.
If `con_order = None`, `ncon` will first contract all number labels
in ascending order followed by all string labels in ascending ASCII
order.
If `con_order` is given, `ncon` will contract according to this order.
For example, matrix multiplication:
.. code-block:: python
A = np.array([[1.0, 2.0], | |
"""
import socket
import time
result = open_url(url, 0, timeout)
if result.status == InstallationResult.SUCCESS:
rf = result.result
else:
return result
metadata = rf.info()
rf_size = int(metadata.getheaders("Content-Length")[0])
dl_size = 0
block = 16384
x = 0
y = 0
pb = ProgressBar(x, y, rf_size, numeric=True)
for attempt in range(1,6):
# Attempt download 5 times before giving up
pause = timeout
try:
try:
lf = open(localf, 'ab')
except:
return InstallationResult(False, InstallationResult.ERROR, "Failed to create temporary file.")
while True:
buf = rf.read(block)
if not buf:
break
dl_size += len(buf)
lf.write(buf)
pb.update(dl_size)
lf.close()
except (IOError, socket.timeout), err:
MsgUser.debug(err.strerror)
MsgUser.message("\nDownload failed re-trying (%s)..." % attempt)
pause = 0
if dl_size != rf_size:
time.sleep(pause)
MsgUser.message("\nDownload failed re-trying (%s)..." % attempt)
result = open_url(url, dl_size, timeout)
if result.status == InstallationResult.ERROR:
MsgUser.debug(result.message)
else:
rf = result.result
else:
break
if dl_size != rf_size:
return InstallationResult(False, InstallationResult.ERROR, "Failed to download file.")
return InstallationResult(True, InstallationResult.SUCCESS, '')
# ======================================================================================================================
# create_folder: create a folder (check if it exists before creating it)
# output: 0 -> folder created
#         1 -> folder already exists
#         2 -> permission denied
# ======================================================================================================================
def create_folder(folder):
    """Create *folder* (including parent directories) if it does not exist.

    Returns:
        0 -- folder was created
        1 -- folder already exists
        2 -- creation failed (e.g. permission denied)
    """
    if os.path.exists(folder):
        return 1
    try:
        os.makedirs(folder)
        return 0
    except OSError as e:
        # Bug fix: the original fell through without a return on the EEXIST
        # race (folder created between the exists() check and makedirs()),
        # yielding None. Treat that race as "already exists".
        if e.errno == errno.EEXIST:
            return 1
        return 2
def run(cmd, verbose=1):
    """Run *cmd* through the shell, streaming and capturing its output.

    :param cmd: shell command line to execute
    :param verbose: 0 = silent, 1 = echo the command, 2 = also echo each
        output line as it arrives
    :return: (returncode, output) where output is the combined
        stdout+stderr text with the trailing newline stripped
    """
    if verbose:
        print(bcolors.blue + cmd + bcolors.normal)
    # universal_newlines=True makes stdout yield text in both Python 2 and
    # Python 3; without it the readline() loop below never terminates under
    # Python 3 because b'' != '' (bug fix).
    # NOTE(review): stdin=PIPE is kept from the original but never written
    # to -- presumably to detach the child from the terminal; confirm.
    process = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT,
                               universal_newlines=True)
    output_final = ''
    while True:
        output = process.stdout.readline()
        if output == '' and process.poll() is not None:
            break
        if output:
            if verbose == 2:
                print(output.strip())
            output_final += output.strip() + '\n'
    # Drop the trailing newline added by the last iteration.
    return process.returncode, output_final[0:-1]
def edit_profile_files(path_home, SCT_DIR):
    """Register SCT in the user's shell startup files.

    Scrubs previous SCT entries from any profile files found under
    ``path_home``, appends fresh SCT exports to ``.bashrc``, mirrors those
    exports into this process's environment, ensures the default profile
    file sources ``.bashrc``, then attempts to source it.

    :param path_home: user's home directory path (expected to end with '/')
    :param SCT_DIR: installation directory of the toolbox
    """
    # Files are listed in inverse order of reading when shell starts
    file_profile = ['.bashrc', '.profile', '.bash_login', '.bash_profile', ]
    # Will hold the last profile file found; '' if none exists yet.
    file_profile_default = ''
    # TODO: deal with TSCH and CSH
    # edit_profile_files()
    # loop across profile files
    print "Delete previous SCT entries in existing profile files..."
    for i_file in file_profile:
        # delete previous SCT entries
        if not os.path.isfile(path_home+i_file):
            print '.. ' + i_file + ': Not found.'
        else:
            print '.. ' + i_file + ': Found! Deleting previous SCT entries...'
            # update default_file_profile
            file_profile_default = i_file
            if "SPINALCORDTOOLBOX" in open(path_home+i_file).read():
                # NOTE(review): this awk pipeline always rewrites ~/.bashrc,
                # even when the SCT marker was found in a different profile
                # file -- presumably intentional since entries are only ever
                # added to .bashrc below; confirm.
                cmd = "awk '!/SCT_DIR|SPINALCORDTOOLBOX|ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS/' ~/.bashrc > .bashrc_temp && > ~/.bashrc && cat .bashrc_temp >> ~/.bashrc && rm .bashrc_temp"
                status, output = run(cmd)
                if status != 0:
                    print '\nERROR: \n' + output + '\nExit program.\n'
                    sys.exit()
    print "Add entries to .bashrc..."
    with open(path_home+".bashrc", "a") as bashrc:
        bashrc.write("\n# SPINALCORDTOOLBOX (added on " + str(date.today()) + ")")
        bashrc.write("\nSCT_DIR=\"" + SCT_DIR + "\"")
        bashrc.write("\nexport PATH=${PATH}:$SCT_DIR/bin")
        bashrc.write("\nexport PYTHONPATH=${PYTHONPATH}:$SCT_DIR/scripts")
        bashrc.write("\nexport SCT_DIR PATH")
        from multiprocessing import cpu_count
        # Cap ITK's default thread count at the machine's CPU count.
        number_of_cpu = cpu_count()
        bashrc.write("\nexport ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS="+str(number_of_cpu))
        # (redundant: the with-block already closes the file)
        bashrc.close()
    # Because python script cannot source bashrc or bash_profile, it is necessary to modify environment in the
    # current instance of bash
    os.environ['SCT_DIR'] = SCT_DIR
    os.environ['PATH'] = os.environ['PATH']+":"+SCT_DIR+"/bin"
    if 'PYTHONPATH' in os.environ:
        os.environ['PYTHONPATH'] = os.environ['PYTHONPATH']+":"+SCT_DIR+"/scripts"
    else:
        os.environ['PYTHONPATH'] = SCT_DIR+"/scripts"
    # Check if no profile file other than .bashrc exist
    print "Check if no profile file other than .bashrc exist..."
    if file_profile_default == '' or file_profile_default == '.bashrc':
        print '.. WARNING: No default profile file found: .bash_profile will be created...'
        file_profile_default = '.bash_profile'
    else:
        print '.. OK: Default profile file is:' + file_profile_default
    # Check if .bashrc is sourced in default profile file
    print "Check if .bashrc is sourced in default profile..."
    if "source ~/.bashrc" in open(path_home+file_profile_default).read():
        print ".. .bashrc seems to be sourced already"
        # TODO: check for the case if the user did comment source ~/.bashrc in his .bash_profile
    else:
        print ".. .bashrc is NOT sourced. Appending to "+file_profile_default+" ..."
        with open(path_home+file_profile_default, "a") as bashprofile:
            bashprofile.write("\nif [ -f ~/.bashrc ]; then")
            bashprofile.write("\n source ~/.bashrc")
            bashprofile.write("\nfi")
            # (redundant: the with-block already closes the file)
            bashprofile.close()
    # launch .bashrc. This line doesn't always work. Best way is to open a new terminal.
    print "Source .bashrc:"
    cmd = ". ~/.bashrc"
    status, output = run(cmd)  # run does not seems to work on Travis when sourcing .bashrc
    # status, output = commands.getstatusoutput(cmd)
    if status != 0:
        print '\nERROR! \n' + output + '\nExit program.\n'
        sys.exit()
class Installer:
def __init__(self):
"""
Path by default is /usr/local/sct + version of SCT. Exemple: /usr/local/sct2.2
The installation is not possible with admin rights because the location of .bashrc and .bash_profile are
not the same when being admin (sudoer) and non-admin. Therefore, the installation needs to be done without
using "sudo", but at some point in the installation process, admin permissions may be needed, for exemple when
installing SCT in /usr/local/ folder.
If SCT is already installed, we do not want to remove it. Therefore, the installation is stopped and the user
is asked to fix the issue by, for exemple, removing or renaming the old version of SCT.
When the user provides the installation folder (using -p option), a folder called "sct" is created and SCT is
installed in it. If the folder already exists, the installation is stopped and the user is asked to empty the
folder.
"""
self.issudo = ""
# check if user is sudoer
if os.geteuid() == 0:
print "Sorry, you are root. Please type: ./installer.py without sudo. Your password will be required " \
"later. Exit program\n"
sys.exit(2)
# fetch version of the toolbox
print '\nFetch version of the Spinal Cord Toolbox...'
with open("spinalcordtoolbox/version.txt", "r") as myfile:
version_sct_str = myfile.read().replace('\n', '')
version_sct = Version(version_sct_str)
print " Version: " + str(version_sct)
self.path_install = "/usr/local/sct" + version_sct_str
# Check input parameters
try:
opts, args = getopt.getopt(sys.argv[1:], 'hp:')
except getopt.GetoptError:
usage()
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
usage()
elif opt == '-p':
self.path_install = arg
if self.path_install[-1:] == '/':
self.path_install += 'sct' + version_sct_str
else:
self.path_install += '/sct' + version_sct_str
print ""
print "============================="
print "SPINAL CORD TOOLBOX INSTALLER"
print "Modified: 2016-01-22"
print "============================="
# Check if OS is compatible with SCT
# The list of compatible OS is available here: TODO: add list of compatible OS
try:
Os()
except UnsupportedOs, err:
MsgUser.debug(str(err))
raise InstallFailed(str(err))
self.SCT_DIR = self.path_install
# Retrieving home folder because in python, paths with ~ do not seem to work.
self.home = os.path.expanduser('~') + '/'
# Check Python
print ('\nCheck which Python distribution is running...')
try:
Python()
except Exception, err:
print err
print "The Python distribution that you are using is not supported by SCT:\n" \
"http://sourceforge.net/p/spinalcordtoolbox/wiki/install_python/\n" \
"You can still use your own Python distribution, but you will have to install " \
"dependencies by yourself.\n" \
"Do you still want to continue?"
install_new = ""
signal.alarm(120)
while install_new not in ["yes", "no"]:
install_new = input_timeout("[yes|no]: ")
signal.alarm(0)
if install_new == "no":
sys.exit(2)
# Check if pip is install
print ('\nCheck if pip is installed...')
status, output = run('pip')
if not status == 0:
print ('.. WARNING: pip is not installed. Installing it with conda...')
# first make sure conda is installed
status, output = run('conda')
if not status == 0:
print ('.. ERROR: conda is not installed either. Please install pip and rerun the installer.\n'+output)
sys.exit(2)
else:
status, output = run('conda install pip -y')
if not status == 0:
print ('.. ERROR: pip installation failed. Please install it and rerun the installer.\n'+output)
sys.exit(2)
else:
print ('.. Testing pip...')
status, output = run('pip')
if not status == 0:
print ('.. ERROR: pip cannot be installed. Please install it and rerun the installer.\n'+output)
sys.exit(2)
else:
print ('.. OK!')
else:
print('.. OK!')
# Check if SCT folder already exists. If so, check if the folder is empty. If not, stops installation.
print ""
print "\nCheck if SCT is already installed..."
if os.path.isdir(self.SCT_DIR) and os.listdir(self.SCT_DIR) != []:
print 'ERROR! SCT is already installed. Two options:\n' \
'1) Use another installation path. E.g.: "./installer.py -p ~"\n' \
'2) Manually remove the current installation (e.g., use "rm -rf").\n'
sys.exit(2)
print ".. Installation path: " + self.path_install
# If SCT folder does not exists, | |
# coding: utf-8
"""
DocuSign REST API
The DocuSign REST API provides you with a powerful, convenient, and simple Web services API for interacting with DocuSign. # noqa: E501
OpenAPI spec version: v2.1
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ReportInProductGet(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
# Maps each python attribute name to its declared type, as generated from
# the Swagger/OpenAPI definition.
swagger_types = {
    'authentication_success_filter': 'str',
    'custom_field_filter': 'str',
    'date_range_custom_from_date': 'str',
    'date_range_custom_to_date': 'str',
    'date_range_filter': 'str',
    'display_order': 'str',
    'envelope_date_type_filter': 'str',
    'envelope_recipient_name_contains_filter': 'str',
    'envelope_status_filter': 'str',
    'envelope_subject_contains_filter': 'str',
    'fields': 'list[ReportInProductField]',
    'last_scheduled_execution_date': 'str',
    'last_scheduled_execution_success_date': 'str',
    'max_download_rows': 'str',
    'max_grid_rows': 'str',
    'max_scheduled_rows': 'str',
    'period_length_filter': 'str',
    'report_customized_id': 'str',
    'report_description': 'str',
    'report_id': 'str',
    'report_name': 'str',
    'report_type': 'str',
    'run_uri': 'str',
    'save_uri': 'str',
    'schedule_id': 'str',
    'sent_by_details': 'ReportInProductSentByDetails',
    'sent_by_filter': 'str',
    'sent_by_ids': 'str',
    'sort_field_direction': 'str',
    'sort_field_name': 'str',
    'verification_status_filter': 'str'
}

# Maps each python attribute name to the corresponding JSON key used on
# the wire by the API.
attribute_map = {
    'authentication_success_filter': 'authenticationSuccessFilter',
    'custom_field_filter': 'customFieldFilter',
    'date_range_custom_from_date': 'dateRangeCustomFromDate',
    'date_range_custom_to_date': 'dateRangeCustomToDate',
    'date_range_filter': 'dateRangeFilter',
    'display_order': 'displayOrder',
    'envelope_date_type_filter': 'envelopeDateTypeFilter',
    'envelope_recipient_name_contains_filter': 'envelopeRecipientNameContainsFilter',
    'envelope_status_filter': 'envelopeStatusFilter',
    'envelope_subject_contains_filter': 'envelopeSubjectContainsFilter',
    'fields': 'fields',
    'last_scheduled_execution_date': 'lastScheduledExecutionDate',
    'last_scheduled_execution_success_date': 'lastScheduledExecutionSuccessDate',
    'max_download_rows': 'maxDownloadRows',
    'max_grid_rows': 'maxGridRows',
    'max_scheduled_rows': 'maxScheduledRows',
    'period_length_filter': 'periodLengthFilter',
    'report_customized_id': 'reportCustomizedId',
    'report_description': 'reportDescription',
    'report_id': 'reportId',
    'report_name': 'reportName',
    'report_type': 'reportType',
    'run_uri': 'runUri',
    'save_uri': 'saveUri',
    'schedule_id': 'scheduleId',
    'sent_by_details': 'sentByDetails',
    'sent_by_filter': 'sentByFilter',
    'sent_by_ids': 'sentByIds',
    'sort_field_direction': 'sortFieldDirection',
    'sort_field_name': 'sortFieldName',
    'verification_status_filter': 'verificationStatusFilter'
}
def __init__(self, authentication_success_filter=None, custom_field_filter=None, date_range_custom_from_date=None, date_range_custom_to_date=None, date_range_filter=None, display_order=None, envelope_date_type_filter=None, envelope_recipient_name_contains_filter=None, envelope_status_filter=None, envelope_subject_contains_filter=None, fields=None, last_scheduled_execution_date=None, last_scheduled_execution_success_date=None, max_download_rows=None, max_grid_rows=None, max_scheduled_rows=None, period_length_filter=None, report_customized_id=None, report_description=None, report_id=None, report_name=None, report_type=None, run_uri=None, save_uri=None, schedule_id=None, sent_by_details=None, sent_by_filter=None, sent_by_ids=None, sort_field_direction=None, sort_field_name=None, verification_status_filter=None):  # noqa: E501
    """ReportInProductGet - a model defined in Swagger"""  # noqa: E501
    # Collect the constructor arguments keyed by attribute name; the
    # insertion order matches the original generated assignment order.
    args = dict(
        authentication_success_filter=authentication_success_filter,
        custom_field_filter=custom_field_filter,
        date_range_custom_from_date=date_range_custom_from_date,
        date_range_custom_to_date=date_range_custom_to_date,
        date_range_filter=date_range_filter,
        display_order=display_order,
        envelope_date_type_filter=envelope_date_type_filter,
        envelope_recipient_name_contains_filter=envelope_recipient_name_contains_filter,
        envelope_status_filter=envelope_status_filter,
        envelope_subject_contains_filter=envelope_subject_contains_filter,
        fields=fields,
        last_scheduled_execution_date=last_scheduled_execution_date,
        last_scheduled_execution_success_date=last_scheduled_execution_success_date,
        max_download_rows=max_download_rows,
        max_grid_rows=max_grid_rows,
        max_scheduled_rows=max_scheduled_rows,
        period_length_filter=period_length_filter,
        report_customized_id=report_customized_id,
        report_description=report_description,
        report_id=report_id,
        report_name=report_name,
        report_type=report_type,
        run_uri=run_uri,
        save_uri=save_uri,
        schedule_id=schedule_id,
        sent_by_details=sent_by_details,
        sent_by_filter=sent_by_filter,
        sent_by_ids=sent_by_ids,
        sort_field_direction=sort_field_direction,
        sort_field_name=sort_field_name,
        verification_status_filter=verification_status_filter,
    )
    # Every private backing field starts out as None, as in the original.
    for attr_name in args:
        setattr(self, '_' + attr_name, None)
    self.discriminator = None
    # Route only explicitly supplied (non-None) values through the
    # corresponding property setters.
    for attr_name, value in args.items():
        if value is not None:
            setattr(self, attr_name, value)
@property
def authentication_success_filter(self):
    """Get the authentication_success_filter of this ReportInProductGet.

    :return: The current authentication_success_filter value.
    :rtype: str
    """
    return self._authentication_success_filter

@authentication_success_filter.setter
def authentication_success_filter(self, authentication_success_filter):
    """Set the authentication_success_filter of this ReportInProductGet.

    :param authentication_success_filter: The new value to store.
    :type: str
    """
    self._authentication_success_filter = authentication_success_filter
@property
def custom_field_filter(self):
"""Gets the custom_field_filter of this ReportInProductGet. # noqa: E501
# noqa: E501
:return: The custom_field_filter of this ReportInProductGet. # noqa: E501
:rtype: str
"""
return self._custom_field_filter
@custom_field_filter.setter
def custom_field_filter(self, custom_field_filter):
"""Sets the custom_field_filter of this ReportInProductGet.
# noqa: E501
:param custom_field_filter: The custom_field_filter of this ReportInProductGet. # noqa: E501
:type: str
"""
self._custom_field_filter = custom_field_filter
@property
def date_range_custom_from_date(self):
"""Gets the date_range_custom_from_date of this ReportInProductGet. # noqa: E501
# noqa: E501
:return: The date_range_custom_from_date of this ReportInProductGet. # noqa: E501
:rtype: str
"""
return self._date_range_custom_from_date
@date_range_custom_from_date.setter
def date_range_custom_from_date(self, date_range_custom_from_date):
"""Sets the date_range_custom_from_date of this ReportInProductGet.
# noqa: E501
:param date_range_custom_from_date: The date_range_custom_from_date of this ReportInProductGet. # noqa: E501
:type: str
"""
self._date_range_custom_from_date = date_range_custom_from_date
@property
def date_range_custom_to_date(self):
"""Gets the date_range_custom_to_date of this ReportInProductGet. # noqa: E501
# noqa: E501
:return: The date_range_custom_to_date of this ReportInProductGet. # noqa: E501
:rtype: str
"""
return self._date_range_custom_to_date
@date_range_custom_to_date.setter
def date_range_custom_to_date(self, date_range_custom_to_date):
"""Sets the date_range_custom_to_date of this ReportInProductGet.
# noqa: E501
:param date_range_custom_to_date: The date_range_custom_to_date of this ReportInProductGet. # noqa: E501
:type: str
"""
self._date_range_custom_to_date = date_range_custom_to_date
@property
def date_range_filter(self):
"""Gets the date_range_filter of this ReportInProductGet. # noqa: E501
# noqa: E501
:return: The date_range_filter of this ReportInProductGet. # noqa: E501
:rtype: str
"""
return self._date_range_filter
@date_range_filter.setter
def date_range_filter(self, date_range_filter):
"""Sets the date_range_filter of this ReportInProductGet.
# noqa: E501
:param date_range_filter: The date_range_filter of this ReportInProductGet. # noqa: E501
:type: str
"""
self._date_range_filter = date_range_filter
@property
def display_order(self):
"""Gets the display_order of this ReportInProductGet. # noqa: E501
# noqa: E501
:return: The display_order of this ReportInProductGet. # noqa: E501
:rtype: str
"""
return self._display_order
@display_order.setter
def display_order(self, display_order):
"""Sets the display_order of this ReportInProductGet.
# noqa: E501
:param display_order: The display_order of this ReportInProductGet. # noqa: E501
:type: str
"""
self._display_order = display_order
@property
def envelope_date_type_filter(self):
"""Gets the envelope_date_type_filter of this ReportInProductGet. # noqa: E501
# noqa: E501
:return: The envelope_date_type_filter of this ReportInProductGet. # noqa: E501
:rtype: str
"""
return self._envelope_date_type_filter
@envelope_date_type_filter.setter
def envelope_date_type_filter(self, envelope_date_type_filter):
"""Sets the envelope_date_type_filter of this ReportInProductGet.
# noqa: E501
:param envelope_date_type_filter: The envelope_date_type_filter of this ReportInProductGet. # noqa: E501
:type: str
"""
self._envelope_date_type_filter = envelope_date_type_filter
@property
def envelope_recipient_name_contains_filter(self):
"""Gets the envelope_recipient_name_contains_filter of this ReportInProductGet. # noqa: E501
# noqa: E501
:return: The envelope_recipient_name_contains_filter of this ReportInProductGet. # noqa: E501
:rtype: str
"""
return self._envelope_recipient_name_contains_filter
@envelope_recipient_name_contains_filter.setter
def envelope_recipient_name_contains_filter(self, envelope_recipient_name_contains_filter):
"""Sets the envelope_recipient_name_contains_filter of this ReportInProductGet.
# noqa: E501
:param envelope_recipient_name_contains_filter: The envelope_recipient_name_contains_filter of this ReportInProductGet. # noqa: E501
:type: str
"""
self._envelope_recipient_name_contains_filter = envelope_recipient_name_contains_filter
@property
def envelope_status_filter(self):
"""Gets the envelope_status_filter of this ReportInProductGet. # noqa: E501
# noqa: E501
:return: The envelope_status_filter of this ReportInProductGet. # noqa: E501
:rtype: str
"""
return self._envelope_status_filter
@envelope_status_filter.setter
def envelope_status_filter(self, envelope_status_filter):
"""Sets the envelope_status_filter of this ReportInProductGet.
# noqa: E501
:param envelope_status_filter: The envelope_status_filter of this ReportInProductGet. # noqa: E501
:type: str
"""
self._envelope_status_filter = envelope_status_filter
@property
def envelope_subject_contains_filter(self):
"""Gets the envelope_subject_contains_filter of this ReportInProductGet. # noqa: E501
# noqa: E501
:return: The envelope_subject_contains_filter of this ReportInProductGet. # noqa: E501
:rtype: str
"""
return self._envelope_subject_contains_filter
@envelope_subject_contains_filter.setter
def envelope_subject_contains_filter(self, envelope_subject_contains_filter):
"""Sets the envelope_subject_contains_filter of this ReportInProductGet.
# noqa: E501
:param envelope_subject_contains_filter: The envelope_subject_contains_filter of this ReportInProductGet. # noqa: E501
:type: str
"""
self._envelope_subject_contains_filter = envelope_subject_contains_filter
@property
def fields(self):
"""Gets the fields of this ReportInProductGet. # noqa: E501
# noqa: E501
:return: The fields of this ReportInProductGet. # noqa: E501
:rtype: list[ReportInProductField]
"""
return self._fields
@fields.setter
def fields(self, fields):
"""Sets the fields of this ReportInProductGet.
# noqa: E501
:param fields: The fields of this ReportInProductGet. # noqa: E501
:type: list[ReportInProductField]
"""
self._fields = fields
@property
def last_scheduled_execution_date(self):
"""Gets the last_scheduled_execution_date of this ReportInProductGet. # noqa: E501
# noqa: E501
:return: The last_scheduled_execution_date of this ReportInProductGet. # noqa: E501
:rtype: str
| |
import json
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from ImagingReso import _utilities
import plotly.tools as tls
class Resonance(object):
    """Neutron-resonance model of a layered sample.

    Holds the sample description (``stack``), the extracted cross sections
    (``stack_sigma``) and the derived transmission/attenuation signals.

    NOTE(review): the attributes below are class-level mutable defaults;
    they appear to be rebound per instance in ``__init__``/``add_layer`` —
    confirm no state is unintentionally shared between instances.
    """
    e_min = 1e-5  # lowest energy (eV) accepted for the calculation range
    e_max = 1e8  # highest energy (eV) accepted for the calculation range
    stack = {}  # compound, thickness, atomic_ratio of each layer with isotopes information
    stack_sigma = {}  # all the energy and sigma of the isotopes and compounds
    stack_signal = {}  # transmission and attenuation signal for every isotope and compound
    total_signal = {}  # transmission and attenuation of the entire sample
    density_lock = {}  # maps compound name -> True when its density was supplied by the user
    energy_max = np.NaN
    energy_min = np.NaN
    energy_step = np.NaN
def __init__(self, stack={}, energy_max=1, energy_min=0.001, energy_step=0.001,
database='ENDF_VII', temperature='294K'):
"""initialize resonance object
:param stack: dictionary to store sample info
example: {'layer1': {'elements':['Ag','Si],
'atomic_ratio': [1, 2],
'thickness': {'value': 0.025,
'units': 'mm',
},
'density': {'units': 'g/cm3',
'value': 0.5,
},
}
:type stack: dictionary
:param energy_max: (default 300) max energy in eV to use in calculation
:type energy_max: float
:param energy_min: (default 0) min energy in eV to use in calculation
:type energy_min: float
:param energy_step: (default 0.1) energy step to use in extrapolation of sigma data
:type energy_step: float
:param database: database to extract cross-section info. ['ENDF_VII', 'ENDF_VIII'], both are database at 294K
:type database: str
"""
if database not in ['ENDF_VII', 'ENDF_VIII', '_data_for_unittest']:
raise ValueError(
"Database {} entered not existed. \nCurrent support: ['ENDF_VII', 'ENDF_VIII'] ".format(database))
# ENDF_VII only has nuclide 'C-0', replaced with 'C-12' and 'C-13' from ENDF_VIII.
# ENDF_VIII data base has problematic 'B-10' ace file, replaced with 'B-10' from ENFF_VII.
if database == 'ENDF_VIII':
pass
self.database = database
self.__element_metadata = {}
if energy_min < self.e_min:
raise ValueError("Energy min (eV) must be >= {}".format(self.e_min))
self.energy_min = energy_min
if energy_max > self.e_max:
raise ValueError("Energy max (eV) must be <= {}".format(self.e_max))
self.energy_max = energy_max
if energy_min == energy_max:
raise ValueError("Energy min and max should not have the same value!")
if (energy_max - energy_min) < energy_step:
raise ValueError("Energy step is bigger than range of energy specified!")
self.energy_step = energy_step
if not stack == {}:
# checking that every element of each stack is defined
_utilities.checking_stack(stack=stack, database=self.database)
new_stack = self.__update_stack_with_isotopes_infos(stack=stack)
self.stack = new_stack
# if layer density has been defined, lock it
self.__lock_density_if_defined(stack=self.stack)
# calculate stack_sigma, layer density, atoms_per_cm3 ...
self.__math_on_stack()
def __str__(self):
"""what to display if user does
>>> o_reso = Resolution()
>>> print(o_reso)
"""
return json.dumps(self.stack, indent=4)
def __repr__(self):
"""what to display if user does
>>> o_reso = Resolution()
>>> o_reso
"""
return json.dumps(self.stack, indent=4)
def add_layer(self, formula='', thickness=np.NaN, density=np.NaN):
"""provide another way to define the layers (stack)
Parameters:
===========
formula: string
ex: 'CoAg2'
ex: 'Al'
thickness: float (in mm)
density: float (g/cm3)
"""
if formula == '':
return
if formula in self.stack.keys():
raise ValueError("Layer '{}' is already in the sample stack.".format(formula))
_new_stack = _utilities.formula_to_dictionary(formula=formula,
thickness=thickness,
density=density,
database=self.database)
# check if density has been defined
self.__lock_density_if_defined(stack=_new_stack)
new_stack = self.__update_stack_with_isotopes_infos(stack=_new_stack)
self.stack = {**self.stack, **new_stack}
# calculate stack_sigma, layer density, atoms_per_cm3 ...
self.__math_on_stack()
def get_isotopic_ratio(self, compound='', element=''):
"""returns the list of isotopes for the element of the compound defined with their stoichiometric values
Parameters:
===========
compound: string (default is empty). If empty, all the stoichiometric will be displayed
element: string (default is same as compound).
Raises:
=======
ValueError if element is not defined in the stack
"""
_stack = self.stack
compound = str(compound)
if compound == '':
_list_compounds = _stack.keys()
list_all_dict = {}
for _compound in _list_compounds:
_compound = str(_compound)
_list_element = _stack[_compound]['elements']
list_all_dict[_compound] = {}
for _element in _list_element:
list_all_dict[_compound][_element] = self.get_isotopic_ratio(
compound=_compound,
element=_element)
return list_all_dict
# checking compound is valid
list_compounds = _stack.keys()
if compound not in list_compounds:
list_compounds_joined = ', '.join(list_compounds)
raise ValueError("Compound '{}' could not be find in {}".format(compound, list_compounds_joined))
# checking element is valid
if element == '':
# we assume that the element and compounds names matched
element = compound
list_element = _stack[compound].keys()
if element not in list_element:
list_element_joined = ', '.join(list_element)
raise ValueError("Element '{}' should be any of those elements: {}".format(element, list_element_joined))
list_istopes = _stack[compound][element]['isotopes']['list']
list_ratio = _stack[compound][element]['isotopes']['isotopic_ratio']
iso_ratio = zip(list_istopes, list_ratio)
_stoichiometric_ratio = {}
for _iso, _ratio in iso_ratio:
_stoichiometric_ratio[_iso] = _ratio
return _stoichiometric_ratio
def set_isotopic_ratio(self, compound='', element='', list_ratio=[]):
"""defines the new set of ratio of the compound/element and trigger the calculation to update the density
Parameters:
===========
compound: string (default is ''). Name of compound
element: string (default is ''). Name of element
list_ratio: list (default is []). list of new stoichiometric_ratio
Raises:
=======
ValueError if compound does not exist
ValueError if element does not exist
ValueError if list_ratio does not have the right format
"""
_stack = self.stack
list_compounds = _stack.keys()
if compound not in _stack.keys():
list_compounds_joined = ', '.join(list_compounds)
raise ValueError("Compound '{}' could not be find in {}".format(compound, list_compounds_joined))
if element == '':
# we assume that the element and compounds names matched
element = compound
list_element = _stack[compound].keys()
if element not in list_element:
list_element_joined = ', '.join(list_element)
raise ValueError("Element '{}' should be any of those elements: {}".format(element, list_element_joined))
old_list_ratio = _stack[compound][element]['isotopes']['isotopic_ratio']
if not (len(old_list_ratio) == len(list_ratio)):
raise ValueError("New list of ratio ({} elements) does not match old list size ({} elements!".format(len(
list_ratio), len(old_list_ratio)))
_utilities.check_iso_ratios(ratios=list_ratio, tol=0.005)
self.stack[compound][element]['isotopes']['isotopic_ratio'] = list_ratio
self.__update_molar_mass(compound=compound, element=element)
self.__update_density(compound=compound, element=element)
# update entire stack
self.__math_on_stack()
def get_density(self, compound='', element=''):
"""returns the list of isotopes for the element of the compound defined with their density
Parameters:
===========
compound: string (default is empty). If empty, all the stoichiometric will be displayed
element: string (default is same as compound).
Raises:
=======
ValueError if element is not defined in the stack
"""
_stack = self.stack
if compound == '':
_list_compounds = _stack.keys()
list_all_dict = {}
for _compound in _list_compounds:
_list_element = _stack[_compound]['elements']
list_all_dict[_compound] = {}
for _element in _list_element:
list_all_dict[_compound][_element] = self.get_density(
compound=_compound,
element=_element)
return list_all_dict
# checking compound is valid
list_compounds = _stack.keys()
if compound not in list_compounds:
list_compounds_joined = ', '.join(list_compounds)
raise ValueError("Compound '{}' could not be find in {}".format(compile, list_compounds_joined))
# checking element is valid
if element == '':
# we assume that the element and compounds names matched
element = compound
list_element = _stack[compound].keys()
if element not in list_element:
list_element_joined = ', '.join(list_element)
raise ValueError("Element '{}' should be any of those elements: {}".format(element, list_element_joined))
return _stack[compound][element]['density']['value']
    def __math_on_stack(self, used_lock=False):
        """Run the full update pipeline on the stack: populate stack_sigma,
        calculate the layer densities, atoms_per_cm3 and finally the
        transmission/attenuation signals. The call order below matters —
        each step consumes the output of the previous one.

        :param used_lock: honour the density_lock flags when recomputing
                          atoms_per_cm3 (default False)
        """
        # populate stack_sigma (Sigma vs Energy for every element)
        self.__get_sigmas()
        # populate compound density (if none provided)
        self.__update_layer_density()
        # populate compound molar mass
        # self.__update_layer_molar_mass() ### included in __calculate_atoms_per_cm3
        # populate atoms_per_cm3
        self.__calculate_atoms_per_cm3(used_lock=used_lock)
        # calculate transmission and attenuation
        self.__calculate_transmission_attenuation()
def __lock_density_if_defined(self, stack: dict):
"""lock (True) the density lock if the density has been been defined during initialization
Store the resulting dictionary into density_lock
Parameters:
===========
stack: dictionary (optional)
if not provided, the entire stack will be used
"""
if self.stack == {}:
density_lock = {}
else:
density_lock = self.density_lock
for _compound in stack.keys():
_density = stack[_compound]['density']['value']
if np.isnan(_density):
density_lock[_compound] = False
else:
density_lock[_compound] = True
self.density_lock = density_lock
def __calculate_transmission_attenuation(self):
""" """
stack = self.stack
stack_sigma = self.stack_sigma
stack_signal = {}
total_signal = {}
total_transmisison = 1.
# compound level
for _name_of_compound in stack.keys():
stack_signal[_name_of_compound] = {}
mu_per_cm_compound = 0
transmission_compound = 1.
energy_compound = []
_list_element = stack[_name_of_compound]['elements']
_thickness_cm = _utilities.set_distance_units(value=stack[_name_of_compound]['thickness']['value'],
from_units=stack[_name_of_compound]['thickness']['units'],
to_units='cm')
# element level
for _element in _list_element:
stack_signal[_name_of_compound][_element] = {}
_atoms_per_cm3 = stack[_name_of_compound][_element]['atoms_per_cm3']
# isotope level
for _iso in stack[_name_of_compound][_element]['isotopes']['list']:
stack_signal[_name_of_compound][_element][_iso] = {}
_sigma_iso = stack_sigma[_name_of_compound][_element][_iso]['sigma_b']
_mu_per_cm_iso, _transmission_iso = _utilities.calculate_transmission(
thickness_cm=_thickness_cm,
atoms_per_cm3=_atoms_per_cm3,
sigma_b=_sigma_iso)
stack_signal[_name_of_compound][_element][_iso]['mu_per_cm'] = _mu_per_cm_iso
stack_signal[_name_of_compound][_element][_iso]['transmission'] = _transmission_iso
stack_signal[_name_of_compound][_element][_iso]['attenuation'] = 1. - _transmission_iso
stack_signal[_name_of_compound][_element][_iso]['energy_eV'] = \
stack_sigma[_name_of_compound][_element][_iso]['energy_eV']
_sigma_ele = stack_sigma[_name_of_compound][_element]['sigma_b']
_mu_per_cm_ele, _transmission_ele = _utilities.calculate_transmission(
thickness_cm=_thickness_cm,
atoms_per_cm3=_atoms_per_cm3,
sigma_b=_sigma_ele)
stack_signal[_name_of_compound][_element]['mu_per_cm'] = _mu_per_cm_ele
stack_signal[_name_of_compound][_element]['transmission'] = _transmission_ele
stack_signal[_name_of_compound][_element]['attenuation'] = 1. - _transmission_ele
stack_signal[_name_of_compound][_element]['energy_eV'] = \
stack_sigma[_name_of_compound][_element]['energy_eV']
mu_per_cm_compound += _mu_per_cm_ele # plus
transmission_compound *= _transmission_ele # multiply
if len(energy_compound) == 0:
energy_compound = stack_sigma[_name_of_compound][_element]['energy_eV']
stack_signal[_name_of_compound]['mu_per_cm'] = mu_per_cm_compound
stack_signal[_name_of_compound]['transmission'] = transmission_compound
stack_signal[_name_of_compound]['attenuation'] = 1. - transmission_compound
| |
<reponame>Rogdham/pyTenable<gh_stars>0
'''
Exports
=======
The following methods allow for interaction into the Tenable.io
:devportal:`exports <exports>` API endpoints.
Methods available on ``tio.exports``:
.. rst-class:: hide-signature
.. autoclass:: ExportsAPI
:members:
'''
from uuid import UUID
from json.decoder import JSONDecodeError
from typing_extensions import Literal
from typing import Dict, Union, List
from marshmallow import Schema
from tenable.base.endpoint import APIEndpoint
from .schema import AssetExportSchema, VulnExportSchema, ComplianceExportSchema
from .iterator import ExportsIterator
class ExportsAPI(APIEndpoint):
def cancel(self,
export_type: Literal['vulns', 'assets', 'compliance'],
export_uuid: UUID,
) -> str:
'''
Cancels the specified export job.
API Documentation for cancel export jobs with
:devportal:`assets <exports-assets-export-cancel>`,
:devportal:`compliance <io-exports-compliance-cancel>`, and
:devportal:`vulnerabilities <exports-vulns-export-cancel>` datatypes.
Args:
export_type:
The type of export job that we are to cancel.
export_uuid:
The export job's unique identifier.
Returns:
str:
The status of the job.
Example:
>>> tio.exports.cancel('vuln', '{UUID}')
'CANCELLED'
'''
return self._api.post(f'{export_type}/export/{export_uuid}/cancel',
box=True
).get('status')
def download_chunk(self,
export_type: Literal['vulns', 'assets', 'compliance'],
export_uuid: UUID,
chunk_id: int,
retries: int = 3
) -> List:
'''
Downloads an export chunk from the specified job.
API Documentation for downloading an export chunk for
:devportal:`assets <exports-assets-download-chunk>`,
:devportal:`compliance <io-exports-compliance-download>`, and
:devportal:`vulnerabilities <exports-vulns-download-chunk>`.
Args:
export_type:
The type of export job
export_uuid:
The export job's unique identifier.
chunk_id:
The identifier for the specific chunk to download.
Returns:
List:
The list of objects that entail the chunk of data requested.
Example:
>>> chunk = tio.exports.download_chunk('vulns', '{UUID}', 1)
'''
# We will attempt to download a chunk of data and convert it into JSON.
# If the conversion fails, then we will increment our own retry counter
# and attempt to download the chunk again. After 3 attempts, we will
# assume that the chunk is dead and return an empty list.
downloaded = False
counter = 0
resp = []
while not downloaded and counter <= retries:
try:
resp = self._api.get(
f'{export_type}/export/{export_uuid}/chunks/{chunk_id}'
).json()
downloaded = True
except JSONDecodeError:
self._log.warning((
f'{export_type} export {export_uuid} encountered an '
f'invalid chunk on chunk id {chunk_id}'
))
counter += 1
if len(resp) < 1:
self._log.warning((
f'{export_type} export {export_uuid} encoundered an empty '
f'chunk on chunk id {chunk_id}'
))
return resp
def status(self,
export_type: Literal['vulns', 'assets', 'compliance'],
export_uuid: UUID,
) -> Dict:
'''
Gets the status of the export job.
API Documentation for the status of an export job for the
:devportal:`assets <exports-assets-export-status>`,
:devportal:`compliance <io-exports-compliance-status>`, and
:devportal:`vulnerabilities <exports-vulns-export-status>` datatypes.
Args:
export_type (str):
The datatype of the export job.
export_uuid (str):
The UUID of the export job.
Examples:
>>> status = tio.exports.status('vulns', {UUID}')
'''
return self._api.get(f'{export_type}/export/{export_uuid}/status',
box=True,
)
def jobs(self,
export_type: Literal['vulns', 'assets'],
) -> Dict:
'''
Returns the list of jobs available for a given datatype.
API Documentation for the job listing APIs for
:devportal:`assets <exports-assets-export-status-recent>`, and
:devportal:`vulnerabilities <exports-vulns-export-status-recent>`
datatypes.
Args:
export_type (str):
The datatype of export to get the jobs for.
Examples:
>>> jobs = tio.exports.jobs('vulns')
'''
return self._api.get(f'{export_type}/export/status', box=True).exports
def _export(self,
export_type: Literal['vulns', 'assets', 'compliance'],
schema: Schema,
**kwargs
) -> Union[ExportsIterator, UUID]:
'''
Get the list of jobs for for the specified datatype.
API Documentation for the job listings for
:devportal:`assets <exports-assets-request-export>`,
:devportal:`compliance <io-exports-compliance-create>`, and
:devportal:`vulnerabilities <exports-vulns-request-export>` datatypes.
'''
export_uuid = kwargs.pop('uuid', None)
use_iterator = kwargs.pop('use_iterator', True)
when_done = kwargs.pop('when_done', False)
Iterator = kwargs.pop('iterator', ExportsIterator) # noqa: PLC0103
timeout = kwargs.pop('timeout', None)
payload = schema.dump(schema.load(kwargs))
if not export_uuid:
export_uuid = self._api.post(f'{export_type}/export',
json=payload,
box=True
).export_uuid
self._log.debug(
f'{export_type} export job {export_uuid} initiated'
)
if use_iterator:
return Iterator(self._api,
type=export_type,
uuid=export_uuid,
_wait_for_complete=when_done,
timeout=timeout
)
return UUID(export_uuid)
    def assets(self, **kwargs) -> Union[ExportsIterator, UUID]:
        '''
        Initiate an asset export.

        :devportal:`API Documentation <exports-assets-request-export>`

        Args:
            created_at (int, optional):
                Assets created after this timestamp will be returned.
            deleted_at (int, optional):
                Assets deleted after this timestamp will be returned.
            first_scan_time (int, optional):
                Assets with a first_scan time later than this timestamp
                will be returned.
            last_assessed (int, optional):
                Assets last scanned after this timestamp will be returned.
            last_authenticated_scan_time (int, optional):
                Assets last scanned with an authenticated scan after this
                timestamp will be returned.
            terminated_at (int, optional):
                Assets terminated after this timestamp will be returned.
            updated_at (int, optional):
                Assets updated after this timestamp will be returned.
            has_plugin_results (bool, optional):
                Should assets only be returned if they have plugin results?
            is_deleted (bool, optional):
                Should we return only assets that have been deleted?
            is_licensed (bool, optional):
                Should we return only assets that are licensed?
            is_terminated (bool, optional):
                Should we return assets that have been terminated?
            servicenow_sysid (bool, optional):
                Should we return assets that have a ServiceNOW sysid?
                if ``True`` only assets with an id will be returned.
                if ``False`` only assets without an id will be returned.
            chunk_size (int, optional):
                How many asset objects should be returned per chunk of data?
                The default is ``1000``.
            network_id (str, optional):
                Only assets within the specified network UUID will be returned.
            sources (list[str], optional):
                Only assets with a source matching one of these source values
                will be returned.  Note that this value is case-sensitive.
            tags (list[tuple[str, str]], optional):
                A list of tag pairs to filter the results on.  The tag pairs
                should be presented as ``('CATEGORY', 'VALUE')``.
            uuid (str, optional):
                A predefined export UUID to use for generating an
                ExportIterator.  Using this parameter will ignore all of the
                filter arguments.
            use_iterator (bool, optional):
                Determines if we should return an iterator, or simply the
                export job UUID.  The default is to return an iterator.
            when_done (bool, optional):
                When creating the iterator, setting this flag to true will tell
                the iterator to wait until the export job has completed before
                processing the first chunk.  The default behaviour is to start
                processing chunks of data as soon as they become available.
            timeout (int, optional):
                If specified, determines a timeout in seconds to wait for the
                export job to sit in the queue before cancelling the job and
                raising a ``TioExportsTimeout`` error.  Once a job has started
                to be processed, the timeout is ignored.
            iterator (Iterator, optional):
                Supports overloading the iterator class to be used to process
                the datachunks.

        Examples:
            Iterating over the results of an asset export:

            >>> for asset in tio.exports.assets():
            ...     print(asset)

            Getting hosts that have been updated within the last 24 hours:

            >>> assets = tio.exports.assets(
            ...     updated_at=int(arrow.now().shift(days=-1).timestamp())
            ... )

            Getting assets that have the ``Region:Chicago`` tag:

            >>> assets = tio.exports.assets(
            ...     tags=[('Region', 'Chicago')]
            ... )
        '''
        return self._export('assets', AssetExportSchema(), **kwargs)
    def compliance(self, **kwargs) -> Union[ExportsIterator, UUID]:
        '''
        Initiate a compliance export.

        :devportal:`API Documentation <io-exports-compliance-create>`

        Args:
            asset (list[str], optional):
                A list of assets to return compliance results for.
            first_seen (int, optional):
                Returns findings with a first seen time newer than the
                specified unix timestamp.
            last_seen (int, optional):
                Returns findings with a last seen time newer than the
                specified unix timestamp.
            num_findings (int):
                The number of findings to return per chunk of data.  If left
                unspecified, the default is ``5000``.
            uuid (str, optional):
                A predefined export UUID to use for generating an
                ExportIterator.  Using this parameter will ignore all of the
                filter arguments.
            use_iterator (bool, optional):
                Determines if we should return an iterator, or simply the
                export job UUID.  The default is to return an iterator.
            when_done (bool, optional):
                When creating the iterator, setting this flag to true will tell
                the iterator to wait until the export job has completed before
                processing the first chunk.  The default behaviour is to start
                processing chunks of data as soon as they become available.
            timeout (int, optional):
                If specified, determines a timeout in seconds to wait for the
                export job to sit in the queue before cancelling the job and
                raising a ``TioExportsTimeout`` error.  Once a job has started
                to be processed, the timeout is ignored.
            iterator (Iterator, optional):
                Supports overloading the iterator class to be used to process
                the datachunks.

        Examples:
            >>> for finding in tio.exports.compliance():
            ...     print(finding)
        '''
        return self._export('compliance', ComplianceExportSchema(), **kwargs)
def vulns(self, **kwargs) -> Union[ExportsIterator, UUID]:
'''
Initiate a vulnerability export.
:devportal:`API Documentation <exports-vulns-request-export>`
Args:
first_found (int, optional):
Findings first discovered after this timestamp will be
returned.
indexed_at (int, optional):
Findings indexed into Tenable.io after this timestamp will
be returned.
last_fixed (int, optional):
Findings fixed after this timestamp | |
<reponame>thespacedoctor/rockAtlas<filename>rockAtlas/positions/pyephemPositions.py
#!/usr/local/bin/python
# encoding: utf-8
"""
*Estimate the positions of moving objects in the neighbourhood of the ATLAS exposures using pyephemPositions and add results to the database*
:Author:
<NAME>
:Date Created:
October 30, 2017
"""
################# GLOBAL IMPORTS ####################
import sys
import os
os.environ['TERM'] = 'vt100'
from fundamentals import tools
from fundamentals.mysql import readquery
from fundamentals import fmultiprocess
import math
import ephem
import codecs
import healpy as hp
import numpy as np
from collections import defaultdict
from fundamentals.mysql import insert_list_of_dictionaries_into_database_tables
import copy
from fundamentals.mysql import writequery
import psutil
xephemOE = []  # cached xephem orbital-element rows (loaded once per run by `get`)
tileSide = ""  # side length of the exposure-matching tile; set from settings in `get`
# MAKE SURE HEALPIX SMALL ENOUGH TO MATCH FOOTPRINTS CORRECTLY
nside = 1024  # healpix resolution used when indexing exposure footprints
pi = (4 * math.atan(1.0))
DEG_TO_RAD_FACTOR = pi / 180.0  # degrees -> radians
RAD_TO_DEG_FACTOR = 180.0 / pi  # radians -> degrees
moversDict = {}  # snapshot of mover positions; presumably shared with the multiprocess workers — confirm
class pyephemPositions():
    """
    *Estimate the positions of moving objects in the neighbourhood of the ATLAS exposures using pyephem and add the results to the database*

    **Key Arguments:**
        - ``log`` -- logger
        - ``settings`` -- the settings dictionary
        - ``dev_flag`` -- use the dev_flag column in the database to select out specific ATLAS exposures to work with. Default *False*

    **Usage:**

        To setup your logger, settings and database connections, please use the ``fundamentals`` package (`see tutorial here <http://fundamentals.readthedocs.io/en/latest/#tutorial>`_).

        To initiate a pyephemPositions object, use the following:

        .. code-block:: python

            from rockAtlas.positions import pyephemPositions
            pyeph = pyephemPositions(
                log=log,
                settings=settings
            )
            pyeph.get()
    """
# Initialisation
def __init__(
self,
log,
settings=False,
dev_flag=False
):
self.log = log
log.debug("instansiating a new 'pyephemPositions' object")
self.settings = settings
self.dev_flag = dev_flag
# xt-self-arg-tmpx
# INITIAL ACTIONS
# SETUP ALL DATABASE CONNECTIONS
from rockAtlas import database
db = database(
log=log,
settings=settings
)
dbConns, dbVersions = db.connect()
self.atlas3DbConn = dbConns["atlas3"]
self.atlas4DbConn = dbConns["atlas4"]
self.atlasMoversDBConn = dbConns["atlasMovers"]
return None
def get(self, singleSnapshot=False):
"""
*geneate the pyephem positions*
**Key Arguments:**
- ``singleSnapshot`` -- just extract positions for a single pyephem snapshot (used for unit testing)
**Return:**
- ``None``
"""
self.log.info('starting the ``get`` method')
global xephemOE
global tileSide
global magLimit
# GRAB PARAMETERS FROM SETTINGS FILE
tileSide = float(self.settings["pyephem"]["atlas exposure match side"])
magLimit = float(self.settings["pyephem"]["magnitude limit"])
snapshotsRequired = 1
while snapshotsRequired > 0:
nextMjds, exposures, snapshotsRequired = self._get_exposures_requiring_pyephem_positions(
concurrentSnapshots=int(self.settings["pyephem"]["batch size"]))
print "There are currently %(snapshotsRequired)s more pyephem snapshots required " % locals()
if snapshotsRequired == 0:
return
if len(xephemOE) == 0:
xephemOE = self._get_xephem_orbital_elements()
# DEFINE AN INPUT ARRAY
magLimit = self.settings["pyephem"]["magnitude limit"]
pyephemDB = fmultiprocess(log=self.log, function=_generate_pyephem_snapshot, timeout=300,
inputArray=nextMjds, magLimit=magLimit)
matchedObjects = []
for p, e, m in zip(pyephemDB, exposures, nextMjds):
matchedObjects.append(
self._match_pyephem_snapshot_to_atlas_exposures(p, e, m))
self._add_matched_objects_to_database(matchedObjects)
self._update_database_flag(exposures)
if singleSnapshot:
snapshotsRequired = 0
self.log.info('completed the ``get`` method')
return None
def _get_exposures_requiring_pyephem_positions(
self,
concurrentSnapshots=10):
"""*get next batch of exposures requiring pyephem positions*
**Key Arguments:**
- ``concurrentSnapshots`` -- number of concurrent PyEphem snapshots to process
"""
self.log.info(
'starting the ``_get_exposures_requiring_pyephem_positions`` method')
if self.dev_flag == True:
dev_flag = " and dev_flag = 1"
else:
dev_flag = ""
sqlQuery = u"""
select distinct pyephem_mjd from atlas_exposures where pyephem_positions = 0 and local_data = 1 %(dev_flag)s order by pyephem_mjd asc
""" % locals()
rows = readquery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.atlasMoversDBConn,
quiet=False
)
snapshotsRequired = len(rows)
if snapshotsRequired == 0:
return [], [], 0
nextMjds = []
nextMjds[:] = [r["pyephem_mjd"] for r in rows[:concurrentSnapshots]]
exposures = []
for nextMjd in nextMjds:
sqlQuery = u"""
select * from atlas_exposures where pyephem_positions = 0 %(dev_flag)s and pyephem_mjd = %(nextMjd)s
""" % locals()
theseExps = readquery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.atlasMoversDBConn,
quiet=False
)
exposures.append(list(theseExps))
self.log.info(
'completed the ``_get_exposures_requiring_pyephem_positions`` method')
return nextMjds, exposures, snapshotsRequired
def _add_matched_objects_to_database(
self,
matchedObjects):
"""*add mathced objects to database*
**Key Arguments:**
- ``matchedObjects`` -- these objects matched in the neighbourhood of the ATLAS exposures (list of dictionaries)
"""
self.log.info(
'starting the ``_add_matched_objects_to_database`` method')
print "Adding the matched sources to the `pyephem_positions` database table"
allMatches = []
for m in matchedObjects:
allMatches += m
dbSettings = self.settings["database settings"]["atlasMovers"]
insert_list_of_dictionaries_into_database_tables(
dbConn=self.atlasMoversDBConn,
log=self.log,
dictList=allMatches,
dbTableName="pyephem_positions",
uniqueKeyList=["expname", "object_name"],
dateModified=True,
batchSize=10000,
replace=True,
dbSettings=dbSettings
)
self.log.info(
'completed the ``_add_matched_objects_to_database`` method')
return None
def _update_database_flag(
self,
exposures):
"""* update database flag*
**Key Arguments:**
- ``exposures`` -- the atlas exposure to update the database flags for
"""
self.log.info('starting the ``_update_database_flag`` method')
allExposures = []
for e in exposures:
allExposures += e
expIds = []
expIds[:] = [e["expname"] for e in allExposures]
expIds = ('","').join(expIds)
sqlQuery = """update atlas_exposures a, pyephem_positions p set a.pyephem_positions = 1 where a.pyephem_positions = 0 and a.expname=p.expname;""" % locals(
)
writequery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.atlasMoversDBConn,
)
sqlQuery = """update atlas_exposures set pyephem_positions = 2, dophot_match = 2, orbfit_positions = 2 where pyephem_positions = 0 and expname in ("%(expIds)s");""" % locals(
)
writequery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.atlasMoversDBConn,
)
self.log.info('completed the ``_update_database_flag`` method')
return None
def _get_xephem_orbital_elements(
self):
"""*get xephem orbital elements*
**Key Arguments:**
- ``xephemOE`` -- a list of xephem database format strings for use with pyephem
"""
self.log.info('starting the ``_get_xephem_orbital_elements`` method')
print "Getting the XEphem orbital element strings from the database"
sqlQuery = u"""
select pyephem_string, name, mpc_number from orbital_elements where include_in_match = 1
""" % locals()
rows = readquery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.atlasMoversDBConn,
quiet=False
)
xephemOE = list(rows)
self.log.info('completed the ``_get_xephem_orbital_elements`` method')
return xephemOE
def _match_pyephem_snapshot_to_atlas_exposures(
self,
pyephemDB,
exposures,
mjd):
"""*match pyephem snapshot to atlas exposures*
**Key Arguments:**
- ``pyephemDB`` -- the pyephem solar-system snapshot database
- ``exposures`` -- the atlas exposures to match against the snapshot
- ``mjd`` -- the MJD of the pyephem snapshot
**Return:**
- ``matchedObjects`` -- these objects matched in the neighbourhood of the ATLAS exposures (list of dictionaries)
"""
self.log.info(
'starting the ``_match_pyephem_snapshot_to_atlas_exposures`` method')
global DEG_TO_RAD_FACTOR
global RAD_TO_DEG_FACTOR
global moversDict
e = len(exposures)
print "Matching %(e)s ATLAS exposures against the pyephem snapshot for MJD = %(mjd)s" % locals()
# MAKE SURE HEALPIX SMALL ENOUGH TO MATCH FOOTPRINTS CORRECTLY
global nside
# GRAB PARAMETERS FROM SETTINGS FILE
tileSide = float(self.settings["pyephem"]["atlas exposure match side"])
magLimit = float(self.settings["pyephem"]["magnitude limit"])
# EXPLODE OUT THE PYEPHEM DATABASE
ra = pyephemDB["ra_deg"]
dec = pyephemDB["dec_deg"]
healpix = pyephemDB["healpix"]
objects = pyephemDB["object_name"]
mpc_numbers = pyephemDB["mpc_number"]
mag = pyephemDB["mag"]
# INDEX PYEPHEM MOVERS IN DICTIONARY BY HEALPIX ID
moversDict = defaultdict(list)
for ind, (p, r, d, o, m, g) in enumerate(zip(healpix, ra, dec, objects, mpc_numbers, mag)):
moversDict[p].append(
{"object_name": o,
"ra_deg": r,
"dec_deg": d,
"mpc_number": m,
"mag": g
}
)
# MATCH THE PYEPHEM MOVERS AGAINST THE ATLAS EXPOSURES
matchedObjects = []
results = fmultiprocess(log=self.log, function=_match_single_exposure_against_pyephem_db, timeout=120,
inputArray=exposures)
for r in results:
matchedObjects += r
self.log.info(
'completed the ``_match_pyephem_snapshot_to_atlas_exposures`` method')
return matchedObjects
def _generate_pyephem_snapshot(
        mjd,
        log,
        magLimit):
    """*generate pyephem snapshot*

    Module-level worker (dispatched via ``fmultiprocess``) that computes a
    solar-system position snapshot at one epoch from the globally cached
    XEphem orbital elements.

    **Key Arguments:**
        - ``mjd`` -- the mjd to generate the pyephem snapshot database for
        - ``log`` -- logger
        - ``magLimit`` -- objects fainter than this magnitude are skipped

    **Return:**
        - ``pyephemDB`` -- the pyephem solar-system snapshot database (dict of parallel lists)
    """
    log.info('starting the ``_generate_pyephem_snapshot`` method')

    print "generating pyephem database for MJD %(mjd)s" % locals()

    # Shared state populated by pyephemPositions.get in the parent process.
    global xephemOE
    global DEG_TO_RAD_FACTOR
    global RAD_TO_DEG_FACTOR
    global nside

    # THE PYEPHEM OBSERVER
    obs = ephem.Observer()
    # PYEPHEM WORKS IN DUBLIN JD, TO CONVERT FROM MJD SUBTRACT 15019.5
    obs.date = float(mjd) - 15019.5

    # Parallel lists: index i across all lists describes one object.
    pyephemDB = {
        "ra_deg": [],
        "dec_deg": [],
        "mpc_number": [],
        "object_name": [],
        "healpix": [],
        "mag": []
    }
    for d in xephemOE:
        # GENERATE EPHEMERIS FOR THIS OBJECT
        minorPlanet = ephem.readdb(d["pyephem_string"])
        minorPlanet.compute(obs)
        # Skip objects fainter than the configured magnitude limit.
        if minorPlanet.mag > magLimit:
            continue
        if d["mpc_number"]:
            d["mpc_number"] = int(d["mpc_number"])
        # a_ra / a_dec are in radians; convert to degrees.
        thisRa = minorPlanet.a_ra * RAD_TO_DEG_FACTOR
        thisDec = minorPlanet.a_dec * RAD_TO_DEG_FACTOR
        pyephemDB["mag"].append(minorPlanet.mag)
        pyephemDB["ra_deg"].append(thisRa)
        pyephemDB["dec_deg"].append(thisDec)
        pyephemDB["mpc_number"].append(d["mpc_number"])
        pyephemDB["object_name"].append(d["name"])
        # Healpix cell id used later for fast spatial matching.
        pyephemDB["healpix"].append(hp.ang2pix(
            nside, theta=thisRa, phi=thisDec, lonlat=True))

    log.info('completed the ``_generate_pyephem_snapshot`` method')
    return pyephemDB
def _match_single_exposure_against_pyephem_db(
exposure,
log):
"""*summary of function*
**Key Arguments:**
- ``exposure`` -- the atlas expsosure metadata
- ``log`` -- logger
**Return:**
- None
**Usage:**
.. todo::
add usage info
create a sublime snippet for usage
.. code-block:: python
usage code
"""
log.info('starting the ``_match_single_exposure_against_pyephem_db`` function')
global tileSide
global magLimit
global DEG_TO_RAD_FACTOR
global nside
global moversDict
matchedObjects = []
expId = exposure["expname"]
raFc = float(exposure["raDeg"])
decFc = float(exposure["decDeg"])
# GENERATE THE EXPOSURE HEALPIX ID MAP
decCorners = (decFc - tileSide / 2,
decFc + tileSide / 2)
corners = []
for d in decCorners:
if d > 90.:
d = 180. - d
elif d < -90.:
d = -180 - d
raCorners = (raFc - (tileSide / 2) / np.cos(d * DEG_TO_RAD_FACTOR),
raFc + | |
return d.errback(result.ue)
if result.te != None:
return d.errback(result.te)
return d.errback(TApplicationException(TApplicationException.MISSING_RESULT, "get failed: unknown result"))
def get_slice(self, key, column_parent, predicate, consistency_level):
    """
    Fetch the columns held by ``column_parent`` (a ColumnFamily, or a
    ColumnFamily/SuperColumn pair) that satisfy ``predicate``. Resolves to
    an empty list when nothing matches.

    Parameters:
     - key
     - column_parent
     - predicate
     - consistency_level
    """
    # Register a Deferred under a fresh sequence id; recv_get_slice fires it
    # when the matching response frame arrives.
    self._seqid += 1
    deferred = defer.Deferred()
    self._reqs[self._seqid] = deferred
    self.send_get_slice(key, column_parent, predicate, consistency_level)
    return deferred
def send_get_slice(self, key, column_parent, predicate, consistency_level):
    """Serialise a get_slice call frame and flush it onto the transport."""
    proto = self._oprot_factory.getProtocol(self._transport)
    proto.writeMessageBegin('get_slice', TMessageType.CALL, self._seqid)
    call_args = get_slice_args()
    call_args.key = key
    call_args.column_parent = column_parent
    call_args.predicate = predicate
    call_args.consistency_level = consistency_level
    call_args.write(proto)
    proto.writeMessageEnd()
    proto.trans.flush()
def recv_get_slice(self, iprot, mtype, rseqid):
    """Decode a get_slice response frame and fire the pending Deferred.

    Fires the Deferred registered under ``rseqid`` with the column list on
    success, or errbacks it with the transported exception.
    """
    d = self._reqs.pop(rseqid)
    if mtype == TMessageType.EXCEPTION:
        x = TApplicationException()
        x.read(iprot)
        iprot.readMessageEnd()
        return d.errback(x)
    result = get_slice_result()
    result.read(iprot)
    iprot.readMessageEnd()
    # Identity comparison with None (was "!= None", which invokes __ne__).
    if result.success is not None:
        return d.callback(result.success)
    if result.ire is not None:
        return d.errback(result.ire)
    if result.ue is not None:
        return d.errback(result.ue)
    if result.te is not None:
        return d.errback(result.te)
    return d.errback(TApplicationException(TApplicationException.MISSING_RESULT, "get_slice failed: unknown result"))
def get_count(self, key, column_parent, predicate, consistency_level):
    """
    Count the columns matching ``predicate`` for one ``key`` within the
    given ColumnFamily (and optional SuperColumn).

    Parameters:
     - key
     - column_parent
     - predicate
     - consistency_level
    """
    # Register a Deferred under a fresh sequence id; recv_get_count fires it
    # when the matching response frame arrives.
    self._seqid += 1
    deferred = defer.Deferred()
    self._reqs[self._seqid] = deferred
    self.send_get_count(key, column_parent, predicate, consistency_level)
    return deferred
def send_get_count(self, key, column_parent, predicate, consistency_level):
    """Serialise a get_count call frame and flush it onto the transport."""
    proto = self._oprot_factory.getProtocol(self._transport)
    proto.writeMessageBegin('get_count', TMessageType.CALL, self._seqid)
    call_args = get_count_args()
    call_args.key = key
    call_args.column_parent = column_parent
    call_args.predicate = predicate
    call_args.consistency_level = consistency_level
    call_args.write(proto)
    proto.writeMessageEnd()
    proto.trans.flush()
def recv_get_count(self, iprot, mtype, rseqid):
    """Decode a get_count response frame and fire the pending Deferred.

    Fires the Deferred registered under ``rseqid`` with the count on
    success, or errbacks it with the transported exception.
    """
    d = self._reqs.pop(rseqid)
    if mtype == TMessageType.EXCEPTION:
        x = TApplicationException()
        x.read(iprot)
        iprot.readMessageEnd()
        return d.errback(x)
    result = get_count_result()
    result.read(iprot)
    iprot.readMessageEnd()
    # Identity comparison with None (was "!= None", which invokes __ne__).
    if result.success is not None:
        return d.callback(result.success)
    if result.ire is not None:
        return d.errback(result.ire)
    if result.ue is not None:
        return d.errback(result.ue)
    if result.te is not None:
        return d.errback(result.te)
    return d.errback(TApplicationException(TApplicationException.MISSING_RESULT, "get_count failed: unknown result"))
def multiget_slice(self, keys, column_parent, predicate, consistency_level):
    """
    Run get_slice for ``column_parent``/``predicate`` over each of ``keys``
    in parallel on the server side.

    Parameters:
     - keys
     - column_parent
     - predicate
     - consistency_level
    """
    # Register a Deferred under a fresh sequence id; recv_multiget_slice
    # fires it when the matching response frame arrives.
    self._seqid += 1
    deferred = defer.Deferred()
    self._reqs[self._seqid] = deferred
    self.send_multiget_slice(keys, column_parent, predicate, consistency_level)
    return deferred
def send_multiget_slice(self, keys, column_parent, predicate, consistency_level):
    """Serialise a multiget_slice call frame and flush it onto the transport."""
    proto = self._oprot_factory.getProtocol(self._transport)
    proto.writeMessageBegin('multiget_slice', TMessageType.CALL, self._seqid)
    call_args = multiget_slice_args()
    call_args.keys = keys
    call_args.column_parent = column_parent
    call_args.predicate = predicate
    call_args.consistency_level = consistency_level
    call_args.write(proto)
    proto.writeMessageEnd()
    proto.trans.flush()
def recv_multiget_slice(self, iprot, mtype, rseqid):
    """Decode a multiget_slice response frame and fire the pending Deferred.

    Fires the Deferred registered under ``rseqid`` with the per-key result
    map on success, or errbacks it with the transported exception.
    """
    d = self._reqs.pop(rseqid)
    if mtype == TMessageType.EXCEPTION:
        x = TApplicationException()
        x.read(iprot)
        iprot.readMessageEnd()
        return d.errback(x)
    result = multiget_slice_result()
    result.read(iprot)
    iprot.readMessageEnd()
    # Identity comparison with None (was "!= None", which invokes __ne__).
    if result.success is not None:
        return d.callback(result.success)
    if result.ire is not None:
        return d.errback(result.ire)
    if result.ue is not None:
        return d.errback(result.ue)
    if result.te is not None:
        return d.errback(result.te)
    return d.errback(TApplicationException(TApplicationException.MISSING_RESULT, "multiget_slice failed: unknown result"))
def multiget_count(self, keys, column_parent, predicate, consistency_level):
    """
    Run get_count over each of the given binary ``keys`` in parallel;
    resolves to a map from key to the count found.

    Parameters:
     - keys
     - column_parent
     - predicate
     - consistency_level
    """
    # Register a Deferred under a fresh sequence id; recv_multiget_count
    # fires it when the matching response frame arrives.
    self._seqid += 1
    deferred = defer.Deferred()
    self._reqs[self._seqid] = deferred
    self.send_multiget_count(keys, column_parent, predicate, consistency_level)
    return deferred
def send_multiget_count(self, keys, column_parent, predicate, consistency_level):
    """Serialise a multiget_count call frame and flush it onto the transport."""
    proto = self._oprot_factory.getProtocol(self._transport)
    proto.writeMessageBegin('multiget_count', TMessageType.CALL, self._seqid)
    call_args = multiget_count_args()
    call_args.keys = keys
    call_args.column_parent = column_parent
    call_args.predicate = predicate
    call_args.consistency_level = consistency_level
    call_args.write(proto)
    proto.writeMessageEnd()
    proto.trans.flush()
def recv_multiget_count(self, iprot, mtype, rseqid):
    """Decode a multiget_count response frame and fire the pending Deferred.

    Fires the Deferred registered under ``rseqid`` with the key->count map
    on success, or errbacks it with the transported exception.
    """
    d = self._reqs.pop(rseqid)
    if mtype == TMessageType.EXCEPTION:
        x = TApplicationException()
        x.read(iprot)
        iprot.readMessageEnd()
        return d.errback(x)
    result = multiget_count_result()
    result.read(iprot)
    iprot.readMessageEnd()
    # Identity comparison with None (was "!= None", which invokes __ne__).
    if result.success is not None:
        return d.callback(result.success)
    if result.ire is not None:
        return d.errback(result.ire)
    if result.ue is not None:
        return d.errback(result.ue)
    if result.te is not None:
        return d.errback(result.te)
    return d.errback(TApplicationException(TApplicationException.MISSING_RESULT, "multiget_count failed: unknown result"))
def get_range_slices(self, column_parent, predicate, range, consistency_level):
    """
    Resolve to a subset of columns for a contiguous range of keys.

    Parameters:
     - column_parent
     - predicate
     - range  (parameter name kept for interface compatibility, though it
       shadows the builtin)
     - consistency_level
    """
    # Register a Deferred under a fresh sequence id; recv_get_range_slices
    # fires it when the matching response frame arrives.
    self._seqid += 1
    deferred = defer.Deferred()
    self._reqs[self._seqid] = deferred
    self.send_get_range_slices(column_parent, predicate, range, consistency_level)
    return deferred
def send_get_range_slices(self, column_parent, predicate, range, consistency_level):
    """Serialise a get_range_slices call frame and flush it onto the transport."""
    proto = self._oprot_factory.getProtocol(self._transport)
    proto.writeMessageBegin('get_range_slices', TMessageType.CALL, self._seqid)
    call_args = get_range_slices_args()
    call_args.column_parent = column_parent
    call_args.predicate = predicate
    call_args.range = range
    call_args.consistency_level = consistency_level
    call_args.write(proto)
    proto.writeMessageEnd()
    proto.trans.flush()
def recv_get_range_slices(self, iprot, mtype, rseqid):
    """Decode a get_range_slices response frame and fire the pending Deferred.

    Fires the Deferred registered under ``rseqid`` with the key-slice list
    on success, or errbacks it with the transported exception.
    """
    d = self._reqs.pop(rseqid)
    if mtype == TMessageType.EXCEPTION:
        x = TApplicationException()
        x.read(iprot)
        iprot.readMessageEnd()
        return d.errback(x)
    result = get_range_slices_result()
    result.read(iprot)
    iprot.readMessageEnd()
    # Identity comparison with None (was "!= None", which invokes __ne__).
    if result.success is not None:
        return d.callback(result.success)
    if result.ire is not None:
        return d.errback(result.ire)
    if result.ue is not None:
        return d.errback(result.ue)
    if result.te is not None:
        return d.errback(result.te)
    return d.errback(TApplicationException(TApplicationException.MISSING_RESULT, "get_range_slices failed: unknown result"))
def get_indexed_slices(self, column_parent, index_clause, column_predicate, consistency_level):
    """
    Resolve to the columns named by ``column_predicate`` for the rows
    matching ``index_clause``.

    Parameters:
     - column_parent
     - index_clause
     - column_predicate
     - consistency_level
    """
    # Register a Deferred under a fresh sequence id; recv_get_indexed_slices
    # fires it when the matching response frame arrives.
    self._seqid += 1
    deferred = defer.Deferred()
    self._reqs[self._seqid] = deferred
    self.send_get_indexed_slices(column_parent, index_clause, column_predicate, consistency_level)
    return deferred
def send_get_indexed_slices(self, column_parent, index_clause, column_predicate, consistency_level):
    """Serialise a get_indexed_slices call frame and flush it onto the transport."""
    proto = self._oprot_factory.getProtocol(self._transport)
    proto.writeMessageBegin('get_indexed_slices', TMessageType.CALL, self._seqid)
    call_args = get_indexed_slices_args()
    call_args.column_parent = column_parent
    call_args.index_clause = index_clause
    call_args.column_predicate = column_predicate
    call_args.consistency_level = consistency_level
    call_args.write(proto)
    proto.writeMessageEnd()
    proto.trans.flush()
def recv_get_indexed_slices(self, iprot, mtype, rseqid):
    """Decode a get_indexed_slices response frame and fire the pending Deferred.

    Fires the Deferred registered under ``rseqid`` with the matching rows on
    success, or errbacks it with the transported exception.
    """
    d = self._reqs.pop(rseqid)
    if mtype == TMessageType.EXCEPTION:
        x = TApplicationException()
        x.read(iprot)
        iprot.readMessageEnd()
        return d.errback(x)
    result = get_indexed_slices_result()
    result.read(iprot)
    iprot.readMessageEnd()
    # Identity comparison with None (was "!= None", which invokes __ne__).
    if result.success is not None:
        return d.callback(result.success)
    if result.ire is not None:
        return d.errback(result.ire)
    if result.ue is not None:
        return d.errback(result.ue)
    if result.te is not None:
        return d.errback(result.te)
    return d.errback(TApplicationException(TApplicationException.MISSING_RESULT, "get_indexed_slices failed: unknown result"))
def insert(self, key, column_parent, column, consistency_level):
    """
    Write one Column under ``column_parent.column_family`` (and the
    optional ``column_parent.super_column``).

    Parameters:
     - key
     - column_parent
     - column
     - consistency_level
    """
    # Register a Deferred under a fresh sequence id; recv_insert fires it
    # when the matching response frame arrives.
    self._seqid += 1
    deferred = defer.Deferred()
    self._reqs[self._seqid] = deferred
    self.send_insert(key, column_parent, column, consistency_level)
    return deferred
def send_insert(self, key, column_parent, column, consistency_level):
    """Serialise an insert call frame and flush it onto the transport."""
    proto = self._oprot_factory.getProtocol(self._transport)
    proto.writeMessageBegin('insert', TMessageType.CALL, self._seqid)
    call_args = insert_args()
    call_args.key = key
    call_args.column_parent = column_parent
    call_args.column = column
    call_args.consistency_level = consistency_level
    call_args.write(proto)
    proto.writeMessageEnd()
    proto.trans.flush()
def recv_insert(self, iprot, mtype, rseqid):
    """Decode an insert response frame and fire the pending Deferred.

    insert is a void call: on success the Deferred fires with ``None``;
    otherwise it is errbacked with the transported exception.
    """
    d = self._reqs.pop(rseqid)
    if mtype == TMessageType.EXCEPTION:
        x = TApplicationException()
        x.read(iprot)
        iprot.readMessageEnd()
        return d.errback(x)
    result = insert_result()
    result.read(iprot)
    iprot.readMessageEnd()
    # Identity comparison with None (was "!= None", which invokes __ne__).
    if result.ire is not None:
        return d.errback(result.ire)
    if result.ue is not None:
        return d.errback(result.ue)
    if result.te is not None:
        return d.errback(result.te)
    return d.callback(None)
def add(self, key, column_parent, column, consistency_level):
    """
    Increment or decrement a counter column.

    Parameters:
     - key
     - column_parent
     - column
     - consistency_level
    """
    # Register a Deferred under a fresh sequence id; recv_add fires it when
    # the matching response frame arrives.
    self._seqid += 1
    deferred = defer.Deferred()
    self._reqs[self._seqid] = deferred
    self.send_add(key, column_parent, column, consistency_level)
    return deferred
def send_add(self, key, column_parent, column, consistency_level):
    """Serialise an add call frame and flush it onto the transport."""
    proto = self._oprot_factory.getProtocol(self._transport)
    proto.writeMessageBegin('add', TMessageType.CALL, self._seqid)
    call_args = add_args()
    call_args.key = key
    call_args.column_parent = column_parent
    call_args.column = column
    call_args.consistency_level = consistency_level
    call_args.write(proto)
    proto.writeMessageEnd()
    proto.trans.flush()
def recv_add(self, iprot, mtype, rseqid):
    """Decode an add response frame and fire the pending Deferred.

    add is a void call: on success the Deferred fires with ``None``;
    otherwise it is errbacked with the transported exception.
    """
    d = self._reqs.pop(rseqid)
    if mtype == TMessageType.EXCEPTION:
        x = TApplicationException()
        x.read(iprot)
        iprot.readMessageEnd()
        return d.errback(x)
    result = add_result()
    result.read(iprot)
    iprot.readMessageEnd()
    # Identity comparison with None (was "!= None", which invokes __ne__).
    if result.ire is not None:
        return d.errback(result.ire)
    if result.ue is not None:
        return d.errback(result.ue)
    if result.te is not None:
        return d.errback(result.te)
    return d.callback(None)
def remove(self, key, column_path, timestamp, consistency_level):
    """
    Delete data from the row at ``key`` at the granularity given by
    ``column_path`` and ``timestamp``. Everything in ``column_path`` beyond
    the column family is optional: supply only the ColumnFamily to drop the
    whole row, or name a SuperColumn / single Column to delete just that.

    Parameters:
     - key
     - column_path
     - timestamp
     - consistency_level
    """
    # Register a Deferred under a fresh sequence id; recv_remove fires it
    # when the matching response frame arrives.
    self._seqid += 1
    deferred = defer.Deferred()
    self._reqs[self._seqid] = deferred
    self.send_remove(key, column_path, timestamp, consistency_level)
    return deferred
def send_remove(self, key, column_path, timestamp, consistency_level):
    """Serialise a remove call frame and flush it onto the transport."""
    proto = self._oprot_factory.getProtocol(self._transport)
    proto.writeMessageBegin('remove', TMessageType.CALL, self._seqid)
    call_args = remove_args()
    call_args.key = key
    call_args.column_path = column_path
    call_args.timestamp = timestamp
    call_args.consistency_level = consistency_level
    call_args.write(proto)
    proto.writeMessageEnd()
    proto.trans.flush()
def recv_remove(self, iprot, mtype, rseqid):
    """Decode a remove response frame and fire the pending Deferred.

    remove is a void call: on success the Deferred fires with ``None``;
    otherwise it is errbacked with the transported exception.
    """
    d = self._reqs.pop(rseqid)
    if mtype == TMessageType.EXCEPTION:
        x = TApplicationException()
        x.read(iprot)
        iprot.readMessageEnd()
        return d.errback(x)
    result = remove_result()
    result.read(iprot)
    iprot.readMessageEnd()
    # Identity comparison with None (was "!= None", which invokes __ne__).
    if result.ire is not None:
        return d.errback(result.ire)
    if result.ue is not None:
        return d.errback(result.ue)
    if result.te is not None:
        return d.errback(result.te)
    return d.callback(None)
def remove_counter(self, key, path, consistency_level):
    """
    Delete the counter at ``path`` for ``key``.

    Counter deletes are only eventually safe: after removing a counter you
    must wait for the delete to reach every node and be fully compacted
    before issuing any further update to it.

    Parameters:
     - key
     - path
     - consistency_level
    """
    # Register a Deferred under a fresh sequence id; the matching recv_*
    # handler fires it when the response frame arrives.
    self._seqid += 1
    deferred = defer.Deferred()
    self._reqs[self._seqid] = deferred
    self.send_remove_counter(key, path, consistency_level)
    return deferred
def send_remove_counter(self, key, path, consistency_level):
oprot = self._oprot_factory.getProtocol(self._transport)
oprot.writeMessageBegin('remove_counter', TMessageType.CALL, self._seqid)
args = remove_counter_args()
| |
<reponame>jorobledo/StratoPy
import datetime
import os
import attr
from netCDF4 import Dataset
import numpy as np
from pyorbital import astronomy
from pyspectral.near_infrared_reflectance import Calculator
from scipy import interpolate
from . import core
# Absolute path of this package directory; used to locate the bundled
# lat_vec.npy / lon_vec.npy coordinate grids loaded in solar7().
PATH = os.path.abspath(os.path.dirname(__file__))
def read_nc(file_path):
    """
    Reads netCDF files through the netCDF4 library.

    Parameters
    ----------
    file_path: ``str tuple``
        Contains a file path of one or all three paths of
        channels 3, 7 and 13 of the CMIPF GOES-16 product.

    Returns
    -------
    result: ``Goes``
        Goes object wrapping each file's variables, keyed by channel id.

    Raises
    ------
    ValueError
        If the number of paths is neither 1 nor 3, or if the three paths
        do not share the same start date / product.
    """
    # Validate the path count up front: only single-channel or 3-channel
    # (RGB) inputs are supported.
    if len(file_path) not in (1, 3):
        raise ValueError(
            "File path must be a tuple of length 1 or 3 (in case of RGB)."
        )
    if len(file_path) == 3:
        # Check for date and product consistency across the three bands.
        # The start date is the token between "s20" and the next "_".
        files_date = [
            band_path.split("s20", 1)[1].split("_", 1)[0]
            for band_path in file_path
        ]
        if not all(date == files_date[0] for date in files_date):
            # Fixed garbled message ("Start date's from ...").
            raise ValueError("Start dates from all files should be the same.")
        if not all("L2-CMIPF" in path for path in file_path):
            raise ValueError("Files must be from the same product.")
    data = dict()
    for paths in file_path:
        # Channel id (e.g. "M3C03") parsed from the file name.
        channel = paths.split("-")[3].split("_")[0]
        # NOTE(review): the Dataset handle is deliberately left open -- its
        # variables are read lazily by Goes. Confirm lifetime management.
        data[channel] = Dataset(paths, "r").variables
    return Goes(data)
@attr.s(frozen=True, repr=False)
class Goes:
    """Generates an object containing the Day Microphysics state
    according to GOES-16 manual.

    Parameters
    ----------
    data: ``netCDF4.Dataset.variables dict``
        Dictionary with variables data from each channel of the
        GOES Day Microphysics product.
    coordinates: ``tuple`` (default: cut will be south hemisphere)
        (lat_inf, lat_sup, lon_east, lon_west) where:
            lat_inf, latitude of minimal position
            lat_sup, latitude of maximal position
            lon_east, longitude of the eastern boundary
            lon_west, longitude of the western boundary
    """

    # channel id (e.g. "M3C03") -> netCDF variables mapping
    _data = attr.ib(validator=attr.validators.instance_of(dict))
    # (lat_inf, lat_sup, lon_east, lon_west) in degrees
    coordinates = attr.ib(default=(-40.0, 10.0, -37.0, -80.0))
    # per-channel (row0, row1, col0, col1) pixel window, computed below
    _trim_coord = attr.ib(init=False)
    # day-microphysics RGB array, computed below
    RGB = attr.ib(init=False)
    # image acquisition timestamp, computed below
    _img_date = attr.ib(init=False)

    def __repr__(self):
        # Human-readable summary: timestamp plus the band number(s) held.
        _img_date = self._img_date.strftime("%d/%m/%y-%H:%M")
        bands = [int(band.split("C")[1]) for band in self._data]
        if len(bands) == 1:
            return f"GOES Object -- {_img_date}, CH={bands[0]}"
        else:
            return (
                f"GOES Object -- {_img_date}, "
                f"CH={bands[0]}, {bands[1]} and {bands[2]}"
            )

    def _repr_html_(self):
        # Jupyter-notebook HTML rendering of the same summary.
        _img_date = self._img_date.strftime("%d/%m/%y-%H:%M")
        bands = [int(band.split("C")[1]) for band in self._data]
        footer = "<b>-- Goes Object</b>"
        if len(bands) == 1:
            return f"<div>{_img_date}, , CH={bands[0]} {footer}</div>"
        else:
            return (
                f"<div>{_img_date}, , "
                f"CH={bands[0]}, {bands[1]} and {bands[2]} {footer}</div>"
            )

    @_img_date.default
    def _img_date_default(self):
        """Derive the acquisition timestamp from any channel's ``t`` variable."""
        # Using existing channel date (same for all)
        channel_data = list(self._data.values())[0]
        # Img date in sec; the GOES epoch is 2000-01-01 12:00 UTC.
        time_delta = datetime.timedelta(seconds=int(channel_data["t"][:].data))
        date_0 = datetime.datetime(year=2000, month=1, day=1, hour=12)
        return date_0 + time_delta

    @_trim_coord.default
    def _trim_coord_default(self):
        """Convert the lat/lon bounding box into per-channel pixel windows."""
        # Coordinates in deegres
        lat_inf, lat_sup, lon_east, lon_west = self.coordinates
        trim_coordinates = dict()
        for ch_id, dataset in self._data.items():
            # Extract all the variables
            metadata = dataset
            # satellite height
            h = metadata["goes_imager_projection"].perspective_point_height
            # Ellipsoid semi-major / semi-minor axes ("semieje" = semi-axis).
            semieje_may = metadata["goes_imager_projection"].semi_major_axis
            semieje_men = metadata["goes_imager_projection"].semi_minor_axis
            lon_cen = metadata[
                "goes_imager_projection"
            ].longitude_of_projection_origin
            scale_factor = metadata["x"].scale_factor
            offset = np.array(
                [metadata["x"].add_offset, metadata["y"].add_offset]
            )
            # Upper-left corner in scan coordinates.
            pto_sup_izq = core.latlon2scan(
                lat_sup,
                lon_west,
                lon_cen,
                Re=semieje_may,
                Rp=semieje_men,
                h=h,
            )
            # Lower-right corner in scan coordinates.
            pto_inf_der = core.latlon2scan(
                lat_inf,
                lon_east,
                lon_cen,
                Re=semieje_may,
                Rp=semieje_men,
                h=h,
            )
            # Convert scan coordinates to (column, row) pixel indices.
            c0, r0 = core.scan2colfil(
                pto_sup_izq[1],
                pto_sup_izq[0],
                offset[0],
                offset[1],
                scale_factor,
                1,
            )
            c1, r1 = core.scan2colfil(
                pto_inf_der[1],
                pto_inf_der[0],
                offset[0],
                offset[1],
                scale_factor,
                1,
            )
            trim_coordinates[ch_id] = (r0, r1, c0, c1)
        return trim_coordinates

    def trim(self, for_RGB=True):
        """
        This function trims a GOES CMI image according to the width, height
        max west longitude and upper latitude specified on the parameters.
        Default parameters are set to return a South America image.

        Parameters
        ----------
        for_RGB: ``bool``
            When True, channel 3 (1 km resolution) is re-interpolated onto
            the 2 km grid so the three bands align for RGB composition.

        Returns
        -------
        trim_img: ``dict`` of ``numpy.array`` containing the trimmed image
            per channel id.
        """
        trim_img = dict()
        N = 5424  # Image size for psize = 2000 [m]
        for ch_id, dataset in self._data.items():
            image = np.array(dataset["CMI"][:].data)
            # Rescaling factor relative to the 2 km reference grid.
            esc = N / image.shape[0]
            r0, r1, c0, c1 = self._trim_coord[ch_id]
            trim_img[ch_id] = image[r0:r1, c0:c1]
            # Rescale channels with psize = 1000 [m]
            if for_RGB and ch_id == "M3C03":
                x = range(trim_img[ch_id][:].shape[1])
                y = range(trim_img[ch_id][:].shape[0])
                f = interpolate.interp2d(x, y, trim_img[ch_id], kind="cubic")
                xnew = np.arange(x[0], x[-1] + 1, (x[1] - x[0]) / esc)
                ynew = np.arange(y[0], y[-1], (y[1] - y[0]) / esc)
                trim_img[ch_id] = f(xnew, ynew)
        return trim_img

    @RGB.default
    def _RGB_default(self, masked=False):
        """
        This function creates an RGB image that represents the day microphysics
        according to the GOES webpage manual.

        Parameters
        ----------
        masked: bool
            If True, returns a masked RGB
            according to day MP quick guide.
            NOTE(review): as an attrs default this method is invoked without
            arguments, so ``masked`` is always False here -- confirm intent.

        Returns
        -------
        RGB: ``numpy.array``
            RGB day microphysics image (or the single trimmed channel when
            only one band was loaded).
        """
        # Starts with all channels trimmed images
        trimmed_img = self.trim()
        if len(trimmed_img) == 1:
            return np.array(list(trimmed_img.values()))
        else:
            # Asign color to bands and make zenith correction on band 7.
            R = trimmed_img["M3C03"]
            G = solar7(
                self._trim_coord["M3C07"],
                trimmed_img["M3C07"],
                trimmed_img["M3C13"],
            )
            B = trimmed_img["M3C13"]
            # Minimuns and Maximuns
            Rmin = 0
            Rmax = 1
            Gmin = 0
            Gmax = 0.6
            Bmin = 203
            Bmax = 323
            # Normalize the data and copying
            R = (R - Rmin) / (Rmax - Rmin)
            # Gamma correction (exponent 0.4) per the day-microphysics recipe;
            # negative bases would produce NaNs, hence the errstate guard.
            with np.errstate(invalid="ignore"):
                G = ((G - Gmin) / (Gmax - Gmin)) ** 0.4
            B = (B - Bmin) / (Bmax - Bmin)
            RR = np.copy(R)
            BB = np.copy(B)
            GG = np.copy(G)
            # Clip every band into [0, 1].
            RR[RR < 0] = 0.0
            RR[RR > 1] = 1.0
            BB[BB < 0] = 0.0
            BB[BB > 1] = 1.0
            GG[GG < 0] = 0.0
            GG[GG > 1] = 1.0
            # Create the norm RGB
            RRGB = np.stack([RR, GG, BB], axis=2)
            if masked is True:
                RRGB = mask(RRGB)
            return RRGB
def solar7(trim_coord_ch7, ch7, ch13, utc_time=None):
    """
    This function does a zenith angle correction to channel 7.
    This correction is needed for daylight images. It is used
    in RGB method of Goes class.

    Parameters
    ----------
    trim_coord_ch7: ``tuple``
        (r0, r1, c0, c1) pixel window of channel 7, used to slice the
        bundled latitude/longitude vectors.
    ch7: ``numpy.array``
        Trimmed image of channel 7.
    ch13: ``numpy.array``
        Trimmed image of channel 13.
    utc_time: ``datetime.datetime``, optional
        Observation time for the zenith-angle calculation. Defaults to
        2019-01-02 18:00 UTC to preserve the historical behaviour;
        callers should pass the actual image acquisition time.

    Returns
    -------
    ``numpy.array``
        Zenith-corrected 3.9 um reflectance for every pixel of channel 7.
    """
    if utc_time is None:
        # NOTE(review): hard-coded date kept as the default for backward
        # compatibility -- presumably this should be the image date; pass
        # ``utc_time`` explicitly to correct it.
        utc_time = datetime.datetime(2019, 1, 2, 18, 00)
    # Construct paths to the bundled coordinate grids.
    latitude_path = os.path.join(PATH, "lat_vec.npy")
    longitude_path = os.path.join(PATH, "lon_vec.npy")
    # Slice the lat/lon vectors to the trimmed channel-7 window.
    r0, r1, c0, c1 = trim_coord_ch7
    lat = np.load(latitude_path)[r0:r1]
    lon = np.load(longitude_path)[c0:c1]
    # Calculate the solar zenith angle per pixel.
    LON, LAT = np.meshgrid(lon, lat)
    zenith = astronomy.sun_zenith_angle(utc_time, LON, LAT)
    refl39 = Calculator(platform_name="GOES-16", instrument="abi", band="ch7")
    return refl39.reflectance_from_tbs(zenith, ch7, ch13)
def mask(rgb):
"""This function returns a labeled-by-color image according to
the interpretation of the product Day Microphysics
(https://weather.msfc.nasa.gov/sport/training/quickGuides/
rgb/QuickGuide_DtMicroRGB_NASA_SPoRT.pdf)
Parameters:
-----------
rgb: numpy array
Numpy Array object containig the Day Microphysics RGB product
Returns:
-------
img_mask: numpy array
Masked RGB
"""
img_mask = np.zeros(rgb.shape)
# Large drops, Low clouds-> pink/magenta
lc_rfilter = rgb[:, :, 0] > 0.7 # R>0.8
lc_gfilter = rgb[:, :, 1] < 0.4 # G
lc_bfilter = rgb[:, :, 2] > 0.6 # B
lc_filter = lc_rfilter * lc_gfilter * lc_bfilter
# Mask= magenta
img_mask[lc_filter, 0] = 1.0
img_mask[lc_filter, 1] = 0.0
img_mask[lc_filter, 2] = 1.0
# Stratus/Stratoculumus (small drops, low clouds) -> bright green/blue
st_rfilter = (rgb[:, :, 0] > 0.3) * (rgb[:, :, 0] < 0.45) # R
st_gfilter = (rgb[:, :, 1] > 0.5) * (rgb[:, :, 1] < 0.8) # G
st_bfilter = rgb[:, :, 2] < 0.7
st_filter = st_rfilter * st_gfilter * st_bfilter
# Mask=Light blue
img_mask[st_filter, 0] = 0.0
img_mask[st_filter, 1] = 1.0
img_mask[st_filter, 2] = 1.0
# CumuloNimbis (high clouds) -> red, dark orange
cb_rfilter = rgb[:, :, 0] > 0.7 # R
cb_gfilter = rgb[:, :, 1] < 0.3 # G
cb_bfilter = rgb[:, :, 2] < 0.3 # B
cb_filter = cb_rfilter * cb_gfilter * cb_bfilter
# Mask=Red
img_mask[cb_filter, 0] = 1.0
img_mask[cb_filter, 1] = 0.0
img_mask[cb_filter, 2] = 0.0
# Cirrus (high clouds)-> green, dark green
cr_rfilter = rgb[:, :, 0] < 0.3 # R
cr_gfilter = rgb[:, :, 1] > 0.7 # G
cr_bfilter = rgb[:, :, 2] < 0.3 # B
cr_filter = cr_rfilter * cr_gfilter * cr_bfilter
# Mask= Green
img_mask[cr_filter, 0] = 0.0
img_mask[cr_filter, 1] = 1.0
img_mask[cr_filter, 2] = 0.0
# supercooled clouds Thick, small drops, medium clouds-> yellow
super_rfilter = rgb[:, :, 0] > 0.8
super_gfilter = rgb[:, :, 1] > 0.8
super_bfilter = rgb[:, | |
and module.params['encryption_mode'] == 'aws:kms':
params['config'] = botocore.client.Config(signature_version='s3v4')
elif module.params['mode'] in ('get', 'getstr') and sig_4:
params['config'] = botocore.client.Config(signature_version='s3v4')
if module.params['dualstack']:
dualconf = botocore.client.Config(s3={'use_dualstack_endpoint': True})
if 'config' in params:
params['config'] = params['config'].merge(dualconf)
else:
params['config'] = dualconf
return boto3_conn(**params)
def main():
argument_spec = dict(
bucket=dict(required=True),
dest=dict(default=None, type='path'),
encrypt=dict(default=True, type='bool'),
encryption_mode=dict(choices=['AES256', 'aws:kms'], default='AES256'),
expiry=dict(default=600, type='int', aliases=['expiration']),
headers=dict(type='dict'),
marker=dict(default=""),
max_keys=dict(default=1000, type='int', no_log=False),
metadata=dict(type='dict'),
mode=dict(choices=['get', 'put', 'delete', 'create', 'geturl', 'getstr', 'delobj', 'list'], required=True),
object=dict(),
permission=dict(type='list', elements='str', default=['private']),
version=dict(default=None),
overwrite=dict(aliases=['force'], default='always'),
prefix=dict(default=""),
retries=dict(aliases=['retry'], type='int', default=0),
s3_url=dict(aliases=['S3_URL']),
dualstack=dict(default='no', type='bool'),
rgw=dict(default='no', type='bool'),
src=dict(type='path'),
content=dict(),
content_base64=dict(),
ignore_nonexistent_bucket=dict(default=False, type='bool'),
encryption_kms_key_id=dict()
)
module = AnsibleAWSModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[['mode', 'put', ['object']],
['mode', 'get', ['dest', 'object']],
['mode', 'getstr', ['object']],
['mode', 'geturl', ['object']]],
mutually_exclusive=[['content', 'content_base64', 'src']],
)
bucket = module.params.get('bucket')
encrypt = module.params.get('encrypt')
expiry = module.params.get('expiry')
dest = module.params.get('dest', '')
headers = module.params.get('headers')
marker = module.params.get('marker')
max_keys = module.params.get('max_keys')
metadata = module.params.get('metadata')
mode = module.params.get('mode')
obj = module.params.get('object')
version = module.params.get('version')
overwrite = module.params.get('overwrite')
prefix = module.params.get('prefix')
retries = module.params.get('retries')
s3_url = module.params.get('s3_url')
dualstack = module.params.get('dualstack')
rgw = module.params.get('rgw')
src = module.params.get('src')
content = module.params.get('content')
content_base64 = module.params.get('content_base64')
ignore_nonexistent_bucket = module.params.get('ignore_nonexistent_bucket')
object_canned_acl = ["private", "public-read", "public-read-write", "aws-exec-read", "authenticated-read", "bucket-owner-read", "bucket-owner-full-control"]
bucket_canned_acl = ["private", "public-read", "public-read-write", "authenticated-read"]
if overwrite not in ['always', 'never', 'different']:
if module.boolean(overwrite):
overwrite = 'always'
else:
overwrite = 'never'
if overwrite == 'different' and not HAS_MD5:
module.fail_json(msg='overwrite=different is unavailable: ETag calculation requires MD5 support')
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
if region in ('us-east-1', '', None):
# default to US Standard region
location = 'us-east-1'
else:
# Boto uses symbolic names for locations but region strings will
# actually work fine for everything except us-east-1 (US Standard)
location = region
if module.params.get('object'):
obj = module.params['object']
# If there is a top level object, do nothing - if the object starts with /
# remove the leading character to maintain compatibility with Ansible versions < 2.4
if obj.startswith('/'):
obj = obj[1:]
# Bucket deletion does not require obj. Prevents ambiguity with delobj.
if obj and mode == "delete":
module.fail_json(msg='Parameter obj cannot be used with mode=delete')
# allow eucarc environment variables to be used if ansible vars aren't set
if not s3_url and 'S3_URL' in os.environ:
s3_url = os.environ['S3_URL']
if dualstack and s3_url is not None and 'amazonaws.com' not in s3_url:
module.fail_json(msg='dualstack only applies to AWS S3')
if dualstack and not module.botocore_at_least('1.4.45'):
module.fail_json(msg='dualstack requires botocore >= 1.4.45')
# rgw requires an explicit url
if rgw and not s3_url:
module.fail_json(msg='rgw flavour requires s3_url')
# Look at s3_url and tweak connection settings
# if connecting to RGW, Walrus or fakes3
if s3_url:
for key in ['validate_certs', 'security_token', 'profile_name']:
aws_connect_kwargs.pop(key, None)
s3 = get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url)
validate = not ignore_nonexistent_bucket
# separate types of ACLs
bucket_acl = [acl for acl in module.params.get('permission') if acl in bucket_canned_acl]
object_acl = [acl for acl in module.params.get('permission') if acl in object_canned_acl]
error_acl = [acl for acl in module.params.get('permission') if acl not in bucket_canned_acl and acl not in object_canned_acl]
if error_acl:
module.fail_json(msg='Unknown permission specified: %s' % error_acl)
# First, we check to see if the bucket exists, we get "bucket" returned.
bucketrtn = bucket_check(module, s3, bucket, validate=validate)
if validate and mode not in ('create', 'put', 'delete') and not bucketrtn:
module.fail_json(msg="Source bucket cannot be found.")
if mode == 'get':
keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate)
if keyrtn is False:
if version:
module.fail_json(msg="Key %s with version id %s does not exist." % (obj, version))
else:
module.fail_json(msg="Key %s does not exist." % obj)
if dest and path_check(dest) and overwrite != 'always':
if overwrite == 'never':
module.exit_json(msg="Local object already exists and overwrite is disabled.", changed=False)
if etag_compare(module, s3, bucket, obj, version=version, local_file=dest):
module.exit_json(msg="Local and remote object are identical, ignoring. Use overwrite=always parameter to force.", changed=False)
try:
download_s3file(module, s3, bucket, obj, dest, retries, version=version)
except Sigv4Required:
s3 = get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url, sig_4=True)
download_s3file(module, s3, bucket, obj, dest, retries, version=version)
if mode == 'put':
# if putting an object in a bucket yet to be created, acls for the bucket and/or the object may be specified
# these were separated into the variables bucket_acl and object_acl above
if content is None and content_base64 is None and src is None:
module.fail_json('Either content, content_base64 or src must be specified for PUT operations')
if src is not None and not path_check(src):
module.fail_json('Local object "%s" does not exist for PUT operation' % (src))
keyrtn = None
if bucketrtn:
keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate)
else:
# If the bucket doesn't exist we should create it.
# only use valid bucket acls for create_bucket function
module.params['permission'] = bucket_acl
create_bucket(module, s3, bucket, location)
# the content will be uploaded as a byte string, so we must encode it first
bincontent = None
if content is not None:
bincontent = content.encode('utf-8')
if content_base64 is not None:
bincontent = base64.standard_b64decode(content_base64)
if keyrtn and overwrite != 'always':
if overwrite == 'never' or etag_compare(module, s3, bucket, obj, version=version, local_file=src, content=bincontent):
# Return the download URL for the existing object
get_download_url(module, s3, bucket, obj, expiry, changed=False)
# only use valid object acls for the upload_s3file function
module.params['permission'] = object_acl
upload_s3file(module, s3, bucket, obj, expiry, metadata, encrypt, headers, src=src, content=bincontent)
# Delete an object from a bucket, not the entire bucket
if mode == 'delobj':
if obj is None:
module.fail_json(msg="object parameter is required")
if bucket:
deletertn = delete_key(module, s3, bucket, obj)
if deletertn is True:
module.exit_json(msg="Object deleted from bucket %s." % bucket, changed=True)
else:
module.fail_json(msg="Bucket parameter is required.")
# Delete an entire bucket, including all objects in the bucket
if mode == 'delete':
if bucket:
deletertn = delete_bucket(module, s3, bucket)
if deletertn is True:
module.exit_json(msg="Bucket %s and all keys have been deleted." % bucket, changed=True)
else:
module.fail_json(msg="Bucket parameter is required.")
# Support for listing a set of keys
if mode == 'list':
exists = bucket_check(module, s3, bucket)
# If the bucket does not exist then bail out
if not exists:
module.fail_json(msg="Target bucket (%s) cannot be found" % bucket)
list_keys(module, s3, bucket, prefix, marker, max_keys)
# Need to research how to create directories without "populating" a key, so this should just do bucket creation for now.
# WE SHOULD ENABLE SOME WAY OF CREATING AN EMPTY KEY TO CREATE "DIRECTORY" STRUCTURE, AWS CONSOLE DOES THIS.
if mode == 'create':
# if both creating a bucket and putting an object in it, acls for the bucket and/or the object may be specified
# these were separated above into the variables bucket_acl and object_acl
if bucket and not obj:
if bucketrtn:
module.exit_json(msg="Bucket already exists.", changed=False)
else:
# only use valid bucket acls when creating the bucket
module.params['permission'] = bucket_acl
module.exit_json(msg="Bucket created successfully", changed=create_bucket(module, s3, bucket, location))
if bucket and obj:
if obj.endswith('/'):
dirobj = obj
else:
dirobj = obj + "/"
if bucketrtn:
if key_check(module, s3, bucket, dirobj):
module.exit_json(msg="Bucket %s and key %s already exists." % (bucket, obj), changed=False)
else:
# setting valid object acls for the create_dirkey function
module.params['permission'] = object_acl
create_dirkey(module, s3, bucket, dirobj, encrypt)
else:
# only use valid bucket acls for the create_bucket function
module.params['permission'] = bucket_acl
created = create_bucket(module, s3, bucket, location)
# only use valid object acls for the create_dirkey function
module.params['permission'] = object_acl
create_dirkey(module, s3, bucket, dirobj, encrypt)
# Support for grabbing the time-expired URL for an object in S3/Walrus.
if mode == 'geturl':
if not bucket and not obj:
module.fail_json(msg="Bucket and Object parameters must be set")
keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate)
if keyrtn:
get_download_url(module, s3, bucket, obj, expiry)
else:
module.fail_json(msg="Key %s does not exist." % obj)
if mode == 'getstr':
if bucket and obj:
keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate)
if keyrtn:
try:
download_s3str(module, s3, bucket, obj, version=version)
except Sigv4Required:
s3 = get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url, sig_4=True)
download_s3str(module, s3, bucket, obj, version=version)
elif version is not None:
module.fail_json(msg="Key %s with version id %s does not exist." % (obj, version))
else:
module.fail_json(msg="Key %s | |
<filename>laplace/baselaplace.py
from abc import ABC, abstractmethod, abstractproperty
from math import sqrt, pi
import numpy as np
import torch
from torch.nn.utils import parameters_to_vector, vector_to_parameters
from torch.distributions import MultivariateNormal, Dirichlet, Normal
from laplace.utils import parameters_per_layer, invsqrt_precision, get_nll, validate
from laplace.matrix import Kron
from laplace.curvature import BackPackGGN
__all__ = ['BaseLaplace', 'FullLaplace', 'KronLaplace', 'DiagLaplace']
class BaseLaplace(ABC):
"""Baseclass for all Laplace approximations in this library.
Subclasses need to specify how the Hessian approximation is initialized,
how to add up curvature over training data, how to sample from the
Laplace approximation, and how to compute the functional variance.
A Laplace approximation is represented by a MAP which is given by the
`model` parameter and a posterior precision or covariance specifying
a Gaussian distribution \\(\\mathcal{N}(\\theta_{MAP}, P^{-1})\\).
The goal of this class is to compute the posterior precision \\(P\\)
which sums as
\\[
P = \\sum_{n=1}^N \\nabla^2_\\theta \\log p(\\mathcal{D}_n \\mid \\theta)
\\vert_{\\theta_{MAP}} + \\nabla^2_\\theta \\log p(\\theta) \\vert_{\\theta_{MAP}}.
\\]
Every subclass implements different approximations to the log likelihood Hessians,
for example, a diagonal one. The prior is assumed to be Gaussian and therefore we have
a simple form for \\(\\nabla^2_\\theta \\log p(\\theta) \\vert_{\\theta_{MAP}} = P_0 \\).
In particular, we assume a scalar, layer-wise, or diagonal prior precision so that in
all cases \\(P_0 = \\textrm{diag}(p_0)\\) and the structure of \\(p_0\\) can be varied.
Parameters
----------
model : torch.nn.Module
likelihood : {'classification', 'regression'}
determines the log likelihood Hessian approximation
sigma_noise : torch.Tensor or float, default=1
observation noise for the regression setting; must be 1 for classification
prior_precision : torch.Tensor or float, default=1
prior precision of a Gaussian prior (= weight decay);
can be scalar, per-layer, or diagonal in the most general case
prior_mean : torch.Tensor or float, default=0
prior mean of a Gaussian prior, useful for continual learning
temperature : float, default=1
temperature of the likelihood; lower temperature leads to more
concentrated posterior and vice versa.
backend : subclasses of `laplace.curvature.CurvatureInterface`
backend for access to curvature/Hessian approximations
backend_kwargs : dict, default=None
arguments passed to the backend on initialization, for example to
set the number of MC samples for stochastic approximations.
"""
def __init__(self, model, likelihood, sigma_noise=1., prior_precision=1.,
             prior_mean=0., temperature=1., backend=BackPackGGN, backend_kwargs=None):
    """Set up the Laplace approximation state around the MAP of `model`.

    Raises a ValueError for an unknown likelihood, or when a non-unit
    observation noise is requested outside the regression setting.
    """
    if likelihood not in ['classification', 'regression']:
        raise ValueError(f'Invalid likelihood type {likelihood}')

    self.model = model
    self._device = next(model.parameters()).device

    # Posterior mode: a detached copy of the current flattened parameters.
    self.mean = parameters_to_vector(self.model.parameters()).detach()
    self.n_params = len(self.mean)
    self.n_layers = len(list(self.model.parameters()))

    # Prior configuration (scalar, per-layer, or diagonal precision).
    self.prior_precision = prior_precision
    self.prior_mean = prior_mean

    if sigma_noise != 1 and likelihood != 'regression':
        raise ValueError('Sigma noise != 1 only available for regression.')
    self.likelihood = likelihood
    self.sigma_noise = sigma_noise
    self.temperature = temperature

    # The curvature backend is built lazily on first access (see `backend`).
    self._backend = None
    self._backend_cls = backend
    self._backend_kwargs = dict() if backend_kwargs is None else backend_kwargs

    # Accumulators populated by `fit()`: Hessian approximation, summed loss,
    # output dimensionality, and data-set size.
    self.H = None
    self.loss = 0.
    self.n_outputs = None
    self.n_data = None
@property
def backend(self):
    """Curvature backend instance, constructed lazily on first access."""
    if self._backend is not None:
        return self._backend
    self._backend = self._backend_cls(self.model, self.likelihood,
                                      **self._backend_kwargs)
    return self._backend
@abstractmethod
def _init_H(self):
    """Initialize the Hessian approximation `self.H` (subclass-specific shape)."""
    pass
@abstractmethod
def _curv_closure(self, X, y, N):
    """Return (batch loss, batch curvature) for inputs `X`, targets `y`, data size `N`."""
    pass
def _check_fit(self):
    """Raise AttributeError if `fit()` has not been run yet."""
    fitted = self.H is not None
    if not fitted:
        raise AttributeError('Laplace not fitted. Run fit() first.')
def fit(self, train_loader):
    """Fit the local Laplace approximation at the parameters of the model.

    Parameters
    ----------
    train_loader : torch.data.utils.DataLoader
        each iterate is a training batch (X, y);
        `train_loader.dataset` needs to be set to access the data-set size.
    """
    if self.H is not None:
        raise ValueError('Already fit.')

    self._init_H()
    self.model.eval()

    # Infer output dimensionality from a single-example forward pass.
    first_inputs, _ = next(iter(train_loader))
    with torch.no_grad():
        self.n_outputs = self.model(first_inputs[:1].to(self._device)).shape[-1]
    setattr(self.model, 'output_size', self.n_outputs)

    # Accumulate loss and curvature over all training batches.
    n_data = len(train_loader.dataset)
    for inputs, targets in train_loader:
        self.model.zero_grad()
        inputs, targets = inputs.to(self._device), targets.to(self._device)
        batch_loss, batch_H = self._curv_closure(inputs, targets, n_data)
        self.loss += batch_loss
        self.H += batch_H
    self.n_data = n_data
def log_marginal_likelihood(self, prior_precision=None, sigma_noise=None):
    """Laplace approximation to the log marginal likelihood.

    Requires a previous call to `fit()`. The result is differentiable in
    `prior_precision` and `sigma_noise` if those tensors carry gradients,
    which supports iterative (e.g. gradient-based) hyperparameter tuning.

    Parameters
    ----------
    prior_precision : torch.Tensor, optional
        overrides the stored prior precision when given
    sigma_noise : torch.Tensor or float, optional
        overrides the stored observation noise; regression only

    Returns
    -------
    log_marglik : torch.Tensor
    """
    self._check_fit()

    # Optionally update hyperparameters in place before evaluating.
    if prior_precision is not None:
        self.prior_precision = prior_precision
    if sigma_noise is not None:
        if self.likelihood != 'regression':
            raise ValueError('Can only change sigma_noise for regression.')
        self.sigma_noise = sigma_noise

    # log p(D) ≈ log p(D|θ_MAP) - 0.5 * (log-det ratio + scatter term).
    occam_factor = self.log_det_ratio + self.scatter
    return self.log_likelihood - 0.5 * occam_factor
@property
def log_likelihood(self):
    """Training log likelihood, computed on demand after `fit()`.

    Derived from the accumulated loss so it stays differentiable in, for
    example, the observation noise (regression) for iterative updates.

    Returns
    -------
    log_likelihood : torch.Tensor
    """
    self._check_fit()
    factor = - self._H_factor
    if self.likelihood != 'regression':
        # Classification: cross-entropy is already the negative log Categorical.
        return factor * self.loss
    # Regression: the accumulated loss is plain MSE, so the Gaussian
    # normalization constant must be added explicitly.
    normalizer = self.n_data * self.n_outputs * torch.log(self.sigma_noise * sqrt(2 * pi))
    return factor * self.loss - normalizer
def __call__(self, x, pred_type='glm', link_approx='probit', n_samples=100):
    """Compute the posterior predictive on input data `x`.

    Parameters
    ----------
    x : torch.Tensor
        `(batch_size, input_shape)`
    pred_type : {'glm', 'nn'}, default='glm'
        type of posterior predictive, linearized GLM predictive or neural
        network sampling predictive. The GLM predictive is consistent with
        the curvature approximations used here.
    link_approx : {'mc', 'probit', 'bridge'}
        how to approximate the classification link function for the `'glm'`.
        For `pred_type='nn'`, only 'mc' is possible.
    n_samples : int
        number of samples for `link_approx='mc'`.

    Returns
    -------
    predictive : torch.Tensor or Tuple[torch.Tensor]
        For `likelihood='classification'`, a torch.Tensor is returned with
        a distribution over classes (similar to a Softmax).
        For `likelihood='regression'`, a tuple of torch.Tensor is returned
        with the mean and the predictive variance.
    """
    self._check_fit()
    if pred_type not in ['glm', 'nn']:
        raise ValueError('Only glm and nn supported as prediction types.')
    if link_approx not in ['mc', 'probit', 'bridge']:
        raise ValueError(f'Unsupported link approximation {link_approx}.')
    if pred_type == 'glm':
        f_mu, f_var = self._glm_predictive_distribution(x)
        # regression
        if self.likelihood == 'regression':
            return f_mu, f_var
        # classification
        if link_approx == 'mc':
            try:
                dist = MultivariateNormal(f_mu, f_var)
            except (RuntimeError, ValueError):
                # Covariance not positive definite (Cholesky failure /
                # validation error): fall back to an independent Normal
                # over the marginal variances. Was a bare `except:` before,
                # which also swallowed KeyboardInterrupt and real bugs.
                dist = Normal(f_mu, torch.diagonal(f_var, dim1=1, dim2=2).sqrt())
            return torch.softmax(dist.sample((n_samples,)), dim=-1).mean(dim=0)
        elif link_approx == 'probit':
            # Probit approximation to the softmax-Gaussian integral.
            kappa = 1 / torch.sqrt(1. + np.pi / 8 * f_var.diagonal(dim1=1, dim2=2))
            return torch.softmax(kappa * f_mu, dim=-1)
        elif link_approx == 'bridge':
            # Laplace bridge: map the Gaussian over logits to a Dirichlet.
            K = f_mu.size(-1)
            f_var_diag = torch.diagonal(f_var, dim1=1, dim2=2)
            sum_exp = torch.sum(torch.exp(-f_mu), dim=1).unsqueeze(-1)
            alpha = 1/f_var_diag * (1 - 2/K + torch.exp(f_mu)/(K**2) * sum_exp)
            dist = Dirichlet(alpha)
            # nan_to_num guards degenerate concentrations.
            return torch.nan_to_num(dist.mean, nan=1.0)
    else:
        # 'nn': Monte-Carlo over weight samples.
        samples = self._nn_predictive_samples(x, n_samples)
        if self.likelihood == 'regression':
            return samples.mean(dim=0), samples.var(dim=0)
        return samples.mean(dim=0)
def predictive(self, x, pred_type='glm', link_approx='mc', n_samples=100):
    """Alias of `__call__`; note the default `link_approx` is 'mc' here."""
    return self.__call__(x, pred_type=pred_type, link_approx=link_approx,
                         n_samples=n_samples)
def predictive_samples(self, x, pred_type='glm', n_samples=100):
    """Sample from the posterior predictive on input data `x`.

    Can be used, for example, for Thompson sampling.

    Parameters
    ----------
    x : torch.Tensor
        input data `(batch_size, input_shape)`
    pred_type : {'glm', 'nn'}, default='glm'
        linearized GLM predictive or neural network sampling predictive;
        the GLM predictive is consistent with the curvature approximations.
    n_samples : int
        number of samples

    Returns
    -------
    samples : torch.Tensor
        samples `(n_samples, batch_size, output_shape)`
    """
    self._check_fit()
    if pred_type not in ['glm', 'nn']:
        raise ValueError('Only glm and nn supported as prediction types.')

    if pred_type == 'nn':
        return self._nn_predictive_samples(x, n_samples)

    # 'glm': draw function samples from the linearized predictive Gaussian.
    f_mu, f_var = self._glm_predictive_distribution(x)
    assert f_var.shape == torch.Size([f_mu.shape[0], f_mu.shape[1], f_mu.shape[1]])
    samples = MultivariateNormal(f_mu, f_var).sample((n_samples,))
    if self.likelihood == 'regression':
        return samples
    return torch.softmax(samples, dim=-1)
@torch.enable_grad()
def _glm_predictive_distribution(self, X):
    """Linearized (GLM) predictive: mean and covariance of f(X)."""
    jacobians, f_mu = self.backend.jacobians(self.model, X)
    f_var = self.functional_variance(jacobians)
    return f_mu.detach(), f_var.detach()
def _nn_predictive_samples(self, X, n_samples=100):
    """Monte-Carlo predictive: load weight samples into the net and forward `X`."""
    outputs = []
    for theta in self.sample(n_samples):
        vector_to_parameters(theta, self.model.parameters())
        outputs.append(self.model(X.to(self._device)).detach())
    # Restore the MAP parameters after sampling.
    vector_to_parameters(self.mean, self.model.parameters())
    stacked = torch.stack(outputs)
    if self.likelihood == 'classification':
        stacked = torch.softmax(stacked, dim=-1)
    return stacked
@abstractmethod
def functional_variance(self, Jacs):
"""Compute functional variance for the `'glm'` predictive:
`f_var[i] = Jacs[i] @ P.inv() @ Jacs[i].T`, which | |
<filename>python/hist-interpreter.py
import argparse
import os
import timeit
import numpy as np
import matplotlib.pyplot as plt
# Wall-clock start for optional end-to-end timing of the script.
start = timeit.default_timer()
# Command-line interface: the script sweeps measurement rates from
# --begin to --end in --step_size increments and reads one CSV per rate.
parser = argparse.ArgumentParser()
parser.add_argument('-b', '--begin', help="Start value", required=True)
parser.add_argument('-e', '--end', help="End value", required=True)
parser.add_argument('-s', '--step_size', help="The size ot the steps", required=True)
parser.add_argument('-n', '--name', help="The common part of all file names (without <rate>.csv)", default="hist")
parser.add_argument('-r', '--rate_name',
                    help="The common part of all statistics file names (without -<rate>-stats.csv)",
                    default="measurement-rate")
parser.add_argument('-i', '--image_title', help="Title displayed on all created images", default="Measurement")
parser.add_argument('-c', '--bucket_size', help="Bucket/Container size of the histogram", required=True)
parser.add_argument('-t', '--tex', help="Use TeX for typesetting", action='store_true')
parser.add_argument('-x', '--image_extension', help="Select the image type. Default is png", default="png")
args = parser.parse_args()
#########################################
# additional values which can be modified
# format strings for ccdf_df and cdf_df
FORMAT_PRIMARY = 'b-'  # format for df
FORMAT_SECONDARY = 'r--'  # format for cdf/ccdf
BASELINE = None  # Baseline to be drawn in box/violin plot -> number as y value or None for no baseline
FONT_SIZE = None  # Font size for figures, if None standard font size is used
# colors taken from:
# https://stackoverflow.com/a/287944
class bcolors:
    """ANSI escape sequences for colored/styled terminal output."""
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'  # reset all attributes
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
def ask_yes_no(question):
    """
    Ask the user a yes or no question via commandline
    :param question: The question to be displayed
    :return: True if the user answered with yes, False otherwise
    """
    affirmative = ['true', '1', 't', 'y', 'yes']
    negative = ['false', '0', 'f', 'n', 'no']
    # Re-prompt until the (case-insensitive) answer is recognized.
    while True:
        print(question + " [y/n] ")
        answer = input().lower()
        if answer in affirmative:
            return True
        if answer in negative:
            return False
        print("Please write yes or no.")
def configure_plt_font():
    """
    If --tex flag is set, use TeX fonts for texts
    """
    if args.tex:
        # Render all figure text through LaTeX with a serif font.
        plt.rc('text', usetex=True)
        plt.rc('font', family='serif')
    if FONT_SIZE is not None:
        # Apply the script-wide font-size override (see config at file top).
        plt.rcParams.update({'font.size': FONT_SIZE})
def save_figure(plt, filename):
    """
    Saves the given figure in a subfolder called image under the given name
    Old files are never overwritten, if the name is not unique, a number will be incremented
    :param plt: The plot to save
    :param filename: The name of the file
    """
    directory = 'images'
    if not os.path.exists(directory):
        os.makedirs(directory)

    def _candidate(index):
        # Path of the form images/<filename><index>.<ext>
        return directory + '/{}{:d}.{}'.format(filename, index, args.image_extension)

    # Find the first free index so existing images are never overwritten.
    ctr = 0
    while os.path.exists(_candidate(ctr)):
        ctr += 1
    target = _candidate(ctr)
    print('Saving to ' + target)
    plt.savefig(target, dpi=200)
    print('Done saving')
def compute_ccdf(compressed_hist, index):
    """
    Computes the CCDF directly from MoonSniff's histogram representation
    :param compressed_hist: Array of MoonSniff histograms (compressed)
    :param index: Index within the compressed_hist
    :return:
        x: values (latencies) in [us]<br>
        y: values (amount of data-points) normalized to 1<br>
        inverse: 1-cumsum(y) element-wise (complementary CDF)
    """
    # Each compressed histogram is a (bucket values, bucket counts) pair.
    values, amounts = compressed_hist[index]
    # Convert bucket values from [ns] to [us]; np.divide allocates a new
    # array, so no defensive copy of the input lists is needed.
    x = np.divide(values, 1000)
    # Normalize counts so they sum to 1.
    y = np.divide(amounts, np.sum(amounts))
    # CCDF: fraction of samples above each bucket. (A stray debug print of
    # the full normalized histogram was removed here.)
    inverse = 1.0 - np.cumsum(y)
    return x, y, inverse
def plot_ccdf(compressed_hist, points, bucket_size, xlim, name):
    """
    Plots the complementary cumulative distribution function for all histograms
    on a single semilog-y figure, then saves it via save_figure.
    :param compressed_hist: Array of MoonSniff histograms (compressed)
    :param points: Values for the x axis
    :param bucket_size: Size of the buckets
    :param xlim: If used limits the shown area (x-axis) to this array, e.g. [0, 1] will only show
    the graph between 0 and 1
    :param name: The name of the image
    """
    # One CCDF curve per histogram; curve order determines legend order.
    for i in range(0, len(compressed_hist)):
        x, y, inverse = compute_ccdf(compressed_hist, i)
        plt.semilogy(x, inverse)
    axes = plt.gca()
    # Dashed reference line at 0.01, i.e. the 99th percentile level.
    percent_line = axes.axhline(0.01, linestyle='dashed', color='grey', linewidth=0.8)
    plt.title(name + " (" + str(bucket_size) + " ns buckets)")
    plt.xlabel("Latency [us]")
    plt.ylabel("Normalized prevalence")
    # Legend entries must match the plotting order above: curves first,
    # then the percentile line.
    plt.legend([str(point) + " Mbit/s" for point in points] + ['99th percentile'])
    if len(xlim) == 2:
        axes = plt.gca()
        axes.set_xlim(xlim)
        # axes.set_ylim([0, 1])
    plt.tight_layout()
    save_figure(plt, 'ccdf-' + str(bucket_size) + 'b')
    # plt.show()
    plt.close()
def plot_cdf_df(compressed_hist, points, bucket_size, xlim, name):
    """
    Plots the cumulative distribution function and the plain distribution function of a single histogram
    (always the first one, index 0).
    :param compressed_hist: Array of MoonSniff histograms (compressed)
    :param points: Values for the x axis
    :param bucket_size: Size of the buckets
    :param xlim: If used limits the shown area (x-axis) to this array, e.g. [0, 1] will only show
    the graph between 0 and 1
    :param name: The name of the image
    """
    # NOTE(review): the next three prints look like leftover debugging
    # output — consider removing or demoting to a verbose/log flag.
    print(compressed_hist)
    print(compressed_hist[0])
    x = compressed_hist[0][0].copy()
    y = compressed_hist[0][1].copy()
    # convert to [us]
    x = np.divide(x, 1000)
    # normalize to 1
    y = np.divide(y, sum(y))
    print(y)
    cummulative_y = np.cumsum(y)
    # df in the primary style, cdf in the secondary style (see file config).
    plt.plot(x, y, FORMAT_PRIMARY)
    plt.plot(x, cummulative_y, FORMAT_SECONDARY)
    if len(xlim) == 2:
        axes = plt.gca()
        axes.set_xlim(xlim)
        # axes.set_ylim([0, 1])
    plt.title(name + " (" + str(bucket_size) + " ns buckets, " + str(points[0]) + " Mbit/s)")
    plt.xlabel("Latency [us]")
    plt.ylabel("Normalized prevalence")
    plt.legend(['df', 'cdf'])
    print('saving figure')
    plt.tight_layout()
    save_figure(plt, 'compare-' + str(bucket_size) + 'b-cdf')
    print('finished saving figure, now plotting')
    # plt.show()
    plt.close()
def plot_ccdf_df(compressed_hist, points, bucket_size, xlim, name):
    """
    Plots the complementary cumulative distribution function and the plain distribution function
    of the first histogram (index 0) on a semilog-y figure.
    :param compressed_hist: Array of MoonSniff histograms (compressed)
    :param points: Values for the x axis
    :param bucket_size: Size of the buckets
    :param xlim: If used limits the shown area (x-axis) to this array, e.g. [0, 1] will only show
    :param name: The name of the image
    """
    x, y, inverse = compute_ccdf(compressed_hist, 0)
    # df in the primary style, ccdf in the secondary style (see file config).
    plt.semilogy(x, y, FORMAT_PRIMARY)
    plt.semilogy(x, inverse, FORMAT_SECONDARY)
    if len(xlim) == 2:
        axes = plt.gca()
        axes.set_xlim(xlim)
        # axes.set_ylim([0, 1])
    plt.title(name + " (" + str(bucket_size) + " ns buckets, " + str(points[0]) + " Mbit/s)")
    plt.xlabel("Latency [us]")
    plt.ylabel("Normalized prevalence")
    plt.legend(['df', 'ccdf'])
    print('saving figure')
    plt.tight_layout()
    save_figure(plt, 'compare-' + str(bucket_size) + 'b-ccdf')
    print('finished saving figure, now plotting')
    # plt.show()
    plt.close()
def box_graph(histograms, xpoints, bucket_size, name):
    """
    Plot a box graph representing all histograms
    :param histograms: The input histograms (expanded)
    :param xpoints: Values for the x axis
    :param bucket_size: Size of the buckets
    :param name: The name of the image
    """
    print("now plotting .. ")
    # One tick per histogram, 1-based to match boxplot positions.
    ticks = np.arange(1, len(xpoints) + 1, 1)
    # plt.figure(num=None, figsize=(14, 6), dpi=80, facecolor='w', edgecolor='k')
    # data set is pretty big, so fliers just cluster up the plot
    # instead draw only the min and max
    box_parts = plt.boxplot(histograms, showmeans=False, widths=0.7, whis=[5, 95], showfliers=False)
    plt.setp(box_parts['boxes'], label='boxes')
    plt.setp(box_parts['medians'], label='median')
    plt.setp(box_parts['fliers'], label='max/min')
    plt.title(name + " (" + str(bucket_size) + " ns buckets)")
    plt.xlabel("Average line-rate [Mbit/s]")
    plt.ylabel("Latency [ns]")
    plt.xticks(ticks, xpoints, rotation='vertical')
    # compute mins and maxes
    # NOTE(review): taking first/last element assumes each expanded
    # histogram is sorted ascending — confirm against read_samples.
    maxes = [hist[len(hist) - 1] for hist in histograms]
    mins = [hist[0] for hist in histograms]
    max_line = plt.plot(ticks, maxes, color='black', marker='o', linestyle='None', fillstyle='none', label='max/min')
    plt.plot(ticks, mins, color='black', marker='o', linestyle='None', fillstyle='none')
    if BASELINE is not None:
        # Optional horizontal reference line configured at file top.
        axes = plt.gca()
        base_line = axes.axhline(BASELINE, linestyle='dashed', color='blue', linewidth=0.8, label='baseline')
        plt.legend(handles=[box_parts['medians'][0], max_line[0], base_line])
    else:
        plt.legend(handles=[box_parts['medians'][0], max_line[0]])
    # ax = plt.gca()
    # ax.set_xticks(ticks)
    # ax.set_xticklabels(xpoints)
    plt.tight_layout()
    save_figure(plt, 'box-' + str(bucket_size) + 'b')
    # plt.show()
    plt.close()
def violin_graph(histograms, xpoints, bucket_size, name):
    """
    Plot a violin graph representing all histograms
    :param histograms: The input histograms (expanded)
    :param xpoints: Values for the x axis
    :param bucket_size: Size of the buckets
    :param name: The name of the image
    """
    print("now plotting .. ")
    # One tick per histogram, 1-based to match violinplot positions.
    ticks = np.arange(1, len(xpoints) + 1, 1)
    # plt.figure(num=None, figsize=(14, 6), dpi=80, facecolor='w', edgecolor='k')
    violin_parts = plt.violinplot(histograms, showmeans=True, showextrema=True, showmedians=True, widths=0.7)
    # Style the individual violin components returned by violinplot.
    plt.setp(violin_parts['bodies'], facecolor='red', edgecolor='black')
    plt.setp(violin_parts['cmedians'], color='black', linestyle='dotted', label='median')
    plt.setp(violin_parts['cmeans'], color='blue', label='mean')
    plt.setp(violin_parts['cmaxes'], color='black')
    plt.setp(violin_parts['cmins'], color='black')
    plt.setp(violin_parts['cbars'], color='black')
    plt.title(name + " (" + str(bucket_size) + " ns buckets)")
    plt.xlabel("Average line-rate [Mbit/s]")
    plt.ylabel("Latency [ns]")
    plt.xticks(ticks, xpoints, rotation='vertical')
    plt.legend(handles=[violin_parts['cmeans'], violin_parts['cmedians']])
    # ax = plt.gca()
    # ax.set_xticks(ticks)
    # ax.set_xticklabels(xpoints)
    plt.tight_layout()
    save_figure(plt, 'violin-' + str(bucket_size) + 'b')
    # plt.show()
    plt.close()
def read_samples():
"""
Reads the MoonSniff output and wraps it in python lists for later processing
:return:
hists: Uncompressed histogram, e.g. 10, 1; 14, 3 -> [10, 14, 14, 14]<br>
points: X-axis values (average line-rate associated to each histogram)<br>
compressed_hist: Compressed histogram, [[list of values], [list of amounts]]
"""
skip_rates = False
step_size = int(args.step_size)
begin = int(args.begin)
end = int(args.end) + step_size
hists = list()
points = list()
# list is not expanded as hists
compressed_hist = list()
for rate in range(begin, end, step_size):
value_lst = list()
amount_lst = list()
print(rate)
file = open(args.name + "-" + str(rate) + ".csv")
for line in file:
vals = line.split(",")
number = int(vals[0])
amount = int(vals[1])
value_lst.append(number)
amount_lst.append(amount)
# complete histogram without downsampling
compressed_hist.append((value_lst, amount_lst))
| |
only authed requests get through (default: True)
optionally pass in Auth subclass
error: message to use in case of unhandled exception
methods: HTTP methods to allow through (default: all)
only: only wrap the method to provide
.abort()/unhandled-exception support
throttle: rate-limit clients (default: True)
optionally pass in Throttle subclass, depends on auth
throttle_suffix: suffix the throttle key
throttle the view seperately
'''
return util.valid(
util.defaults({} if opts is None else opts, self.defaults_dispatch),
self.defaults_dispatch.keys()
)
def redirect(self, request, status, location, **headers):
'''redirect(request, 302, '/location') -> response
Create a redirect response.
'''
return self.response(request, status, Location=location, **headers)
def register(self, view, url=None):
'''register(myview)
Register the view with the API.
url can be just a regex or a tuple/list of (regex, kwargs, name,
prefix). It is essentially the same calling convention as
django.conf.urls.url but minus the view parameter.
'''
if url is not None:
if not isinstance(url, basestring) \
and not (isinstance(url, (list, tuple)) and 1 <= len(url) <= 4):
raise self.Error(
'register called with invalid url param: %r' % (url, )
)
if hasattr(view, 'urls'):
raise self.Error(
'Will not register a resource to a url '
'if it has an urls attribute'
)
self.registry.append((view, url))
@property
def registered_resources(self):
'''
Iterate over the registered resources.
for resource in api.registered_resources:
...
Access only the resources without worrying about details of the
registry.
'''
for view in self.registered_views:
if inspect.isfunction(view):
continue
yield view
@property
def registered_views(self):
'''
Iterate over the registered views.
for view in api.registered_views:
...
Access the views without worrying about the registry data structure.
'''
for view, url in self.registry:
yield view
for api in self.included:
for view, url in api.registry:
yield view
for api in self.included_deep:
for view in api.registered_views:
yield view
    def resolve(self, paths):
        '''resolve([path, ...]) -> resource, [1, ...]

        Resolve a URI to an instance as given by the resource pointed to:

            from krankshaft.resource import DjangoModelResource
            @api
            class MyModel(DjangoModelResource):
                model = get_model('app', 'MyModel')

            # resource = <instance MyModelResource>
            # ids = [1]
            resource, ids = api.resolve(['/api/v1/mymodel/1/'])

        If you're using a resource that adheres to the conventions of
        krankshaft (like DjangoModelResource) then you can simply do
        resource.fetch(*ids) to retrieve the instances for those ids.
        This is required for linked URI representations of models to work
        with resources.

        The assumption is that the URL defines a single capturing
        expression around the primary key of the model; the captured value
        may arrive as either a positional or a keyword argument (the
        keyword's name does not matter).

        Order of input paths is preserved on output, so
        zip(paths, ids, resource.fetch(*ids)) works as expected.

        Raises self.ResolveError when no paths are given, when a path does
        not map to a resource, or when the paths span multiple resources.
        '''
        from django.core.urlresolvers import Resolver404, resolve
        if not paths:
            raise self.ResolveError('No paths given to resolve')
        resource = None
        ids = []
        for path in paths:
            try:
                view, args, kwargs = resolve(path)
            except Resolver404:
                view = None
            # The resolved view either carries a .resource attribute, or is
            # a bound method whose im_self (Python 2) is the resource.
            view_resource = \
                getattr(view, 'resource', None) \
                or getattr(view, 'im_self', None)
            if not view or not view_resource:
                raise self.ResolveError(
                    'Unable to find a resource for path: %s' % path
                )
            # Single capturing group assumed: take it positionally or by
            # keyword.  NOTE(review): kwargs.values()[0] relies on Python 2
            # dict.values() returning a list; Python 3 would need
            # list(kwargs.values())[0].
            ids.append(args[0] if args else kwargs.values()[0])
            if resource is None:
                resource = view_resource
            # All paths must resolve to the same resource.
            if resource != view_resource:
                raise self.ResolveError(
                    'Multiple resources found for given paths'
                )
        return resource, ids
def response(self, request, status, content=None, **headers):
'''response(request, 200) -> response
Create a response object.
Header name containing underscores will be changed to dash in order to
make it less of a burden syntactically.
Content_Type
Becomes:
Content-Type
'''
from django import http
if status in (301, 302):
location = headers.pop('Location', '')
if status == 301:
response = http.HttpResponsePermanentRedirect(location)
elif status == 302:
response = http.HttpResponseRedirect(location)
else:
response = http.HttpResponse(status=status)
for name, val in headers.items():
response[util.kw_as_header(name)] = val
if content:
response.content = content
return self.hook_response(response)
def reverse(self, name, *args, **kwargs):
'''reverse('myendpoint') -> '/url/for/endpoint/'
Simply a shortcut for using endpoint() on a name so it maps easily to
the name of the endpoint you've decorated with the api.
api = API('v1')
@api(url='^endpoint/$')
def endpoint(request):
...
urlpatterns = patterns('',
url('^api/', include(api.urls)),
)
api.reverse('endpoint') -> '/api/v1/endpoint/'
Of course you can use the standard way by doing:
from django.core.urlresolvers import reverse
reverse(api.endpoint('endpoint'))
Or hardcode it (which isn't very DRY, but...):
reverse('api_v1_endpoint')
'''
from django.core.urlresolvers import reverse
return reverse(self.endpoint(name), *args, **kwargs)
def route(self, obj, request, args, kwargs):
'''route(obj, request, args, kwargs) -> response
Route a request to given obj. If a route method exists on the object,
simply forward control to it. Otherwise, do a simple routing method
based on the HTTP method of the request.
Example:
@api
class SimpleResource(object):
def all(self, request, *args, **kwargs):
...
def route(self, request, args, kwargs):
return self.all(request, *args, **kwargs)
Example with default routing:
@api
class MethodResource(object):
def get(self, request):
...
def post(self, request):
...
If obj is a dictionary, you can specify the handling of each method
specifically.
methods = {
'post': self.post,
}
return api.route(methods, request, args, kwargs)
'''
# assume its an instance of a class
if hasattr(obj, 'route'):
return obj.route(request, args, kwargs)
if isinstance(obj, dict):
avail = obj.copy()
for method in self.methods:
avail.setdefault(method, None)
else:
avail = {
method: getattr(obj, method, None)
for method in self.methods
}
# assume its a class, route to a specific method
method = request.method.lower()
view = avail.get(method)
if not view:
return self.response(request, 405,
Allow=', '.join([
method.upper()
for method, view in avail.iteritems()
if view
])
)
return view(request, *args, **kwargs)
@property
def schema(self):
views = {}
for view in self.registered_views:
try:
name = view.name
except AttributeError:
name = view.__name__
try:
view_schema = view.schema
except AttributeError:
view_schema = self.schema_default(view)
views[name] = view_schema
return {
'resources': views,
}
def schema_default(self, view):
return {
'doc': view.__doc__,
'endpoint': {},
'url': '',
}
def schema_view(self, request):
return self.serialize(request, 200, self.schema)
def serialize(self, request, status, obj,
content_type=None
, opts=None
, **headers
):
'''serialize(request, 200, obj) -> response
Serialize an status and object to a response given a request.
'''
opts = opts or {}
content, content_type = self.serializer.serialize(
obj,
content_type or request.META.get('HTTP_ACCEPT'),
**opts
)
headers['Content-Type'] = content_type
return self.response(request, status, content, **headers)
def throttled(self, request, code=429, **headers):
return self.response(request, code, **headers)
def update_view(self, view):
'''update_view(view) -> view
Hook to make updates to a view.
In this context, a view can be a class or a function. In the class
case, its considered a resource.
'''
# Django's way of marking a view csrf_exempt
view.csrf_exempt = True
return view
    @property
    def urls(self):
        '''
        Returns the list of registered endpoints, mounted under this API's
        name prefix (when set).  For example, in your urls.py:

            url('^api/', include(api.urls))
        '''
        self.load()
        urlpatterns = []
        for view, url in self.registry:
            if url:
                # Plain regex string: synthesize the (regex, view, kwargs,
                # name) 4-tuple, naming the endpoint after the view.
                urlitem = (url, view, None, self.endpoint(view.__name__))
                if not isinstance(url, basestring):
                    # Already a tuple/list registered via register():
                    # keep its trailing elements, inserting the view.
                    urlitem = (url[0], view) + url[1:]
                urlpatterns.append(urlitem)
            # A resource may carry its own url patterns; append them as-is.
            extraurls = getattr(view, 'urls', None)
            if extraurls:
                urlpatterns.extend(extraurls)
        from django.conf.urls import include, patterns, url
        # urls_local (the schema root) always comes first.
        urlpatterns = self.urls_local + patterns('', *urlpatterns)
        if self.name:
            # Mount everything under the API name, e.g. ^v1/...
            urlpatterns = patterns('',
                url(r'^%s/' % self.name, include(urlpatterns)),
            )
        return urlpatterns
# TODO "me" endpoint which dumps information about authned?
@property
def urls_local(self):
'''
Returns the list of urls for this specific api.
'''
from django.conf.urls import patterns, url
return patterns('',
url(r'^$', self.wrap(self.schema_view),
name=self.endpoint('schema')
),
)
def wrap(self, view_or_resource=None, register=False, url=None, **opts):
'''wrap(myview) -> wrapped_view
Wrap up a view function in an API container.
Ideally used when setting up the urls property for resources. ie:
@propery
def urls(self):
return [
(r'^path/$', api.wrap(self.route_list)),
(r'^path/(?P<id>\d+)/$', api.wrap(self.route_object)),
]
However, it has the same semantics as the decorator way to wrap a view.
Except that this function defaults register to False (vs True for the
api decorator). So to wrap a view that you dont want to register:
@api.wrap
def myview(request):
...
Options:
register whether or not to register the view (default: False)
url passed directly to register()
See options_dispatch() for more available options.
'''
self.options_dispatch(opts)
def decorator(view_or_resource):
if inspect.isclass(view_or_resource):
view = self.make_resource_helper(view_or_resource, opts)
else:
@functools.wraps(view_or_resource)
def view(request, *args, **kwargs):
return self.dispatch(
view_or_resource,
opts,
| |
max_allele=None, dtype='u1'):
"""Transform genotype calls into allele counts per call.
Parameters
----------
max_allele : int, optional
Highest allele index. Provide this value to speed up computation.
dtype : dtype, optional
Output dtype.
Returns
-------
out : ndarray, uint8, shape (n_variants, n_samples, len(alleles))
Array of allele counts per call.
Examples
--------
>>> import allel
>>> g = allel.GenotypeArray([[[0, 0], [0, 1]],
... [[0, 2], [1, 1]],
... [[2, 2], [-1, -1]]])
>>> g.to_allele_counts()
<GenotypeAlleleCountsArray shape=(3, 2, 3) dtype=uint8>
2:0:0 1:1:0
1:0:1 0:2:0
0:0:2 0:0:0
>>> v = g[:, 0]
>>> v
<GenotypeVector shape=(3, 2) dtype=int64>
0/0 0/2 2/2
>>> v.to_allele_counts()
<GenotypeAlleleCountsVector shape=(3, 3) dtype=uint8>
2:0:0 1:0:1 0:0:2
"""
# determine alleles to count
if max_allele is None:
max_allele = self.max()
alleles = list(range(max_allele + 1))
# set up output array
outshape = self.shape[:-1] + (len(alleles),)
out = np.zeros(outshape, dtype=dtype)
for allele in alleles:
# count alleles along ploidy dimension
allele_match = self.values == allele
if self.mask is not None:
allele_match &= ~self.mask[..., np.newaxis]
np.sum(allele_match, axis=-1, out=out[..., allele])
if self.ndim == 2:
out = GenotypeAlleleCountsVector(out)
elif self.ndim == 3:
out = GenotypeAlleleCountsArray(out)
return out
    def to_gt(self, max_allele=None):
        """Convert genotype calls to VCF-style string representation.

        Parameters
        ----------
        max_allele : int, optional
            Highest allele index expected in the data; computed from the
            data when not given.  Determines the string width per allele.

        Returns
        -------
        gt : ndarray, string, shape (n_variants, n_samples)

        Examples
        --------
        >>> import allel
        >>> g = allel.GenotypeArray([[[0, 0], [0, 1]],
        ...                          [[0, 2], [1, 1]],
        ...                          [[1, 2], [2, 1]],
        ...                          [[2, 2], [-1, -1]]])
        >>> g.to_gt()
        chararray([[b'0/0', b'0/1'],
               [b'0/2', b'1/1'],
               [b'1/2', b'2/1'],
               [b'2/2', b'./.']],
              dtype='|S3')
        >>> v = g[:, 0]
        >>> v
        <GenotypeVector shape=(4, 2) dtype=int64>
        0/0 0/2 1/2 2/2
        >>> v.to_gt()
        chararray([b'0/0', b'0/2', b'1/2', b'2/2'],
              dtype='|S3')
        >>> g.is_phased = np.ones(g.shape[:-1])
        >>> g.to_gt()
        chararray([[b'0|0', b'0|1'],
               [b'0|2', b'1|1'],
               [b'1|2', b'2|1'],
               [b'2|2', b'.|.']],
              dtype='|S3')
        >>> v = g[:, 0]
        >>> v
        <GenotypeVector shape=(4, 2) dtype=int64>
        0|0 0|2 1|2 2|2
        >>> v.to_gt()
        chararray([b'0|0', b'0|2', b'1|2', b'2|2'],
              dtype='|S3')
        """
        # how many characters needed per allele call?
        if max_allele is None:
            max_allele = np.max(self)
        if max_allele <= 0:
            # guard log10 against non-positive arguments
            max_allele = 1
        nchar = int(np.floor(np.log10(max_allele))) + 1
        # convert to string
        # NOTE(review): np.string_ and np.chararray are legacy NumPy APIs
        # (removed/deprecated in NumPy 2.0) -- consider migrating.
        a = self.astype((np.string_, nchar)).view(np.chararray)
        # recode missing alleles (negative values and masked calls)
        a[self < 0] = b'.'
        if self.mask is not None:
            a[self.mask] = b'.'
        # determine allele call separator: '/' unphased, '|' phased,
        # chosen per call when phasing information is present
        if self.is_phased is None:
            sep = b'/'
        else:
            sep = np.empty(self.shape[:-1], dtype='S1').view(np.chararray)
            sep[self.is_phased] = b'|'
            sep[~self.is_phased] = b'/'
        # join via separator, coping with any ploidy
        gt = a[..., 0]
        for i in range(1, self.ploidy):
            gt = gt + sep + a[..., i]
        return gt
def copy(self, *args, **kwargs):
data = self.values.copy(*args, **kwargs)
out = type(self)(data)
if self.mask is not None:
out.mask = self.mask.copy()
if self.is_phased is not None:
out.is_phased = self.is_phased.copy()
return out
    def map_alleles(self, mapping, copy=True):
        """Transform alleles via a mapping.

        Parameters
        ----------
        mapping : ndarray, int8, shape (n_variants, max_allele)
            An array defining the allele mapping for each variant.
        copy : bool, optional
            If True, return a new array; if False, apply mapping in place
            (only applies for arrays with dtype int8; all other dtypes
            require a copy).

        Returns
        -------
        gm : GenotypeArray

        Examples
        --------
        >>> import allel
        >>> import numpy as np
        >>> g = allel.GenotypeArray([[[0, 0], [0, 1]],
        ...                          [[0, 2], [1, 1]],
        ...                          [[1, 2], [2, 1]],
        ...                          [[2, 2], [-1, -1]]], dtype='i1')
        >>> mapping = np.array([[1, 2, 0],
        ...                     [2, 0, 1],
        ...                     [2, 1, 0],
        ...                     [0, 2, 1]], dtype='i1')
        >>> g.map_alleles(mapping)
        <GenotypeArray shape=(4, 2, 2) dtype=int8>
        1/1 1/2
        2/1 0/0
        1/0 0/1
        1/1 ./.
        >>> v = g[:, 0]
        >>> v
        <GenotypeVector shape=(4, 2) dtype=int8>
        0/0 0/2 1/2 2/2
        >>> v.map_alleles(mapping)
        <GenotypeVector shape=(4, 2) dtype=int8>
        1/1 2/1 1/0 1/1

        Notes
        -----
        If a mask has been set, it is ignored by this function.
        For arrays with dtype int8 an optimised implementation is used which is
        faster and uses far less memory. It is recommended to convert arrays to
        dtype int8 where possible before calling this method.

        See Also
        --------
        create_allele_mapping
        """
        # Delegate to the haplotype-level implementation, then restore the
        # original genotype dimensionality (vector vs array).
        h = self.to_haplotypes()
        hm = h.map_alleles(mapping, copy=copy)
        if self.ndim == 2:
            gm = GenotypeVector(hm)
        else:
            gm = hm.to_genotypes(ploidy=self.ploidy)
        return gm
    def to_haplotypes(self, copy=False):
        """Reshape a genotype array to view it as haplotypes.

        Parameters
        ----------
        copy : bool, optional
            If True, copy data.

        Returns
        -------
        h : HaplotypeArray, shape (n_variants, n_samples * ploidy)
            Haplotype array.

        Raises
        ------
        NotImplementedError
            This base-class method must be overridden by subclasses.

        Notes
        -----
        If genotype calls are unphased, the haplotypes returned by
        this function will bear no resemblance to the true haplotypes.

        Examples
        --------
        >>> import allel
        >>> g = allel.GenotypeArray([[[0, 0], [0, 1]],
        ...                          [[0, 1], [1, 1]],
        ...                          [[0, 2], [-1, -1]]])
        >>> g.to_haplotypes()
        <HaplotypeArray shape=(3, 4) dtype=int64>
        0 0 0 1
        0 1 1 1
        0 2 . .
        >>> v = g[:, 0]
        >>> v
        <GenotypeVector shape=(3, 2) dtype=int64>
        0/0 0/1 0/2
        >>> v.to_haplotypes()
        <HaplotypeArray shape=(3, 2) dtype=int64>
        0 0
        0 1
        0 2
        """
        # abstract: concrete array/vector subclasses provide the implementation
        raise NotImplementedError
    def compress(self, condition, axis=0, out=None):
        """Return selected slices of an array along given axis.

        Parameters
        ----------
        condition : array_like, bool
            Array that selects which entries to return. N.B., if len(condition)
            is less than the size of the given axis, then output is truncated to the length
            of the condition array.
        axis : int, optional
            Axis along which to take slices. If None, work on the flattened array.
        out : ndarray, optional
            Output array. Its type is preserved and it must be of the right
            shape to hold the output.

        Returns
        -------
        out : Genotypes
            A copy of the array without the slices along axis for which `condition`
            is false.

        Raises
        ------
        NotImplementedError
            This base-class method must be overridden by subclasses.

        Examples
        --------
        >>> import allel
        >>> g = allel.GenotypeArray([[[0, 0], [0, 1]],
        ...                          [[0, 1], [1, 1]],
        ...                          [[0, 2], [-1, -1]]])
        >>> g.compress([True, False, True], axis=0)
        <GenotypeArray shape=(2, 2, 2) dtype=int64>
        0/0 0/1
        0/2 ./.
        >>> v = g[:, 0]
        >>> v
        <GenotypeVector shape=(3, 2) dtype=int64>
        0/0 0/1 0/2
        >>> v.compress([True, False, True], axis=0)
        <GenotypeVector shape=(2, 2) dtype=int64>
        0/0 0/2
        """
        # abstract: concrete array/vector subclasses provide the implementation
        raise NotImplementedError
    def take(self, indices, axis=0, out=None, mode='raise'):
        """Take elements from an array along an axis.

        This function does the same thing as "fancy" indexing (indexing arrays
        using arrays); however, it can be easier to use if you need elements
        along a given axis.

        Parameters
        ----------
        indices : array_like
            The indices of the values to extract.
        axis : int, optional
            The axis over which to select values.
        out : ndarray, optional
            If provided, the result will be placed in this array. It should
            be of the appropriate shape and dtype.
        mode : {'raise', 'wrap', 'clip'}, optional
            Specifies how out-of-bounds indices will behave.
            * 'raise' -- raise an error (default)
            * 'wrap' -- wrap around
            * 'clip' -- clip to the range
            'clip' mode means that all indices that are too large are replaced
            by the index that addresses the last element along that axis. Note
            that this disables indexing with negative numbers.

        Returns
        -------
        subarray : ndarray

        Raises
        ------
        NotImplementedError
            This base-class method must be overridden by subclasses.

        Examples
        --------
        >>> import allel
        >>> g = allel.GenotypeArray([[[0, 0], [0, 1]],
        ...                          [[0, 1], [1, 1]],
        ...                          [[0, 2], [-1, -1]]])
        >>> g.take([0, 2], axis=0)
        <GenotypeArray shape=(2, 2, 2) dtype=int64>
        0/0 0/1
        0/2 ./.
        >>> v = g[:, 0]
        >>> v
        <GenotypeVector shape=(3, 2) dtype=int64>
        0/0 0/1 0/2
        >>> v.take([0, 2], axis=0)
        <GenotypeVector shape=(2, 2) dtype=int64>
        0/0 0/2
        """
        # abstract: concrete array/vector subclasses provide the implementation
        raise NotImplementedError
def concatenate(self, others, axis=0):
"""Join a sequence of arrays along an existing axis.
Parameters
----------
others : sequence of array_like
The arrays must have the same shape, except in the dimension
corresponding to `axis` (the first, by default).
axis : int, optional
The axis along which the arrays will be joined. Default is 0.
Returns
-------
res : ndarray
The concatenated array.
Examples
--------
>>> import allel
>>> g = allel.GenotypeArray([[[0, 0], [0, 1]],
... [[0, 1], [1, 1]],
... [[0, 2], [-1, -1]]])
>>> g.concatenate([g], axis=0)
<GenotypeArray shape=(6, 2, 2) dtype=int64>
0/0 0/1
0/1 1/1
0/2 ./.
0/0 | |
<filename>BGWpy/BGW/kgrid.py
import os
import subprocess
import numpy as np
from ..core import fortran_str
from ..core import Task
__all__ = ['get_kpt_grid', 'get_kgrid_input', 'get_kpoints', 'get_kqshift',
'get_kpt_grid_nosym', 'KgridTask']
class KgridTask(Task):
    """Drive BerkeleyGW's kgrid.x: write its input, run it, and parse
    the resulting k-points and symmetry operations."""

    def __init__(self,
                 structure,
                 ngkpt = 3*[1],
                 kshift = 3*[.0],
                 qshift = 3*[.0],
                 fft = 3*[0],
                 use_tr=False,
                 executable='kgrid.x',  # TODO remove executable and make bindir a global option
                 rootname='tmp.kgrid',
                 clean_after=True,
                 dirname='',
                 **kwargs):
        """
        Arguments
        ---------

        structure : pymatgen.Structure
            Structure object containing information on the unit cell.
        ngkpt : list(3), int, optional
            K-points grid. Number of k-points along each primitive vector
            of the reciprocal lattice.
        kshift : list(3), float, optional
            Relative shift of the k-points grid along each direction,
            as a fraction of the smallest division along that direction.
        qshift : list(3), float, optional
            Absolute shift of the k-points grid along each direction.
        fft : list(3), int, optional
            Number of points along each direction of the fft grid.
        use_tr : bool
            Use time reversal symmetry.
        executable : str, optional
            Name or path of the kgrid.x executable.
        rootname : str, optional
            Base name for the temporary files (joined with dirname).
        clean_after : bool, optional
            Remove the temporary files after execution.
        dirname : str, optional
            Directory in which the temporary files are written.
        """
        rootname = os.path.join(dirname, rootname)
        self.dirname = os.path.dirname(rootname)
        self.inputname = rootname + '.in'
        self.outputname = rootname + '.out'
        self.logname = rootname + '.log'
        self.executable = executable
        self.clean_after = clean_after

        self.structure = structure
        self.ngkpt = np.array(ngkpt)
        self.kshift = np.array(kshift)
        self.qshift = np.array(qshift)
        self.fft = fft
        self.use_tr = use_tr

    def read_kpoints(self):
        """Read a list of kpoints and their weights from kgrid.x output file."""
        with open(self.outputname, 'r') as f:
            content = f.read()
        # The first two lines of the output are a header.
        lines = content.splitlines()[2:]
        kpoints = list()
        weights = list()
        for line in lines:
            k = [ float(ki) for ki in line.split()[:3] ]
            w = float(line.split()[-1])
            kpoints.append(k)
            weights.append(w)
        return kpoints, weights

    @property
    def new_dir(self):
        """True when a target directory is set but does not exist yet."""
        return self.dirname and not os.path.exists(self.dirname)

    def write(self):
        """Write the kgrid.x input file, creating the directory if needed."""
        if self.new_dir:
            subprocess.call(['mkdir', '-p', self.dirname])
        with open(self.inputname, 'w') as f:
            f.write(self.get_kgrid_input())

    def run(self):
        """Execute kgrid.x on the previously written input file.

        Raises OSError with a helpful message when the executable is
        not found.
        """
        try:
            subprocess.call([self.executable, self.inputname,
                             self.outputname, self.logname])
        except OSError as E:
            message = (str(E) + '\n\n' +
                       79 * '=' + '\n\n' +
                       'Could not find the executable kgrid.x\n' +
                       'Please make sure it is available for execution.\n' +
                       'On a computing cluster, you might do this my loading the module:\n' +
                       ' module load berkeleygw\n' +
                       "If you compiled BerkeleyGW yourself, " +
                       "make sure that the 'bin' directory\n" +
                       'of BerkeleyGW is listed in your PATH environment variable.\n' +
                       '\n' + 79 * '=' + '\n')
            raise OSError(message)

    def clean_up(self):
        """Remove all temporary files (input, output, log)."""
        for fname in (self.inputname, self.outputname, self.logname):
            if os.path.exists(fname):
                try:
                    os.remove(fname)
                except Exception as E:
                    print(E)
        if self.new_dir:
            try:
                # BUGFIX: was os.removedirs(dirname) -- an undefined name
                # whose NameError was silently printed by the except below.
                os.removedirs(self.dirname)
            except Exception as E:
                print(E)

    def get_kgrid_input(self):
        """Make a kgrid.x input, using pymatgen.Structure object."""
        structure = self.structure
        kshift = self.kshift
        qshift = self.qshift
        ngkpt = self.ngkpt
        fft = self.fft
        use_tr = self.use_tr

        abc = np.array(structure.lattice.abc)
        # Lattice vectors with each one's length divided out.
        latt_vec_rel = (structure.lattice.matrix.transpose() / abc).transpose().round(12)
        pos_cart_rel = np.dot(structure.frac_coords, latt_vec_rel).round(6)

        S = ''
        for arr in (ngkpt, kshift, qshift):
            S += fortran_str(arr) + '\n'
        S += '\n'
        for arr in latt_vec_rel.tolist() + [structure.num_sites]:
            S += fortran_str(arr) + '\n'
        for Z, pos in zip(structure.atomic_numbers, pos_cart_rel):
            S += str(Z) + ' ' + fortran_str(pos) + '\n'
        for arr in (fft, use_tr):
            S += fortran_str(arr) + '\n'
        return S

    @staticmethod
    def get_kqshift(ngkpt, kshift, qshift):
        """Add an absolute qshift to a relative kshift."""
        # BUGFIX: this was declared @staticmethod but still took 'self' as
        # first argument, so instance calls with three arguments failed.
        kqshiftk = [ kshift[i] + qshift[i] * ngkpt[i] for i in range(3) ]
        return kqshiftk

    def get_kpt_grid_nosym(self):
        """
        Return a list of kpoints generated without any symmetry,
        along with their weights.
        """
        ngkpt = self.ngkpt
        kshift = self.kshift
        qshift = self.qshift
        nkx, nky, nkz = ngkpt
        kpoints = list()
        weights = list()
        for ikx in range(nkx):
            for iky in range(nky):
                for ikz in range(nkz):
                    k = (np.array([ikx, iky, ikz]) + kshift) / ngkpt + qshift
                    kpoints.append(k)
                    weights.append(1.)
        return np.array(kpoints), np.array(weights)

    def read_symmetries(self):
        """Read the symmetries matrices and translation vectors from the log.

        Returns (syms, taus) where syms has shape (nsym, 9) and taus has
        shape (nsym, 3).  NOTE(review): if the log never contains the
        expected marker line, syms/taus are unbound and a NameError is
        raised at return -- same as the historical behavior.
        """
        with open(self.logname, 'r') as f:
            while True:
                try:
                    # BUGFIX: was f.next(), which only exists on Python 2;
                    # next(f) works on both Python 2 and 3.
                    line = next(f)
                    if 'symmetries of the crystal without FFT grid' in line:
                        line = next(f)
                        nsym = int(line)
                        line = next(f)
                        assert 'Space group' in line
                        # BUGFIX: np.int / np.float are deprecated aliases;
                        # use the builtin types.
                        syms = np.zeros((nsym, 9), dtype=int)
                        taus = np.zeros((nsym, 3), dtype=float)
                        for i in range(nsym):
                            line = next(f)
                            parts = line.split()
                            # BUGFIX: assigning a lazy map() object into an
                            # array slice fails on Python 3; materialize it.
                            syms[i,:] = [int(p) for p in parts[2:11]]
                            taus[i,:] = [float(p) for p in parts[11:14]]
                        break
                except StopIteration:
                    break
                except ValueError as e:
                    raise Exception('Could not parse kgrid file.\n\n' + str(e))
        return syms, taus

    def get_kpoints(self):
        """Write, run and extract kpoints. Return kpt, wtk."""
        try:
            self.write()
            self.run()
            return self.read_kpoints()
        finally:
            if self.clean_after:
                self.clean_up()

    def get_symmetries(self):
        """Write, run and extract symmetries."""
        try:
            self.write()
            self.run()
            return self.read_symmetries()
        finally:
            if self.clean_after:
                self.clean_up()

    def get_kpoints_and_sym(self):
        """Write, run and extract kpoints and symmetries."""
        try:
            self.write()
            self.run()
            outkpt = self.read_kpoints()
            outsym = self.read_symmetries()
            return outkpt, outsym
        finally:
            if self.clean_after:
                self.clean_up()
# =========================================================================== #
""" Constructor functions """
def get_kpt_grid(structure, ngkpt,
                 executable='kgrid.x',  # TODO remove executable and make bindir a global option
                 rootname='tmp.kgrid', clean_after=True, **kwargs):
    """
    Use kgrid.x to compute the list of kpoint and their weight.

    Arguments
    ---------

    structure: pymatgen.Structure
        The cell definition of the system.
    ngkpt: array(3)
        The k-point grid.
    executable: str
        The path to kgrid.x
    rootname: str
        Base name for the temporary files; may include a directory
        component, which is created if needed.
    clean_after: bool
        Remove files afterward.

    Keyword Arguments
    -----------------

    Any other argument to pass to get_kgrid_input, including:

    kshift:
        A k-point shift (relative to the grid spacing).
    qshift:
        A q-point shift (absolute, in reduced coord.)

    Returns
    -------

    kpts: A list of k-points (as a 2D list).
    wtks: A list of weights.
    """
    dirname = os.path.dirname(rootname)
    new_dir = dirname and not os.path.exists(dirname)
    inputname = rootname + '.in'
    outputname = rootname + '.out'
    logname = rootname + '.log'
    inputcontent = get_kgrid_input(structure, ngkpt, **kwargs)
    # Write the input
    if new_dir:
        #os.system('mkdir -p ' + dirname)
        subprocess.call(['mkdir', '-p', dirname])
    with open(inputname, 'w') as f:
        f.write(inputcontent)
    # Run kgrid.x
    try:
        subprocess.call([executable, inputname, outputname, logname])
    except OSError as E:
        # Executable not found: re-raise with installation hints.
        message = (str(E) + '\n\n' +
                   79 * '=' + '\n\n' +
                   'Could not find the executable {} .\n'.format(executable) +
                   'Please make sure it is available for execution.\n' +
                   'On a computing cluster, you might do this my loading the module:\n' +
                   ' module load berkeleygw\n' +
                   "If you compiled BerkeleyGW yourself, " +
                   "make sure that the 'bin' directory\n" +
                   'of BerkeleyGW is listed in your PATH environment variable.\n' +
                   '\n' + 79 * '=' + '\n')
        raise OSError(message)
    # Read the output
    with open(outputname, 'r') as f:
        outputcontent = f.read()
    # Clean up
    if clean_after:
        for fname in (inputname, outputname, logname):
            if os.path.exists(fname):
                try:
                    os.remove(fname)
                except Exception as E:
                    print(E)
        if new_dir:
            try:
                os.removedirs(dirname)
            except Exception as E:
                print(E)
    # Parse the output
    return get_kpoints(outputcontent)
def get_kgrid_input(structure, ngkpt, kshift=[.0,.0,.0], qshift=[.0,.0,.0],
                    fft=[0,0,0], use_tr=False, **kwargs):
    """Build the text of a kgrid.x input file from a pymatgen.Structure."""
    abc = np.array(structure.lattice.abc)
    # Lattice vectors with each one's length divided out.
    latt_vec_rel = (structure.lattice.matrix.transpose() / abc).transpose().round(12)
    pos_cart_rel = np.dot(structure.frac_coords, latt_vec_rel).round(6)

    lines = []
    for arr in (ngkpt, kshift, qshift):
        lines.append(fortran_str(arr))
    lines.append('')
    for arr in latt_vec_rel.tolist() + [structure.num_sites]:
        lines.append(fortran_str(arr))
    for Z, pos in zip(structure.atomic_numbers, pos_cart_rel):
        lines.append(str(Z) + ' ' + fortran_str(pos))
    for arr in (fft, use_tr):
        lines.append(fortran_str(arr))
    return '\n'.join(lines) + '\n'
def get_kpoints(content):
    """Parse kgrid.x output text into parallel (kpoints, weights) lists."""
    kpoints = list()
    weights = list()
    # The first two lines of the output are a header.
    for line in content.splitlines()[2:]:
        fields = line.split()
        kpoints.append([float(x) for x in fields[:3]])
        weights.append(float(fields[-1]))
    return kpoints, weights
def get_kqshift(ngkpt, kshift, qshift):
    """Add an absolute qshift to a relative kshift."""
    return [kshift[i] + ngkpt[i] * qshift[i] for i in range(3)]
# ============================================================== #
def get_kpt_grid_nosym(ngkpt, kshift=[.0,.0,.0], qshift=[.0,.0,.0]):
"""
Return a list of kpoints generated without any symmetry,
along with their weights.
"""
ngkpt = np.array(ngkpt)
kshift = np.array(kshift)
qshift = np.array(qshift)
nkx, nky, nkz = ngkpt
kpoints = list()
weights = list()
#for ikx in range(-nkx, nkx):
# for iky in range(-nky, nky):
# for ikz in range(-nkz, nkz):
# k = (np.array([ikx, iky, ikz]) + kshift) / ngkpt * .5 + qshift
# kpoints.append(k)
| |
<filename>ad_fcemu/nes_cpu.py
# -*- coding: utf-8 -*-
import copy
import typing as tp
import config
import log_differ as ld
import nes_file as nf
import nes_game_pad as ngp
import nes_ppu as np
import utils
class Memory(object):
    """NES CPU address space: 2KB internal RAM plus mirrors, PPU/pad
    registers, the optional 512-byte trainer, and 32KB of PRG ROM."""

    def __init__(self, ppu: np.NesPPU, pad: ngp.GamePad):
        # 2KB of internal RAM ($0000-$07FF), mirrored up to $1FFF.
        self.ram: tp.List[int] = [0] * 0x0800
        self.trainer: tp.List[int] = None
        self.prg_rom: tp.List[int] = None
        self.ppu = ppu
        self.pad = pad

    def load_nes(self, nes: nf.NesFile):
        """Load trainer and PRG ROM from a parsed cartridge."""
        # BUGFIX: this previously read nes.prg_rom, so 'trainer' was never
        # the cartridge's 512-byte trainer section.
        # NOTE(review): assumes NesFile exposes a 'trainer' attribute that
        # is None when the cartridge has no trainer -- confirm in nes_file.
        trainer = nes.trainer
        if trainer is None:
            trainer = [0] * 512
        rom = nes.prg_rom
        # A 16KB ROM is mirrored to fill the 32KB window at $8000-$FFFF.
        if len(rom) < 32 * 1024:
            rom = rom * 2
        self.prg_rom = rom
        self.trainer = trainer

    def __getitem__(self, addr: int):
        # Resolve mirrored regions first.
        if 0x0800 <= addr <= 0x1FFF:
            # Mirrors of $0000-$07FF
            addr = (addr - 0x0800) % 0x0800
        elif 0x2008 <= addr <= 0x3FFF:
            # Mirrors of $2000-2007 (repeats every 8 bytes)
            addr = 0x2000 + (addr - 0x2008) % 8

        if 0 <= addr <= 0x07ff:
            return self.ram[addr]
        elif 0x2000 <= addr <= 0x2007:
            return self.ppu.read_for_cpu(addr)
        elif 0x4000 <= addr <= 0x4013:
            # APU registers: deliberately ignored
            return 0
        elif addr == 0x4014:
            return self.ppu.read_for_cpu(addr)
        elif addr == 0x4016:
            return self.pad.read_for_cpu()
        elif 0x4015 <= addr <= 0x5fff:
            # deliberately ignored
            return 0
        elif 0x7000 <= addr <= 0x71ff:
            # BUGFIX: index relative to the start of the trainer region;
            # indexing with the absolute address always overran the
            # 512-entry trainer list.
            return self.trainer[addr - 0x7000]
        elif 0x8000 <= addr <= 0xffff:
            addr -= 0x8000
            return self.prg_rom[addr]
        else:
            raise IndexError('错误的读地址:<{}>'.format(addr))

    def __setitem__(self, addr: int, value: int):
        # Writes must fit in one byte.
        if value < 0 or value > 2 ** 8 - 1:
            raise ValueError('<{}> 超过 1 字节的取值范围'.format(value))

        # Resolve mirrored regions first.
        if 0x0800 <= addr <= 0x1FFF:
            # Mirrors of $0000-$07FF
            addr = (addr - 0x0800) % 0x0800
        elif 0x2008 <= addr <= 0x3FFF:
            # Mirrors of $2000-2007 (repeats every 8 bytes)
            addr = 0x2000 + (addr - 0x2008) % 8

        if 0 <= addr <= 0x07ff:
            self.ram[addr] = value
        elif 0x2000 <= addr <= 0x2007:
            self.ppu.write_for_cpu(addr, value)
        elif 0x4000 <= addr <= 0x4013:
            # APU registers: deliberately ignored
            pass
        elif addr == 0x4014:
            # OAM DMA: copy the 256-byte RAM page value*$100 to the PPU.
            self.ppu.write_for_cpu(addr, value)
            beg = value * 0x100
            end = value * 0x100 + 0x100
            self.ppu.load_oam(self.ram[beg:end])
        elif 0x4015 <= addr <= 0x5fff:
            # deliberately ignored
            pass
        else:
            raise IndexError('错误的写地址:<{}>'.format(addr))
class _Status(object):
    """6502 processor status register, exposing one attribute per flag bit."""

    # flag name -> bit position within the status byte
    _mapper = {
        'carry': 0,
        'zero': 1,
        'interrupt': 2,
        'decimal': 3,
        'break_command': 4,
        '_ignore': 5,
        'overflow': 6,
        'negative': 7,
    }

    def __init__(self, value: int):
        self.carry = 0
        self.zero = 0
        self.interrupt = 0
        self.decimal = 0
        self.break_command = 0
        self._ignore = 1
        self.overflow = 0
        self.negative = 0
        self._setup(value)

    def __setattr__(self, key, value):
        # Flag attributes may only ever hold a single bit.
        if key in self._mapper and value not in (0, 1):
            raise ValueError('错误的 bit 值: <{}>'.format(value))
        super().__setattr__(key, value)

    def _setup(self, value):
        # Unpack each bit of the byte into its flag attribute, leaving the
        # always-set _ignore bit alone.
        for attr, bit in self._mapper.items():
            if attr != '_ignore':
                setattr(self, attr, (value >> bit) & 1)

    @property
    def value(self):
        # Pack the flag attributes back into a status byte.
        result = 0
        for attr, bit in self._mapper.items():
            result |= getattr(self, attr) << bit
        return result

    def set_negative(self, value: int):
        # The negative flag mirrors bit 7 of the result.
        self.negative = (value >> 7) & 1

    def set_zero(self, value: int):
        self.zero = 1 if value == 0 else 0
class NesCPU(object):
    def __init__(self, memory: Memory):
        """Initialise the CPU registers to a power-up state and build the
        opcode dispatch table.

        Args:
            memory: the CPU-visible bus the processor reads from / writes to.
        """
        self.pc = 0  # program counter
        self.a = 0   # accumulator
        self.x = 0   # index register X
        self.y = 0   # index register Y
        self.sp = 0xfd  # stack pointer (power-up value)
        self.status = _Status(0x34)  # status register P (power-up value)
        self.memory = memory
        self.instructions = _InstructionSet(self)
        # Map: opcode byte -> (mnemonic, addressing mode).
        # Addressing-mode codes:
        #   IMP implied/accumulator, IMM immediate, ZPG zero page,
        #   ZPX/ZPY zero page indexed, ABS absolute, ABX/ABY absolute indexed,
        #   IND indirect, INX (d,x) indexed indirect, INY (d),y indirect indexed,
        #   REL relative (branch target).
        # NOTE(review): the table appears to include the unofficial 6502
        # opcodes (SLO, RLA, SRE, RRA, SAX, LAX, DCP, ISB, ...) commonly
        # implemented by NES emulators — confirm against the instruction set.
        self.opcodes = {
            0x00: ('BRK', 'IMP'),
            0x01: ('ORA', 'INX'),
            0x02: ('KIL', 'IMP'),
            0x03: ('SLO', 'INX'),
            0x04: ('NOP', 'ZPG'),
            0x05: ('ORA', 'ZPG'),
            0x06: ('ASL', 'ZPG'),
            0x07: ('SLO', 'ZPG'),
            0x08: ('PHP', 'IMP'),
            0x09: ('ORA', 'IMM'),
            0x0A: ('ASL', 'IMP'),
            0x0B: ('ANC', 'IMM'),
            0x0C: ('NOP', 'ABS'),
            0x0D: ('ORA', 'ABS'),
            0x0E: ('ASL', 'ABS'),
            0x0F: ('SLO', 'ABS'),
            0x10: ('BPL', 'REL'),
            0x11: ('ORA', 'INY'),
            0x12: ('KIL', 'IMP'),
            0x13: ('SLO', 'INY'),
            0x14: ('NOP', 'ZPX'),
            0x15: ('ORA', 'ZPX'),
            0x16: ('ASL', 'ZPX'),
            0x17: ('SLO', 'ZPX'),
            0x18: ('CLC', 'IMP'),
            0x19: ('ORA', 'ABY'),
            0x1A: ('NOP', 'IMP'),
            0x1B: ('SLO', 'ABY'),
            0x1C: ('NOP', 'ABX'),
            0x1D: ('ORA', 'ABX'),
            0x1E: ('ASL', 'ABX'),
            0x1F: ('SLO', 'ABX'),
            0x20: ('JSR', 'ABS'),
            0x21: ('AND', 'INX'),
            0x22: ('KIL', 'IMP'),
            0x23: ('RLA', 'INX'),
            0x24: ('BIT', 'ZPG'),
            0x25: ('AND', 'ZPG'),
            0x26: ('ROL', 'ZPG'),
            0x27: ('RLA', 'ZPG'),
            0x28: ('PLP', 'IMP'),
            0x29: ('AND', 'IMM'),
            0x2A: ('ROL', 'IMP'),
            0x2B: ('ANC', 'IMM'),
            0x2C: ('BIT', 'ABS'),
            0x2D: ('AND', 'ABS'),
            0x2E: ('ROL', 'ABS'),
            0x2F: ('RLA', 'ABS'),
            0x30: ('BMI', 'REL'),
            0x31: ('AND', 'INY'),
            0x32: ('KIL', 'IMP'),
            0x33: ('RLA', 'INY'),
            0x34: ('NOP', 'ZPX'),
            0x35: ('AND', 'ZPX'),
            0x36: ('ROL', 'ZPX'),
            0x37: ('RLA', 'ZPX'),
            0x38: ('SEC', 'IMP'),
            0x39: ('AND', 'ABY'),
            0x3A: ('NOP', 'IMP'),
            0x3B: ('RLA', 'ABY'),
            0x3C: ('NOP', 'ABX'),
            0x3D: ('AND', 'ABX'),
            0x3E: ('ROL', 'ABX'),
            0x3F: ('RLA', 'ABX'),
            0x40: ('RTI', 'IMP'),
            0x41: ('EOR', 'INX'),
            0x42: ('KIL', 'IMP'),
            0x43: ('SRE', 'INX'),
            0x44: ('NOP', 'ZPG'),
            0x45: ('EOR', 'ZPG'),
            0x46: ('LSR', 'ZPG'),
            0x47: ('SRE', 'ZPG'),
            0x48: ('PHA', 'IMP'),
            0x49: ('EOR', 'IMM'),
            0x4A: ('LSR', 'IMP'),
            0x4B: ('ASR', 'IMM'),
            0x4C: ('JMP', 'ABS'),
            0x4D: ('EOR', 'ABS'),
            0x4E: ('LSR', 'ABS'),
            0x4F: ('SRE', 'ABS'),
            0x50: ('BVC', 'REL'),
            0x51: ('EOR', 'INY'),
            0x52: ('KIL', 'IMP'),
            0x53: ('SRE', 'INY'),
            0x54: ('NOP', 'ZPX'),
            0x55: ('EOR', 'ZPX'),
            0x56: ('LSR', 'ZPX'),
            0x57: ('SRE', 'ZPX'),
            0x58: ('CLI', 'IMP'),
            0x59: ('EOR', 'ABY'),
            0x5A: ('NOP', 'IMP'),
            0x5B: ('SRE', 'ABY'),
            0x5C: ('NOP', 'ABX'),
            0x5D: ('EOR', 'ABX'),
            0x5E: ('LSR', 'ABX'),
            0x5F: ('SRE', 'ABX'),
            0x60: ('RTS', 'IMP'),
            0x61: ('ADC', 'INX'),
            0x62: ('KIL', 'IMP'),
            0x63: ('RRA', 'INX'),
            0x64: ('NOP', 'ZPG'),
            0x65: ('ADC', 'ZPG'),
            0x66: ('ROR', 'ZPG'),
            0x67: ('RRA', 'ZPG'),
            0x68: ('PLA', 'IMP'),
            0x69: ('ADC', 'IMM'),
            0x6A: ('ROR', 'IMP'),
            0x6B: ('ARR', 'IMM'),
            0x6C: ('JMP', 'IND'),
            0x6D: ('ADC', 'ABS'),
            0x6E: ('ROR', 'ABS'),
            0x6F: ('RRA', 'ABS'),
            0x70: ('BVS', 'REL'),
            0x71: ('ADC', 'INY'),
            0x72: ('KIL', 'IMP'),
            0x73: ('RRA', 'INY'),
            0x74: ('NOP', 'ZPX'),
            0x75: ('ADC', 'ZPX'),
            0x76: ('ROR', 'ZPX'),
            0x77: ('RRA', 'ZPX'),
            0x78: ('SEI', 'IMP'),
            0x79: ('ADC', 'ABY'),
            0x7A: ('NOP', 'IMP'),
            0x7B: ('RRA', 'ABY'),
            0x7C: ('NOP', 'ABX'),
            0x7D: ('ADC', 'ABX'),
            0x7E: ('ROR', 'ABX'),
            0x7F: ('RRA', 'ABX'),
            0x80: ('NOP', 'IMM'),
            0x81: ('STA', 'INX'),
            0x82: ('NOP', 'IMM'),
            0x83: ('SAX', 'INX'),
            0x84: ('STY', 'ZPG'),
            0x85: ('STA', 'ZPG'),
            0x86: ('STX', 'ZPG'),
            0x87: ('SAX', 'ZPG'),
            0x88: ('DEY', 'IMP'),
            0x89: ('NOP', 'IMM'),
            0x8A: ('TXA', 'IMP'),
            0x8B: ('XAA', 'IMM'),
            0x8C: ('STY', 'ABS'),
            0x8D: ('STA', 'ABS'),
            0x8E: ('STX', 'ABS'),
            0x8F: ('SAX', 'ABS'),
            0x90: ('BCC', 'REL'),
            0x91: ('STA', 'INY'),
            0x92: ('KIL', 'IMP'),
            0x93: ('AHX', 'INY'),
            0x94: ('STY', 'ZPX'),
            0x95: ('STA', 'ZPX'),
            0x96: ('STX', 'ZPY'),
            0x97: ('SAX', 'ZPY'),
            0x98: ('TYA', 'IMP'),
            0x99: ('STA', 'ABY'),
            0x9A: ('TXS', 'IMP'),
            0x9B: ('TAS', 'ABY'),
            0x9C: ('SHY', 'ABX'),
            0x9D: ('STA', 'ABX'),
            0x9E: ('SHX', 'ABY'),
            0x9F: ('AHX', 'ABY'),
            0xA0: ('LDY', 'IMM'),
            0xA1: ('LDA', 'INX'),
            0xA2: ('LDX', 'IMM'),
            0xA3: ('LAX', 'INX'),
            0xA4: ('LDY', 'ZPG'),
            0xA5: ('LDA', 'ZPG'),
            0xA6: ('LDX', 'ZPG'),
            0xA7: ('LAX', 'ZPG'),
            0xA8: ('TAY', 'IMP'),
            0xA9: ('LDA', 'IMM'),
            0xAA: ('TAX', 'IMP'),
            0xAB: ('LAX', 'IMM'),
            0xAC: ('LDY', 'ABS'),
            0xAD: ('LDA', 'ABS'),
            0xAE: ('LDX', 'ABS'),
            0xAF: ('LAX', 'ABS'),
            0xB0: ('BCS', 'REL'),
            0xB1: ('LDA', 'INY'),
            0xB2: ('KIL', 'IMP'),
            0xB3: ('LAX', 'INY'),
            0xB4: ('LDY', 'ZPX'),
            0xB5: ('LDA', 'ZPX'),
            0xB6: ('LDX', 'ZPY'),
            0xB7: ('LAX', 'ZPY'),
            0xB8: ('CLV', 'IMP'),
            0xB9: ('LDA', 'ABY'),
            0xBA: ('TSX', 'IMP'),
            0xBB: ('LAS', 'ABY'),
            0xBC: ('LDY', 'ABX'),
            0xBD: ('LDA', 'ABX'),
            0xBE: ('LDX', 'ABY'),
            0xBF: ('LAX', 'ABY'),
            0xC0: ('CPY', 'IMM'),
            0xC1: ('CMP', 'INX'),
            0xC2: ('NOP', 'IMM'),
            0xC3: ('DCP', 'INX'),
            0xC4: ('CPY', 'ZPG'),
            0xC5: ('CMP', 'ZPG'),
            0xC6: ('DEC', 'ZPG'),
            0xC7: ('DCP', 'ZPG'),
            0xC8: ('INY', 'IMP'),
            0xC9: ('CMP', 'IMM'),
            0xCA: ('DEX', 'IMP'),
            0xCB: ('AXS', 'IMM'),
            0xCC: ('CPY', 'ABS'),
            0xCD: ('CMP', 'ABS'),
            0xCE: ('DEC', 'ABS'),
            0xCF: ('DCP', 'ABS'),
            0xD0: ('BNE', 'REL'),
            0xD1: ('CMP', 'INY'),
            0xD2: ('KIL', 'IMP'),
            0xD3: ('DCP', 'INY'),
            0xD4: ('NOP', 'ZPX'),
            0xD5: ('CMP', 'ZPX'),
            0xD6: ('DEC', 'ZPX'),
            0xD7: ('DCP', 'ZPX'),
            0xD8: ('CLD', 'IMP'),
            0xD9: ('CMP', 'ABY'),
            0xDA: ('NOP', 'IMP'),
            0xDB: ('DCP', 'ABY'),
            0xDC: ('NOP', 'ABX'),
            0xDD: ('CMP', 'ABX'),
            0xDE: ('DEC', 'ABX'),
            0xDF: ('DCP', 'ABX'),
            0xE0: ('CPX', 'IMM'),
            0xE1: ('SBC', 'INX'),
            0xE2: ('NOP', 'IMM'),
            0xE3: ('ISB', 'INX'),
            0xE4: ('CPX', 'ZPG'),
            0xE5: ('SBC', 'ZPG'),
            0xE6: ('INC', 'ZPG'),
            0xE7: ('ISB', 'ZPG'),
            0xE8: ('INX', 'IMP'),
            0xE9: ('SBC', 'IMM'),
            0xEA: ('NOP', 'IMP'),
            0xEB: ('SBC', 'IMM'),
            0xEC: ('CPX', 'ABS'),
            0xED: ('SBC', 'ABS'),
            0xEE: ('INC', 'ABS'),
            0xEF: ('ISB', 'ABS'),
            0xF0: ('BEQ', 'REL'),
            0xF1: ('SBC', 'INY'),
            0xF2: ('KIL', 'IMP'),
            0xF3: ('ISB', 'INY'),
            0xF4: ('NOP', 'ZPX'),
            0xF5: ('SBC', 'ZPX'),
            0xF6: ('INC', 'ZPX'),
            0xF7: ('ISB', 'ZPX'),
            0xF8: ('SED', 'IMP'),
            0xF9: ('SBC', 'ABY'),
            0xFA: ('NOP', 'IMP'),
            0xFB: ('ISB', 'ABY'),
            0xFC: ('NOP', 'ABX'),
            0xFD: ('SBC', 'ABX'),
            0xFE: ('INC', 'ABX'),
            0xFF: ('ISB', 'ABX'),
        }
def __setattr__(self, key, value):
if key == 'PC':
if value < 0 | |
x[0:6]
y = y[0:6]
ax.plot(x, y, ':', marker='^', markersize=4, markeredgecolor='k', markeredgewidth=0.5, color=colours[idx],
label=r'$\gamma=%d$: $\langle\tau\rangle_{z=N}$' % gammaval, zorder=4)
elif datakey == 'linalgALLZevalExpl' and datakey in TR_curves_to_plot:
if key == 'TR100g':
x = x[0:8]
y = y[0:8]
if key == 'TR1g':
x = x[0:4]
y = y[0:4]
ax.plot(x, y, '-.', marker='^', markeredgecolor='k', color=colours[idx],
label=r'$\gamma=%d$: $\langle\tau\rangle_{z=N}$ to $z=N$; $\lambda_1$' % gammaval, zorder=3)
elif datakey == 'linalgZHATExpl' and datakey in TR_curves_to_plot:
if key == 'TR100g':
zmod = 2
else:
zmod = 3
ax.plot(x, y, '-', markeredgecolor='k', color=colours[idx],
label=r'$\gamma=%d$: $\langle\tau\rangle_{\mathrm{1D}}$' % gammaval, zorder=zmod)
elif datakey == 'linalgALLZExplAltAbsorb' and datakey in TR_curves_to_plot:
ax.plot(x[2:], y[2:], ':', marker='s', markersize=4, markeredgecolor='k', color=colours[idx],
label=r'$\gamma=%d$: $\langle\tau\rangle_{z=z_1}$' % gammaval, zorder=5)
elif datakey == 'fpRouteFlux' and datakey in TR_curves_to_plot:
if gammaval == 1:
ax.plot(x[6:], y[6:], '--', marker=None, markeredgecolor='k', color=colours[idx],
label=r'$\gamma=%d$: $\langle\tau\rangle_{\mathrm{P}}$' % gammaval, zorder=3)
else:
ax.plot(x[6:], y[6:], '--', marker=None, markeredgecolor='k', color=colours[idx],
label=r'$\gamma=%d$: $\langle\tau\rangle_{\mathrm{P}}$' % gammaval, zorder=3)
elif datakey == 'fpFlux' and datakey in TR_curves_to_plot:
if gammaval == 1:
ax.plot(x[1:], y[1:], '--', marker=None, markeredgecolor='k', color=colours[idx],
label=r'$\gamma=%d$: $(\mu z_0)^{-1} $' % gammaval, zorder=3)
else:
ax.plot(x[2:], y[2:], '--', marker=None, markeredgecolor='k', color=colours[idx],
label=r'$\gamma=%d$: $(\mu z_0)^{-1} $' % gammaval, zorder=3)
elif datakey == 'guessPfixTerm123' and datakey in TR_curves_to_plot:
ax.plot(x, y, '-.', marker=None, markeredgecolor='k', color=colours[idx], # 'k'
label=r'$\gamma=%d$: $\langle\tau\rangle_{\mathrm{S}}$' % gammaval, zorder=5-idx)
elif datakey == 'guessPfixTerm1' and datakey in TR_curves_to_plot:
ax.plot(x, y, '--', marker='^', markeredgecolor='k', color=colours[idx],
label=r'$\gamma=%d$: pfix1' % gammaval, zorder=3)
elif datakey == 'guessBlobtimes' and key=='TR100g' and datakey in TR_curves_to_plot:
ax.plot(x[0:16], y[0:16], '--', marker='*', markeredgecolor='k', markeredgewidth=0.5, color=colours[idx],
label=r'$\gamma=100$: $\langle\tau\rangle_{\mathrm{W}}$', zorder=3)
elif datakey == 'guessBlobtimesPosSvals' and key=='TR100g' and datakey in TR_curves_to_plot:
ax.plot(x, y, '--', marker=None, markeredgecolor='k', color=colours[idx],
label=r'$\gamma=100$: blobtimes $s>0$', zorder=3)
elif datakey == 'guessBlobtimesExplicit' and key=='TR100g' and datakey in TR_curves_to_plot:
ax.plot(x[0:17], y[0:17], '--', marker='*', markeredgecolor='k', markeredgewidth=0.5, color=colours[idx],
label=r'$\gamma=100$: $\langle\tau\rangle_{\mathrm{W}}$', zorder=3)
elif datakey == 'guessBoundaryTimeMono1Expl' and datakey in TR_curves_to_plot:
ax.plot(x, y, '--', marker=None, markeredgecolor='k', color='k', # colours[idx],
label=r'$\gamma=%d$: FPE boundary time 1' % (gammaval), zorder=3)
elif datakey == 'guessBoundaryTimeMono2Expl' and datakey in TR_curves_to_plot:
ax.plot(x, y, '--', marker=None, markeredgecolor='k', color='gray', # colours[idx],
label=r'$\gamma=%d$: FPE boundary time 2' % gammaval, zorder=3)
elif datakey == 'guessBoundaryTimeMono1NoDivExpl' and datakey in TR_curves_to_plot:
ax.plot(x, y, '--', marker=None, markeredgecolor='k', color='red', # colours[idx],
label=r'$\gamma=%d$: FPE boundary time 1 NoDiv' % (gammaval), zorder=3)
elif datakey == 'guessBoundaryTimeMono2NoDivExpl' and datakey in TR_curves_to_plot:
ax.plot(x, y, '--', marker=None, markeredgecolor='k', color='red', # colours[idx],
label=r'$\gamma=%d$: FPE boundary time 2 NoDiv' % gammaval, zorder=3)
elif datakey == 'guessBoundaryProb1' and datakey in TR_curves_to_plot:
ax.plot(x, y, ':', marker='p', markeredgecolor='k', color=colours[idx], # colours[idx],
label=r'$\gamma=100$: $\langle\tau\rangle_{\mathrm{B}}$ prob 1', zorder=3)
elif datakey == 'guessBoundaryProb3' and datakey in TR_curves_to_plot:
ax.plot(x, y, ':', marker='p', markeredgecolor='k', color=colours[idx], # colours[idx],
label=r'$\gamma=100$: $\langle\tau\rangle_{\mathrm{B}}$ prob 3', zorder=3)
elif datakey == 'guessBoundaryProb1Expl' and datakey in TR_curves_to_plot:
ax.plot(x, y, ':', marker='p', markeredgecolor='k', color='gray', # colours[idx],
label=r'$\gamma=100$: $\langle\tau\rangle_{\mathrm{B}}$ prob 1 Expl', zorder=3)
elif datakey == 'guessBoundaryProb3Expl' and datakey in TR_curves_to_plot:
print "==================================", datakey, idx
ax.plot(x, y, ':', marker='p', markeredgecolor='k', color='gray', # colours[idx],
label=r'$\gamma=100$: $\langle\tau\rangle_{\mathrm{B}}$ prob 3 Expl', zorder=3)
elif datakey == 'guessBoundaryProb1NoDivExpl' and datakey in TR_curves_to_plot:
ax.plot(x, y, ':', marker='p', markeredgecolor='k', color='blue', # colours[idx],
label=r'$\gamma=100$: $\langle\tau\rangle_{\mathrm{B}}$ prob 1 NoDiv Expl', zorder=3)
elif datakey == 'guessBoundaryProb3NoDivExpl' and datakey in TR_curves_to_plot:
print "==================================", datakey, idx
ax.plot(x, y, ':', marker='p', markeredgecolor='k', color='blue', # colours[idx],
label=r'$\gamma=100$: $\langle\tau\rangle_{\mathrm{B}}$ prob 3 NoDiv Expl', zorder=3)
elif datakey == 'tauBintegral' and datakey in TR_curves_to_plot:
print 'd', key, y
ax.plot(x, y, '--', marker='s', markeredgecolor='k', color='r', # colours[idx],
label=r'%s: $\tau_B$ integral' % key, zorder=3)
else:
print 'skipping', datakey
ax.set_xlabel(r'$N$', fontsize=fs)
ax.set_ylabel(r'$\langle\tau\rangle$', fontsize=fs)
plt.xticks(fontsize=fs - 2)
plt.yticks(fontsize=fs - 2)
#plt.legend(bbox_to_anchor=(1.1, 1.05), fontsize=fs-4)
plt.legend(fontsize=fs-4, ncol=2)
# log options
flag_xlog10 = True
flag_ylog10 = True
if flag_xlog10:
# ax.set_xscale("log", nonposx='clip')
ax.set_xscale("log")
# ax_dual.set_xscale("log", nonposx='clip')
ax.set_xlim([np.min(param_set) * 0.9, 1.5 * 1e6])
#ax.set_xlim([np.min(param_set) * 0.9, 1.5 * 1e4])
if flag_ylog10:
# ax.set_yscale("log", nonposx='clip')
ax.set_yscale("log")
# ax_dual.set_yscale("log", nonposx='clip')
#ax.set_ylim([6 * 1e-1, 3 * 1e6])
ax.set_ylim([0.8 * 1e1, 2 * 1e5])
#ax.set_ylim([0.8 * 1e2, 2 * 1e5])
#plt.show()
plt.savefig(basedir + os.sep + 'mfpt_TR_heuristics.pdf')
if mfpt_composite_BL:
BL_curves_to_plot = [
'data', 'linalgZHATExpl', 'linalgALLZExpl', 'fpRouteFlux',
# 'guessBoundaryProb3Expl', linalgALLZevalExpl, 'guessBoundaryTimeMono2Expl', 'fpFlux'
'guessBoundaryTimeMono2Explrev', 'guessBoundaryProb3Explrev',
'tauBintegral'
]
data_ids = ['BL1g', 'BL100g'] #'TR4g'
data_ids_to_data = {'BL1g': 'mfpt_Nvary_mu1e-4_BL_ens240_xall_g1',
#'TR4g': 'mfpt_Nvary_mu1e-4_TR_ens240_xall_g4',
'BL100g': 'mfpt_Nvary_mu1e-4_BL_ens240_xall_g100'}
mfpt_dict = {key: {} for key in data_ids}
for key in data_ids:
mfpt_dir = basedir + os.sep + 'mfpt' + os.sep + data_ids_to_data[key]
mean_fpt_varying, sd_fpt_varying, param_to_vary, param_set, params = \
read_varying_mean_sd_fpt_and_params(mfpt_dir + os.sep + 'fpt_stats_collected_mean_sd_varying_N.txt',
mfpt_dir + os.sep + 'fpt_stats_collected_mean_sd_varying_N_params.csv')
mfpt_dict[key]['data'] = {'x': param_set, 'y': mean_fpt_varying}
# build heuristics for each data_id...
for key in data_ids:
heuristic_dir = basedir + os.sep + 'heuristic'
fnames = os.listdir(heuristic_dir)
for fname in fnames:
fpath = heuristic_dir + os.sep + fname
file_split_by_underscore = fname.split('_')
if file_split_by_underscore[2] == key:
Narr, mfpt_heuristic = read_mfpt_heuristic(fpath)
mfpt_dict[key][file_split_by_underscore[3][:-4]] = {'x': Narr, 'y': mfpt_heuristic}
# plot data and heuristics on one plot
ax=None
fs=12
colours = [X_DARK, '#ffd966', Z_DARK, BLUE, 'pink', 'brown'] # ['black', 'red', 'green', 'blue']
#colours = [X_DARK, BLUE, Z_DARK, 'pink', 'brown'] # ['black', 'red', 'green', 'blue'] NOV 7 ALT
if ax is None:
plt.figure(figsize=(4, 3))
ax = plt.gca() # TODO
for idx, key in enumerate(data_ids):
subdict = mfpt_dict[key]
print key, subdict.keys()
for datakey in subdict.keys():
x = subdict[datakey]['x']
y = subdict[datakey]['y']
if key == 'BL1g':
gammaval = 1
else:
assert key == 'BL100g'
gammaval = 100
if datakey == 'data' and datakey in BL_curves_to_plot:
ax.plot(x, y, '-', linewidth=0.0, marker='o', markeredgecolor='k', color=colours[idx],
label=r'$\gamma=%d$: data' % gammaval, zorder=3)
elif datakey == 'linalgALLZExpl' and datakey in BL_curves_to_plot:
if key == 'BL100g':
x = x[0:6]
y = y[0:6]
if key == 'BL1g':
x = x[0:3]
y = y[0:3]
#ax.plot(x, y, ':', marker=None, markeredgecolor='k', color=colours[idx],
# label=r'$\gamma=%d$: $\langle\tau\rangle_{\mathrm{B}}$' % gammaval, zorder=3)
ax.plot(x, y, ':', marker='^', markersize=4, markeredgecolor='k', markeredgewidth=0.5, color=colours[idx],
label=r'$\gamma=%d$: $\langle\tau\rangle_{z=N}$' % gammaval, zorder=3)
elif datakey == 'linalgALLZevalExpl' and datakey in BL_curves_to_plot:
if key == 'BL100g':
x = x[0:8]
y = y[0:8]
if key == 'BL1g':
x = x[0:4]
y = y[0:4]
ax.plot(x, y, '-.', marker='^', markeredgecolor='k', color=colours[idx],
label=r'$\gamma=%d$: $\langle\tau\rangle_{\mathrm{B}}$ to $z=N$; $\lambda_1$' % gammaval, zorder=3)
elif datakey == 'linalgZHATExpl' and datakey in BL_curves_to_plot:
ax.plot(x, y, '-', markeredgecolor='k', color=colours[idx],
label=r'$\gamma=%d$: $\langle\tau\rangle_{\mathrm{1D}}$' % gammaval, zorder=1)
elif datakey == 'fpRouteFlux' and datakey in BL_curves_to_plot:
if gammaval == 1:
ax.plot(x[1:], y[1:], '--', marker=None, markeredgecolor='k', color=colours[idx],
label=r'$\gamma=%d$: $\langle\tau\rangle_{\mathrm{P}}$' % gammaval, zorder=1)
else:
ax.plot(x[4:], y[4:], '--', marker=None, markeredgecolor='k', color=colours[idx],
label=r'$\gamma=%d$: $\langle\tau\rangle_{\mathrm{P}}$' % gammaval, zorder=1)
elif datakey == 'fpFlux' and datakey in BL_curves_to_plot:
if gammaval == 1:
ax.plot(x[1:], y[1:], '--', marker=None, markeredgecolor='k', color=colours[idx],
label=r'$\gamma=%d$: $(\mu z_0)^{-1} $' % gammaval, zorder=3)
else:
ax.plot(x[4:], y[4:], '--', marker=None, markeredgecolor='k', color=colours[idx],
label=r'$\gamma=%d$: $(\mu z_0)^{-1} $' % gammaval, zorder=3)
elif datakey == 'guessBoundaryProb1' and key != 'BL1g' and datakey in BL_curves_to_plot:
ax.plot(x, y, '--', marker=None, markeredgecolor='k', color='k', # colours[idx],
label=r'$\gamma=%d$: BP1' % gammaval, zorder=3)
elif datakey == 'guessBoundaryProb2' and datakey in BL_curves_to_plot:
ax.plot(x, y, '--', marker=None, markeredgecolor='k', color='k', # colours[idx],
label=r'$\gamma=%d$: BP2' % gammaval, zorder=3)
elif datakey == 'guessBoundaryProb3' and datakey in BL_curves_to_plot:
if gammaval == 1:
ax.plot(x[0:3], y[0:3], ':', marker=None, markeredgecolor='k', color=colours[idx], # colours[idx],
label=r'$\gamma=%d$: $\langle\tau\rangle_{\mathrm{B}}$' % (gammaval), zorder=3)
else:
ax.plot(x[0:6], y[0:6], ':', marker=None, markeredgecolor='k', color=colours[idx],
label=r'$\gamma=%d$: $\langle\tau\rangle_{\mathrm{B}}$' % (gammaval), zorder=3)
elif datakey == 'guessBoundaryProb1Expl' and key != 'BL1g' and datakey in BL_curves_to_plot:
ax.plot(x, y, '--', marker=None, markeredgecolor='k', color='k', # colours[idx],
label=r'$\gamma=%d$: BP1' % gammaval, zorder=3)
elif datakey == 'guessBoundaryProb2Expl' and datakey in BL_curves_to_plot:
ax.plot(x, y, '--', marker=None, markeredgecolor='k', color='k', # colours[idx],
label=r'$\gamma=%d$: BP2' % gammaval, zorder=3)
elif datakey == 'guessBoundaryProb3Explrev' and datakey in BL_curves_to_plot:
if gammaval == 1:
ax.plot(x[0:3], y[0:3], ':', marker='s', markeredgecolor='k', color=colours[idx], # colours[idx],
label=r'$\gamma=%d$: $\langle\tau\rangle_{\mathrm{B}}$ heuristic' % (gammaval), zorder=3)
else:
ax.plot(x[0:6], y[0:6], ':', marker='s', markeredgecolor='k', color=colours[idx],
label=r'$\gamma=%d$: $\langle\tau\rangle_{\mathrm{B}}$ heuristic' % (gammaval), zorder=3)
elif datakey == 'guessBoundaryTimeMono1' and datakey in BL_curves_to_plot:
print 'c', key, y
ax.plot(x, y, '--', marker=None, markeredgecolor='k', color='k', # colours[idx],
label=r'$\gamma=%d$: BTM1' % (gammaval), zorder=3)
elif datakey == 'guessBoundaryTimeMono2' and datakey in BL_curves_to_plot:
print 'c', key, y
ax.plot(x, y, '--', marker=None, markeredgecolor='k', color='gray', # colours[idx],
label=r'$\gamma=%d$: BTM2' % gammaval, zorder=3)
elif datakey == 'guessBoundaryTimeMono1Expl' and datakey in BL_curves_to_plot:
print 'c', key, y
ax.plot(x, y, '--', marker=None, markeredgecolor='k', | |
<gh_stars>0
# Copyright 2019 The Feast Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
from pathlib import Path
from typing import Dict, List, Optional, Tuple, Union
import pandas as pd
import pyarrow
from feast.data_source import FileSource
from feast.entity import Entity
from feast.feature_view import FeatureView
from feast.infra.provider import Provider, get_provider
from feast.offline_store import (
RetrievalJob,
get_offline_store,
get_offline_store_for_retrieval,
)
from feast.protos.feast.types.EntityKey_pb2 import EntityKey as EntityKeyProto
from feast.protos.feast.types.Value_pb2 import Value as ValueProto
from feast.registry import Registry
from feast.repo_config import (
LocalOnlineStoreConfig,
OnlineStoreConfig,
RepoConfig,
load_repo_config,
)
from feast.type_map import python_value_to_proto_value
class FeatureStore:
    """
    A FeatureStore object is used to define, create, and retrieve features.
    """
    # Resolved repository configuration shared by all store operations.
    config: RepoConfig
    def __init__(
        self, repo_path: Optional[str] = None, config: Optional[RepoConfig] = None,
    ):
        if repo_path is not None and config is not None:
            raise ValueError("You cannot specify both repo_path and config")
        if config is not None:
            self.config = config
        elif repo_path is not None:
            self.config = load_repo_config(Path(repo_path))
        else:
            # Neither given: fall back to a local provider with on-disk defaults.
            self.config = RepoConfig(
                metadata_store="./metadata.db",
                project="default",
                provider="local",
                online_store=OnlineStoreConfig(
                    local=LocalOnlineStoreConfig("online_store.db")
                ),
            )
    def _get_provider(self) -> Provider:
        # Provider implementation (e.g. local, gcp) selected by the config.
        return get_provider(self.config)
    def _get_registry(self) -> Registry:
        # Registry handle backed by the configured metadata store.
        return Registry(self.config.metadata_store)
    def apply(self, objects: List[Union[FeatureView, Entity]]):
        """Register objects to metadata store and update related infrastructure.
        The apply method registers one or more definitions (e.g., Entity, FeatureView) and registers or updates these
        objects in the Feast registry. Once the registry has been updated, the apply method will update related
        infrastructure (e.g., create tables in an online store) in order to reflect these new definitions. All
        operations are idempotent, meaning they can safely be rerun.
        Args: objects (List[Union[FeatureView, Entity]]): A list of FeatureView or Entity objects that should be
            registered
        Examples:
            Register a single Entity and FeatureView.
            >>> from feast.feature_store import FeatureStore
            >>> from feast import Entity, FeatureView, Feature, ValueType, FileSource
            >>> from datetime import timedelta
            >>>
            >>> fs = FeatureStore()
            >>> customer_entity = Entity(name="customer", value_type=ValueType.INT64, description="customer entity")
            >>> customer_feature_view = FeatureView(
            >>>     name="customer_fv",
            >>>     entities=["customer"],
            >>>     features=[Feature(name="age", dtype=ValueType.INT64)],
            >>>     input=FileSource(path="file.parquet", event_timestamp_column="timestamp"),
            >>>     ttl=timedelta(days=1)
            >>> )
            >>> fs.apply([customer_entity, customer_feature_view])
        """
        # TODO: Add locking
        # TODO: Optimize by only making a single call (read/write)
        # TODO: Add infra update operation (currently we are just writing to registry)
        registry = self._get_registry()
        for ob in objects:
            if isinstance(ob, FeatureView):
                registry.apply_feature_view(ob, project=self.config.project)
            elif isinstance(ob, Entity):
                registry.apply_entity(ob, project=self.config.project)
            else:
                raise ValueError(
                    f"Unknown object type ({type(ob)}) provided as part of apply() call"
                )
    def get_historical_features(
        self, entity_df: Union[pd.DataFrame, str], feature_refs: List[str],
    ) -> RetrievalJob:
        """Enrich an entity dataframe with historical feature values for either training or batch scoring.
        This method joins historical feature data from one or more feature views to an entity dataframe by using a time
        travel join.
        Each feature view is joined to the entity dataframe using all entities configured for the respective feature
        view. All configured entities must be available in the entity dataframe. Therefore, the entity dataframe must
        contain all entities found in all feature views, but the individual feature views can have different entities.
        Time travel is based on the configured TTL for each feature view. A shorter TTL will limit the
        amount of scanning that will be done in order to find feature data for a specific entity key. Setting a short
        TTL may result in null values being returned.
        Args:
            entity_df (Union[pd.DataFrame, str]): An entity dataframe is a collection of rows containing all entity
                columns (e.g., customer_id, driver_id) on which features need to be joined, as well as a event_timestamp
                column used to ensure point-in-time correctness. Either a Pandas DataFrame can be provided or a string
                SQL query. The query must be of a format supported by the configured offline store (e.g., BigQuery)
            feature_refs: A list of features that should be retrieved from the offline store. Feature references are of
                the format "feature_view:feature", e.g., "customer_fv:daily_transactions".
        Returns:
            RetrievalJob which can be used to materialize the results.
        Examples:
            Retrieve historical features using a BigQuery SQL entity dataframe
            >>> from feast.feature_store import FeatureStore
            >>>
            >>> fs = FeatureStore(config=RepoConfig(provider="gcp"))
            >>> retrieval_job = fs.get_historical_features(
            >>>     entity_df="SELECT event_timestamp, order_id, customer_id from gcp_project.my_ds.customer_orders",
            >>>     feature_refs=["customer:age", "customer:avg_orders_1d", "customer:avg_orders_7d"]
            >>> )
            >>> feature_data = retrieval_job.to_df()
            >>> model.fit(feature_data) # insert your modeling framework here.
        """
        registry = self._get_registry()
        all_feature_views = registry.list_feature_views(project=self.config.project)
        # Only the feature views actually referenced are passed to the offline store.
        feature_views = _get_requested_feature_views(feature_refs, all_feature_views)
        offline_store = get_offline_store_for_retrieval(feature_views)
        job = offline_store.get_historical_features(
            self.config, feature_views, feature_refs, entity_df
        )
        return job
    def materialize(
        self,
        feature_views: Optional[List[str]],
        start_date: datetime,
        end_date: datetime,
    ) -> None:
        """
        Materialize data from the offline store into the online store.
        This method loads feature data in the specified interval from either
        the specified feature views, or all feature views if none are specified,
        into the online store where it is available for online serving.
        Args:
            feature_views (List[str]): Optional list of feature view names. If selected, will only run
                materialization for the specified feature views.
            start_date (datetime): Start date for time range of data to materialize into the online store
            end_date (datetime): End date for time range of data to materialize into the online store
        Examples:
            Materialize all features into the online store over the interval
            from 3 hours ago to 10 minutes ago.
            >>> from datetime import datetime, timedelta
            >>> from feast.feature_store import FeatureStore
            >>>
            >>> fs = FeatureStore(config=RepoConfig(provider="gcp"))
            >>> fs.materialize(
            >>>     start_date=datetime.utcnow() - timedelta(hours=3),
            >>>     end_date=datetime.utcnow() - timedelta(minutes=10)
            >>> )
        """
        # Resolve the set of feature views to materialize (all when unspecified).
        feature_views_to_materialize = []
        registry = self._get_registry()
        if feature_views is None:
            feature_views_to_materialize = registry.list_feature_views(
                self.config.project
            )
        else:
            for name in feature_views:
                feature_view = registry.get_feature_view(name, self.config.project)
                feature_views_to_materialize.append(feature_view)
        # TODO paging large loads
        for feature_view in feature_views_to_materialize:
            if isinstance(feature_view.input, FileSource):
                raise NotImplementedError(
                    "This function is not yet implemented for File data sources"
                )
            if not feature_view.input.table_ref:
                raise NotImplementedError(
                    f"This function is only implemented for FeatureViews with a table_ref; {feature_view.name} does not have one."
                )
            # Reverse-map the view's column names to the source table's names
            # before querying the offline store.
            (
                entity_names,
                feature_names,
                event_timestamp_column,
                created_timestamp_column,
            ) = _run_reverse_field_mapping(feature_view)
            offline_store = get_offline_store(self.config)
            table = offline_store.pull_latest_from_table(
                feature_view.input,
                entity_names,
                feature_names,
                event_timestamp_column,
                created_timestamp_column,
                start_date,
                end_date,
            )
            # Map the source column names back to the view's feature names.
            if feature_view.input.field_mapping is not None:
                table = _run_forward_field_mapping(
                    table, feature_view.input.field_mapping
                )
            rows_to_write = _convert_arrow_to_proto(table, feature_view)
            provider = self._get_provider()
            provider.online_write_batch(
                self.config.project, feature_view, rows_to_write
            )
def _get_requested_feature_views(
feature_refs: List[str], all_feature_views: List[FeatureView]
) -> List[FeatureView]:
"""Get list of feature views based on feature references"""
feature_views_dict = {}
for ref in feature_refs:
ref_parts = ref.split(":")
found = False
for feature_view in all_feature_views:
if feature_view.name == ref_parts[0]:
found = True
feature_views_dict[feature_view.name] = feature_view
continue
if not found:
raise ValueError(f"Could not find feature view from reference {ref}")
feature_views_list = []
for view in feature_views_dict.values():
feature_views_list.append(view)
return feature_views_list
def _run_reverse_field_mapping(
feature_view: FeatureView,
) -> Tuple[List[str], List[str], str, Optional[str]]:
"""
If a field mapping exists, run it in reverse on the entity names,
feature names, event timestamp column, and created timestamp column
to get the names of the relevant columns in the BigQuery table.
Args:
feature_view: FeatureView object containing the field mapping
as well as the names to reverse-map.
Returns:
Tuple containing the list of reverse-mapped entity names,
reverse-mapped feature names, reverse-mapped event timestamp column,
and reverse-mapped created timestamp column that will be passed into
the query to the offline store.
"""
# if we have mapped fields, use the original field names in the call to the offline store
event_timestamp_column = feature_view.input.event_timestamp_column
entity_names = [entity for entity in feature_view.entities]
feature_names = [feature.name for feature in feature_view.features]
created_timestamp_column = feature_view.input.created_timestamp_column
if feature_view.input.field_mapping is not None:
reverse_field_mapping = {
v: k for k, v in feature_view.input.field_mapping.items()
}
event_timestamp_column = (
reverse_field_mapping[event_timestamp_column]
if event_timestamp_column in reverse_field_mapping.keys()
else event_timestamp_column
)
created_timestamp_column = (
reverse_field_mapping[created_timestamp_column]
if created_timestamp_column
and created_timestamp_column in reverse_field_mapping.keys()
else created_timestamp_column
)
entity_names = [
reverse_field_mapping[col] if col in reverse_field_mapping.keys() else col
for col in entity_names
]
feature_names = [
reverse_field_mapping[col] if col in reverse_field_mapping.keys() else col
for col in | |
#!/usr/bin/env python
"""GRR specific AFF4 objects."""
import re
import time
import logging
from grr.lib import access_control
from grr.lib import aff4
from grr.lib import flow
from grr.lib import queue_manager
from grr.lib import rdfvalue
from grr.lib import registry
from grr.lib import utils
from grr.lib.aff4_objects import standard
from grr.proto import flows_pb2
class SpaceSeparatedStringArray(rdfvalue.RDFString):
  """A string whose whitespace-separated tokens can be iterated individually."""

  def __iter__(self):
    # str.split() with no argument splits on any run of whitespace.
    return iter(self._value.split())
class VersionString(rdfvalue.RDFString):
  """A dotted version string with a numeric-components accessor."""

  @property
  def versions(self):
    """Return the leading integer components of the version string.

    Parsing stops at the first component that is not a plain integer,
    e.g. "3.2.1b" yields [3, 2].
    """
    numeric = []
    for component in str(self).split("."):
      try:
        numeric.append(int(component))
      except ValueError:
        break  # stop at the first non-numeric component
    return numeric
class VFSGRRClient(standard.VFSDirectory):
  """A Remote client."""

  # URN of the index for client labels.
  labels_index_urn = rdfvalue.RDFURN("aff4:/index/labels/clients")

  class SchemaCls(standard.VFSDirectory.SchemaCls):
    """The schema for the client."""
    # Index location used for fast lookup of clients by attribute value.
    client_index = rdfvalue.RDFURN("aff4:/index/client")

    CERT = aff4.Attribute("metadata:cert", rdfvalue.RDFX509Cert,
                          "The PEM encoded cert of the client.")

    FILESYSTEM = aff4.Attribute("aff4:filesystem", rdfvalue.Filesystems,
                                "Filesystems on the client.")

    CLIENT_INFO = aff4.Attribute(
        "metadata:ClientInfo", rdfvalue.ClientInformation,
        "GRR client information", "GRR client", default="")

    LAST_BOOT_TIME = aff4.Attribute("metadata:LastBootTime",
                                    rdfvalue.RDFDatetime,
                                    "When the machine was last booted",
                                    "BootTime")

    FIRST_SEEN = aff4.Attribute("metadata:FirstSeen", rdfvalue.RDFDatetime,
                                "First time the client registered with us",
                                "FirstSeen")

    # Information about the host.
    HOSTNAME = aff4.Attribute("metadata:hostname", rdfvalue.RDFString,
                              "Hostname of the host.", "Host",
                              index=client_index)

    FQDN = aff4.Attribute("metadata:fqdn", rdfvalue.RDFString,
                          "Fully qualified hostname of the host.", "FQDN",
                          index=client_index)

    SYSTEM = aff4.Attribute("metadata:system", rdfvalue.RDFString,
                            "Operating System class.", "System")

    UNAME = aff4.Attribute("metadata:uname", rdfvalue.RDFString,
                           "Uname string.", "Uname")

    OS_RELEASE = aff4.Attribute("metadata:os_release", rdfvalue.RDFString,
                                "OS Major release number.", "Release")

    OS_VERSION = aff4.Attribute("metadata:os_version", VersionString,
                                "OS Version number.", "Version")

    # ARCH values come from platform.uname machine value, e.g. x86_64, AMD64.
    ARCH = aff4.Attribute("metadata:architecture", rdfvalue.RDFString,
                          "Architecture.", "Architecture")

    INSTALL_DATE = aff4.Attribute("metadata:install_date", rdfvalue.RDFDatetime,
                                  "Install Date.", "Install")

    # The knowledge base is used for storing data about the host and users.
    # This is currently a slightly odd object as we only use some of the fields.
    # The proto itself is used in Artifact handling outside of GRR (e.g. Plaso).
    # Over time we will migrate fields into this proto, but for now it is a mix.
    KNOWLEDGE_BASE = aff4.Attribute("metadata:knowledge_base",
                                    rdfvalue.KnowledgeBase,
                                    "Artifact Knowledge Base", "KnowledgeBase")

    GRR_CONFIGURATION = aff4.Attribute(
        "aff4:client_configuration", rdfvalue.Dict,
        "Running configuration for the GRR client.", "Config")

    USER = aff4.Attribute("aff4:users", rdfvalue.Users,
                          "A user of the system.", "Users")

    USERNAMES = aff4.Attribute("aff4:user_names", SpaceSeparatedStringArray,
                               "A space separated list of system users.",
                               "Usernames",
                               index=client_index)

    # This information is duplicated from the INTERFACES attribute but is done
    # to allow for fast searching by mac address.
    MAC_ADDRESS = aff4.Attribute("aff4:mac_addresses", rdfvalue.RDFString,
                                 "A hex encoded MAC address.", "MAC",
                                 index=client_index)

    KERNEL = aff4.Attribute("aff4:kernel_version", rdfvalue.RDFString,
                            "Kernel version string.", "KernelVersion")

    # Same for IP addresses.
    HOST_IPS = aff4.Attribute("aff4:host_ips", rdfvalue.RDFString,
                              "An IP address.", "Host_ip",
                              index=client_index)

    PING = aff4.Attribute("metadata:ping", rdfvalue.RDFDatetime,
                          "The last time the server heard from this client.",
                          "LastCheckin", versioned=False, default=0)

    CLOCK = aff4.Attribute("metadata:clock", rdfvalue.RDFDatetime,
                           "The last clock read on the client "
                           "(Can be used to estimate client clock skew).",
                           "Clock", versioned=False)

    CLIENT_IP = aff4.Attribute("metadata:client_ip", rdfvalue.RDFString,
                               "The ip address this client connected from.",
                               "Client_ip", versioned=False)

    # This is the last foreman rule that applied to us
    LAST_FOREMAN_TIME = aff4.Attribute(
        "aff4:last_foreman_time", rdfvalue.RDFDatetime,
        "The last time the foreman checked us.", versioned=False)

    LAST_INTERFACES = aff4.Attribute(
        "aff4:last_interfaces", rdfvalue.Interfaces,
        "Last seen network interfaces. Full history is maintained in the "
        "clientid/network object. Separated for performance reasons.",
        versioned=False)

    LAST_CRASH = aff4.Attribute(
        "aff4:last_crash", rdfvalue.ClientCrash,
        "Last client crash.", creates_new_object_version=False,
        versioned=False)

    VOLUMES = aff4.Attribute(
        "aff4:volumes", rdfvalue.Volumes,
        "Client disk volumes.")

    HARDWARE_INFO = aff4.Attribute(
        "aff4:hardware_info", rdfvalue.HardwareInfo,
        "Various hardware information.", default="")

  # Valid client ids
  CLIENT_ID_RE = re.compile(r"^C\.[0-9a-fA-F]{16}$")

  @property
  def age(self):
    """RDFDatetime at which the object was created."""
    # TODO(user) move up to AFF4Object after some analysis of how .age is
    # used in the codebase.
    aff4_type = self.Get(self.Schema.TYPE)

    if aff4_type:
      return aff4_type.age
    else:
      # If there is no type attribute yet, we have only just been created and
      # not flushed yet, so just set timestamp to now.
      return rdfvalue.RDFDatetime().Now()

  def Initialize(self):
    """Validates that this object's URN is a well-formed client id."""
    # Our URN must be a valid client.id.
    self.client_id = rdfvalue.ClientURN(self.urn)

  def Update(self, attribute=None, priority=None):
    """Refreshes client metadata by starting an Interrogate flow.

    Args:
      attribute: Only "CONTAINS" triggers a refresh; anything else is a no-op.
      priority: Priority the spawned flow is started with.

    Returns:
      The URN of the started flow, or None when no flow was started.
    """
    if attribute == "CONTAINS":
      flow_id = flow.GRRFlow.StartFlow(client_id=self.client_id,
                                       flow_name="Interrogate",
                                       token=self.token, priority=priority)

      return flow_id

  def OpenMember(self, path, mode="rw"):
    # Delegate directly to the generic volume implementation.
    return aff4.AFF4Volume.OpenMember(self, path, mode=mode)

  # Maps each PathSpec.PathType to the AFF4 subtree client paths live under.
  AFF4_PREFIXES = {rdfvalue.PathSpec.PathType.OS: "/fs/os",
                   rdfvalue.PathSpec.PathType.TSK: "/fs/tsk",
                   rdfvalue.PathSpec.PathType.REGISTRY: "/registry",
                   rdfvalue.PathSpec.PathType.MEMORY: "/devices/memory"}

  @staticmethod
  def ClientURNFromURN(urn):
    """Returns the client id (first URN component) of any URN under a client."""
    return rdfvalue.ClientURN(rdfvalue.RDFURN(urn).Split()[0])

  @staticmethod
  def PathspecToURN(pathspec, client_urn):
    """Returns a mapping between a pathspec and an AFF4 URN.

    Args:
      pathspec: The PathSpec instance to convert.
      client_urn: A URN of any object within the client. We use it to find the
          client id.

    Returns:
      A urn that corresponds to this pathspec.

    Raises:
      ValueError: If pathspec is not of the correct type.
    """
    client_urn = rdfvalue.ClientURN(client_urn)

    if not isinstance(pathspec, rdfvalue.RDFValue):
      raise ValueError("Pathspec should be an rdfvalue.")

    # If the first level is OS and the second level is TSK its probably a mount
    # point resolution. We map it into the tsk branch. For example if we get:
    # path: \\\\.\\Volume{1234}\\
    # pathtype: OS
    # mount_point: /c:/
    # nested_path {
    #   path: /windows/
    #   pathtype: TSK
    # }
    # We map this to aff4://client_id/fs/tsk/\\\\.\\Volume{1234}\\/windows/
    dev = pathspec[0].path
    if pathspec[0].HasField("offset"):
      # We divide here just to get prettier numbers in the GUI
      dev += ":" + str(pathspec[0].offset / 512)

    if (len(pathspec) > 1 and
        pathspec[0].pathtype == rdfvalue.PathSpec.PathType.OS and
        pathspec[1].pathtype == rdfvalue.PathSpec.PathType.TSK):
      result = [VFSGRRClient.AFF4_PREFIXES[rdfvalue.PathSpec.PathType.TSK],
                dev]

      # Skip the top level pathspec.
      pathspec = pathspec[1]
    else:
      # For now just map the top level prefix based on the first pathtype
      result = [VFSGRRClient.AFF4_PREFIXES[pathspec[0].pathtype]]

    for p in pathspec:
      component = p.path

      # The following encode different pathspec properties into the AFF4 path in
      # such a way that unique files on the client are mapped to unique URNs in
      # the AFF4 space. Note that this transformation does not need to be
      # reversible since we always use the PathSpec when accessing files on the
      # client.
      if p.HasField("offset"):
        component += ":" + str(p.offset / 512)

      # Support ADS names.
      if p.HasField("stream_name"):
        component += ":" + p.stream_name

      result.append(component)

    return client_urn.Add("/".join(result))

  def GetSummary(self):
    """Gets a client summary object.

    Returns:
      rdfvalue.ClientSummary
    """
    # NOTE(review): resetting max_age before the reads presumably widens the
    # attribute-version window used by Get() — confirm against aff4 semantics.
    self.max_age = 0
    summary = rdfvalue.ClientSummary(client_id=self.urn)
    summary.system_info.node = self.Get(self.Schema.HOSTNAME)
    summary.system_info.system = self.Get(self.Schema.SYSTEM)
    summary.system_info.release = self.Get(self.Schema.OS_RELEASE)
    summary.system_info.version = str(self.Get(self.Schema.OS_VERSION, ""))
    summary.system_info.kernel = self.Get(self.Schema.KERNEL)
    summary.system_info.fqdn = self.Get(self.Schema.FQDN)
    summary.system_info.machine = self.Get(self.Schema.ARCH)
    summary.system_info.install_date = self.Get(
        self.Schema.INSTALL_DATE)
    summary.users = self.Get(self.Schema.USER)
    summary.interfaces = self.Get(self.Schema.LAST_INTERFACES)
    summary.client_info = self.Get(self.Schema.CLIENT_INFO)
    summary.serial_number = self.Get(self.Schema.HARDWARE_INFO).serial_number
    summary.timestamp = self.age
    summary.system_manufacturer = self.Get(
        self.Schema.HARDWARE_INFO).system_manufacturer

    return summary
class UpdateVFSFileArgs(rdfvalue.RDFProtoStruct):
  """Protobuf-backed argument container for the UpdateVFSFile flow."""
  protobuf = flows_pb2.UpdateVFSFileArgs
class UpdateVFSFile(flow.GRRFlow):
  """A flow to update VFS file."""

  args_type = UpdateVFSFileArgs

  def Init(self):
    # Register a flow-state slot that records the URN of the spawned flow.
    self.state.Register("get_file_flow_urn")

  @flow.StateHandler()
  def Start(self):
    """Calls the Update() method of a given VFSFile/VFSDirectory object."""
    self.Init()
    fd = aff4.FACTORY.Open(self.args.vfs_file_urn, mode="rw",
                           token=self.token)

    # Account for implicit directories: objects with no TYPE attribute have
    # never been written explicitly, so treat them as directories.
    if fd.Get(fd.Schema.TYPE) is None:
      fd = fd.Upgrade("VFSDirectory")

    # Update() may start a child flow; remember its URN in our state.
    self.state.get_file_flow_urn = fd.Update(
        attribute=self.args.attribute,
        priority=rdfvalue.GrrMessage.Priority.HIGH_PRIORITY)
class VFSFile(aff4.AFF4Image):
  """A VFSFile object."""

  class SchemaCls(aff4.AFF4Image.SchemaCls):
    """The schema for AFF4 files in the GRR VFS."""
    # Shared with directories so both carry the same stat attribute.
    STAT = standard.VFSDirectory.SchemaCls.STAT

    CONTENT_LOCK = aff4.Attribute(
        "aff4:content_lock", rdfvalue.RDFURN,
        "This lock contains a URN pointing to the flow that is currently "
        "updating this flow.")

    PATHSPEC = aff4.Attribute(
        "aff4:pathspec", rdfvalue.PathSpec,
        "The pathspec used to retrieve this object from the client.")

    FINGERPRINT = aff4.Attribute("aff4:fingerprint",
                                 rdfvalue.FingerprintResponse,
                                 "DEPRECATED protodict containing arrays of "
                                 " hashes. Use AFF4Stream.HASH instead.")

  def Update(self, attribute=None, priority=None):
    """Update an attribute from the client.

    Only Schema.CONTENT triggers a refresh; it starts a MultiGetFile flow
    unless one recorded in CONTENT_LOCK is still running.

    Returns:
      The URN of the started flow, or None when nothing was started.
    """
    if attribute == self.Schema.CONTENT:
      # List the directory on the client
      currently_running = self.Get(self.Schema.CONTENT_LOCK)

      # Is this flow still active?
      if currently_running:
        flow_obj = aff4.FACTORY.Open(currently_running, token=self.token)
        if flow_obj.IsRunning():
          return

      # The client_id is the first element of the URN
      client_id = self.urn.Path().split("/", 2)[1]

      # Get the pathspec for this object
      pathspec = self.Get(self.Schema.STAT).pathspec
      flow_urn = flow.GRRFlow.StartFlow(
          client_id=client_id, flow_name="MultiGetFile", token=self.token,
          pathspecs=[pathspec], priority=priority)
      # Record the new flow so concurrent updates are suppressed above.
      self.Set(self.Schema.CONTENT_LOCK(flow_urn))
      self.Close()

      return flow_urn
class MemoryImage(standard.VFSDirectory):
  """The server representation of the client's memory device."""

  class SchemaCls(VFSFile.SchemaCls):
    # Geometry describing how the raw image maps onto physical memory.
    LAYOUT = aff4.Attribute("aff4:memory/geometry", rdfvalue.MemoryInformation,
                            "The memory layout of this image.")
class VFSMemoryFile(aff4.AFF4MemoryStream):
  """A VFS file under a VFSDirectory node which does not have storage."""

  class SchemaCls(aff4.AFF4MemoryStream.SchemaCls):
    """The schema for AFF4 files in the GRR VFS."""
    # Support also VFSFile attributes.
    STAT = VFSFile.SchemaCls.STAT
    HASH = VFSFile.SchemaCls.HASH
    PATHSPEC = VFSFile.SchemaCls.PATHSPEC
    CONTENT_LOCK = VFSFile.SchemaCls.CONTENT_LOCK
    FINGERPRINT = VFSFile.SchemaCls.FINGERPRINT
class VFSAnalysisFile(VFSFile):
  """A VFS file which has no Update method."""

  def Update(self, attribute=None, priority=None):
    """No-op override: analysis files are never refreshed from the client.

    Accepts the same arguments as VFSFile.Update (the parent signature also
    takes ``priority``) so existing callers keep working.
    """
    pass
class GRRForeman(aff4.AFF4Object):
"""The foreman starts flows for clients depending on rules."""
class SchemaCls(aff4.AFF4Object.SchemaCls):
"""Attributes specific to VFSDirectory."""
RULES = aff4.Attribute("aff4:rules", rdfvalue.ForemanRules,
"The rules | |
self.ranges = [[-5, 5], [-5, 5]]
super(Himmelblau, self).__init__()
def _evaluate(self, x):
return (np.power(np.power(x[:, 0], 2) + x[:, 1] - 11, 2) +
np.power(x[:, 0] + np.power(x[:, 1], 2) - 7, 2)).reshape(
-1, 1)
    def _derivative(self, x):
        # No analytical derivative is implemented for this test function.
        raise NoDerivativeError()
class ThreeHumpCamel(TestFunction):
    """
    Three-hump camel function, as defined by
    https://en.wikipedia.org/wiki/Test_functions_for_optimization

    2-dimensional; both input variables are bounded to [-5, 5]. No
    derivative has been defined.
    """

    def __init__(self):
        self.ranges = [[-5, 5], [-5, 5]]
        super(ThreeHumpCamel, self).__init__()

    def _evaluate(self, x):
        u = x[:, 0]
        v = x[:, 1]
        y = (2.0 * np.power(u, 2) - 1.05 * np.power(u, 4) +
             np.power(u, 6) / 6.0 + u * v + np.power(v, 2))
        return y.reshape(-1, 1)

    def _derivative(self, x):
        # No analytical derivative implemented.
        raise NoDerivativeError()
class Sphere(TestFunction):
    """
    Squared euclidean distance test function:

        y = sum_{i=1}^{N} x_i^2,   y' = 2*x

    The number of input dimensions is configurable at initialisation; the
    application range is unbounded in every dimension.
    """

    def __init__(self, dimensionality=3):
        self.ranges = self.construct_ranges(dimensionality, -np.inf, np.inf)
        super(Sphere, self).__init__()

    def _evaluate(self, x):
        return np.power(x, 2).sum(axis=1).reshape(-1, 1)

    def _derivative(self, x):
        # NOTE(review): reshape(-1, 1) collapses the (n, dim) gradient into a
        # single column of n*dim entries when dim > 1 — confirm this shape is
        # what callers expect.
        return (2 * x).reshape(-1, 1)
class Ackley(TestFunction):
    """
    Ackley function as defined by
    https://en.wikipedia.org/wiki/Ackley_function.

    2-dimensional; both input variables are bounded to [-5, 5]. No
    derivative has been defined.
    """

    def __init__(self):
        self.ranges = [[-5, 5], [-5, 5]]
        super(Ackley, self).__init__()

    def _evaluate(self, x):
        u = x[:, 0]
        v = x[:, 1]
        radial = np.sqrt(0.5 * (np.power(u, 2) + np.power(v, 2)))
        exp_term = -20 * np.exp(-0.2 * radial)
        cos_u = np.cos(2 * np.pi * u)
        cos_v = np.cos(2 * np.pi * v)
        cos_term = -np.exp(0.5 * (cos_u + cos_v))
        y = exp_term + cos_term + np.exp(1) + 20
        return y.reshape(-1, 1)

    def _derivative(self, x):
        # No analytical derivative implemented.
        raise NoDerivativeError()
class Easom(TestFunction):
    """
    Easom function as defined by
    https://en.wikipedia.org/wiki/Test_functions_for_optimization

    2-dimensional; each dimension is bounded to a user-configurable box.
    No derivative has been defined.

    Args:
        absolute_range: Absolute value of the boundaries of the application
            range. Both dimensions are bounded to
            [-1 * absolute_range, absolute_range]. Defaults to 100, as is
            customary for this function.
    """

    def __init__(self, absolute_range=100):
        self.ranges = [[-absolute_range, absolute_range],
                       [-absolute_range, absolute_range]]
        super(Easom, self).__init__()

    def _evaluate(self, x):
        # Distance of each point from (pi, pi), squared and summed.
        shifted_sq = (np.power(x[:, 0] - np.pi, 2) +
                      np.power(x[:, 1] - np.pi, 2))
        y = -1 * np.cos(x[:, 0]) * np.cos(x[:, 1]) * np.exp(-1 * shifted_sq)
        return y.reshape(-1, 1)

    def _derivative(self, x):
        # No analytical derivative implemented.
        raise NoDerivativeError()
class Cosine(TestFunction):
    """
    1-D cosine function meant for posterior sampling:

        f(x) = cos(x) + 1,   f'(x) = -sin(x)

    The ranges have been set to [-4*pi, 4*pi].
    """

    def __init__(self):
        self.ranges = [[-4 * np.pi, 4 * np.pi]]
        super(Cosine, self).__init__()

    def _evaluate(self, x):
        return (np.cos(x) + 1).reshape(-1, 1)

    def _derivative(self, x):
        # Bug fix: d/dx (cos(x) + 1) = -sin(x). The previous "+ 1" was a
        # copy-paste remnant from _evaluate and made the derivative wrong.
        return (-np.sin(x)).reshape(-1, 1)
class Block(TestFunction):
    """
    Multi-dimensional block function.

    Takes the value `global_value` everywhere except inside the hypercube
    [-`block_size`, `block_size`]^dimensionality, where it takes on
    `block_value`. The application range is [-10, 10] per dimension. No
    derivative has been defined.

    Args:
        dimensionality: Number of input dimensions. Default: 3.
        block_size: Half-width of the block per dimension. Default: 1.
        block_value: Value taken *inside* the block. Default: 1.
        global_value: Value taken outside the block. Default: 0.
    """

    def __init__(self,
                 dimensionality=3,
                 block_size=1,
                 block_value=1,
                 global_value=0):
        self.dimensionality = dimensionality
        self.block_size = block_size
        self.block_value = block_value
        self.global_value = global_value
        self.ranges = self.construct_ranges(dimensionality, -10, 10)
        super(Block, self).__init__()

    def _evaluate(self, x):
        limits = np.array([self.block_size] * self.dimensionality)
        # True for rows lying inside the block in every dimension.
        inside = np.all((-1 * limits <= x) & (x <= limits), axis=1)
        y = self.global_value + (self.block_value - self.global_value) * inside
        return y.reshape(-1, 1)

    def _derivative(self, x):
        # No analytical derivative implemented.
        raise NoDerivativeError()
class Bessel(TestFunction):
    """
    Bessel function of the first kind, shifted up by 0.5 so it can be
    sampled from.

    If `fast` is False the evaluation uses scipy's jv:
    https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.jv.html
    and if `fast` is True the j0/j1 implementations:
    https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.j0.html,
    https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.j1.html.
    The fast version is an approximation of the true Bessel function.

    1-dimensional, with range [-100, 100].

    Args:
        fast: Boolean selecting the implementation set (see above).
    """

    def __init__(self, fast=False):
        self.ranges = [[-100, 100]]
        self.fast = bool(fast)
        super(Bessel, self).__init__()

    def _evaluate(self, x):
        if self.fast:
            return special.j0(x) + 0.5
        return special.jv(0, x) + 0.5

    def _derivative(self, x):
        # Bug fix: d/dx J0(x) = -J1(x) (standard Bessel identity); the
        # previous code returned +J1(x), i.e. the wrong sign.
        if self.fast:
            return -special.j1(x)
        return -special.jv(1, x)
class ModifiedBessel(TestFunction):
    """
    Modified Bessel function of the second kind.

    If `fast` is False the evaluation uses scipy's kv:
    https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.kv.html
    and if `fast` is True the k0/k1 implementations:
    https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.k0.html,
    https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.k1.html.
    The fast version is an approximation of the true modified Bessel
    function.

    1-dimensional, with range [0, 10].

    Args:
        fast: Boolean selecting the implementation set (see above).
    """

    def __init__(self, fast=False):
        self.ranges = [[0, 10]]
        self.fast = bool(fast)
        super(ModifiedBessel, self).__init__()

    def _evaluate(self, x):
        if self.fast:
            return special.k0(x)
        return special.kv(0, x)

    def _derivative(self, x):
        # Bug fix: d/dx K0(x) = -K1(x) (standard identity); the previous
        # code returned +K1(x), i.e. the wrong sign.
        if self.fast:
            return -special.k1(x)
        return -special.kv(1, x)
class Eggbox(TestFunction):
    """
    The Eggbox likelihood function as defined in the multinest paper
    https://arxiv.org/pdf/0809.3437.pdf:

        L(x, y) = exp((2 + cos(x/2)*cos(y/2))^5)

    2-dimensional, bounded to [0, 10*pi] in each dimension. No derivative
    is defined.
    """

    def __init__(self):
        self.ranges = [[0, 10 * np.pi], [0, 10 * np.pi]]
        super(Eggbox, self).__init__()

    def _evaluate(self, x):
        base = 2 + np.cos(x[:, 0] / 2.0) * np.cos(x[:, 1] / 2.0)
        return np.exp(np.power(base, 5)).reshape(-1, 1)

    def _derivative(self, x):
        # No analytical derivative implemented.
        raise NoDerivativeError()
class MultivariateNormal(TestFunction):
    """
    Zero-mean multivariate normal probability density.

    The dimensionality is inferred from the provided covariance matrix; the
    application range is [-10, 10] per dimension.

    Args:
        covariance: 2-dimensional list or numpy array used as the
            covariance matrix. Defaults to the 2x2 identity matrix, making
            the function 2-dimensional.
    """

    def __init__(self, covariance=None):
        self.covariance = np.identity(2) if covariance is None else covariance
        self.ranges = self.construct_ranges(len(self.covariance), -10, 10)
        super(MultivariateNormal, self).__init__()

    def _evaluate(self, x):
        center = np.zeros(len(self.covariance))
        density = stats.multivariate_normal.pdf(x, center, self.covariance)
        return density.reshape(-1, 1)

    def _derivative(self, x):
        # No analytical derivative implemented.
        raise NoDerivativeError()
class GaussianShells(TestFunction):
"""
The Gaussian Shells likelihood function as defined in the multinest paper
https://arxiv.org/pdf/0809.3437.pdf:
L(x, y) = circ(x, c_1, r_1, w_1) + circ(x, c_2, r_2, w_2)
circ(x, c, r, w) = exp(-(|x-c|-r)^2/(2*w^2)) / sqrt(2*pi*w^2)
where x and c are vectors in a flat 2-dimensional space, making this
testfunction 2-dimensional. The ranges of this function are set to
[-10, 10] for both input dimensions.
This is a 2-dimensional function bounded 0 and 10*pi in each dimension. No
derivative is defined.
Args:
c_1: Numpy array or list with two entries, defining the center of the
first gaussian shell. It is set to [2.5, 0] by default.
r_1: Radius of the first gaussian shell. It is 2.0 by default.
w_1: Standard deviation of the first gaussian shell. By default this
value is 0.1.
c_2: Numpy array or list with two entries, defining the center of the
second gaussian shell. It is set to [2.5, 0] by default.
r_2: Radius | |
<reponame>yimingchen95/veidt<gh_stars>10-100
# coding: utf-8
# Copyright (c) Materials Virtual Lab
# Distributed under the terms of the BSD License.
import sklearn.metrics.pairwise as smp
from monty.json import MSONable
from scipy.stats import pearsonr
import scipy.spatial.distance as spd
import numpy as np
__author__ = "<NAME>, <NAME>"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__date__ = "May 11, 2017"
class SimilarityMeasure(MSONable):
    """
    Abstract class used to define the various methods that must be implemented
    by a similarity measurement class. All measurement classes must also
    implement the standard MSONable as_dict() and from_dict() API.
    """

    def __init__(self, coeff_1, coeff_2):
        """
        Args:
            coeff_1: numpy array with dimension (n, 1), n corresponding to
                the number of wavelengths, holding the absorption
                coefficients. The spectrum needs to be normalized (its
                intensities must sum to 1) to obtain a meaningful result.
            coeff_2: numpy array with dimension (n, 1); same layout and
                normalization requirement as coeff_1.

        Raises:
            ValueError: If the two spectra differ in length or either is
                not normalized.
        """
        if len(coeff_1) != len(coeff_2):
            raise ValueError('Two spectrum have different wavelength number')
        if not np.allclose(coeff_1.sum(), 1):
            raise ValueError('Spectrum 1 has not been normalized properly')
        if not np.allclose(coeff_2.sum(), 1):
            raise ValueError('Spectrum 2 has not been normalized properly')
        self.coeff_1 = coeff_1
        self.coeff_2 = coeff_2
        # Maximum possible distance; subclasses set this to a concrete bound.
        self.d_max = None

    def normalize_spectrum(self, spec_1):
        raise NotImplementedError()

    def distance_measure(self):
        """
        Compute the distance measure of two spectra; must be implemented in
        each similarity measure class.

        Returns: Distance measure between two spectra
        """
        raise NotImplementedError()

    def similarity_measure(self, dist_conversion='bin'):
        """
        Compute the similarity measure of two spectra.

        Args:
            dist_conversion: algorithm used to convert the distance measure
                to a similarity. The exponential conversion is more
                sensitive to extremely fine changes in spectrum difference.
                Available options: ['bin', 'exp']

        Returns:
            Similarity measure between two spectra

        Raises:
            ValueError: If dist_conversion is not a recognized option.
        """
        coeff_dist = self.distance_measure()
        if dist_conversion == 'bin':
            return 1 - coeff_dist / self.d_max
        if dist_conversion == 'exp':
            return np.exp(-(coeff_dist / (self.d_max - coeff_dist)))
        # Bug fix: previously an unknown option fell through and raised an
        # opaque UnboundLocalError; fail loudly and clearly instead.
        raise ValueError(
            "Unknown dist_conversion option: %r (use 'bin' or 'exp')"
            % dist_conversion)
class Euclidean(SimilarityMeasure):
    """
    Euclidean similarity between two normalized spectra.
    """

    def __init__(self, coeff_1, coeff_2):
        super().__init__(coeff_1, coeff_2)
        # Largest possible Euclidean distance between two unit-sum spectra.
        self.d_max = np.sqrt(2)

    def distance_measure(self):
        return spd.euclidean(self.coeff_1, self.coeff_2)

    def __str__(self):
        return "EuclideanSimilarity"
class Cityblock(SimilarityMeasure):
    """
    Cityblock (Manhattan) similarity between two normalized spectra.
    """

    def __init__(self, coeff_1, coeff_2):
        super().__init__(coeff_1, coeff_2)
        # Maximum L1 distance between two unit-sum spectra.
        self.d_max = 2

    def distance_measure(self):
        return spd.cityblock(self.coeff_1, self.coeff_2)

    def __str__(self):
        return "CityblockSimilarity"
class Minkowski(SimilarityMeasure):
    """
    Minkowski similarity of order p between two normalized spectra.
    """

    def __init__(self, coeff_1, coeff_2, p=4):
        """
        Args:
            coeff_1: numpy array with dimension (n, 1), n corresponding to
                the number of wavelengths, holding the absorption
                coefficients of a normalized spectrum (intensities sum
                to 1).
            coeff_2: numpy array with dimension (n, 1); same layout and
                normalization requirement as coeff_1.
            p: The order of the norm of the difference
        """
        super().__init__(coeff_1, coeff_2)
        self.p = p
        # Maximum order-p Minkowski distance between unit-sum spectra.
        self.d_max = np.power(2, 1.0 / p)

    def distance_measure(self):
        return spd.minkowski(self.coeff_1, self.coeff_2, self.p)

    def __str__(self):
        return "MinkowskiSimilarity"
class Chebyshev(SimilarityMeasure):
    """
    Chebyshev (maximum coordinate difference) similarity.
    """

    def __init__(self, coeff_1, coeff_2):
        super().__init__(coeff_1, coeff_2)
        self.d_max = 1

    def distance_measure(self):
        abs_diff = np.absolute(np.subtract(self.coeff_1, self.coeff_2))
        return np.max(abs_diff)

    def __str__(self):
        return "ChebyshevSimilarity"
class Sorensen(SimilarityMeasure):
    """
    Sorensen similarity.
    """

    def __init__(self, coeff_1, coeff_2):
        super().__init__(coeff_1, coeff_2)
        self.d_max = 1

    def distance_measure(self):
        # Sum of absolute differences over the sum of all intensities.
        numerator = np.sum(np.absolute(np.subtract(self.coeff_1,
                                                   self.coeff_2)))
        denominator = np.sum(np.add(self.coeff_1, self.coeff_2))
        return numerator / denominator

    def __str__(self):
        return "SorensenSimilarity"
class Kulczynski(SimilarityMeasure):
    """
    Kulczynski similarity.
    """

    def __init__(self, coeff_1, coeff_2):
        super().__init__(coeff_1, coeff_2)
        # The distance is unbounded, so the generic conversion cannot apply.
        self.d_max = np.inf

    def distance_measure(self):
        numerator = np.sum(np.absolute(np.subtract(self.coeff_1,
                                                   self.coeff_2)))
        denominator = np.sum(np.minimum(self.coeff_1, self.coeff_2))
        return numerator / denominator

    def similarity_measure(self):
        # Similarity is defined as the reciprocal of the distance.
        return 1 / self.distance_measure()

    def __str__(self):
        return "KulczynskiSimilarity"
class Lorentzian(SimilarityMeasure):
    """
    Lorentzian similarity.
    """

    def __init__(self, coeff_1, coeff_2):
        super().__init__(coeff_1, coeff_2)
        self.d_max = 2 * np.log(2)

    def distance_measure(self):
        abs_diff = np.absolute(np.subtract(self.coeff_1, self.coeff_2))
        return np.sum(np.log(1 + abs_diff))

    def __str__(self):
        return "LorentzianSimilarity"
class Intersection(SimilarityMeasure):
    """
    Intersection similarity.
    """

    def __init__(self, coeff_1, coeff_2):
        super().__init__(coeff_1, coeff_2)
        self.d_max = 1

    def distance_measure(self):
        # Half the total variation between the two spectra.
        abs_diff = np.absolute(np.subtract(self.coeff_1, self.coeff_2))
        return np.sum(abs_diff) / 2

    def __str__(self):
        return "IntersectionSimilarity"
class Czekanowski(SimilarityMeasure):
    """
    Czekanowski similarity.
    """

    def __init__(self, coeff_1, coeff_2):
        super().__init__(coeff_1, coeff_2)
        self.d_max = 1

    def distance_measure(self):
        numerator = np.sum(np.absolute(np.subtract(self.coeff_1,
                                                   self.coeff_2)))
        denominator = np.sum(np.add(self.coeff_1, self.coeff_2))
        return numerator / denominator

    def __str__(self):
        return "CzekanowskiSimilarity"
class Motyka(SimilarityMeasure):
    """
    Motyka similarity.
    """

    def __init__(self, coeff_1, coeff_2):
        super().__init__(coeff_1, coeff_2)
        self.d_max = 1

    def distance_measure(self):
        numerator = np.sum(np.maximum(self.coeff_1, self.coeff_2))
        denominator = np.sum(np.add(self.coeff_1, self.coeff_2))
        return numerator / denominator

    def __str__(self):
        return "MotykaSimilarity"
class Ruzicka(SimilarityMeasure):
    """
    Ruzicka similarity.
    """

    def similarity_measure(self):
        # Ratio of elementwise minima to elementwise maxima.
        numerator = np.sum(np.minimum(self.coeff_1, self.coeff_2))
        denominator = np.sum(np.maximum(self.coeff_1, self.coeff_2))
        return numerator / denominator

    def __str__(self):
        return "RuzickaSimilarity"
class Tanimoto(SimilarityMeasure):
    """
    Tanimoto similarity.
    """

    def __init__(self, coeff_1, coeff_2):
        super().__init__(coeff_1, coeff_2)
        self.d_max = 1

    def distance_measure(self):
        elementwise_max = np.maximum(self.coeff_1, self.coeff_2)
        elementwise_min = np.minimum(self.coeff_1, self.coeff_2)
        numerator = np.sum(np.subtract(elementwise_max, elementwise_min))
        return numerator / np.sum(elementwise_max)

    def __str__(self):
        return "TanimotoSimilarity"
class InnerProduct(SimilarityMeasure):
    """
    Inner product similarity.
    """

    def similarity_measure(self):
        # Plain inner product of the two intensity vectors.
        return np.inner(self.coeff_1, self.coeff_2)

    def __str__(self):
        return "InnerProductSimilarity"
class HarmonicMean(SimilarityMeasure):
    """
    Harmonic mean similarity.
    """

    def similarity_measure(self):
        products = np.multiply(self.coeff_1, self.coeff_2)
        sums = np.add(self.coeff_1, self.coeff_2)
        # Only divide where the elementwise sum is non-zero.
        nonzero = np.where(sums != 0)
        return 2 * np.sum(products[nonzero] / sums[nonzero])

    def __str__(self):
        return "HarmonicMeanSimilarity"
class Cosine(SimilarityMeasure):
    """
    Cosine similarity; similarity_measure returns the standard
    cosine_similarity of the two spectra.
    """

    def similarity_measure(self):
        row_1 = self.coeff_1.reshape(1, -1)
        row_2 = self.coeff_2.reshape(1, -1)
        return smp.cosine_similarity(row_1, row_2)[0][0]

    def __str__(self):
        return "CosineSimilarity"
class Jaccard(SimilarityMeasure):
    """
    Jaccard (Kumar-Hassebrook) similarity.
    """

    def distance_measure(self):
        return 1 - self.similarity_measure()

    def similarity_measure(self):
        """
        Kumar-Hassebrook similarity between the two spectra:
        <c1, c2> / sum(c1^2 + c2^2 - c1*c2).
        """
        numerator = np.dot(self.coeff_1, self.coeff_2)
        denominator = np.sum(np.subtract(
            np.add(np.square(self.coeff_1), np.square(self.coeff_2)),
            np.multiply(self.coeff_1, self.coeff_2)))
        return numerator / denominator

    def __str__(self):
        return "JaccardSimilarity"
class Dice(SimilarityMeasure):
    """
    Dice similarity measure.
    """

    def __init__(self, coeff_1, coeff_2):
        super().__init__(coeff_1, coeff_2)
        self.d_max = 1

    def distance_measure(self):
        return 1 - self.similarity_measure()

    def similarity_measure(self):
        # 2<c1, c2> / (|c1|^2 + |c2|^2).
        numerator = 2 * np.dot(self.coeff_1, self.coeff_2)
        denominator = np.sum(np.add(np.square(self.coeff_1),
                                    np.square(self.coeff_2)))
        return numerator / denominator

    def __str__(self):
        return "DiceSimilarity"
class Fidelity(SimilarityMeasure):
    """
    Fidelity (Bhattacharyya coefficient style) similarity measure.
    """

    def similarity_measure(self):
        products = np.multiply(self.coeff_1, self.coeff_2)
        return np.sum(np.sqrt(np.abs(products)))

    def __str__(self):
        return "FidelitySimilarity"
class Hellinger(SimilarityMeasure):
    """
    Hellinger similarity measure.
    """

    def __init__(self, coeff_1, coeff_2):
        super().__init__(coeff_1, coeff_2)
        self.d_max = 2

    def distance_measure(self):
        sqrt_diff = np.subtract(np.sqrt(np.abs(self.coeff_1)),
                                np.sqrt(np.abs(self.coeff_2)))
        return np.sqrt(2 * np.sum(np.square(sqrt_diff)))

    def __str__(self):
        return "HellingerSimilarity"
class Matusita(SimilarityMeasure):
    """
    Matusita similarity measure.
    """

    def __init__(self, coeff_1, coeff_2):
        super().__init__(coeff_1, coeff_2)
        self.d_max = np.sqrt(2)

    def distance_measure(self):
        sqrt_diff = np.subtract(np.sqrt(np.abs(self.coeff_1)),
                                np.sqrt(np.abs(self.coeff_2)))
        return np.sqrt(np.sum(np.square(sqrt_diff)))

    def __str__(self):
        return "MatusitaSimilarity"
class Squaredchord(SimilarityMeasure):
    """
    Squared-chord similarity measure.
    """

    def __init__(self, coeff_1, coeff_2):
        super().__init__(coeff_1, coeff_2)
        self.d_max = 2

    def distance_measure(self):
        sqrt_diff = np.subtract(np.sqrt(np.abs(self.coeff_1)),
                                np.sqrt(np.abs(self.coeff_2)))
        return np.sum(np.square(sqrt_diff))

    def __str__(self):
        return "SquaredchordSimilarity"
class SquaredEuclidean(SimilarityMeasure):
    """
    Squared Euclidean similarity.
    """

    def __init__(self, coeff_1, coeff_2):
        super().__init__(coeff_1, coeff_2)
        self.d_max = 2

    def distance_measure(self):
        euclidean_dist = spd.euclidean(self.coeff_1, self.coeff_2)
        return np.square(euclidean_dist)

    def __str__(self):
        return "SquaredEuclideanSimilarity"
class SquaredChiSquare(SimilarityMeasure):
    """Squared chi-square distance: sum((a_i - b_i)^2 / (a_i + b_i))."""

    def __init__(self, coeff_1, coeff_2):
        super().__init__(coeff_1, coeff_2)
        self.d_max = 2  # maximum possible distance for this measure

    def distance_measure(self):
        """Return the distance, skipping bins where a_i + b_i == 0."""
        diff_sq = np.square(np.subtract(self.coeff_1, self.coeff_2))
        total = np.add(self.coeff_1, self.coeff_2)
        valid = np.where(total != 0)  # avoid division by zero
        return np.sum(diff_sq[valid] / total[valid])

    def __str__(self):
        return "SquaredChiSquare Similarity"
class ProbabilisticSymmetricChiS(SimilarityMeasure):
    """Probabilistic symmetric chi-square: 2 * sum((a_i - b_i)^2 / (a_i + b_i))."""

    def __init__(self, coeff_1, coeff_2):
        super().__init__(coeff_1, coeff_2)
        self.d_max = 4  # maximum possible distance for this measure

    def distance_measure(self):
        """Return the distance, skipping bins where a_i + b_i == 0."""
        diff_sq = np.square(np.subtract(self.coeff_1, self.coeff_2))
        total = np.add(self.coeff_1, self.coeff_2)
        valid = np.where(total != 0)  # avoid division by zero
        return 2 * np.sum(diff_sq[valid] / total[valid])

    def __str__(self):
        return "Probabilistic Symmetric ChiSquare Similarity"
class AvgL1Linf(SimilarityMeasure):
    """Average of the L1 (city-block) and L-infinity (Chebyshev) distances."""

    def __init__(self, coeff_1, coeff_2):
        super().__init__(coeff_1, coeff_2)
        self.d_max = 3.0 / 2  # maximum possible distance for this measure

    def distance_measure(self):
        """Return (L1 + L_inf) / 2 for the two coefficient vectors."""
        l_inf = spd.chebyshev(self.coeff_1, self.coeff_2)
        l_one = spd.cityblock(self.coeff_1, self.coeff_2)
        return (l_inf + l_one) / 2

    def __str__(self):
        return "Average L1 L_inf Similarity"
class MinSymmetricChi(SimilarityMeasure):
    """Min-symmetric chi-square: the smaller of the two directed chi-square sums."""

    def __init__(self, coeff_1, coeff_2):
        super().__init__(coeff_1, coeff_2)
        self.d_max = 1  # maximum possible distance for this measure

    def distance_measure(self):
        """Return min(sum((a-b)^2 / a), sum((a-b)^2 / b)), skipping zero denominators."""
        diff_sq = np.square(np.subtract(self.coeff_1, self.coeff_2))
        nonzero_1 = np.where(self.coeff_1 != 0)
        nonzero_2 = np.where(self.coeff_2 != 0)
        chi_wrt_1 = np.sum(diff_sq[nonzero_1] / self.coeff_1[nonzero_1])
        chi_wrt_2 = np.sum(diff_sq[nonzero_2] / self.coeff_2[nonzero_2])
        return np.minimum(chi_wrt_1, chi_wrt_2)

    def __str__(self):
        return "minsymmetric Chisquare Similarity"
class PearsonCorrMeasure(SimilarityMeasure):
"""
Pearson Correlation Measure
"""
def __init__(self, coeff_1, coeff_2):
"""
Args:
coeff_1: numpy array with dimension (n, 1), n corresponding to
number of wavelength, column corresponding to the absorption
coefficiency. The spectrum need to be normalized to obtain
meaningful result, i.e. the sum of spectrum's intensity need
equal to 1
coeff_2: | |
self.MARKERS or chunk.isspace():
self._fail_route() # Tags must start with text, not spaces
data.context = data.CX_NOTE_SPACE
elif chunk.isspace():
self._handle_tag_space(data, chunk)
continue
elif data.context & data.CX_NOTE_SPACE:
if data.context & data.CX_QUOTED:
data.context = data.CX_ATTR_VALUE
self._memoize_bad_route()
self._pop()
self._head = data.reset - 1 # Will be auto-incremented
return # Break early
self._fail_route()
elif data.context & data.CX_ATTR_READY:
data.context = data.CX_ATTR_NAME
self._push(contexts.TAG_ATTR)
elif data.context & data.CX_ATTR_NAME:
if chunk == "=":
data.context = data.CX_ATTR_VALUE | data.CX_NOTE_QUOTE
self._emit(tokens.TagAttrEquals())
continue
if data.context & data.CX_NOTE_EQUALS:
self._push_tag_buffer(data)
data.context = data.CX_ATTR_NAME
self._push(contexts.TAG_ATTR)
else: # data.context & data.CX_ATTR_VALUE assured
escaped = self._read(-1) == "\\" and self._read(-2) != "\\"
if data.context & data.CX_NOTE_QUOTE:
data.context ^= data.CX_NOTE_QUOTE
if chunk in "'\"" and not escaped:
data.context |= data.CX_QUOTED
data.quoter = chunk
data.reset = self._head
try:
self._push(self._context)
except BadRoute:
# Already failed to parse this as a quoted string
data.context = data.CX_ATTR_VALUE
self._head -= 1
return
continue
elif data.context & data.CX_QUOTED:
if chunk == data.quoter and not escaped:
data.context |= data.CX_NOTE_SPACE
continue
self._handle_tag_text(chunk)
def _handle_tag_close_open(self, data, token):
    """Handle the closing of a open tag (``<foo>``)."""
    # Flush any attribute still being accumulated before closing the tag.
    if data.context & (data.CX_ATTR_NAME | data.CX_ATTR_VALUE):
        self._push_tag_buffer(data)
    padding = data.padding_buffer["first"]
    self._emit(token(padding=padding))
    self._head += 1
def _handle_tag_open_close(self):
    """Handle the opening of a closing tag (``</foo>``)."""
    self._emit(tokens.TagOpenClose())
    # Collect the closing tag's name on its own stack so that it can be
    # compared against the opening tag's name later.
    self._push(contexts.TAG_CLOSE)
    self._head += 1
def _handle_tag_close_close(self):
    """Handle the ending of a closing tag (``</foo>``)."""
    # PEP 8 (E731): a named def instead of a lambda assignment.
    def strip(tok):
        # Compare tag names case-insensitively, ignoring trailing whitespace.
        return tok.text.rstrip().lower()

    closing = self._pop()
    if len(closing) != 1 or (not isinstance(closing[0], tokens.Text) or
                             strip(closing[0]) != strip(self._stack[1])):
        # The closing tag's name must be a single text token matching the
        # name of the tag being closed (stack index 1 holds that name).
        self._fail_route()
    self._emit_all(closing)
    self._emit(tokens.TagCloseClose())
    return self._pop()
def _handle_blacklisted_tag(self):
    """Handle the body of an HTML tag that is parser-blacklisted.

    The body is consumed as raw text (only HTML entities are parsed)
    until the matching closing tag is found.
    """
    # PEP 8 (E731): a named def instead of a lambda assignment.
    def strip(text):
        return text.rstrip().lower()

    while True:
        this, nxt = self._read(), self._read(1)
        if this is self.END:
            self._fail_route()
        elif this == "<" and nxt == "/":
            self._head += 3
            if self._read() != ">" or (strip(self._read(-1)) !=
                                       strip(self._stack[1].text)):
                # Not the closing tag we are looking for; emit "</" as text.
                self._head -= 1
                self._emit_text("</")
                continue
            self._emit(tokens.TagOpenClose())
            self._emit_text(self._read(-1))
            self._emit(tokens.TagCloseClose())
            return self._pop()
        elif this == "&":
            self._parse_entity()
        else:
            self._emit_text(this)
        self._head += 1
def _handle_single_only_tag_end(self):
    """Handle the end of an implicitly closing single-only HTML tag."""
    # The TagCloseOpen token is the last one on the stack; reuse its padding
    # for the implicit self-closing token that replaces it.
    padding = self._stack.pop().padding
    self._emit(tokens.TagCloseSelfclose(padding=padding, implicit=True))
    self._head -= 1  # Offset displacement done by _handle_tag_close_open
    return self._pop()
def _handle_single_tag_end(self):
    """Handle the stream end when inside a single-supporting HTML tag."""
    stack = self._stack
    # We need to find the index of the TagCloseOpen token corresponding to
    # the TagOpenOpen token located at index 0:
    depth = 1
    for index, token in enumerate(stack[2:], 2):
        if isinstance(token, tokens.TagOpenOpen):
            depth += 1
        elif isinstance(token, tokens.TagCloseOpen):
            depth -= 1
            if depth == 0:
                break
        elif isinstance(token, tokens.TagCloseSelfclose):
            depth -= 1
            if depth == 0:  # pragma: no cover (untestable/exceptional)
                raise ParserError(
                    "_handle_single_tag_end() got an unexpected "
                    "TagCloseSelfclose")
    else:  # pragma: no cover (untestable/exceptional case)
        raise ParserError("_handle_single_tag_end() missed a TagCloseOpen")
    # Convert that TagCloseOpen into an implicit self-closing token, keeping
    # its padding, so the tag no longer expects an explicit close.
    padding = stack[index].padding
    stack[index] = tokens.TagCloseSelfclose(padding=padding, implicit=True)
    return self._pop()
def _really_parse_tag(self):
    """Actually parse an HTML tag, starting with the open (``<foo>``)."""
    data = _TagOpenData()
    self._push(contexts.TAG_OPEN)
    self._emit(tokens.TagOpenOpen())
    while True:
        this, nxt = self._read(), self._read(1)
        # We may only close the tag when not inside a quoted value or the
        # tag name, unless a space was just noted after a closing quote.
        can_exit = (not data.context & (data.CX_QUOTED | data.CX_NAME) or
                    data.context & data.CX_NOTE_SPACE)
        if this is self.END:
            if self._context & contexts.TAG_ATTR:
                if data.context & data.CX_QUOTED:
                    # Unclosed attribute quote: reset, don't die
                    data.context = data.CX_ATTR_VALUE
                    self._memoize_bad_route()
                    self._pop()
                    self._head = data.reset
                    continue
                self._pop()
            self._fail_route()
        elif this == ">" and can_exit:
            self._handle_tag_close_open(data, tokens.TagCloseOpen)
            self._context = contexts.TAG_BODY
            if is_single_only(self._stack[1].text):
                # Single-only tags (e.g. <br>) close implicitly.
                return self._handle_single_only_tag_end()
            if is_parsable(self._stack[1].text):
                return self._parse(push=False)
            return self._handle_blacklisted_tag()
        elif this == "/" and nxt == ">" and can_exit:
            self._handle_tag_close_open(data, tokens.TagCloseSelfclose)
            return self._pop()
        else:
            self._handle_tag_data(data, this)
        self._head += 1
def _handle_invalid_tag_start(self):
    """Handle the (possible) start of an implicitly closing single tag."""
    reset = self._head + 1
    self._head += 2
    try:
        # Only single-only tags (e.g. <br>) may appear in "</foo>" form
        # without a matching opener; anything else fails this route.
        if not is_single_only(self.tag_splitter.split(self._read())[0]):
            raise BadRoute()
        tag = self._really_parse_tag()
    except BadRoute:
        self._head = reset
        self._emit_text("</")
    else:
        tag[0].invalid = True  # Set flag of TagOpenOpen
        self._emit_all(tag)
def _parse_tag(self):
    """Parse an HTML tag at the head of the wikicode string."""
    reset = self._head
    self._head += 1
    try:
        tag = self._really_parse_tag()
    except BadRoute:
        # Not a valid tag after all; restore position and emit a literal "<".
        self._head = reset
        self._emit_text("<")
    else:
        self._emit_all(tag)
def _emit_style_tag(self, tag, markup, body):
    """Write the body of a tag and the tokens that should surround it.

    ``tag`` is the HTML tag name (e.g. ``"i"``), ``markup`` the wiki markup
    (e.g. ``"''"``), and ``body`` the already-tokenized tag contents.
    """
    self._emit(tokens.TagOpenOpen(wiki_markup=markup))
    self._emit_text(tag)
    self._emit(tokens.TagCloseOpen())
    self._emit_all(body)
    self._emit(tokens.TagOpenClose())
    self._emit_text(tag)
    self._emit(tokens.TagCloseClose())
def _parse_italics(self):
    """Parse wiki-style italics."""
    reset = self._head
    try:
        stack = self._parse(contexts.STYLE_ITALICS)
    except BadRoute as route:
        self._head = reset
        if route.context & contexts.STYLE_PASS_AGAIN:
            # A nested bold parse asked us to retry; parse again but forbid
            # further retries by flagging STYLE_SECOND_PASS.
            new_ctx = contexts.STYLE_ITALICS | contexts.STYLE_SECOND_PASS
            try:
                stack = self._parse(new_ctx)
            except BadRoute:
                self._head = reset
                self._emit_text("''")
                return
        else:
            # No valid italics; emit the ticks as literal text.
            self._emit_text("''")
            return
    self._emit_style_tag("i", "''", stack)
def _parse_bold(self):
    """Parse wiki-style bold.

    Returns True if the caller should pop the current stack, False
    otherwise.
    """
    reset = self._head
    try:
        stack = self._parse(contexts.STYLE_BOLD)
    except BadRoute:
        self._head = reset
        if self._context & contexts.STYLE_SECOND_PASS:
            # Already retrying; emit one tick as text and tell the caller
            # to give up on this stack.
            self._emit_text("'")
            return True
        if self._context & contexts.STYLE_ITALICS:
            # Ask the enclosing italics parse to retry from scratch.
            self._context |= contexts.STYLE_PASS_AGAIN
            self._emit_text("'''")
        else:
            # Treat the first tick as text and try italics on the rest.
            self._emit_text("'")
            self._parse_italics()
    else:
        self._emit_style_tag("b", "'''", stack)
    return False
def _parse_italics_and_bold(self):
    """Parse wiki-style italics and bold together (i.e., five ticks)."""
    reset = self._head
    try:
        stack = self._parse(contexts.STYLE_BOLD)
    except BadRoute:
        self._head = reset
        try:
            stack = self._parse(contexts.STYLE_ITALICS)
        except BadRoute:
            # Neither style parses; emit the five ticks as literal text.
            self._head = reset
            self._emit_text("'''''")
        else:
            # Italics parsed; try to parse the remainder as bold.
            reset = self._head
            try:
                stack2 = self._parse(contexts.STYLE_BOLD)
            except BadRoute:
                self._head = reset
                self._emit_text("'''")
                self._emit_style_tag("i", "''", stack)
            else:
                # Both parsed: nest the italics tag and the remainder
                # inside an outer bold tag.
                self._push()
                self._emit_style_tag("i", "''", stack)
                self._emit_all(stack2)
                self._emit_style_tag("b", "'''", self._pop())
    else:
        # Bold parsed first; try to parse the remainder as italics.
        reset = self._head
        try:
            stack2 = self._parse(contexts.STYLE_ITALICS)
        except BadRoute:
            self._head = reset
            self._emit_text("''")
            self._emit_style_tag("b", "'''", stack)
        else:
            # Both parsed: nest the bold tag and the remainder inside an
            # outer italics tag.
            self._push()
            self._emit_style_tag("b", "'''", stack)
            self._emit_all(stack2)
            self._emit_style_tag("i", "''", self._pop())
def _parse_style(self):
    """Parse wiki-style formatting (``''``/``'''`` for italics/bold)."""
    self._head += 2
    ticks = 2
    # Count the full run of apostrophes at the head.
    while self._read() == "'":
        self._head += 1
        ticks += 1
    italics = self._context & contexts.STYLE_ITALICS
    bold = self._context & contexts.STYLE_BOLD
    if ticks > 5:
        # More than five ticks: the extras are literal apostrophes.
        self._emit_text("'" * (ticks - 5))
        ticks = 5
    elif ticks == 4:
        # Four ticks: one literal apostrophe plus three bold-markup ticks.
        self._emit_text("'")
        ticks = 3
    if (italics and ticks in (2, 5)) or (bold and ticks in (3, 5)):
        # These ticks close the style we are currently inside.
        if ticks == 5:
            # Leave the leftover ticks for the enclosing style to consume.
            self._head -= 3 if italics else 2
        return self._pop()
    if not self._can_recurse():
        if ticks == 3:
            if self._context & contexts.STYLE_SECOND_PASS:
                self._emit_text("'")
                return self._pop()
            if self._context & contexts.STYLE_ITALICS:
                self._context |= contexts.STYLE_PASS_AGAIN
        self._emit_text("'" * ticks)
    elif ticks == 2:
        self._parse_italics()
    elif ticks == 3:
        if self._parse_bold():
            return self._pop()
    else:  # ticks == 5
        self._parse_italics_and_bold()
    self._head -= 1
def _handle_list_marker(self):
    """Handle a list marker at the head (``#``, ``*``, ``;``, ``:``)."""
    markup = self._read()
    if markup == ";":
        # A definition-list term follows; remember it so ":" ends the term.
        self._context |= contexts.DL_TERM
    self._emit(tokens.TagOpenOpen(wiki_markup=markup))
    self._emit_text(get_html_tag(markup))
    self._emit(tokens.TagCloseSelfclose())
def _handle_list(self):
    """Handle a wiki-style list (``#``, ``*``, ``;``, ``:``)."""
    list_markers = ("#", "*", ";", ":")
    self._handle_list_marker()
    # Consume every consecutive marker so nested lists are emitted in order.
    while self._read(1) in list_markers:
        self._head += 1
        self._handle_list_marker()
def _handle_hr(self):
    """Handle a wiki-style horizontal rule (``----``) in the string."""
    length = 4
    self._head += 3
    # Absorb any extra dashes beyond the minimum four.
    while self._read(1) == "-":
        self._head += 1
        length += 1
    self._emit(tokens.TagOpenOpen(wiki_markup="-" * length))
    self._emit_text("hr")
    self._emit(tokens.TagCloseSelfclose())
def _handle_dl_term(self):
    """Handle the term in a description list (``foo`` in ``;foo:bar``)."""
    self._context ^= contexts.DL_TERM
    if self._read() != ":":
        # No definition follows on this line; end the term with a newline.
        self._emit_text("\n")
    else:
        self._handle_list_marker()
def _emit_table_tag(self, open_open_markup, tag, style, padding,
                    close_open_markup, contents, open_close_markup):
    """Emit the full token sequence for a table-related tag."""
    self._emit(tokens.TagOpenOpen(wiki_markup=open_open_markup))
    self._emit_text(tag)
    if style:
        self._emit_all(style)
    # Only attach wiki markup to the close-open token when one was given.
    close_kwargs = {"padding": padding}
    if close_open_markup:
        close_kwargs["wiki_markup"] = close_open_markup
    self._emit(tokens.TagCloseOpen(**close_kwargs))
    if contents:
        self._emit_all(contents)
    self._emit(tokens.TagOpenClose(wiki_markup=open_close_markup))
    self._emit_text(tag)
    self._emit(tokens.TagCloseClose())
def _handle_table_style(self, end_token):
    """Handle style attributes for a table until ``end_token``."""
    data = _TagOpenData()
    data.context = _TagOpenData.CX_ATTR_READY
    while True:
        this = self._read()
        # We may only stop at end_token when outside a quoted value, or
        # right after one (CX_NOTE_SPACE).
        can_exit = (not data.context & data.CX_QUOTED or
                    data.context & data.CX_NOTE_SPACE)
        if this == end_token and can_exit:
            if data.context & (data.CX_ATTR_NAME | data.CX_ATTR_VALUE):
                self._push_tag_buffer(data)
            if this.isspace():
                data.padding_buffer["first"] += this
            # Return the leading padding so the caller can re-emit it.
            return data.padding_buffer["first"]
        if this is self.END or this == end_token:
            if self._context & contexts.TAG_ATTR:
                if data.context & data.CX_QUOTED:
                    # Unclosed attribute quote: reset, don't die
                    data.context = data.CX_ATTR_VALUE
                    self._memoize_bad_route()
                    self._pop()
                    self._head = data.reset
                    continue
                self._pop()
            self._fail_route()
        else:
            self._handle_tag_data(data, this)
        self._head += 1
def _parse_table(self):
    """Parse a wikicode table by starting with the first line."""
    reset = self._head
    self._head += 2  # Skip past the opening "{|"
    try:
        self._push(contexts.TABLE_OPEN)
        padding = self._handle_table_style("\n")
    except BadRoute:
        # Invalid table start; restore position and emit "{" as plain text.
        self._head = reset
        self._emit_text("{")
        return
    style = self._pop()
    self._head += 1
    restore_point = self._stack_ident
    try:
        table = self._parse(contexts.TABLE_OPEN)
    except BadRoute:
        # The table body failed to parse; unwind any stacks the attempt
        # left behind before bailing out.
        while self._stack_ident != restore_point:
            self._memoize_bad_route()
            self._pop()
        self._head = reset
        self._emit_text("{")
        return
    self._emit_table_tag("{|", "table", style, padding, None, table, "|}")
    # Offset displacement done by _parse():
    self._head -= 1
def _handle_table_row(self):
"""Parse as style until end of the line, then continue."""
self._head += 2
if not self._can_recurse():
self._emit_text("|-")
self._head -= 1
return
self._push(contexts.TABLE_OPEN | contexts.TABLE_ROW_OPEN)
padding = self._handle_table_style("\n")
style = self._pop()
# Don't parse the style separator:
self._head += 1
row = self._parse(contexts.TABLE_OPEN | contexts.TABLE_ROW_OPEN)
self._emit_table_tag("|-", "tr", | |
<reponame>otherJL0/tmuxp
"""Configuration parsing and export for tmuxp.
tmuxp.config
~~~~~~~~~~~~
"""
import logging
import os
from typing import Dict
from . import exc
logger = logging.getLogger(__name__)
def validate_schema(session_config):
    """Return True if config schema is correct.

    Parameters
    ----------
    session_config : dict
        session configuration

    Returns
    -------
    bool

    Raises
    ------
    exc.ConfigError
        If a required key is missing or has an unsupported type.
    """
    if "session_name" not in session_config:
        raise exc.ConfigError('config requires "session_name"')

    if "windows" not in session_config:
        raise exc.ConfigError('config requires list of "windows"')

    # Every window must be named.
    for window in session_config["windows"]:
        if "window_name" not in window:
            raise exc.ConfigError('config window is missing "window_name"')

    if "plugins" in session_config and not isinstance(
        session_config["plugins"], list
    ):
        raise exc.ConfigError('"plugins" only supports list type')

    return True
def is_config_file(filename, extensions=(".yml", ".yaml", ".json")):
    """Return True if file has a valid config file type.

    Parameters
    ----------
    filename : str
        filename to check (e.g. ``mysession.json``).
    extensions : str or list
        filetypes to check (e.g. ``['.yaml', '.json']``).

    Returns
    -------
    bool
    """
    # A tuple default avoids the shared-mutable-default pitfall; callers may
    # still pass a single extension as a plain string or a list.
    if isinstance(extensions, str):
        extensions = (extensions,)
    # str.endswith accepts a tuple of suffixes directly.
    return filename.endswith(tuple(extensions))
def in_dir(
    config_dir=os.path.expanduser("~/.tmuxp"), extensions=(".yml", ".yaml", ".json")
):
    """Return a list of configs in ``config_dir``.

    Parameters
    ----------
    config_dir : str
        directory to search
    extensions : list
        filetypes to check (e.g. ``['.yaml', '.json']``).

    Returns
    -------
    list
    """
    # Tuple default instead of a mutable list default (shared across calls).
    # Hidden files (leading ".") are never considered configs here.
    return [
        filename
        for filename in os.listdir(config_dir)
        if is_config_file(filename, extensions) and not filename.startswith(".")
    ]
def in_cwd():
    """Return list of configs in current working directory.

    If filename is ``.tmuxp.py``, ``.tmuxp.json``, ``.tmuxp.yaml``.

    Returns
    -------
    list
        configs in current working directory
    """
    return [
        filename
        for filename in os.listdir(os.getcwd())
        if filename.startswith(".tmuxp") and is_config_file(filename)
    ]
def expandshell(_path):
    """Return expanded path based on user's ``$HOME`` and ``env``.

    :py:func:`os.path.expanduser` and :py:func:`os.path.expandvars`.

    Parameters
    ----------
    path : str
        path to expand

    Returns
    -------
    str
        path with shell variables expanded
    """
    expanded_user = os.path.expanduser(_path)
    return os.path.expandvars(expanded_user)
def inline(session_config):
    """Return config in inline form, opposite of :meth:`config.expand`.

    Parameters
    ----------
    session_config : dict

    Returns
    -------
    dict
        configuration with optional inlined configs.
    """
    if (
        "shell_command" in session_config
        and isinstance(session_config["shell_command"], list)
        and len(session_config["shell_command"]) == 1
    ):
        session_config["shell_command"] = session_config["shell_command"][0]

        if len(session_config) == 1:
            # Only the command remains: collapse the mapping to the bare
            # string. Return immediately — the membership checks below would
            # otherwise run substring tests against this string (and crash
            # with a TypeError if the command contains "windows" or "panes").
            return session_config["shell_command"]

    if (
        "shell_command_before" in session_config
        and isinstance(session_config["shell_command_before"], list)
        and len(session_config["shell_command_before"]) == 1
    ):
        session_config["shell_command_before"] = session_config["shell_command_before"][
            0
        ]

    # recurse into window and pane config items
    if "windows" in session_config:
        session_config["windows"] = [
            inline(window) for window in session_config["windows"]
        ]
    if "panes" in session_config:
        session_config["panes"] = [inline(pane) for pane in session_config["panes"]]

    return session_config
def expand_cmd(p: Dict) -> Dict:
    """Normalize a command entry to ``{"shell_command": [{"cmd": ...}, ...]}``."""
    # Promote shorthand forms (bare string, bare list, empty value) to a dict.
    if isinstance(p, str):
        p = {"shell_command": [p]}
    elif isinstance(p, list):
        p = {"shell_command": p}
    elif not p:
        p = {"shell_command": []}
    assert isinstance(p, dict)

    if "shell_command" not in p:
        p["shell_command"] = []
        return p

    cmds = p["shell_command"]
    if isinstance(cmds, str):
        cmds = [cmds]
    # None / "blank" / "pane" mean "no command"; so does an empty value.
    if not cmds or cmds in (None, "blank", "pane"):
        cmds = []
    if isinstance(cmds, list) and len(cmds) == 1:
        if cmds[0] in (None, "blank", "pane"):
            cmds = []
    for idx, cmd in enumerate(cmds):
        if isinstance(cmd, str):
            cmds[idx] = {"cmd": cmd}
        cmds[idx]["cmd"] = expandshell(cmds[idx]["cmd"])
    p["shell_command"] = cmds
    return p
def expand(session_config, cwd=None, parent=None):
    """Return config with shorthand and inline properties expanded.

    This is necessary to keep the code in the :class:`WorkspaceBuilder` clean
    and also allow for neat, short-hand configurations.

    As a simple example, internally, tmuxp expects that config options
    like ``shell_command`` are a list (array)::

        'shell_command': ['htop']

    tmuxp configs allow for it to be simply a string::

        'shell_command': 'htop'

    Kaptan will load JSON/YAML files into python dicts for you.

    Parameters
    ----------
    session_config : dict
        the configuration for the session
    cwd : str
        directory to expand relative paths against. should be the dir of the
        config directory.
    parent : str
        (used on recursive entries) start_directory of parent window or session
        object.

    Returns
    -------
    dict
    """
    # Note: cli.py will expand configs relative to project's config directory
    # for the first cwd argument.
    if not cwd:
        cwd = os.getcwd()
    if "session_name" in session_config:
        session_config["session_name"] = expandshell(session_config["session_name"])
    if "window_name" in session_config:
        session_config["window_name"] = expandshell(session_config["window_name"])
    if "environment" in session_config:
        for key in session_config["environment"]:
            val = session_config["environment"][key]
            val = expandshell(val)
            # Resolve relative values against the config directory.
            if any(val.startswith(a) for a in [".", "./"]):
                val = os.path.normpath(os.path.join(cwd, val))
            session_config["environment"][key] = val
    if "global_options" in session_config:
        for key in session_config["global_options"]:
            val = session_config["global_options"][key]
            # Only string option values get shell/path expansion.
            if isinstance(val, str):
                val = expandshell(val)
                if any(val.startswith(a) for a in [".", "./"]):
                    val = os.path.normpath(os.path.join(cwd, val))
            session_config["global_options"][key] = val
    if "options" in session_config:
        for key in session_config["options"]:
            val = session_config["options"][key]
            if isinstance(val, str):
                val = expandshell(val)
                if any(val.startswith(a) for a in [".", "./"]):
                    val = os.path.normpath(os.path.join(cwd, val))
            session_config["options"][key] = val
    # Any config section, session, window, pane that can contain the
    # 'shell_command' value
    if "start_directory" in session_config:
        session_config["start_directory"] = expandshell(
            session_config["start_directory"]
        )
        start_path = session_config["start_directory"]
        if any(start_path.startswith(a) for a in [".", "./"]):
            # if window has a session, or pane has a window with a
            # start_directory of . or ./, make sure the start_directory can be
            # relative to the parent.
            #
            # This is for the case where you may be loading a config from
            # outside your shell current directory.
            if parent:
                cwd = parent["start_directory"]
            start_path = os.path.normpath(os.path.join(cwd, start_path))
            session_config["start_directory"] = start_path
    if "before_script" in session_config:
        session_config["before_script"] = expandshell(session_config["before_script"])
        if any(session_config["before_script"].startswith(a) for a in [".", "./"]):
            session_config["before_script"] = os.path.normpath(
                os.path.join(cwd, session_config["before_script"])
            )
    # Normalize a bare-string shell_command to list form.
    if "shell_command" in session_config and isinstance(
        session_config["shell_command"], str
    ):
        session_config["shell_command"] = [session_config["shell_command"]]
    if "shell_command_before" in session_config:
        shell_command_before = session_config["shell_command_before"]
        session_config["shell_command_before"] = expand_cmd(shell_command_before)
    # recurse into window and pane config items
    if "windows" in session_config:
        session_config["windows"] = [
            expand(window, parent=session_config)
            for window in session_config["windows"]
        ]
    elif "panes" in session_config:
        pane_configs = session_config["panes"]
        for pane_idx, pane_config in enumerate(pane_configs):
            pane_configs[pane_idx] = {}
            pane_configs[pane_idx].update(expand_cmd(pane_config))
        session_config["panes"] = [
            expand(pane, parent=session_config) for pane in pane_configs
        ]
    return session_config
def trickle(session_config):
    """Return a dict with "trickled down" / inherited config values.

    This will only work if config has been expanded to full form with
    :meth:`config.expand`.

    tmuxp allows certain commands to be default at the session, window
    level. shell_command_before trickles down and prepends the
    ``shell_command`` for the pane.

    Parameters
    ----------
    session_config : dict
        the session configuration.

    Returns
    -------
    dict
    """
    # prepends a pane's ``shell_command`` list with the window and sessions'
    # ``shell_command_before``.
    if "start_directory" in session_config:
        session_start_directory = session_config["start_directory"]
    else:
        session_start_directory = None
    if "suppress_history" in session_config:
        suppress_history = session_config["suppress_history"]
    else:
        suppress_history = None
    for window_config in session_config["windows"]:
        # Prepend start_directory to relative window commands
        if session_start_directory:
            if "start_directory" not in window_config:
                window_config["start_directory"] = session_start_directory
            else:
                # Absolute ("/") and home ("~") paths are left untouched.
                if not any(
                    window_config["start_directory"].startswith(a) for a in ["~", "/"]
                ):
                    window_start_path = os.path.join(
                        session_start_directory, window_config["start_directory"]
                    )
                    window_config["start_directory"] = window_start_path
        # We only need to trickle to the window, workspace builder checks wconf
        if suppress_history is not None:
            if "suppress_history" not in window_config:
                window_config["suppress_history"] = suppress_history
        # If panes were NOT specified for a window, assume that a single pane
        # with no shell commands is desired
        if "panes" not in window_config:
            window_config["panes"] = [{"shell_command": []}]
        for pane_idx, pane_config in enumerate(window_config["panes"]):
            commands_before = []
            # Prepend shell_command_before to commands
            if "shell_command_before" in session_config:
                commands_before.extend(
                    session_config["shell_command_before"]["shell_command"]
                )
            if "shell_command_before" in window_config:
                commands_before.extend(
                    window_config["shell_command_before"]["shell_command"]
                )
            if "shell_command_before" in pane_config:
                commands_before.extend(
                    pane_config["shell_command_before"]["shell_command"]
                )
            if "shell_command" in pane_config:
                commands_before.extend(pane_config["shell_command"])
            window_config["panes"][pane_idx]["shell_command"] = commands_before
            # pane_config['shell_command'] = commands_before
    return session_config
def import_tmuxinator(session_config):
"""Return tmuxp config from a `tmuxinator`_ yaml config.
.. _tmuxinator: https://github.com/aziz/tmuxinator
Parameters
----------
session_config : dict
python dict for session configuration.
Returns
-------
dict
"""
tmuxp_config = {}
if "project_name" in session_config:
tmuxp_config["session_name"] = session_config.pop("project_name")
elif "name" in session_config:
tmuxp_config["session_name"] = session_config.pop("name")
else:
tmuxp_config["session_name"] = None
if "project_root" in session_config:
tmuxp_config["start_directory"] = session_config.pop("project_root")
elif "root" in session_config:
tmuxp_config["start_directory"] = session_config.pop("root")
if "cli_args" in session_config:
tmuxp_config["config"] = session_config["cli_args"]
if "-f" in tmuxp_config["config"]:
tmuxp_config["config"] = tmuxp_config["config"].replace("-f", "").strip()
elif "tmux_options" in session_config:
tmuxp_config["config"] = session_config["tmux_options"]
if "-f" in tmuxp_config["config"]:
tmuxp_config["config"] = tmuxp_config["config"].replace("-f", "").strip()
if "socket_name" in session_config:
tmuxp_config["socket_name"] = session_config["socket_name"]
tmuxp_config["windows"] = []
if "tabs" in session_config:
session_config["windows"] = session_config.pop("tabs")
if "pre" in session_config and "pre_window" in session_config:
tmuxp_config["shell_command"] = session_config["pre"]
if isinstance(session_config["pre"], str):
tmuxp_config["shell_command_before"] = [session_config["pre_window"]]
else:
tmuxp_config["shell_command_before"] = session_config["pre_window"]
elif "pre" in session_config:
if isinstance(session_config["pre"], str):
tmuxp_config["shell_command_before"] = [session_config["pre"]]
else:
tmuxp_config["shell_command_before"] = session_config["pre"]
if "rbenv" in | |
# This was based on some code from https://github.com/mirumee/saleor
# but adapted to use relay, automatic field detection and some code adjustments
import collections
import collections.abc
import itertools
from django.core.exceptions import (
NON_FIELD_ERRORS,
ImproperlyConfigured,
ValidationError,
)
from django.db import (
models,
transaction,
)
from django.db.models.fields import NOT_PROVIDED
from django.db.models.fields.reverse_related import (
ManyToOneRel,
ManyToManyRel,
)
import graphene
from graphene.relay.mutation import ClientIDMutation
from graphene.types.mutation import MutationOptions
from graphene.types.utils import yank_fields_from_attrs
from graphene_django.registry import get_global_registry
from graphene_django.converter import convert_django_field_with_choices
from graphene.utils.str_converters import to_camel_case
from graphql.error import GraphQLError
from .exceptions import PermissionDenied
from .models import GuardedModel
from .types import (
schema_registry,
schema_for_field,
MutationErrorType,
UploadType,
)
from .perms import (
check_perms,
check_authenticated,
)
from .settings import graphene_django_plus_settings
from .utils import (
get_node,
get_nodes,
update_dict_nested,
get_model_fields,
)
_registry = get_global_registry()
def _get_model_name(model):
model_name = model.__name__
return model_name[:1].lower() + model_name[1:]
def _get_output_fields(model, return_field_name):
    """Build the mutation's output field mapping for ``model``."""
    if not _registry.get_type_for_model(model):  # pragma: no cover
        raise ImproperlyConfigured(
            "Unable to find type for model {} in graphene registry".format(
                model.__name__,
            )
        )
    # The type is resolved lazily so registration order doesn't matter.
    mutated_field = graphene.Field(
        lambda: _registry.get_type_for_model(model),
        description="The mutated object.",
    )
    return {return_field_name: mutated_field}
def _get_validation_errors(validation_error):
    """Convert a django ValidationError into a list of MutationErrorType."""
    errors = []
    if hasattr(validation_error, "error_dict"):
        # Field errors: camelCase the field name; non-field errors get None.
        for field, messages in validation_error.message_dict.items():
            field_name = None if field == NON_FIELD_ERRORS else to_camel_case(field)
            for message in messages:
                errors.append(MutationErrorType(field=field_name, message=message))
    else:
        # Plain (non-field) error list.
        for error in validation_error.error_list:
            errors.append(MutationErrorType(message=error.message))
    return errors
def _get_fields(model, only_fields, exclude_fields, required_fields):
    """Map the model's django fields to graphene input fields.

    Returns an OrderedDict of ``name -> {"field": <graphene field>,
    "schema": <schema dict>}``, honoring the only/exclude/required lists.
    """
    ret = collections.OrderedDict()
    for name, field in get_model_fields(model):
        # Skip fields excluded explicitly, not opted into via only_fields,
        # auto-created "+" accessors, and bookkeeping timestamp columns.
        if (
            (only_fields and name not in only_fields)
            or name in exclude_fields
            or str(name).endswith("+")
            or name in ["created_at", "updated_at", "archived_at"]
        ):
            continue
        if name == "id":
            f = graphene.ID(
                description="The ID of the object.",
            )
        elif isinstance(field, models.FileField):
            f = UploadType(
                description=field.help_text,
            )
        elif isinstance(field, models.BooleanField):
            f = graphene.Boolean(
                description=field.help_text,
            )
        elif isinstance(field, (models.ForeignKey, models.OneToOneField)):
            # To-one relations are received as relay global IDs.
            f = graphene.ID(
                description=field.help_text,
            )
        elif isinstance(field, models.ManyToManyField):
            f = graphene.List(
                graphene.ID,
                description=field.help_text,
            )
        elif isinstance(field, (ManyToOneRel, ManyToManyRel)):
            reverse_rel_include = graphene_django_plus_settings.MUTATIONS_INCLUDE_REVERSE_RELATIONS
            # Checking whether it was globally configured to not include reverse relations
            if isinstance(field, ManyToOneRel) and not reverse_rel_include and not only_fields:
                continue
            f = graphene.List(
                graphene.ID,
                description="Set list of {0}".format(
                    field.related_model._meta.verbose_name_plural,
                ),
            )
        else:
            # Fall back to graphene-django's standard field conversion.
            f = convert_django_field_with_choices(field, _registry)
        # Compute "required": an explicit required_fields list wins;
        # otherwise derive it from the django field's null/blank/default.
        if required_fields is not None:
            f.kwargs["required"] = name in required_fields
        else:
            if isinstance(field, (ManyToOneRel, ManyToManyRel)):
                f.kwargs["required"] = not field.null
            else:
                f.kwargs["required"] = not field.blank and field.default is NOT_PROVIDED
        s = schema_for_field(field, name)
        s["validation"]["required"] = f.kwargs["required"]
        ret[name] = {
            "field": f,
            "schema": s,
        }
    return ret
def _is_list_of_ids(field):
    """Return True if ``field`` is a ``graphene.List`` of ``graphene.ID``."""
    if not isinstance(field.type, graphene.List):
        return False
    return field.type.of_type == graphene.ID
def _is_id_field(field):
    """Return True if ``field`` is a ``graphene.ID``, bare or NonNull-wrapped."""
    if field.type == graphene.ID:
        return True
    return (isinstance(field.type, graphene.NonNull)
            and field.type.of_type == graphene.ID)
def _is_upload_field(field):
    """Return True if ``field`` is an ``UploadType``, bare or wrapped."""
    inner_type = getattr(field.type, "of_type", field.type)
    return inner_type == UploadType
class BaseMutationOptions(MutationOptions):
    """Model type options for :class:`BaseMutation` and subclasses.

    These attributes are populated by
    ``BaseMutation.__init_subclass_with_meta__`` from the subclass's Meta.
    """

    #: A list of Django permissions to check against the user
    permissions = None

    #: If any permission should allow the user to execute this mutation
    permissions_any = None

    #: If we should allow unauthenticated users to do this mutation
    allow_unauthenticated = False

    #: The input schema for the schema query
    input_schema = None
class BaseMutation(ClientIDMutation):
    """Base mutation enhanced with permission checking and relay id handling."""

    class Meta:
        abstract = True

    #: A list of errors that happened during the mutation
    errors = graphene.List(
        graphene.NonNull(MutationErrorType),
        description="List of errors that occurred while executing the mutation.",
    )

    @classmethod
    def __init_subclass_with_meta__(
        cls,
        permissions=None,
        permissions_any=True,
        allow_unauthenticated=False,
        input_schema=None,
        _meta=None,
        **kwargs,
    ):
        """Collect the Meta options into a :class:`BaseMutationOptions`
        and register the mutation's input schema under its Input type name."""
        if not _meta:
            _meta = BaseMutationOptions(cls)

        _meta.permissions = permissions or []
        _meta.permissions_any = permissions_any
        _meta.allow_unauthenticated = allow_unauthenticated
        _meta.input_schema = input_schema or {}

        super().__init_subclass_with_meta__(_meta=_meta, **kwargs)

        # Make the input type's field schema discoverable via the registry
        # (consumed by the schema query).
        iname = cls.Input._meta.name
        schema_registry[iname] = {
            "object_type": iname,
            "fields": list(_meta.input_schema.values()),
        }

    @classmethod
    def get_node(cls, info, node_id, field="id", only_type=None):
        """Get the node object given a relay global id.

        Returns None when *node_id* is falsy; raises ValidationError
        (keyed on *field*) when the id cannot be resolved to a node.
        """
        if not node_id:
            return None

        try:
            node = get_node(node_id, only_type)
        except (AssertionError, GraphQLError) as e:
            # resolution failures become field-level validation errors
            raise ValidationError({field: str(e)})
        else:
            if node is None:  # pragma: no cover
                raise ValidationError({field: "Couldn't resolve to a node: {}".format(node_id)})

        return node

    @classmethod
    def get_nodes(cls, ids, field, only_type=None):
        """Get a list of node objects given a list of relay global ids.

        Raises ValidationError (keyed on *field*) on resolution failure.
        """
        try:
            instances = get_nodes(ids, only_type)
        except GraphQLError as e:
            raise ValidationError({field: str(e)})

        return instances

    @classmethod
    def check_permissions(cls, user):
        """Check permissions for the given user.

        Subclasses can override this to avoid the permission checking or
        extending it. Remember to call `super()` in the later case.
        """
        # unauthenticated users are rejected unless explicitly allowed
        if not cls._meta.allow_unauthenticated and not check_authenticated(user):
            return False

        # no declared permissions means anyone (authenticated) may proceed
        if not cls._meta.permissions:
            return True

        return check_perms(user, cls._meta.permissions, any_perm=cls._meta.permissions_any)

    @classmethod
    def mutate_and_get_payload(cls, root, info, **data):
        """Mutate checking permissions.

        We override the default graphene's method to call
        :meth:`.check_permissions` and populate :attr:`.errors` in case
        of errors automatically.

        The mutation itself should be defined in :meth:`.perform_mutation`.
        Raises PermissionDenied when the permission check fails;
        ValidationErrors are converted into the payload's ``errors`` list.
        """
        if not cls.check_permissions(info.context.user):
            raise PermissionDenied()

        try:
            response = cls.perform_mutation(root, info, **data)
            if response.errors is None:
                response.errors = []
            return response
        except ValidationError as e:
            errors = _get_validation_errors(e)
            return cls(errors=errors)

    @classmethod
    def perform_mutation(cls, root, info, **data):
        """Perform the mutation.

        This should be implemented in subclasses to perform the mutation.
        """
        raise NotImplementedError
class ModelMutationOptions(BaseMutationOptions):
    """Meta options container for :class:`BaseModelMutation` and subclasses."""

    #: The Django model.
    model = None
    #: A list of guardian object permissions to check if the user has
    #: permission to perform a mutation to the model object.
    object_permissions = None
    #: If any object permission should allow the user to perform the mutation.
    object_permissions_any = True
    #: Exclude the given fields from the mutation input.
    exclude_fields = None
    #: Include only those fields in the mutation input.
    only_fields = None
    #: Mark those fields as required (note that fields marked with `null=False`
    #: in Django will already be considered required).
    required_fields = None
    #: The name of the field that will contain the object type. If not
    #: provided, it will default to the model's name.
    return_field_name = None
class BaseModelMutation(BaseMutation):
"""Base mutation for models.
This will allow mutations for both create and update operations,
depending on if the object's id is present in the input or not.
See :class:`ModelMutationOptions` for a list of meta configurations.
"""
class Meta:
abstract = True
    @classmethod
    def __init_subclass_with_meta__(
        cls,
        model=None,
        object_permissions=None,
        object_permissions_any=True,
        return_field_name=None,
        required_fields=None,
        exclude_fields=None,
        only_fields=None,
        input_schema=None,
        _meta=None,
        **kwargs,
    ):
        """Build the mutation's input fields and schema from the Django model.

        Collects the Meta options into a :class:`ModelMutationOptions`,
        derives graphene input fields (and their schema) from the model's
        fields, and adds an output field for the mutated object named
        *return_field_name* (defaults to the model's name).

        Raises ImproperlyConfigured when no model is given.
        """
        if not model:  # pragma: no cover
            raise ImproperlyConfigured("model is required for ModelMutation")

        if not _meta:
            _meta = ModelMutationOptions(cls)

        exclude_fields = exclude_fields or []
        only_fields = only_fields or []

        if not return_field_name:
            return_field_name = _get_model_name(model)

        # per-field graphene input fields plus their schema descriptions
        fdata = _get_fields(model, only_fields, exclude_fields, required_fields)
        input_fields = yank_fields_from_attrs(
            {k: v["field"] for k, v in fdata.items()},
            _as=graphene.InputField,
        )
        # merge the caller-provided input_schema over the derived one
        # (assumes update_dict_nested favours its second argument -- confirm)
        input_schema = update_dict_nested(
            {k: v["schema"] for k, v in fdata.items()},
            input_schema or {},
        )
        fields = _get_output_fields(model, return_field_name)

        _meta.model = model
        _meta.object_permissions = object_permissions or []
        _meta.object_permissions_any = object_permissions_any
        _meta.return_field_name = return_field_name
        _meta.exclude_fields = exclude_fields
        _meta.only_fields = only_fields
        _meta.required_fields = required_fields

        super().__init_subclass_with_meta__(
            _meta=_meta,
            input_fields=input_fields,
            input_schema=input_schema,
            **kwargs,
        )

        cls._meta.fields.update(fields)
@classmethod
def check_object_permissions(cls, user, instance):
"""Check object permissions for the given user.
Subclasses can override this to avoid the permission checking or
extending it. Remember to call `super()` in the later case.
For this to work, the model needs to implement a `has_perm` method.
The easiest way when using `guardian` is to inherit it
from :class:`graphene_django_plus.models.GuardedModel`.
"""
if not cls._meta.object_permissions:
return True
if not isinstance(instance, GuardedModel):
return True
return instance.has_perm(
user,
cls._meta.object_permissions,
any_perm=cls._meta.object_permissions_any,
)
@classmethod
def get_instance(cls, info, obj_id):
"""Get an object given a relay global id."""
model_type = _registry.get_type_for_model(cls._meta.model)
instance = cls.get_node(info, obj_id, only_type=model_type)
if not cls.check_object_permissions(info.context.user, instance):
raise PermissionDenied()
return instance
    @classmethod
    def before_save(cls, info, instance, cleaned_input=None):
        """Perform "before save" operations.

        Hook called by :meth:`.save` just before ``instance.save()``.
        Override this to adjust the instance before it is persisted;
        the default implementation does nothing.
        """
        pass
    @classmethod
    def after_save(cls, info, instance, cleaned_input=None):
        """Perform "after save" operations.

        Hook called by :meth:`.save` just after ``instance.save()``.
        Override this for any follow-up work on the persisted instance;
        the default implementation does nothing.
        """
        pass
    @classmethod
    def save(cls, info, instance, cleaned_input=None):
        """Save the instance to the database.

        To do something with the instance "before" or "after" saving it,
        override either :meth:`.before_save` and/or :meth:`.after_save`
        rather than this method.
        """
        cls.before_save(info, instance, cleaned_input=cleaned_input)
        instance.save()
        cls.after_save(info, instance, cleaned_input=cleaned_input)
    @classmethod
    def before_delete(cls, info, instance):
        """Perform "before delete" operations.

        Hook called just before the instance's ``.delete()`` method is
        called. Override to do any preparatory work; the default
        implementation does nothing.
        """
        pass
@classmethod
def after_delete(cls, info, instance):
"""Perform "after delete" operations.
Override this to perform any operation | |
import demistomock as demisto
from CommonServerPython import *
import json
import requests
import socket
from typing import Dict, Any, List
from netaddr import IPNetwork, IPAddress
# Disable insecure warnings
requests.packages.urllib3.disable_warnings()

'''
Templates for change requests
Change these templates if you have customized your Firewall Change Request or Server Decommission Request worflows to match
your workflow. To view the JSON structure of your customized workflows, create a sample ticket then view the data via the
API: https://<SecureChange IP Address>/securechangeworkflow/api/securechange/tickets/<tickt_ID>.json
'''

# Skeleton ticket for the SecureChange "Firewall Change Request" workflow;
# commands fill in subject, priority, source/destination IPs, service and action.
FW_CHANGE_REQ = json.loads('''{ "ticket": { "subject": "", "priority": "", "workflow": { "name": "Firewall Change Request",
"uses_topology": true }, "steps": { "step": [ { "name": "Submit Access Request", "tasks": { "task":
{ "fields": { "field": { "@xsi.type": "multi_access_request", "name": "Required Access",
"access_request": { "use_topology": true, "targets": { "target": { "@type": "ANY" } },
"users": { "user": [ "Any" ] }, "sources": { "source": [ { "@type": "IP", "ip_address": "",
"netmask": "255.255.255.255", "cidr": 32 } ] }, "destinations": { "destination": [ { "@type": "IP",
"ip_address": "", "netmask": "255.255.255.255", "cidr": 32 } ] }, "services": { "service":
[ { "@type": "PROTOCOL", "protocol": "", "port": 0 } ] }, "action": "" } } } } } } ] },
"comments": "" } }''')

# Skeleton ticket for the SecureChange "Server Decommission Request" workflow;
# commands fill in subject, priority, the server IP and a comment.
SERVER_DECOM_REQ = json.loads('''{ "ticket": { "subject": "", "priority": "", "workflow": { "name":
"Server Decommission Request", "uses_topology": false }, "steps": { "step": [ {
"name": "Server Decommission Request", "tasks": { "task": { "fields": { "field": { "@xsi.type":
"multi_server_decommission_request", "name": "Request verification",
"server_decommission_request": { "servers": { "server": { "@type": "IP", "ip_address": "",
"netmask": "255.255.255.255", "cidr": 32 } }, "targets": { "target": { "@type": "ANY" } },
"comment": "" } } } } } } ] }, "comments": "" }}''')
# remove proxy if not set to true in params
if not demisto.params().get('proxy'):
    # Use pop() instead of del: del raised a KeyError whenever one of the
    # proxy variables was not present in the environment.
    for _proxy_var in ('HTTP_PROXY', 'HTTPS_PROXY', 'http_proxy', 'https_proxy'):
        os.environ.pop(_proxy_var, None)
def tos_request(tos_app, req_type, path, params=None, headers=None, data=None):
    """Access TOS (Tufin Orchestration Suite) via its REST API.

    tos_app selects the credential set from the integration parameters:
    "st" (SecureTrack), "sc" (SecureChange) or "sa" (SecureApp).
    req_type is the HTTP method; only GET and POST are supported (any
    other method returns None, matching the original behaviour).
    Returns the parsed JSON body, or the raw content when the response
    body is not JSON; calls return_error() on connection/HTTP errors.
    """
    if headers is None:
        headers = {
            'accept': 'application/json',
            'content-type': 'application/json',
            'cache-control': 'no-cache'
        }
    # Get Configuration: server address and credentials per application
    tos_ip = ""
    tos_user = ""
    tos_pass = ""
    if tos_app == "st":
        tos_ip = demisto.params()['SecureTrack-Server']
        tos_user = demisto.params()['SecureTrack-User']['identifier']
        tos_pass = demisto.params()['SecureTrack-User']['password']
    elif tos_app == "sc":
        tos_ip = demisto.params()['SecureChange-Server']
        tos_user = demisto.params()['SecureChange-User']['identifier']
        tos_pass = demisto.params()['SecureChange-User']['password']
    elif tos_app == "sa":
        tos_ip = demisto.params()['SecureApp-Server']
        tos_user = demisto.params()['SecureApp-User']['identifier']
        tos_pass = demisto.params()['SecureApp-User']['password']
    verify_ssl = not demisto.params().get('unsecure', False)
    url = 'https://' + tos_ip + path

    method = req_type.upper()
    if method not in ('GET', 'POST'):
        # unsupported method: keep the original implicit-None behaviour
        return None
    # Go do
    try:
        if method == 'GET':
            res = requests.get(url, params=params, headers=headers,
                               auth=(tos_user, tos_pass), verify=verify_ssl)
        else:
            res = requests.post(url, data=data, params=params, headers=headers,
                                auth=(tos_user, tos_pass), verify=verify_ssl)
    except requests.exceptions.RequestException as e:
        return_error(str(e))
    # Check output (shared for GET and POST; the original duplicated this)
    if res.status_code == 200 or res.status_code == 201:
        try:
            return res.json()
        except json.decoder.JSONDecodeError:
            # non-JSON payload (e.g. a PNG image) - hand back raw bytes
            return res.content
    if res.status_code == 401:
        return_error('TOS Reached, Auth Failed. Please check your credentials')
    return_error('Error {} Reaching {} to TOS: {}'.format(res.status_code, res.url, res.reason))
def valid_ip(ipa):
    """Return True if *ipa* parses as an IPv4 address (per inet_aton)."""
    # ipaddress module not installed by default, using this approach
    try:
        socket.inet_aton(ipa)
    except socket.error:
        return False
    return True
def path_finder(querystring):
    """Run a SecureTrack topology path query and build a Demisto entry
    stating whether traffic is allowed and which devices sit on the path."""
    # Base entry structure; augmented below with TOS data
    entry: Dict[str, Any] = {
        'Type': entryTypes['note'],
        'Contents': '',
        'ContentsFormat': formats['json'],
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': '',
        'EntryContext': {}
    }
    # Ask SecureTrack for the path
    result = tos_request('st', 'GET', '/securetrack/api/topology/path', querystring)
    # Verify the data and return
    try:
        path_calc = result['path_calc_results']
        devices = path_calc['device_info']
        allowed = path_calc['traffic_allowed']
        entry['EntryContext']['Tufin.Topology.TrafficAllowed'] = allowed
        entry['EntryContext']['Tufin.Topology.TrafficDevices'] = [d['name'] for d in devices]
        entry['Contents'] = devices
        status = ('**Denied**', '**Allowed**')[allowed]
        title = 'Tufin Topology Search for {} to {} via Service {}. Traffic is {}'.format(
            querystring['src'], querystring['dst'], querystring['service'], status)
        device_chain = '-->'.join(
            ['**' + d['name'] + '**' + ' ({})'.format(d['vendor']) for d in devices])
        entry['HumanReadable'] = tableToMarkdown(
            title,
            {'Start': querystring['src'], 'Devices in Path': device_chain,
             'End': querystring['dst']},
            ['Start', 'Devices in Path', 'End'])
    except KeyError:
        return_error('Unknown Output Returned')
    # Send back to Demisto inside function
    return entry
def path_finder_command():
    """Demisto command wrapper: read args and run a topology path search.

    Sample query: {'src': '10.80.80.0', 'dst': '172.16.200.80',
    'service': 'tcp:22', 'includeIncompletePaths': 'true'}
    """
    args = demisto.args()
    querystring = {
        'src': args['source'],
        'dst': args['destination'],
        'service': args.get('service', 'Any'),
        'includeIncompletePaths': 'true'
    }
    demisto.results(path_finder(querystring))
def path_finder_image(querystring):
    """Fetch a PNG of the topology path from SecureTrack and return it as
    a Demisto file result, or a note entry when no valid path was found."""
    entry = {
        'Type': entryTypes['note'],
        'Contents': '',
        'ContentsFormat': formats['text'],
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': '',
        'EntryContext': {}
    }
    try:
        # request raw PNG instead of JSON
        headers = {'accept': 'image/png', 'content-type': 'application/json', 'cache-control': 'no-cache'}
        img = tos_request('st', 'GET', '/securetrack/api/topology/path_image', querystring, headers)
        # simple check if we have an image or error message.
        # NOTE(review): length heuristic -- assumes any payload longer than
        # 20 bytes is a real image; confirm against the API's error responses.
        if len(img) > 20:
            # Send back to Demisto inside function
            return fileResult('topo.png', img, entryTypes['image'])
        else:
            entry['HumanReadable'] = 'No Valid Path Found'
            entry['Contents'] = 'No Valid Path Found'
            # Send back to Demisto inside function
            return entry
    except Exception as e:
        return_error('Error Running Query: {}'.format(e))
def path_finder_image_command():
    """Demisto command wrapper: fetch the topology path image.

    Sample query: {'src': '10.80.80.0', 'dst': '172.16.200.80',
    'service': 'tcp:80', 'includeIncompletePaths': 'true',
    'displayBlockedStatus': 'true'}
    """
    args = demisto.args()
    querystring = {
        'src': args['source'],
        'dst': args['destination'],
        'service': args.get('service', 'Any'),
        'includeIncompletePaths': 'true',
        'displayBlockedStatus': 'true'
    }
    demisto.results(path_finder_image(querystring))
def device_name(devices, device_id):
    """Return "name (vendor model)" for the device in *devices* whose id
    equals *device_id* (ids compared as ints). Raises IndexError if absent."""
    target = int(device_id)
    labels = [entry['name'] + ' ({} {})'.format(entry['vendor'], entry['model'])
              for entry in devices if int(entry['id']) == target]
    return labels[0]
def object_lookup(querystring):
    """Search SecureTrack network objects by exact subnet and build a
    Demisto entry listing each matching object, its device and comment."""
    entry: Dict[str, Any] = {
        'Type': entryTypes['note'],
        'Contents': '',
        'ContentsFormat': formats['json'],
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': '',
        'EntryContext': {}
    }
    return_json: Dict[str, List] = {'objects': []}
    o = tos_request('st', 'GET', '/securetrack/api/network_objects/search', querystring)
    # Validate result
    try:
        total = int(o['network_objects']['count'])
    except KeyError:
        total = 0
    if total > 0:
        # device inventory, used to resolve device_id -> readable name
        device_json = tos_request('st', 'GET', '/securetrack/api/devices')['devices']['device']
        objs = o['network_objects']['network_object']
        # the API returns a bare dict (not a list) when there is one match
        if not isinstance(o['network_objects']['network_object'], list):
            objs = [objs]
        for obj in objs:
            # display_name device_id
            return_json['objects'].append({'object_name': obj['display_name'], 'device': device_name(device_json,
                                          obj['device_id']), 'comment': obj['comment']})
    else:
        entry['HumanReadable'] = 'No Results'
        entry['EntryContext']['Tufin.ObjectResolve.NumberOfObjects'] = 0
        return entry
    # Return to Demisto
    entry['Contents'] = json.dumps(return_json)
    entry['EntryContext']['Tufin.ObjectResolve.NumberOfObjects'] = total
    entry['HumanReadable'] = tableToMarkdown('Object Lookup for {}'.format(querystring['exact_subnet']), return_json['objects'],
                                             ['object_name', 'device', 'comment'], underscoreToCamelCase, removeNull=True)
    # Send back to Demisto inside function
    return entry
def object_lookup_command():
    """Demisto command wrapper: resolve an IP to SecureTrack network objects.

    Sample query: {'filter': 'subnet', 'count': '50', 'exact_subnet': '1.1.1.1'}
    """
    ip_addr = demisto.args()['ip']
    if not valid_ip(ip_addr):
        return_error('Invalid IP Address')
        return False
    querystring = {
        'filter': 'subnet',
        'count': '50',
        'exact_subnet': ip_addr
    }
    demisto.results(object_lookup(querystring))
def policy_search(querystring, max_rules_per_device=100):
    """ Search policy across all devices. See docs for syntax.

    Runs a global rule search first, then fetches up to
    *max_rules_per_device* matching rules from each device that reported
    matches, and renders them as a markdown table.
    """
    u = '/securetrack/api/rule_search'
    entry: Dict[str, Any] = {
        'Type': entryTypes['note'],
        'Contents': '',
        'ContentsFormat': formats['json'],
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': '',
        'EntryContext': {}
    }
    # first pass: which devices have at least one matching rule
    matches = tos_request('st', 'GET', u, querystring)
    search_devices = [e['device_id'] for e in matches['device_list']['device'] if int(e['rule_count']) > 0]
    if not len(search_devices):
        entry['HumanReadable'] = 'No Results Found'
        entry['EntryContext']['Tufin.Policysearch.NumberRulesFound'] = 0
        # Send back to Demisto inside function
        return entry
    else:
        rule_total = 0
        # NOTE: this mutates the caller's querystring dict in place
        querystring['count'] = max_rules_per_device
        querystring['start'] = 0
        rule_return = []
        device_json = tos_request('st', 'GET', '/securetrack/api/devices')['devices']['device']
        for d in search_devices:
            rules = tos_request('st', 'GET', u + '/{}'.format(d), querystring)
            # If no matches(there should be) just break the iteration
            # NOTE(review): `break` abandons all remaining devices; `continue`
            # may have been intended -- confirm before changing.
            if rules['rules']['count'] == 0:
                break
            current_device = device_name(device_json, d)
            for rule in rules['rules']['rule']:
                rule_total = rule_total + 1
                rule_return.append({
                    'Device': current_device,
                    'Source': [d['display_name'] for d in rule['src_network']],
                    'Source Service': [d['display_name'] for d in rule['src_service']],
                    'Destination': [d['display_name'] for d in rule['dst_network']],
                    'Destination Service': [d['display_name'] for d in rule['dst_service']],
                    'Action': rule['action']
                })
        # Send back to Demisto
        entry['Contents'] = json.dumps(rule_return)
        entry['EntryContext']['Tufin.Policysearch.NumberRulesFound'] = rule_total
        entry['HumanReadable'] = tableToMarkdown('Policy Search Results for {}'.format(querystring['search_text']),
                                                 rule_return, ['Device', 'Source', 'Source Service', 'Destination',
                                                               'Destination Service', 'Action'], removeNull=True)
        # Send back to Demisto inside function
        return entry
def policy_search_command():
    """Demisto command wrapper: free-text rule search across all devices."""
    max_rules = demisto.params()['MaxRules']
    entry = policy_search({'search_text': demisto.args()['search']}, max_rules)
    demisto.results(entry)
def zone_match(ipaddr):
""" Find the zone for the given IP address """
entry: Dict[str, Any] = {
'Type': entryTypes['note'],
'Contents': '',
'ContentsFormat': formats['text'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': '',
'EntryContext': {}
}
try:
zone_list = tos_request('st', 'GET', '/securetrack/api/zones/')
for zone in zone_list['zones']['zone']:
zone_subnets = tos_request('st', 'GET', '/securetrack/api/zones/%s/entries' % zone['id'])
zone.update(zone_subnets)
for subnet in zone_subnets['zone_entries']['zone_entry']:
ipnet = '%s/%s' % (subnet['ip'], subnet['prefix'])
if IPAddress(ipaddr) in IPNetwork(ipnet):
z = {}
z['Name'] = zone['name']
z['ID'] = int(zone['id'])
entry['EntryContext']['Tufin.Zone'] = [z]
entry['Contents'] = zone
entry['HumanReadable'] = tableToMarkdown('Tufin Zone Search for {}'.format(ipaddr),
{'Name': zone['name'], 'ID': zone['id']},
['Name', 'ID'], removeNull=True)
return entry
except Exception as e:
return_error(f'Error retrieving zone: {str(e)}')
entry['EntryContext']['Tufin.Zones'] = [{'Name': 'None', 'ID': 'None'}]
entry['Contents'] = 'Not Found'
entry['HumanReadable'] = tableToMarkdown('Tufin Zone Search for {}'.format(ipaddr),
{'Name': 'Not | |
<class 'list'>
len(all_hiddens= 13
all_hiddens[0].shape= torch.Size([16, 63, 768])
all_hiddens[1].shape= torch.Size([16, 63, 768])
all_hiddens[-1].shape= torch.Size([16, 63, 768])
batch_size= 16
len(all_hiddens)= 12
self.opt['num_last_layer_xlmr']= 1
used_layers= [11]
retrieve_reps.shape= torch.Size([51, 768])
retrieve_reps.shape= torch.Size([51, 768])
retrieve_reps.shape= torch.Size([51, 768])
retrieve_reps.shape= torch.Size([51, 768])
retrieve_reps.shape= torch.Size([51, 768])
retrieve_reps.shape= torch.Size([51, 768])
retrieve_reps.shape= torch.Size([51, 768])
retrieve_reps.shape= torch.Size([51, 768])
retrieve_reps.shape= torch.Size([51, 768])
retrieve_reps.shape= torch.Size([51, 768])
retrieve_reps.shape= torch.Size([51, 768])
retrieve_reps.shape= torch.Size([51, 768])
retrieve_reps.shape= torch.Size([51, 768])
retrieve_reps.shape= torch.Size([51, 768])
retrieve_reps.shape= torch.Size([51, 768])
retrieve_reps.shape= torch.Size([51, 768])
token_reps.shape= torch.Size([16, 51, 768])
"""
"""
word_embeds.shape= torch.Size([16, 51, 768])
word_embeds.shape= torch.Size([16, 51, 768])
word_reps.shape= torch.Size([16, 51, 798])
"""
print('word_embeds.shape=', word_embeds.shape)
word_embeds = self.dropout(word_embeds)
print('word_embeds.shape=', word_embeds.shape)
word_feats.append(word_embeds)
word_reps = torch.cat(word_feats, dim=2)
print('word_reps.shape=', word_reps.shape)
# *******************************
"""
In below self.self_att()
input_masks.shape= torch.Size([16, 51])
slf_attn_mask.shape= torch.Size([16, 51, 51])
non_pad_mask.shape= torch.Size([16, 51, 1])
enc_output.shape= torch.Size([16, 51, 798])
position_embed_for_satt= 1
position_ids.shape= torch.Size([16, 51])
enc_output.shape= torch.Size([16, 51, 798])
"""
satt_reps, att_weights = self.self_att(word_reps, pad_masks)
"""
satt_reps.shape= torch.Size([16, 51, 798])
att_weights.shape= torch.Size([16, 51, 51])
adj.shape= torch.Size([16, 51, 51])
gcn_reps.shape= torch.Size([16, 51, 798])
muse_reps.shape= torch.Size([16, 51, 300])
final_reps.shape= torch.Size([16, 51, 1896])
logits.shape= torch.Size([16, 51, 16])
loss= tensor(2.8248, device='cuda:0', grad_fn=<DivBackward0>)
probs.shape= torch.Size([16, 51, 16])
preds.shape= torch.Size([16, 51])
"""
print('satt_reps.shape=', satt_reps.shape)
print('att_weights.shape=', att_weights.shape)
adj = get_full_adj(head_ids, pad_masks, self.opt['device'])
print('adj.shape=', adj.shape)
gcn_reps, _ = self.gcn_layer(word_reps, adj)
print('gcn_reps.shape=', gcn_reps.shape)
muse_reps = self.biw2v_embedding(biw2v_ids)
print('muse_reps.shape=', muse_reps.shape)
final_reps = torch.cat(
[satt_reps, gcn_reps, muse_reps],
dim=2
)
print('final_reps.shape=', final_reps.shape)
logits = self.fc_ED(final_reps) # [batch size, seq len, 16]
print('logits.shape=', logits.shape)
loss, probs, preds = compute_batch_loss(logits, ED_labels, token_masks, instance_weights=lang_weights)
print('loss=', loss)
print('probs.shape=', probs.shape)
print('preds.shape=', preds.shape)
print('=============== ED_model_hf.forward END ============')
return loss, probs, preds
def predict(self, combined_task_inputs):
xlmr_ids, input_mask, label_ids, biw2v_ids, retrieve_ids, upos_ids, xpos_ids, head_ids, deprel_ids, ner_ids, eid, pad_masks = combined_task_inputs
token_masks = pad_masks.eq(0).float() # 1.0 if true token, else 0
print('========== ED_model.predict START ===============')
"""
token_masks.shape= torch.Size([10, 33])
upos_reps.shape= torch.Size([10, 33, 30])
"""
print('token_masks.shape=', token_masks.shape)
"""
xlmr_ids.shape= torch.Size([10, 53])
biw2v_ids.shape= torch.Size([10, 33])
retrieve_ids.shape= torch.Size([10, 33])
upos_ids.shape= torch.Size([10, 33])
xpos_ids.shape= torch.Size([10, 33])
head_ids.shape= torch.Size([10, 33])
deprel_ids.shape= torch.Size([10, 33])
ner_ids.shape= torch.Size([10, 33])
eid.shape= torch.Size([10])
pad_masks.shape= torch.Size([10, 33])
xlmr_ids= tensor([[ 0, 6, 5, 90621, 47229, 250, 181, 5273, 10408,
6267, 4039, 31245, 71633, 2620, 18684, 6466, 7233, 250,
240, 102468, 368, 6, 185701, 35618, 18004, 159565, 97288,
41468, 152, 94, 13231, 3108, 746, 14272, 3070, 102935,
2103, 153872, 767, 186386, 12581, 30039, 230, 59721, 148726,
755, 230, 6816, 1692, 340, 6, 5, 2],
[ 0, 6, 5, 45869, 53929, 10286, 112847, 593, 50221,
139152, 46416, 179, 83001, 95451, 104042, 240, 13875, 13874,
18004, 39865, 3363, 93319, 136295, 109177, 240, 81881, 189757,
81972, 43060, 230, 11115, 33018, 702, 48102, 46408, 73279,
94, 9580, 199317, 73942, 160700, 35508, 340, 6, 5,
2, 0, 0, 0, 0, 0, 0, 0],
[ 0, 4003, 20621, 862, 18173, 30099, 7624, 906, 141538,
755, 556, 48964, 61501, 65, 123290, 164456, 230, 4569,
74602, 240, 169348, 47769, 48387, 47769, 16994, 396, 113409,
216336, 755, 6, 92127, 36435, 52316, 23628, 65, 32634,
1195, 110813, 240, 34708, 201174, 6, 5, 2, 0,
0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 6625, 665, 87151, 73397, 906, 129382, 10731, 87509,
6, 114378, 13620, 3015, 96629, 92564, 5202, 3015, 96629,
92564, 3108, 59545, 665, 101375, 258, 25198, 13231, 4003,
3518, 123506, 906, 24832, 755, 194558, 250, 19636, 3518,
98058, 3202, 1692, 6, 5, 2, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 6, 5, 37705, 112600, 376, 40743, 5202, 43228,
12323, 48483, 9787, 1325, 1855, 5081, 2044, 826, 6,
110351, 176, 230, 6, 163970, 19089, 47600, 96517, 16452,
412, 6963, 1533, 862, 18740, 13029, 66087, 1365, 6,
116337, 1692, 6, 5, 2, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 6, 104815, 53411, 6, 130825, 6, 22650, 54563,
240, 6, 97927, 10691, 240, 65525, 6, 224157, 665,
77358, 250, 27952, 35180, 160769, 22366, 19931, 101632, 648,
15776, 179, 26430, 70153, 12337, 2977, 240, 103919, 6,
5, 2, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 1333, 214363, 96517, 22327, 8039, 3088, 1335, 51218,
902, 177421, 154597, 1533, 146142, 755, 230, 206210, 15330,
69294, 240, 359, 169368, 4040, 14924, 8428, 35862, 10691,
15493, 72317, 179, 12888, 6, 5, 2, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 6625, 106969, 24094, 917, 10913, 10937, 6, 83188,
13759, 240, 93584, 1335, 86401, 24537, 5706, 5202, 24094,
208045, 862, 155500, 48707, 8665, 45089, 121818, 84341, 412,
220818, 6, 5, 2, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 105285, 368, 35000, 230, 12584, 230, 4382, 29928,
240, 141677, 250, 18740, 54610, 60930, 240, 30506, 6,
48699, 140252, 258, 556, 6, 164072, 12589, 96517, 6,
5, 2, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 6625, 55468, 900, 1705, 124630, 151721, 5202, 234180,
3518, 48633, 94, 73441, 23579, 376, 18486, 122608, 340,
240, 37160, 11945, 240, 72647, 120465, 5784, 133131, 6,
5, 2, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0]],
device='cuda:0')
biw2v_ids= tensor([[ 6, 114225, 113937, 128409, 1, 1443, 113675, 113666, 163097,
117713, 1, 126519, 1, 113266, 114068, 1, 50, 176957,
1, 113209, 127252, 113173, 113584, 120372, 126250, 113253, 113470,
113165, 117399, 113165, 119105, 177487, 6],
[ 6, 113782, 1, 123638, 1, 1, 131450, 113546, 1,
116631, 113266, 114666, 1, 125284, 1, 115773, 117903, 124178,
113165, 113254, 1, 395, 113309, 1, 176957, 113203, 1,
1, 1, 177487, 6, 0, 0],
[113216, 113383, 1, 113448, 119129, 113264, 120182, 113167, 242005,
137590, 1, 113165, 137330, 1, 1, 234085, 179317, 115962,
162598, 114539, 114727, 114453, 1, 1, 114368, 114375, 6,
0, 0, 0, 0, 0, 0],
[113306, 115538, 113453, 1, 113183, 1, 120945, 1, 1,
1, 113209, 114243, 1, 113395, 1, 113216, 169807, 117207,
116183, 1, 192377, 121340, 6, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0],
[ 6, 114821, 163342, 1, 130192, 1, 150092, 1, 191632,
113165, 1, 123445, 113381, 114054, 193520, 113200, 1, 113604,
134623, 113170, 122650, 6, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0],
[ 1, 113621, 1, 123566, 1, 114004, 1, 1, 117675,
122110, 113447, 113985, 118076, 190493, 171192, 1, 113939, 1,
118621, 6, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0],
[ 1, 113381, 121106, 1, 118400, 113950, 113725, 113200, 1,
113165, 115868, 168565, 1, 117866, 113179, 1, 151891, 6,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0],
[113306, 116827, 113219, 154925, 148370, 1, 113596, 1, 1,
1, 113219, 115695, 1, 114991, 113407, 156650, 115304, 113180,
1, 6, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0],
[125488, 113398, 113165, 1, 113165, 9086, 116472, 1, 119791,
113604, 114109, 115046, 1, 123924, 117632, 119425, 113167, 120572,
113381, 6, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0],
[113306, 116408, 135535, 1, 1, 1, 137810, 176957, 129403,
123127, 177487, 1, 113249, 113236, 1, 113558, 1, 113199,
113472, 6, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0]], device='cuda:0')
retrieve_ids= tensor([[ 2, 3, 4, 6, 8, 10, 11, 12, 13, 16, 18, 19, 22, 24, 25, 26, 28, 29,
30, 31, 32, 34, 35, 36, 38, 40, 41, 42, 43, 46, 47, 49, 51],
[ 2, 3, 4, 6, 8, 10, 12, 14, 15, 16, 18, 19, 20, 22, 24, 25, 26, 27,
29, 30, 31, 32, 33, 34, 36, 37, 38, 39, 41, 42, 44, 0, 0],
[ 1, 2, 3, 4, 5, 6, 7, 10, 11, 13, 15, | |
quote = F, row.names = F)''' % outfile)
#########################################
#########################################
#########################################
@jobs_limit(1, "R")
@follows(mkdir("enrichment.dir"))
@merge([buildGenusDiffList, buildPyruvateOxidaseAnnotations],
       r"enrichment.dir/intersection_pyruvate_oxidase.sig")
def testEnrichmentOfPyruvateOxidaseIntersection(infiles, outfile):
    '''
    Fisher's exact test for enrichment of pyruvate-oxidase-carrying
    genera among genera upregulated in both the DNA and RNA analyses,
    plus a barplot of the percentages.

    infiles is the flattened merge input: the differential genus lists
    from buildGenusDiffList followed by the annotation file (presumably
    [dna, rna, annotations] -- confirm against buildGenusDiffList's
    outputs).
    '''
    # differential genus lists: column 1 = genus, column 2 = status flag
    R('''dna <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % infiles[0])
    R('''rna <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % infiles[1])
    # genera upregulated ("1") in both DNA and RNA analyses
    R('''diff <- intersect(dna[,1][dna[,2] == "1"], rna[,1][rna[,2] == "1"])''')
    # annotation file: genus -> yes/no for pyruvate oxidase
    R('''anno <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % infiles[2])
    # get annotated genomes with pyruvate oxidase and without
    R('''withpo <- anno[,1][anno[,2] == "yes"]''')
    R('''nopo <- anno[,1][anno[,2] == "no"]''')
    # build 2x2 contingency matrix for testing
    # the use of "po" stands for pyruvate oxidase
    R('''diffwithpo <- length(intersect(diff, withpo))''')
    R('''diffnopo <- length(intersect(diff, nopo))''')
    # BUG FIX: the "not differential" cells must exclude the differential
    # genera themselves; the original used setdiff(withpo, diffwithpo),
    # i.e. setdiff against a length() count, which removed nothing.
    R('''notdiffwithpo <- length(setdiff(withpo, diff))''')
    R('''notdiffnopo <- length(setdiff(nopo, diff))''')
    R('''dat <- data.frame("diff" = c(diffwithpo, diffnopo),
                           "notdiff" = c(notdiffwithpo, notdiffnopo))''')
    R('''print(dat)''')
    # barplot of %% genera with pyruvate oxidase: background vs intersection
    R('''toplot <- data.frame("percentage" = c(dat[1,2] / (dat[2,2] + dat[1,2])*100,
                                               dat[1,1] / (dat[1,1] + dat[2,1])*100))''')
    R('''toplot$status <- c("background", "intersection_up")''')
    outname_barplot = P.snip(outfile, ".sig") + ".pdf"
    R('''library(ggplot2)''')
    R('''ggplot(toplot, aes(x = status, y = percentage)) + geom_bar(position = "dodge", stat = "identity")''')
    R('''ggsave("%s")''' % outname_barplot)
    # perform test and write results
    R('''ftest <- fisher.test(dat)''')
    R('''res <- data.frame("estimate" = ftest$estimate, "p-value" = ftest$p.value)''')
    R('''write.table(res, file = "%s", sep = "\t", quote = F, row.names = F)''' % outfile)
#########################################
#########################################
#########################################
@follows(mkdir("diff.dir"))
@merge([os.path.join(PARAMS.get("rna_communitiesdir"), "csvdb"),
        os.path.join(PARAMS.get("dna_communitiesdir"), "csvdb")],
       "diff.dir/common_genes.tsv")
def buildCommonGeneList(infiles, outfile):
    '''
    get a list of genes that were common beteween DNA
    and RNA analysis - used for downstream analysis.

    Writes the intersection of genes tested in the HhaIL10R vs WT
    contrast of both databases to outfile, one gene per line.
    '''
    statement = """SELECT taxa
                   FROM gene_counts_diff
                   WHERE group1 == "HhaIL10R"
                   AND group2 == "WT"
                   """
    gene_sets = []
    # infiles = [rna csvdb, dna csvdb]; collect one gene set per database
    for database in infiles:
        dbh = sqlite3.connect(database)
        try:
            gene_sets.append({row[0] for row in dbh.cursor().execute(statement)})
        finally:
            # the original code leaked these connections
            dbh.close()
    # close the output handle deterministically (the original leaked it)
    with open(outfile, "w") as outf:
        for gene in gene_sets[0].intersection(gene_sets[1]):
            outf.write(gene + "\n")
#########################################
#########################################
#########################################
@follows(mkdir("diff.dir"))
@merge([os.path.join(PARAMS.get("rna_communitiesdir"), "csvdb"),
os.path.join(PARAMS.get("dna_communitiesdir"), "csvdb")],
"diff.dir/common_genera.tsv")
def buildCommonGeneraList(infiles, outfile):
    '''
    Write the set of genera called differential in both the RNA and DNA
    analyses (HhaIL10R vs WT) to ``outfile``, one genus per line; used
    for downstream analysis.

    Args:
        infiles: pair of sqlite database paths - (RNA csvdb, DNA csvdb).
        outfile: output path for the common genera list.
    '''
    # same query against both databases; previously duplicated inline
    query = """SELECT taxa
               FROM genus_diamond_aggregated_counts_diff
               WHERE group1 == "HhaIL10R"
               AND group2 == "WT"
               """
    per_analysis = []
    for database in infiles:
        dbh = sqlite3.connect(database)
        try:
            rows = dbh.cursor().execute(query).fetchall()
            per_analysis.append(set(row[0] for row in rows))
        finally:
            # the original leaked both connection handles
            dbh.close()
    rna, dna = per_analysis
    # the original also left the output file handle open
    with open(outfile, "w") as outf:
        for genus in rna.intersection(dna):
            outf.write(genus + "\n")
#########################################
#########################################
#########################################
@follows(mkdir("diff.dir"))
@transform([os.path.join(PARAMS.get("rna_communitiesdir"), "csvdb"),
os.path.join(PARAMS.get("dna_communitiesdir"), "csvdb")],
regex("(\S+)/(.*NA).*/csvdb"),
add_inputs(buildCommonGeneList),
r"diff.dir/\2_HhaIL10R_vs_WT.diff.genes.tsv")
def buildGeneDiffList(infiles, outfile):
    '''
    Build a list of differentially expressed genes (cogs) between
    HhaIL10R and WT, restricted to genes detected in both the DNA and
    RNA analyses, ordered by decreasing fold change.

    A gene qualifies when adj_P_Val < 0.05 and |logFC| > 1.

    Args:
        infiles: (csvdb path, common-gene list from buildCommonGeneList).
        outfile: output path, one gene per line.
    '''
    # genes detected in both analyses; strip() is robust to a missing
    # trailing newline (the previous x[:-1] chopped a real character)
    with open(infiles[1]) as inf:
        common = set(line.strip() for line in inf)
    common.discard("")
    dbh = sqlite3.connect(infiles[0])
    try:
        cc = dbh.cursor()
        # parameterised IN clause instead of splicing quoted names into
        # the SQL string by hand
        placeholders = ",".join("?" * len(common))
        statement = """SELECT taxa
                       FROM gene_counts_diff
                       WHERE group1 == "HhaIL10R"
                       AND group2 == "WT"
                       AND adj_P_Val < 0.05
                       AND (logFC > 1 OR logFC < -1)
                       AND taxa IN (%s)
                       ORDER BY logFC DESC""" % placeholders
        with open(outfile, "w") as outf:
            if common:
                for gene in cc.execute(statement, sorted(common)).fetchall():
                    outf.write(gene[0] + "\n")
    finally:
        # the original leaked the connection handle
        dbh.close()
#########################################
#########################################
#########################################
@merge(buildGeneDiffList, "diff.dir/genes_overlap.tsv")
def buildDiffOverlap(infiles, outfile):
    '''
    Tabulate the overlap between the DNA and RNA differential gene
    lists: writes a one-row table with the two list sizes and the size
    of their intersection.

    Args:
        infiles: gene-list files; the DNA one has "DNA" in its name,
                 the RNA one has "RNA" in its name.
        outfile: output tsv path.
    '''
    def _read_list(tag):
        # first input whose filename names the analysis type; the
        # original opened *every* matching file and closed none of them
        fname = next(x for x in infiles if tag in x)
        with open(fname) as inf:
            return set(inf.readlines())

    dna = _read_list("DNA")
    rna = _read_list("RNA")
    ndna = len(dna)
    nrna = len(rna)
    overlap = len(dna.intersection(rna))
    with open(outfile, "w") as outf:
        outf.write("nDNA\tnRNA\tnoverlap\n%(ndna)i\t%(nrna)i\t%(overlap)i\n"
                   % locals())
#########################################
#########################################
#########################################
@merge([buildCommonGeneList, buildDiffOverlap],
       "diff.dir/genes_overlap.sig")
def testSignificanceOfGenesOverlap(infiles, outfile):
    '''
    Test the significance of the overlap between the RNA and DNA
    differential gene lists with a hypergeometric test, using the
    commonly detected genes (from buildCommonGeneList) as the
    population. Writes a labelled x/m/n/k/p-value table to outfile.

    NOTE(review): state is carried between R() calls through a shared
    global R session - do not reorder these statements.
    '''
    common, overlap = infiles
    R('''pop <- read.csv("%s", header = F, sep = "\t", stringsAsFactors = F)''' % common)
    R('''overlaps <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % overlap)
    # total genes in population
    R('''npop <- nrow(pop)''')
    # x = number of white balls picked = overlap
    R('''x <- overlaps$noverlap''')
    # m = total number of white balls = total diff in RNA analysis
    R('''m <- overlaps$nRNA''')
    # n = total number of black balls = total - diff in RNA analysis
    R('''n <- npop - m''')
    # k = total balls sampled = number of genes different in DNA analysis
    R('''k <- overlaps$nDNA''')
    # upper-tail hypergeometric probability; 1-phyper(x,...) is P(X > x)
    R('''p <- 1-phyper(x,m,n,k)''')
    # assemble a labelled 5x2 result table (name, value) and write it out
    R('''res <- matrix(ncol = 2, nrow = 5)''')
    R('''res[1,1] <- "x"''')
    R('''res[2,1] <- "m"''')
    R('''res[3,1] <- "n"''')
    R('''res[4,1] <- "k"''')
    R('''res[5,1] <- "p-value"''')
    R('''res[1,2] <- x''')
    R('''res[2,2] <- m''')
    R('''res[3,2] <- n''')
    R('''res[4,2] <- k''')
    R('''res[5,2] <- p''')
    R('''write.table(as.data.frame(res), file = "%s", quote = F, sep = "\t", row.names = F)''' % outfile)
#########################################
#########################################
#########################################
@merge([os.path.join(PARAMS.get("dna_communitiesdir"), "counts.dir/genus.diamond.aggregated.counts.diff.tsv"),
        os.path.join(PARAMS.get("rna_communitiesdir"), "counts.dir/genus.diamond.aggregated.counts.diff.tsv")],
       "diff.dir/genus_overlap.sig")
def testSignificanceOfDiffGenusOverlap(infiles, outfile):
    '''
    Use a hypergeometric test to determine the significance of the
    overlap between differential genera (P.Value < 0.05, HhaIL10R vs
    WT) in the DNA and RNA analyses. Writes a labelled x/m/n/k/p-value
    table to outfile.

    NOTE(review): state is carried between R() calls through a shared
    global R session - do not reorder these statements.
    '''
    dna, rna = infiles
    R('''dna <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % dna)
    # restrict both tables to the HhaIL10R vs WT comparison
    R('''dna <- dna[dna$group1 == "HhaIL10R" & dna$group2 == "WT",]''')
    R('''rna <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % rna)
    R('''rna <- rna[rna$group1 == "HhaIL10R" & rna$group2 == "WT",]''')
    # must use commonly detected genera as the sampling population
    R('''common <- intersect(rna$taxa, dna$taxa)''')
    R('''dna <- dna[dna$taxa %in% common,]''')
    R('''rna <- rna[rna$taxa %in% common,]''')
    # total population = common genera
    R('''npop <- length(common)''')
    # x = number of white balls picked = overlap
    R('''x <- length(intersect(dna$taxa[dna$P.Value < 0.05], rna$taxa[rna$P.Value < 0.05]))''')
    # m = total number of white balls = total diff in RNA analysis
    R('''m <- length(rna$taxa[rna$P.Value < 0.05])''')
    # n = total number of black balls = total - diff in RNA analysis
    R('''n <- npop - m''')
    # k = total balls sampled = number of genera different in DNA analysis
    R('''k <- length(dna$taxa[dna$P.Value < 0.05])''')
    # upper-tail hypergeometric probability; 1-phyper(x,...) is P(X > x)
    R('''p <- 1-phyper(x,m,n,k)''')
    # assemble a labelled 5x2 result table (name, value) and write it out
    R('''res <- matrix(ncol = 2, nrow = 5)''')
    R('''res[1,1] <- "x"''')
    R('''res[2,1] <- "m"''')
    R('''res[3,1] <- "n"''')
    R('''res[4,1] <- "k"''')
    R('''res[5,1] <- "p-value"''')
    R('''res[1,2] <- x''')
    R('''res[2,2] <- m''')
    R('''res[3,2] <- n''')
    R('''res[4,2] <- k''')
    R('''res[5,2] <- p''')
    R('''write.table(as.data.frame(res), file = "%s", quote = F, sep = "\t", row.names = F)''' % outfile)
#########################################
#########################################
#########################################
@follows(buildGenusDiffList, buildGeneDiffList, buildDiffOverlap)
def diff_lists():
    '''Aggregate target: runs all differential-list building tasks.'''
    pass
#########################################
#########################################
#########################################
@jobs_limit(1, "R")
@transform(buildGeneDiffList,
suffix(".tsv"),
add_inputs([os.path.join(PARAMS.get("dna_communitiesdir"), "genes.dir/gene_counts.norm.matrix"),
os.path.join(PARAMS.get("rna_communitiesdir"), "genes.dir/gene_counts.norm.matrix")]),
".png")
def heatMapDiffGenes(infiles, outfile):
'''
heatmap differences between WT and HhaIL10R groups
'''
# we do this for the different combinations to see how the
# DNA and RNA differences compare
R('''diff <- read.csv("%s", header = F, sep = "\t", stringsAsFactors = F)''' % infiles[0])
R('''library(gplots)''')
R('''library(gtools)''')
for mat in infiles[1]:
if "RNA" in mat:
outname = os.path.dirname(outfile) + "/RNA_" + os.path.basename(outfile)
elif "DNA" in mat:
outname = os.path.dirname(outfile) + "/DNA_" + os.path.basename(outfile)
R('''dat <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % mat)
R('''rownames(dat) <- dat$taxa''')
R('''dat <- dat[, 1:ncol(dat)-1]''')
R('''dat <- dat[diff[,1],]''')
R('''dat <- na.omit(dat)''')
R('''colnames(dat) <- unlist(strsplit(colnames(dat), ".diamond_count"))''')
R('''dat <- dat[, mixedsort(colnames(dat))]''')
R('''samples <- colnames(dat)''')
R('''dat <- t(apply(dat, 1, scale))''')
R('''colnames(dat) <- samples''')
R('''cols <- colorRampPalette(c("blue", "white", "red"))''')
R('''png("%s")''' % outname)
R('''heatmap.2(as.matrix(dat), col = cols, scale = "row", trace | |
label_lens = batch['seq_lens'], batch['target_lens']
seq_lens = seq_lens.to(self.device, non_blocking=True)
label_lens = label_lens.to(self.device, non_blocking=True)
target = (target, label_lens)
o = self.model.nn(input, seq_lens)
else:
o = self.model.nn(input)
self.optimizer.zero_grad()
loss = self.loss_fn(self.model.criterion, o, target)
if not torch.isinf(loss):
loss.backward()
self.optimizer.step()
else:
logger.debug('infinite loss in trial')
if self.lr_scheduler:
self.lr_scheduler.batch_step(loss=loss)
iteration_callback()
# prevent memory leak
del loss, o
self.iterations += self.event_it
logger.debug('Starting evaluation run')
eval_res = self.evaluator(self.model, self.val_set, self.device)
if self.lr_scheduler:
self.lr_scheduler.epoch_step(val_loss=eval_res['val_metric'])
self.stopper.update(eval_res['val_metric'])
self.model.user_metadata['accuracy'].append((self.iterations, float(eval_res['val_metric'])))
logger.info('Saving to {}_{}'.format(self.filename_prefix, self.stopper.epoch))
# fill one_channel_mode after 1 iteration over training data set
im_mode = self.train_set.dataset.im_mode
if im_mode in ['1', 'L']:
self.model.one_channel_mode = im_mode
try:
self.model.hyper_params['completed_epochs'] = self.stopper.epoch
self.model.save_model('{}_{}.mlmodel'.format(self.filename_prefix, self.stopper.epoch))
except Exception as e:
logger.error('Saving model failed: {}'.format(str(e)))
event_callback(epoch=self.stopper.epoch, **eval_res)
@classmethod
def load_model(cls, model_path: str,
load_hyper_parameters: Optional[bool] = False,
message: Callable[[str], None] = lambda *args, **kwargs: None):
logger.info(f'Loading existing model from {model_path} ')
message(f'Loading existing model from {model_path} ', nl=False)
nn = vgsl.TorchVGSLModel.load_model(model_path)
if load_hyper_parameters:
hyper_params = nn.hyper_params
else:
hyper_params = {}
message('\u2713', fg='green', nl=False)
return nn, hyper_params
@classmethod
def recognition_train_gen(cls,
hyper_params: Dict = None,
progress_callback: Callable[[str, int], Callable[[None], None]] = lambda string, length: lambda: None,
message: Callable[[str], None] = lambda *args, **kwargs: None,
output: str = 'model',
spec: str = default_specs.RECOGNITION_SPEC,
append: Optional[int] = None,
load: Optional[str] = None,
device: str = 'cpu',
reorder: bool = True,
training_data: Sequence[Dict] = None,
evaluation_data: Sequence[Dict] = None,
preload: Optional[bool] = None,
threads: int = 1,
load_hyper_parameters: bool = False,
repolygonize: bool = False,
force_binarization: bool = False,
format_type: str = 'path',
codec: Optional[Dict] = None,
resize: str = 'fail',
augment: bool = False):
"""
This is an ugly constructor that takes all the arguments from the command
line driver, finagles the datasets, models, and hyperparameters correctly
and returns a KrakenTrainer object.
Setup parameters (load, training_data, evaluation_data, ....) are named,
model hyperparameters (everything in
kraken.lib.default_specs.RECOGNITION_HYPER_PARAMS) are in in the
`hyper_params` argument.
Args:
hyper_params (dict): Hyperparameter dictionary containing all fields
from
kraken.lib.default_specs.RECOGNITION_HYPER_PARAMS
progress_callback (Callable): Callback for progress reports on various
computationally expensive processes. A
human readable string and the process
length is supplied. The callback has to
return another function which will be
executed after each step.
message (Callable): Messaging printing method for above log but below
warning level output, i.e. infos that should
generally be shown to users.
**kwargs: Setup parameters, i.e. CLI parameters of the train() command.
Returns:
A KrakenTrainer object.
"""
hyper_params_ = default_specs.RECOGNITION_HYPER_PARAMS
# load model if given. if a new model has to be created we need to do that
# after data set initialization, otherwise to output size is still unknown.
if load:
nn, hp = cls.load_model(load,
load_hyper_parameters=load_hyper_parameters,
message=message)
hyper_params_.update(hp)
else:
nn = None
if hyper_params:
hyper_params_.update(hyper_params)
validate_hyper_parameters(hyper_params_)
hyper_params = hyper_params_
DatasetClass = GroundTruthDataset
valid_norm = True
if format_type and format_type != 'path':
logger.info(f'Parsing {len(training_data)} XML files for training data')
if repolygonize:
message('Repolygonizing data')
training_data = preparse_xml_data(training_data, format_type, repolygonize)
evaluation_data = preparse_xml_data(evaluation_data, format_type, repolygonize)
DatasetClass = PolygonGTDataset
valid_norm = False
elif format_type == 'path':
if force_binarization:
logger.warning('Forced binarization enabled in `path` mode. Will be ignored.')
force_binarization = False
if repolygonize:
logger.warning('Repolygonization enabled in `path` mode. Will be ignored.')
training_data = [{'image': im} for im in training_data]
if evaluation_data:
evaluation_data = [{'image': im} for im in evaluation_data]
valid_norm = True
# format_type is None. Determine training type from length of training data entry
else:
if len(training_data[0]) >= 4:
DatasetClass = PolygonGTDataset
valid_norm = False
else:
if force_binarization:
logger.warning('Forced binarization enabled with box lines. Will be ignored.')
force_binarization = False
if repolygonize:
logger.warning('Repolygonization enabled with box lines. Will be ignored.')
# preparse input sizes from vgsl string to seed ground truth data set
# sizes and dimension ordering.
if not nn:
spec = spec.strip()
if spec[0] != '[' or spec[-1] != ']':
raise KrakenInputException('VGSL spec {} not bracketed'.format(spec))
blocks = spec[1:-1].split(' ')
m = re.match(r'(\d+),(\d+),(\d+),(\d+)', blocks[0])
if not m:
raise KrakenInputException(f'Invalid input spec {blocks[0]}')
batch, height, width, channels = [int(x) for x in m.groups()]
else:
batch, channels, height, width = nn.input
transforms = generate_input_transforms(batch, height, width, channels, hyper_params['pad'], valid_norm, force_binarization)
if len(training_data) > 2500 and not preload:
logger.info('Disabling preloading for large (>2500) training data set. Enable by setting --preload parameter')
preload = False
# implicit preloading enabled for small data sets
if preload is None:
preload = True
# set multiprocessing tensor sharing strategy
if 'file_system' in torch.multiprocessing.get_all_sharing_strategies():
logger.debug('Setting multiprocessing tensor sharing strategy to file_system')
torch.multiprocessing.set_sharing_strategy('file_system')
gt_set = DatasetClass(normalization=hyper_params['normalization'],
whitespace_normalization=hyper_params['normalize_whitespace'],
reorder=reorder,
im_transforms=transforms,
preload=preload,
augmentation=hyper_params['augment'])
bar = progress_callback('Building training set', len(training_data))
if threads:
with Pool(processes=threads) as pool:
for im in pool.imap_unordered(partial(_star_fun, gt_set.parse), training_data, 5):
logger.debug(f'Adding line {im} to training set')
if im:
gt_set.add(**im)
bar()
else:
for im in training_data:
gt_set.add(**im)
bar()
val_set = DatasetClass(normalization=hyper_params['normalization'],
whitespace_normalization=hyper_params['normalize_whitespace'],
reorder=reorder,
im_transforms=transforms,
preload=preload)
bar = progress_callback('Building validation set', len(evaluation_data))
if threads:
with Pool(processes=threads) as pool:
for im in pool.imap_unordered(partial(_star_fun, val_set.parse), evaluation_data, 5):
logger.debug(f'Adding line {im} to validation set')
if im:
val_set.add(**im)
bar()
else:
for im in evaluation_data:
gt_set.add(**im)
bar()
if len(gt_set._images) == 0:
logger.error('No valid training data was provided to the train command. Please add valid XML or line data.')
return None
logger.info(f'Training set {len(gt_set._images)} lines, validation set {len(val_set._images)} lines, alphabet {len(gt_set.alphabet)} symbols')
alpha_diff_only_train = set(gt_set.alphabet).difference(set(val_set.alphabet))
alpha_diff_only_val = set(val_set.alphabet).difference(set(gt_set.alphabet))
if alpha_diff_only_train:
logger.warning(f'alphabet mismatch: chars in training set only: {alpha_diff_only_train} (not included in accuracy test during training)')
if alpha_diff_only_val:
logger.warning(f'alphabet mismatch: chars in validation set only: {alpha_diff_only_val} (not trained)')
logger.info('grapheme\tcount')
for k, v in sorted(gt_set.alphabet.items(), key=lambda x: x[1], reverse=True):
char = make_printable(k)
if char == k:
char = '\t' + char
logger.info(f'{char}\t{v}')
logger.debug('Encoding training set')
# use model codec when given
if append:
# is already loaded
nn = cast(vgsl.TorchVGSLModel, nn)
gt_set.encode(codec)
message('Slicing and dicing model ', nl=False)
# now we can create a new model
spec = '[{} O1c{}]'.format(spec[1:-1], gt_set.codec.max_label()+1)
logger.info(f'Appending {spec} to existing model {nn.spec} after {append}')
nn.append(append, spec)
nn.add_codec(gt_set.codec)
message('\u2713', fg='green')
logger.info(f'Assembled model spec: {nn.spec}')
elif load:
# is already loaded
nn = cast(vgsl.TorchVGSLModel, nn)
# prefer explicitly given codec over network codec if mode is 'both'
codec = codec if (codec and resize == 'both') else nn.codec
try:
gt_set.encode(codec)
except KrakenEncodeException:
message('Network codec not compatible with training set')
alpha_diff = set(gt_set.alphabet).difference(set(codec.c2l.keys()))
if resize == 'fail':
logger.error(f'Training data and model codec alphabets mismatch: {alpha_diff}')
return None
elif resize == 'add':
message('Adding missing labels to network ', nl=False)
logger.info(f'Resizing codec to include {len(alpha_diff)} new code points')
codec = codec.add_labels(alpha_diff)
nn.add_codec(codec)
logger.info(f'Resizing last layer in network to {codec.max_label()+1} outputs')
nn.resize_output(codec.max_label()+1)
gt_set.encode(nn.codec)
message('\u2713', fg='green')
elif resize == 'both':
message('Fitting network exactly to training set ', nl=False)
logger.info(f'Resizing network or given codec to {gt_set.alphabet} code sequences')
gt_set.encode(None)
ncodec, del_labels = codec.merge(gt_set.codec)
logger.info(f'Deleting {len(del_labels)} output classes from network ({len(codec)-len(del_labels)} retained)')
gt_set.encode(ncodec)
nn.resize_output(ncodec.max_label()+1, del_labels)
message('\u2713', fg='green')
else:
logger.error(f'invalid resize parameter value {resize}')
return None
else:
gt_set.encode(codec)
logger.info(f'Creating new model {spec} with {gt_set.codec.max_label()+1} outputs')
spec = '[{} O1c{}]'.format(spec[1:-1], gt_set.codec.max_label()+1)
nn = vgsl.TorchVGSLModel(spec)
# initialize weights
message('Initializing model ', nl=False)
nn.init_weights()
nn.add_codec(gt_set.codec)
# initialize codec
message('\u2713', fg='green')
if nn.one_channel_mode and gt_set.im_mode != nn.one_channel_mode:
logger.warning(f'Neural network has been trained on mode {nn.one_channel_mode} images, training set contains mode {gt_set.im_mode} data. Consider setting `force_binarization`')
if format_type != 'path' and nn.seg_type == 'bbox':
logger.warning('Neural network has been trained on bounding box image information but training set is polygonal.')
# half the number of data loading processes if device isn't cuda and we haven't enabled preloading
if device == 'cpu' and not preload:
loader_threads = threads // 2
else:
loader_threads = threads
train_loader = InfiniteDataLoader(gt_set, batch_size=hyper_params['batch_size'],
shuffle=True,
num_workers=loader_threads,
pin_memory=True,
collate_fn=collate_sequences)
threads = max(threads - loader_threads, 1)
# don't encode validation set as the alphabets may not match causing encoding failures
val_set.no_encode()
val_loader = DataLoader(val_set,
batch_size=hyper_params['batch_size'],
num_workers=loader_threads,
pin_memory=True,
collate_fn=collate_sequences)
logger.debug('Constructing {} optimizer (lr: {}, momentum: {})'.format(hyper_params['optimizer'], hyper_params['lrate'], hyper_params['momentum']))
# updates model's hyper params with users defined
nn.hyper_params = hyper_params
# set model type metadata field
nn.model_type = 'recognition'
# set mode to trainindg
nn.train()
# set number of OpenMP threads
logger.debug(f'Set OpenMP threads to {threads}')
nn.set_num_threads(threads)
if hyper_params['optimizer'] | |
\b
{
"traintuples": list[{
"algo_key": str,
"data_manager_key": str,
"train_data_sample_keys": list[str],
"traintuple_id": str,
"in_models_ids": list[str],
"tag": str,
"metadata": dict
}],
"composite_traintuples": list[{
"composite_traintuple_id": str,
"algo_key": str,
"data_manager_key": str,
"train_data_sample_keys": list[str],
"in_head_model_id": str,
"in_trunk_model_id": str,
"out_trunk_model_permissions": {
"authorized_ids": list[str],
},
"tag": str,
"metadata": dict
}]
"aggregatetuples": list[{
"aggregatetuple_id": str,
"algo_key": str,
"worker": str,
"in_models_ids": list[str],
"tag": str,
"metadata": dict
}],
"testtuples": list[{
"objective_key": str,
"data_manager_key": str,
"test_data_sample_keys": list[str],
"traintuple_id": str,
"tag": str,
"metadata": dict
}],
"clean_models": bool,
"tag": str,
"metadata": dict
}
Disable the auto batching to upload all the tuples of the
compute plan at once.
If the auto batching is enabled, change the `batch_size` to define the number of
tuples uploaded in each batch (default 20).
"""
if no_auto_batching and batch_size:
raise click.BadOptionUsage('--batch_size',
"The --batch_size option cannot be used when using "
"--no_auto_batching.")
client = get_client(ctx.obj)
res = client.add_compute_plan(data, auto_batching=not no_auto_batching, batch_size=batch_size)
printer = printers.get_asset_printer(assets.COMPUTE_PLAN, ctx.obj.output_format)
printer.print(res, is_list=False)
@add.command('aggregate_algo')
@click.argument('data', type=click.Path(exists=True, dir_okay=False), callback=load_json_from_path,
                metavar="PATH")
@click_global_conf_with_output_format
@click_global_conf_retry_timeout
@click.pass_context
@error_printer
def add_aggregate_algo(ctx, data):
    """Add aggregate algo.
    The path must point to a valid JSON file with the following schema:
    \b
    {
        "name": str,
        "description": path,
        "file": path,
        "permissions": {
            "public": bool,
            "authorized_ids": list[str],
        },
        "metadata": dict
    }
    \b
    Where:
    - name: name of the algorithm
    - description: path to a markdown file describing the algo
    - file: path to tar.gz or zip archive containing the algorithm python
      script and its Dockerfile
    - permissions: define asset access permissions
    """
    conf = ctx.obj
    client = get_client(conf)
    # register the algo, then re-fetch it (with retries) for display
    new_key = client.add_aggregate_algo(data)
    fetched = conf.retry(client.get_aggregate_algo)(new_key)
    printers.get_asset_printer(assets.AGGREGATE_ALGO, conf.output_format).print(
        fetched, is_list=False)
@add.command('composite_algo')
@click.argument('data', type=click.Path(exists=True, dir_okay=False), callback=load_json_from_path,
                metavar="PATH")
@click_global_conf_with_output_format
@click_global_conf_retry_timeout
@click.pass_context
@error_printer
def add_composite_algo(ctx, data):
    """Add composite algo.
    The path must point to a valid JSON file with the following schema:
    \b
    {
        "name": str,
        "description": path,
        "file": path,
        "permissions": {
            "public": bool,
            "authorized_ids": list[str],
        },
        "metadata": dict
    }
    \b
    Where:
    - name: name of the algorithm
    - description: path to a markdown file describing the algo
    - file: path to tar.gz or zip archive containing the algorithm python
      script and its Dockerfile
    - permissions: define asset access permissions
    """
    client = get_client(ctx.obj)
    # register the algo, then re-fetch it (with retries) for display
    composite_key = client.add_composite_algo(data)
    fetched = ctx.obj.retry(client.get_composite_algo)(composite_key)
    printer = printers.get_asset_printer(assets.COMPOSITE_ALGO, ctx.obj.output_format)
    printer.print(fetched, is_list=False)
@add.command('traintuple')
@click.option('--algo-key', required=True)
@click.option('--dataset-key', required=True)
@click.option('--data-samples-path', 'data_samples', required=True,
              type=click.Path(exists=True, resolve_path=True, dir_okay=False),
              callback=load_json_from_path)
@click.option('--in-model-key', 'in_models_keys', type=click.STRING, multiple=True,
              help='In model traintuple key.')
@click.option('--tag')
@click_global_conf_with_output_format
@click_global_conf_retry_timeout
@click_option_metadata
@click.pass_context
@error_printer
def add_traintuple(ctx, algo_key, dataset_key, data_samples, in_models_keys, tag, metadata):
    """Add traintuple.
    The option --data-samples-path must point to a valid JSON file with the
    following schema:
    \b
    {
        "keys": list[str],
    }
    \b
    Where:
    - keys: list of data sample keys
    """
    client = get_client(ctx.obj)
    # mandatory fields first, then any optional ones that were supplied
    payload = {
        'algo_key': algo_key,
        'data_manager_key': dataset_key,
    }
    if data_samples:
        payload['train_data_sample_keys'] = load_data_samples_keys(data_samples)
    for field, value in (('tag', tag), ('metadata', metadata),
                         ('in_models_keys', in_models_keys)):
        if value:
            payload[field] = value
    new_key = client.add_traintuple(payload)
    fetched = ctx.obj.retry(client.get_traintuple)(new_key)
    printers.get_asset_printer(assets.TRAINTUPLE, ctx.obj.output_format).print(
        fetched, is_list=False)
@add.command('aggregatetuple')
@click.option('--algo-key', required=True, help="Aggregate algo key.")
@click.option('--in-model-key', 'in_models_keys', type=click.STRING, multiple=True,
              help='In model traintuple key.')
@click.option('--worker', required=True, help='Node ID for worker execution.')
@click.option('--rank', type=click.INT)
@click.option('--tag')
@click_global_conf_with_output_format
@click_global_conf_retry_timeout
@click_option_metadata
@click.pass_context
@error_printer
def add_aggregatetuple(ctx, algo_key, in_models_keys, worker, rank, tag, metadata):
    """Add aggregatetuple."""
    client = get_client(ctx.obj)
    payload = {
        'algo_key': algo_key,
        'worker': worker,
    }
    # optional fields: note rank may legitimately be 0, so it is checked
    # against None rather than truthiness
    if in_models_keys:
        payload['in_models_keys'] = in_models_keys
    if rank is not None:
        payload['rank'] = rank
    for field, value in (('tag', tag), ('metadata', metadata)):
        if value:
            payload[field] = value
    new_key = client.add_aggregatetuple(payload)
    fetched = ctx.obj.retry(client.get_aggregatetuple)(new_key)
    printers.get_asset_printer(assets.AGGREGATETUPLE, ctx.obj.output_format).print(
        fetched, is_list=False)
@add.command('composite_traintuple')
@click.option('--algo-key', required=True)
@click.option('--dataset-key', required=True)
@click.option('--data-samples-path', 'data_samples', required=True,
              type=click.Path(exists=True, resolve_path=True, dir_okay=False),
              callback=load_json_from_path)
@click.option('--head-model-key',
              help='Must be used with --trunk-model-key option.')
@click.option('--trunk-model-key',
              help='Must be used with --head-model-key option.')
@click.option('--out-trunk-model-permissions-path', 'out_trunk_model_permissions',
              type=click.Path(exists=True, resolve_path=True, dir_okay=False),
              callback=load_json_from_path,
              help='Load a permissions file.')
@click.option('--tag')
@click_global_conf_with_output_format
@click_global_conf_retry_timeout
@click_option_metadata
@click.pass_context
@error_printer
def add_composite_traintuple(ctx, algo_key, dataset_key, data_samples, head_model_key,
                             trunk_model_key, out_trunk_model_permissions, tag, metadata):
    """Add composite traintuple.
    The option --data-samples-path must point to a valid JSON file with the
    following schema:
    \b
    {
        "keys": list[str],
    }
    \b
    Where:
    - keys: list of data sample keys
    The option --out-trunk-model-permissions-path must point to a valid JSON file with the
    following schema:
    \b
    {
        "authorized_ids": list[str],
    }
    """
    # head and trunk model keys must be given together or not at all
    if head_model_key and not trunk_model_key:
        raise click.BadOptionUsage('--trunk-model-key',
                                   "The --trunk-model-key option is required when using "
                                   "--head-model-key.")
    if trunk_model_key and not head_model_key:
        raise click.BadOptionUsage('--head-model-key',
                                   "The --head-model-key option is required when using "
                                   "--trunk-model-key.")
    client = get_client(ctx.obj)
    payload = {
        'algo_key': algo_key,
        'data_manager_key': dataset_key,
        'in_head_model_key': head_model_key,
        'in_trunk_model_key': trunk_model_key,
    }
    if data_samples:
        payload['train_data_sample_keys'] = load_data_samples_keys(data_samples)
    if out_trunk_model_permissions:
        payload['out_trunk_model_permissions'] = out_trunk_model_permissions
    for field, value in (('tag', tag), ('metadata', metadata)):
        if value:
            payload[field] = value
    new_key = client.add_composite_traintuple(payload)
    fetched = ctx.obj.retry(client.get_composite_traintuple)(new_key)
    printers.get_asset_printer(assets.COMPOSITE_TRAINTUPLE, ctx.obj.output_format).print(
        fetched, is_list=False)
@add.command('testtuple')
@click.option('--objective-key', required=True)
@click.option('--dataset-key')
@click.option('--traintuple-key', required=True)
@click.option('--data-samples-path', 'data_samples',
              type=click.Path(exists=True, resolve_path=True, dir_okay=False),
              callback=load_json_from_path)
@click.option('--tag')
@click_global_conf_with_output_format
@click_global_conf_retry_timeout
@click_option_metadata
@click.pass_context
@error_printer
def add_testtuple(ctx, objective_key, dataset_key, traintuple_key, data_samples, tag, metadata):
    """Add testtuple.
    The option --data-samples-path must point to a valid JSON file with the
    following schema:
    \b
    {
        "keys": list[str],
    }
    \b
    Where:
    - keys: list of data sample keys
    """
    client = get_client(ctx.obj)
    payload = dict(objective_key=objective_key,
                   data_manager_key=dataset_key,
                   traintuple_key=traintuple_key)
    if data_samples:
        payload['test_data_sample_keys'] = load_data_samples_keys(data_samples)
    # optional fields are only sent when supplied
    for field, value in (('tag', tag), ('metadata', metadata)):
        if value:
            payload[field] = value
    new_key = client.add_testtuple(payload)
    fetched = ctx.obj.retry(client.get_testtuple)(new_key)
    printers.get_asset_printer(assets.TESTTUPLE, ctx.obj.output_format).print(
        fetched, is_list=False)
@cli.command()
@click.argument('asset-name', type=click.Choice([
    assets.ALGO,
    assets.COMPUTE_PLAN,
    assets.COMPOSITE_ALGO,
    assets.AGGREGATE_ALGO,
    assets.DATASET,
    assets.OBJECTIVE,
    assets.TESTTUPLE,
    assets.TRAINTUPLE,
    assets.COMPOSITE_TRAINTUPLE,
    assets.AGGREGATETUPLE,
]))
@click.argument('asset-key')
@click_option_expand
@click_global_conf_with_output_format
@click.pass_context
@error_printer
def get(ctx, expand, asset_name, asset_key):
    """Get asset definition."""
    expand_valid_assets = (assets.DATASET, assets.TRAINTUPLE, assets.OBJECTIVE, assets.TESTTUPLE,
                           assets.COMPOSITE_TRAINTUPLE, assets.AGGREGATETUPLE, assets.COMPUTE_PLAN)
    # fail fast before any backend call if --expand is not supported here
    if expand and asset_name not in expand_valid_assets:
        raise click.UsageError(
            f'--expand option is available with assets {expand_valid_assets}')
    client = get_client(ctx.obj)
    # the matching getter must exist in the sdk
    getter = getattr(client, f'get_{asset_name.lower()}')
    asset = getter(asset_key)
    printers.get_asset_printer(asset_name, ctx.obj.output_format).print(
        asset, profile=ctx.obj.profile, expand=expand)
@cli.command('list')
@click.argument('asset-name', type=click.Choice([
    assets.ALGO,
    assets.COMPUTE_PLAN,
    assets.COMPOSITE_ALGO,
    assets.AGGREGATE_ALGO,
    assets.DATA_SAMPLE,
    assets.DATASET,
    assets.OBJECTIVE,
    assets.TESTTUPLE,
    assets.TRAINTUPLE,
    assets.COMPOSITE_TRAINTUPLE,
    assets.AGGREGATETUPLE,
    assets.NODE,
]))
@click.option('-f', '--filter', 'filters',
              help='Only display assets that exactly match this filter. Valid syntax is: '
                   '<asset>:<property>:<value>',
              multiple=True)
@click.option('--and', 'filters_logical_clause',
              help='Combine filters using logical ANDs',
              flag_value='and',
              default=True)
@click.option('--or', 'filters_logical_clause',
              help='Combine filters using logical ORs',
              flag_value='or')
@click.option('--advanced-filters',
              callback=validate_json,
              help='Filter results using a complex search (must be a JSON array of valid filters). '
                   'Incompatible with the --filter option')
@click_global_conf_with_output_format
@click.pass_context
@error_printer
def list_(ctx, asset_name, filters, filters_logical_clause, advanced_filters):
    """List assets."""
    client = get_client(ctx.obj)
    # method must exist in sdk
    method = getattr(client, f'list_{asset_name.lower()}')
    # handle filters
    if advanced_filters and filters:
        raise click.UsageError('The --filter and --advanced-filters options are mutually exclusive')
    elif filters:
        filters = list(filters)
        if filters_logical_clause == 'or':
            # Interleave 'OR' between consecutive filters:
            # [f1, f2, f3] -> [f1, 'OR', f2, 'OR', f3].
            # The previous implementation inserted at fixed positions
            # 1..n-1 of the *growing* list, which stacked the 'OR's
            # together for three or more filters
            # (e.g. [f1, 'OR', 'OR', f2, f3]).
            combined = []
            for f in filters:
                combined.append(f)
                combined.append('OR')
            filters = combined[:-1]
    elif advanced_filters:
        filters = advanced_filters
    res = method(filters)
    printer = printers.get_asset_printer(asset_name, ctx.obj.output_format)
    dict_res = [result.dict(exclude_none=False, by_alias=True) for result in res]
    printer.print(dict_res, is_list=True)
@cli.command()
@click.argument('asset-name', type=click.Choice([
    assets.ALGO,
    assets.COMPOSITE_ALGO,
    assets.AGGREGATE_ALGO,
    assets.DATASET,
    assets.OBJECTIVE,
]))
@click.argument('asset-key')
@click_global_conf
@click.pass_context
@error_printer
def describe(ctx, asset_name, asset_key):
    """Display asset description."""
    client = get_client(ctx.obj)
    # resolve the sdk method by name, e.g. describe_dataset (must exist in sdk)
    fetch_description = getattr(client, f'describe_{asset_name.lower()}')
    markdown_text = fetch_description(asset_key)
    # descriptions are markdown documents; render them to the terminal
    consolemd.Renderer().render(markdown_text)
@cli.command()
@click.argument('asset-name', type=click.Choice([
    assets.ALGO,
    assets.COMPOSITE_ALGO,
    assets.AGGREGATE_ALGO,
    assets.DATASET,
    assets.OBJECTIVE,
]))
@click.argument('key')
@click.option('--folder', type=click.Path(), help='destination folder',
              default='.')
@click_global_conf
@click.pass_context
@error_printer
def download(ctx, asset_name, key, folder):
    """Download asset implementation.
    \b
    - algo: the algo and its dependencies
    - dataset: the opener script
    - objective: the metrics and its dependencies
    """
    client = get_client(ctx.obj)
    # resolve the sdk downloader by name, e.g. download_algo (must exist in sdk)
    downloader = getattr(client, f'download_{asset_name.lower()}')
    outcome = downloader(key, folder)
    display(outcome)
@cli.command()
@click.argument('objective_key')
@click_option_expand
@click.option('--sort',
              type=click.Choice(['asc', 'desc']),
              default='desc',
              show_default=True,
              help='Sort models by highest to lowest perf or vice versa')
@click_global_conf_with_output_format
@click.pass_context
@error_printer
def leaderboard(ctx, expand, objective_key, sort):
    """Display objective leaderboard"""
    client = get_client(ctx.obj)
    # fetch the board already sorted server-side, then render it
    board = client.leaderboard(objective_key, sort=sort)
    board_printer = printers.get_leaderboard_printer(ctx.obj.output_format)
    board_printer.print(board, expand=expand)
# Command group: concrete cancellations are registered below with
# @cancel.command(...); the group body itself does nothing.
@cli.group()
@click.pass_context
def cancel(ctx):
    """Cancel execution of an asset."""
    pass
@cancel.command('compute_plan')
@click.argument('compute_plan_key', type=click.STRING)
@click_global_conf_with_output_format
@click.pass_context
# Consistency fix: every other command in this CLI is wrapped in
# @error_printer so sdk errors are rendered as user-facing messages;
# this command was missing it and would surface raw tracebacks.
@error_printer
def cancel_compute_plan(ctx, compute_plan_key):
    """Cancel execution of a compute plan."""
    client = get_client(ctx.obj)
    # method must exist in sdk
    res = client.cancel_compute_plan(compute_plan_key)
    printer = printers.get_asset_printer(assets.COMPUTE_PLAN, ctx.obj.output_format)
    printer.print(res, profile=ctx.obj.profile)
# Command group: concrete updates (data_sample, dataset, compute_plan) are
# registered below with @update.command(...); the group body does nothing.
@cli.group()
@click.pass_context
def update(ctx):
    """Update asset."""
    pass
@update.command('data_sample')
@click.argument('data_samples', type=click.Path(exists=True, dir_okay=False),
                callback=load_json_from_path, metavar="DATA_SAMPLES_PATH")
@click.option('--dataset-key', required=True)
@click_global_conf
@click.pass_context
@error_printer
def update_data_sample(ctx, data_samples, dataset_key):
    """Link data samples with dataset.
    The data samples path must point to a valid JSON file with the following
    schema:
    \b
    {
        "keys": list[str],
    }
    \b
    Where:
    - keys: list of data sample keys
    """
    client = get_client(ctx.obj)
    # extract the "keys" list from the parsed JSON document, then link
    sample_keys = load_data_samples_keys(data_samples, option="DATA_SAMPLES_PATH")
    outcome = client.link_dataset_with_data_samples(dataset_key, sample_keys)
    display(outcome)
@update.command('dataset')
@click.argument('dataset-key')
@click.argument('objective-key')
@click_global_conf
@click.pass_context
@error_printer
def update_dataset(ctx, dataset_key, objective_key):
    """Link dataset with objective."""
    # delegate to the sdk and echo its raw response
    client = get_client(ctx.obj)
    outcome = client.link_dataset_with_objective(dataset_key, objective_key)
    display(outcome)
@update.command('compute_plan')
@click.argument('compute_plan_key', type=click.STRING)
@click.argument('tuples', type=click.Path(exists=True, dir_okay=False),
callback=load_json_from_path, metavar="TUPLES_PATH")
@click.option('--no-auto-batching', '-n', is_flag=True,
help='Disable the auto batching feature')
@click.option('--batch-size', '-b', type=int,
help='Batch size for the auto batching',
default=DEFAULT_BATCH_SIZE, show_default=True)
@click_global_conf_with_output_format
@click.pass_context
@error_printer
def update_compute_plan(ctx, compute_plan_key, tuples, no_auto_batching, batch_size):
"""Update compute plan.
The tuples path must point to a valid JSON file with the following schema:
\b
{
"traintuples": list[{
"algo_key": str,
"data_manager_key": str,
"train_data_sample_keys": list[str],
"traintuple_id": str,
"in_models_ids": list[str],
"tag": str,
"metadata": dict,
}],
"composite_traintuples": list[{
"composite_traintuple_id": str,
"algo_key": str,
"data_manager_key": str,
| |
[int(v**meshRefParam) for v in [nCellsOuter, nCellsNose, nCellsSpan]],
[expOuter, expOuter, expOuter, expOuter,
"((0.5 0.5 {}) (0.5 0.5 {}))".format(expNoseOffset, 1/expNoseOffset),
"((0.5 0.5 {}) (0.5 0.5 {}))".format(expNoseOuter, 1/expNoseOuter),
"((0.5 0.5 {}) (0.5 0.5 {}))".format(expNoseOuter, 1/expNoseOuter),
"((0.5 0.5 {}) (0.5 0.5 {}))".format(expNoseOffset, 1/expNoseOffset),
1, 1, 1, 1],
grading="edgeGrading", ret=True),
# lower surface domain
OFiO.writeBlock(blockMeshToolbox.make3dBlock([6,8,11,7], nVert2D),
[int(v**meshRefParam) for v in [nCellsOuter, nCellsLower, nCellsSpan]],
[expOuter, expOuter, expOuter, expOuter,
"((0.5 0.5 {}) (0.5 0.5 {}))".format(expLongLowerOffset, 1/expLongLowerOffset),
"((0.5 0.5 {}) (0.5 0.5 {}))".format(expLongLowerOuter, 1/expLongLowerOuter),
"((0.5 0.5 {}) (0.5 0.5 {}))".format(expLongLowerOuter, 1/expLongLowerOuter),
"((0.5 0.5 {}) (0.5 0.5 {}))".format(expLongLowerOffset, 1/expLongLowerOffset),
1, 1, 1, 1],
grading="edgeGrading", ret=True),
# trailing edge domain
OFiO.writeBlock(blockMeshToolbox.make3dBlock([7,11,10,5], nVert2D),
[int(v**meshRefParam) for v in [nCellsOuter, nCellsTe, nCellsSpan]],
[expOuter, expOuter, expOuter, expOuter,
"((0.5 0.5 {}) (0.5 0.5 {}))".format(expTeOffset, 1/expTeOffset),
"((0.5 0.5 {}) (0.5 0.5 {}))".format(expTeOuter, 1/expTeOuter),
"((0.5 0.5 {}) (0.5 0.5 {}))".format(expTeOuter, 1/expTeOuter),
"((0.5 0.5 {}) (0.5 0.5 {}))".format(expTeOffset, 1/expTeOffset),
1, 1, 1, 1],
grading="edgeGrading", ret=True),
]
# ===
# define edges in 2D
# discard 0th and -1st points to make interpolation work a bit better
# TODO does it really help?
edges = {
"upperSurface": (np.vstack([xUpper[1:-1], yUpper[1:-1]]), 0, 1),
"upperOffsetSurface": (np.vstack([xUpperOffset[1:-1], yUpperOffset[1:-1]]), 4, 5),
"lowerSurface": (np.vstack([xLower[1:-1], yLower[1:-1]]), 2, 3,),
"lowerOffsetSurface": (np.vstack([xLowerOffset[1:-1], yLowerOffset[1:-1]]), 6, 7),
"noseSurface": (np.vstack([xNose[1:-1], yNose[1:-1]]), 0, 2),
"noseOffsetSurface": (np.vstack([xNoseOffset[1:-1], yNoseOffset[1:-1]]), 4, 6),
"teOffsetArc": (np.array([xTeOffset, yTeOffset]), 7, 5),
"upperDomainArc": (np.array([0, Rdomain]), 10, 9),
"fwdDomainArc": (np.array([-Rdomain+x0Circle, 0]), 9, 8),
"lowerDomainArc": (np.array([0, -Rdomain]), 8, 11),
"aftDomainArc": (np.array([Rdomain+x0Circle, 0]), 11, 10),
# "upperSurface": (np.vstack([xUpper, yUpper]), 0, 1),
# "upperOffsetSurface": (np.vstack([xUpperOffset, yUpperOffset]), 4, 5),
# "lowerSurface": (np.vstack([xLower, yLower]), 2, 3,),
# "lowerOffsetSurface": (np.vstack([xLowerOffset, yLowerOffset]), 6, 7),
# "noseSurface": (np.vstack([xNose, yNose]), 0, 2),
# "noseOffsetSurface": (np.vstack([xNoseOffset, yNoseOffset]), 4, 6),
# "teOffsetArc": (np.array([xTeOffset, yTeOffset]), 7, 5),
# "upperDomainArc": (np.array([0, Rdomain]), 10, 9),
# "fwdDomainArc": (np.array([-Rdomain+x0Circle, 0]), 9, 8),
# "lowerDomainArc": (np.array([0, -Rdomain]), 8, 11),
# "aftDomainArc": (np.array([Rdomain+x0Circle, 0]), 11, 10),
}
# ===
# assemble the dictionary
s = ""
s += OFiO.writeHeader(True)
s += OFiO.writeFoamFileLabel(name="blockMeshDict", ret=True)
# TODO could automate the front-back bollocks by defining the edges in 2D
# ---
# write edge interpolation points
for e in edges:
if "Surface" in e:
s += OFiO.writePointsList("{}Front".format(e),
np.vstack((edges[e][0], np.zeros((1,edges[e][0].shape[1])))).T, ret=True)
s += OFiO.writePointsList("{}Back".format(e),
np.vstack((edges[e][0], span+np.zeros((1,edges[e][0].shape[1])))).T, ret=True)
# ---
# write block vertices
s += "convertToMeters {:.6e};\n".format(scale)
s += OFiO.writePointsList("vertices", blockPoints, ret=True)
# ---
# write the blocks
s += "blocks\n"
s += "(\n"
for block in blocks:
s += "\t{}\n".format(block)
s += ");\n"
# ---
# write the edges
s += "edges\n"
s += "(\n"
for e in edges:
if "Surface" in e:
s += "\t"+OFiO.writeEdge("spline", edges[e][1], edges[e][2],
"{}Front".format(e), ret=True)
s += "\t"+OFiO.writeEdge("spline", edges[e][1]+nVert2D, edges[e][2]+nVert2D,
"{}Back".format(e), ret=True)
if "Arc" in e:
s += "\t"+OFiO.writeEdge("arc", edges[e][1], edges[e][2],
np.append(edges[e][0], 0), ret=True)
s += "\t"+OFiO.writeEdge("arc", edges[e][1]+nVert2D, edges[e][2]+nVert2D,
np.append(edges[e][0], span), ret=True)
s += ");\n"
# ---
# write the boundaries
s += "boundary\n"
s += "(\n"
s += OFiO.writeBoundary("outer", blockMeshToolbox.makePatches([[10,9], [9,8], [8,11], [10,11]], nVert2D),
patchType="patch", ret=True)
s += OFiO.writeBoundary("foil", blockMeshToolbox.makePatches([[0,1], [1,3], [3,2], [2,0]], nVert2D),
patchType="wall", ret=True)
frontPatchFaces = [
[1,5,4,0],
[0,4,6,2],
[2,6,7,3],
[3,7,5,1],
[5,10,9,4],
[4,9,8,6],
[6,8,11,7],
[7,11,10,5],
]
backPatchFaces = [[v+nVert2D for v in face] for face in frontPatchFaces]
s += OFiO.writeBoundary("front", frontPatchFaces, patchType="empty", ret=True)
s += OFiO.writeBoundary("back", backPatchFaces, patchType="empty", ret=True)
s += ");\n"
# ---
# write the merge pairs
s += "mergePatchPairs\n"
s += "(\n"
s += ");\n"
# ---
# export to the case
with open(os.path.join(case, "system/blockMeshDict"), "w") as blockMeshDictFile:
blockMeshDictFile.write(s)
# ===
# plot the domain in 2D
if plotDomain:
print("Expansion ratio at the wall {}".format(expWall**(1./(nCellsWall-1.))))
ds = np.min(blockMeshToolbox.blockMeshSpacing(nCellsWall, expWall)) * dsNearZone
print("Wall spacing {}".format(ds))
plt.plot(xu, yu, "k--", xl, yl, "k--")
plt.plot(xuOffset, yuOffset, "k:", xlOffset, ylOffset, "k:")
plt.plot(xNose, yNose, "r", xUpper, yUpper, "b", xLower, yLower, "g")
plt.plot(xNoseOffset, yNoseOffset, "r", xUpperOffset, yUpperOffset, "b", xLowerOffset, yLowerOffset, "g")
for i in range(nVert2D):
plt.plot(blockPoints[i,0], blockPoints[i,1], "k.")
plt.text(blockPoints[i,0], blockPoints[i,1], i, size=14)
i = -1
for e in edges:
if "Arc" in e:
i += 1
plt.plot(edges[e][0][0], edges[e][0][1], "r.")
plt.text(edges[e][0][0], edges[e][0][1], i, size=14, color="r")
t = np.linspace(0, 2.*np.pi, 101)
plt.plot(Rdomain*np.cos(t)+x0Circle, Rdomain*np.sin(t), "k:")
plt.show()
# ===
# UNIT TEST
if __name__ == "__main__":
case = "./standardAirfoilCase"
# test offsets - NACA66mod with t/c = 0.21 and m/c = 0.065
xc = np.array([ 0.00000000e+00, 2.52972302e-04, 5.11170138e-04,
7.74813953e-04, 1.04413844e-03, 1.31939381e-03,
1.60084715e-03, 1.88878404e-03, 2.18351024e-03,
2.48535367e-03, 2.79466660e-03, 3.11182811e-03,
3.43724690e-03, 3.77136444e-03, 4.11465860e-03,
4.46764771e-03, 4.83089531e-03, 5.20501557e-03,
5.59067949e-03, 5.98862219e-03, 6.39965131e-03,
6.82465691e-03, 7.26462304e-03, 7.72064151e-03,
8.19392816e-03, 8.68584244e-03, 9.19791093e-03,
9.73185593e-03, 1.02896304e-02, 1.08734610e-02,
1.14859021e-02, 1.21299027e-02, 1.28088932e-02,
1.35268951e-02, 1.42886660e-02, 1.50998906e-02,
1.59674385e-02, 1.68997180e-02, 1.79071736e-02,
1.90030009e-02, 2.02042075e-02, 2.15332380e-02,
2.30205723e-02, 2.47090867e-02, 2.66618463e-02,
2.89771825e-02, 3.18211507e-02, 3.55088344e-02,
4.07625050e-02, 5.00000000e-02, 8.95833333e-02,
1.29166667e-01, 1.68750000e-01, 2.08333333e-01,
2.47916667e-01, 2.87500000e-01, 3.27083333e-01,
3.66666667e-01, 4.06250000e-01, 4.45833333e-01,
4.85416667e-01, 5.25000000e-01, 5.64583333e-01,
6.04166667e-01, 6.43750000e-01, 6.83333333e-01,
7.22916667e-01, 7.62500000e-01, 8.02083333e-01,
8.41666667e-01, 8.81250000e-01, 9.20833333e-01,
9.60416667e-01, 1.00000000e+00])
xu = np.array([ 0.00000000e+00, -1.60517783e-03, -2.22593706e-03,
-2.63101176e-03, -2.92061388e-03, -3.13405568e-03,
-3.29152685e-03, -3.40498354e-03, -3.48212572e-03,
-3.52823885e-03, -3.54706232e-03, -3.54132506e-03,
-3.51300981e-03, -3.46363127e-03, -3.39427698e-03,
-3.30572139e-03, -3.19851830e-03, -3.07296532e-03,
-2.92920866e-03, -2.76720200e-03, -2.58677028e-03,
-2.38757560e-03, -2.16911223e-03, -1.93075623e-03,
-1.67168960e-03, -1.39094197e-03, -1.08733881e-03,
-7.59497547e-04, -4.05776245e-04, -2.42695565e-05,
3.87277455e-04, 8.31514217e-04, 1.31156833e-03,
1.83116437e-03, 2.39473717e-03, 3.00765344e-03,
3.67641849e-03, 4.40907139e-03, 5.21564852e-03,
6.10888648e-03, 7.10528029e-03, 8.22667792e-03,
9.50287629e-03, 1.09759405e-02, 1.27079799e-02,
1.47962285e-02, 1.74052189e-02, 2.08464061e-02,
2.58360300e-02, 3.47851134e-02, 7.43650814e-02,
1.14716561e-01, 1.55291993e-01, 1.96000472e-01,
2.36856726e-01, 2.77841649e-01, 3.18926216e-01,
3.60076737e-01, 4.01262696e-01, 4.42465928e-01,
4.83674867e-01, 5.24879339e-01, 5.66063575e-01,
6.07207260e-01, 6.48317831e-01, 6.89450361e-01,
7.30636043e-01, 7.71871494e-01, 8.13111879e-01,
8.53488486e-01, 8.91614148e-01, 9.28662752e-01,
9.65290160e-01, 1.00166351e+00])
yu = np.array([ 0. , 0.00319315, 0.004805 , 0.00608989, 0.00720914,
0.00822565, 0.00917165, 0.01006638, 0.01092238, 0.01174847,
0.01255114, 0.01333535, 0.01410502, 0.01486343, 0.01561327,
0.01635687, 0.01709632, 0.01783344, 0.01856996, 0.01930747,
0.02004748, 0.02079154, 0.02154105, 0.02229755, 0.02306253,
0.02383757, 0.02462433, 0.0254246 , 0.02624027, 0.02707344,
0.02792643, 0.0288018 , 0.0297025 , 0.03063184, 0.03159371,
0.03259256, 0.03363381, 0.03472386, 0.03587058, 0.03708368,
0.03837554, 0.0397622 , 0.041265 , 0.04291344, 0.04475001,
0.04683942, 0.04928888, 0.05230153, 0.05633085, 0.06282875,
0.08543216, 0.1031238 , 0.11793925, 0.13058609, 0.14129618,
0.15022948, 0.15746291, 0.16301593, 0.16695981, 0.16930766,
0.17002028, 0.16898637, 0.16597683, 0.16084636, 0.15377405,
0.14507521, 0.13483098, 0.1225152 , 0.1076245 , 0.09025714,
0.07118659, 0.05074654, 0.02920515, 0.00679226])
xl = np.array([ 0. , 0.00211112, 0.00324828, 0.00418064, 0.00500889,
0.00577284, 0.00649322, 0.00718255, 0.00784915, 0.00849895,
0.0091364 , 0.00976498, 0.0103875 , 0.01100636, 0.01162359,
0.01224102, 0.01286031, 0.013483 , 0.01411057, 0.01474445,
0.01538607, 0.01603689, 0.01669836, 0.01737204, 0.01805955,
0.01876263, 0.01948316, 0.02022321, 0.02098504, 0.02177119,
0.02258453, 0.02342829, 0.02430622, 0.02522263, 0.02618259,
0.02719213, 0.02825846, 0.02939036, 0.0305987 , 0.03189712,
0.03330313, 0.0348398 , 0.03653827, 0.03844223, 0.04061571,
0.04315814, 0.04623708, 0.05017126, 0.05568898, 0.06521489,
0.10480159, 0.14361677, 0.18220801, 0.22066619, 0.25897661,
0.29715835, 0.33524045, 0.3732566 , 0.4112373 , 0.44920074,
0.48715847, 0.52512066, 0.56310309, 0.60112607, 0.63918217,
0.67721631, 0.71519729, 0.75312851, 0.79105479, 0.82984485,
0.87088585, 0.91300392, 0.95554317, 0.99833649])
yl = np.array([ 0. , -0.00298213, -0.00437915, -0.00544533, -0.00634181,
-0.00713129, -0.00784588, -0.00850458, -0.00911973, -0.00969994,
-0.01025147, -0.01077903, -0.01128626, -0.01177615, -0.01225106,
-0.012713 , -0.01316369, -0.01360453, -0.01403682, -0.01446165,
-0.01488007, -0.01529297, -0.01570118, -0.01610549, -0.01650664,
-0.01690535, -0.01730229, -0.01769816, -0.01809361, -0.01848938,
-0.01888613, -0.01928466, -0.01968572, -0.02009018, -0.02049898,
-0.02091315, -0.02133391, -0.02176261, -0.02220081, -0.02265046,
-0.02311387, -0.02359392, -0.02409432, -0.02461995, -0.02517753,
-0.02577665, -0.02643296, -0.02717892, -0.02808615, -0.0293734 ,
-0.03284726, -0.03518085, -0.03727422, -0.03917386, -0.04068171,
-0.041652 , -0.04203048, -0.04194298, -0.04157625, -0.04101972,
-0.04024955, -0.0390939 , -0.03721265, -0.03448677, -0.03122782,
-0.02794459, -0.02499887, -0.02228178, -0.01980884, -0.01761415,
-0.01572236, -0.01353848, -0.01062282, -0.00679226])
makeFoilGrid(xc, xu, yu, xl, yl, case,
dsNearZone = 0.1, # size of the near-field mesh zone
Rdomain = 25., # radius of the domain
span = 1., # span
scale = 1.0, # for uniformly scaling the mesh
xcNose = 0.05, # x/c from which to start the nose mesh block
xcAftOverhang = 0.04, # extend offset domain by this much aft of TE for better cell quality
spreadNoseAngle = 5.0, # multiplier for the angle between nose extrusion vectors
spreadTeAngle = 2.5,
#
nCellsSpan = 1, # spanwise no. cells
#
nCellsWall = 100, # wall-normal no. cells in near- and far-field
nCellsOuter = 75,
#
nCellsNose = 35, # no. cells along the foil surface
nCellsTe = 15,
nCellsUpper = 150,
nCellsLower = 150,
#
meshRefParam = 1., # for uniformly scaling default mesh size
#
# foil-normal directions; >1 to cluster points closer to the foil
expOuter = 250.,
expWall = 600.,
# foil surface
expLongUpperLe = 3.,
expLongUpperTe = 3.,
expLongLowerLe = 4.,
expLongLowerTe = 4.,
# at the near-field block edge; | |
background_color=neutral, foreground_color=styles.textcolor)
config.trackLabelArray.append(label)
self.trackDisplayGrid.add_widget(config.trackLabelArray[-1])
label = CheckBox(size_hint_y=None, size_hint_x=.10, color=[1, 1, 1, 4], height=config.baseheight)
label.background_checkbox_normal="." + os.sep + "resources" + os.sep + "ui_images" + os.sep + "checkbox_off.png"
label.background_checkbox_down="." + os.sep + "resources" + os.sep + "ui_images" + os.sep + "checkbox_on.png"
config.trackStatusLabelArray.append(label)
self.trackDisplayGrid.add_widget(config.trackStatusLabelArray[-1])
self.trackDisplay.add_widget(self.trackDisplayGrid)
self.tracksMainBox.add_widget(self.trackDisplay)
self.tracksAItem.add_widget(self.tracksMainBox)
self.rightAccordion.add_widget(self.tracksAItem)
##---------------------------------------------------------------------------------------
# holder panel for custom
##---------------------------------------------------------------------------------------
self.customAccordionItem = AccordionItem(title='Custom', background_normal='resources' + os.sep + 'bg_bars' + os.sep + styles.curr_palette["name"].replace (" ", "_") + '_5.png', background_selected='resources' + os.sep + 'bg_bars' + os.sep + styles.curr_palette["name"].replace (" ", "_") + '_5.png', min_space = config.aiheight)
self.customStackAccordion = Accordion(orientation='vertical', size_hint=(1,1), min_space = config.aiheight)
self.customAccordionItem.add_widget(self.customStackAccordion)
try:
if config.use_custom_panel == True:
self.leftAccordion.add_widget(self.customAccordionItem)
except:
config.use_custom_panel = True
self.leftAccordion.add_widget(self.customAccordionItem)
##---------------------------------------------------------------------------------------
# holder panel for maps
##---------------------------------------------------------------------------------------
self.mapAccordionItem = AccordionItem(title='Maps', background_normal='resources' + os.sep + 'bg_bars' + os.sep + styles.curr_palette["name"].replace (" ", "_") + '_5.png', background_selected='resources' + os.sep + 'bg_bars' + os.sep + styles.curr_palette["name"].replace (" ", "_") + '_5.png', min_space = config.aiheight)
self.mapStackAccordion = Accordion(orientation='vertical', size_hint=(1,1), min_space = config.aiheight)
self.mapAccordionItem.add_widget(self.mapStackAccordion)
try:
if config.use_map_panel == True:
self.leftAccordion.add_widget(self.mapAccordionItem)
except:
config.use_map_panel = True
self.leftAccordion.add_widget(self.mapAccordionItem)
##---------------------------------------------------------------------------------------
# holder panel for generators
##---------------------------------------------------------------------------------------
self.generatorAccordionItem = AccordionItem(title='Generators', background_normal='resources' + os.sep + 'bg_bars' + os.sep + styles.curr_palette["name"].replace (" ", "_") + '_5.png', background_selected='resources' + os.sep + 'bg_bars' + os.sep + styles.curr_palette["name"].replace (" ", "_") + '_5.png', min_space = config.aiheight)
self.generatorStackAccordion = Accordion(orientation='vertical', size_hint=(1,1), min_space = config.aiheight)
self.generatorAccordionItem.add_widget(self.generatorStackAccordion)
self.leftAccordion.add_widget(self.generatorAccordionItem)
##---------------------------------------------------------------------------------------
# holder panel for oracles
##---------------------------------------------------------------------------------------
self.oracleAccordionItem = AccordionItem(title='Oracles', background_normal='resources' + os.sep + 'bg_bars' + os.sep + styles.curr_palette["name"].replace (" ", "_") + '_5.png', background_selected='resources' + os.sep + 'bg_bars' + os.sep + styles.curr_palette["name"].replace (" ", "_") + '_5.png', min_space = config.aiheight)
self.oracleStackAccordion = Accordion(orientation='vertical', size_hint=(1,1), min_space = config.aiheight)
self.oracleAccordionItem.add_widget(self.oracleStackAccordion)
self.leftAccordion.add_widget(self.oracleAccordionItem)
##---------------------------------------------------------------------------------------
# custom oracle panels
##---------------------------------------------------------------------------------------
#self.panelsBox = BoxLayout(orientation="vertical")
for i in oracle_module:
methodToCall = getattr( i, 'initPanel' )
self.oracleStackAccordion.add_widget(methodToCall(self), 1)
##---------------------------------------------------------------------------------------
# custom custom panels
##---------------------------------------------------------------------------------------
#self.panelsBox = BoxLayout(orientation="vertical")
for i in custom_module:
methodToCall = getattr( i, 'initPanel' )
self.customStackAccordion.add_widget(methodToCall(self), 1)
##---------------------------------------------------------------------------------------
# custom maps panels
##---------------------------------------------------------------------------------------
#self.panelsBox = BoxLayout(orientation="vertical")
for i in map_module:
methodToCall = getattr( i, 'initPanel' )
self.mapStackAccordion.add_widget(methodToCall(self))
##---------------------------------------------------------------------------------------
# custom generator panels
##---------------------------------------------------------------------------------------
#self.panelsBox = BoxLayout(orientation="vertical")
for i in gen_module:
methodToCall = getattr( i, 'initPanel' )
self.generatorStackAccordion.add_widget(methodToCall(self), 1)
#-----------------------------------------
### Functions
#-----------------------------------------
# testing
def text_entered(self, *args):
for arg in args:
print(arg)
#---------------------------------------------------------------------------------------
# miscellaneous functions
#---------------------------------------------------------------------------------------
# trap on main textinput so hitting 'enter' submits text instead of a line break
    def key_action(self, *args):
        """Global keyboard handler bound to the window.

        ``args[1]`` is presumably the keycode (space=32, enter=13,
        backtick=96 — TODO confirm against the Kivy binding).
        Behavior:
        - space while the main input has focus: run the "pythy"
          auto-complete oracle, if enabled;
        - enter while the main input has focus: parse and submit the text
          (prefix/suffix tags, dice rolls, oracle queries), then clear it;
        - backtick in debug mode: save a timestamped screenshot.
        Returns True after handling enter so the TextInput does not insert
        a line break.
        """
        #print "got a key event: %s" % list(args)
        #print(self.textInput.text, self.textInput.focus)
        if self.textInput.focus == True and args[1] == 32:
            # trap for pythy
            if config.general['use_pythy_auto_complete'] == True:
                for i in range(len(oracle_module)):
                    if oracle_module[i].__name__ == "pythy":
                        methodToCall = getattr( oracle_module[i], "autoPredict" )
                        answer = methodToCall(self)
        if args[1] == 13 and self.textInput.focus == True:
            #print("Defocus and send text.")
            if len(self.textInput.text) > 0:
                new_text = self.textInput.text
                if config.general['enter_behavior'] != "None":
                    # passthrough stays True only if the text triggers neither
                    # an oracle answer nor a dice roll; it is then logged as a
                    # plain entry with the configured blocktag.
                    passthrough = True
                    is_roll = False
                    roll_result = False
                    answer = False
                    blocktag = config.general['enter_behavior']
                    # -a / -p prefixes or suffixes override the blocktag
                    # ('aside' / 'plain') and are stripped from the text.
                    matchList = [ [ new_text.startswith("-a"), 'aside', new_text[2:] ], [ new_text.startswith("-p"), 'plain', new_text[2:] ], [ new_text.endswith("-a"), 'aside', new_text[:-2] ], [ new_text.endswith("-p"), 'plain', new_text[:-2] ] ]
                    for test in matchList:
                        if test[0]:
                            blocktag = test[1]
                            new_text = test[2].strip()
                    # text starting with a digit (e.g. "2d6") counts as a roll
                    try:
                        is_roll = int(new_text[0])
                    except:
                        is_roll = False
                    if is_roll or "roll" in new_text or "Roll" in new_text:
                        roll_result = parseTextForDice(new_text)
                    # "??" marks an oracle question; collapse it to "?" and
                    # dispatch to the configured oracle (99 = not-found sentinel)
                    if "??" in new_text:
                        new_text = new_text.replace('??', '?')
                        index = 99
                        for i in range(len(oracle_module)):
                            if oracle_module[i].__name__ == config.oracle:
                                index = i
                        if index < 99:
                            methodToCall = getattr( oracle_module[index], config.oracle_func )
                            answer = methodToCall()
                        else:
                            answer = "No oracle found."
                    if answer or (roll_result and not is_roll):
                        updateCenterDisplay(self, new_text, 'query')
                        passthrough = False
                    if answer:
                        updateCenterDisplay(self, answer, 'oracle')
                        passthrough = False
                    if roll_result:
                        updateCenterDisplay(self, roll_result, 'result')
                        passthrough = False
                    if passthrough == True:
                        updateCenterDisplay(self, new_text, blocktag)
                    quicksave(self, config.curr_game_dir)
                    self.textInput.text = ""
                    if config.general['enter_behavior'] == 'plain':
                        checkForTrigger(self)
            return True
        elif args[1] == 96 and config.debug == True: # really sloppy, takes a screenshot on tilde
            timestamp = '{:%Y-%m-%d-%H-%M-%S}'.format(datetime.datetime.now())
            Window.screenshot(name=config.curr_game_dir + 'screenshot_' + timestamp + '.png')
    def pressGenericButton(self, button):
        """Highlight *button* with the accent color while it is pressed.

        The matching release handlers restore the color to ``neutral``.
        """
        button.background_color = accent2
def releaseSave(self, *args):
args[0].background_color = neutral
quicksave(self, config.curr_game_dir)
saveconfig(self, config.curr_game_dir)
# make logs
for i in imports.log_template:
methodToCall = getattr( i, 'makeLogFile' )
methodToCall(self)
updateCenterDisplay(self, "Content and configuration saved.", 'ephemeral')
clearActor(self, *args)
clearThread(self, *args)
# generic function calls
    def miscChartRoll(self, *args):
        """Invoke the chart-roll callable named on the button and show its result.

        ``args[0]`` is the button; its ``function`` attribute holds a string
        that evaluates to a zero-argument callable.
        """
        args[0].background_color = neutral
        # SECURITY: eval() executes arbitrary code from button.function.
        # Presumably the string only ever comes from local chart definitions,
        # never from untrusted input — TODO confirm where .function is set.
        result = eval(args[0].function)()
        updateCenterDisplay(self, result)
# scenario
    def showBlock(self, *args):
        """Reset the button color and display the current scenario block."""
        args[0].background_color = neutral
        showCurrentBlock(self)
    def showExits(self, *args):
        """Reset the button color and display the current scenario exits."""
        args[0].background_color = neutral
        showCurrentExits(self)
#---------------------------------------------------------------------------------------
# PC, threads, tracks, & actor panel copy functions
#---------------------------------------------------------------------------------------
def copyTracksToMain(self, *args):
args[0].background_color = neutral
result = ""
for i in range(len(config.trackLabelArray)):
if len(config.trackLabelArray[i].text) > 0:
result = result + "\n" + config.trackLabelArray[i].text
if config.trackStatusLabelArray[i].active:
result = result + " [X]"
result = "[Tracked] " + result
updateCenterDisplay(self, result, "mechanic1")
def copyActorToMain(self, *args):
args[0].background_color = neutral
result = ""
for i in range(len(config.actorArray)):
result = result + "\n" + config.actorArray[i] + " [" + config.actorStatusArray[i] + "]"
result = "[Actors] " + result
updateCenterDisplay(self, result, "mechanic1")
    def jumpToActor(self, button):
        """Scroll the actor panel to the actor label tied to *button*.

        ``button.value`` is presumably an index into config.actorLabelArray,
        assigned where the index buttons are built — TODO confirm.
        """
        button.background_color = neutral
        value = button.value
        self.actorDisplay.scroll_to(config.actorLabelArray[value], padding=40)
def toggleActorIndexSize(self, button):
button.background_color = neutral
if button.value == 0:
self.actorDisplay.size_hint=(1,.80)
self.actorIndexDisplay.size_hint=(1,.20)
button.value = 1
elif button.value == 1:
self.actorDisplay.size_hint=(1,.30)
self.actorIndexDisplay.size_hint=(1,.70)
button.value = 2
elif button.value == 2:
self.actorDisplay.size_hint=(1,.70)
self.actorIndexDisplay.size_hint=(1,.30)
button.value = 0
config.general['actor_index_state'] = button.value
def copyPCsToMain(self, button):
button.background_color = neutral
result = ""
sheet = button.sheet
for i in range(len(config.pcKeyLabelArray[sheet])):
#if len(config.pcKeyLabelArray[sheet][i].text) > 0:
result = result + " | " + config.pcKeyLabelArray[sheet][i].text + ": " + config.pcValueLabelArray[sheet][i].text
result = "[PC " + str(sheet) + "] " + result
updateCenterDisplay(self, result, "mechanic1")
def copyThreadsToMain(self, *args):
args[0].background_color = neutral
result = ""
for i in range(len(config.threadArray)):
result = result + "\n" + config.threadArray[i] + " [" + config.threadStatusArray[i] + "]"
result = "[Threads] " + result
updateCenterDisplay(self, result, "mechanic1")
#---------------------------------------------------------------------------------------
# submit text input buttons
#---------------------------------------------------------------------------------------
def releaseQuestion(self, *args):
args[0].background_color = neutral
if len(self.textInput.text) > 0:
updateCenterDisplay(self, self.textInput.text, 'query')
index = 99
for i in range(len(oracle_module)):
if oracle_module[i].__name__ == config.oracle:
index = i
if index < 99:
methodToCall = getattr( oracle_module[index], config.oracle_func )
answer = methodToCall()
else:
answer = "No oracle found."
updateCenterDisplay(self, answer, 'oracle')
self.textInput.text = ""
quicksave(self, config.curr_game_dir)
    def getSeed(self, *args):
        """Generate a story seed via the configured seed function.

        Echoes any pending text input as a query first, then dispatches to the
        configured module/function; on any failure falls back to a simple
        action+subject seed.
        """
        args[0].background_color = neutral
        if len(self.textInput.text) > 0:
            updateCenterDisplay(self, self.textInput.text, 'query')
        # -9 is a "not found" sentinel. NOTE(review): it is also a valid
        # negative index — with 9+ oracle modules loaded a missed lookup would
        # silently pick the 9th-from-last module instead of raising into the
        # fallback below — TODO confirm this can't happen in practice.
        index=-9
        match = 'seeds'
        if config.general['seed_func'] == 'useMythicComplex':
            match = 'mythic'
        for i in range(len(oracle_module)):
            #print(oracle_module[i].__name__)
            if oracle_module[i].__name__ == match:
                index = i
        try:
            methodToCall = getattr( oracle_module[index], config.general['seed_func'] )
            # three-part seeds take an extra subtype argument
            if config.general['seed_func'] == 'useThreePartSeed':
                methodToCall(self, config.general['seed_type'], config.general['seed_subtype'])
            else:
                methodToCall(self, config.general['seed_type'])
        except:
            # NOTE(review): bare except also hides genuine errors inside seed
            # modules; the fallback emits a minimal two-part seed instead.
            updateCenterDisplay(self, "[Seed] " + seed_action() + " " + seed_subject(), 'oracle')
        quicksave(self, config.curr_game_dir)
        self.textInput.text = ""
    def getSeedAlternate(self, *args):
        """Generate a story seed via the alternate seed function.

        Like getSeed, but always dispatches to the 'seeds' module and uses the
        seed_alt_* configuration keys; falls back to a simple action+subject
        seed on any failure.
        """
        args[0].background_color = neutral
        if len(self.textInput.text) > 0:
            updateCenterDisplay(self, self.textInput.text, 'query')
        # -9 is a "not found" sentinel. NOTE(review): it is also a valid
        # negative index — with 9+ oracle modules a missed lookup would pick
        # the wrong module instead of raising into the fallback — TODO confirm.
        index=-9
        for i in range(len(oracle_module)):
            if oracle_module[i].__name__ == 'seeds':
                index = i
        try:
            methodToCall = getattr( oracle_module[index], config.general['seed_alt_func'] )
            # three-part seeds take an extra subtype argument
            if config.general['seed_alt_func'] == 'useThreePartSeed':
                methodToCall(self, config.general['seed_alt_type'], config.general['seed_alt_subtype'])
            else:
                methodToCall(self, config.general['seed_alt_type'])
        except:
            # NOTE(review): bare except also hides genuine errors inside seed
            # modules; the fallback emits a minimal two-part seed instead.
            updateCenterDisplay(self, "[Seed] " + seed_action() + " " + seed_subject(), 'oracle')
        quicksave(self, config.curr_game_dir)
        self.textInput.text = ""
def releaseRoll(self, *args):
self.rollSubmitButton.background_color = neutral
result = rollDice(self.textInput.text)
updateCenterDisplay(self, result, 'result')
quicksave(self, config.curr_game_dir)
if result != "Please use standard dice notation, ie, 1d10 or 2d6x3.":
self.textInput.text = ""
def releasePlayer(self, *args):
self.playerSubmitButton.background_color = neutral
if len(self.textInput.text) > 0:
new_text = self.textInput.text
updateCenterDisplay(self, new_text, 'plain')
quicksave(self, config.curr_game_dir)
self.textInput.text = ""
checkForTrigger(self)
def releaseDM(self, *args):
self.dmSubmitButton.background_color = neutral
if len(self.textInput.text) > 0:
new_text = self.textInput.text
updateCenterDisplay(self, new_text, "aside")
quicksave(self, config.curr_game_dir)
self.textInput.text = ""
def releaseThread(self, *args):
self.threadSubmitButton.background_color = neutral
if len(self.textInput.text) > 0:
new_text = "" + self.textInput.text + ""
updateThreadDisplay(self, new_text, "Current")
updateCenterDisplay(self, "[New Thread] " + new_text, 'aside')
quicksave(self, config.curr_game_dir)
self.textInput.text = ""
def releaseAddActor(self, *args):
self.addActorButton.background_color = neutral
if len(self.textInput.text) > 0:
new_text = self.textInput.text
updateActorDisplay(self, new_text, 'Current')
updateCenterDisplay(self, "[New Actor] " + new_text, 'aside')
updateActorIndex(self)
quicksave(self, config.curr_game_dir)
self.textInput.text = ""
#---------------------------------------------------------------------------------------
# center status bar
#---------------------------------------------------------------------------------------
def releaseTrackerUp(self, *args):
args[0].background_color = neutral
config.general['tracker'] = config.general['tracker'] + 1
if config.general['use_main_tracker_for_mythic'] == True:
for i in range(len(oracle_module)):
if oracle_module[i].__name__ == 'mythic':
methodToCall = getattr( oracle_module[i], 'setChaosFactor' )
result = methodToCall("up")
config.general['tracker'] = config.general['mythic_chaos_factor']
updateCenterDisplay(self, result, 'result')
self.trackLabel.text = str(config.general['tracker'])
def releaseTrackerDown(self, *args):
args[0].background_color = neutral
config.general['tracker'] = config.general['tracker'] - 1
if config.general['use_main_tracker_for_mythic'] == True:
for i in range(len(oracle_module)):
if oracle_module[i].__name__ == 'mythic':
| |
<gh_stars>0
import base64
import json
import os
import pickle
import tempfile
import time
from io import BytesIO
from tempfile import NamedTemporaryFile
import cv2
import imageio
import numpy as np
import streamlit as st
import torch
from numpy.lib.type_check import imag
from PIL import Image
# from streamlit_webrtc import VideoTransformerBase, webrtc_streamer
from termcolor import colored, cprint
from datasets.FreiHAND.kinematics import mano_to_mpii
from utils.draw3d import (display_image_with_mesh_joints, display_video_with_mesh_joints,
save_a_image_with_mesh_joints)
from utils.progress.bar import Bar
from utils.read import save_mesh
from utils.transforms import rigid_align
from utils.vis import (base_transform, cnt_area, inv_base_tranmsform, map2uv,
registration, tensor2array)
# Configure the Streamlit page once at import time: full browser width and a
# custom browser-tab title. Must run before any other Streamlit call.
st.set_page_config(
    layout="wide",
    page_title="Hand Pose Estimation"
)
def get_image_download_link(img, filename, text):
    """Return an HTML anchor tag that downloads *img* as a JPEG.

    img      -- an image object exposing PIL's save(buffer, format=...) API.
    filename -- file name suggested to the browser for the download.
    text     -- visible link text.
    """
    buffered = BytesIO()
    img.save(buffered, format="JPEG")
    img_str = base64.b64encode(buffered.getvalue()).decode()
    # Bug fix: the download attribute was hard-coded to the literal "(unknown)",
    # so the `filename` argument was silently ignored by every caller.
    href = f'<a href="data:file/jpg;base64,{img_str}" download="{filename}">{text}</a>'
    return href
class Runner(object):
    def __init__(self, args, model, faces, device):
        """Bundle the model, mesh topology and device for train/eval/demo runs.

        args   -- parsed experiment configuration.
        model  -- the hand-mesh network; exposes .loss used during training.
        faces  -- sequence of face-index arrays; faces[0] is the mesh topology.
        device -- torch device tensors are moved to.
        """
        super(Runner, self).__init__()
        self.args = args
        self.model = model
        self.faces = faces
        self.device = device
        # Face connectivity as a LongTensor on the target device (passed to the loss).
        self.face = torch.from_numpy(self.faces[0].astype(np.int64)).to(self.device)
    def set_train_loader(self, train_loader, epochs, optimizer, scheduler, writer, board, start_epoch=0):
        """Attach everything needed for training and derive bookkeeping state.

        Picks the joint regressor matching the dataset: Human3.6M supplies its
        own regressor plus an evaluation-joint subset; other datasets supply a
        single regressor. `std` is the dataset's scale constant on the device.
        """
        self.train_loader = train_loader
        self.max_epochs = epochs
        self.optimizer = optimizer
        self.scheduler = scheduler
        self.writer = writer
        self.board = board
        self.start_epoch = start_epoch
        # Last finished epoch; 0 when training from scratch.
        self.epoch = max(start_epoch - 1, 0)
        # Global step counter resumed from a checkpoint (steps/epoch * epochs done).
        self.total_step = self.start_epoch * (len(self.train_loader.dataset) // self.writer.args.batch_size)
        self.loss = self.model.loss
        if self.args.dataset=='Human36M':
            self.j_regressor = self.train_loader.dataset.h36m_joint_regressor
            self.j_eval = self.train_loader.dataset.h36m_eval_joint
        else:
            self.j_regressor = self.train_loader.dataset.j_regressor
        self.std = train_loader.dataset.std.to(self.device)
    def set_eval_loader(self, eval_loader):
        """Attach the evaluation loader.

        Outside the 'train' phase this also adopts the loader's regressor and
        std so eval/demo can run standalone, and disables board logging.
        """
        self.eval_loader = eval_loader
        if self.args.phase != 'train':
            if self.args.dataset=='Human36M':
                self.j_regressor = self.eval_loader.dataset.h36m_joint_regressor
                self.j_eval = self.eval_loader.dataset.h36m_eval_joint
            else:
                self.j_regressor = self.eval_loader.dataset.j_regressor
            self.std = eval_loader.dataset.std.to(self.device)
            self.board = None
    def set_demo(self, args):
        """Prepare a 21-joint regressor for demo mode from the MANO model file.

        MANO's J_regressor covers 16 hand joints (rows 0-15); rows 16-20 pick
        fixed fingertip vertices via one-hot entries to extend it to 21 joints.
        """
        # NOTE(review): pickle.load is acceptable only because MANO_RIGHT.pkl is
        # a trusted asset shipped with the project; never unpickle untrusted data.
        with open(os.path.join(args.work_dir, 'template', 'MANO_RIGHT.pkl'), 'rb') as f:
            mano = pickle.load(f, encoding='latin1')
        self.j_regressor = np.zeros([21, 778])
        self.j_regressor[:16] = mano['J_regressor'].toarray()
        for k, v in {16: 333, 17: 444, 18: 672, 19: 555, 20: 744}.items():
            self.j_regressor[k, v] = 1
        # Fixed scale constant used to de-normalise predicted meshes in demo mode.
        self.std = torch.tensor(0.20)
def train(self):
best_error = np.float('inf')
for epoch in range(self.start_epoch, self.max_epochs + 1):
self.epoch = epoch
t = time.time()
train_loss = self.train_a_epoch()
t_duration = time.time() - t
self.scheduler.step()
info = {
'current_epoch': self.epoch,
'epochs': self.max_epochs,
'train_loss': train_loss,
't_duration': t_duration
}
self.writer.print_info(info)
if self.args.dataset=='Human36M':
test_error = self.evaluation_withgt()
if test_error < best_error:
best_error = test_error
self.writer.save_checkpoint(self.model, self.optimizer, self.scheduler, self.epoch, best=True)
self.writer.save_checkpoint(self.model, self.optimizer, self.scheduler, self.epoch, last=True)
if self.args.dataset=='FreiHAND' and self.eval_loader is not None:
self.evaluation()
    def board_img(self, phase, n_iter, img, **kwargs):
        """Log the input image and any available mask/uv (gt, pred, prior) maps
        to `self.board` under the given phase tag at step n_iter."""
        # print(rendered_mask.shape, rendered_mask.max(), rendered_mask.min())
        self.board.add_image(phase + '/img', tensor2array(img), n_iter)
        if kwargs.get('mask_pred') is not None:
            self.board.add_image(phase + '/mask_gt', tensor2array(kwargs['mask_gt'][0]), n_iter)
            self.board.add_image(phase + '/mask_pred', tensor2array(kwargs['mask_pred'][0]), n_iter)
        if kwargs.get('uv_pred') is not None:
            # Heatmaps are summed over the joint channel and clamped to [0, 1] for display.
            self.board.add_image(phase + '/uv_gt', tensor2array(kwargs['uv_gt'][0].sum(dim=0).clamp(max=1)), n_iter)
            self.board.add_image(phase + '/uv_pred', tensor2array(kwargs['uv_pred'][0].sum(dim=0).clamp(max=1)), n_iter)
        if kwargs.get('uv_prior') is not None:
            self.board.add_image(phase + '/uv_prior', tensor2array(kwargs['uv_prior'][0].sum(dim=0).clamp(max=1)), n_iter)
def board_scalar(self, phase, n_iter, lr=None, **kwargs):
for key, val in kwargs.items():
if 'loss' in key:
self.board.add_scalar(phase + '/' + key, val.item(), n_iter)
if lr:
self.board.add_scalar('lr', lr, n_iter)
def phrase_data(self, data):
for key, val in data.items():
if isinstance(val, list):
data[key] = [d.to(self.device) for d in data[key]]
else:
data[key] = data[key].to(self.device)
return data
    def train_a_epoch(self):
        """Train for one epoch and return the mean loss over its batches."""
        self.model.train()
        total_loss = 0
        bar = Bar(colored("TRAIN", color='blue'), max=len(self.train_loader))
        for step, data in enumerate(self.train_loader):
            t = time.time()
            data = self.phrase_data(data)
            self.optimizer.zero_grad()
            out = self.model(data['img'])
            # Supervise the mesh plus whatever auxiliary outputs (uv heatmaps,
            # masks, priors) exist; .get() leaves missing entries as None.
            loss = self.loss(pred=out['mesh_pred'], gt=data.get('mesh_gt'), uv_pred=out.get('uv_pred'), uv_gt=data.get('uv_gt'),
                             mask_pred=out.get('mask_pred'), mask_gt=data.get('mask_gt'), face=self.face,
                             uv_prior=out.get('uv_prior'), uv_prior2=out.get('uv_prior2'), mask_prior=out.get('mask_prior'))
            loss['loss'].backward()
            total_loss += loss['loss'].item()
            self.optimizer.step()
            step_duration = time.time() - t
            self.total_step += 1
            self.board_scalar('train', self.total_step, self.optimizer.param_groups[0]['lr'], **loss)
            bar.suffix = (
                '({epoch}/{max_epoch}:{batch}/{size}) '
                'time: {time:.3f} | '
                'loss: {loss:.4f} | '
                'l1_loss: {l1_loss:.4f} | '
                'lr: {lr:.6f} | '
            ).format(epoch=self.epoch, max_epoch=self.max_epochs, batch=step, size=len(self.train_loader),
                     loss=loss['loss'], l1_loss=loss['l1_loss'], time=step_duration,
                     lr=self.optimizer.param_groups[0]['lr'])
            bar.next()
            # Periodic console logging on top of the progress bar.
            if self.total_step % 100 == 0:
                info = {
                    'train_loss': loss['loss'],
                    'epoch': self.epoch,
                    'total_step': self.total_step,
                    'step_duration': step_duration,
                    'lr': self.optimizer.param_groups[0]['lr']
                }
                self.writer.print_step(info)
        bar.finish()
        # Log sample images from the last batch of the epoch.
        self.board_img('train', self.epoch, data['img'][0], mask_gt=data.get('mask_gt'), mask_pred=out.get('mask_pred'), uv_gt=data.get('uv_gt'), uv_pred=out.get('uv_pred'), uv_prior=out.get('uv_prior'))
        return total_loss / len(self.train_loader)
def evaluation(self):
if self.eval_loader is None:
raise Exception('Please set_eval_loader before evaluation')
args = self.args
self.model.eval()
xyz_pred_list, verts_pred_list = list(), list()
bar = Bar(colored("EVAL", color='green'), max=len(self.eval_loader))
with torch.no_grad():
for step, data in enumerate(self.eval_loader):
data = self.phrase_data(data)
out = self.model(data['img'])
# silhouette
mask_pred = out.get('mask_pred')
if mask_pred is not None:
mask_pred = (mask_pred[0] > 0.3).cpu().numpy().astype(np.uint8)
mask_pred = cv2.resize(mask_pred, (data['img'].size(3), data['img'].size(2)))
try:
contours, _ = cv2.findContours(mask_pred, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours.sort(key=cnt_area, reverse=True)
poly = contours[0].transpose(1, 0, 2).astype(np.int32)
except:
poly = None
else:
mask_pred = np.zeros([data['img'].size(3), data['img'].size(2)])
poly = None
# vertex
pred = out['mesh_pred'][0] if isinstance(out['mesh_pred'], list) else out['mesh_pred']
vertex = (pred[0].cpu() * self.std.cpu()).numpy()
uv_pred = out['uv_pred']
if uv_pred.ndim == 4:
uv_point_pred, uv_pred_conf = map2uv(uv_pred.cpu().numpy(), (data['img'].size(2), data['img'].size(3)))
else:
uv_point_pred, uv_pred_conf = (uv_pred * args.size).cpu().numpy(), [None,]
vertex, align_state = registration(vertex, uv_point_pred[0], self.j_regressor, data['K'][0].cpu().numpy(), args.size, uv_conf=uv_pred_conf[0], poly=poly)
vertex2xyz = mano_to_mpii(np.matmul(self.j_regressor, vertex))
xyz_pred_list.append(vertex2xyz)
verts_pred_list.append(vertex)
# if args.phase == 'eval':
# save_a_image_with_mesh_joints(inv_base_tranmsform(data['img'][0].cpu().numpy())[:, :, ::-1], mask_pred, poly, data['K'][0].cpu().numpy(), vertex, self.faces[0], uv_point_pred[0], vertex2xyz,
# os. path.join(args.out_dir, 'eval', str(step) + '_plot.jpg'))
bar.suffix = '({batch}/{size})' .format(batch=step+1, size=len(self.eval_loader))
bar.next()
bar.finish()
# save to a json
xyz_pred_list = [x.tolist() for x in xyz_pred_list]
verts_pred_list = [x.tolist() for x in verts_pred_list]
with open(os.path.join(args.out_dir, args.exp_name + '.json'), 'w') as fo:
json.dump([xyz_pred_list, verts_pred_list], fo)
cprint('Save json file at ' + os.path.join(args.out_dir, args.exp_name + '.json'), 'green')
    def evaluation_withgt(self):
        """Evaluate on the eval loader against ground truth; return PA-MPJPE.

        Computes MPJPE and Procrustes-aligned MPJPE (millimetres) over the
        dataset's evaluation-joint subset; timing skips the first 10 batches
        as warm-up.
        """
        # self.writer.print_str('Eval error on set')
        self.model.eval()
        joint_errors = []
        pa_joint_errors = []
        duration = [0,]
        bar = Bar(colored("TEST", color='yellow'), max=len(self.eval_loader))
        with torch.no_grad():
            for i, data in enumerate(self.eval_loader):
                data = self.phrase_data(data)
                t1 = time.time()
                out = self.model(data['img'])
                torch.cuda.synchronize()
                if i > 10:
                    duration.append((time.time()-t1)*1000)
                gt = data['mesh_gt'][0] if isinstance(data['mesh_gt'], list) else data['mesh_gt']
                xyz_gt = data['xyz_gt']
                pred = out['mesh_pred'][0] if isinstance(out['mesh_pred'], list) else out['mesh_pred']
                # De-normalise back to metric scale via the dataset std.
                pred = (pred[0].cpu() * self.std.cpu()).numpy()
                joint_pred = np.dot(self.j_regressor, pred)
                gt = (gt[0].cpu() * self.std.cpu()).numpy()
                xyz_gt = (xyz_gt[0].cpu() * self.std.cpu()).numpy()
                # * 1000: metres -> millimetres for the standard MPJPE metrics.
                rel_joint_pred = joint_pred[self.j_eval, :] * 1000
                rel_joint_gt = xyz_gt[self.j_eval, :] * 1000
                joint_errors.append(np.sqrt(np.sum((rel_joint_gt - rel_joint_pred) ** 2, axis=1)))
                pa_joint_errors.append(np.sqrt(np.sum((rel_joint_gt - rigid_align(rel_joint_pred, rel_joint_gt)) ** 2, axis=1)))
                bar.suffix = (
                    '({batch}/{size}) '
                    'MPJPE:{j:.3f} '
                    'PA-MPJPE:{pa_j:.3f} '
                    'T:{t:.0f}'
                ).format(batch=i, size=len(self.eval_loader), j=np.array(joint_errors).mean(), pa_j=np.array(pa_joint_errors).mean(), t=np.array(duration).mean())
                bar.next()
        bar.finish()
        j_error = np.array(joint_errors).mean()
        pa_j_error = np.array(pa_joint_errors).mean()
        if self.board is not None:
            self.board_scalar('test', self.epoch, **{'j_loss': j_error, 'pa_j_loss': pa_j_error})
            self.board_img('test', self.epoch, data['img'][0], uv_gt=data['uv_gt'], uv_pred=out['uv_pred'], mask_gt=data.get('mask_gt'), mask_pred=out.get('mask_pred'))
        return pa_j_error
    def demo_image(self, img):
        """Run the model on a single image and return it annotated with the
        predicted mesh and joints.

        img -- input image array; resized to (args.size, args.size) first.
        """
        args = self.args
        self.model.eval()
        with torch.no_grad():
            image = cv2.resize(img, (args.size, args.size))
            input = torch.from_numpy(base_transform(image, size=args.size)).unsqueeze(0).to(self.device)
            # Default pinhole intrinsics for a 224px crop, rescaled to args.size.
            K = np.array([[500, 0, 112], [0, 500, 112], [0, 0, 1]])
            K[0, 0] = K[0, 0] / 224 * args.size
            K[1, 1] = K[1, 1] / 224 * args.size
            K[0, 2] = args.size // 2
            K[1, 2] = args.size // 2
            out = self.model(input)
            # vertex
            pred = out['mesh_pred'][0] if isinstance(out['mesh_pred'], list) else out['mesh_pred']
            vertex = (pred[0].cpu() * self.std.cpu()).numpy()
            uv_pred = out['uv_pred']
            # 4-D uv output means per-joint heatmaps; otherwise normalised 2D points.
            if uv_pred.ndim == 4:
                uv_point_pred, uv_pred_conf = map2uv(uv_pred.cpu().numpy(), (input.size(2), input.size(3)))
            else:
                uv_point_pred, uv_pred_conf = (uv_pred * args.size).cpu().numpy(), [None,]
            vertex, align_state = registration(vertex, uv_point_pred[0], self.j_regressor, K, args.size, uv_conf=uv_pred_conf[0], poly=None)
            vertex2xyz = mano_to_mpii(np.matmul(self.j_regressor, vertex))
            # [..., ::-1] flips the channel order for drawing.
            image_out = display_image_with_mesh_joints(image[..., ::-1], K, vertex, self.faces[0], uv_point_pred[0], vertex2xyz)
            return image_out
def demo_video(self, video_capture):
args = self.args
self.model.eval()
with torch.no_grad():
cap = video_capture
while True:
ret, img = cap.read()
if not ret:
continue
K = np.array([[500, 0, 112], [0, 500, 112], [0, 0, 1]])
K[0, 0] = K[0, 0] / 224 * args.size
K[1, 1] = K[1, 1] / 224 * args.size
K[0, 2] = args.size // 2
K[1, 2] = args.size // 2
image = cv2.resize(img, (args.size, args.size))
input = torch.from_numpy(base_transform(image, size=args.size)).unsqueeze(0).to(self.device)
start = time.time()
out = self.model(input)
# vertex
pred = out['mesh_pred'][0] if isinstance(out['mesh_pred'], list) else out['mesh_pred']
vertex = (pred[0].cpu() * self.std.cpu()).numpy()
uv_pred = out['uv_pred']
if uv_pred.ndim == 4:
uv_point_pred, uv_pred_conf = map2uv(uv_pred.cpu().numpy(), (input.size(2), input.size(3)))
else:
uv_point_pred, uv_pred_conf = (uv_pred * args.size).cpu().numpy(), [None,]
vertex, align_state = registration(vertex, uv_point_pred[0], self.j_regressor, K, args.size, uv_conf=uv_pred_conf[0], poly=None)
vertex2xyz = mano_to_mpii(np.matmul(self.j_regressor, vertex))
image_out = display_video_with_mesh_joints(image[..., ::-1], K, vertex, self.faces[0], uv_point_pred[0], vertex2xyz)
end=time.time()
fps = (end-start) ** -1
if fps:
cv2.putText(image_out, f'FPS: {int(fps)}', (5,15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,0), 1)
frame_out = cv2.resize(image_out, (350, 350))
return frame_out
def demo(self):
st.title("Hand Pose Estimation")
activities = ["Image","Video"]
# st.set_option('deprecation.showfileUploaderEncoding', False)
st.sidebar.markdown("# Choose Input Source")
choice = st.sidebar.selectbox("Choose preferred mode:", activities)
link = '[©Developed by ITIHandPoseTeam](https://github.com/itihandpose)'
st.sidebar.markdown(link, unsafe_allow_html=True)
if choice == 'Image':
st.markdown(
'''<p style='text-align: left; font-size: 15px'>Hand Pose Estimation is done using <a href="https://arxiv.org/abs/2112.02753">MobRecon</a></p>''',
unsafe_allow_html=True)
buffer = st.file_uploader("Choose an image", type=['jpg', 'jpeg', 'jfif', 'png'])
temp_file = NamedTemporaryFile(delete=False)
if buffer:
temp_file.write(buffer.getvalue())
img = cv2.imread(temp_file.name)[..., ::-1]
img = cv2.resize(img, (350, 350))
img_out = self.demo_image(img)
imgRGB=cv2.cvtColor(img_out,cv2.COLOR_BGR2RGB)
place_h = st.columns(2)
place_h[0].image(img)
place_h[1].image(imgRGB)
result = Image.fromarray(imgRGB)
st.markdown(get_image_download_link(result, "out.jpg", 'Download image'), unsafe_allow_html=True)
if choice == 'Video':
st.markdown(
'''<p style='text-align: left; font-size: 15px'>Hand Pose Estimation | |
"thePed": """: The ped to check. """,
"headState": """: head state, use true if you want the ped be headless, use false to give back the head. """
},
result='returns true if successful, false otherwise' ,
),
url='setPedHeadless',
),
field=FunctionOOPField(
name='headless',
types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
),
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
],
),
CompoundOOPData(
server=[
FunctionOOP(
description=None,
base_function_name="setPedOnFire",
class_name='ped',
method=FunctionData(
signature=FunctionSignature(
name='setOnFire',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='thePed',
argument_type=FunctionType(
names=['ped'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='isOnFire',
argument_type=FunctionType(
names=['bool'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function can be used to set a ped on fire or extinguish a fire on it.' ,
arguments={
"thePed": """The ped that we want to set/unset """,
"isOnFire": """true to set the ped on fire, false to extinguish any fire on him """
},
result='returns true if successful, false otherwise' ,
),
url='setPedOnFire',
),
field=FunctionOOPField(
name='onFire',
types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
),
is_static=False,
)
],
client=[
FunctionOOP(
description=None,
base_function_name="setPedOnFire",
class_name='ped',
method=FunctionData(
signature=FunctionSignature(
name='setOnFire',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='thePed',
argument_type=FunctionType(
names=['ped'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='isOnFire',
argument_type=FunctionType(
names=['bool'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function can be used to set a ped on fire or extinguish a fire on it.' ,
arguments={
"thePed": """The ped that we want to set/unset """,
"isOnFire": """true to set the ped on fire, false to extinguish any fire on him """
},
result='returns true if successful, false otherwise' ,
),
url='setPedOnFire',
),
field=FunctionOOPField(
name='onFire',
types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
),
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="setPedOxygenLevel",
class_name='ped',
method=FunctionData(
signature=FunctionSignature(
name='setOxygenLevel',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='thePed',
argument_type=FunctionType(
names=['ped'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='oxygen',
argument_type=FunctionType(
names=['float'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function allows you to set the oxygen level of a ped.' ,
arguments={
"thePed": """: the ped whose oxygen level you want to modify. """,
"oxygen": """: the amount of oxygen you want to set on the ped. Native values are from 0 to 1000. Each of the stamina (22) and underwater stamina (225) Template:Stats|stat maximum adds a bonus of 1500. So the maximum oxygen level is 4000. """
},
result='returns true if the oxygen level was changed succesfully. returns false if an invalid ped and/or oxygen level was specified.' ,
),
url='setPedOxygenLevel',
),
field=FunctionOOPField(
name='oxygenLevel',
types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
),
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
],
),
CompoundOOPData(
server=[
],
client=[
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="setPedVoice",
class_name='ped',
method=FunctionData(
signature=FunctionSignature(
name='setVoice',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='thePed',
argument_type=FunctionType(
names=['ped'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='voiceType',
argument_type=FunctionType(
names=['string'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='voiceName',
argument_type=FunctionType(
names=['string'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='Changes the voice of a ped.' ,
arguments={
"thePed": """the ped whose voice to change. """,
"voiceType": """the voice type. See ped voices for possible types. """,
"voiceName": """the voice name within the specified type. See ped voices for possible voices. """
},
result='returns true when the voice was successfully set, false otherwise.' ,
),
url='setPedVoice',
),
field=None,
is_static=False,
)
],
),
CompoundOOPData(
server=[
FunctionOOP(
description=None,
base_function_name="setPedWalkingStyle",
class_name='Ped|ped',
method=FunctionData(
signature=FunctionSignature(
name='setWalkingStyle',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='thePed',
argument_type=FunctionType(
names=['ped'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='style',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='Sets the walking style of a ped. A walking style consists of a set of animations that are used for walking, running etc.' ,
arguments={
"thePed": """the ped whose walking style to change. """,
"style": """the walking style to set.
The possible walking styles are: """
},
result='returns true if successful, false otherwise.' ,
),
url='setPedWalkingStyle',
),
field=FunctionOOPField(
name='walkingStyle',
types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
),
is_static=False,
)
],
client=[
FunctionOOP(
description=None,
base_function_name="setPedWalkingStyle",
class_name='Ped|ped',
method=FunctionData(
signature=FunctionSignature(
name='setWalkingStyle',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='thePed',
argument_type=FunctionType(
names=['ped'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='style',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='Sets the walking style of a ped. A walking style consists of a set of animations that are used for walking, running etc.' ,
arguments={
"thePed": """the ped whose walking style to change. """,
"style": """the walking style to set.
The possible walking styles are: """
},
result='returns true if successful, false otherwise.' ,
),
url='setPedWalkingStyle',
),
field=FunctionOOPField(
name='walkingStyle',
types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
),
is_static=False,
)
],
),
CompoundOOPData(
server=[
FunctionOOP(
description=None,
base_function_name="setPedWeaponSlot",
class_name='ped',
method=FunctionData(
signature=FunctionSignature(
name='setWeaponSlot',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='thePed',
argument_type=FunctionType(
names=['ped'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='weaponSlot',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function changes the selected weapon slot of a ped.' ,
arguments={
"thePed": """the ped whose weapon slot you want to set. In a clientside script, this cannot be used on remote players. """,
"weaponSlot": """the weapon slot to set. """
},
result='returns true if successful in setting the peds equipped weapon slot, false otherwise.' ,
),
url='setPedWeaponSlot',
),
field=FunctionOOPField(
name='weaponSlot',
types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
),
is_static=False,
)
],
client=[
FunctionOOP(
description=None,
base_function_name="setPedWeaponSlot",
class_name='ped',
method=FunctionData(
signature=FunctionSignature(
name='setWeaponSlot',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='thePed',
argument_type=FunctionType(
names=['ped'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='weaponSlot',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function changes the selected weapon slot of a ped.' ,
arguments={
"thePed": """the ped whose weapon slot you want to set. In a clientside script, this cannot be used on remote players. """,
"weaponSlot": """the weapon slot to set. """
},
result='returns true if successful in setting the peds equipped weapon slot, false otherwise.' ,
),
url='setPedWeaponSlot',
),
field=FunctionOOPField(
name='weaponSlot',
types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
),
is_static=False,
)
],
),
CompoundOOPData(
server=[
FunctionOOP(
description=None,
base_function_name="setPedWearingJetpack",
class_name='ped',
method=FunctionData(
signature=FunctionSignature(
name='setWearingJetpack',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='thePed',
argument_type=FunctionType(
names=['ped'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='state',
argument_type=FunctionType(
names=['bool'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function is used to give or take a jetpack from a ped, it wont work if the ped is in a vehicle.\nAs such, you should either expect it to fail sometimes, or repeatedly try to give a jetpack every second or so until isPedWearingJetpack returns true. Alternatively, you can force the ped into a safe position (e.g. standing on the ground) before giving the jetpack, or use a pickup to handle it.}}' ,
arguments={
"thePed": """The ped you want to give a jetpack to. """,
"state": """A boolean representing whether to give or take the jetpack. """
},
result='returns true if a jetpack was successfully set for the ped, false if setting it failed.' ,
),
url='setPedWearingJetpack',
),
field=FunctionOOPField(
name='jetpack',
types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
),
is_static=False,
)
],
client=[
],
),
CompoundOOPData(
server=[
FunctionOOP(
description="""Set the variable to nil to execute [[removePedFromVehicle]]""",
base_function_name="warpPedIntoVehicle",
class_name='ped',
method=FunctionData(
signature=FunctionSignature(
name='warpIntoVehicle',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='thePed',
argument_type=FunctionType(
names=['ped'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
| |
np.rot90(self.im)
self.update_image()
def on_rotate(self):
self.cursor = Cursor(self.ax, useblit=True, color='red', linewidth=1)
position = self.canvas.get_points()
x = (position[0][0], position[1][0])
y = (position[0][1], position[1][1])
self.im = rotate_image(self.im, x, y)
self.update_image()
def update_image(self, fig=None, ax=None):
try:
self.verticalLayout_2.removeWidget(self.canvas)
self.canvas.setParent(None)
self.verticalLayout_2.removeWidget(self.navigation_toolbar)
self.navigation_toolbar.setParent(None)
self.show_image(fig, ax)
except:
pass
def window_adjust(self, channel):
self.min_sat = np.percentile(self.im[:, :, channel], 1)
self.max_sat = np.percentile(self.im[:, :, channel], 99)
def on_activated(self):
if self.channel_box.currentIndex() == 0:
self.window_adjust(0)
self.update_combo()
elif self.channel_box.currentIndex() == 1:
self.window_adjust(1)
self.update_combo()
elif self.channel_box.currentIndex() == 2:
self.window_adjust(2)
self.update_combo()
    def update_combo(self):
        """Remember the selected channel and rebuild the canvas/toolbar widgets.

        The try/except is best-effort, same pattern as update_image(): on the
        first call the widgets do not exist yet, so failures are swallowed.
        """
        self.channel = self.channel_box.currentIndex()
        try:
            self.verticalLayout_2.removeWidget(self.canvas)
            self.canvas.setParent(None)
            self.verticalLayout_2.removeWidget(self.navigation_toolbar)
            self.navigation_toolbar.setParent(None)
            self.show_image()
        except:
            pass
class Film2DoseToolbar(NavigationToolbar2):
    """Matplotlib navigation toolbar that records the zoom rectangles the user
    draws, so callers can crop the underlying image to the last zoom."""

    def _init_toolbar(self):
        # No extra buttons; keep the stock toolbar layout.
        super(Film2DoseToolbar, self)._init_toolbar()

    def __init__(self, canvas, parent, coordinates=True):
        super(Film2DoseToolbar, self).__init__(canvas, parent, coordinates)
        self.rect = None        # last rubber-band rectangle in canvas pixels (x, y, w, h)
        self.points = []        # data coordinates collected while dragging
        self.crop_points = []   # one [x0, y0, x1, y1] entry per completed zoom
        self.crop_index = []

    def home(self, *args):
        # Returning to the full view invalidates any recorded crops.
        super(Film2DoseToolbar, self).home(*args)
        self.crop_points = []

    def draw_rubberband(self, event, x0, y0, x1, y1):
        # The figure's y origin is at the bottom while the widget draws from
        # the top, hence the flip against the figure height.
        # (Cropping later is img[y: y + h, x: x + w].)
        fig_height = self.canvas.figure.bbox.height
        y0, y1 = fig_height - y0, fig_height - y1
        width, height = abs(x1 - x0), abs(y1 - y0)
        self.rect = [int(v) for v in (min(x0, x1), min(y0, y1), width, height)]
        self.canvas.drawRectangle(self.rect)
        self.points.append([event.xdata, event.ydata])

    def release_zoom(self, event):
        super(Film2DoseToolbar, self).release_zoom(event)
        if self.points:
            first, last = self.points[0], self.points[-1]
            self.crop_points.append([first[0], first[1], last[0], last[1]])
            self.points = []
            self.get_crop_points()

    def get_crop_points(self):
        """Return the most recent [x0, y0, x1, y1] zoom in data coordinates, or None."""
        if self.crop_points:
            return self.crop_points[-1]
class DoseConversionWidget(QtGui.QWidget, DoseConversionQT.Ui_Form_film2dose):
    def __init__(self, parent=None):
        """Build the film-to-dose conversion widget.

        Immediately prompts the user for a calibration object and loads the
        film scan into an embedded image editor (via set_param).
        """
        super(DoseConversionWidget, self).__init__(parent)
        self.setupUi(self)
        self.file_name = ''
        self.cal = None  # calibration object
        self.calib_data = {}  # Calibration data
        self.fig = None
        self.channel = 0
        self.eq = 0
        self.crop_index = []
        self.image_widget = EditImageWidget()
        self.dose = None  # Model instance, created in dose_conversion()
        self.dose_widget = None  # holds the converted dose view
        # Maps the visible combo-box labels to the internal method identifiers.
        self.method_titles = {'Single Channel': 'Single_Channel', 'Robust Average': 'Robust_Average',
                              'Robust Multichannel': 'Robust_RGB'}
        self.method = 'Single_Channel'
        self.setWindowTitle(
            QtGui.QApplication.translate("Film2Dose", "Dose Conversion", None, QtGui.QApplication.UnicodeUTF8))
        self.set_connections()
        self.set_param()
def set_param(self):
self.file_name, _ = QtGui.QFileDialog.getOpenFileName(self, "Open calibration object",
QtCore.QDir.currentPath(),
"Film2Dose calibration object (*.fco)")
self.cal = load(self.file_name)
self.calib_data['doses'] = self.cal.doses
self.calib_data['cal_od'] = self.cal.cal_od
self.calib_data['eqt'] = self.cal.calib_data['eqt']
self.calib_data['sigparam'] = self.cal.sigparam
self.image_widget.read_image()
self.image_widget.show_image()
self.gridLayout_2.addWidget(self.image_widget, 3, 1, 1, 1)
    def set_connections(self):
        """Wire the widget's buttons and the method combo box to their handlers."""
        self.to_dose.clicked.connect(self.dose_conversion)
        self.method_combo.activated[str].connect(self.on_method)
        self.calib_button.clicked.connect(self.on_calib)
        # NOTE(review): only `_on_import` is defined in the visible part of this
        # class; unless `on_import` is defined further down, this connect raises
        # AttributeError at construction time -- verify against the full class.
        self.import_image.clicked.connect(self.on_import)
def _on_import(self):
self.gridLayout_2.removeWidget(self.dose_widget)
self.gridLayout_2.removeWidget(self.image_widget)
self.dose_widget = OptimizedDoseWidget()
self.image_widget = EditImageWidget()
self.image_widget.read_image()
self.image_widget.show_image()
self.gridLayout_2.addWidget(self.image_widget, 3, 1, 1, 1)
def on_calib(self):
self.file_name, _ = QtGui.QFileDialog.getOpenFileName(self, "Open calibration object",
QtCore.QDir.currentPath(),
"Film2Dose calibration object (*.fco)")
self.cal = load(self.file_name)
self.calib_data['doses'] = self.cal.doses
self.calib_data['cal_od'] = self.cal.cal_od
self.calib_data['eqt'] = self.cal.calib_data['eqt']
self.calib_data['sigparam'] = self.cal.sigparam
def on_method(self, txt):
self.method = self.method_titles[txt]
def dose_conversion(self):
self.dose = Model(self.cal, od2pixel(self.image_widget.im), self.image_widget.delta)
lat_corr = self.lateral_checkbox.isChecked()
delta = self.image_widget.delta
self.gridLayout_2.removeWidget(self.dose_widget)
self.dose_widget = OptimizedDoseWidget()
if self.method == 'Single_Channel':
self.dose_widget.set_method(self.method)
sc_dose = self.dose.single_channel_dose(lat_corr=lat_corr)
self.dose_widget.set_image(sc_dose, delta, 0, 'ftd-dose', self.calib_data)
self.dose_widget.show_image()
self.gridLayout_2.addWidget(self.dose_widget, 3, 2, 1, 1)
elif self.method == 'Robust_Average':
self.dose_widget.set_method(self.method)
rob_dose = self.dose.robust_average_dose(lat_corr=lat_corr)
self.dose_widget.set_image(rob_dose, delta, 0, 'ftd-dose', self.calib_data)
self.dose_widget.show_image()
self.gridLayout_2.addWidget(self.dose_widget, 3, 2, 1, 1)
elif self.method == 'Robust_RGB':
self.dose_widget.set_method(self.method)
rob_dose = self.dose.robust2dose(lat_corr=lat_corr)
self.dose_widget.set_image(rob_dose, delta, 0, 'ftd-dose', self.calib_data)
self.dose_widget.show_image()
self.gridLayout_2.addWidget(self.dose_widget, 3, 2, 1, 1)
def update_cropped(self):
crop_position = self.toolbar.get_crop_points()
if crop_position:
self.crop_index = get_crop(self.im, self.delta, crop_position)
def update_saturation(self):
self.update_cropped()
def on_import(self):
QtGui.QMessageBox.information(None, "Warning", "You already have an image!")
reply = QtGui.QMessageBox.question(self, "Warning",
"You already have an image! Do you want to import a different file?",
QtGui.QMessageBox.Yes | QtGui.QMessageBox.No | QtGui.QMessageBox.Cancel)
if reply == QtGui.QMessageBox.Yes:
self._on_import()
elif reply == QtGui.QMessageBox.No:
self.questionLabel.setText("No")
else:
self.questionLabel.setText("Cancel")
class BatchDoseConversionWidget(DoseConversionWidget):
    """Batch variant of DoseConversionWidget.

    Converts every selected tiff file to dose using the chosen method and
    saves each result as '<original name>-dose.ftd' next to its source file,
    updating a progress bar as files complete.
    """

    # Maps the internal method key to the name of the Model method that
    # performs the corresponding conversion.
    _DOSE_FUNCS = {
        'Single_Channel': 'single_channel_dose',
        'Robust_Average': 'robust_average_dose',
        'Robust_RGB': 'robust2dose',
    }

    def __init__(self, parent=None):
        # image_files/pbar must exist before the base __init__ runs, because
        # it calls set_param(), which this class overrides.
        self.image_files = []
        self.pbar = None
        super(BatchDoseConversionWidget, self).__init__(parent)
        self.setWindowTitle(
            QtGui.QApplication.translate("Dose Conversion", "Batch Dose Conversion", None,
                                         QtGui.QApplication.UnicodeUTF8))
        self.resize(400, 400)

    def set_param(self):
        """Prompt for a calibration object and the list of tiff files to convert."""
        self.file_name, _ = QtGui.QFileDialog.getOpenFileName(self, "Open calibration object",
                                                              QtCore.QDir.currentPath(),
                                                              "Film2Dose calibration object (*.fco)")
        self.image_files, _ = QtGui.QFileDialog.getOpenFileNames(self, "Open 48 bits tiff Calibration Files",
                                                                 QtCore.QDir.currentPath(),
                                                                 "48 bit Tiff Files (*.tif)")
        self.cal = load(self.file_name)
        self.calib_data['doses'] = self.cal.doses
        self.calib_data['cal_od'] = self.cal.cal_od
        self.calib_data['eqt'] = self.cal.calib_data['eqt']
        self.calib_data['sigparam'] = self.cal.sigparam

    def dose_conversion(self):
        """Convert every file in image_files using the selected method.

        Fix: the original repeated the entire progress-bar/convert/save loop
        three times (once per method); the loop is now shared and the Model
        conversion method is looked up from _DOSE_FUNCS.
        """
        lat_corr = self.lateral_checkbox.isChecked()
        if self.pbar is not None:
            self.pbar.close()
        self.pbar = QtGui.QProgressBar(self)
        dose_func = self._DOSE_FUNCS.get(self.method)
        if dose_func is None:  # unknown method key; nothing to convert
            return
        self._convert_all(dose_func, lat_corr)

    def _convert_all(self, dose_func, lat_corr):
        """Run the batch loop: convert each image with Model.<dose_func> and
        save it, advancing the progress bar per file."""
        self.pbar.move(150, 150)
        self.setWindowTitle('Optimizing')
        self.pbar.setMinimum(0)
        self.pbar.setMaximum(len(self.image_files))
        self.pbar.show()
        for step, path in enumerate(self.image_files, start=1):
            # direct dose conversion
            self.image_widget.read_image(path)
            self.dose = Model(self.cal, od2pixel(self.image_widget.im), self.image_widget.delta)
            dose_array = getattr(self.dose, dose_func)(lat_corr=lat_corr)
            self.pbar.setValue(step)
            im = Fim2DoseImage(dose_array, self.dose.delta, 'ftd-dose', calib_data=self.calib_data)
            im.set_calculation_method(self.method)
            base, _ = os.path.splitext(path)
            save(im, base + '-dose.ftd')
# noinspection PyUnresolvedReferences
class FitWidget(QtGui.QWidget, FitCurvesQt.Ui_Form):
    """Widget for fitting calibration curves channel by channel.

    The user picks a channel (combo box) and an equation (radio buttons),
    previews the resulting fit, confirms the choice per channel, and finally
    saves the completed calibration. ``fit_done`` is emitted with the fitted
    calibration object when it is saved.

    Fixes over the original: the two bare ``except:`` clauses are narrowed to
    ``except Exception`` (so KeyboardInterrupt/SystemExit propagate), and the
    triplicated radio/combo handlers are consolidated into helpers.
    """

    fit_done = QtCore.Signal(object)  # emitted with the fitted calibration

    def __init__(self, parent=None):
        super(FitWidget, self).__init__(parent)
        self.file_name = ''  # path of a calibration object loaded via set_param()
        self.cal = None  # calibration object being fitted
        self.fig = None  # current fit-preview figure
        self.channel = 0  # selected channel index (combo box)
        self.eq = 0  # selected equation number (radio buttons)
        self.setupUi(self)
        self.set_connections()

    def set_cal(self, cal):
        """Set the calibration object to be fitted."""
        self.cal = cal

    def set_connections(self):
        """Wire UI controls to their handlers."""
        self.comboBox.activated.connect(self.on_activated)
        self.radioButton_eq1.clicked.connect(self.on_radio1)
        self.RadioButton_eq2.clicked.connect(self.on_radio2)
        self.radioButton_3.clicked.connect(self.on_radio3)
        self.select_curve.clicked.connect(self.curve_select)
        self.finish_button.clicked.connect(self.save_calib)

    def _select_equation(self, eq):
        """Select equation *eq* and refresh the preview for the current channel."""
        self.update()
        self.eq = eq
        self.calib_channel()
        self._show_image()

    def on_radio1(self):
        """Radio handler: preview equation 1."""
        self._select_equation(1)

    def on_radio2(self):
        """Radio handler: preview equation 2."""
        self._select_equation(2)

    def on_radio3(self):
        """Radio handler: preview equation 3."""
        self._select_equation(3)

    def set_param(self):
        """Prompt the user for a calibration object (*.fco) and load it."""
        self.file_name, _ = QtGui.QFileDialog.getOpenFileName(self, "Open calibration object",
                                                              QtCore.QDir.currentPath(),
                                                              "Film2Dose calibration object (*.fco)")
        self.cal = load(self.file_name)

    def on_activated(self):
        """Combo-box handler: refresh the preview for the newly picked channel."""
        # Channels 0-2 are handled identically (the original had three
        # duplicate branches).
        if self.comboBox.currentIndex() in (0, 1, 2):
            self.update_combo()

    def update_combo(self):
        """Switch to the channel selected in the combo box and show the
        default (equation 1) preview for it."""
        self.channel = self.comboBox.currentIndex()
        self._select_equation(1)

    def calib_channel(self):
        """Build the fit-preview figure for the current channel/equation,
        showing an error dialog when the calibration data is incomplete."""
        try:
            self.fig = self.cal.show_curve(self.channel, self.eq)
        except Exception:  # narrowed from a bare except
            msg = "Check your calibration data!"
            QtGui.QMessageBox.critical(self, "Incomplete calibration data",
                                       msg,
                                       QtGui.QMessageBox.Abort)

    def _show_image(self):
        """Embed the current figure in the layout with a navigation toolbar."""
        self.figure_canvas = FigureCanvas(self.fig)
        self.verticalLayout_8.addWidget(self.figure_canvas)
        self.navigation_toolbar = NavigationToolbar2(self.figure_canvas, self)
        self.navigation_toolbar.setIconSize(QtCore.QSize(46, 46))
        self.verticalLayout_8.addWidget(self.navigation_toolbar, 0)

    def curve_select(self):
        """Confirm the current equation choice for the current channel.

        If some channels are already fitted, offer to reset the calibration
        and start the selection over.
        """
        channel_txt = self.comboBox.currentText()
        if not all(self.cal.is_fitted.values()):
            msg = "<p> Are you selecting Equation %s to %s ?</p>" % (self.eq, channel_txt)
            reply = QtGui.QMessageBox.question(self, "Confirm your choice",
                                               msg,
                                               QtGui.QMessageBox.Yes | QtGui.QMessageBox.No)
            if reply == QtGui.QMessageBox.Yes:
                self.fit_channel()
        else:
            cl = [key for key in self.cal.is_fitted.keys() if self.cal.is_fitted[key] is True]
            tx = ' and '.join(cl)
            message = "<p>There are selected equations for %s channel</p>" \
                      "<p>Do you want to select a different equations for each channel?</p>" % tx
            reply = QtGui.QMessageBox.question(self, "Confirm your choice",
                                               message,
                                               QtGui.QMessageBox.Yes | QtGui.QMessageBox.No)
            if reply == QtGui.QMessageBox.Yes:
                self.cal.reset()
                self.fit_channel()

    def fit_channel(self):
        """Fit the selected equation to the current channel and record the
        polynomial, uncertainties and interpolator on the calibration."""
        pol, sigma, R, df = self.cal.fit_curve(self.channel, self.eq)
        self.cal.rgb_pol.append(np.flipud(pol))  # inverting coeff order
        self.cal.fit_unc.append([sigma, R, df])
        self.cal.eqt.append(self.eq)
        self.cal.sigparam[self.channel] = self.cal.calc_uncertainty(self.channel, self.eq)
        self.cal.calib_data[self.cal.channels[self.channel]] = self.cal.get_interp(self.eq, pol)
        # setting channel calibrated
        self.cal.is_fitted[self.cal.channels[self.channel]] = True

    @QtCore.Slot()
    def save_calib(self):
        """Save the calibration once every channel is fitted; otherwise tell
        the user which channels are still missing. Emits ``fit_done``."""
        if all(self.cal.is_fitted.values()):
            file_name, _ = QtGui.QFileDialog.getSaveFileName(None, "Save calibration object",
                                                             QtCore.QDir.currentPath(),
                                                             "Film2Dose calibration object (*.fco)")
            self.cal.calib_rgb()
            self.fit_done.emit(self.cal)
            if file_name:
                save(self.cal, file_name)
        else:
            cl = [key for key in self.cal.is_fitted.keys() if self.cal.is_fitted[key] is False]
            tx = ' and '.join(cl)
            message = "<p>You still need to select equations for %s channel</p>" % tx
            QtGui.QMessageBox.information(None, "Missing Data", message)

    @property
    def fitted_cal(self):
        """The calibration object in its current (possibly fitted) state."""
        return self.cal

    def update(self):
        """Detach the current preview canvas/toolbar, if present.

        NOTE(review): this overrides QWidget.update() (the repaint request);
        the name is kept for compatibility with existing callers.
        """
        try:
            self.verticalLayout_8.removeWidget(self.figure_canvas)
            self.figure_canvas.setParent(None)
            self.verticalLayout_8.removeWidget(self.navigation_toolbar)
            self.navigation_toolbar.setParent(None)
        except Exception:  # widgets do not exist before the first preview
            pass
class MplCalibrationWidget(QtGui.QWidget):
    """Qt widget embedding a matplotlib canvas (MplCalibrationCanvas) plus a
    navigation toolbar; used to pick dose-calibration points on a figure."""

    def __init__(self, parent, fig, npoints=None):
        # npoints is currently unused -- kept for interface compatibility.
        QtGui.QWidget.__init__(self, parent)
        self.canvas = MplCalibrationCanvas(fig)
        # THESE TWO LINES WERE ADDED
        # Give the canvas keyboard focus so key events reach matplotlib.
        self.canvas.setFocusPolicy(QtCore.Qt.ClickFocus)
        self.canvas.setFocus()
        # # Add a Main layout
        # NOTE(review): the layout is parented to `parent`, not to this
        # widget -- confirm that is intentional.
        self.l = QtGui.QVBoxLayout(parent)
        self.l.addWidget(self.canvas)
        self.navigation_toolbar = NavigationToolbar2(self.canvas, self)
        self.navigation_toolbar.setIconSize(QtCore.QSize(46, 46))
        self.l.addWidget(self.navigation_toolbar)

    def get_points(self):
        """Return the points the user picked on the canvas."""
        return self.canvas.get_points()

    def add_table(self, table):
        """Append a table widget below the canvas/toolbar."""
        self.l.addWidget(table)
class CalibrationWidget(QtGui.QWidget):
    def __init__(self, parent=None):
        """Build the calibration window and immediately prompt for the
        calibration scan files (load_cal_images opens file dialogs)."""
        super(CalibrationWidget, self).__init__(parent)
        self.setWindowTitle(
            QtGui.QApplication.translate("Film2Dose", "gafchromic film calibration", None,
                                         QtGui.QApplication.UnicodeUTF8))
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap(":/icons/scanner.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.setWindowIcon(icon)
        self.mpl_widget = None  # MplCalibrationWidget created by load_cal_images()
        self.resize(1280, 800)
        self.cal = None  # Film2DoseCalibration object
        self.tableWidget = QtGui.QTableWidget()  # table of dose calibration points
        self.load_cal_images()
def load_cal_images(self):
file_name, _ = QtGui.QFileDialog.getOpenFileNames(self, "Open 48 bits tiff Calibration Files",
QtCore.QDir.currentPath(),
"48 bit Tiff Files (*.tif)")
actual_path = os.path.dirname(file_name[0])
channel = 0 # red
if file_name:
self.cal = Film2DoseCalibration(filename=file_name)
self.cal.read_image()
file_name, _ = QtGui.QFileDialog.getOpenFileName(self, "Open calibration doses file",
actual_path,
"txt Files (*.txt)")
doses = read_cal_doses(file_name)
self.cal.set_dose_points(doses)
cim = self.cal.calib_image[:, :, channel]
calim = np.flipud(cim)
# show image to calibrate
fig, ax = display_fig(calim, self.cal.delta, col_map='Greys', limits=(0, 1))
ax.set_title('Dose calibration points')
# Attach to Widget
self.mpl_widget = MplCalibrationWidget(self, fig)
self.show()
# get dose points Position
position = self.mpl_widget.get_points()
self.cal.set_points_position(position)
# fill Table Widget
self._fill_table()
self.mpl_widget.add_table(self.tableWidget)
file_name, _ | |
<gh_stars>0
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines external repositories needed by rules_webtesting."""
load("//web/internal:platform_http_file.bzl", "platform_http_file")
load("@bazel_gazelle//:deps.bzl", "go_repository")
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
load("@bazel_tools//tools/build_defs/repo:java.bzl", "java_import_external")
# NOTE: URLs are mirrored by an asynchronous review process. They must
# be greppable for that to happen. It's OK to submit broken mirror
# URLs, so long as they're correctly formatted. Bazel's downloader
# has fast failover.
def web_test_repositories(**kwargs):
    """Defines external repositories required by Webtesting Rules.

    This function exists for other Bazel projects to call from their WORKSPACE
    file when depending on rules_webtesting using http_archive. It imports the
    transitive dependencies into the parent workspace, skipping any repository
    that has already been defined.

    Individual dependencies may be excluded with an "omit_" + name parameter,
    for users who want to be rigorous about declaring their own direct
    dependencies or who depend on another Bazel project (e.g. rules_closure)
    that defines the same repositories. Alternatively, the individual
    repository functions referenced below may be called directly.

    Note that while these repositories are defined, they are not downloaded
    unless a target that depends on them is built.

    Args:
      **kwargs: omit_... parameters used to prevent importing specific
          dependencies.
    """
    repository_fns = [
        ("bazel_skylib", bazel_skylib),
        ("com_github_blang_semver", com_github_blang_semver),
        ("com_github_gorilla_context", com_github_gorilla_context),
        ("com_github_gorilla_mux", com_github_gorilla_mux),
        ("com_github_tebeka_selenium", com_github_tebeka_selenium),
        ("com_github_urllib3", com_github_urllib3),
        ("com_google_code_findbugs_jsr305", com_google_code_findbugs_jsr305),
        ("com_google_code_gson", com_google_code_gson),
        (
            "com_google_errorprone_error_prone_annotations",
            com_google_errorprone_error_prone_annotations,
        ),
        ("com_google_guava", com_google_guava),
        ("com_squareup_okhttp3_okhttp", com_squareup_okhttp3_okhttp),
        ("com_squareup_okio", com_squareup_okio),
        ("commons_codec", commons_codec),
        ("commons_logging", commons_logging),
        ("junit", junit),
        ("net_bytebuddy", net_bytebuddy),
        ("org_apache_commons_exec", org_apache_commons_exec),
        ("org_apache_httpcomponents_httpclient", org_apache_httpcomponents_httpclient),
        ("org_apache_httpcomponents_httpcore", org_apache_httpcomponents_httpcore),
        ("org_hamcrest_core", org_hamcrest_core),
        ("org_jetbrains_kotlin_stdlib", org_jetbrains_kotlin_stdlib),
        ("org_json", org_json),
        ("org_seleniumhq_py", org_seleniumhq_py),
        ("org_seleniumhq_selenium_api", org_seleniumhq_selenium_api),
        ("org_seleniumhq_selenium_remote_driver", org_seleniumhq_selenium_remote_driver),
    ]

    # Same behavior as the original if-chain: each check consumes its
    # "omit_" key from kwargs, then any leftover keys are reported.
    for name, repository_fn in repository_fns:
        if should_create_repository(name, kwargs):
            repository_fn()
    if kwargs.keys():
        print("The following parameters are unknown: " + str(kwargs.keys()))
def should_create_repository(name, args):
    """Returns whether the named repository should be created.

    Creation is skipped when the caller passed a truthy "omit_" + name
    argument, or when a repository with that name has already been defined.
    The "omit_" + name key is removed from args if present.

    Args:
      name: The name of the repository that should be checked.
      args: A dictionary that contains "omit_...": bool pairs.

    Returns:
      boolean indicating whether the repository should be created.
    """
    if args.pop("omit_" + name, False):
        return False
    return not native.existing_rule(name)
def browser_repositories(firefox = False, chromium = False, sauce = False):
    """Sets up repositories for browsers defined in //browsers/....

    This should only be used on an experimental basis; projects should define
    their own browsers.

    Args:
      firefox: Configure repositories for //browsers:firefox-native.
      chromium: Configure repositories for //browsers:chromium-native.
      sauce: Configure repositories for //browser/sauce:chrome-win10.
    """
    browser_setup = [
        (chromium, [org_chromium_chromedriver, org_chromium_chromium]),
        (firefox, [org_mozilla_firefox, org_mozilla_geckodriver]),
        (sauce, [com_saucelabs_sauce_connect]),
    ]
    for enabled, repository_fns in browser_setup:
        if enabled:
            for repository_fn in repository_fns:
                repository_fn()
def bazel_skylib():
    """http_archive for bazel-skylib at commit e9fc475.

    NOTE(review): sha256 is empty, so this download is not integrity-checked;
    fill in the archive's real checksum.
    """
    http_archive(
        name = "bazel_skylib",
        sha256 = "",
        strip_prefix = "bazel-skylib-e9fc4750d427196754bebb0e2e1e38d68893490a",
        urls = [
            "https://mirror.bazel.build/github.com/bazelbuild/bazel-skylib/archive/e9fc4750d427196754bebb0e2e1e38d68893490a.tar.gz",
            "https://github.com/bazelbuild/bazel-skylib/archive/e9fc4750d427196754bebb0e2e1e38d68893490a.tar.gz",
        ],
    )
def com_github_blang_semver():
    """Go repository for github.com/blang/semver v3.5.1."""
    go_repository(
        name = "com_github_blang_semver",
        importpath = "github.com/blang/semver",
        sha256 = "3d9da53f4c2d3169bfa9b25f2f36f301a37556a47259c870881524c643c69c57",
        strip_prefix = "semver-3.5.1",
        urls = [
            "https://mirror.bazel.build/github.com/blang/semver/archive/v3.5.1.tar.gz",
            "https://github.com/blang/semver/archive/v3.5.1.tar.gz",
        ],
    )
def com_github_gorilla_context():
    """Go repository for github.com/gorilla/context v1.1.1."""
    go_repository(
        name = "com_github_gorilla_context",
        importpath = "github.com/gorilla/context",
        sha256 = "2dfdd051c238695bf9ebfed0bf6a8c533507ac0893bce23be5930e973736bb03",
        strip_prefix = "context-1.1.1",
        urls = [
            "https://mirror.bazel.build/github.com/gorilla/context/archive/v1.1.1.tar.gz",
            "https://github.com/gorilla/context/archive/v1.1.1.tar.gz",
        ],
    )
def com_github_gorilla_mux():
    """Go repository for github.com/gorilla/mux v1.6.2."""
    go_repository(
        name = "com_github_gorilla_mux",
        importpath = "github.com/gorilla/mux",
        sha256 = "0dc18fb09413efea7393e9c2bd8b5b442ce08e729058f5f7e328d912c6c3d3e3",
        strip_prefix = "mux-1.6.2",
        urls = [
            "https://mirror.bazel.build/github.com/gorilla/mux/archive/v1.6.2.tar.gz",
            "https://github.com/gorilla/mux/archive/v1.6.2.tar.gz",
        ],
    )
def com_github_tebeka_selenium():
    """Go repository for github.com/tebeka/selenium, pinned to commit a49cf4b."""
    go_repository(
        name = "com_github_tebeka_selenium",
        importpath = "github.com/tebeka/selenium",
        sha256 = "c506637fd690f4125136233a3ea405908b8255e2d7aa2aa9d3b746d96df50dcd",
        strip_prefix = "selenium-a49cf4b98a36c2b21b1ccb012852bd142d5fc04a",
        urls = [
            "https://mirror.bazel.build/github.com/tebeka/selenium/archive/a49cf4b98a36c2b21b1ccb012852bd142d5fc04a.tar.gz",
            "https://github.com/tebeka/selenium/archive/a49cf4b98a36c2b21b1ccb012852bd142d5fc04a.tar.gz",
        ],
    )
def com_github_urllib3():
    """Python urllib3 1.23 source archive with a custom BUILD file."""
    http_archive(
        name = "com_github_urllib3",
        build_file = str(Label("//build_files:com_github_urllib3.BUILD")),
        sha256 = "a68ac5e15e76e7e5dd2b8f94007233e01effe3e50e8daddf69acfd81cb686baf",
        strip_prefix = "urllib3-1.23",
        urls = [
            "https://files.pythonhosted.org/packages/3c/d2/dc5471622bd200db1cd9319e02e71bc655e9ea27b8e0ce65fc69de0dac15/urllib3-1.23.tar.gz",
        ],
    )
def com_google_code_findbugs_jsr305():
    """Java jar for JSR-305 annotations (findbugs jsr305 3.0.2)."""
    java_import_external(
        name = "com_google_code_findbugs_jsr305",
        jar_urls = [
            "https://mirror.bazel.build/repo1.maven.org/maven2/com/google/code/findbugs/jsr305/3.0.2/jsr305-3.0.2.jar",
            "https://repo1.maven.org/maven2/com/google/code/findbugs/jsr305/3.0.2/jsr305-3.0.2.jar",
        ],
        jar_sha256 =
        "766ad2a0783f2687962c8ad74ceecc38a28b9f72a2d085ee438b7813e928d0c7",
        licenses = ["notice"],  # BSD 3-clause
    )
def com_google_code_gson():
    """Java jar for Gson 2.8.5."""
    java_import_external(
        name = "com_google_code_gson",
        jar_sha256 =
        "233a0149fc365c9f6edbd683cfe266b19bdc773be98eabdaf6b3c924b48e7d81",
        jar_urls = [
            "https://mirror.bazel.build/repo1.maven.org/maven2/com/google/code/gson/gson/2.8.5/gson-2.8.5.jar",
            "https://repo1.maven.org/maven2/com/google/code/gson/gson/2.8.5/gson-2.8.5.jar",
        ],
        licenses = ["notice"],  # The Apache Software License, Version 2.0
    )
def com_google_errorprone_error_prone_annotations():
    """Java jar for Error Prone annotations 2.3.1."""
    java_import_external(
        name = "com_google_errorprone_error_prone_annotations",
        jar_sha256 =
        "10a5949aa0f95c8de4fd47edfe20534d2acefd8c224f8afea1f607e112816120",
        jar_urls = [
            "https://mirror.bazel.build/repo1.maven.org/maven2/com/google/errorprone/error_prone_annotations/2.3.1/error_prone_annotations-2.3.1.jar",
            "https://repo1.maven.org/maven2/com/google/errorprone/error_prone_annotations/2.3.1/error_prone_annotations-2.3.1.jar",
        ],
        licenses = ["notice"],  # Apache 2.0
    )
def com_google_guava():
    """Java jar for Guava 26.0-jre.

    Fix: the mirror URL previously pointed at guava-26.9-jre.jar, which does
    not exist at the 26.0 path and does not match the sha256 or the canonical
    URL; it now fetches guava-26.0-jre.jar like the primary URL.
    """
    java_import_external(
        name = "com_google_guava",
        jar_sha256 = "a0e9cabad665bc20bcd2b01f108e5fc03f756e13aea80abaadb9f407033bea2c",
        jar_urls = [
            "https://mirror.bazel.build/repo1.maven.org/maven2/com/google/guava/guava/26.0-jre/guava-26.0-jre.jar",
            "https://repo1.maven.org/maven2/com/google/guava/guava/26.0-jre/guava-26.0-jre.jar",
        ],
        licenses = ["notice"],  # Apache 2.0
        exports = [
            "@com_google_code_findbugs_jsr305",
            "@com_google_errorprone_error_prone_annotations",
        ],
    )
def com_saucelabs_sauce_connect():
    """Sauce Connect proxy binaries per platform.

    NOTE(review): windows fetches 4.4.12 while linux/macos fetch 4.5.1 --
    confirm the version skew is intentional.
    """
    platform_http_file(
        name = "com_saucelabs_sauce_connect",
        licenses = ["by_exception_only"],  # SauceLabs EULA
        amd64_sha256 = "dd53f2cdcec489fbc2443942b853b51bf44af39f230600573119cdd315ddee52",
        amd64_urls = [
            "https://saucelabs.com/downloads/sc-4.5.1-linux.tar.gz",
        ],
        macos_sha256 = "920ae7bd5657bccdcd27bb596593588654a2820486043e9a12c9062700697e66",
        macos_urls = [
            "https://saucelabs.com/downloads/sc-4.5.1-osx.zip",
        ],
        windows_sha256 =
        "ec11b4ee029c9f0cba316820995df6ab5a4f394053102e1871b9f9589d0a9eb5",
        windows_urls = [
            "https://saucelabs.com/downloads/sc-4.4.12-win32.zip",
        ],
    )
def com_squareup_okhttp3_okhttp():
    """Java jar for OkHttp 3.9.1."""
    java_import_external(
        name = "com_squareup_okhttp3_okhttp",
        jar_urls = [
            "https://mirror.bazel.build/repo1.maven.org/maven2/com/squareup/okhttp3/okhttp/3.9.1/okhttp-3.9.1.jar",
            "https://repo1.maven.org/maven2/com/squareup/okhttp3/okhttp/3.9.1/okhttp-3.9.1.jar",
        ],
        jar_sha256 =
        "a0d01017a42bba26e507fc6d448bb36e536f4b6e612f7c42de30bbdac2b7785e",
        licenses = ["notice"],  # Apache 2.0
        deps = [
            "@com_squareup_okio",
            "@com_google_code_findbugs_jsr305",
        ],
    )
def com_squareup_okio():
    """Java jar for Okio 2.0.0."""
    java_import_external(
        name = "com_squareup_okio",
        jar_sha256 = "79b948cf77504750fdf7aeaf362b5060415136ab6635e5113bd22925e0e9e737",
        jar_urls = [
            "https://mirror.bazel.build/repo1.maven.org/maven2/com/squareup/okio/okio/2.0.0/okio-2.0.0.jar",
            "https://repo1.maven.org/maven2/com/squareup/okio/okio/2.0.0/okio-2.0.0.jar",
        ],
        licenses = ["notice"],  # Apache 2.0
        deps = [
            "@com_google_code_findbugs_jsr305",
            "@org_jetbrains_kotlin_stdlib",
        ],
    )
def commons_codec():
    """Java jar for Apache Commons Codec 1.11."""
    java_import_external(
        name = "commons_codec",
        jar_sha256 =
        "e599d5318e97aa48f42136a2927e6dfa4e8881dff0e6c8e3109ddbbff51d7b7d",
        jar_urls = [
            "https://mirror.bazel.build/repo1.maven.org/maven2/commons-codec/commons-codec/1.11/commons-codec-1.11.jar",
            "https://repo1.maven.org/maven2/commons-codec/commons-codec/1.11/commons-codec-1.11.jar",
        ],
        licenses = ["notice"],  # Apache License, Version 2.0
    )
def commons_logging():
    """Java jar for Apache Commons Logging 1.2."""
    java_import_external(
        name = "commons_logging",
        jar_sha256 =
        "daddea1ea0be0f56978ab3006b8ac92834afeefbd9b7e4e6316fca57df0fa636",
        jar_urls = [
            "https://mirror.bazel.build/repo1.maven.org/maven2/commons-logging/commons-logging/1.2/commons-logging-1.2.jar",
            "https://repo1.maven.org/maven2/commons-logging/commons-logging/1.2/commons-logging-1.2.jar",
        ],
        licenses = ["notice"],  # The Apache Software License, Version 2.0
    )
def junit():
    """Java jar for JUnit 4.12 (test-only)."""
    java_import_external(
        name = "junit",
        jar_sha256 =
        "59721f0805e223d84b90677887d9ff567dc534d7c502ca903c0c2b17f05c116a",
        jar_urls = [
            "https://mirror.bazel.build/repo1.maven.org/maven2/junit/junit/4.12/junit-4.12.jar",
            "https://repo1.maven.org/maven2/junit/junit/4.12/junit-4.12.jar",
        ],
        licenses = ["reciprocal"],  # Eclipse Public License 1.0
        testonly_ = 1,
        deps = ["@org_hamcrest_core"],
    )
def net_bytebuddy():
    """Java jar for Byte Buddy 1.8.19."""
    java_import_external(
        name = "net_bytebuddy",
        jar_sha256 = "4b87ad52a8f64a1197508e176e84076584160e3d65229ff757efee870cd4a8e2",
        jar_urls = [
            "https://mirror.bazel.build/repo1.maven.org/maven2/net/bytebuddy/byte-buddy/1.8.19/byte-buddy-1.8.19.jar",
            "https://repo1.maven.org/maven2/net/bytebuddy/byte-buddy/1.8.19/byte-buddy-1.8.19.jar",
        ],
        licenses = ["notice"],  # Apache 2.0
        deps = ["@com_google_code_findbugs_jsr305"],
    )
def org_apache_commons_exec():
    """Java jar for Apache Commons Exec 1.3."""
    java_import_external(
        name = "org_apache_commons_exec",
        jar_sha256 =
        "cb49812dc1bfb0ea4f20f398bcae1a88c6406e213e67f7524fb10d4f8ad9347b",
        jar_urls = [
            "https://mirror.bazel.build/repo1.maven.org/maven2/org/apache/commons/commons-exec/1.3/commons-exec-1.3.jar",
            "https://repo1.maven.org/maven2/org/apache/commons/commons-exec/1.3/commons-exec-1.3.jar",
        ],
        licenses = ["notice"],  # Apache License, Version 2.0
    )
def org_apache_httpcomponents_httpclient():
    """Java jar for Apache HttpClient 4.5.6."""
    java_import_external(
        name = "org_apache_httpcomponents_httpclient",
        jar_sha256 =
        "c03f813195e7a80e3608d0ddd8da80b21696a4c92a6a2298865bf149071551c7",
        jar_urls = [
            "https://mirror.bazel.build/repo1.maven.org/maven2/org/apache/httpcomponents/httpclient/4.5.6/httpclient-4.5.6.jar",
            "https://repo1.maven.org/maven2/org/apache/httpcomponents/httpclient/4.5.6/httpclient-4.5.6.jar",
        ],
        licenses = ["notice"],  # Apache License, Version 2.0
        deps = [
            "@org_apache_httpcomponents_httpcore",
            "@commons_logging",
            "@commons_codec",
        ],
    )
def org_apache_httpcomponents_httpcore():
    """Java jar for Apache HttpCore 4.4.9."""
    java_import_external(
        name = "org_apache_httpcomponents_httpcore",
        jar_sha256 =
        "1b4a1c0b9b4222eda70108d3c6e2befd4a6be3d9f78ff53dd7a94966fdf51fc5",
        jar_urls = [
            "https://mirror.bazel.build/repo1.maven.org/maven2/org/apache/httpcomponents/httpcore/4.4.9/httpcore-4.4.9.jar",
            "https://repo1.maven.org/maven2/org/apache/httpcomponents/httpcore/4.4.9/httpcore-4.4.9.jar",
        ],
        licenses = ["notice"],  # Apache License, Version 2.0
    )
def org_chromium_chromedriver():
    """ChromeDriver binaries per platform.

    NOTE(review): windows fetches 2.38 while linux/macos fetch 2.41 --
    confirm the version skew is intentional.
    """
    platform_http_file(
        name = "org_chromium_chromedriver",
        licenses = ["reciprocal"],  # BSD 3-clause, ICU, MPL 1.1, libpng (BSD/MIT-like), Academic Free License v. 2.0, BSD 2-clause, MIT
        amd64_sha256 =
        "71eafe087900dbca4bc0b354a1d172df48b31a4a502e21f7c7b156d7e76c95c7",
        amd64_urls = [
            "https://chromedriver.storage.googleapis.com/2.41/chromedriver_linux64.zip",
        ],
        macos_sha256 =
        "fd32a27148f44796a55f5ce3397015c89ebd9f600d9dda2bcaca54575e2497ae",
        macos_urls = [
            "https://chromedriver.storage.googleapis.com/2.41/chromedriver_mac64.zip",
        ],
        windows_sha256 =
        "a8fa028acebef7b931ef9cb093f02865f9f7495e49351f556e919f7be77f072e",
        windows_urls = [
            "https://chromedriver.storage.googleapis.com/2.38/chromedriver_win32.zip",
        ],
    )
def org_chromium_chromium():
    """Chromium browser snapshot binaries per platform.

    NOTE(review): each platform pins a different snapshot number (561732 /
    561733 / 540270) -- presumably the nearest available build per platform;
    verify.
    """
    platform_http_file(
        name = "org_chromium_chromium",
        licenses = ["notice"],  # BSD 3-clause (maybe more?)
        amd64_sha256 =
        "6933d0afce6e17304b62029fbbd246cbe9e130eb0d90d7682d3765d3dbc8e1c8",
        amd64_urls = [
            "https://commondatastorage.googleapis.com/chromium-browser-snapshots/Linux_x64/561732/chrome-linux.zip",
        ],
        macos_sha256 =
        "084884e91841a923d7b6e81101f0105bbc3b0026f9f6f7a3477f5b313ee89e32",
        macos_urls = [
            "https://commondatastorage.googleapis.com/chromium-browser-snapshots/Mac/561733/chrome-mac.zip",
        ],
        windows_sha256 =
        "d1bb728118c12ea436d8ea07dba980789e7d860aa664dd1fad78bc20e8d9391c",
        windows_urls = [
            "https://commondatastorage.googleapis.com/chromium-browser-snapshots/Win_x64/540270/chrome-win32.zip",
        ],
    )
def org_hamcrest_core():
    """Java jar for Hamcrest Core 1.3 (test-only)."""
    java_import_external(
        name = "org_hamcrest_core",
        jar_sha256 =
        "66fdef91e9739348df7a096aa384a5685f4e875584cce89386a7a47251c4d8e9",
        jar_urls = [
            "https://mirror.bazel.build/repo1.maven.org/maven2/org/hamcrest/hamcrest-core/1.3/hamcrest-core-1.3.jar",
            "https://repo1.maven.org/maven2/org/hamcrest/hamcrest-core/1.3/hamcrest-core-1.3.jar",
        ],
        licenses = ["notice"],  # New BSD License
        testonly_ = 1,
    )
def org_jetbrains_kotlin_stdlib():
    """Java jar for the Kotlin standard library 1.2.61."""
    java_import_external(
        name = "org_jetbrains_kotlin_stdlib",
        jar_sha256 = "62eaf9cc6e746cef4593abe7cdb4dd48694ef5f817c852e0d9fbbd11fcfc564e",
        jar_urls = [
            "https://mirror.bazel.build/repo1.maven.org/maven2/org/jetbrains/kotlin/kotlin-stdlib/1.2.61/kotlin-stdlib-1.2.61.jar",
            "https://repo1.maven.org/maven2/org/jetbrains/kotlin/kotlin-stdlib/1.2.61/kotlin-stdlib-1.2.61.jar",
        ],
        licenses = ["notice"],  # The Apache Software License, Version 2.0
    )
def org_json():
    """Java jar for org.json 20180813."""
    java_import_external(
        name = "org_json",
        jar_sha256 = "518080049ba83181914419d11a25d9bc9833a2d729b6a6e7469fa52851356da8",
        jar_urls = [
            "https://mirror.bazel.build/repo1.maven.org/maven2/org/json/json/20180813/json-20180813.jar",
            "https://repo1.maven.org/maven2/org/json/json/20180813/json-20180813.jar",
        ],
        licenses = ["notice"],  # MIT-style license
    )
def org_mozilla_firefox():
    """Firefox 61.0.2 binaries for linux and macos (no windows entry)."""
    platform_http_file(
        name = "org_mozilla_firefox",
        licenses = ["reciprocal"],  # MPL 2.0
        amd64_sha256 =
        "3a729ddcb1e0f5d63933177a35177ac6172f12edbf9fbbbf45305f49333608de",
        amd64_urls = [
            "https://mirror.bazel.build/ftp.mozilla.org/pub/firefox/releases/61.0.2/linux-x86_64/en-US/firefox-61.0.2.tar.bz2",
            "https://ftp.mozilla.org/pub/firefox/releases/61.0.2/linux-x86_64/en-US/firefox-61.0.2.tar.bz2",
        ],
        macos_sha256 =
        "bf23f659ae34832605dd0576affcca060d1077b7bf7395bc9874f62b84936dc5",
        macos_urls = [
            "https://mirror.bazel.build/ftp.mozilla.org/pub/firefox/releases/61.0.2/mac/en-US/Firefox%2061.0.2.dmg",
            "https://ftp.mozilla.org/pub/firefox/releases/61.0.2/mac/en-US/Firefox%2061.0.2.dmg",
        ],
    )
def org_mozilla_geckodriver():
    """geckodriver 0.21.0 binaries for linux and macos (no windows entry)."""
    platform_http_file(
        name = "org_mozilla_geckodriver",
        licenses = ["reciprocal"],  # MPL 2.0
        amd64_sha256 =
        "c9ae92348cf00aa719be6337a608fae8304691a95668e8e338d92623ba9e0ec6",
        amd64_urls = [
            "https://mirror.bazel.build/github.com/mozilla/geckodriver/releases/download/v0.21.0/geckodriver-v0.21.0-linux64.tar.gz",
            "https://github.com/mozilla/geckodriver/releases/download/v0.21.0/geckodriver-v0.21.0-linux64.tar.gz",
        ],
        macos_sha256 =
        "ce4a3e9d706db94e8760988de1ad562630412fa8cf898819572522be584f01ce",
        macos_urls = [
            "https://mirror.bazel.build/github.com/mozilla/geckodriver/releases/download/v0.21.0/geckodriver-v0.21.0-macos.tar.gz",
            "https://github.com/mozilla/geckodriver/releases/download/v0.21.0/geckodriver-v0.21.0-macos.tar.gz",
        ],
    )
def org_seleniumhq_py():
    """Python selenium 3.14.0 source archive with a custom BUILD file."""
    http_archive(
        name = "org_seleniumhq_py",
        build_file = str(Label("//build_files:org_seleniumhq_py.BUILD")),
        sha256 = "f9ca21919b564a0a86012cd2177923e3a7f37c4a574207086e710192452a7c40",
        strip_prefix = "selenium-3.14.0",
        urls = [
            "https://files.pythonhosted.org/packages/af/7c/3f76140976b1c8f8a6b437ccd1f04efaed37bdc2600530e76ba981c677b9/selenium-3.14.0.tar.gz",
        ],
    )
def org_seleniumhq_selenium_api():
java_import_external(
name = "org_seleniumhq_selenium_api",
jar_sha256 = "1fc941f86ba4fefeae9a705c1468e65beeaeb63688e19ad3fcbda74cc883ee5b",
jar_urls | |
%s!=%s",
binascii.hexlify(R9.PROTOCOL_VERSION_BYTES),
binascii.hexlify(protocolconf))
return False
b64payload = retdata[39:-8]
hashconf = self._get_md5_hash(b64payload)
hashorig = retdata[20:39]
if hashconf != hashorig:
_LOGGER.warning("CheckResp md5 %s!=%s", binascii.hexlify(hashorig), binascii.hexlify(hashconf))
return False
try:
cryptpayload = b64decode(b64payload)
except BaseException as ex:
_LOGGER.warning("CheckResp b64 %s %s", str(ex), binascii.hexlify(b64payload))
return False
try:
payload = self._cipher.decrypt(cryptpayload)
payload = R9._unpad(payload)
except BaseException as ex:
_LOGGER.warning("CheckResp decry %s %s", str(ex), binascii.hexlify(cryptpayload))
return False
try:
jsonstr = payload.decode('utf-8')
except BaseException as ex:
_LOGGER.warning("CheckResp decode %s %s", str(ex), binascii.hexlify(payload))
return False
try:
jsondec = json.loads(jsonstr)
except BaseException as ex:
_LOGGER.warning("CheckResp jsonp %s %s", str(ex), jsonstr)
return False
if not len(command_in_dict):
return jsondec
if "dps" not in jsondec or "1" not in jsondec["dps"]:
_LOGGER.warning("CheckResp struct %s", jsondec)
return False
if jsondec["dps"]["1"] != command_in_dict:
_LOGGER.warning("CheckResp command %s!=%s", command_in_dict, jsondec["dps"]["1"])
return False
return jsondec
def _check_ping_resp(self, retdata):
dictok = self._generic_check_resp(retdata, R9.PING_RESP_COMMAND)
if dictok:
return CD_RETURN_IMMEDIATELY, retdata
else:
return CD_CONTINUE_WAITING, None
    def _check_ask_last_resp(self, retdata):
        """Validate a reply to the unencrypted ask-last command.

        Returns (CD_RETURN_IMMEDIATELY, dict) once a reply is positively
        attributed to this device, otherwise (CD_CONTINUE_WAITING, None).
        """
        # Both status 0 and 1 are accepted for this command.
        dictok = self._generic_check_resp(retdata, R9.ASK_LAST_RESP_COMMAND, status_ok=[0, 1])
        if dictok:
            # Plain-text payload: strip 20-byte header and 8-byte trailer.
            payload = retdata[20:-8]
            try:
                jsonstr = payload.decode('utf-8')
            except BaseException as ex:
                _LOGGER.warning("CheckResp decode %s %s", str(ex), binascii.hexlify(payload))
                return CD_CONTINUE_WAITING, None
            # A literal "json obj" marker is treated as a positive reply from
            # this device (presumably a firmware quirk — TODO confirm).
            if jsonstr.find("json obj") >= 0:
                return CD_RETURN_IMMEDIATELY, {"devId": self._id}
            try:
                jsondec = json.loads(jsonstr)
            except BaseException as ex:
                _LOGGER.warning("CheckResp jsonp %s %s", str(ex), jsonstr)
                return CD_CONTINUE_WAITING, None
            # Accept only if the reply identifies this device by devId or gwId.
            if ("devId" in jsondec and jsondec['devId'] == self._id) or\
               ("gwId" in jsondec and jsondec['gwId'] == self._id):
                return CD_RETURN_IMMEDIATELY, jsondec
        return CD_CONTINUE_WAITING, None
    async def ask_last(self, timeout=-1, retry=2):
        """!
        Sends ping to R9 object to get last command. This command is sent unencrypted
        @param timeout: [int] timeout to be used in TCP communication (optional). If not specified, the timeout specified when constructing the R9 object will be used
        @param retry: [int] Number of retries to make if no device is found (optional)
        @return [dict|NoneType] On successful send, the decoded confirmation dict obtained by R9 device is returned. Otherwise return value is None
        """
        # Raw JSON bytes (not a dict) bypass the md5+base64 path in
        # _get_payload_bytes, so the frame carries the payload unencrypted.
        pld = self._get_payload_bytes(R9.ASK_LAST_COMMAND, self._get_ask_last_bytes())
        return await self._tcp_protocol(pld, self._check_ask_last_resp, timeout, retry)
    async def ping(self, timeout=-1, retry=2):
        """!
        Sends ping to R9 object to see if it is online
        @param timeout: [int] timeout to be used in TCP communication (optional). If not specified, the timeout specified when constructing the R9 object will be used
        @param retry: [int] Number of retries to make if no device is found (optional)
        @return [bytes|NoneType] On successful send, bytes got from R9 are returned; None otherwise.
        """
        # An empty dict produces an empty payload: the ping frame is just the
        # header/CRC framing built by _get_payload_bytes.
        pld = self._get_payload_bytes(R9.PING_COMMAND, {})
        return await self._tcp_protocol(pld, self._check_ping_resp, timeout, retry)
def _check_study_resp(self, retdata):
dictok = self._generic_check_resp(retdata, R9.STUDY_RESP_COMMAND, "study")
if dictok:
return CD_RETURN_IMMEDIATELY, dictok
else:
return CD_CONTINUE_WAITING, None
def _check_study_key_resp(self, retdata):
dictok = self._generic_check_resp(retdata, R9.STUDY_KEY_RESP_1_COMMAND)
if dictok:
return CD_RETURN_IMMEDIATELY, retdata
else:
return CD_CONTINUE_WAITING, None
def _check_study_exit_resp(self, retdata):
dictok = self._generic_check_resp(retdata, R9.STUDY_EXIT_RESP_COMMAND, "study_exit")
if dictok:
return CD_RETURN_IMMEDIATELY, dictok
else:
return CD_CONTINUE_WAITING, None
    async def emit_ir(self, keybytes, timeout=-1, retry=3):
        """!
        Sends ir to the R9 device
        @param keybytes: [bytes] key to be emitted by R9 device. The key should be a byte object that represents lirc/arduino format array of little-endian shorts.
        This is the same format obtained with the learning process
        @param timeout: [int] timeout to be used in TCP communication (optional). If not specified, the timeout specified when constructing the R9 object will be used
        @param retry: [int] Number of retries to make if no device is found (optional)
        @return [bytes|NoneType] On successful send, the array of bytes obtained by R9 device is returned. Otherwise return value is None
        """
        # The key travels base64-encoded inside dps["7"] of the study-key dict.
        pld = self._get_payload_bytes(R9.STUDY_KEY_COMMAND, self._get_study_key_dict(keybytes))
        return await self._tcp_protocol(pld, self._check_study_key_resp, timeout, retry)
    async def enter_learning_mode(self, timeout=-1, retry=3):
        """!
        Puts R9 in learning mode
        @param timeout: [int] timeout to be used in TCP communication (optional). If not specified, the timeout specified when constructing the R9 object will be used
        @param retry: [int] Number of retries to make if no device is found (optional)
        @return [dict|NoneType] On successful send, the decoded confirmation dict obtained by R9 device is returned. Otherwise return value is None
        """
        pld = self._get_payload_bytes(R9.STUDY_COMMAND, self._get_study_dict())
        return await self._tcp_protocol(pld, self._check_study_resp, timeout, retry)
    async def exit_learning_mode(self, timeout=-1, retry=3):
        """!
        Exits R9 learning mode
        @param timeout: [int] timeout to be used in TCP communication (optional). If not specified, the timeout specified when constructing the R9 object will be used
        @param retry: [int] Number of retries to make if no device is found (optional)
        @return [dict|NoneType] On successful send, the decoded confirmation dict obtained by R9 device is returned. Otherwise return value is None
        """
        pld = self._get_payload_bytes(R9.STUDY_EXIT_COMMAND, self._get_study_exit_dict())
        return await self._tcp_protocol(pld, self._check_study_exit_resp, timeout, retry)
def _check_learned_key(self, retdata):
dictok = self._generic_check_resp(retdata, R9.LEARNED_COMMAND, "")
if dictok:
_LOGGER.debug("Learned dict %s", dictok)
if "dps" not in dictok or "2" not in dictok["dps"]:
_LOGGER.warning("CheckResp not2 %s", dictok)
return CD_ABORT_AND_RETRY, None
try:
keydec = b64decode(dictok["dps"]["2"].encode())
except BaseException as ex:
_LOGGER.warning("CheckResp invalidkey %s %s", dictok, str(ex))
return CD_ABORT_AND_RETRY, None
return CD_RETURN_IMMEDIATELY, keydec
else:
return CD_CONTINUE_WAITING, None
    async def get_learned_key(self, timeout=30):
        """!
        Waits for a key learned by the R9 device while it is in learning mode
        @param timeout: [int] timeout to be used in TCP communication (optional). Default value is 30 seconds. If awaited, this method will block until a key is not received or
        timeout seconds have been passed
        @return [bytes|NoneType] On successful key reception, the byte object representing the learned key is returned. this can be used with emit_ir function for future key sending. It returns
        None on error or on timeout (no key was pressed/detected)
        """
        # No payload is sent: we only listen for the spontaneous LEARNED packet.
        return await self._tcp_protocol(None, self._check_learned_key, timeout, 1)
    async def _tcp_protocol(self, data, check_data_fun, timeout=-1, retry=1):
        """Core TCP send/receive loop shared by every R9 command.

        Connects, optionally writes *data*, then reads packets and feeds them
        to *check_data_fun* until it returns CD_RETURN_IMMEDIATELY (result is
        returned), CD_ABORT_AND_RETRY (restart the attempt), or the timeout
        elapses. CD_ADD_AND_CONTINUE_WAITING accumulates partial results that
        are returned as a list when the timeout expires. Returns None if all
        *retry* attempts fail.
        """
        lstdata = []
        if timeout < 0:
            # Negative timeout means "use the instance default".
            timeout = self._timeout
        for _ in range(retry):
            try:
                passed = 0
                starttime = time.time()
                if await asyncio.wait_for(self._init_connection(), timeout):
                    if data:
                        self._writer.write(data)
                        await self._writer.drain()
                        self._contime = time.time()
                    # Sequence number advances once per attempt, sent or not.
                    self._pktnum += 1
                    while passed < timeout:
                        try:
                            rec_data = await asyncio.wait_for(self._reader.read(4096), timeout-passed)
                            # _LOGGER.info("Received[%s:%d][%d] %s",*self._hp,len(rec_data),binascii.hexlify(rec_data))
                            rv, rec_data = check_data_fun(rec_data)
                            if rv == CD_RETURN_IMMEDIATELY:
                                return rec_data
                            elif rv == CD_ABORT_AND_RETRY:
                                break
                            elif rv == CD_ADD_AND_CONTINUE_WAITING:
                                lstdata.append(rec_data)
                        except asyncio.TimeoutError:
                            _LOGGER.warning("Protocol[%s:%d] timeout", *self._hp)
                            break
                        passed = time.time()-starttime
                    if lstdata:
                        return lstdata
                    elif not data:
                        # Listen-only mode (e.g. waiting for a learned key):
                        # nothing to resend, so do not retry.
                        break
            except asyncio.TimeoutError:
                _LOGGER.warning("Protocol[%s:%d] connecting timeout", *self._hp)
                await self.destroy_connection()
            except BaseException as ex:
                _LOGGER.warning("Protocol[%s:%d] error %s", *self._hp, str(ex))
                await self.destroy_connection()
        await self.destroy_connection()
        return None
def _prepare_payload(self, dictjson):
txtjs = json.dumps(dictjson)
_LOGGER.debug("Send Schema (%d) %s", len(txtjs), txtjs)
txtjs = R9._pad(txtjs).encode()
crypted_text = self._cipher.encrypt(txtjs)
_LOGGER.debug("Cipher (%d) %s", len(crypted_text), binascii.hexlify(crypted_text).decode('utf-8'))
cifenc = b64encode(crypted_text)
_LOGGER.debug("B64 cipher (%d) %s", len(cifenc), cifenc.decode('utf-8'))
return cifenc
def _generic_fill_dict(self, filld):
filld["devId"] = self._id
filld['t'] = int(time.time())
filld['uid'] = self._uid
return filld
    def _get_payload_bytes(self, command, filled_dict):
        """Frame a command into a complete wire packet.

        Payload selection: falsy -> empty; dict -> encrypted+base64 payload
        prefixed with its md5 signature; anything else is sent as raw bytes.
        Frame layout: 0x000055AA | seq (u32 BE) | command (u32 BE) |
        length (u32 BE) | payload | crc32 (u32 BE) | 0x0000AA55.
        """
        if not filled_dict:
            pldall = bytes()
        elif isinstance(filled_dict, dict):
            pld = self._prepare_payload(filled_dict)
            md5bytes = self._get_md5_hash(pld)
            pldall = md5bytes+pld
        else:
            pldall = filled_dict
        # Declared length covers payload + crc + trailer (16 - 8 bytes extra).
        ln = len(pldall)+16-8
        docrc = b'\x00\x00\x55\xAA' + struct.pack('>I', self._pktnum) + struct.pack('>I', command) + struct.pack('>I', ln) + pldall
        crcbytes = struct.pack('>I', R9.crc32(docrc))
        complete = docrc + crcbytes + b'\x00\x00\xAA\x55'
        _LOGGER.debug("Comp packet (%d) %s", len(complete), binascii.hexlify(complete).decode('utf-8'))
        return complete
def _get_study_key_dict(self, keybytes):
R9.STUDY_KEY_DICT["dps"]["7"] = b64encode(keybytes).decode('utf8')
return self._generic_fill_dict(R9.STUDY_KEY_DICT)
    def _get_study_dict(self):
        """Build the command dict for entering learning mode.

        NOTE(review): _generic_fill_dict mutates its argument, so this writes
        devId/t/uid into the shared class-level R9.STUDY_DICT template.
        """
        return self._generic_fill_dict(R9.STUDY_DICT)
def _get_ask_last_bytes(self):
R9.ASK_LAST_DICT["devId"] = self._id
R9.ASK_LAST_DICT["gwId"] = self._id
return json.dumps(R9.ASK_LAST_DICT).encode()
    def _get_study_exit_dict(self):
        """Build the command dict for leaving learning mode.

        NOTE(review): _generic_fill_dict mutates its argument, so this writes
        devId/t/uid into the shared class-level R9.STUDY_EXIT_DICT template.
        """
        return self._generic_fill_dict(R9.STUDY_EXIT_DICT)
def _get_md5_hash(self, payload_bytes):
preMd5String = b'data=' + payload_bytes + b'||lpv=' + R9.PROTOCOL_VERSION_BYTES + b'||' + self._key
m = md5()
m.update(preMd5String)
# print(repr(m.digest()))
hexdigest = m.hexdigest()
s = hexdigest[8:][:16]
_LOGGER.debug("Computed md5 %s", s)
return R9.PROTOCOL_VERSION_BYTES+s.encode()
if __name__ == '__main__':  # pragma: no cover
    # Ad-hoc manual test harness; each coroutine exercises one R9 command.
    import sys
    import logging
    async def testFake(n):
        # Dummy coroutine: logs a counter once per second for n seconds.
        for i in range(n):
            _LOGGER.debug("Counter is %d", i)
            await asyncio.sleep(1)
    async def ping_test(*args):
        # args: [..., host, id, key] — check that the device answers a ping.
        a = R9((args[2], DEFAULT_PORT), args[3], args[4])
        rv = await a.ping()
        if rv:
            _LOGGER.info("Ping OK %s", binascii.hexlify(rv))
        else:
            _LOGGER.warning("Ping failed")
        await a.destroy_connection()
    async def ask_last_test(*args):
        # Query the device for its last received command.
        a = R9((args[2], DEFAULT_PORT), args[3], args[4])
        rv = await a.ask_last()
        if rv:
            _LOGGER.info("Ask last OK %s", rv)
        else:
            _LOGGER.warning("Ask last failed")
        await a.destroy_connection()
    async def discovery_test(*args):
        # Broadcast discovery; args[2] is the timeout in seconds.
        rv = await R9.discovery(int(args[2]))
        if rv:
            _LOGGER.info("Discovery OK %s", rv)
        else:
            _LOGGER.warning("Discovery failed")
    async def emit_test(*args):
        # Emit an IR key given either as hex or as base64 (args[5]).
        import re
        mo = re.search('^[a-fA-F0-9]+$', args[5])
        if mo:
            payload = binascii.unhexlify(args[5])
        else:
            payload = b64decode(args[5])
        a = R9((args[2], DEFAULT_PORT), args[3], args[4])
        rv = await a.emit_ir(payload)
        if rv:
            _LOGGER.info("Emit OK %s", binascii.hexlify(rv).decode('utf-8'))
        else:
            _LOGGER.warning("Emit failed")
        await a.destroy_connection()
| |
# Copyright 2018 University of Groningen
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Contains unittests for vermouth.ismags.
"""
# no-member because module networkx does indeed have a member isomorphism;
# redefined-outer-name because pylint does not like fixtures;
# protected-access because it's tests.
# pylint: disable=no-member, redefined-outer-name, protected-access
from pprint import pprint
from time import perf_counter
from hypothesis import given, note, settings, event
import hypothesis.strategies as st
from hypothesis_networkx import graph_builder
import networkx as nx
import pytest
import vermouth.ismags
from vermouth.graph_utils import categorical_maximum_common_subgraph as MCS
from .helper_functions import make_into_set
def basic_molecule(node_data, edge_data=None):
    """
    Build a simple Molecule from a sequence of per-node attribute dicts and
    an optional ``{(idx, jdx): attrs}`` edge mapping.
    """
    if edge_data is None:
        edge_data = {}
    molecule = vermouth.Molecule()
    for index, attrs in enumerate(node_data):
        molecule.add_node(index, **attrs)
    for (first, second), attrs in edge_data.items():
        molecule.add_edge(first, second, **attrs)
    return molecule
@pytest.fixture(params=[
    (  # path graph with a name-based symmetry (a-a-b-b-a-a)
        [(0, dict(name='a')),
         (1, dict(name='a')),
         (2, dict(name='b')),
         (3, dict(name='b')),
         (4, dict(name='a')),
         (5, dict(name='a'))],
        [(0, 1), (1, 2), (2, 3), (3, 4), (4, 5)]
    ),
    (  # 4-cycle, no node attributes
        range(1, 5),
        [(1, 2), (2, 4), (4, 3), (3, 1)]
    ),
    (  # 6-cycle with pendant paths
        [],
        [(0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 0), (0, 6), (6, 7),
         (2, 8), (8, 9), (4, 10), (10, 11)]
    ),
    (  # small tree
        [],
        [(0, 1), (1, 2), (1, 4), (2, 3), (3, 5), (3, 6)]
    ),
    (  # another small tree
        [],
        [(0, 1), (0, 5), (1, 2), (1, 3), (1, 4), (1, 6), (2, 7)]
    ),
    (  # highly symmetric reference graph
        [],
        nx.petersen_graph().edges,
    ),
    (
        # Gnarly edgecase from Fabian. Coupling node 4 to 8 mean that 5 and 9
        # are no longer equivalent, pushing them in their own partitions.
        # However, {5, 9} was considered equivalent to {13, 17}, which is *not*
        # taken into account in the second refinement, tripping a (former)
        # assertion failure. Note that this is actually the minimal failing
        # example.
        [],
        [(0, 3), (0, 4), (4, 5), (0, 8), (8, 9), (3, 12), (12, 13), (3, 16), (16, 17),]
    )
])
def graphs(request):
    """
    Some simple, symmetric graphs.

    Each param is a (nodes, edges) pair; nodes may be bare ids or
    (id, attrs) tuples, and may be empty when the edges imply all nodes.
    """
    nodes, edges = request.param
    graph = nx.Graph()
    graph.add_nodes_from(nodes)
    graph.add_edges_from(edges)
    return graph
def test_symmetric_self_isomorphism(graphs):
    """
    With symmetry reduction enabled, matching a graph against itself must
    yield exactly one isomorphism: the identity mapping.
    """
    matcher = vermouth.ismags.ISMAGS(graphs, graphs)
    found = make_into_set(matcher.find_isomorphisms(True))
    identity = make_into_set([{node: node for node in graphs}])
    assert found == identity
    reference = nx.isomorphism.GraphMatcher(graphs, graphs)
    nx_found = make_into_set(reference.isomorphisms_iter())
    assert found <= nx_found
def test_asymmetric_self_isomorphism(graphs):
    """
    Without symmetry reduction, ISMAGS must agree exactly with the networkx
    reference matcher on graph-to-itself isomorphisms.
    """
    matcher = vermouth.ismags.ISMAGS(graphs, graphs)
    ours = make_into_set(matcher.find_isomorphisms(False))
    reference = nx.isomorphism.GraphMatcher(graphs, graphs)
    theirs = make_into_set(reference.isomorphisms_iter())
    assert ours == theirs
def test_broken_edgecase():
    """
    In this edgecase the ordering of the nodes matters for the symmetries
    found. This is a bad thing. It happens, because _refine_node_partitions in
    _couple_nodes does *not* switch node orders, causing it to produce an
    invalid coupling, losing out on a permutation.
    """
    graph = nx.Graph()
    nx.add_path(graph, range(5))
    graph.add_edges_from([(2, 5), (5, 6)])
    ismags = vermouth.ismags.ISMAGS(graph, graph)
    ismags_answer = list(ismags.find_isomorphisms(True))
    assert len(ismags_answer) == 1
    # Same topology, permuted node labels: the symmetry-reduced result must
    # still be a single mapping, independent of label order.
    graph = nx.relabel_nodes(graph, {0: 0, 1: 1, 2: 2, 3: 3, 4: 6, 5: 4, 6: 5})
    ismags = vermouth.ismags.ISMAGS(graph, graph)
    ismags_answer = list(ismags.find_isomorphisms(True))
    assert len(ismags_answer) == 1
# no-value-for-parameter because `draw` is not explicitely passed;
# no-member because module networkx does indeed have a member isomorphism.
# pylint: disable=no-value-for-parameter, no-member
# Keep generated graphs tiny so hypothesis cases stay fast.
MAX_NODES = 5
ATTRNAMES = ['attr1', 'attr2']
NODE_DATA = st.dictionaries(keys=st.sampled_from(ATTRNAMES),
                            values=st.integers(min_value=0, max_value=MAX_NODES))
# Subsets of attribute names used to build categorical node/edge matchers.
ATTRS = st.lists(st.sampled_from(ATTRNAMES), unique=True, min_size=0, max_size=2)
ISO_DATA = st.dictionaries(keys=st.sampled_from(ATTRNAMES),
                           values=st.integers(max_value=MAX_NODES, min_value=0))
# ISO_BUILDER generates graphs with node *and* edge attributes;
# MCS_BUILDER generates graphs with node attributes only.
ISO_BUILDER = graph_builder(node_data=ISO_DATA, min_nodes=0, max_nodes=MAX_NODES,
                            edge_data=ISO_DATA,
                            node_keys=st.integers(max_value=MAX_NODES, min_value=0))
MCS_BUILDER = graph_builder(node_data=ISO_DATA, min_nodes=0, max_nodes=5,
                            node_keys=st.integers(max_value=MAX_NODES, min_value=0))
@settings(deadline=500)
@given(subgraph=ISO_BUILDER, attrs=st.one_of(st.none(), ATTRS))
def test_hypo_symmetric_self_isomorphism(subgraph, attrs):
    """
    Property-based check: with symmetry reduction, matching a random graph
    against itself yields only the identity mapping.
    """
    if attrs is None:
        def node_match(n1, n2):
            return True
    else:
        node_match = nx.isomorphism.categorical_node_match(attrs, [None] * len(attrs))
    note(("Graph nodes", subgraph.nodes(data=True)))
    note(("Graph edges", subgraph.edges(data=True)))
    matcher = vermouth.ismags.ISMAGS(subgraph, subgraph,
                                     node_match=node_match,
                                     edge_match=node_match)
    result = make_into_set(matcher.find_isomorphisms(True))
    note(("Found", result))
    expected = make_into_set([{node: node for node in subgraph}])
    assert result == expected
@settings(deadline=500)
@given(graph=ISO_BUILDER, subgraph=ISO_BUILDER, attrs=st.one_of(st.none(), ATTRS))
def test_isomorphism_nonmatch(graph, subgraph, attrs):
    """
    Test against networkx reference implementation using graphs that are
    probably not subgraphs without considering symmetry.

    Timings are stored as positive elapsed durations. The previous
    ``x -= perf_counter()`` idiom stored *negative* durations, which inverted
    every ``<`` comparison below and made the hypothesis events report the
    slower implementation as the faster one.
    """
    if attrs is None:
        node_match = lambda n1, n2: True
    else:
        node_match = nx.isomorphism.categorical_node_match(attrs, [None]*len(attrs))
    note(("Graph nodes", graph.nodes(data=True)))
    note(("Graph edges", graph.edges(data=True)))
    note(("Subgraph nodes", subgraph.nodes(data=True)))
    note(("Subgraph edges", subgraph.edges(data=True)))
    ref_time = perf_counter()
    matcher = nx.isomorphism.GraphMatcher(graph, subgraph, node_match=node_match,
                                          edge_match=node_match)
    expected = make_into_set(matcher.subgraph_isomorphisms_iter())
    ref_time = perf_counter() - ref_time
    a_ism_time = perf_counter()
    ismags = vermouth.ismags.ISMAGS(graph, subgraph, node_match=node_match,
                                    edge_match=node_match)
    asymmetric = make_into_set(ismags.find_isomorphisms(False))
    a_ism_time = perf_counter() - a_ism_time
    s_ism_time = perf_counter()
    ismags = vermouth.ismags.ISMAGS(graph, subgraph, node_match=node_match,
                                    edge_match=node_match)
    symmetric = make_into_set(ismags.find_isomorphisms(True))
    s_ism_time = perf_counter() - s_ism_time
    note(("Symmetric", symmetric))
    note(("Asymmetric", asymmetric))
    note(("Expected", expected))
    if a_ism_time < ref_time:
        event('Asymmetric ISMAGS faster than reference')
    if s_ism_time < a_ism_time:
        event('Symmetric ISMAGS faster than asymmetric')
    if s_ism_time < ref_time:
        event('Symmetric ISMAGS faster than reference')
    assert asymmetric == expected
    assert symmetric <= asymmetric
    # Symmetry reduction may only shrink the answer when the subgraph has
    # non-trivial symmetries; cross-check with analyze_symmetry.
    if symmetric == asymmetric and expected:
        assert ismags.analyze_symmetry(subgraph,
                                       ismags._sgn_partitions,
                                       ismags._sge_colors) == ([], {})
    elif symmetric != asymmetric:
        assert ismags.analyze_symmetry(subgraph, ismags._sgn_partitions,
                                       ismags._sge_colors) != ([], {})
@settings(deadline=500)
@given(st.data())
def test_isomorphism_match(data):
    """
    Test against networkx reference implementation using graphs that are
    subgraphs without considering symmetry.

    Timings are stored as positive elapsed durations; the previous
    ``x -= perf_counter()`` idiom stored negative durations and inverted the
    "faster than" event comparisons below.
    """
    attrs = data.draw(st.one_of(st.none(), ATTRS))
    if attrs is None:
        node_match = lambda n1, n2: True
    else:
        node_match = nx.isomorphism.categorical_node_match(attrs, [None]*len(attrs))
    graph = data.draw(ISO_BUILDER)
    # Take a random node-induced subgraph so a match is guaranteed to exist.
    if graph:
        nodes = data.draw(st.sets(st.sampled_from(list(graph.nodes)),
                                  max_size=len(graph)))
    else:
        nodes = []
    subgraph = graph.subgraph(nodes)
    note(("Graph nodes", graph.nodes(data=True)))
    note(("Graph edges", graph.edges(data=True)))
    note(("Subgraph nodes", subgraph.nodes(data=True)))
    note(("Subgraph edges", subgraph.edges(data=True)))
    ref_time = perf_counter()
    matcher = nx.isomorphism.GraphMatcher(graph, subgraph, node_match=node_match,
                                          edge_match=node_match)
    expected = make_into_set(matcher.subgraph_isomorphisms_iter())
    ref_time = perf_counter() - ref_time
    a_ism_time = perf_counter()
    ismags = vermouth.ismags.ISMAGS(graph, subgraph, node_match=node_match,
                                    edge_match=node_match)
    asymmetric = make_into_set(ismags.find_isomorphisms(False))
    a_ism_time = perf_counter() - a_ism_time
    s_ism_time = perf_counter()
    ismags = vermouth.ismags.ISMAGS(graph, subgraph, node_match=node_match,
                                    edge_match=node_match)
    symmetric = make_into_set(ismags.find_isomorphisms(True))
    s_ism_time = perf_counter() - s_ism_time
    note(("Symmetric", symmetric))
    note(("Asymmetric", asymmetric))
    note(("Expected", expected))
    if a_ism_time < ref_time:
        event('Asymmetric ISMAGS faster than reference')
    if s_ism_time < a_ism_time:
        event('Symmetric ISMAGS faster than asymmetric')
    if s_ism_time < ref_time:
        event('Symmetric ISMAGS faster than reference')
    assert asymmetric == expected
    assert symmetric <= asymmetric
    if symmetric == asymmetric and expected:
        assert ismags.analyze_symmetry(subgraph,
                                       ismags._sgn_partitions,
                                       ismags._sge_colors) == ([], {})
    elif symmetric != asymmetric:
        assert ismags.analyze_symmetry(subgraph, ismags._sgn_partitions,
                                       ismags._sge_colors) != ([], {})
@settings(deadline=500)
@given(graph=MCS_BUILDER, subgraph=MCS_BUILDER, attrs=st.one_of(st.none(), ATTRS))
def test_mcs_nonmatch(graph, subgraph, attrs):
    """
    Test against networkx reference implementation using graphs that are
    probably not subgraphs without considering symmetry.

    Timings are stored as positive elapsed durations; the previous
    ``x -= perf_counter()`` idiom stored negative durations and inverted the
    "faster than" event comparisons below.
    """
    if attrs is None:
        node_match = lambda n1, n2: True
        attrs = []
    else:
        node_match = nx.isomorphism.categorical_node_match(attrs, [None]*len(attrs))
    note(("Graph nodes", graph.nodes(data=True)))
    note(("Graph edges", graph.edges(data=True)))
    note(("Subgraph nodes", subgraph.nodes(data=True)))
    note(("Subgraph edges", subgraph.edges(data=True)))
    ref_time = perf_counter()
    expected = make_into_set(MCS(graph, subgraph, attributes=attrs))
    ref_time = perf_counter() - ref_time
    a_ism_time = perf_counter()
    ismags = vermouth.ismags.ISMAGS(graph, subgraph, node_match=node_match)
    asymmetric = make_into_set(ismags.largest_common_subgraph(False))
    a_ism_time = perf_counter() - a_ism_time
    s_ism_time = perf_counter()
    ismags = vermouth.ismags.ISMAGS(graph, subgraph, node_match=node_match)
    symmetric = make_into_set(ismags.largest_common_subgraph(True))
    s_ism_time = perf_counter() - s_ism_time
    note(("Symmetric", symmetric))
    note(("Asymmetric", asymmetric))
    note(("Expected", expected))
    if a_ism_time < ref_time:
        event('Asymmetric ISMAGS faster than reference')
    if s_ism_time < a_ism_time:
        event('Symmetric ISMAGS faster than asymmetric')
    if s_ism_time < ref_time:
        event('Symmetric ISMAGS faster than reference')
    assert asymmetric == expected or not expected
    assert symmetric <= asymmetric
    # if symmetric == asymmetric and expected:
    #     assert ismags.analyze_symmetry(subgraph,
    #                                    ismags._sgn_partitions,
    #                                    ismags._sge_colors) == ([], {})
@settings(deadline=500)
@given(st.data())
def test_mcs_match(data):
"""
Test against networkx reference implementation using graphs that are
subgraphs without considering symmetry.
"""
attrs = data.draw(st.one_of(st.none(), ATTRS))
if attrs is None:
node_match = lambda n1, n2: True
attrs = []
else:
node_match = nx.isomorphism.categorical_node_match(attrs, [None]*len(attrs))
graph = data.draw(MCS_BUILDER)
if graph:
nodes = data.draw(st.sets(st.sampled_from(list(graph.nodes)),
max_size=len(graph)))
else:
nodes = []
subgraph = graph.subgraph(nodes)
note(("Graph nodes", graph.nodes(data=True)))
note(("Graph edges", graph.edges(data=True)))
note(("Subgraph nodes", subgraph.nodes(data=True)))
note(("Subgraph edges", subgraph.edges(data=True)))
ref_time = perf_counter()
expected = make_into_set(MCS(graph, subgraph, attributes=attrs))
ref_time -= perf_counter()
a_ism_time = perf_counter()
ismags = vermouth.ismags.ISMAGS(graph, subgraph, node_match=node_match)
asymmetric = make_into_set(ismags.largest_common_subgraph(False))
a_ism_time -= perf_counter()
s_ism_time = perf_counter()
ismags = vermouth.ismags.ISMAGS(graph, subgraph, node_match=node_match)
symmetric = make_into_set(ismags.largest_common_subgraph(True))
s_ism_time -= perf_counter()
note(("Symmetric", symmetric))
note(("Asymmetric", asymmetric))
note(("Expected", expected))
if a_ism_time < ref_time:
event('Asymmetric ISMAGS faster than reference')
if s_ism_time < a_ism_time:
event('Symmetric ISMAGS faster than asymmetric')
if s_ism_time < ref_time:
event('Symmetric ISMAGS | |
to encode information on where points are located.
Recall that an ordered pair has the form $(x,y)$. The first entry in the pair indicates how far the point lies from the origin along the x-axis, and the second entry indicates how far it lies from the origin along the y-axis.
Let's see a simple example for the ordered pair $(1,4)$.
# Plot the point (1,4) on a 0..5 grid; ax1/ax3 are blank spacers that
# center ax2 in the figure.
fig = plt.figure(figsize=(16,5))
ax1 = fig.add_subplot(1, 3, 1)
ax2 = fig.add_subplot(1, 3, 2)
ax3 = fig.add_subplot(1, 3, 3)
ax2.set_xticks(np.arange(-5,6)),ax2.set_yticks(np.arange(-5,6))
ax2.set_xlim(0,5)
ax2.set_ylim(0,5)
ax1.axis("Off"),ax2.axis("On"),ax3.axis("Off")
# Blue lines mark the coordinate axes through the origin.
ax2.axhline(y=0, color='blue')
ax2.axvline(x=0, color='blue')
ax2.text(5.1,0.1,"x-axis",fontsize=20)
ax2.text(0.1,5.1,"y-axis",fontsize=20)
ax2.grid(True)
x_value,y_value = 1,4
x_or,y_or = 0,0
ax2.scatter(x_value,y_value,color="black",s=120)
ax2.scatter(x_or,y_or,color="black",s=220)
ax2.text(x_value + 0.1,y_value + 0.5,"(" +str(x_value) + "," + str(y_value) + ")")
ax2.text(x_or + 0.1,y_or + 0.3,"origin")
# Dashed guide lines project the point onto each axis.
ax2.plot([-5,x_value], [y_value,y_value], color='green', marker='o', linestyle='dashed',
         linewidth=2, markersize=2)
ax2.plot([x_value,x_value], [-5,y_value], color='green', marker='o', linestyle='dashed',
         linewidth=2, markersize=2)
plt.show()
Notice why the order matters: the pair $(4,1)$ contains the same two numbers, yet it corresponds to a different point than $(1,4)$.
# Same plot as above but for (4,1), illustrating that the order of the
# coordinates changes the point.
fig = plt.figure(figsize=(16,5))
ax1 = fig.add_subplot(1, 3, 1)
ax2 = fig.add_subplot(1, 3, 2)
ax3 = fig.add_subplot(1, 3, 3)
ax2.set_xticks(np.arange(-5,6)),ax2.set_yticks(np.arange(-5,6))
ax2.set_xlim(0,5)
ax2.set_ylim(0,5)
ax1.axis("Off"),ax2.axis("On"),ax3.axis("Off")
ax2.axhline(y=0, color='blue')
ax2.axvline(x=0, color='blue')
ax2.text(5.1,0.1,"x-axis",fontsize=20)
ax2.text(0.1,5.1,"y-axis",fontsize=20)
ax2.grid(True)
x_value,y_value = 4,1
x_or,y_or = 0,0
ax2.scatter(x_value,y_value,color="black",s=120)
ax2.scatter(x_or,y_or,color="black",s=220)
ax2.text(x_value + 0.1,y_value + 0.5,"(" +str(x_value) + "," + str(y_value) + ")")
ax2.text(x_or + 0.1,y_or + 0.3,"origin")
# Dashed guide lines project the point onto each axis.
ax2.plot([-5,x_value], [y_value,y_value], color='green', marker='o', linestyle='dashed',
         linewidth=2, markersize=2)
ax2.plot([x_value,x_value], [-5,y_value], color='green', marker='o', linestyle='dashed',
         linewidth=2, markersize=2)
plt.show()
Let us take the table we computed previously for the relation
$$y = x +3$$
along with the ordered pairs we computed.
We can then represent the ordered pairs in the coordinate plane.
**Activity**
Use the widget below to see the relationship between the different ordered pairs and the points on the coordinate plane.
%matplotlib inline
# Slider drives the highlighted point on the graph of y = x + 3.
@interact(x_value=widgets.IntSlider(value=0,
                    min=0,
                    max=5,
                    step=1,
                    description='Value for x',
                    disabled=False,
                    continuous_update=False,
                    orientation='horizontal',
                    readout=True,
                    readout_format='d',
                    style =style
                    ))
def show_points(x_value):
    """Show x, y = x + 3 and the ordered pair, highlighting it on the plot."""
    x_values = np.array([0,1,2,3,4,5])
    y_values = x_values + 3
    fig = plt.figure()
    plt.subplots_adjust(left=14, bottom=0.2, right=16, top=1.5,
                        wspace=0.1, hspace=0.2)
    # Left panel: textual values for the selected x.
    ax1 = fig.add_subplot(1, 2, 1)
    ax1.text(0.1,0.8,"x = " + str(x_value),fontsize=20)
    ax1.text(0.1,0.6,"y = " + str(x_value) +"+ 3 = " + str(x_value + 3),fontsize=20)
    ax1.text(0.1,0.4,"Ordered pair (" + str(x_value) +"," + str(x_value + 3) + ")",fontsize=20)
    ax1.set_title("Values for x and y", fontsize=25)
    # Right panel: all six points, with the selected one in red.
    ax2 = fig.add_subplot(1, 2, 2)
    ax2.set_xticks(np.arange(-6,11)),ax2.set_yticks(np.arange(-6,11))
    ax2.set_xlim(0,6)
    ax2.set_ylim(0,9)
    ax1.axis("Off"),ax2.axis("On")
    ax2.axhline(y=0, color='blue')
    ax2.axvline(x=0, color='blue')
    ax2.text(6.5,0.2,"x-axis",fontsize=20)
    ax2.text(0.5,9.5,"y-axis",fontsize=20)
    ax2.grid(True)
    # for i in range(len(x_values)):
    #     ax2.text(x_values[i] - 0.5,y_values[i]-0.7,"(" + str(x_values[i]) + "," + str(y_values[i]) + ")")
    points = ax2.scatter(x_values,y_values,color="black",s=60)
    ax2.scatter(x_value,x_value + 3,color="red",s=120)
    #datacursor(points)
    plt.show()
### <h4>Conclusion</h4>
From this graph we conclude that the relation between $x$ and $y$ is linear. This makes sense given the equation is of the form
$$y = ax + b$$
where $a,b$ are integers and in this particular case, $a = 1, b =3$.
Points of particular interest are the intersections of the graph with the x-axis and with the y-axis. The former occurs exactly when $y = 0$, while the latter occurs when $x = 0$.
We observe that the graph does not cross the x-axis for the non-negative values of $x$ shown. It does, however, cross the y-axis at $x = 0$, namely at the point given by the ordered pair $(0,3)$.
# Create button and dropdown widget
def rerun_cell( b ):
    """Button callback: re-execute the notebook cell following this one."""
    display(Javascript('IPython.notebook.execute_cell_range(IPython.notebook.get_selected_index()+1,IPython.notebook.get_selected_index()+2)'))
# Shared widget styling and the (currently unused) entry-count selector.
style = {'description_width': 'initial'}
number_of_cat = 13
dropdown_options = [ str(i+1) for i in range(number_of_cat) ]
dropdown_widget = widgets.Dropdown( options = dropdown_options , value = '3' , description = 'Number of entries' , disabled=False,style=style )
categories_button = widgets.Button(button_style='info',description="Enter", layout=Layout(width='15%', height='30px'))
# Display widgets
#display(dropdown_widget)
#display(categories_button)
#categories_button.on_click( rerun_cell )
<h2 align='center'>Practice Area</h2>
<h4>Exercise</h4>
We will repeat a similar exercise as above, only this time, we will use a different linear relation.
$$y = 2x +4$$
Let us begin by building a simple table.
Answer the questions below to complete a similar table.
### Question 1
Knowing that $y = 2x + 4$, what is the value of $y$ when $x = 3$? In other words, what does $2(3) + 4$ equal to?
s = {'description_width': 'initial'}
from ipywidgets import interact_manual
def question_q(answer):
    """Grade the Question 1 selection (y = 2x + 4 at x = 3; correct: "10").

    Returns a feedback string for graded answers, or prints a prompt and
    returns None while the placeholder option is still selected.
    """
    if answer == "Select option":
        print("Click on the correct value of y.")
    elif answer == "10":
        return "Correct!"
    else:
        # The original branch tested `answer != "10" or answer != "Select Option"`,
        # which is always true (and misspells the option), so this is a plain else.
        return "You are close to the answer but need to improve your result.Recall 2(3) = 6. What does 6 + 4 equal to?"
# Wire the grader to a Select widget; re-graded on every selection change.
answer_q = interact(question_q,answer=widgets.Select(
                    options=["Select option","1",\
                             "10","3",\
                             "0"],
                    value='Select option',
                    description="y value",
                    disabled=False,
                    style=s
                ))
### Question 2
Knowing that $y = 2x + 4$, what is the value of $y$ when $x=0$?
s = {'description_width': 'initial'}
from ipywidgets import interact_manual
def question_p(answer):
    """Grade the Question 2 selection (y = 2x + 4 at x = 0; correct: "4").

    Returns a feedback string for graded answers, or prints a prompt and
    returns None while the placeholder option is still selected.
    """
    if answer == "Select option":
        print("Click on the correct value of y.")
    elif answer == "4":
        return "Correct!"
    else:
        # The original branch tested `answer != "4" or answer != "Select Option"`,
        # which is always true (and misspells the option), so this is a plain else.
        return "You are close to the answer but need to improve your result.Recall y = x + 4. What does 0 + 4 equal to?"
# Wire the grader to a Select widget; re-graded on every selection change.
answer_p = interact(question_p,answer=widgets.Select(
                    options=["Select option","-1",\
                             "10","4",\
                             "0"],
                    value='Select option',
                    description="y value",
                    disabled=False,
                    style=s
                ))
### Question 3
What is the ordered pair obtained when $x = 2$?
s = {'description_width': 'initial'}
from ipywidgets import interact_manual
def question_s(answer):
    """Grade the ordered-pair answer for y = 2x + 4 at x = 2.

    Returns feedback text for a graded answer, or None (after printing a
    prompt) while the placeholder option is still selected.
    """
    if answer == "Select option":
        print("Click on the correct ordered pair (x,y)")
    elif answer == "(2,8)":
        return "Correct!"
    else:
        # Original branch tested `answer != "(2,8)" or answer != "Select Option"`,
        # which is always true; a plain else expresses the intent.
        return "You are close to the answer but need to improve your result.We know y = 8 and x = 2. We also know an ordered pair is of the form (x,y)."
# Interactive selector for Question 3; answer_s keeps the widget handle so
# generate_tab below can inspect whether "Correct!" was reached.
answer_s = interact(question_s,answer=widgets.Select(
                    options=["Select option","(2,6)",\
                             "(2,8)","(8,2)",\
                             "(2,-2)"],
                    value='Select option',
                    description="Ordered pair (x,y)",
                    disabled=False,
                    style=s
                ))
def math_function(relation, x_val):
    """Evaluate the linear relation at x_val: op(Coef1 * x_val, Coef2).

    `relation` is a dict with numeric "Coef1"/"Coef2" and a binary
    callable under the "+" key (e.g. operator.add).
    """
    combine = relation["+"]
    return combine(relation["Coef1"] * x_val, relation["Coef2"])
def table_of_values_quad(range_val, relation):
    """Display a qgrid table of values for the relation y = Coef1*x + Coef2.

    Builds rows for x = 0..range_val with the substituted expression,
    the resulting y and the ordered pair, then shows it with q.show_grid
    (uses the notebook-level `q`, `grid_features` and `display`).
    """
    n_rows = range_val + 1
    blanks = ['' for _ in range(n_rows)]
    entries = [i + 1 for i in range(n_rows)]
    # Column label of the substituted-expression column, e.g. "y =2x + 4".
    rule_col = 'y =' + str(relation['Coef1']) + "x + " + str(relation['Coef2'])
    headers = ['Entry Number', 'Values for x', rule_col,
               'Values for y', 'Ordered pairs']
    student_df = pd.DataFrame(data={'Entry Number': entries,
                                    'Values for x': blanks,
                                    rule_col: blanks,
                                    'Values for y': blanks,
                                    'Ordered pairs': blanks},
                              columns=headers)
    student_df.set_index('Entry Number', inplace=True)
    x_values = np.array(np.arange(n_rows))
    y_values = math_function(relation, x_values)
    student_df["Values for x"] = x_values
    student_df["Values for y"] = y_values
    student_df[rule_col] = ["y = " + str(relation['Coef1']) + "(" +
                            str(x_values[i]) + ")" + "+" + str(relation['Coef2'])
                            for i in range(len(x_values))]
    student_df["Ordered pairs"] = [(x_values[i], y_values[i])
                                   for i in range(n_rows)]
    q_student_df = q.show_grid(student_df, grid_options=grid_features)
    display(q_student_df)
def generate_tab(value):
    """Show the filled-in table for y = 2x + 4 when the toggle is pressed.

    If any of the three question widgets has not reached "Correct!", a
    hint is printed first; the table is displayed either way.
    """
    if value == True:
        widgets_to_check = (answer_p, answer_q, answer_s)
        all_correct = all("Correct!" in str(w.widget.children)
                          for w in widgets_to_check)
        if not all_correct:
            print("At least one of your answers is not correct. Compare your answers with the table.")
        table_of_values_quad(4, {"Coef1": 2, "Coef2": 4, "+": operator.add})
# Toggle button that triggers generate_tab; pressing it reveals the table.
interact(generate_tab,value = widgets.ToggleButton(
                value=False,
                description='Generate Table',
                disabled=False,
                button_style='info', # 'success', 'info', 'warning', 'danger' or ''
                tooltip='Description',
                icon='check'
            ));
### Question 4
Using the information in the table and the widget below, identify and select which ordered pairs belong to the relation
$$y = 2x + 4$$
Select one of the four following options. The correct answer will plot all points, the incorrect answer will print a message.
def plot_answer(relation):
    """Scatter-plot the points x = 0..4 of y = Coef1*x + Coef2.

    `relation` is a dict with numeric "Coef1" and "Coef2"; axis limits are
    scaled so all five points are visible.
    """
    x_values = np.array([0,1,2,3,4])
    y_values = relation["Coef1"]*x_values + relation["Coef2"]
    fig = plt.figure()
    # NOTE(review): subplots_adjust left/right are normally fractions in
    # [0, 1]; left=14 / right=16 looks suspicious — confirm intended margins.
    plt.subplots_adjust(left=14, bottom=0.2, right=16, top=1.5,
                    wspace=0.1, hspace=0.2)
    ax2 = fig.add_subplot(1, 1, 1)
    ax2.set_xticks(np.arange(-6,11))
    ax2.set_yticks(np.arange(-6,relation["Coef1"]*x_values[-1] + relation["Coef2"]+2))
    ax2.set_xlim(0,5)
    ax2.set_ylim(0,relation["Coef1"]*x_values[-1] + relation["Coef2"]+1)
    ax2.text(x_values[-1] + 1,0.001,"x-axis",fontsize=20)
    ax2.text(0.1,y_values[-1] + 1,"y-axis",fontsize=20)
    ax2.grid(True)
    # for i in range(len(x_values)):
    #     ax2.text(x_values[i] - 0.5,y_values[i]-0.7,"(" + str(x_values[i]) + "," + str(y_values[i]) + ")")
    points = ax2.scatter(x_values,y_values,color="black",s=60)
    #ax2.scatter(x_value,x_value + 3,color="red",s=120)
    #datacursor(points)
    plt.show()
def choose_points(value):
    """Check the selected ordered-pair option and plot when correct.

    The only accepted option is "(3,10),(5,14),(0,4)"; anything else
    prints a retry message and returns None.
    """
    if value != "(3,10),(5,14),(0,4)":
        print("Those do not look like the ordered pairs in our table. Try again.")
        return
    print("Correct!")
    plot_answer({"Coef1": 2, "Coef2": 4, "+": operator.add})
# Radio-button selector for Question 4; the matching option triggers the plot.
interact(choose_points,
     value = widgets.RadioButtons(
    options=[
        "(3,11),(5,11),(2,8)",\
        "(0,0),(1,2),(2,2)",\
        "(3,10),(5,14),(0,4)",\
        "(10,10),(10,8),(1,6)"],
#     value='pineapple',
    description='Ordered Pairs:',
    disabled=False,
    style = style
));
### Question 5: Conclusions
What can you conclude from the table above? Use the following statements to guide your answer and add any other observations you make.
| Statement |
|-----------|
|The relation between $x$ and $y$ is linear|
|The graph crosses the $y$-axis at the ordered pair ... |
|The graph crosses the $x$-axis at the ordered pair ... |
# Free-text area for the Question 5 conclusions.
emma1_text = widgets.Textarea( value='', placeholder='Write your answer here. Press Record Answer when you finish.', description='', disabled=False , layout=Layout(width='100%', height='75px') )
emma1_button = | |
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 06 09:49:33 2015
@author: JMS
"""
import random
from abc import ABCMeta, abstractmethod
import numpy as np
import pandas as pd
from scipy.linalg import orth
from occupancy_map import Map,ZMap
from ptp import LocalArea,PointToPoint,matrixrank, anglebetween
from math import degrees
import json
import threading
from multiprocessing.pool import ThreadPool
from contextlib import closing
import scipy.spatial as spt
class PointType:
    """Identity categories for landmarks (values stored in the TYPE column)."""
    calibrated = "CALIBRATED" # Points that have both map coordinates
    non_calibrated = "NON_CALIBRATED" # Points with map1 coordinates but not with map2.
    target = "TARGET" # Points with map1 but that only can be predicted to map2.
    acquired = "ACQUIRED" # Points with only map2 but with no information about map1
    unknown = "NA" # No category information available
class State:
    """
    The class State is a special feature that does not correspond to the PointType.
    The PointType is a static situation that gives identity to the point.
    The state is something temporary that can be altered.
    """
    protected = "PROTECTED" # Point has been manually overwritten and cannot be modified
    blocked = "BLOCKED" # Point temporarily excluded — TODO confirm exact semantics
    zeroed = "" # No special state set
class virtualGridMap(object):
"""
A virtual map is a class that gets all the information of the grid and tries
to give a prediction of unknown positions.
It considers two homologous maps and establishes correspondences between them.
E.g.:
- Given a LM coordinate, returns the corresponding estimation of the SEM (not possible in LM map)
- Given a letter returns the corresponding coordinates of the estimated center
- Given a coordinate, estimate the letter where we are going to land
Representation of the points
We have selected 4 different kind of points:
- Non Calibrated NC: points coming from LM without assigned correspondence, used for calibration
- Calibrated C: points coming from LM, with the correspondent SEM coordinates, used for calibration
- Targets T: points coming from LM used for targeting
- Acquisition Acq: points acquired on the fly
Instead of saving the points in 4 different lists, we are saving all of them in one array and then
saving the indices for each categorie (Ind).
That allows having points belonging to more than one categorie, or easily to introduce
more category points.
Could be a 2D or a 3D
"""
__metaclass__ = ABCMeta
warning_transformation =""
map_lock = threading.Lock()
def __init__(self,logger, force2D =False, parent = None):
    """Create an empty virtual grid map.

    Args:
        logger: logger used for error/status reporting.
        force2D: when True, only X/Y coordinate columns are used for both
            maps; otherwise Z columns are included as well.
        parent: unused here — presumably a GUI/owner handle; TODO confirm.
    """
    self.logger = logger
    self.current_pos = "" # Landmark reference
    self.last_point_added = ""
    # LANDMARK
    # Dataframe instead of class reason it is because the
    # porting to a file is immediate and the managing of lists of arrays too.
    # In design terms, having a Landmark class would be much better, but in practical terms
    # slows down. The following is a mixture between class and database, linked by the landmark ID
    self.columns = [ 'LANDMARK','TYPE', 'STATE',
                 'UPDATE_ORIGIN','UPDATE_DESTINY','UPDATE_TAG',
                 'COORDS_ORIGIN_X', 'COORDS_ORIGIN_Y', 'COORDS_ORIGIN_Z',
                 'COORDS_DESTINY_X', 'COORDS_DESTINY_Y', 'COORDS_DESTINY_Z']
    #
    self.rms_avg = []
    self.rms_sd = []
    # Per-observation coordinate tables (many rows per landmark, fused by
    # updateLandmark using the BELIEF weight).
    self.columns_corigin = ['LANDMARK','BELIEF','COORDS_ORIGIN_X', 'COORDS_ORIGIN_Y', 'COORDS_ORIGIN_Z']
    self.columns_cdestiny =['LANDMARK','BELIEF','COORDS_DESTINY_X', 'COORDS_DESTINY_Y', 'COORDS_DESTINY_Z']
    if(force2D):
        self.col_dim_coords_origin = ['COORDS_ORIGIN_X','COORDS_ORIGIN_Y']
        self.col_dim_coords_destiny = ['COORDS_DESTINY_X','COORDS_DESTINY_Y']
    else:
        self.col_dim_coords_origin = ['COORDS_ORIGIN_X', 'COORDS_ORIGIN_Y','COORDS_ORIGIN_Z']
        self.col_dim_coords_destiny = ['COORDS_DESTINY_X', 'COORDS_DESTINY_Y','COORDS_DESTINY_Z']
    self.col_reset = ['RMS_AVG','RMS_SD']
    self.map_df = pd.DataFrame(columns=self.columns)
    self.cor_df = pd.DataFrame(columns=self.columns_corigin)
    self.cde_df = pd.DataFrame(columns=self.columns_cdestiny)
    self.list_local_area = {} # every point can have a radius of action
    # List of error associated to each point
    self.list_errorOrigin = {}
    self.list_errorDestiny = {}
    self.map_exists = False
    self.map_id = "map1_map2"
    # Point-to-point transforms: calibrated-only and global estimators.
    self.CalibratedPtp = PointToPoint()
    self.GlobalPtp = PointToPoint()
    # Occupancy map
    self.grid_map = Map(1)
    self.orientation = 0
@staticmethod
def dist_microns(x, y):
return np.sqrt(np.sum((x - y) ** 2)) * 1000.0 ## Error in um
@staticmethod
def dist(x, y):
if (x[0] == np.inf or x[1] == np.inf or y[0] == np.inf or y[1] == np.inf):
return np.inf
else:
return np.sqrt(np.sum((x - y) ** 2))
def checkValidSystem(self, calculateOrientation = False):
    """Check whether the calibrated points can define a 2-D transform.

    Returns True when the (x, y) destiny coordinates of the calibrated
    landmarks have matrix rank >= 2, otherwise False. The original fell
    through and returned an implicit None in the failure case.
    """
    # Get all calibration points
    coordsOrigin, coordsDestiny, pids = self.getLandmarksByType(PointType.calibrated)
    coordsDestiny = coordsDestiny[:,0:2]
    if(matrixrank(coordsDestiny,1)>=2):
        # TODO : calculate orientation based on data
        # A = orth(coordsDestiny)
        # angle = anglebetween(A[0],[1,0])
        #if(calculateOrientation):
        #    self.orientation = np.rad2deg(angle) # this angle has to b
        return True
    return False
def unit_vector(vector):
""" Returns the unit vector of the vector. """
eps = np.finfo(np.float32).eps
if (np.sum(np.linalg.norm(vector)) < eps):
return vector
return vector / np.linalg.norm(vector)
def collinear(p0, p1, p2):
x1, y1 = p1[0] - p0[0], p1[1] - p0[1]
x2, y2 = p2[0] - p0[0], p2[1] - p0[1]
val = x1 * y2 - x2 * y1
return abs(val) < 1e-2
def loadMap(self,dict_map):
    """Restore the three landmark tables from a dict of JSON strings.

    `dict_map` holds serialized tables under 'MAP', 'COR' and 'CDE'
    (presumably written by a save counterpart not visible here — TODO
    confirm). Each restored landmark gets a fresh default LocalArea;
    per-point local-area state is not restored.
    """
    # Split in 3 dictionaries
    stmap = dict_map['MAP']
    stcor = dict_map['COR']
    stcde = dict_map['CDE']
    self.map_df = pd.read_json(stmap)
    self.cor_df = pd.read_json(stcor)
    self.cde_df = pd.read_json(stcde)
    for index, row in self.map_df.iterrows():
        p_id = str(row['LANDMARK'])
        self.list_local_area[p_id] = LocalArea()
def isEmpty(self, arr):
    """True when `arr` is empty, zero-sized, or contains an infinity."""
    arr = np.array(arr)
    if not np.any(arr.shape) or arr.size == 0:
        return True
    return bool(np.any(np.isinf(arr.astype(float))))
def getTotalLandmarks(self):
return len(self.map_df)
def getLandmarkIds(self):
"""
Return available ids
"""
return list(self.map_df.LANDMARK);
def getCoordsFromLandmarks(self,ilids,map_value):
list_coords = []
for el in ilids:
coords = self.getLandmark(el, map_value)
if(not np.any(np.isinf(coords))):
list_coords.append(coords)
return np.array(list_coords)
def getLandmarksByType(self, type):
"""
ACK
"""
df2 = self.map_df.loc[self.map_df['TYPE'] == type]
point_ids = list(df2['LANDMARK'])
coordsOrigin = self.getCoordsFromLandmarks(point_ids,1)
coordsDestiny = self.getCoordsFromLandmarks(point_ids, 2)
return coordsOrigin,coordsDestiny,point_ids
def getLandmarkIDsByType(self, type):
"""
ACK
"""
df2 = self.map_df.loc[self.map_df['TYPE'] == type]
point_ids = list(df2['LANDMARK'])
return point_ids
def checkState(self,point_id,state):
df2 = self.map_df.loc[self.map_df['STATE'] == state] # Get all points in state
return np.any(df2['LANDMARK'].isin([point_id])); # Return true if any of the points is in the list
def isin(self,point_id):
return np.any(self.map_df['LANDMARK'].isin([point_id]));
def checkType(self,point_id,type):
df2 = self.map_df.loc[self.map_df['TYPE'] == type] # Get all points by type
return(np.any(df2['LANDMARK'].isin([point_id]))); # Return true if any of the points is in the list
def getLandmarkType(self,point_id):
df2 = self.map_df.loc[self.map_df['LANDMARK']==point_id]
flist = list(df2['TYPE'])
return flist[0]
def getLandmarkState(self,point_id):
df2 = self.map_df.loc[self.map_df['LANDMARK']==point_id]
flist = list(df2['STATE'])
return flist[0]
def setLandmarkId(self,old_id,new_id):
"""
ACK
"""
if(self.isin(old_id)):
self.map_df.loc[self.map_df['LANDMARK']==old_id,'LANDMARK'] = new_id
self.cor_df.loc[self.cor_df['LANDMARK']==old_id,'LANDMARK'] = new_id
self.cde_df.loc[self.cde_df['LANDMARK']==old_id,'LANDMARK'] = new_id
self.list_local_area[new_id] = self.list_local_area[old_id]
del self.list_local_area[old_id]
self.list_errorDestiny[new_id] = self.list_errorDestiny[old_id]
del self.list_errorDestiny[old_id]
self.list_errorOrigin[new_id] = self.list_errorOrigin[old_id]
del self.list_errorOrigin[old_id]
return "OK"
else:
return "ERROR: id not in list"
def getLandmark(self,point_id,map_value):
"""
Map value returns the coordinates : 1 for origin, 2 for destiny
"""
if(not self.isin(point_id)):
return np.array([-np.inf])
if (map_value == 1):
coords = self.map_df.loc[self.map_df['LANDMARK'] == point_id,self.col_dim_coords_origin]
coords = np.squeeze(coords.values)
return np.array(coords,dtype = np.float32)
elif (map_value == 2):
coords = self.map_df.loc[self.map_df['LANDMARK'] == point_id, self.col_dim_coords_destiny]
coords = np.squeeze(coords.values)
return np.array(coords,dtype = np.float32)
else:
self.logger.error("ERROR: In getLandmark for :" + str(point_id) + ". From " + str(self.map_id) + " Use map_value 1 to origin, 2 to destiny.")
return np.array([-np.inf])
def updateLandmarks(self):
"""
Update inner set of landmarks
:return:
"""
point_ids = self.getLandmarkIds()
for el in point_ids:
self.updateLandmark(el)
def updateLandmark(self,point_id):
    """Recompute the fused map_df position of `point_id` from observations.

    For each map (origin from cor_df, destiny from cde_df): a single
    observation is copied verbatim; multiple observations are fused with
    averageLandmarkPosition using their BELIEF weights. (The previous
    docstring was copy-pasted from getLandmark and did not apply here.)
    """
    if not self.cor_df['LANDMARK'].empty:
        df_pid = self.cor_df.loc[self.cor_df['LANDMARK'] == point_id]
        if not df_pid.empty :
            if len(df_pid) == 1:
                # UPDATE GENERAL LANDMARK MAP
                coords = np.array(df_pid[self.col_dim_coords_origin],dtype=np.float32)[0]
                self.map_df.loc[self.map_df['LANDMARK'] == point_id, self.col_dim_coords_origin] = coords[range(0, len(self.col_dim_coords_origin))]
            else:
                # Multiple observations: belief-weighted fusion.
                coords = self.averageLandmarkPosition(np.array(df_pid[self.col_dim_coords_origin],dtype=np.float32), np.array(df_pid['BELIEF']))
                self.map_df.loc[self.map_df['LANDMARK'] == point_id,self.col_dim_coords_origin] = coords[range(0,len(self.col_dim_coords_origin))]
    if not self.cde_df['LANDMARK'].empty:
        df_pid = self.cde_df.loc[self.cde_df['LANDMARK'] == point_id]
        if not df_pid.empty:
            # UPDATE GENERAL LANDMARK MAP
            if len(df_pid) == 1:
                coords = np.array(df_pid[self.col_dim_coords_destiny],dtype=np.float32)[0]
                self.map_df.loc[self.map_df['LANDMARK'] == point_id, self.col_dim_coords_destiny] = coords[range(0, len(self.col_dim_coords_destiny))]
            else:
                # Multiple observations: belief-weighted fusion.
                coords = self.averageLandmarkPosition(np.array(df_pid[self.col_dim_coords_destiny],dtype=np.float32), np.array(df_pid['BELIEF']))
                self.map_df.loc[self.map_df['LANDMARK'] == point_id, self.col_dim_coords_destiny] = coords[range(0,len(self.col_dim_coords_destiny))]
def resetCoordinates(self, point_id, map_id):
    """
    Set coordinates to 0
    Map value returns the coordinates : 1 for origin, 2 for destiny

    Drops all observations of `point_id` from the selected observation
    table, re-registers a single zero-coordinate observation with belief
    0.0 (via addCoordsOrigin/addCoordsDestiny, defined elsewhere in this
    class), and clears the point's error history and local area.
    Returns -1 when `point_id` is unknown, otherwise None.
    """
    if (not self.isin(point_id)):
        return -1
    if map_id == 1:
        self.cor_df = self.cor_df[self.cor_df.LANDMARK != point_id]
        self.addCoordsOrigin(point_id, np.zeros(len(self.col_dim_coords_origin)), 0.0)
        self.list_errorOrigin[point_id] = []
        self.list_local_area[point_id] = LocalArea()
    if map_id == 2:
        self.cde_df = self.cde_df[self.cde_df.LANDMARK != point_id]
        self.addCoordsDestiny(point_id, np.zeros(len(self.col_dim_coords_destiny)), 0.0)
        self.list_errorDestiny[point_id] = []
        self.list_local_area[point_id] = LocalArea()
def averageLandmarkPosition(self, coords, belief, method = 'average'):
    """Fuse repeated observations of a landmark into a single position.

    :param coords: (n, dim) array of observed positions.
    :param belief: (n,) array of weights/confidences, one per observation.
    :param method: 'average' -> belief-weighted mean (plain mean when all
        beliefs are zero); 'max_belief' -> position of the most trusted
        observation.
    :return: (dim,) fused position, or None for an unknown method.
    """
    if(method=='average'):
        n_arr = (coords.transpose() * belief).transpose() # Multiply by weights
        total_belief = np.sum(belief)
        if(total_belief>0):
            avg_coords = np.sum(n_arr, axis=0) / total_belief
        else:
            # All-zero beliefs: fall back to the unweighted centroid.
            avg_coords = np.mean(coords,axis=0)
        return avg_coords
    elif(method =='max_belief'):
        # BUG FIX: np.amax returned the maximum belief *value* and used it
        # as an index; np.argmax gives the index of the best observation.
        ind = np.argmax(belief)
        return coords[ind]
def getAllLandmarkCoordinates(self):
point_ids = list(self.map_df['LANDMARK'])
coords_origin | |
<gh_stars>0
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Implemented multiparameter equation of state as a Helmholtz free energy
* IAPWS-95 implementation
* Heavy water formulation 2017
"""
from __future__ import division
from itertools import product
import warnings
from scipy import exp, log, ndarray
from scipy.optimize import fsolve
from .iapws97 import _TSat_P, IAPWS97
from ._iapws import M, Tc, Pc, rhoc, Tc_D2O, Pc_D2O, rhoc_D2O
from ._iapws import _Viscosity, _ThCond, _Dielectric, _Refractive, _Tension
from ._iapws import _D2O_Viscosity, _D2O_ThCond, _D2O_Tension
from ._utils import _fase, getphase, deriv_H
class MEoS(_fase):
"""
General implementation of multiparameter equation of state. From this
derived all child class specified per individual compounds
Parameters
----------
T : float
Temperature [K]
P : float
Pressure [MPa]
rho : float
Density [kg/m³]
v : float
Specific volume [m³/kg]
h : float
Specific enthalpy [kJ/kg]
s : float
Specific entropy [kJ/kgK]
u : float
Specific internal energy [kJ/kg]
x : float
Vapor quality [-]
l : float, optional
Wavelength of light, for refractive index [nm]
rho0 : float, optional
Initial value of density, to improve iteration [kg/m³]
T0 : float, optional
Initial value of temperature, to improve iteration [K]
x0 : Initial value of vapor quality, necessary in bad input pair definition
where there are two valid solution (T-h, T-s)
Notes
-----
* It needs two incoming properties of T, P, rho, h, s, u.
* v as a alternate input parameter to rho
* T-x, P-x, preferred input pair to specified a point in two phases region
Returns
-------
The calculated instance has the following properties:
* P: Pressure [MPa]
* T: Temperature [K]
* x: Vapor quality [-]
* g: Specific Gibbs free energy [kJ/kg]
* a: Specific Helmholtz free energy [kJ/kg]
* v: Specific volume [m³/kg]
* r: Density [kg/m³]
* h: Specific enthalpy [kJ/kg]
* u: Specific internal energy [kJ/kg]
* s: Specific entropy [kJ/kg·K]
* cp: Specific isobaric heat capacity [kJ/kg·K]
* cv: Specific isochoric heat capacity [kJ/kg·K]
* cp_cv: Heat capacity ratio, [-]
* Z: Compression factor [-]
* fi: Fugacity coefficient [-]
* f: Fugacity [MPa]
* gamma: Isoentropic exponent [-]
* alfav: Isobaric cubic expansion coefficient [1/K]
* kappa: Isothermal compressibility [1/MPa]
* kappas: Adiabatic compresibility [1/MPa]
* alfap: Relative pressure coefficient [1/K]
* betap: Isothermal stress coefficient [kg/m³]
* joule: Joule-Thomson coefficient [K/MPa]
* betas: Isoentropic temperature-pressure coefficient [-]
* Gruneisen: Gruneisen parameter [-]
* virialB: Second virial coefficient [m³/kg]
* virialC: Third virial coefficient [m⁶/kg²]
* dpdT_rho: Derivatives, dp/dT at constant rho [MPa/K]
* dpdrho_T: Derivatives, dp/drho at constant T [MPa·m³/kg]
* drhodT_P: Derivatives, drho/dT at constant P [kg/m³·K]
* drhodP_T: Derivatives, drho/dP at constant T [kg/m³·MPa]
* dhdT_rho: Derivatives, dh/dT at constant rho [kJ/kg·K]
* dhdP_T: Isothermal throttling coefficient [kJ/kg·MPa]
* dhdT_P: Derivatives, dh/dT at constant P [kJ/kg·K]
* dhdrho_T: Derivatives, dh/drho at constant T [kJ·m³/kg²]
* dhdrho_P: Derivatives, dh/drho at constant P [kJ·m³/kg²]
* dhdP_rho: Derivatives, dh/dP at constant rho [kJ/kg·MPa]
* kt: Isothermal Expansion Coefficient [-]
* ks: Adiabatic Compressibility [1/MPa]
* Ks: Adiabatic bulk modulus [MPa]
* Kt: Isothermal bulk modulus [MPa]
* v0: Ideal specific volume [m³/kg]
* rho0: Ideal gas density [kg/m³]
* u0: Ideal specific internal energy [kJ/kg]
* h0: Ideal specific enthalpy [kJ/kg]
* s0: Ideal specific entropy [kJ/kg·K]
* a0: Ideal specific Helmholtz free energy [kJ/kg]
* g0: Ideal specific Gibbs free energy [kJ/kg]
* cp0: Ideal specific isobaric heat capacity [kJ/kg·K]
* cv0: Ideal specific isochoric heat capacity [kJ/kg·K]
* w0: Ideal speed of sound [m/s]
* gamma0: Ideal isoentropic exponent [-]
* w: Speed of sound [m/s]
* mu: Dynamic viscosity [Pa·s]
* nu: Kinematic viscosity [m²/s]
* k: Thermal conductivity [W/m·K]
* alfa: Thermal diffusivity [m²/s]
* sigma: Surface tension [N/m]
* epsilon: Dielectric constant [-]
* n: Refractive index [-]
* Prandt: Prandtl number [-]
* Pr: Reduced Pressure [-]
* Tr: Reduced Temperature [-]
* Hvap: Vaporization heat [kJ/kg]
* Svap: Vaporization entropy [kJ/kg·K]
* Z_rho: (Z-1) over the density [m³/kg]
* IntP: Internal pressure [MPa]
* invT: Negative reciprocal temperature [1/K]
* hInput: Specific heat input [kJ/kg]
"""
CP = None
_Pv = None
_rhoL = None
_rhoG = None
kwargs = {"T": 0.0,
"P": 0.0,
"rho": 0.0,
"v": 0.0,
"h": None,
"s": None,
"u": None,
"x": None,
"l": 0.5893,
"rho0": None,
"T0": None,
"x0": 0.5}
status = 0
msg = "Undefined"
def __init__(self, **kwargs):
    """Constructor: define common constants and initialize kwargs.

    R is the specific gas constant derived from the subclass `_constants`
    table (which may override the molar mass M); Zc is the critical
    compressibility factor. Input handling is delegated to __call__ so an
    instance can also be updated parameter-by-parameter afterwards.
    """
    self.R = self._constants["R"]/self._constants.get("M", self.M)
    self.Zc = self.Pc/self.rhoc/self.R/self.Tc
    self.kwargs = MEoS.kwargs.copy()
    self.__call__(**kwargs)
def __call__(self, **kwargs):
    """Make the instance callable so input parameters can be added one by one.

    Alternate density inputs (rhom, v, vm) are normalised to ``rho``
    before the stored kwargs are updated; the full property calculation
    runs once enough inputs are defined. RuntimeError from the solver is
    recorded in ``status``/``msg`` and re-raised.
    """
    # Alternative rho input
    if "rhom" in kwargs:
        # molar density -> mass density
        kwargs["rho"] = kwargs["rhom"]*self.M
        del kwargs["rhom"]
    elif kwargs.get("v", 0):
        # specific volume -> density
        kwargs["rho"] = 1./kwargs["v"]
        del kwargs["v"]
    elif kwargs.get("vm", 0):
        # molar volume -> density
        kwargs["rho"] = self.M/kwargs["vm"]
        del kwargs["vm"]
    self.kwargs.update(kwargs)

    if self.calculable:
        try:
            self.status = 1
            self.calculo()
            self.msg = ""
        except RuntimeError as err:
            self.status = 0
            self.msg = err.args[0]
            # Bare raise preserves the original traceback (was `raise(err)`).
            raise

        # Add msg for extrapolation state
        if self.name == "water" and 130 <= self.T < 273.15:
            self.msg = "Extrapolated state"
            self.status = 3
            warnings.warn("Using extrapolated values")
        elif self.name == "water" and 50 <= self.T < 130:
            self.msg = "Extrapolated state using Low-Temperature extension"
            self.status = 3
            # BUG FIX: the adjacent string literals lacked a separating
            # space and produced "...Low-Temperatureextension".
            warnings.warn("Using extrapolated values and Low-Temperature "
                          "extension")
@property
def calculable(self):
    """Check if inputs are enough to define state"""
    kw = self.kwargs

    def given(key):
        # h, s, u and x may legitimately equal 0, so they are compared
        # against None; T, P and rho use plain truthiness (0.0 = unset).
        if key in ("h", "s", "u", "x"):
            return kw[key] is not None
        return bool(kw[key])

    # Input pairs in the original priority order; the mode string is the
    # concatenation of the two keys (e.g. "TP", "Trho", "Px").
    pairs = (("T", "P"), ("T", "rho"), ("T", "h"), ("T", "s"), ("T", "u"),
             ("P", "rho"), ("P", "h"), ("P", "s"), ("P", "u"),
             ("rho", "h"), ("rho", "s"), ("rho", "u"),
             ("h", "s"), ("h", "u"), ("s", "u"),
             ("T", "x"), ("P", "x"))
    self._mode = ""
    for first, second in pairs:
        if given(first) and given(second):
            self._mode = first + second
            break
    return bool(self._mode)
def calculo(self):
"""Calculate procedure"""
T = self.kwargs["T"]
rho = self.kwargs["rho"]
P = self.kwargs["P"]
s = self.kwargs["s"]
h = self.kwargs["h"]
u = self.kwargs["u"]
x = self.kwargs["x"]
# Initial values
T0 = self.kwargs["T0"]
rho0 = self.kwargs["rho0"]
if T0 or rho0:
To = T0
rhoo = rho0
elif self.name == "air":
To = 300
rhoo = 1e-3
else:
try:
st0 = IAPWS97(**self.kwargs)
except NotImplementedError:
To = 300
rhoo = 900
else:
if st0.status:
To = st0.T
rhoo = st0.rho
else:
To = 300
rhoo = 900
self.R = self._constants["R"]/self._constants.get("M", self.M)
propiedades = None
if x is None:
# Method with iteration necessary to get x
if self._mode == "TP":
try:
if self.name == "air":
raise ValueError
st0 = IAPWS97(**self.kwargs)
rhoo = st0.rho
except NotImplementedError:
if rho0:
rhoo = rho0
elif T < self.Tc and P < self.Pc and \
self._Vapor_Pressure(T) < P:
rhoo = self._Liquid_Density(T)
elif T < self.Tc and P < self.Pc:
rhoo = self._Vapor_Density(T)
else:
rhoo = self.rhoc*3
except ValueError:
rhoo = 1e-3
rho = fsolve(
lambda rho: self._Helmholtz(rho, T)["P"]-P*1000, rhoo)[0]
elif self._mode == "Th":
def f(rho):
return self._Helmholtz(rho, T)["h"]-h
if T >= self.Tc:
rhoo = self.rhoc
rho = fsolve(f, rhoo)[0]
else:
x0 = self.kwargs["x0"]
rhov = self._Vapor_Density(T)
rhol = self._Liquid_Density(T)
hl = self._Helmholtz(rhol, T)["h"]
hv = self._Helmholtz(rhov, T)["h"]
if x0 not in (0, 1) and hl <= h <= hv:
rhol, rhov, Ps = self._saturation(T)
vapor = self._Helmholtz(rhov, T)
liquido = self._Helmholtz(rhol, T)
hv = vapor["h"]
hl = liquido["h"]
x = (h-hl)/(hv-hl)
rho = 1/(x/rhov+(1-x)/rhol)
P = Ps/1000
else:
if h > hv:
rhoo = rhov
else:
rhoo = | |
model=model,
obs_shape_n=obs_shape_n,
act_space_n=env.action_space,
agent_index=i,
args=arglist,
local_q_func=(arglist.good_policy=='ddpg')))
# create scenario heuristic group trainer if applicable
if arglist.training_algorithm == 'ScenarioHeuristicGroupTrainer':
group_trainer = ScenarioHeuristicGroupTrainer(
agent_trainer_group=trainers,
init_group_policy=None,
n_episodes_per_batch=arglist.batch_size,
n_elite=25)
elif arglist.training_algorithm == 'PPOGroupTrainer':
group_trainer.update_agent_trainer_group(trainers)
return trainers, group_trainer
def get_trainer_actions(agents, trainers, observations, combined_action_value=False):
    '''return list of actions from each agent's trainer
    Args:
        - agents: list of agents in the environment; i.e. "physical" robots
        - trainers: list of decision makers for each agent; i.e. policy and learning algorithm
        - observations: list of each observation for each trainer
        - combined_action_value: boolean, does trainer.action output value est and probability
            along with action
    Returns:
        - action_n: list of actions for each of the n agents at a single time step
        - value_n: list of value estimate for each of the n agents actions
        - neglogpact_n: list of negative log probability for each of the n agents actions
        - health_n: list of 1.0/0.0 flags, 0.0 for terminated agents
    '''
    # Sanity check that agents, observations, and trainers stay in sync
    # (agents may be terminated); this only checks list sizes.
    n_trainers = len(trainers)
    assert len(observations) == n_trainers
    assert len(agents) == n_trainers
    action_n = [None] * n_trainers
    value_n = [None] * n_trainers
    neglogpact_n = [None] * n_trainers
    health_n = [1.0] * n_trainers
    for idx, trainer in enumerate(trainers):
        # The trainer's name suffix encodes its expected position.
        if int(trainer.name.split('_')[1]) != idx:
            raise OrderingException('trainer list out of order')
        result = trainer.action(observations[idx])
        if combined_action_value:
            assert len(result) == 3
            assert result[1].shape == (1,)  # value estimate format
            assert result[2].shape == (1,)  # neg-log-prob format
            action_n[idx] = result[0]
            value_n[idx] = result[1][0]
            neglogpact_n[idx] = result[2][0]
        else:
            action_n[idx] = result
        # Terminated agents get an all-zero action (None would break the
        # ReplayBuffer's fixed action formatting) and zero health.
        if getattr(agents[idx], 'terminated', False):
            action_n[idx] = action_n[idx] * 0.0
            # if neglogpact_n[idx] is not None:
            #     neglogpact_n[idx] *= 0.0
            health_n[idx] = 0.0
    return action_n, value_n, neglogpact_n, health_n
def setup_experiment_record(arglist):
    ''' Setup file structure for a new experiment and record call info
    Notes:
        - this is used for recording complete experiments to be used for further analysis. This
        is distinct from just "testing things out" and iterating designs.
        - It captures the git commit hash, command line call used to kick off experiment in
        a markdown file. In this way the experiment can be completely reproduceable.
        - It also stores the policy and learning curves in the same directory
    '''
    # Refuse to record from a dirty working tree: the commit hash written
    # to notes.md must fully describe the code that ran.
    git_st = subprocess.check_output(['git', 'status', '--porcelain'])
    if len(git_st) > 0:
        raise Exception('Uncommitted changes present. Please commit all changes before running experiment')
    # BUG FIX: check_output returns bytes; decode so notes.md records the
    # bare hash instead of a bytes repr like "b'abc123\n'".
    git_hash = subprocess.check_output(['git', 'rev-parse', 'HEAD']).decode('utf-8').strip()
    # enforce no display or benchmark
    arglist.display = arglist.benchmark = False
    # check the save directory and adjust if default
    if arglist.save_dir == '/tmp/policy/':
        arglist.save_dir = '/tmp/gym_experiments/'
    # check experiment name and adjust if default
    if arglist.experiment_name == 'default':
        arglist.experiment_name = arglist.training_algorithm + '.' + arglist.scenario
    # make expdata directory within save directory, bumping the counter
    # until an unused directory name is found
    count = 0
    while True:
        exp_dir_name = ('expdata.' +
                        datetime.datetime.today().strftime('%Y-%m-%d') +
                        '.' + arglist.experiment_name + '.' + str(count) + '/')
        exp_dir = os.path.join(arglist.save_dir, exp_dir_name)
        if os.path.exists(exp_dir):
            count += 1
        else:
            os.makedirs(exp_dir)
            break
    # set save_dir and plots_dir
    arglist.save_dir = exp_dir + 'policy/'
    arglist.plots_dir = exp_dir
    # make notes.md recording the exact command line and commit
    with open(exp_dir + 'notes.md', 'w+') as notes:
        notes.write('Call: python ')
        for arg in sys.argv:
            notes.write(arg + ' ')
        notes.write('\n')
        notes.write('Commit: {}'.format(git_hash))
    # redefine stderr to capture errors during experiment and also output to terminal
    sys.stderr = Logger(exp_dir + 'error.log')
def train(arglist):
with U.single_threaded_session():
training_start_time = time.perf_counter()
# Create environment
env = make_env(arglist.environment, arglist.scenario, arglist,
benchmark=arglist.benchmark,
discrete_action_space=rl_algs.use_discrete_action_space(arglist.training_algorithm),
legacy_multidiscrete=rl_algs.use_legacy_multidiscrete(arglist.training_algorithm))
# Create agent trainers
obs_shape_n = [env.observation_space[i].shape for i in range(env.n)]
num_adversaries = min(env.n, arglist.num_adversaries)
trainers, group_trainer = get_agent_trainers(env, num_adversaries, obs_shape_n, arglist)
# Create storage for training loss data useful for post processing and debugging
training_loss_names = ['episode_count']
if hasattr(group_trainer, 'mb_loss_names'):
training_loss_names = training_loss_names + group_trainer.mb_loss_names
# Initialize
U.initialize()
# Setup learning experiment recording
if arglist.record_experiment:
setup_experiment_record(arglist)
# Load previous results, if necessary
if arglist.load_dir == "":
arglist.load_dir = arglist.save_dir
if (arglist.display or arglist.restore or arglist.benchmark):
print('Loading previous state...')
U.load_state(arglist.load_dir)
episode_rewards = [0.0] # sum of rewards for all agents
episode_agent_rewards = [[0.0] for _ in range(env.n)] # individual agent reward
episode_rewards_stats = [] # sum of rewards for training curve
episode_agent_rewards_stats = [] # agent rewards for training curve
training_loss_stats = []
arglist_filename = arglist.plots_dir + arglist.experiment_name + '.arglist.pkl'
episode_rewards_filename = arglist.plots_dir + arglist.experiment_name + '.rewards.pkl'
training_loss_filename = arglist.plots_dir + arglist.experiment_name + '.losses.pkl'
episode_rewards_stats_filename = arglist.plots_dir + arglist.experiment_name + '.rewards_stats.pkl'.format(arglist.save_rate)
episode_agent_rewards_stats_filename = arglist.plots_dir + arglist.experiment_name + '.rewards_per_agent_stats.pkl'.format(arglist.save_rate)
agent_info = [[[]]] # placeholder for benchmarking info
saver = tf.train.Saver()
obs_n = env.reset()
episode_step = 0
train_step = 0
t_start = time.time()
# capture joint state of all entities in world in case of use of centralized critic
if "central_joint_" in arglist.critic_type:
joint_state = env.get_joint_state()
print('Starting iterations...')
while True:
# record joint state of system for (training purposes only, not accessible by agents at runtime)
if "central_joint_" in arglist.critic_type:
group_trainer.record_joint_state(joint_state)
# get action, value estimate, and action probability from each agent's trainer
action_n, value_n, neglogp_action_n, health_n = get_trainer_actions(env.agents, trainers, obs_n,
combined_action_value=rl_algs.use_combined_action_value(arglist.training_algorithm))
# ensure actions are valid
if any([any(np.isnan(aa)) for aa in action_n]):
raise Exception("NaN actions returned by get_trainer_actions: obs_n={}\naction_n={}".format(obs_n, action_n))
# environment step
new_obs_n, rew_n, done_n, info_n = env.step(action_n)
episode_step += 1
done = all(done_n)
terminal = (episode_step >= arglist.max_episode_len)
# capture new joint state to be recorded at next time step
if "central_joint_" in arglist.critic_type:
joint_state = env.get_joint_state()
# collect experience and store in the replay buffer
for i, agent in enumerate(trainers):
if rl_algs.use_combined_action_value(arglist.training_algorithm):
agent.experience( obs=obs_n[i],
act=action_n[i],
rew=rew_n[i],
new_obs=new_obs_n[i],
val=value_n[i],
neglogpact=neglogp_action_n[i],
done=done_n[i],
health=health_n[i],
terminal=terminal)
else:
agent.experience( obs=obs_n[i],
act=action_n[i],
rew=rew_n[i],
new_obs=new_obs_n[i],
done=done_n[i],
terminal=terminal)
obs_n = new_obs_n
for i, rew in enumerate(rew_n):
episode_rewards[-1] += rew
episode_agent_rewards[i][-1] += rew
if done or terminal:
# before reseting environment, capture last joint state
if "central_joint_" in arglist.critic_type:
group_trainer.record_joint_state(env.get_joint_state())
# reset environment and appropriate variables
obs_n = env.reset()
episode_step = 0
episode_rewards.append(0)
for a in episode_agent_rewards:
a.append(0)
agent_info.append([[]])
# capture new joint state after reset to be recorded at next iteration
if "central_joint_" in arglist.critic_type:
joint_state = env.get_joint_state()
# increment global step counter
train_step += 1
# for benchmarking learned policies
if arglist.benchmark:
for i, info in enumerate(info_n):
agent_info[-1][i].append(info_n['n'])
if train_step > arglist.benchmark_iters and (done or terminal):
filename = arglist.benchmark_dir + arglist.experiment_name + '.pkl'
print('Finished benchmarking, now saving...')
with open(filename, 'wb') as fp:
pickle.dump(agent_info[:-1], fp)
break
continue
# for displaying learned policies
if arglist.display:
time.sleep(0.05)
env.render()
continue
# update all trainers, if not in display or benchmark mode
if group_trainer is None:
loss_stats = None
# single trainer per agent, individualized learning
for agent in trainers:
agent.preupdate()
for agent in trainers:
loss_stats = agent.update(trainers, train_step)
else:
# group-wide learning
loss_stats = group_trainer.update_group_policy(terminal)
if loss_stats is not None:
ep_num = len(episode_rewards)
training_loss_stats += [[ep_num] + L for L in loss_stats]
# save model, display training output
if terminal and (len(episode_rewards) % arglist.save_rate == 0):
# tensorize group policy
                # NOTE: this is not done at every time step because it appears
                # to be very slow
# NOTE: This is a bit of a hack since ScenarioHeuristicGroupTrainer doesn't
# inherently store policy as tf Tensors. This will likely be removed
# later assuming ScenarioHeuristicGroupTrainer moves to tf-centric format
if group_trainer is not None:
group_trainer.tensorize_group_policy()
# save policy (tensorflow variables)
U.save_state(arglist.save_dir, saver=saver)
# record learning
recent_reward_stats=(np.mean(episode_rewards[-arglist.save_rate:]), np.std(episode_rewards[-arglist.save_rate:]), time.perf_counter()-training_start_time)
episode_rewards_stats.append(recent_reward_stats)
for rew in episode_agent_rewards:
episode_agent_rewards_stats.append((np.mean(rew[-arglist.save_rate:]), np.std(rew[-arglist.save_rate:])))
pickle_learning_curves(arglist_filename, episode_rewards_filename, episode_rewards_stats_filename, | |
just 1 word.")
@click.option('--stemmed_search', '-stemm', default=False,type=bool, help="Search in lemantisated/stemmed syntagmas. Be careful and don't give different conjugations of one lemma, if current options is True. Because you could get duplicates.")
@click.option('--context_len_left', '-conleft', default=True, help="The length of context In Output Tables. Could be also Disabled (False).")
@click.option('--context_len_right', '-conright', default=False,help="The length of context In Output Tables. Could be also Disabled (False).")
@click.option('--separator_syn', '-sepsyn', default=" || ", help="Separator inside syntagma in baseline.")
@click.option('--word_examples_sum_table', '-wordex', default=True,type=bool, help="Enable/disable Word Examples in Exported Output. (Just For SumOutputTables) ")
@click.option('--ignore_symbol', '-ignsym', default=False, help="Enable/disable Symbols in Exported Outputs. (Just For SumOutputTables)")
@click.option('--recompute_flag', '-recflag', default=None,help="For 'recompute' command. This command recompute the FullRepetativnes in given StatsDB. True - full_repetativnes, False - no_full_repetativnes/all_syntagmas ")
@click.option('--attr_name', '-attr', default=False, help="Stats and Corpus DBs has intern Attributes. For changing of getting them you need to get the name of this attribute. ")
@click.option('--value', '-val', default=False, help="For setting of the new Value for one Attribute.")
@click.option('--mode', '-m', default="prod" ,help="Set one of the Tool Modus", type=click.Choice(helpers.modi))
@click.option('--logdir', '-ld', default="logs", help="Choose the name of the Directory for log data.")
def stats(command1,
status_bar, use_end_file_marker, make_backup, lazyness_border,
rewrite, use_cash, optimizer, optimizer_page_size,
optimizer_cache_size, optimizer_locking_mode, optimizer_synchronous, optimizer_journal_mode, optimizer_temp_store,
gready,min_files_pro_stream,baseline_delimiter,
corp_fname, stream_number,create_indexes, freeze_db, optimized_for_long_syntagmas,
stats_fname,stats_intern_dbname,visibility, encryption_key , version, stats_id, context_lenght, full_repetativ_syntagma,
repl_up, ignore_hashtag, case_sensitiv, ignore_url, ignore_mention, ignore_punkt, ignore_num,
recompute_flag,value, attr_name, baseline_insertion_border,
export_dir, syntagma_for_export, exp_repl, exp_redu, exp_sentiment, export_name, export_file_type, rows_limit_in_file,
encryption_key_corp, output_table_type, additional_doc_cols, max_scope, stemmed_search, context_len_left, context_len_right,
separator_syn, word_examples_sum_table, exp_syntagma_typ,ignore_symbol,
mode,logdir ):
# $ zas-vot-tools strat1 sets/train_set sets/eval_set segments voiceless voiced vlwindow vcwindow experiments
logger = get_cli_logger(mode,logdir)
func_name = "stats"
#p(command,"command")
if syntagma_for_export != "*":
temp_syn = []
syntagma_for_export = syntagma_for_export.strip("'") if syntagma_for_export[0] == "'" else syntagma_for_export.strip('"')
exctracted_syn = syntagma_for_export.split(",")
for syntagma_part in exctracted_syn:
temp_syn.append(syntagma_part.split("|"))
### combinatoric
if len(temp_syn) == 0:
logger.error("No one syntagma was exctracted. Probably wrong structure was given. Please give syntagma in the following structure 'very|huge|highly,pitty|hard|happy,man|woman|boy|person'")
return False
else:
#p(temp_syn,"temp_syn")
syntagma_for_export = list(itertools.product(*temp_syn))
optimizer = strtobool(optimizer)
stats_intern_dbname = strtobool(stats_intern_dbname)
visibility = strtobool(visibility)
#cols_and_types_in_doc = strtobool(cols_and_types_in_doc)
#type_to_export = strtobool(type_to_export)
exp_sentiment = strtobool(exp_sentiment)
recompute_flag = strtobool(recompute_flag) if recompute_flag is not None else None
status_bar = strtobool(status_bar)
stats_fname = strtobool(stats_fname)
try:
max_scope = int(max_scope)
except:
max_scope = False
#p(status_bar,"status_bar")
#p(type(status_bar),"status_bar")
if command1 not in supported_commands[func_name]:
logger.error(" Given Command ('{}') is illegal for '{}'. Please use one of the following commands: '{}' ".format(command1,func_name,supported_commands[func_name] ))
return False
if command1 == "compute":
if not corp_fname or not stats_intern_dbname or not visibility:
logger.error("Command is incomplete: One of the following options is empty '--corp_fname', '--stats_intern_dbname', '--visibility' ")
return False
else:
files = get_corp_fname(main_folders)
if corp_fname in files:
pass
elif corp_fname in [os.path.splitext(fname)[0] for fname in files]:
corp_fname = corp_fname+".db"
else:
logger.error("Given corp_fname ('{}') wasn't found and can not be opened.".format(corp_fname))
return False
corp = Corpus(mode="error")
corp.open(os.path.join(main_folders["corp"],corp_fname), encryption_key=encryption_key_corp)
#p(("+++++",corp.corpdb))
if corp.corpdb:
if not corp.corpdb._db:
#p("----")
logger.error("CorpDB-Opening is failed. (CorpFname: ('{}')) If this corpus is encrypted, please use option '--encryption_key_corp'. ".format(corp))
return False
else:
logger.error("CorpDB-Opening is failed. (CorpFname: ('{}')) If this corpus is encrypted, please use option '--encryption_key_corp'. ".format(corp))
return False
#p("....")
language = corp.corpdb.get_attr("language")
corpus_id = corp.corpdb.get_attr("id")
stop_if_db_already_exist = False if rewrite else True
stop_process_if_possible = False if gready else True
stats = Stats(mode=mode, error_tracking=answer_error_tracking, status_bar=status_bar,
make_backup=strtobool(make_backup), lazyness_border=lazyness_border, thread_safe=True, rewrite=strtobool(rewrite), stop_if_db_already_exist=stop_if_db_already_exist,
use_cash=strtobool(use_cash), optimizer=strtobool(optimizer),optimizer_page_size=optimizer_page_size,
optimizer_cache_size=optimizer_cache_size, optimizer_locking_mode=optimizer_locking_mode, optimizer_synchronous=optimizer_synchronous,
optimizer_journal_mode=optimizer_journal_mode, optimizer_temp_store=optimizer_temp_store,stop_process_if_possible=stop_process_if_possible)
stats.init(main_folders["stats"], stats_intern_dbname, language, visibility, corpus_id=corpus_id,
encryption_key=encryption_key,fileName=stats_fname, version=version, stats_id=stats_id,
context_lenght=context_lenght, full_repetativ_syntagma=strtobool(full_repetativ_syntagma),
min_scope_for_indexes=2, repl_up=repl_up, ignore_hashtag=strtobool(ignore_hashtag), force_cleaning=False,
case_sensitiv=strtobool(case_sensitiv), ignore_url=strtobool(ignore_url), ignore_mention=strtobool(ignore_mention),
ignore_punkt=strtobool(ignore_punkt), ignore_num=strtobool(ignore_num),baseline_delimiter=baseline_delimiter,)
#p(stream_number, "stream_number")
stats.compute(corp, stream_number=stream_number, datatyp="dict",
adjust_to_cpu=True,min_files_pro_stream=min_files_pro_stream,cpu_percent_to_get=50,
create_indexes=create_indexes, freeze_db=freeze_db,baseline_insertion_border=baseline_insertion_border,
drop_indexes=True,optimized_for_long_syntagmas=optimized_for_long_syntagmas)
elif command1 == "recompute":
#p((stats_fname, recompute_flag))
if not stats_fname or recompute_flag is None:
logger.error("Command is incomplete: One of the following options is empty '--stats_fname', '--recompute_flag' ")
return False
else:
files = get_stats_fname(main_folders)
if stats_fname in files:
pass
elif stats_fname in [os.path.splitext(fname)[0] for fname in files]:
stats_fname = stats_fname+".db"
else:
logger.error("Given stats_fname ('{}') wasn't found and can not be opened.".format(stats_fname))
return False
stats = Stats(mode=mode)
stats.open(os.path.join(main_folders["stats"],stats_fname), encryption_key=encryption_key)
if not stats.statsdb:
logger.error("StatsDB-Opening is failed. (StatsFname: ('{}')) If this statsus is encrypted, please use option '--encryption_key'. ".format(stats))
return False
stats.recompute_syntagma_repetativity_scope(recompute_flag, _check_statsdb_consistency=True)
elif command1 == "optimize":
if not stats_fname:
logger.error("Command is incomplete: One of the following options is empty '--stats_fname' ")
return False
else:
files = get_stats_fname(main_folders)
if stats_fname in files:
pass
elif stats_fname in [os.path.splitext(fname)[0] for fname in files]:
stats_fname = stats_fname+".db"
else:
logger.error("Given stats_fname ('{}') wasn't found and can not be opened.".format(stats_fname))
return False
stats = Stats(mode=mode)
stats.open(os.path.join(main_folders["stats"],stats_fname), encryption_key=encryption_key)
if not stats.statsdb:
logger.error("StatsDB-Opening is failed. (StatsFname: ('{}')) If this statsus is encrypted, please use option '--encryption_key'. ".format(stats))
return False
stats.optimize_db( stream_number=stream_number, optimized_for_long_syntagmas=optimized_for_long_syntagmas, )
elif command1 == "recreate_indexes":
if not stats_fname:
logger.error("Command is incomplete: One of the following options is empty '--stats_fname' ")
return False
else:
files = get_stats_fname(main_folders)
if stats_fname in files:
pass
elif stats_fname in [os.path.splitext(fname)[0] for fname in files]:
stats_fname = stats_fname+".db"
else:
logger.error("Given stats_fname ('{}') wasn't found and can not be opened.".format(stats_fname))
return False
stats = Stats(mode=mode)
stats.open(os.path.join(main_folders["stats"],stats_fname), encryption_key=encryption_key)
if not stats.statsdb:
logger.error("StatsDB-Opening is failed. (StatsFname: ('{}')) If this statsus is encrypted, please use option '--encryption_key'. ".format(stats))
return False
#stats.optimize_db( stream_number=stream_number, optimized_for_long_syntagmas=optimized_for_long_syntagmas, )
stats.create_additional_indexes(optimized_for_long_syntagmas=optimized_for_long_syntagmas)
elif command1 == "del":
if not stats_fname:
logger.error("'--stats_fname' is not given. (you can also give tag 'all' instead of the stats_fname)")
return False
if stats_fname == "all":
#os.remove()
shutil.rmtree(main_folders["stats"], ignore_errors=True)
logger.info("All StatsDB was removed.")
else:
files = get_stats_fname(main_folders)
if stats_fname in files:
os.remove(os.path.join(main_folders["stats"], stats_fname))
logger.info("'{}'-StatsDB was removed".format(stats_fname))
elif stats_fname in [os.path.splitext(fname)[0] for fname in files]:
os.remove(os.path.join(main_folders["stats"], stats_fname)+".db")
logger.info("'{}'-StatsDB was removed".format(stats_fname))
else:
logger.error("Given fname ('{}') wasn't found and can not be removed.".format(stats_fname))
return False
elif command1 == "clean_dir":
#if not corp_fname:
# logger.error("'--corp_fname' is not given. (you can also give tag 'all' instead of the corp_fname)")
#if corp_fname == "all":
files = get_stats_fname(main_folders)
validated,possibly_encrypted,wrong,opened_db = validate_stats(main_folders,files)
deleted = []
for temp_dbname in validated:
stats = Sretats(mode="blind")
stats.open(os.path.join(main_folders["stats"], temp_dbname))
if stats.statsdb:
if stats.statsdb.get_attr("locked"):
deleted.append(temp_dbname)
os.remove(os.path.join(main_folders["stats"], temp_dbname))
files = os.listdir(main_folders["stats"])
for journal_fname in [fname for fname in files if".db-journal" in fname]:
#deleted.append(temp_dbname)
os.remove(os.path.join(main_folders["stats"], journal_fname))
if deleted:
print " Following not finished and locked statsDBs was deleted:"
for dn in deleted:
print " |-> '{}';".format(dn)
return True
else:
print " Locked or not finished statsDBs wasn't found."
return False
elif command1 == "names":
#p("fghjk")
files = get_stats_fname(main_folders)
#p(files, "files")
validated,possibly_encrypted,wrong,opened_db = validate_stats(main_folders,files)
print ">>> {} DBs was found <<< ".format(len(files))
print " '{}'-From them was validated:".format(len(validated))
for i, fname in enumerate(validated):
print " {}. '{}';".format(i, fname)
if possibly_encrypted:
print "\n '{}'-From them are possibly encrypted/damaged/invalid:".format(len(possibly_encrypted))
for i, fname in enumerate(possibly_encrypted):
print " {}. '{}';".format(i, fname)
#p(files)
elif command1 == "meta":
if not stats_fname:
logger.error("'--stats_fname' is not given. (you can also give tag 'all' instead of the stats_fname)")
return False
files = get_stats_fname(main_folders)
validated,possibly_encrypted,wrong,opened_db = validate_stats(main_folders,files)
if stats_fname == "all":
for db in opened_db:
print("\n >>>> {} <<<<".format(db.fname()))
for k,v in db.get_all_attr().items():
print " {} = '{}';".format(k,v)
print "\n\nNotice! with 'all'-Argument could be checked just not-encrypted DBs. If you want to check encrypted DB use additional to stats_fname also '--encryption_key'"
else:
if stats_fname in files:
if stats_fname in validated:
ix = validated.index(stats_fname)
db = opened_db[ix]
print("\n >>>> {} <<<<".format(db.fname()))
for k,v in db.get_all_attr().items():
print " {} = '{}';".format(k,v)
elif stats_fname in possibly_encrypted:
if not encryption_key:
logger.error("Current DBFile is possibly encrypted/damaged. Please use '--encryption_key'-Option to decrypt it. ")
else:
h = DBHandler(mode="blind")
h.connect(os.path.join(main_folders["stats"],stats_fname),encryption_key=encryption_key)
try:
if h.typ() == "stats":
print("\n >>>> {} <<<<".format(h.fname()))
for k,v in h.get_all_attr().items():
print " {} = '{}';".format(k,v)
else:
logger.error("'{}'-DB is not an StatsDB or given encryption key ('{}') is wrong. ".format(stats_fname,encryption_key))
return False
except:
logger.error("'{}'-DB wasn't opened. Possibly this DB is damaged or the encryption key was wrong'{}' ".format(stats_fname,encryption_key))
return False
elif stats_fname in [os.path.splitext(fname)[0] for fname in files]:
spl1 = [os.path.splitext(fname)[0] for fname in validated]
spl2 = [os.path.splitext(fname)[0] for fname in possibly_encrypted]
if stats_fname in spl1:
ix = spl1.index(stats_fname)
db = opened_db[ix]
print("\n >>>> {} <<<<".format(db.fname()))
for k,v in db.get_all_attr().items():
print " {} = '{}';".format(k,v)
elif stats_fname in spl2:
if not encryption_key:
logger.error("Current DBFile is possibly encrypted/damaged. Please use '--encryption_key'-Option to decrypt it. ")
else:
h = DBHandler(mode="blind")
h.connect(os.path.join(main_folders["stats"],stats_fname)+".db",encryption_key=encryption_key)
| |
# src/pte_decode/decoding/decoder_factory.py
"""Module for machine learning models."""
from dataclasses import dataclass
from typing import Any, Optional, Union
import numpy as np
import pandas as pd
from bayes_opt import BayesianOptimization
from catboost import CatBoostClassifier
from sklearn.discriminant_analysis import (
LinearDiscriminantAnalysis,
QuadraticDiscriminantAnalysis,
)
from sklearn.dummy import DummyClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import balanced_accuracy_score, log_loss
from sklearn.model_selection import GroupKFold, GroupShuffleSplit
# from sklearn.svm import SVC
from xgboost import XGBClassifier
from pte_decode.decoding.decoder_base import Decoder
def get_decoder(
    classifier: str = "lda",
    scoring: str = "balanced_accuracy",
    balancing: Optional[str] = None,
    optimize: bool = False,
) -> Decoder:
    """Create and return Decoder of desired type.

    Parameters
    ----------
    classifier : str, default="lda"
        Type of decoder to create. Allowed values (keys of the internal
        registry): ["catboost", "dummy", "lda", "lr", "qda", "xgb"].
    scoring : str, default="balanced_accuracy"
        Score to be calculated. Allowed values:
        ["balanced_accuracy", "log_loss"].
    balancing : str | None, default=None
        Method for balancing skewed datasets. Possible values:
        ["oversample", "undersample", "balance_weights"].
    optimize : bool, default=False
        If True, the decoder performs Bayesian hyperparameter optimization
        when fitted (not supported by all decoders).

    Returns
    -------
    Decoder
        Instance of Decoder given `classifier` and `balancing` method.

    Raises
    ------
    DecoderNotFoundError
        If `classifier` is not a known decoder name.
    ScoringMethodNotFoundError
        If `scoring` is not a known scoring method.
    """
    # Registry of available decoder implementations. The SVM variants are
    # currently disabled.
    classifiers = {
        "catboost": CATB,
        "dummy": Dummy,
        "lda": LDA,
        "lr": LR,
        "qda": QDA,
        # "svm_lin": SVC_Lin,
        # "svm_poly": SVC_Poly,
        # "svm_rbf": SVC_RBF,
        "xgb": XGB,
    }
    scoring_methods = {
        "balanced_accuracy": _get_balanced_accuracy,
        "log_loss": _get_log_loss,
    }
    # Normalize user input so lookups are case-insensitive.
    classifier = classifier.lower()
    balancing = balancing.lower() if isinstance(balancing, str) else balancing
    scoring = scoring.lower()
    if classifier not in classifiers:
        raise DecoderNotFoundError(classifier, classifiers.keys())
    if scoring not in scoring_methods:
        raise ScoringMethodNotFoundError(scoring, scoring_methods.keys())
    return classifiers[classifier](
        balancing=balancing,
        optimize=optimize,
        scoring=scoring_methods[scoring],
    )
def _get_balanced_accuracy(model, data_test, label_test) -> Any:
    """Return the balanced accuracy of *model* on the given test set."""
    predictions = model.predict(data_test)
    return balanced_accuracy_score(label_test, predictions)
def _get_log_loss(model, data_test, label_test) -> Any:
    """Return the log loss of *model*'s predicted probabilities on the test set."""
    probabilities = model.predict_proba(data_test)
    return log_loss(label_test, probabilities)
class ScoringMethodNotFoundError(Exception):
    """Exception raised when an invalid scoring method is passed.

    Attributes:
        input_value -- input value which caused the error
        allowed -- allowed input values
        message -- explanation of the error
    """

    def __init__(
        self,
        input_value,
        allowed,
        message="Input scoring method is not an allowed value.",
    ) -> None:
        self.input_value = input_value
        self.allowed = allowed
        self.message = message
        super().__init__(self.message)

    def __str__(self) -> str:
        # Use single braces for interpolation: the original used doubled
        # braces ("{{self.message}}"), which rendered the literal text
        # "{self.message}" instead of the actual message.
        return (
            f"{self.message} Allowed values: {self.allowed}. Got:"
            f" {self.input_value}."
        )
class DecoderNotFoundError(Exception):
    """Exception raised when an invalid Decoder is passed.

    Attributes:
        input_value -- input which caused the error
        allowed -- allowed input values
        message -- explanation of the error
    """

    def __init__(
        self,
        input_value,
        allowed,
        message="Input decoding model is not an allowed value.",
    ) -> None:
        self.input_value = input_value
        # Store the allowed values as given. The original accessed
        # ``allowed.values``, which raised AttributeError when called with
        # ``dict.keys()`` (as get_decoder does), crashing while trying to
        # report the error.
        self.allowed = allowed
        self.message = message
        super().__init__(self.message)

    def __str__(self) -> str:
        # Single braces and the f-prefix on BOTH parts: the original printed
        # the literal texts "{self.message}" and "{self.input_value}".
        return (
            f"{self.message} Allowed values: {self.allowed}."
            f" Got: {self.input_value}."
        )
@dataclass
class CATB(Decoder):
    """Class for CatBoostClassifier implementation."""
    def __post_init__(self):
        # Default (un-tuned) multiclass CatBoost model; may be replaced by a
        # tuned model in ``fit`` when ``self.optimize`` is set.
        self.model = CatBoostClassifier(
            loss_function="MultiClass",
            verbose=False,
            use_best_model=True,
            eval_metric="MultiClass",
        )
    def fit(
        self,
        data: Union[pd.DataFrame, pd.Series],
        labels: np.ndarray,
        groups: np.ndarray,
    ) -> None:
        """Fit model to given training data and training labels.

        ``groups`` is used for the group-aware splitting performed while
        carving out a validation set and during Bayesian optimization.
        """
        self.data_train = data
        self.labels_train = labels
        self.groups_train = groups
        if self.optimize:
            # Replace the default model with one using tuned hyperparameters.
            self.model = self._bayesian_optimization()
        # Train outer model
        # Carve a validation set out of the training data for early stopping.
        # (Project helper; presumably returns (data, labels, eval_set) —
        # inferred from the unpacking below.)
        (
            self.data_train,
            self.labels_train,
            eval_set,
        ) = self._get_validation_split(
            self.data_train,
            self.labels_train,
            self.groups_train,
            train_size=0.8,
        )
        # Re-balance skewed classes according to ``self.balancing``.
        (
            self.data_train,
            self.labels_train,
            sample_weight,
        ) = self._balance_samples(
            self.data_train, self.labels_train, self.balancing
        )
        self.model.fit(
            self.data_train,
            self.labels_train,
            eval_set=eval_set,
            early_stopping_rounds=25,
            sample_weight=sample_weight,
            verbose=False,
        )
    def _bayesian_optimization(self):
        """Estimate optimal model parameters using bayesian optimization."""
        # Bounded search space for the tuned CatBoost hyperparameters.
        optimizer = BayesianOptimization(
            self._bo_tune,
            {
                "max_depth": (4, 10),
                "learning_rate": (0.003, 0.3),
                "bagging_temperature": (0.0, 1.0),
                "l2_leaf_reg": (1, 30),
                "random_strength": (0.01, 1.0),
            },
        )
        optimizer.maximize(init_points=10, n_iter=20, acq="ei")
        params = optimizer.max["params"]
        # BayesianOptimization samples floats; max_depth must be integral.
        params["max_depth"] = round(params["max_depth"])
        return CatBoostClassifier(
            iterations=200,
            loss_function="MultiClass",
            verbose=False,
            use_best_model=True,
            eval_metric="MultiClass",
            max_depth=params["max_depth"],
            learning_rate=params["learning_rate"],
            random_strength=params["random_strength"],
            bagging_temperature=params["bagging_temperature"],
            l2_leaf_reg=params["l2_leaf_reg"],
        )
    def _bo_tune(
        self,
        max_depth,
        learning_rate,
        bagging_temperature,
        l2_leaf_reg,
        random_strength,
    ):
        # Cross validating with the specified parameters in 3 group-shuffled
        # folds (previous comment said 5 folds, but n_splits=3).
        cv_inner = GroupShuffleSplit(
            n_splits=3, train_size=0.66, random_state=42
        )
        scores = []
        for train_index, test_index in cv_inner.split(
            self.data_train, self.labels_train, self.groups_train
        ):
            data_train_, data_test_ = (
                self.data_train[train_index],
                self.data_train[test_index],
            )
            y_tr, y_te = (
                self.labels_train[train_index],
                self.labels_train[test_index],
            )
            groups_tr = self.groups_train[train_index]
            # Inner validation split for early stopping within this fold.
            (data_train_, y_tr, eval_set_inner,) = self._get_validation_split(
                data=data_train_,
                labels=y_tr,
                groups=groups_tr,
                train_size=0.8,
            )
            data_train_, y_tr, sample_weight = self._balance_samples(
                data_train_, y_tr, self.balancing
            )
            inner_model = CatBoostClassifier(
                iterations=100,
                loss_function="MultiClass",
                verbose=False,
                eval_metric="MultiClass",
                max_depth=round(max_depth),
                learning_rate=learning_rate,
                bagging_temperature=bagging_temperature,
                l2_leaf_reg=l2_leaf_reg,
                random_strength=random_strength,
            )
            inner_model.fit(
                data_train_,
                y_tr,
                eval_set=eval_set_inner,
                early_stopping_rounds=25,
                sample_weight=sample_weight,
                verbose=False,
            )
            y_probs = inner_model.predict_proba(data_test_)
            # Score this fold with binary log loss (labels assumed {0, 1},
            # per the explicit labels=[0, 1]).
            score = log_loss(y_te, y_probs, labels=[0, 1])
            scores.append(score)
        # Return the negative MLOGLOSS (BayesianOptimization maximizes)
        return -1.0 * np.mean(scores)
@dataclass
class LDA(Decoder):
    """Class for applying Linear Discriminant Analysis using scikit-learn."""

    def __post_init__(self):
        # Fail fast on configurations this implementation cannot honour.
        # NOTE: fixed the error messages — the original concatenated
        # "...to" + "either" ("toeither") and named the value
        # `balance_weights` as if it were the parameter (it is `balancing`).
        if self.balancing == "balance_weights":
            raise ValueError(
                "Sample weights cannot be balanced for Linear "
                "Discriminant Analysis. Please set `balancing` to "
                "either `oversample`, `undersample` or `None`."
            )
        if self.optimize:
            raise ValueError(
                "Hyperparameter optimization cannot be performed for this"
                " implementation of Linear Discriminant Analysis. Please"
                " set `optimize` to False."
            )

    def fit(
        self, data: np.ndarray, labels: np.ndarray, groups: np.ndarray
    ) -> None:
        """Fit model to given training data and training labels.

        `groups` is accepted for interface compatibility but not used.
        """
        self.data_train, self.labels_train, _ = self._balance_samples(
            data, labels, self.balancing
        )
        # The lsqr solver supports automatic shrinkage estimation.
        self.model = LinearDiscriminantAnalysis(
            solver="lsqr", shrinkage="auto"
        )
        self.model.fit(self.data_train, self.labels_train)
@dataclass
class LR(Decoder):
    """Logistic regression decoder implementation using scikit-learn.

    (The previous docstring, "Basic representation of class for finding and
    filtering files.", was a copy-paste error.)
    """

    def fit(self, data: np.ndarray, labels: np.ndarray, groups) -> None:
        """Fit model to given training data and training labels.

        `groups` is stored for the group-aware cross-validation used during
        Bayesian hyperparameter optimization.
        """
        self.data_train = data
        self.labels_train = labels
        self.groups_train = groups
        if self.optimize:
            # Replace the default model with one using tuned parameters.
            self.model = self._bayesian_optimization()
        else:
            self.model = LogisticRegression(solver="newton-cg")
        self.data_train, self.labels_train, _ = self._balance_samples(
            data, labels, self.balancing
        )
        self.model.fit(self.data_train, self.labels_train)

    def _bayesian_optimization(self):
        """Estimate optimal model parameters using bayesian optimization."""
        # Only the inverse regularization strength C is tuned.
        optimizer = BayesianOptimization(
            self._bo_tune,
            {"C": (0.01, 1.0)},  # pylint: disable=invalid-name
        )
        optimizer.maximize(init_points=10, n_iter=20, acq="ei")
        # Train outer model with optimized parameters
        params = optimizer.max["params"]
        return LogisticRegression(
            solver="newton-cg", max_iter=500, C=params["C"]
        )

    def _bo_tune(self, C: float):  # pylint: disable=invalid-name
        """Objective for BayesianOptimization: the negative mean log loss
        over 3 group-shuffled cross-validation splits."""
        cv_inner = GroupShuffleSplit(
            n_splits=3, train_size=0.66, random_state=42
        )
        scores = []
        for train_index, test_index in cv_inner.split(
            self.data_train, self.labels_train, self.groups_train
        ):
            data_train_, data_test_ = (
                self.data_train[train_index],
                self.data_train[test_index],
            )
            y_tr, y_te = (
                self.labels_train[train_index],
                self.labels_train[test_index],
            )
            data_train_, y_tr, sample_weight = self._balance_samples(
                data_train_, y_tr, self.balancing
            )
            inner_model = LogisticRegression(
                solver="newton-cg", C=C, max_iter=500
            )
            inner_model.fit(data_train_, y_tr, sample_weight=sample_weight)
            y_probs = inner_model.predict_proba(data_test_)
            # labels=[0, 1]: scoring assumes a binary classification task.
            score = log_loss(y_te, y_probs, labels=[0, 1])
            scores.append(score)
        # Return the negative MLOGLOSS (BayesianOptimization maximizes)
        return -1.0 * np.mean(scores)
@dataclass
class Dummy(Decoder):
    """Decoder wrapping scikit-learn's uniform-random DummyClassifier."""

    def fit(self, data: np.ndarray, labels: np.ndarray, groups) -> None:
        """Balance the training data and fit the dummy classifier."""
        balanced = self._balance_samples(data, labels, self.balancing)
        self.data_train = balanced[0]
        self.labels_train = balanced[1]
        self.model = DummyClassifier(strategy="uniform")
        self.model.fit(self.data_train, self.labels_train)

    def get_score(self, data_test: np.ndarray, label_test: np.ndarray):
        """Calculate the score, averaged over 100 repeated evaluations.

        The uniform dummy predicts randomly, so a single evaluation would
        be noisy; averaging reduces the variance of the reported score.
        """
        scores = []
        for _ in range(100):
            scores.append(self.scoring(self.model, data_test, label_test))
        return np.mean(scores)
@dataclass
class QDA(Decoder):
    """Class for applying Quadratic Discriminant Analysis using scikit-learn.

    (The previous docstring said "Linear" — a copy-paste error from LDA.)
    """

    def __post_init__(self):
        # Fail fast on configurations this implementation cannot honour.
        # NOTE: fixed the error messages — the original concatenated
        # "...to" + "either" ("toeither") and named the value
        # `balance_weights` as if it were the parameter (it is `balancing`).
        if self.balancing == "balance_weights":
            raise ValueError(
                "Sample weights cannot be balanced for Quadratic "
                "Discriminant Analysis. Please set `balancing` to "
                "either `oversample`, `undersample` or `None`."
            )
        if self.optimize:
            raise ValueError(
                "Hyperparameter optimization cannot be performed for this"
                " implementation of Quadratic Discriminant Analysis. Please"
                " set `optimize` to False."
            )

    def fit(self, data: np.ndarray, labels: np.ndarray, groups) -> None:
        """Fit model to given training data and training labels.

        `groups` is accepted for interface compatibility but not used.
        """
        self.data_train, self.labels_train, _ = self._balance_samples(
            data, labels, self.balancing
        )
        self.model = QuadraticDiscriminantAnalysis()
        self.model.fit(self.data_train, self.labels_train)
@dataclass
class XGB(Decoder):
"""Basic representation of class for finding and filtering files."""
    def _bayesian_optimization(self):
        """Estimate optimal model parameters using bayesian optimization.

        Runs bayes_opt over the XGBoost hyperparameter bounds below, using
        `_bo_tune` (negative log-loss, so larger is better) as the target,
        and returns an unfitted XGBClassifier configured with the best
        parameters found.
        """
        # Search bounds for each tuned hyperparameter.
        optimizer = BayesianOptimization(
            self._bo_tune,
            {
                "learning_rate": (0.003, 0.3),
                "max_depth": (4, 10),
                "gamma": (0, 1),
                "colsample_bytree": (0.4, 1),
                "subsample": (0.4, 1),
            },
        )
        # 10 random probes, then 20 guided iterations with the Expected
        # Improvement acquisition function.
        # NOTE(review): newer bayes_opt versions moved `acq` into an
        # acquisition_function object -- confirm against the pinned version.
        optimizer.maximize(init_points=10, n_iter=20, acq="ei")
        # Train outer model with optimized parameters
        params = optimizer.max["params"]
        return XGBClassifier(
            objective="binary:logistic",
            use_label_encoder=False,
            n_estimators=200,
            eval_metric="logloss",
            learning_rate=params["learning_rate"],
            gamma=params["gamma"],
            # The optimizer proposes floats; max_depth must be an int.
            max_depth=int(params["max_depth"]),
            subsample=params["subsample"],
            colsample_bytree=params["colsample_bytree"],
        )
    def _bo_tune(
        self, learning_rate, gamma, max_depth, subsample, colsample_bytree
    ):
        """Objective for Bayesian optimization: 3-fold grouped CV log-loss.

        Trains an inner XGBClassifier with the proposed hyperparameters on
        each fold and returns the negated mean log-loss (bayes_opt maximizes,
        so lower loss maps to a higher objective value).
        """
        # Group-aware CV so samples from one group never straddle folds.
        cv_inner = GroupKFold(
            n_splits=3,
        )
        scores = []
        for train_index, test_index in cv_inner.split(
            self.data_train, self.labels_train, self.groups_train
        ):
            # assumes data_train is a pandas DataFrame (uses .iloc) while
            # labels/groups are numpy arrays -- TODO confirm at call site.
            data_train_, data_test_ = (
                self.data_train.iloc[train_index],
                self.data_train.iloc[test_index],
            )
            y_tr, y_te = (
                self.labels_train[train_index],
                self.labels_train[test_index],
            )
            groups_tr = self.groups_train[train_index]
            # Carve a validation split out of the fold's training part for
            # early stopping.
            (data_train_, y_tr, eval_set_inner,) = self._get_validation_split(
                data=data_train_,
                labels=y_tr,
                groups=groups_tr,
                train_size=0.8,
            )
            # Balance classes after splitting so the eval set stays untouched.
            (data_train_, y_tr, sample_weight,) = self._balance_samples(
                data=data_train_, labels=y_tr, method=self.balancing
            )
            inner_model = XGBClassifier(
                objective="binary:logistic",
                booster="gbtree",
                use_label_encoder=False,
                eval_metric="logloss",
                n_estimators=100,
                learning_rate=learning_rate,
                gamma=gamma,
                # Bayesian optimizer proposes floats; XGBoost needs an int.
                max_depth=int(max_depth),
                colsample_bytree=colsample_bytree,
                subsample=subsample,
            )
            inner_model.fit(
                X=data_train_,
                y=y_tr,
                eval_set=eval_set_inner,
                early_stopping_rounds=20,
                sample_weight=sample_weight,
                verbose=False,
            )
            y_probs = inner_model.predict_proba(X=data_test_)
            score = log_loss(y_te, y_probs, labels=[0, 1])
            scores.append(score)
        # Return the negative MLOGLOSS
        return -1.0 * np.mean(scores)
def fit(
self, data: pd.DataFrame, labels: np.ndarray, groups: np.ndarray
) -> None:
"""Fit model to given training data and training labels."""
self.data_train = data
self.labels_train = labels
self.groups_train = groups
if self.optimize:
self.model = self._bayesian_optimization()
else:
self.model = XGBClassifier(
objective="binary:logistic",
booster="gbtree",
use_label_encoder=False,
n_estimators=200,
eval_metric="logloss",
)
# Train outer model
(
| |
# <gh_stars>0
from functools import partial
import numpy as np
from keras.models import *
from keras.layers import *
from keras.optimizers import *
from keras import regularizers
from utils import *
from metrics import *
from deform_conv.layers import ConvOffset2D
def Conv(input, num_filters, use_deform=False, activation='relu', padding='same',
         kernel_initializer='he_normal', normal_conv_trainable=True,
         channel_wise=False):
    """3x3 convolution with L2 weight decay, optionally followed by a
    deformable-convolution offset layer.

    NOTE(review): `activation` is accepted but never used -- Conv2D is built
    with activation=None and every caller applies Activation(...) separately.
    Presumably kept for signature compatibility; confirm before removing.
    """
    # Plain 3x3 convolution; 0.01 L2 regularization on the kernel.
    input = Conv2D(num_filters, (3, 3), activation=None, padding=padding,
                   kernel_initializer=kernel_initializer,
                   kernel_regularizer=regularizers.l2(0.01),
                   trainable=normal_conv_trainable)(input)
    if use_deform:
        # Learnable sampling offsets (deformable convolution).
        input = ConvOffset2D(num_filters, channel_wise=channel_wise)(input)
    return input
def text_classification_model(pretrained_weights=None, input_size=(None, None, 3),
                              num_classes=3, num_filters=32, use_deform=True,
                              channel_wise=False, normal_conv_trainable=True,
                              class_weights=None, loss_weights=[1.0, 1.0],
                              ignore_background=False):
    """Build and compile a U-Net-style per-pixel text classification model.

    Args:
        pretrained_weights: optional weights file, loaded with by_name=True.
        input_size: input shape (H, W, C); H and W may be None.
        num_classes: channels of the softmax `output_mask`.
        num_filters: base filter count; doubled at each encoder level.
        use_deform: insert deformable-convolution offset layers.
        channel_wise: per-channel offsets in the deformable layers.
        normal_conv_trainable: trainability of the plain Conv2D layers.
        class_weights, loss_weights, ignore_background: forwarded to the
            custom categorical loss.  `loss_weights` has a mutable default,
            but it is never mutated here, so the shared default is safe.

    Returns:
        A compiled Keras Model mapping the input image to `output_mask`.
    """
    # Rebind the module-level Conv so all blocks below inherit the
    # trainability/channel_wise settings without extra plumbing.
    global Conv
    Conv = partial(Conv, normal_conv_trainable=normal_conv_trainable,
                   channel_wise=channel_wise)

    def conv_act_bn_dropout_block(input, num_filters, use_deform=True,
                                  dropout=0):
        # conv -> ReLU -> batch norm, with optional spatial dropout.
        output = Conv(input, num_filters, use_deform=use_deform)
        output = Activation('relu')(output)
        output = BatchNormalization()(output)
        if dropout > 0:
            output = SpatialDropout2D(dropout)(output)
        return output

    input = Input(input_size)

    # Encoder: two conv blocks per level; filters double after each pooling.
    conv1 = conv_act_bn_dropout_block(input, num_filters, use_deform=use_deform)
    conv1 = conv_act_bn_dropout_block(conv1, num_filters, use_deform=use_deform)
    down1_2 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = conv_act_bn_dropout_block(down1_2, num_filters*2, use_deform=use_deform)
    conv2 = conv_act_bn_dropout_block(conv2, num_filters*2, use_deform=use_deform)
    down2_3 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = conv_act_bn_dropout_block(down2_3, num_filters*4, use_deform=use_deform)
    conv3 = conv_act_bn_dropout_block(conv3, num_filters*4, use_deform=use_deform)
    down3_4 = MaxPooling2D(pool_size=(2, 2))(conv3)

    conv4 = conv_act_bn_dropout_block(down3_4, num_filters*8, use_deform=use_deform)
    conv4 = conv_act_bn_dropout_block(conv4, num_filters*8, use_deform=use_deform)
    down4_5 = MaxPooling2D(pool_size=(2, 2))(conv4)

    conv5 = conv_act_bn_dropout_block(down4_5, num_filters*16, use_deform=use_deform)
    conv5 = conv_act_bn_dropout_block(conv5, num_filters*16, use_deform=use_deform)

    # Decoder: upsample, concatenate the skip connection, then two blocks.
    up5_4 = conv_act_bn_dropout_block(UpSampling2D(size = (2,2))(conv5), num_filters*8)
    merge4 = concatenate([conv4, up5_4], axis=3)
    conv4 = conv_act_bn_dropout_block(merge4, num_filters*8, use_deform=use_deform)
    conv4 = conv_act_bn_dropout_block(conv4, num_filters*8, use_deform=use_deform)

    up4_3 = conv_act_bn_dropout_block(UpSampling2D(size = (2,2))(conv4), num_filters*4)
    merge3 = concatenate([conv3, up4_3], axis=3)
    conv3 = conv_act_bn_dropout_block(merge3, num_filters*4, use_deform=use_deform)
    conv3 = conv_act_bn_dropout_block(conv3, num_filters*4, use_deform=use_deform)

    up3_2 = conv_act_bn_dropout_block(UpSampling2D(size = (2,2))(conv3), num_filters*2)
    merge2 = concatenate([conv2, up3_2], axis=3)
    conv2 = conv_act_bn_dropout_block(merge2, num_filters*2, use_deform=use_deform)
    conv2 = conv_act_bn_dropout_block(conv2, num_filters*2, use_deform=use_deform)

    up2_1 = conv_act_bn_dropout_block(UpSampling2D(size = (2,2))(conv2), num_filters)
    merge1 = concatenate([conv1, up2_1], axis=3)
    conv1 = conv_act_bn_dropout_block(merge1, num_filters, use_deform=False)
    conv1 = conv_act_bn_dropout_block(conv1, num_filters, use_deform=False)

    output_mask = Conv2D(num_classes, (1, 1), activation='softmax',
                         name='output_mask',
                         trainable=normal_conv_trainable)(conv1)

    # BUG FIX: the Keras 2 functional API keyword is `inputs=`; the old
    # `input=` kwarg (previously mixed here with `outputs=`) is rejected by
    # modern Keras versions.
    model = Model(inputs=input, outputs=output_mask)

    global IoU_score
    IoU_score = partial(IoU_score, ignore_last_channel=ignore_background)
    IoU_score.__name__ = 'IoU_score'
    global custom_categorical_loss
    custom_categorical_loss = partial(custom_categorical_loss,
                                      class_weights=class_weights,
                                      loss_weights=loss_weights,
                                      ignore_last_channel=ignore_background)
    custom_categorical_loss.__name__ = 'custom_categorical_loss'

    model.compile(optimizer=Adam(lr=1e-4),
                  loss=custom_categorical_loss,
                  metrics=['accuracy', IoU_score])

    if pretrained_weights:
        model.load_weights(pretrained_weights, by_name=True)
    # model.summary()
    return model
def relation_model(pretrained_weights=None, input_size=(None, None, 3),
                   num_filters=32, use_deform=True,
                   channel_wise=False, normal_conv_trainable=True,
                   loss_weights=[1.0, 1.0]):
    """Build and compile a U-Net-style model predicting two sigmoid masks:
    `horizontal_relation_mask` and `vertical_relation_mask`.

    Args:
        pretrained_weights: optional weights file, loaded with by_name=True.
        input_size: input shape (H, W, C); H and W may be None.
        num_filters: base filter count; doubled at each encoder level.
        use_deform: insert deformable-convolution offset layers.
        channel_wise: per-channel offsets in the deformable layers.
        normal_conv_trainable: trainability of the plain Conv2D layers.
        loss_weights: forwarded to the custom loss (mutable default is
            never mutated here, so it is safe).

    Returns:
        A compiled Keras Model with two mask outputs.
    """
    # Rebind the module-level Conv with this model's settings.
    global Conv
    Conv = partial(Conv, normal_conv_trainable=normal_conv_trainable,
                   use_deform=use_deform, channel_wise=channel_wise)

    def conv_act_bn_dropout_block(input, num_filters, use_deform=True,
                                  dropout=0):
        # conv -> ReLU -> batch norm, with optional spatial dropout.
        output = Conv(input, num_filters, use_deform=use_deform)
        output = Activation('relu')(output)
        output = BatchNormalization()(output)
        if dropout > 0:
            output = SpatialDropout2D(dropout)(output)
        return output

    input = Input(input_size)

    # Encoder.
    conv1 = conv_act_bn_dropout_block(input, num_filters, use_deform=use_deform)
    conv1 = conv_act_bn_dropout_block(conv1, num_filters, use_deform=use_deform)
    down1_2 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = conv_act_bn_dropout_block(down1_2, num_filters*2, use_deform=use_deform)
    conv2 = conv_act_bn_dropout_block(conv2, num_filters*2, use_deform=use_deform)
    down2_3 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = conv_act_bn_dropout_block(down2_3, num_filters*4, use_deform=use_deform)
    conv3 = conv_act_bn_dropout_block(conv3, num_filters*4, use_deform=use_deform)
    down3_4 = MaxPooling2D(pool_size=(2, 2))(conv3)

    conv4 = conv_act_bn_dropout_block(down3_4, num_filters*8, use_deform=use_deform)
    conv4 = conv_act_bn_dropout_block(conv4, num_filters*8, use_deform=use_deform)
    down4_5 = MaxPooling2D(pool_size=(2, 2))(conv4)

    conv5 = conv_act_bn_dropout_block(down4_5, num_filters*16, use_deform=use_deform)
    conv5 = conv_act_bn_dropout_block(conv5, num_filters*16, use_deform=use_deform)

    # Decoder with skip connections.
    up5_4 = conv_act_bn_dropout_block(UpSampling2D(size = (2,2))(conv5), num_filters*8)
    merge4 = concatenate([conv4, up5_4], axis=3)
    conv4 = conv_act_bn_dropout_block(merge4, num_filters*8, use_deform=use_deform)
    conv4 = conv_act_bn_dropout_block(conv4, num_filters*8, use_deform=use_deform)

    up4_3 = conv_act_bn_dropout_block(UpSampling2D(size = (2,2))(conv4), num_filters*4)
    merge3 = concatenate([conv3, up4_3], axis=3)
    conv3 = conv_act_bn_dropout_block(merge3, num_filters*4, use_deform=use_deform)
    conv3 = conv_act_bn_dropout_block(conv3, num_filters*4, use_deform=use_deform)

    up3_2 = conv_act_bn_dropout_block(UpSampling2D(size = (2,2))(conv3), num_filters*2)
    merge2 = concatenate([conv2, up3_2], axis=3)
    conv2 = conv_act_bn_dropout_block(merge2, num_filters*2, use_deform=use_deform)
    conv2 = conv_act_bn_dropout_block(conv2, num_filters*2, use_deform=use_deform)

    up2_1 = conv_act_bn_dropout_block(UpSampling2D(size = (2,2))(conv2), num_filters)
    merge1 = concatenate([conv1, up2_1], axis=3)
    conv1 = conv_act_bn_dropout_block(merge1, num_filters, use_deform=False)
    conv1 = conv_act_bn_dropout_block(conv1, num_filters, use_deform=False)

    horizontal_relation_mask = Conv2D(1, 1, activation='sigmoid',
                                      name='horizontal_relation_mask')(conv1)
    vertical_relation_mask = Conv2D(1, 1, activation='sigmoid',
                                    name='vertical_relation_mask')(conv1)

    # BUG FIX: use the Keras 2 keyword `inputs=` (the old `input=` kwarg is
    # rejected by modern Keras versions).
    model = Model(inputs=input,
                  outputs=[horizontal_relation_mask, vertical_relation_mask])

    global IoU_score
    IoU_score = partial(IoU_score, ignore_last_channel=False)
    IoU_score.__name__ = 'IoU_score'
    global custom_loss
    custom_loss = partial(custom_loss, class_weights=None,
                          loss_weights=loss_weights, ignore_last_channel=False)
    custom_loss.__name__ = 'custom_loss'

    model.compile(optimizer=Adam(lr=1e-4),
                  loss={'horizontal_relation_mask': custom_loss,
                        'vertical_relation_mask': custom_loss},
                  metrics=['accuracy', IoU_score])

    if pretrained_weights:
        model.load_weights(pretrained_weights, by_name=True)
    # model.summary()
    return model
def text_detection_model(pretrained_weights=None, input_size=(None, None, 3),
                         num_filters=32, use_deform=True,
                         channel_wise=False, normal_conv_trainable=True,
                         loss_weights=[1.0, 1.0]):
    """Build and compile a U-Net-style text detector with one sigmoid
    output, `text_mask`.

    Args:
        pretrained_weights: optional weights file, loaded with by_name=True.
        input_size: input shape (H, W, C); H and W may be None.
        num_filters: base filter count; doubled at each encoder level.
        use_deform: insert deformable-convolution offset layers.
        channel_wise: per-channel offsets in the deformable layers.
        normal_conv_trainable: trainability of the plain Conv2D layers.
        loss_weights: forwarded to the custom loss (mutable default is
            never mutated here, so it is safe).

    Returns:
        A compiled Keras Model mapping the input image to `text_mask`.
    """
    # Rebind the module-level Conv with this model's settings.
    global Conv
    Conv = partial(Conv, normal_conv_trainable=normal_conv_trainable,
                   use_deform=use_deform, channel_wise=channel_wise)

    def conv_act_bn_dropout_block(input, num_filters, use_deform=True,
                                  dropout=0):
        # conv -> ReLU -> batch norm, with optional spatial dropout.
        output = Conv(input, num_filters, use_deform=use_deform)
        output = Activation('relu')(output)
        output = BatchNormalization()(output)
        if dropout > 0:
            output = SpatialDropout2D(dropout)(output)
        return output

    input = Input(input_size)

    # Encoder.
    conv1 = conv_act_bn_dropout_block(input, num_filters, use_deform=use_deform)
    conv1 = conv_act_bn_dropout_block(conv1, num_filters, use_deform=use_deform)
    down1_2 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = conv_act_bn_dropout_block(down1_2, num_filters*2, use_deform=use_deform)
    conv2 = conv_act_bn_dropout_block(conv2, num_filters*2, use_deform=use_deform)
    down2_3 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = conv_act_bn_dropout_block(down2_3, num_filters*4, use_deform=use_deform)
    conv3 = conv_act_bn_dropout_block(conv3, num_filters*4, use_deform=use_deform)
    down3_4 = MaxPooling2D(pool_size=(2, 2))(conv3)

    conv4 = conv_act_bn_dropout_block(down3_4, num_filters*8, use_deform=use_deform)
    conv4 = conv_act_bn_dropout_block(conv4, num_filters*8, use_deform=use_deform)
    down4_5 = MaxPooling2D(pool_size=(2, 2))(conv4)

    conv5 = conv_act_bn_dropout_block(down4_5, num_filters*16, use_deform=use_deform)
    conv5 = conv_act_bn_dropout_block(conv5, num_filters*16, use_deform=use_deform)

    # Decoder with skip connections.
    up5_4 = conv_act_bn_dropout_block(UpSampling2D(size = (2,2))(conv5), num_filters*8)
    merge4 = concatenate([conv4, up5_4], axis=3)
    conv4 = conv_act_bn_dropout_block(merge4, num_filters*8, use_deform=use_deform)
    conv4 = conv_act_bn_dropout_block(conv4, num_filters*8, use_deform=use_deform)

    up4_3 = conv_act_bn_dropout_block(UpSampling2D(size = (2,2))(conv4), num_filters*4)
    merge3 = concatenate([conv3, up4_3], axis=3)
    conv3 = conv_act_bn_dropout_block(merge3, num_filters*4, use_deform=use_deform)
    conv3 = conv_act_bn_dropout_block(conv3, num_filters*4, use_deform=use_deform)

    up3_2 = conv_act_bn_dropout_block(UpSampling2D(size = (2,2))(conv3), num_filters*2)
    merge2 = concatenate([conv2, up3_2], axis=3)
    conv2 = conv_act_bn_dropout_block(merge2, num_filters*2, use_deform=use_deform)
    conv2 = conv_act_bn_dropout_block(conv2, num_filters*2, use_deform=use_deform)

    up2_1 = conv_act_bn_dropout_block(UpSampling2D(size = (2,2))(conv2), num_filters)
    merge1 = concatenate([conv1, up2_1], axis=3)
    conv1 = conv_act_bn_dropout_block(merge1, num_filters, use_deform=False)
    conv1 = conv_act_bn_dropout_block(conv1, num_filters, use_deform=False)

    text_mask = Conv2D(1, 1, activation='sigmoid', name='text_mask')(conv1)

    # BUG FIX: use the Keras 2 keyword `inputs=` (the old `input=` kwarg is
    # rejected by modern Keras versions).
    model = Model(inputs=input, outputs=text_mask)

    global IoU_score
    IoU_score = partial(IoU_score, ignore_last_channel=False)
    IoU_score.__name__ = 'IoU_score'
    global custom_loss
    custom_loss = partial(custom_loss, class_weights=None,
                          loss_weights=loss_weights, ignore_last_channel=False)
    custom_loss.__name__ = 'custom_loss'

    model.compile(optimizer=Adam(lr=1e-4),
                  loss=custom_loss,
                  metrics=['accuracy', IoU_score])

    if pretrained_weights:
        model.load_weights(pretrained_weights, by_name=True)
    # model.summary()
    return model
def isbi_model(pretrained_weights=None, input_size=(None, None, 3),
num_filters=32, use_deform=True, channel_wise=False,
normal_conv_trainable=True, loss_weights=[1.0, 1.0]):
global Conv
Conv = partial(Conv, normal_conv_trainable=normal_conv_trainable,
use_deform=use_deform, channel_wise=channel_wise)
def conv_act_bn_dropout_block(input, num_filters, use_deform=True,
dropout=0):
output = Conv(input, num_filters, use_deform=use_deform)
output = Activation('relu')(output)
output = BatchNormalization()(output)
if dropout > 0:
output = SpatialDropout2D(dropout)(output)
return output
input = Input(input_size)
conv1 = conv_act_bn_dropout_block(input, num_filters, use_deform=use_deform)
conv1 = conv_act_bn_dropout_block(conv1, num_filters, use_deform=use_deform)
down1_2 = MaxPooling2D(pool_size=(2, 2))(conv1)
conv2 = conv_act_bn_dropout_block(down1_2, num_filters*2, use_deform=use_deform)
conv2 = conv_act_bn_dropout_block(conv2, num_filters*2, use_deform=use_deform)
down2_3 = MaxPooling2D(pool_size=(2, 2))(conv2)
conv3 = conv_act_bn_dropout_block(down2_3, num_filters*4, use_deform=use_deform)
conv3 = conv_act_bn_dropout_block(conv3, num_filters*4, use_deform=use_deform)
down3_4 = MaxPooling2D(pool_size=(2, 2))(conv3)
conv4 = conv_act_bn_dropout_block(down3_4, num_filters*8, use_deform=use_deform)
conv4 = conv_act_bn_dropout_block(conv4, num_filters*8, use_deform=use_deform)
conv4 = Dropout(0.5)(conv4)
down4_5 = MaxPooling2D(pool_size=(2, 2))(conv4)
conv5 = conv_act_bn_dropout_block(down4_5, num_filters*16, use_deform=use_deform)
conv5 = conv_act_bn_dropout_block(conv5, num_filters*16, use_deform=use_deform)
conv5 = Dropout(0.5)(conv5)
up5_4 = conv_act_bn_dropout_block(UpSampling2D(size = (2,2))(conv5), num_filters*8)
merge4 = concatenate([conv4, up5_4], axis=3)
conv4 = conv_act_bn_dropout_block(merge4, num_filters*8, use_deform=use_deform)
conv4 = conv_act_bn_dropout_block(conv4, num_filters*8, use_deform=use_deform)
up4_3 = conv_act_bn_dropout_block(UpSampling2D(size = (2,2))(conv4), num_filters*4)
merge3 = concatenate([conv3, up4_3], axis=3)
conv3 = conv_act_bn_dropout_block(merge3, num_filters*4, use_deform=use_deform)
conv3 = conv_act_bn_dropout_block(conv3, num_filters*4, use_deform=use_deform)
up3_2 = conv_act_bn_dropout_block(UpSampling2D(size = (2,2))(conv3), num_filters*2)
merge2 = concatenate([conv2, up3_2], axis=3)
conv2 = conv_act_bn_dropout_block(merge2, num_filters*2, use_deform=use_deform)
conv2 = conv_act_bn_dropout_block(conv2, num_filters*2, use_deform=use_deform)
up2_1 = conv_act_bn_dropout_block(UpSampling2D(size = (2,2))(conv2), num_filters)
merge1 = concatenate([conv1, up2_1], axis=3)
conv1 = conv_act_bn_dropout_block(merge1, num_filters, use_deform=False)
conv1 = conv_act_bn_dropout_block(conv1, num_filters, use_deform=False)
output = Conv2D(1, 1, activation='sigmoid', name='output')(conv1)
model = Model(input=input, outputs=output)
global IoU_score
IoU_score = partial(IoU_score, ignore_last_channel=False)
IoU_score.__name__ = 'IoU_score'
global custom_loss
custom_loss = partial(custom_loss, class_weights=None,
| |
# <reponame>sirex/htsql
#
# Copyright (c) 2006-2013, Prometheus Research, LLC
#
"""
:mod:`htsql.core.tr.bind`
=========================
This module implements the binding process.
"""
from ..util import maybe, listof, tupleof, similar
from ..adapter import Adapter, Protocol, adapt, adapt_many
from ..domain import (Domain, BooleanDomain, IntegerDomain, DecimalDomain,
FloatDomain, UntypedDomain, EntityDomain, RecordDomain, ListDomain,
IdentityDomain, VoidDomain)
from ..classify import normalize
from ..error import Error, translate_guard, choices_guard, point
from ..syn.syntax import (Syntax, CollectSyntax, SelectSyntax, ApplySyntax,
FunctionSyntax, PipeSyntax, OperatorSyntax, PrefixSyntax,
ProjectSyntax, FilterSyntax, LinkSyntax, DetachSyntax, AttachSyntax,
AssignSyntax, ComposeSyntax, LocateSyntax, IdentitySyntax, GroupSyntax,
IdentifierSyntax, UnpackSyntax, ReferenceSyntax, LiftSyntax,
StringSyntax, LabelSyntax, NumberSyntax, RecordSyntax, DirectSyntax)
from .binding import (Binding, WrappingBinding, CollectBinding, RootBinding,
HomeBinding, TableBinding, ChainBinding, ColumnBinding,
QuotientBinding, KernelBinding, ComplementBinding, LocateBinding,
SieveBinding, AttachBinding, SortBinding, CastBinding, IdentityBinding,
ImplicitCastBinding, RescopingBinding, AssignmentBinding,
DefineBinding, DefineReferenceBinding, DefineCollectionBinding,
DefineLiftBinding, SelectionBinding, WildSelectionBinding,
DirectionBinding, TitleBinding, RerouteBinding,
ReferenceRerouteBinding, AliasBinding, LiteralBinding, FormulaBinding,
VoidBinding, Recipe, LiteralRecipe, SelectionRecipe, FreeTableRecipe,
AttachedTableRecipe, ColumnRecipe, KernelRecipe, ComplementRecipe,
IdentityRecipe, ChainRecipe, SubstitutionRecipe, BindingRecipe,
ClosedRecipe, PinnedRecipe, AmbiguousRecipe)
from .lookup import (lookup_attribute, lookup_reference, lookup_complement,
lookup_attribute_set, lookup_reference_set, expand, direct, guess_tag,
identify, unwrap)
from .signature import IsEqualSig, AndSig
from .coerce import coerce
from .decorate import decorate
class BindingState(object):
    """
    Encapsulates the (mutable) state of the binding process: the root
    lookup scope, the current scope, and a stack of saved scopes.

    `root` (:class:`htsql.core.tr.binding.RootBinding`)
        The root lookup scope.

    `environment`
        Optional iterable of ``(name, recipe)`` pairs predefined in the
        root scope; names are normalized before registration.
    """

    def __init__(self, root, environment=None):
        assert isinstance(root, RootBinding)
        # The root lookup scope.
        self.root = root
        # The current lookup scope.
        self.scope = root
        # The stack of previous lookup scopes.
        self.scope_stack = []
        # References in the root scope.
        self.environment = environment
        if self.environment is not None:
            # Wrap the root scope with the predefined reference collection.
            collection = {}
            for name, recipe in self.environment:
                name = normalize(name)
                collection[name] = recipe
            if collection:
                self.scope = DefineCollectionBinding(
                        self.scope, collection, True, self.scope.syntax)

    def push_scope(self, scope):
        """
        Sets the new lookup scope.

        This function stores the current scope in the stack and makes
        the given binding the new lookup scope.  Use the :attr:`scope`
        attribute to get the current scope; :meth:`pop_scope` to restore
        the previous scope.

        `scope` (:class:`htsql.core.tr.binding.Binding`)
            The new lookup scope.
        """
        # Sanity check on the argument.
        assert isinstance(scope, Binding)
        # Ensure that the root scope was set.
        assert self.root is not None
        # Save the current lookup scope.
        self.scope_stack.append(self.scope)
        # Assign the new lookup scope.
        self.scope = scope

    def pop_scope(self):
        """
        Restores the previous lookup scope.

        This functions restores the previous lookup scope from the stack.
        Use the :attr:`scope` attribute to get the current scope;
        :meth:`push_scope` to change the current scope.
        """
        # Restore the prevous lookup scope from the stack.
        self.scope = self.scope_stack.pop()

    def bind(self, syntax, scope=None):
        """
        Binds the given syntax node using the current binding state.

        Returns a binding node.

        `syntax` (:class:`htsql.core.tr.syntax.Syntax`)
            The syntax node to bind.

        `scope` (:class:`htsql.core.tr.binding.Binding` or ``None``)
            If set, the lookup scope is set to `scope` when
            binding the syntax node.
        """
        with translate_guard(syntax):
            if scope is not None:
                self.push_scope(scope)
            # Dispatch on the syntax type via the Bind adapter.
            binding = Bind.__prepare__(syntax, self)()
            if scope is not None:
                self.pop_scope()
            return binding

    def use(self, recipe, syntax, scope=None):
        """
        Applies a recipe to produce a binding node.

        Returns a binding node.

        `recipe` (:class:`htsql.core.tr.binding.Recipe`)
            The recipe to apply.

        `syntax` (:class:`htsql.core.tr.syntax.Syntax`)
            The syntax node associated with the recipe.

        `scope` (:class:`htsql.core.tr.binding.Binding` or ``None``)
            If set, the lookup scope is set to `scope` when
            binding the syntax node.
        """
        # If passed, set the new lookup scope.
        if scope is not None:
            self.push_scope(scope)
        # Realize and apply `BindByRecipe` adapter.
        with translate_guard(syntax):
            binding = BindByRecipe.__invoke__(recipe, syntax, self)
        # Restore the old lookup scope.
        if scope is not None:
            self.pop_scope()
        # Return the generated binding node.
        return binding

    def call(self, syntax, scope=None):
        """
        Binds a global function or a global identifier.

        Returns a binding node.

        `syntax` (:class:`htsql.core.tr.syntax.Syntax`)
            The syntax node to bind.

        `scope` (:class:`htsql.core.tr.binding.Binding` or ``None``)
            If set, the lookup context is set to `scope` when
            binding the syntax node.
        """
        # If passed, set the new lookup scope.
        if scope is not None:
            self.push_scope(scope)
        # Realize and apply `BindByName` protocol.
        with translate_guard(syntax):
            binding = BindByName.__invoke__(syntax, self)
        # Restore the old lookup scope.
        if scope is not None:
            self.pop_scope()
        # Return the generated binding node.
        return binding
class Bind(Adapter):
    """
    Translates a syntax node to a binding node.

    This is an interface adapter; see subclasses for implementations.

    The binding process resolves identifiers against database objects,
    resolves and validates operators and function calls, and determine
    types of all expression.

    The :class:`Bind` adapter has the following signature::

        Bind: (Syntax, BindingState) -> Binding

    The adapter is polymorphic on the `Syntax` argument.

    `syntax` (:class:`htsql.core.tr.syntax.Syntax`)
        The syntax node to bind.

    `state` (:class:`BindingState`)
        The current state of the binding process.
    """

    # Register this adapter for the base Syntax type; subclasses narrow it.
    adapt(Syntax)

    def __init__(self, syntax, state):
        assert isinstance(syntax, Syntax)
        assert isinstance(state, BindingState)
        self.syntax = syntax
        self.state = state

    def __call__(self):
        # The default implementation raises an error.  It is actually
        # unreachable since we provide an implementation for all syntax nodes.
        raise Error("Unable to bind a node")
def hint_choices(choices):
    """Generate a "did you mean" hint from a list of choice strings.

    Returns ``None`` for an empty list; otherwise a string of the form
    ``did you mean: 'a', 'b' or 'c'``.
    """
    assert isinstance(choices, listof(unicode))
    if not choices:
        return None
    # Quote (and UTF-8 encode) every candidate up front.
    quoted = ["'%s'" % choice.encode('utf-8') for choice in choices]
    if len(quoted) == 1:
        tail = quoted[0]
    else:
        # All but the last joined by commas, then "or" before the last.
        tail = "%s or %s" % (", ".join(quoted[:-1]), quoted[-1])
    return "did you mean: " + tail
class BindCollect(Bind):
    """Binds a segment (``/...``) node into a :class:`CollectBinding`
    with a list domain, handling an optional in-segment assignment."""

    adapt(CollectSyntax)

    def __call__(self):
        ## FIXME: an empty segment syntax should not be generated.
        #if self.syntax.arm is None:
        #    raise Error("output columns are not specified",
        #                self.syntax.mark)
        # Bind the segment expression.
        if self.syntax.arm is not None:
            seed = self.state.bind(self.syntax.arm)
            # An in-segment assignment (`/x := ...`) defines and immediately
            # uses the assigned expression.
            if isinstance(seed, AssignmentBinding):
                with translate_guard(seed):
                    if len(seed.terms) != 1:
                        raise Error("Qualified definition is not allowed"
                                    " for an in-segment assignment")
                    if seed.parameters is not None:
                        raise Error("Parameterized definition is not allowed"
                                    " for an in-segment assignment")
                name, is_reference = seed.terms[0]
                # References are bound eagerly; attributes lazily via
                # substitution in the current scope.
                if is_reference:
                    recipe = BindingRecipe(self.state.bind(seed.body))
                else:
                    recipe = SubstitutionRecipe(self.state.scope, [],
                                                None, seed.body)
                recipe = ClosedRecipe(recipe)
                syntax = seed.syntax
                if isinstance(syntax, AssignSyntax):
                    syntax = syntax.larm
                seed = self.state.use(recipe, syntax)
        else:
            # Empty segment: collect the current scope itself.
            seed = self.state.scope
        # Shape the seed for output and wrap it in a list domain.
        seed = Select.__invoke__(seed, self.state)
        domain = ListDomain(seed.domain)
        return CollectBinding(self.state.scope, seed, domain,
                              self.syntax)
class Select(Adapter):
    """
    Shapes a binding for output.

    Polymorphic on the *domain* of the binding (see :meth:`__dispatch__`);
    the default implementation coerces the binding to a scalar output
    column.
    """

    adapt(Domain)

    @classmethod
    def __dispatch__(interface, binding, *args, **kwds):
        # Dispatch on the binding's domain type rather than the binding type.
        assert isinstance(binding, Binding)
        return (type(binding.domain),)

    def __init__(self, binding, state):
        self.binding = binding
        self.state = state

    def __call__(self):
        # Coerce the domain to a valid output (scalar) domain.
        domain = coerce(self.binding.domain)
        if domain is None:
            # FIXME: separate implementation for VoidDomain with a better error
            # message.
            raise Error("Output column must be scalar")
        return ImplicitCastBinding(self.binding, domain, self.binding.syntax)
class SelectRecord(Select):
    """Shapes an entity or record binding by expanding its public fields
    into a selection; falls back to scalar coercion when the binding
    cannot be expanded."""

    adapt_many(EntityDomain,
               RecordDomain)

    def __call__(self):
        # Expand the binding into its public fields.
        recipes = expand(self.binding, with_syntax=True, with_wild=True,
                         with_class=True)
        if recipes is None:
            # Not expandable: defer to the scalar implementation.
            return super(SelectRecord, self).__call__()
        elements = []
        for syntax, recipe in recipes:
            # Each field is itself shaped for output, recursively.
            element = self.state.use(recipe, syntax, scope=self.binding)
            element = Select.__invoke__(element, self.state)
            elements.append(element)
        fields = [decorate(element) for element in elements]
        domain = RecordDomain(fields)
        binding = SelectionBinding(self.binding, elements, domain,
                                   self.binding.syntax)
        return binding
class SelectList(Select):
    """List values are already shaped for output; pass them through."""

    adapt(ListDomain)

    def __call__(self):
        return self.binding
class SelectIdentity(Select):
    """Identity values are already shaped for output; pass them through."""

    adapt(IdentityDomain)

    def __call__(self):
        return self.binding
class SelectUntyped(Select):
    """Untyped (literal) values need no cast; pass them through."""

    adapt(UntypedDomain)

    def __call__(self):
        return self.binding
class BindSelect(Bind):
    """Binds a selection node: the left operand becomes the lookup scope
    for binding the right operand."""

    adapt(SelectSyntax)

    def __call__(self):
        # Bind `larm` first; the resulting binding is the scope in which
        # `rarm` is bound.
        return self.state.bind(self.syntax.rarm,
                               scope=self.state.bind(self.syntax.larm))
class BindRecord(Bind):
    """Binds a record/selector node (``{...}``), threading the lookup
    scope through in-selector assignments and direction decorators, and
    producing a :class:`SelectionBinding`."""

    adapt(RecordSyntax)

    def __call__(self):
        # Extract selector elements.
        elements = []
        scope = self.state.scope
        self.state.push_scope(scope)
        for arm in self.syntax.arms:
            binding = self.state.bind(arm)
            # Handle in-selector assignments.
            if isinstance(binding, AssignmentBinding):
                with translate_guard(binding):
                    if len(binding.terms) != 1:
                        raise Error("Qualified definition is not allowed"
                                    " for an in-selector assignment")
                    if binding.parameters is not None:
                        raise Error("Parameterized definition is not allowed"
                                    " for an in-selector assignment")
                name, is_reference = binding.terms[0]
                # References bind eagerly; attributes lazily by substitution.
                if is_reference:
                    recipe = BindingRecipe(self.state.bind(binding.body))
                else:
                    recipe = SubstitutionRecipe(scope, [],
                                                None, binding.body)
                recipe = ClosedRecipe(recipe)
                syntax = binding.syntax
                if isinstance(syntax, AssignSyntax):
                    syntax = syntax.larm.larms[0]
                binding = self.state.use(recipe, syntax)
                # Extend the scope with the new definition so later arms
                # can refer to it; re-push the extended scope.
                if is_reference:
                    scope = DefineReferenceBinding(scope, name,
                                                   recipe, scope.syntax)
                else:
                    scope = DefineBinding(scope, name, None,
                                          recipe, scope.syntax)
                self.state.pop_scope()
                self.state.push_scope(scope)
            # Extract nested selectors, if any.
            bindings = []
            recipes = expand(binding, with_wild=True)
            if recipes is not None:
                seed = binding
                for syntax, recipe in recipes:
                    binding = self.state.use(recipe, syntax)
                    binding = RescopingBinding(binding, seed, binding.syntax)
                    bindings.append(binding)
            else:
                bindings.append(binding)
            # Handle in-selector direction decorators.
            order = []
            for binding in bindings:
                direction = direct(binding)
                if direction is not None:
                    order.append(binding)
            if order:
                # Ordered elements impose a sort on the scope itself.
                scope = SortBinding(scope, order, None, None, scope.syntax)
                self.state.pop_scope()
                self.state.push_scope(scope)
            elements.extend(bindings)
        self.state.pop_scope()
        # Generate a selection scope.
        fields = [decorate(element) for element in elements]
        domain = RecordDomain(fields)
        return SelectionBinding(scope, elements, domain, self.syntax)
class BindApply(Bind):
adapt(ApplySyntax)
def __call__(self):
# Look for the parameterized attribute in the current local scope.
recipe = lookup_attribute(self.state.scope,
self.syntax.name, len(self.syntax.arguments))
if recipe is not None:
binding = self.state.use(recipe, self.syntax)
# If not found, look for a global function.
else:
binding = self.state.call(self.syntax)
| |
in block.body:
# Ignore things that look like docstrings
if (isinstance(stmt, ExpressionStmt) and
isinstance(stmt.expr, StrExpr)):
continue
# Constructor is named after class
if isinstance(stmt, FuncDef):
if stmt.name() == '__init__':
self.decl_write_ind('%s(', o.name)
self._WriteFuncParams(stmt.type.arg_types, stmt.arguments)
self.decl_write(');\n')
# Must visit these for member vars!
self.accept(stmt.body)
continue
if stmt.name() == '__enter__':
continue
if stmt.name() == '__exit__':
# Turn it into a destructor with NO ARGS
self.decl_write_ind('~%s();\n', o.name)
continue
self.accept(stmt)
self.current_class_name = None
# Now write member defs
#log('MEMBERS for %s: %s', o.name, list(self.member_vars.keys()))
if self.member_vars:
self.decl_write('\n') # separate from functions
for name in sorted(self.member_vars):
c_type = get_c_type(self.member_vars[name])
self.decl_write_ind('%s %s;\n', c_type, name)
self.decl_write('\n')
self.decl_write_ind('DISALLOW_COPY_AND_ASSIGN(%s)\n', o.name)
self.indent -= 1
self.decl_write_ind('};\n')
return
self.current_class_name = o.name
# Now we're visiting for definitions (not declarations).
#
block = o.defs
for stmt in block.body:
if isinstance(stmt, FuncDef):
# Collect __init__ calls within __init__, and turn them into
# initializer lists.
if stmt.name() == '__init__':
self.write('\n')
self.write_ind('%s::%s(', o.name, o.name)
self._WriteFuncParams(stmt.type.arg_types, stmt.arguments)
self.write(') ')
# Everything descents from Obj
if not base_class_name:
# TODO: Generate the right mask!
self.write(
': gc_heap::Obj(Tag::FixedSize, kZeroMask, sizeof(%s)) ' % o.name)
# Taking into account the docstring, look at the first statement to
# see if it's a superclass __init__ call. Then move that to the
# initializer list.
first_index = 0
maybe_skip_stmt = stmt.body.body[0]
if (isinstance(maybe_skip_stmt, ExpressionStmt) and
isinstance(maybe_skip_stmt.expr, StrExpr)):
first_index += 1
first_stmt = stmt.body.body[first_index]
if (isinstance(first_stmt, ExpressionStmt) and
isinstance(first_stmt.expr, CallExpr)):
expr = first_stmt.expr
#log('expr %s', expr)
callee = first_stmt.expr.callee
# TextOutput() : ColorOutput(f), ... {
if isinstance(callee, MemberExpr) and callee.name == '__init__':
base_constructor_args = expr.args
#log('ARGS %s', base_constructor_args)
self.write(': %s(', base_class_name)
for i, arg in enumerate(base_constructor_args):
if i == 0:
continue # Skip 'this'
if i != 1:
self.write(', ')
self.accept(arg)
self.write(') {\n')
self.indent += 1
for node in stmt.body.body[first_index+1:]:
self.accept(node)
self.indent -= 1
self.write('}\n')
continue
# Normal function body
self.accept(stmt.body)
continue
if stmt.name() == '__enter__':
continue
if stmt.name() == '__exit__':
self.decl_write('\n')
self.decl_write_ind('%s::~%s()', o.name, o.name)
self.accept(stmt.body)
continue
self.accept(stmt)
self.current_class_name = None # Stop prefixing functions with class
  def visit_global_decl(self, o: 'mypy.nodes.GlobalDecl') -> T:
    # Intentionally a no-op: 'global' declarations produce no C++ output.
    pass
  def visit_nonlocal_decl(self, o: 'mypy.nodes.NonlocalDecl') -> T:
    # Intentionally a no-op: 'nonlocal' declarations produce no C++ output.
    pass
  def visit_decorator(self, o: 'mypy.nodes.Decorator') -> T:
    # Intentionally a no-op: decorators are not translated to C++.
    pass
  def visit_var(self, o: 'mypy.nodes.Var') -> T:
    # Intentionally a no-op: bare Var nodes are handled where they appear
    # (e.g. assignments), not here.
    pass
# Module structure
  def visit_import(self, o: 'mypy.nodes.Import') -> T:
    # Intentionally a no-op: plain 'import x' emits nothing.  Only
    # 'from x import y' (visit_import_from) produces using/namespace lines.
    pass
  def visit_import_from(self, o: 'mypy.nodes.ImportFrom') -> T:
    """Translate 'from x import y' into C++ 'using' / 'namespace' aliases.

    Heuristics tuned for the OSH import style:
      - varargs helpers (log, e_die, ...) and mylib pseudo-imports are skipped;
      - names imported from *_asdl modules become either namespace aliases or
        'using' declarations, depending on a hard-coded name list;
      - 'from pkg import mod as alias' becomes 'namespace alias = mod;'.
    """
    if self.decl:  # No duplicate 'using'
      return

    if o.id in ('__future__', 'typing'):
      return  # do nothing

    # Later we need to turn module.func() into module::func(), without
    # disturbing self.foo.  Record both aliased and plain imported names.
    for name, alias in o.names:
      if alias:
        self.imported_names.add(alias)
      else:
        self.imported_names.add(name)

    # A heuristic that works for the OSH import style.
    #
    # from core.pyerror import log => using core::util::log
    # from core import util => NOT translated

    for name, alias in o.names:
      # TODO: Should these be moved to core/pylib.py or something?
      # They are varargs functions that have to be rewritten.
      if name in ('log', 'p_die', 'e_die', 'e_strict', 'e_usage',
                  'stderr_line'):
        continue  # do nothing
      if name in ('switch', 'tagswitch', 'iteritems', 'str_cmp'):  # mylib
        continue  # do nothing

      if '.' in o.id:
        last_dotted = o.id.split('.')[-1]

        # Omit this:
        #   from _devbuild.gen import grammar_nt
        if last_dotted == 'gen':
          return

        # ASDL:
        #
        # namespaces:
        #   expr_e::Const   # Compound sum
        #   expr::Const
        #   Id
        #
        # types:
        #   expr__Const
        #   expr_t          # sum type
        #   expr_context_e  # simple sum.  This one is hard
        #   double_quoted
        #   Id_str

        # Tag numbers/namespaces end with _n.  enum types end with _e.
        # TODO: rename special cases
        is_namespace = False

        if last_dotted.endswith('_asdl'):
          # NOTE(review): this hard-coded list mirrors the ASDL schemas and
          # must be kept in sync with them by hand.
          if name.endswith('_n') or name.endswith('_i') or name in (
              'Id', 'hnode_e', 'source_e', 'place_e',

              # syntax_asdl
              're', 're_repeat', 'class_literal_term', 'proc_sig',
              'bracket_op', 'bracket_op_e',
              'source', 'source_e',
              'suffix_op', 'suffix_op_e',
              'sh_lhs_expr', 'parse_result',
              'command_e', 'command',
              'condition_e', 'condition',
              'arith_expr_e', 'arith_expr',
              'bool_expr_e', 'bool_expr',
              'expr_e', 'expr',
              'place_expr_e', 'place_expr',
              'word_part_e', 'word_part',
              'word_e', 'word',
              'redir_loc_e', 'redir_loc',
              'redir_param_e', 'redir_param',
              'proc_sig_e', 'proc_sig',
              'glob_part_e', 'glob_part',
              're_e', 're',
              're_repeat_e', 're_repeat',
              'class_literal_term_e', 'class_literal_term',
              'sh_lhs_expr_e', 'sh_lhs_expr',
              'variant_type',

              # runtime_asdl
              'flag_type_e', 'flag_type',
              'lvalue_e', 'lvalue',
              'value_e', 'value',
              'part_value_e', 'part_value',
              'cmd_value_e', 'cmd_value',
              'redirect_arg_e', 'redirect_arg',
              'a_index_e', 'a_index',
              'printf_part_e', 'printf_part',
              'job_status', 'job_status_e',
              ):
            is_namespace = True

        if is_namespace:
          # No aliases yet?
          #lhs = alias if alias else name
          self.write_ind(
              'namespace %s = %s::%s;\n', name, last_dotted, name)
        else:
          if alias:
            # using runtime_asdl::emit_e = EMIT;
            self.write_ind('using %s = %s::%s;\n', alias, last_dotted, name)
          else:
            # from _devbuild.gen.id_kind_asdl import Id
            # -> using id_kind_asdl::Id.
            self.write_ind('using %s::%s;\n', last_dotted, name)
      else:
        # If we're importing a module without an alias, we don't need to do
        # anything.  'namespace cmd_eval' is already defined.
        if not alias:
          return

        # from asdl import format as fmt
        # -> namespace fmt = format;
        self.write_ind('namespace %s = %s;\n', alias, name)

    # Old scheme
    # from testpkg import module1 =>
    # namespace module1 = testpkg.module1;
    # Unfortunately the MyPy AST doesn't have enough info to distinguish
    # imported packages and functions/classes?
  def visit_import_all(self, o: 'mypy.nodes.ImportAll') -> T:
    # Intentionally a no-op: 'from x import *' produces no C++ output.
    pass
# Statements
def visit_block(self, block: 'mypy.nodes.Block') -> T:
self.write('{\n') # not indented to use same line as while/if
self.indent += 1
if self.prepend_to_block:
done = set()
for lval_name, c_type in self.prepend_to_block:
if lval_name not in done:
self.write_ind('%s %s;\n', c_type, lval_name)
done.add(lval_name)
self.write('\n')
self.prepend_to_block = None
self._write_body(block.body)
self.indent -= 1
self.write_ind('}\n')
def visit_expression_stmt(self, o: 'mypy.nodes.ExpressionStmt') -> T:
# TODO: Avoid writing docstrings.
# If it's just a string, then we don't need it.
self.write_ind('')
self.accept(o.expr)
self.write(';\n')
def visit_operator_assignment_stmt(self, o: 'mypy.nodes.OperatorAssignmentStmt') -> T:
self.write_ind('')
self.accept(o.lvalue)
self.write(' %s= ', o.op) # + to +=
self.accept(o.rvalue)
self.write(';\n')
def visit_while_stmt(self, o: 'mypy.nodes.WhileStmt') -> T:
self.write_ind('while (')
self.accept(o.expr)
self.write(') ')
self.accept(o.body)
def visit_return_stmt(self, o: 'mypy.nodes.ReturnStmt') -> T:
self.write_ind('return ')
if o.expr:
self.in_return_expr = True
self.accept(o.expr)
self.in_return_expr = False
self.write(';\n')
  def visit_assert_stmt(self, o: 'mypy.nodes.AssertStmt') -> T:
    # Intentionally a no-op: Python assertions are dropped from C++ output.
    pass
  def visit_if_stmt(self, o: 'mypy.nodes.IfStmt') -> T:
    """Translate an 'if' statement, omitting Python-only blocks.

    Skipped entirely: 'if __name__ == ...', 'if 0:', 'if TYPE_CHECKING:'.
    'if mylib.CPP:' keeps only the then-branch; 'if mylib.PYTHON:' keeps
    only the else-branch.  Conditions whose static type is str/list/dict
    are rejected, since C++ has no implicit truthiness for them.
    """
    # Not sure why this wouldn't be true
    assert len(o.expr) == 1, o.expr

    # Omit anything that looks like if __name__ == ...
    cond = o.expr[0]

    if isinstance(cond, UnaryExpr) and cond.op == 'not':
      # check 'if not mylist' — type-check the operand, not the negation
      cond_expr = cond.expr
    else:
      # TODO: if x > 0 and mylist
      #       if x > 0 and not mylist , etc.
      cond_expr = cond

    cond_type = self.types[cond_expr]
    if not _CheckConditionType(cond_type):
      raise AssertionError(
          "Can't use str, list, or dict in boolean context")

    if (isinstance(cond, ComparisonExpr) and
        isinstance(cond.operands[0], NameExpr) and
        cond.operands[0].name == '__name__'):
      return

    # Omit if 0:
    if isinstance(cond, IntExpr) and cond.value == 0:
      return

    # Omit if TYPE_CHECKING blocks.  They contain type expressions that
    # don't type check!
    if isinstance(cond, NameExpr) and cond.name == 'TYPE_CHECKING':
      return

    # mylib.CPP
    if isinstance(cond, MemberExpr) and cond.name == 'CPP':
      # just take the if block, bracketed by marker comments
      self.write_ind('// if MYCPP\n')
      self.write_ind('')
      for node in o.body:
        self.accept(node)
      self.write_ind('// endif MYCPP\n')
      return

    # mylib.PYTHON
    if isinstance(cond, MemberExpr) and cond.name == 'PYTHON':
      # only the else-branch survives translation
      if o.else_body:
        self.write_ind('// if not PYTHON\n')
        self.write_ind('')
        self.accept(o.else_body)
        self.write_ind('// endif MYCPP\n')
      return

    # Ordinary if/else: emit condition, then-body, optional else-body.
    self.write_ind('if (')
    for e in o.expr:
      self.accept(e)
    self.write(') ')

    for node in o.body:
      self.accept(node)
    if o.else_body:
      self.write_ind('else ')
      self.accept(o.else_body)
  def visit_break_stmt(self, o: 'mypy.nodes.BreakStmt') -> T:
    # 'break' maps one-to-one onto C++.
    self.write_ind('break;\n')
  def visit_continue_stmt(self, o: 'mypy.nodes.ContinueStmt') -> T:
    # 'continue' maps one-to-one onto C++.
    self.write_ind('continue;\n')
  def visit_pass_stmt(self, o: 'mypy.nodes.PassStmt') -> T:
    # C++ has no 'pass'; emit an empty statement tagged with a comment.
    self.write_ind('; // pass\n')
def visit_raise_stmt(self, o: 'mypy.nodes.RaiseStmt') -> T:
self.write_ind('throw ')
# it could be raise -> throw ; . OSH uses that.
if o.expr:
self.accept(o.expr)
self.write(';\n')
  def visit_try_stmt(self, o: 'mypy.nodes.TryStmt') -> T:
    """Translate try/except into C++ try/catch.

    The caught C++ type is chosen heuristically from the Python exception
    expression: 'mod.Exc' -> 'mod::Exc*', a bare name -> 'Name*', and the
    tuple '(IOError, OSError)' (either order) -> '_OSError*'.  Any other
    tuple yields 'MultipleExceptions', which deliberately fails to compile.
    """
    self.write_ind('try ')
    self.accept(o.body)
    caught = False
    for t, v, handler in zip(o.types, o.vars, o.handlers):

      # Heuristic
      if isinstance(t, MemberExpr):
        # e.g. 'error.Parse' -> 'error::Parse*'
        # NOTE(review): assumes t.expr is a simple name — verify for nested
        # attribute expressions.
        c_type = '%s::%s*' % (t.expr.name, t.name)
      elif isinstance(t, TupleExpr):
        c_type = None
        if len(t.items) == 2:
          e1 = t.items[0]
          e2 = t.items[1]
          if isinstance(e1, NameExpr) and isinstance(e2, NameExpr):
            names = [e1.name, e2.name]
            names.sort()
            if names == ['IOError', 'OSError']:
              c_type = '_OSError*'  # Base class in mylib
        if c_type is None:
          c_type = 'MultipleExceptions'  # Causes compile error
      else:
        c_type = '%s*' % t.name

      if v:
        # 'except E as v' -> catch the pointer by name
        self.write_ind('catch (%s %s) ', c_type, v.name)
      else:
        self.write_ind('catch (%s) ', c_type)
      self.accept(handler)
      caught = True

    # DUMMY to prevent compile errors
    # TODO: Remove this
    if not caught:
      self.write_ind('catch (std::exception) { }')
# ---------------------------------------------------------------------------
def create_user(self, body, **kwargs): # noqa: E501
"""Create a new user. # noqa: E501
An endpoint for creating or inviting a new user to the account. In case of invitation email address is used only, other attributes are set in the 2nd step. **Example usage:** `curl -X POST https://api.us-east-1.mbedcloud.com/v3/users?action=invite -d {\"email\": \"<EMAIL>\"} -H 'content-type: application/json' -H 'Authorization: Bearer API_KEY'` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.create_user(body, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param UserInfoReq body: A user object with attributes. (required)
:param str action: Action, either 'create' or 'invite'.
:return: UserInfoResp
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('asynchronous'):
return self.create_user_with_http_info(body, **kwargs) # noqa: E501
else:
(data) = self.create_user_with_http_info(body, **kwargs) # noqa: E501
return data
    def create_user_with_http_info(self, body, **kwargs):  # noqa: E501
        """Create a new user.  # noqa: E501

        An endpoint for creating or inviting a new user to the account. In case of invitation email address is used only, other attributes are set in the 2nd step. **Example usage:** `curl -X POST https://api.us-east-1.mbedcloud.com/v3/users?action=invite -d {\"email\": \"<EMAIL>\"} -H 'content-type: application/json' -H 'Authorization: Bearer API_KEY'`  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass asynchronous=True
        >>> thread = api.create_user_with_http_info(body, asynchronous=True)
        >>> result = thread.get()

        :param asynchronous bool
        :param UserInfoReq body: A user object with attributes. (required)
        :param str action: Action, either 'create' or 'invite'.
        :return: UserInfoResp
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Whitelist of keyword arguments accepted by this endpoint.
        all_params = ['body', 'action']  # noqa: E501
        all_params.append('asynchronous')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Generated-code idiom: snapshot locals, then fold **kwargs into it,
        # rejecting anything not in the whitelist.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method create_user" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'body' is set
        if ('body' not in params or
                params['body'] is None):
            raise ValueError("Missing the required parameter `body` when calling `create_user`")  # noqa: E501

        collection_formats = {}

        path_params = {}

        # 'action' is the only query parameter ('create' or 'invite').
        query_params = []
        if 'action' in params:
            query_params.append(('action', params['action']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        # The user object is sent as the JSON request body.
        body_params = None
        if 'body' in params:
            body_params = params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['Bearer']  # noqa: E501

        return self.api_client.call_api(
            '/v3/users', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='UserInfoResp',  # noqa: E501
            auth_settings=auth_settings,
            asynchronous=params.get('asynchronous'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def delete_group(self, group_id, **kwargs): # noqa: E501
"""Delete a group. # noqa: E501
An endpoint for deleting a group. **Example usage:** `curl -X DELETE https://api.us-east-1.mbedcloud.com/v3/policy-groups/{group-id} -H 'Authorization: Bearer API_KEY'` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.delete_group(group_id, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param str group_id: The ID of the group to be deleted. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('asynchronous'):
return self.delete_group_with_http_info(group_id, **kwargs) # noqa: E501
else:
(data) = self.delete_group_with_http_info(group_id, **kwargs) # noqa: E501
return data
    def delete_group_with_http_info(self, group_id, **kwargs):  # noqa: E501
        """Delete a group.  # noqa: E501

        An endpoint for deleting a group. **Example usage:** `curl -X DELETE https://api.us-east-1.mbedcloud.com/v3/policy-groups/{group-id} -H 'Authorization: Bearer API_KEY'`  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass asynchronous=True
        >>> thread = api.delete_group_with_http_info(group_id, asynchronous=True)
        >>> result = thread.get()

        :param asynchronous bool
        :param str group_id: The ID of the group to be deleted. (required)
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Whitelist of keyword arguments accepted by this endpoint.
        all_params = ['group_id']  # noqa: E501
        all_params.append('asynchronous')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Generated-code idiom: snapshot locals, then fold **kwargs into it,
        # rejecting anything not in the whitelist.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_group" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'group_id' is set
        if ('group_id' not in params or
                params['group_id'] is None):
            raise ValueError("Missing the required parameter `group_id` when calling `delete_group`")  # noqa: E501

        collection_formats = {}

        # The group id is interpolated into the URL path.
        path_params = {}
        if 'group_id' in params:
            path_params['groupID'] = params['group_id']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['Bearer']  # noqa: E501

        return self.api_client.call_api(
            '/v3/policy-groups/{groupID}', 'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type=None,  # noqa: E501
            auth_settings=auth_settings,
            asynchronous=params.get('asynchronous'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def delete_invitation(self, invitation_id, **kwargs): # noqa: E501
"""Delete a user invitation. # noqa: E501
An endpoint for deleting an active user invitation which has been sent for a new or an existing user to join the account. **Example usage:** `curl -X DELETE https://api.us-east-1.mbedcloud.com/v3/user-invitations/{invitation-id} -H 'Authorization: Bearer API_KEY'` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.delete_invitation(invitation_id, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param str invitation_id: The ID of the invitation to be deleted. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('asynchronous'):
return self.delete_invitation_with_http_info(invitation_id, **kwargs) # noqa: E501
else:
(data) = self.delete_invitation_with_http_info(invitation_id, **kwargs) # noqa: E501
return data
    def delete_invitation_with_http_info(self, invitation_id, **kwargs):  # noqa: E501
        """Delete a user invitation.  # noqa: E501

        An endpoint for deleting an active user invitation which has been sent for a new or an existing user to join the account. **Example usage:** `curl -X DELETE https://api.us-east-1.mbedcloud.com/v3/user-invitations/{invitation-id} -H 'Authorization: Bearer API_KEY'`  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass asynchronous=True
        >>> thread = api.delete_invitation_with_http_info(invitation_id, asynchronous=True)
        >>> result = thread.get()

        :param asynchronous bool
        :param str invitation_id: The ID of the invitation to be deleted. (required)
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Whitelist of keyword arguments accepted by this endpoint.
        all_params = ['invitation_id']  # noqa: E501
        all_params.append('asynchronous')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Generated-code idiom: snapshot locals, then fold **kwargs into it,
        # rejecting anything not in the whitelist.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_invitation" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'invitation_id' is set
        if ('invitation_id' not in params or
                params['invitation_id'] is None):
            raise ValueError("Missing the required parameter `invitation_id` when calling `delete_invitation`")  # noqa: E501

        collection_formats = {}

        # The invitation id is interpolated into the URL path.
        path_params = {}
        if 'invitation_id' in params:
            path_params['invitation-id'] = params['invitation_id']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['Bearer']  # noqa: E501

        return self.api_client.call_api(
            '/v3/user-invitations/{invitation-id}', 'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type=None,  # noqa: E501
            auth_settings=auth_settings,
            asynchronous=params.get('asynchronous'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def delete_user(self, user_id, **kwargs): # noqa: E501
"""Delete a user. # noqa: E501
An endpoint for deleting a user. **Example usage:** `curl -X DELETE https://api.us-east-1.mbedcloud.com/v3/users/{user-id} -H 'Authorization: Bearer API_KEY'` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.delete_user(user_id, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param str user_id: The ID of the user to be deleted. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('asynchronous'):
return self.delete_user_with_http_info(user_id, **kwargs) # noqa: E501
else:
(data) = self.delete_user_with_http_info(user_id, **kwargs) # noqa: E501
return data
def delete_user_with_http_info(self, user_id, **kwargs): # noqa: E501
"""Delete a user. # noqa: E501
An endpoint for deleting a user. **Example usage:** `curl -X DELETE https://api.us-east-1.mbedcloud.com/v3/users/{user-id} -H 'Authorization: Bearer | |
import os
import tempfile
import pandas as pd
import csv
from nidm.experiment.Query import GetProjectsUUID
import click
from nidm.experiment.tools.click_base import cli
from nidm.experiment.tools.rest import RestParser
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import silhouette_score
from sklearn.mixture import GaussianMixture
from sklearn.preprocessing import LabelEncoder, MinMaxScaler
from sklearn import preprocessing
@cli.command()
@click.option("--nidm_file_list", "-nl", required=True,
              help="A comma separated list of NIDM files with full path")
@click.option("--var","-variables", required=True,
              help="This parameter is for the variables the user would like to complete the k-means algorithm on.\nThe way this looks in the command is python3 nidm_kmeans.py -nl MTdemog_aseg_v2.ttl -v \"fs_003343,age*sex,sex,age,group,age*group,bmi\"")
@click.option("--k_range", "-k", required=True,
              help="The maxiumum number of clusters to try. The algorithm will go from 2 to this number to determine the optimal number of clusters.")
@click.option("--optimal_cluster_method", "-m", required=True,
              help="The criterion used to select the optimal partitioning (either Silhouette Score, AIC, or BIC).")
@click.option("--output_file", "-o", required=False,
              help="Optional output file (TXT) to store results of the linear regression, contrast, and regularization")
def gmm(nidm_file_list, output_file, var, k_range, optimal_cluster_method):
    """
    Cluster variables from NIDM files (GMM-based; this module imports
    sklearn's GaussianMixture, though the CLI help still says k-means).

    Pipeline: data_aggregation() pulls the variables out of the NIDM
    graphs, dataparsing() reshapes them, cluster_number() selects the
    partitioning via the chosen criterion (Silhouette/AIC/BIC).
    """
    # Parameters are handed to the downstream stages through module-level
    # globals because those functions take no arguments.
    global v  # variable list; read by data_aggregation() and later stages
    v = var.strip()  # spaces stripped from left and right
    global o  # output file path; read by dataparsing()
    o = output_file
    global n  # NIDM file list; read by data_aggregation()
    n = nidm_file_list
    global k_num  # upper bound on the number of clusters to try
    k_num = int(k_range.strip())
    global cm  # criterion for picking the optimal clustering
    cm = optimal_cluster_method
    data_aggregation()
    dataparsing()
    cluster_number()
def data_aggregation(): # all data from all the files is collected
""" This function provides query support for NIDM graphs. """
# query result list
results = []
# if there is a CDE file list, seed the CDE cache
if v: #ex: age,sex,DX_GROUP
print("***********************************************************************************************************")
command = "pynidm k-means -nl " + n + " -variables \"" + v + "\" " + "-k " + str(k_num) + " -m " + cm
print("Your command was: " + command)
if (o is not None):
f = open(o, "w")
f.write("Your command was " + command)
f.close()
verbosity=0
restParser = RestParser(verbosity_level=int(verbosity))
restParser.setOutputFormat(RestParser.OBJECT_FORMAT)
global df_list # used in dataparsing()
df_list = []
# set up uri to do fields query for each nidm file
global file_list
file_list = n.split(",")
df_list_holder = {}
for i in range(len(file_list)):
df_list_holder[i] = []
df_holder = {}
for i in range(len(file_list)):
df_holder[i] = []
global condensed_data_holder
condensed_data_holder = {}
for i in range(len(file_list)):
condensed_data_holder[i] = []
count = 0
not_found_count = 0
for nidm_file in file_list:
# get project UUID
project = GetProjectsUUID([nidm_file])
# split the model into its constituent variables
global var_list
# below, we edit the model so it splits by +,~, or =. However, to help it out in catching everything
# we replaced ~ and = with a + so that we can still use split. Regex wasn't working.
var_list = v.split(",")
for i in range(len(var_list)): # here, we remove any leading or trailing spaces
var_list[i] = var_list[i].strip()
# set the dependent variable to the one dependent variable in the model
global vars # used in dataparsing()
vars = ""
for i in range(len(var_list) - 1, -1, -1):
if not "*" in var_list[i]: # removing the star term from the columns we're about to pull from data
vars = vars + var_list[i] + ","
else:
print("Interacting variables are not present in clustering models. They will be removed.")
vars = vars[0:len(vars) - 1]
uri = "/projects/" + project[0].toPython().split("/")[-1] + "?fields=" + vars
# get fields output from each file and concatenate
df_list_holder[count].append(pd.DataFrame(restParser.run([nidm_file], uri)))
# global dep_var
df = pd.concat(df_list_holder[count])
with tempfile.NamedTemporaryFile(delete=False) as temp: # turns the dataframe into a temporary csv
df.to_csv(temp.name + '.csv')
temp.close()
data = list(csv.reader(open(
temp.name + '.csv'))) # makes the csv a 2D list to make it easier to call the contents of certain cells
var_list = vars.split(",") # makes a list of the independent variables
numcols = (len(data) - 1) // (
len(var_list)) # Finds the number of columns in the original dataframe
global condensed_data # also used in linreg()
condensed_data_holder[count] = [
[0] * (len(var_list))] # makes an array 1 row by the number of necessary columns
for i in range(
numcols): # makes the 2D array big enough to store all of the necessary values in the edited dataset
condensed_data_holder[count].append([0] * (len(var_list)))
for m in range(0, len(var_list)):
end_url = var_list[m].split("/")
if "/" in var_list[m]:
var_list[m] = end_url[len(end_url) - 1]
for i in range(len(var_list)): # stores the independent variable names in the first row
condensed_data_holder[count][0][i] = var_list[i]
numrows = 1 # begins at the first row to add data
fieldcolumn = 0 # the column the variable name is in in the original dataset
valuecolumn = 0 # the column the value is in in the original dataset
datacolumn = 0 # if it is identified by the dataElement name instead of the field's name
not_found_list = []
for i in range(len(data[0])):
if data[0][i] == 'sourceVariable': # finds the column where the variable names are
fieldcolumn = i
elif data[0][i] == 'source_variable': # finds the column where the variable names are
fieldcolumn = i
elif data[0][i] == 'isAbout':
aboutcolumn = i
elif data[0][i] == 'label':
namecolumn = i # finds the column where the variable names are
elif data[0][i] == 'value':
valuecolumn = i # finds the column where the values are
elif data[0][i] == 'dataElement': # finds the column where the data element is if necessary
datacolumn = i
for i in range(
len(condensed_data_holder[count][
0])): # starts iterating through the dataset, looking for the name in that
for j in range(1, len(data)): # column, so it can append the values under the proper variables
try:
if data[j][fieldcolumn] == condensed_data_holder[count][0][
i]: # in the dataframe, the name is in column 3
condensed_data_holder[count][numrows][i] = data[j][
valuecolumn] # in the dataframe, the value is in column 2
numrows = numrows + 1 # moves on to the next row to add the proper values
elif data[j][aboutcolumn] == condensed_data_holder[count][0][
i]:
condensed_data_holder[count][numrows][i] = data[j][
valuecolumn] # in the dataframe, the value is in column 2
numrows = numrows + 1 # moves on to the next row to add the proper values
elif condensed_data_holder[count][0][
i] in data[j][
aboutcolumn]: # this is in case the uri only works by querying the part after the last backslash
condensed_data_holder[count][numrows][i] = data[j][
valuecolumn] # in the dataframe, the value is in column 2
numrows = numrows + 1 # moves on to the next row to add the proper values
elif data[j][namecolumn] == condensed_data_holder[count][0][
i]: # in the dataframe, the name is in column 12
condensed_data_holder[count][numrows][i] = data[j][
valuecolumn] # in the dataframe, the value is in column 2
numrows = numrows + 1 # moves on to the next row to add the proper values
elif condensed_data_holder[count][0][i] == data[j][
datacolumn]: # in the dataframe, the name is in column 9
condensed_data_holder[count][numrows][i] = data[j][
valuecolumn] # in the dataframe, the value is in column 2
numrows = numrows + 1 # moves on to the next row to add the proper values
except IndexError:
numrows = numrows + 1
numrows = 1 # resets to the first row for the next variable
temp_list = condensed_data_holder[count]
for j in range(len(temp_list[0]) - 1, 0,
-1): # if the software appends a column with 0 as the heading, it removes this null column
if temp_list[0][j] == "0" or temp_list[0][j] == "NaN":
for row in condensed_data_holder[count]:
row.pop(j)
rowsize = len(condensed_data_holder[count][0])
count1 = 0
for i in range(0, rowsize):
for row in condensed_data_holder[count]:
if row[i] == 0 or row[i] == "NaN" or row[i] == "0":
count1 = count1 + 1
if count1 > len(condensed_data_holder[count]) - 2:
not_found_list.append(condensed_data_holder[count][0][i])
count1 = 0
for i in range(len(condensed_data_holder[count][0])):
if " " in condensed_data_holder[count][0][i]:
condensed_data_holder[count][0][i] = condensed_data_holder[count][0][i].replace(" ", "_")
for i in range(len(var_list)):
if "/" in var_list[i]:
splitted = var_list[i].split("/")
var_list[i] = splitted[len(splitted) - 1]
if " " in var_list[i]:
var_list[i] | |
# Repository: will-jj/arim
import warnings
import numpy as np
import pytest
import arim
from arim import ut, scat, _scat_crack
import tests.helpers
def test_scattering_angles_grid():
    """The 2-D angle grids must replicate the 1-D angle vector along each axis:
    incident angles vary along columns, outgoing angles along rows."""
    size = 10
    theta = scat.make_angles(size)
    inc_theta, out_theta = scat.make_angles_grid(size)
    for row in range(size):
        for col in range(size):
            assert out_theta[row, col] == theta[row]
            assert inc_theta[row, col] == theta[col]
def test_sdh_2d_scat():
    """Compare scat.sdh_2d_scat against reference values.

    The expected complex amplitudes for the four mode pairs (LL, LT, TL, TT)
    were produced by a MATLAB implementation; see the sign-flip note on LT
    below.
    """
    # 15 outgoing angles spanning (-pi, pi); single incident angle 0.
    out_theta = np.array(
        [
            -3.141592653589793,
            -2.722713633111154,
            -2.303834612632515,
            -1.884955592153876,
            -1.466076571675237,
            -1.047197551196598,
            -0.628318530717959,
            -0.209439510239319,
            0.209439510239319,
            0.628318530717959,
            1.047197551196597,
            1.466076571675236,
            1.884955592153876,
            2.303834612632516,
            2.722713633111154,
        ]
    )
    inc_theta = 0.0
    # Reference amplitudes from the MATLAB implementation, one entry per
    # outgoing angle, keyed by incident/outgoing mode pair.
    matlab_res = dict()
    matlab_res["LL"] = np.array(
        [
            -0.206384032591909 + 0.336645038756022j,
            -0.194171819277630 + 0.313226502544485j,
            -0.155687913654758 + 0.264243478643578j,
            -0.090375683177214 + 0.226391506237526j,
            -0.005253862284530 + 0.211028560232004j,
            0.085889202419455 + 0.204053854945626j,
            0.165030960663520 + 0.193967940239943j,
            0.212013086087838 + 0.184664953622806j,
            0.212013086087838 + 0.184664953622806j,
            0.165030960663520 + 0.193967940239943j,
            0.085889202419455 + 0.204053854945626j,
            -0.005253862284530 + 0.211028560232004j,
            -0.090375683177214 + 0.226391506237526j,
            -0.155687913654758 + 0.264243478643578j,
            -0.194171819277630 + 0.313226502544484j,
        ]
    )
    matlab_res["LT"] = np.array(
        [
            -0.000000000000000 + 0.000000000000000j,
            0.173514558396338 - 0.235915394468874j,
            0.363162600270786 - 0.165777746007565j,
            0.503786047970495 + 0.061137988770260j,
            0.546299366197900 + 0.133217223565162j,
            0.506680725919996 + 0.029760392507310j,
            0.380878711540161 - 0.059504104563334j,
            0.145732609209624 - 0.037889115351498j,
            -0.145732609209624 + 0.037889115351498j,
            -0.380878711540162 + 0.059504104563334j,
            -0.506680725919996 - 0.029760392507310j,
            -0.546299366197900 - 0.133217223565162j,
            -0.503786047970495 - 0.061137988770261j,
            -0.363162600270786 + 0.165777746007565j,
            -0.173514558396338 + 0.235915394468874j,
        ]
    )
    matlab_res["TL"] = np.array(
        [
            0.000000000000000 - 0.000000000000000j,
            -0.043378639599085 + 0.058978848617218j,
            -0.090790650067696 + 0.041444436501891j,
            -0.125946511992624 - 0.015284497192565j,
            -0.136574841549475 - 0.033304305891291j,
            -0.126670181479999 - 0.007440098126828j,
            -0.095219677885040 + 0.014876026140834j,
            -0.036433152302406 + 0.009472278837875j,
            0.036433152302406 - 0.009472278837875j,
            0.095219677885040 - 0.014876026140834j,
            0.126670181479999 + 0.007440098126828j,
            0.136574841549475 + 0.033304305891291j,
            0.125946511992624 + 0.015284497192565j,
            0.090790650067697 - 0.041444436501891j,
            0.043378639599085 - 0.058978848617218j,
        ]
    )
    matlab_res["TT"] = np.array(
        [
            -0.262017703125609 + 0.771353787922999j,
            -0.376441609988753 + 0.188374651320542j,
            -0.429903377994878 - 0.562524535327520j,
            -0.243229145424068 - 0.367845549589069j,
            -0.126223795958403 + 0.187399980358998j,
            -0.136416167137459 + 0.230680597518463j,
            -0.019016110619602 - 0.071187589718864j,
            0.181910208019545 - 0.257602560262746j,
            0.181910208019545 - 0.257602560262746j,
            -0.019016110619602 - 0.071187589718863j,
            -0.136416167137459 + 0.230680597518463j,
            -0.126223795958403 + 0.187399980358999j,
            -0.243229145424068 - 0.367845549589067j,
            -0.429903377994878 - 0.562524535327520j,
            -0.376441609988753 + 0.188374651320541j,
        ]
    )
    # Physical parameters: 2 MHz, steel-like velocities, 0.5 mm hole radius.
    freq = 2.0e6
    v_l = 6000
    v_t = 3000
    hole_radius = 5e-4
    result = scat.sdh_2d_scat(inc_theta, out_theta, freq, hole_radius, v_l, v_t)
    matlab_res[
        "LT"
    ] *= (
        -1
    )  # trust Lopez-Sanchez instead of Brind, different from matlab implementation
    assert len(result) == 4
    assert result["LL"].shape == out_theta.shape
    assert result["LT"].shape == out_theta.shape
    assert result["TL"].shape == out_theta.shape
    assert result["TT"].shape == out_theta.shape
    args = dict(rtol=1e-5)
    np.testing.assert_allclose(result["LL"], matlab_res["LL"], **args)
    np.testing.assert_allclose(result["LT"], matlab_res["LT"], **args)
    np.testing.assert_allclose(result["TL"], matlab_res["TL"], **args)
    np.testing.assert_allclose(result["TT"], matlab_res["TT"], **args)
def _scattering_function(inc_theta, out_theta):
    """Toy scattering function used by the interpolation/rotation tests.

    Both angles are first wrapped by ``ut.wrap_phase``; each normalised
    angle is then encoded with a different scale (10 for the incident
    angle, 100 for the outgoing one) so that the two arguments can be
    told apart in the result.
    """
    wrapped_inc = ut.wrap_phase(inc_theta)
    wrapped_out = ut.wrap_phase(out_theta)
    inc_term = (wrapped_inc + np.pi) / np.pi * 10
    out_term = (wrapped_out + np.pi) / np.pi * 100
    return inc_term + out_term
def test_make_scattering_matrix():
    """Matrix entry (i, j) must equal the function at (theta[j], theta[i])."""
    n = 5
    inc_theta, out_theta = scat.make_angles_grid(n)
    matrix = _scattering_function(inc_theta, out_theta)
    assert inc_theta.shape == (n, n)
    assert out_theta.shape == (n, n)
    assert matrix.shape == (n, n)
    theta = scat.make_angles(n)
    for row in range(n):
        for col in range(n):
            # first axis indexes the outgoing angle, second the incident one
            assert np.allclose(
                matrix[row, col], _scattering_function(theta[col], theta[row])
            )
    probe = (1, 3)
    np.testing.assert_allclose(
        matrix[probe], _scattering_function(inc_theta[probe], out_theta[probe])
    )
    # the underlying function is 2*pi-periodic in both angles
    np.testing.assert_allclose(
        _scattering_function(np.pi, np.pi), _scattering_function(-np.pi, -np.pi)
    )
    shift = 0.1
    np.testing.assert_allclose(
        _scattering_function(np.pi + shift, np.pi),
        _scattering_function(-np.pi + shift, -np.pi),
    )
def test_interpolate_matrix():
    """interpolate_matrix must reproduce the sampled function (mod 2*pi)."""
    n = 5
    dtheta = 2 * np.pi / n
    inc_theta, out_theta = scat.make_angles_grid(n)
    sampled = _scattering_function(inc_theta, out_theta)
    interp = scat.interpolate_matrix(sampled)
    # exact on the sampling grid itself
    np.testing.assert_allclose(
        interp(inc_theta, out_theta), _scattering_function(inc_theta, out_theta)
    )
    # invariant when whole turns are added to either argument
    np.testing.assert_allclose(
        interp(inc_theta + 10 * np.pi, out_theta - 6 * np.pi),
        _scattering_function(inc_theta, out_theta),
    )
    # off-grid in the incident angle; drop the last column (edge effect)
    x = (inc_theta + dtheta / 4)[:, :-1]
    y = out_theta[:, :-1]
    np.testing.assert_allclose(interp(x, y), _scattering_function(x, y))
    # off-grid in the outgoing angle; drop the last row (edge effect)
    x = inc_theta[:-1]
    y = (out_theta + dtheta / 4)[:-1]
    np.testing.assert_allclose(interp(x, y), _scattering_function(x, y))
    # off-grid in both angles at once
    x = inc_theta[:-1, :-1] + dtheta / 3
    y = (out_theta + dtheta / 4)[:-1, :-1]
    np.testing.assert_allclose(interp(x, y), _scattering_function(x, y))
def test_rotate_matrix():
    """rotate_matrix must satisfy S'(t1, t2) = S(t1 - phi, t2 - phi)."""
    n = 72
    inc_angles, out_angles = scat.make_angles_grid(n)
    scat_matrix = np.exp(
        -((inc_angles - np.pi / 6) ** 2) - (out_angles + np.pi / 4) ** 2
    ) + 1j * np.exp(-((inc_angles + np.pi / 2) ** 2) - (out_angles - np.pi / 10) ** 2)
    scat_func = scat.interpolate_matrix(scat_matrix)

    # rotating by 0 or by a full turn leaves the matrix unchanged
    for neutral_angle in (0.0, 2 * np.pi):
        np.testing.assert_allclose(
            scat_matrix, scat.rotate_matrix(scat_matrix, neutral_angle), rtol=1e-6
        )

    # phi = pi/6 falls exactly on the angular grid, so no interpolation
    # is involved and the identity holds exactly
    phi = np.pi / 6
    rotated = scat.rotate_matrix(scat_matrix, phi)
    rotated_func = scat.interpolate_matrix(rotated)
    theta_1 = np.linspace(0, 2 * np.pi, n)
    theta_2 = np.linspace(0, np.pi, n)
    np.testing.assert_allclose(
        rotated_func(theta_1, theta_2),
        scat_func(theta_1 - phi, theta_2 - phi),
    )

    # phi = pi/5 is off-grid; interpolation introduces a small error
    phi = np.pi / 5
    rotated = scat.rotate_matrix(scat_matrix, phi)
    rotated_func = scat.interpolate_matrix(rotated)
    theta_1 = np.linspace(0, 2 * np.pi, 15)
    theta_2 = np.linspace(0, np.pi, 15)
    np.testing.assert_allclose(
        rotated_func(theta_1, theta_2),
        scat_func(theta_1 - phi, theta_2 - phi),
        atol=5e-3,
    )

    # rotating back by -phi recovers the original matrix
    np.testing.assert_allclose(
        scat.rotate_matrix(rotated, -phi), scat_matrix, rtol=1e-6
    )

    # rotate_matrices applies the rotation to every entry of a dict
    matrices = dict(LL=scat_matrix, TT=np.ones((10, 10)))
    rotated_matrices = scat.rotate_matrices(matrices, np.pi / 6)
    assert "LL" in rotated_matrices
    assert "TT" in rotated_matrices
def make_scat_data_single_freq():
    """Build a ScatFromData object from SDH matrices at a single frequency."""
    scat_sdh = scat.SdhScat(5e-4, TestScattering.v_L, TestScattering.v_T)
    frequency = 2e6
    single_freq = scat_sdh.as_single_freq_matrices(frequency, 80)
    # from_dict expects a leading frequency axis, so prepend one of size 1
    with_freq_axis = {
        scat_key: matrix[np.newaxis] for scat_key, matrix in single_freq.items()
    }
    return scat.ScatFromData.from_dict(frequency, with_freq_axis)
def test_scat_factory():
    """scat_factory must return the expected Scattering subclass per key."""
    material = arim.Material(
        6300.0, 3120.0, 2700.0, "solid", metadata={"long_name": "Aluminium"}
    )
    fname = tests.helpers.get_data_filename("scat/scat_matlab.mat")
    cases = [
        (("file", material, fname), {}, scat.ScatFromData),
        (("crack_centre", material), {"crack_length": 2.0e-3}, scat.CrackCentreScat),
        (("sdh", material), {"radius": 0.5e-3}, scat.SdhScat),
        (("point", material), {}, scat.PointSourceScat),
        (("crack_tip", material), {}, scat.CrackTipScat),
    ]
    for factory_args, factory_kwargs, expected_cls in cases:
        scat_obj = scat.scat_factory(*factory_args, **factory_kwargs)
        assert isinstance(scat_obj, expected_cls)
        # the object must at least be evaluable at one angle pair / frequency
        scat_obj(0.0, 0.0, 2e6)
def make_scat_data_multi_freq():
    """Build a ScatFromData object holding SDH matrices at three frequencies."""
    scat_sdh = scat.SdhScat(5e-4, TestScattering.v_L, TestScattering.v_T)
    frequencies = [1e6, 2e6, 3e6]
    matrices = scat_sdh.as_multi_freq_matrices(frequencies, 80)
    return scat.ScatFromData.from_dict(frequencies, matrices)
# TODO: add test 'crack_tip' once properly implemented
@pytest.fixture(
    params=["sdh", "point", "data_singlefreq", "data_multifreq", "crack_centre"]
)
def scat_obj(request):
    """Return one scattering object per parametrised model kind."""
    builders = {
        "sdh": lambda: scat.SdhScat(
            5e-4, TestScattering.v_L, TestScattering.v_T
        ),
        "point": lambda: scat.PointSourceScat(
            TestScattering.v_L, TestScattering.v_T
        ),
        "data_singlefreq": make_scat_data_single_freq,
        "data_multifreq": make_scat_data_multi_freq,
        "crack_centre": lambda: scat.CrackCentreScat(
            2.0e-3, TestScattering.v_L, TestScattering.v_T, TestScattering.density
        ),
        # kept for when 'crack_tip' is added back to params (see TODO above)
        "crack_tip": lambda: scat.CrackTipScat(
            TestScattering.v_L, TestScattering.v_T
        ),
    }
    builder = builders.get(request.param)
    if builder is None:
        raise Exception("this fixture does not behave well")
    return builder()
@pytest.fixture(params=["data_singlefreq", "data_multifreq"])
def scat_data_obj(request):
    """Return a ScatFromData object built from single- or multi-frequency data."""
    builders = {
        "data_singlefreq": make_scat_data_single_freq,
        "data_multifreq": make_scat_data_multi_freq,
    }
    builder = builders.get(request.param)
    if builder is None:
        raise Exception("this fixture does not behave well")
    return builder()
class TestScattering:
v_L = 6300.0
v_T = 3100.0
density = 2700.0
def test_scattering(self, scat_obj):
numangles = 7
n, m = 9, 11
phi_in = scat.make_angles(n)
phi_out = scat.make_angles(m)
phi_in_array, phi_out_array = np.meshgrid(phi_in, phi_out, indexing="xy")
assert phi_in_array.shape == phi_out_array.shape == (m, n)
scat_keys = ("LL", "LT", "TL", "TT")
freq = 2e6
# test Scattering.__call__, Scattering.__str__
repr(scat_obj)
str(scat_obj)
# test Scattering.__call__ with 0d array angles
val_dict = scat_obj(0.1, 0.2, freq)
assert set(val_dict.keys()) == set(scat_keys)
for val in val_dict.values():
assert np.ndim(val) == 0
# test Scattering.__call__ with 1d array angles
val_dict = scat_obj(phi_in, phi_in, freq)
for val in val_dict.values():
assert val.shape == phi_in.shape
# test Scattering.__call__ with broadcast
val_dict = scat_obj(0.0, phi_out, freq)
val_dict2 = scat_obj(np.zeros_like(phi_out), phi_out, freq)
for scat_key in scat_keys:
np.testing.assert_allclose(val_dict[scat_key], val_dict2[scat_key])
val_dict = scat_obj(0.0, phi_out_array, freq)
val_dict2 = scat_obj(np.zeros_like(phi_out_array), phi_out_array, freq)
for scat_key in scat_keys:
np.testing.assert_allclose(val_dict[scat_key], val_dict2[scat_key])
# test Scattering.__call__ with 2d array angles
reference_dict = scat_obj(phi_in_array, phi_out_array, freq)
for val in reference_dict.values():
assert val.shape == phi_in_array.shape
# test broadcasting works well
for idx in np.ndindex(*phi_in_array.shape):
val_dict = scat_obj(phi_in_array[idx], phi_out_array[idx], freq)
for scat_key in scat_keys:
if np.isnan(reference_dict[scat_key][idx]):
continue
np.testing.assert_allclose(
val_dict[scat_key], reference_dict[scat_key][idx]
)
# computing the values for one scat_key
for scat_key in scat_keys:
val_dict = scat_obj(
phi_in_array, phi_out_array, freq, to_compute=[scat_key]
)
assert scat_key in val_dict
np.testing.assert_allclose(
val_dict[scat_key],
reference_dict[scat_key],
err_msg="different output for the scat_key",
)
# test Scattering.as_single_freq_matrices
matrices_singlef = scat_obj.as_single_freq_matrices(freq, numangles)
for scat_key in | |
from data import COCODetection, get_label_map, MEANS, COLORS
from yolact import Yolact
from utils.augmentations import BaseTransform, FastBaseTransform, Resize
from utils.functions import MovingAverage, ProgressBar
from layers.box_utils import jaccard, center_size, mask_iou
from utils import timer
from utils.functions import SavePath
from layers.output_utils import postprocess, undo_image_transformation
import pycocotools
from data import cfg, set_cfg, set_dataset
import numpy as np
import torch
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
import argparse
import time
import random
import cProfile
import pickle
import json
import os
from collections import defaultdict
from pathlib import Path
from collections import OrderedDict
from PIL import Image
import matplotlib.pyplot as plt
import cv2
def str2bool(v):
    """Convert a human-friendly string to a bool for argparse ``type=``.

    Accepts yes/true/t/y/1 and no/false/f/n/0 (case-insensitive); any
    other value raises ``argparse.ArgumentTypeError``.
    """
    lowered = v.lower()
    if lowered in {'yes', 'true', 't', 'y', '1'}:
        return True
    if lowered in {'no', 'false', 'f', 'n', '0'}:
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
def parse_args(argv=None):
    """Parse command-line options for YOLACT evaluation.

    The parsed namespace is published via the module-level global ``args``
    (nothing is returned), and ``random.seed`` is initialised as a side
    effect when ``--seed`` is given.
    """
    parser = argparse.ArgumentParser(
        description='YOLACT COCO Evaluation')
    parser.add_argument('--trained_model',
                        default='weights/ssd300_mAP_77.43_v2.pth', type=str,
                        help='Trained state_dict file path to open. If "interrupt", this will open the interrupt file.')
    parser.add_argument('--top_k', default=5, type=int,
                        help='Further restrict the number of predictions to parse')
    parser.add_argument('--cuda', default=True, type=str2bool,
                        help='Use cuda to evaulate model')
    parser.add_argument('--fast_nms', default=True, type=str2bool,
                        help='Whether to use a faster, but not entirely correct version of NMS.')
    parser.add_argument('--cross_class_nms', default=False, type=str2bool,
                        help='Whether compute NMS cross-class or per-class.')
    parser.add_argument('--display_masks', default=True, type=str2bool,
                        help='Whether or not to display masks over bounding boxes')
    parser.add_argument('--display_bboxes', default=True, type=str2bool,
                        help='Whether or not to display bboxes around masks')
    parser.add_argument('--display_text', default=True, type=str2bool,
                        help='Whether or not to display text (class [score])')
    parser.add_argument('--display_scores', default=True, type=str2bool,
                        help='Whether or not to display scores in addition to classes')
    parser.add_argument('--display', dest='display', action='store_true',
                        help='Display qualitative results instead of quantitative ones.')
    parser.add_argument('--shuffle', dest='shuffle', action='store_true',
                        help='Shuffles the images when displaying them. Doesn\'t have much of an effect when display is off though.')
    parser.add_argument('--ap_data_file', default='results/ap_data.pkl', type=str,
                        help='In quantitative mode, the file to save detections before calculating mAP.')
    parser.add_argument('--resume', dest='resume', action='store_true',
                        help='If display not set, this resumes mAP calculations from the ap_data_file.')
    parser.add_argument('--max_images', default=-1, type=int,
                        help='The maximum number of images from the dataset to consider. Use -1 for all.')
    parser.add_argument('--output_coco_json', dest='output_coco_json', action='store_true',
                        help='If display is not set, instead of processing IoU values, this just dumps detections into the coco json file.')
    parser.add_argument('--bbox_det_file', default='results/bbox_detections.json', type=str,
                        help='The output file for coco bbox results if --coco_results is set.')
    parser.add_argument('--mask_det_file', default='results/mask_detections.json', type=str,
                        help='The output file for coco mask results if --coco_results is set.')
    parser.add_argument('--config', default=None,
                        help='The config object to use.')
    parser.add_argument('--output_web_json', dest='output_web_json', action='store_true',
                        help='If display is not set, instead of processing IoU values, this dumps detections for usage with the detections viewer web thingy.')
    parser.add_argument('--web_det_path', default='web/dets/', type=str,
                        help='If output_web_json is set, this is the path to dump detections into.')
    parser.add_argument('--no_bar', dest='no_bar', action='store_true',
                        help='Do not output the status bar. This is useful for when piping to a file.')
    parser.add_argument('--display_lincomb', default=False, type=str2bool,
                        help='If the config uses lincomb masks, output a visualization of how those masks are created.')
    parser.add_argument('--benchmark', default=False, dest='benchmark', action='store_true',
                        help='Equivalent to running display mode but without displaying an image.')
    parser.add_argument('--no_sort', default=False, dest='no_sort', action='store_true',
                        help='Do not sort images by hashed image ID.')
    parser.add_argument('--seed', default=None, type=int,
                        help='The seed to pass into random.seed. Note: this is only really for the shuffle and does not (I think) affect cuda stuff.')
    parser.add_argument('--mask_proto_debug', default=False, dest='mask_proto_debug', action='store_true',
                        help='Outputs stuff for scripts/compute_mask.py.')
    parser.add_argument('--no_crop', default=False, dest='crop', action='store_false',
                        help='Do not crop output masks with the predicted bounding box.')
    parser.add_argument('--image', default=None, type=str,
                        help='A path to an image to use for display.')
    parser.add_argument('--images', default=None, type=str,
                        help='An input folder of images and output folder to save detected images. Should be in the format input->output.')
    parser.add_argument('--video', default=None, type=str,
                        help='A path to a video to evaluate on. Passing in a number will use that index webcam.')
    parser.add_argument('--video_multiframe', default=1, type=int,
                        help='The number of frames to evaluate in parallel to make videos play at higher fps.')
    parser.add_argument('--score_threshold', default=0, type=float,
                        help='Detections with a score under this threshold will not be considered. This currently only works in display mode.')
    parser.add_argument('--dataset', default=None, type=str,
                        help='If specified, override the dataset specified in the config with this one (example: coco2017_dataset).')
    parser.add_argument('--detect', default=False, dest='detect', action='store_true',
                        help='Don\'t evauluate the mask branch at all and only do object detection. This only works for --display and --benchmark.')
    parser.add_argument('--display_fps', default=False, dest='display_fps', action='store_true',
                        help='When displaying / saving video, draw the FPS on the frame')
    parser.add_argument('--emulate_playback', default=False, dest='emulate_playback', action='store_true',
                        help='When saving a video, emulate the framerate that you\'d get running in real-time mode.')
    # NOTE(review): the four options below are declared without type= or
    # action=, so a value passed on the command line arrives as a *string*
    # (e.g. 'True') while the default stays the bool False — confirm that
    # callers compare against the form they expect.
    parser.add_argument('--display_best_bboxes_only', default=False)
    parser.add_argument('--display_best_masks_only', default=False)
    parser.add_argument('--display_object_without_mask', default=False)
    parser.add_argument('--display_only_car', default=False)
    # no_hash has no matching add_argument in this function; set_defaults
    # still creates args.no_hash in the namespace.
    parser.set_defaults(no_bar=False, display=False, resume=False, output_coco_json=False, output_web_json=False, shuffle=False,
                        benchmark=False, no_sort=False, no_hash=False, mask_proto_debug=False, crop=True, detect=False, display_fps=False,
                        emulate_playback=False)
    global args
    args = parser.parse_args(argv)
    # the web viewer consumes COCO-format detections, so imply the json dump
    if args.output_web_json:
        args.output_coco_json = True
    if args.seed is not None:
        random.seed(args.seed)
# Per-device color cache: maps a GPU index (or None) -> {color_idx: color}.
# defaultdict(dict) is the idiomatic spelling of defaultdict(lambda: {}).
color_cache = defaultdict(dict)
def prep_display(dets_out, img, h, w, args, undo_transform=True, class_color=False, mask_alpha=0.45, fps_str=''):
"""
Note: If undo_transform=False then im_h and im_w are allowed to be None.
"""
if undo_transform:
img_numpy = undo_image_transformation(img, w, h)
img_gpu = torch.Tensor(img_numpy).cuda()
else:
img_gpu = img / 255.0
h, w, _ = img.shape
with timer.env('Postprocess'):
save = cfg.rescore_bbox
cfg.rescore_bbox = True
t = postprocess(dets_out, w, h, visualize_lincomb = args.display_lincomb,
crop_masks = args.crop,
score_threshold = args.score_threshold)
cfg.rescore_bbox = save
with timer.env('Copy'):
idx = t[1].argsort(0, descending=True)[:args.top_k]
if cfg.eval_mask_branch:
# Masks are drawn on the GPU, so don't copy
masks = t[3][idx]
classes, scores, boxes = [x[idx].detach().cpu().numpy() for x in t[:3]]
num_dets_to_consider = min(args.top_k, classes.shape[0])
for j in range(num_dets_to_consider):
if scores[j] < args.score_threshold:
num_dets_to_consider = j
break
# Quick and dirty lambda for selecting the color for a particular index
# Also keeps track of a per-gpu color cache for maximum speed
def get_color(j, on_gpu=None):
global color_cache
color_idx = (classes[j] * 5 if class_color else j * 5) % len(COLORS)
if on_gpu is not None and color_idx in color_cache[on_gpu]:
return color_cache[on_gpu][color_idx]
else:
color = COLORS[color_idx]
if not undo_transform:
# The image might come in as RGB or BRG, depending
color = (color[2], color[1], color[0])
if on_gpu is not None:
color = torch.Tensor(color).to(on_gpu).float() / 255.
color_cache[on_gpu][color_idx] = color
return color
# First, draw the masks on the GPU where we can do it really fast
# Beware: very fast but possibly unintelligible mask-drawing code ahead
# I wish I had access to OpenGL or Vulkan but alas, I guess Pytorch tensor operations will have to suffice
if args.display_masks and cfg.eval_mask_branch and num_dets_to_consider > 0:
masks = masks[:num_dets_to_consider, :, :, None]
colors = torch.cat([get_color(j, on_gpu=img_gpu.device.index).view(1, 1, 1, 3) for j in range(num_dets_to_consider)], dim=0)
masks_color = masks.repeat(1, 1, 1, 3) * colors * mask_alpha
inv_alph_masks = masks * (-mask_alpha) + 1
masks_color_summand = masks_color[0]
if num_dets_to_consider > 1:
inv_alph_cumul = inv_alph_masks[:(num_dets_to_consider-1)].cumprod(dim=0)
masks_color_cumul = masks_color[1:] * inv_alph_cumul
masks_color_summand += masks_color_cumul.sum(dim=0)
img_gpu = img_gpu * inv_alph_masks.prod(dim=0) + masks_color_summand
img_numpy_mask = (masks_color_summand * 255).byte().cpu().numpy()
cv2.imwrite('results/mask_car_image.jpg', img_numpy_mask)
print("Mask for all visible car is generated")
if args.display_best_masks_only == True and args.top_k == 1:
masks = masks[:num_dets_to_consider, :, :, None]
num_dets_to_consider = min(args.top_k, classes.shape[0])
print('maskshape', (masks.shape))
for i in range(num_dets_to_consider):
msk = masks[i, :, :, None]
mask = msk.view(1, masks.shape[1], masks.shape[2], 1)
print('newmaskshape', (mask.shape))
img_gpu_masked = img_gpu * (mask.sum(dim=0) >= 1).float().expand(-1, -1, 3)
img_numpy_masked = (img_gpu_masked * 255).byte().cpu().numpy()
cv2.imwrite('results/mask_image'+str(i)+'.jpg', img_numpy_masked)
print("Mask for the most visible car is generated")
if args.display_fps:
# Draw the box for the fps on the GPU
font_face = cv2.FONT_HERSHEY_DUPLEX
font_scale = 0.6
font_thickness = 1
text_w, text_h = cv2.getTextSize(fps_str, font_face, font_scale, font_thickness)[0]
img_gpu[0:text_h+8, 0:text_w+8] *= 0.6 # 1 - Box alpha
# Then draw the stuff that needs to be done on the cpu
# Note, make sure this is a uint8 tensor or opencv will not anti alias text for whatever reason
img_numpy = (img_gpu * 255).byte().cpu().numpy()
if args.display_fps:
# Draw the text on the CPU
text_pt = (4, text_h + 2)
text_color = [255, 255, 255]
cv2.putText(img_numpy, fps_str, text_pt, font_face, font_scale, text_color, font_thickness, cv2.LINE_AA)
if num_dets_to_consider == 0:
return img_numpy
if args.display_text or args.display_bboxes:
for j in reversed(range(num_dets_to_consider)):
x1, y1, x2, y2 = boxes[j, :]
color = get_color(j)
score = scores[j]
if args.display_bboxes:
cv2.rectangle(img_numpy, (x1, y1), (x2, y2), color, 1)
if args.display_best_bboxes_only == 'True':
crop = img_numpy[y1:y2,x1:x2]
cv2.imwrite('results/crop_object.png',crop)
print("crop for the most visible car is generated")
if args.display_text:
_class = cfg.dataset.class_names[classes[j]]
text_str = '%s: %.2f' % (_class, score) if args.display_scores else _class
font_face = cv2.FONT_HERSHEY_DUPLEX
font_scale = | |
0, 0, 0, 0],
[958, 66.7, 0, 9999, -9999, 1.0, 100, 1, 66.7, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[959, 45.5, 0, 9999, -9999, 1.0, 100, 1, 45.5, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[960, 26.5, 0, 9999, -9999, 1.0, 100, 1, 26.5, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[963, 467.970736, 0, 9999, -9999, 1.0, 100, 1, 875.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[967, 37.5, 0, 9999, -9999, 1.0, 100, 1, 37.5, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[969, 56.9, 0, 9999, -9999, 0.999584, 100, 1, 56.9, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[971, 20.0, 0, 9999, -9999, 1.0, 100, 1, 20.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[978, 4.6, 0, 9999, -9999, 1.0, 100, 1, 4.6, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[982, 9.9, 0, 9999, -9999, 1.0, 100, 1, 9.9, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[984, 465.0, 0, 9999, -9999, 1.0, 100, 1, 465.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[985, 22.0, 0, 9999, -9999, 1.0, 100, 1, 22.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[986, 11.2, 0, 9999, -9999, 1.0, 100, 1, 11.2, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[987, 164.5, 0, 9999, -9999, 1.0, 100, 1, 164.5, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[988, 5.1, 0, 9999, -9999, 1.0, 100, 1, 5.1, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[993, 392.0, 0, 9999, -9999, 1.0, 100, 1, 392.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[994, 33.0, 0, 9999, -9999, 1.0, 100, 1, 33.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[995, 4.2, 0, 9999, -9999, 1.0, 100, 1, 4.2, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[997, 18.8, 0, 9999, -9999, 1.0, 100, 1, 18.8, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[999, 15.6, 0, 9999, -9999, 1.0, 100, 1, 15.6, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1002, 9.9, 0, 9999, -9999, 1.0, 100, 1, 9.9, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1007, 23.3, 0, 9999, -9999, 1.0, 100, 1, 23.3, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1011, 18.7, 0, 9999, -9999, 1.0, 100, 1, 18.7, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1027, 34.281803, 0, 9999, -9999, 1.0, 100, 1, 48.3, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1028, 400.0, 0, 9999, -9999, 1.0, 100, 1, 400.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1029, 60.0, 0, 9999, -9999, 1.0, 100, 1, 60.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1030, 540.370747, 0, 9999, -9999, 1.0, 100, 1, 1018.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1031, 1447.199962, 0, 9999, -9999, 1.0, 100, 1, 1447.2, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1032, 132.187606, 0, 9999, -9999, 1.0, 100, 1, 153.510391, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1033, 41.249214, 0, 9999, -9999, 1.0, 100, 1, 50.164506, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1034, 73.793899, 0, 9999, -9999, 1.0, 100, 1, 84.262779, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1035, 41.689523, 0, 9999, -9999, 1.0, 100, 1, 49.886469, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1036, 57.674205, 0, 9999, -9999, 1.0, 100, 1, 67.223077, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1037, 71.536325, 0, 9999, -9999, 1.0, 100, 1, 94.684044, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1038, 64.020269, 0, 9999, -9999, 1.0, 100, 1, 85.798525, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1039, 58.290788, 0, 9999, -9999, 1.0, 100, 1, 132.724114, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1040, 0.003204, 0, 9999, -9999, 1.0, 100, 1, 0.064179, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1041, 162.900875, 0, 9999, -9999, 1.0, 100, 1, 204.187624, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1042, 15.445472, 0, 9999, -9999, 1.0, 100, 1, 52.70053, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1043, 0.452383, 0, 9999, -9999, 1.0, 100, 1, 6.035538, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1044, 27.204212, 0, 9999, -9999, 1.0, 100, 1, 36.163532, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1046, 95.787747, 0, 9999, -9999, 1.0, 100, 1, 106.787063, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1047, 10.11057, 0, 9999, -9999, 1.0, 100, 1, 13.029581, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1048, 58.871775, 0, 9999, -9999, 1.0, 100, 1, 71.656883, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1049, 247.339252, 0, 9999, -9999, 1.0, 100, 1, 293.755375, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1050, 46.71577, 0, 9999, -9999, 1.0, 100, 1, 52.781606, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1051, 263.653772, 0, 9999, -9999, 1.0, 100, 1, 304.42978, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1052, 20.622775, 0, 9999, -9999, 1.0, 100, 1, 20.66869, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1053, 16.328545, 0, 9999, -9999, 1.0, 100, 1, 16.368087, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1055, 1.176646, 0, 9999, -9999, 1.0, 100, 1, 2.856069, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1056, 502.662947, 0, 9999, -9999, 1.0, 100, 1, 603.943953, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1057, 230.717255, 0, 9999, -9999, 1.0, 100, 1, 426.979979, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1058, 789.438994, 0, 9999, -9999, 1.0, 100, 1, 1055.735174, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1059, 313.981535, 0, 9999, -9999, 1.0, 100, 1, 414.871332, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1060, 2.713848, 0, 9999, -9999, 1.0, 100, 1, 10.351632, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1061, 76.646892, 0, 9999, -9999, 1.0, 100, 1, 161.862597, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1062, 0.779798, 0, 9999, -9999, 1.0, 100, 1, 2.878561, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1063, 2.093641, 0, 9999, -9999, 1.0, 100, 1, 8.670916, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1064, 175.57795, 0, 9999, -9999, 1.0, 100, 1, 209.786524, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1065, 284.417823, 0, 9999, -9999, 1.0, 100, 1, 339.421643, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1066, 127.301975, 0, 9999, -9999, 1.0, 100, 1, 134.399019, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1067, 13.475896, 0, 9999, -9999, 1.0, 100, 1, 32.653526, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1068, 3.497467, 0, 9999, -9999, 1.0, 100, 1, 5.009022, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1069, 2.016518, 0, 9999, -9999, 1.0, 100, 1, 3.190759, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1070, 0.50587, 0, 9999, -9999, 1.0, 100, 1, 0.788599, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1071, 2.791301, 0, 9999, -9999, 1.0, 100, 1, 4.328696, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1072, 107.042268, 0, 9999, -9999, 1.0, 100, 1, 112.606433, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1073, 76.834785, 0, 9999, -9999, 1.0, 100, 1, 77.81765, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1074, 144.695533, 0, 9999, -9999, 1.0, 100, 1, 153.592986, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1075, 11.111229, 0, 9999, -9999, 1.0, 100, 1, 15.783448, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1076, 0.155569, 0, 9999, -9999, 1.0, 100, 1, 2.29551, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1077, 6.601032, 0, 9999, -9999, 1.0, 100, 1, 26.120041, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1078, 3.937705, 0, 9999, -9999, 1.0, 100, 1, 34.413246, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1079, 68.367079, 0, 9999, -9999, 1.0, 100, 1, 72.327992, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1080, 49.159773, 0, 9999, -9999, 1.0, 100, 1, 132.149983, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1081, 327.127922, 0, 9999, -9999, 1.0, 100, 1, 405.642115, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1082, 400.002811, 0, 9999, -9999, 1.0, 100, 1, 510.054159, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1083, 390.775877, 0, 9999, -9999, 1.0, 100, 1, 633.681488, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1084, 499.466952, 0, 9999, -9999, 1.0, 100, 1, 602.719371, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1085, 90.851838, 0, 9999, -9999, 1.0, 100, 1, 113.714399, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1086, 168.443589, 0, 9999, -9999, 1.0, 100, 1, 225.59917, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1087, 52.254535, 0, 9999, -9999, 1.0, 100, 1, 116.66597, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1088, 18.821987, 0, 9999, -9999, 1.0, 100, 1, 36.782492, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1089, 107.895687, 0, 9999, -9999, 1.0, 100, 1, 384.449592, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1090, 88.869691, 0, 9999, -9999, 1.0, 100, 1, 89.140897, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1091, 38.168328, 0, 9999, -9999, 1.0, 100, 1, 45.7939, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1092, 41.263437, 0, 9999, -9999, 1.0, 100, 1, 54.002032, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1093, 155.062631, 0, 9999, -9999, 1.0, 100, 1, 155.605298, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1094, 3.68356, 0, 9999, -9999, 1.0, 100, 1, 3.759038, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1095, 0.199545, 0, 9999, -9999, 1.0, 100, 1, 0.204951, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1096, 82.926285, 0, 9999, -9999, 1.0, 100, 1, 84.50612, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1097, 4.592179, 0, 9999, -9999, 1.0, 100, 1, 4.601122, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1098, 70.952976, 0, 9999, -9999, 1.0, 100, 1, 71.025499, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1099, 290.424712, 0, 9999, -9999, 1.0, 100, 1, 290.937198, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1100, 0.001647, 0, 9999, -9999, 1.0, 100, 1, 0.026696, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1101, 57.788539, 0, 9999, -9999, 1.0, 100, 1, 83.930665, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1102, 282.936992, 0, 9999, -9999, 1.0, 100, 1, 350.979988, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1103, 209.094175, 0, 9999, -9999, 1.0, 100, 1, 245.381701, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1104, 0.193798, 0, 9999, -9999, 1.0, 100, 1, 0.206918, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1105, 2.157234, 0, 9999, -9999, 1.0, 100, 1, 2.178593, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1106, 2.178127, 0, 9999, -9999, 1.0, 100, 1, 2.289793, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1107, 60.399619, 0, 9999, -9999, 1.0, 100, 1, 76.221615, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1108, 249.016983, 0, 9999, -9999, 1.0, 100, 1, 320.422751, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1109, 0.748192, 0, 9999, -9999, 1.0, 100, 1, 0.77821, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1110, 1.6284, 0, 9999, -9999, 1.0, 100, 1, 1.654557, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1111, 69.111708, 0, 9999, -9999, 1.0, 100, 1, 89.637993, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1112, 67.25534, 0, 9999, -9999, 1.0, 100, 1, 69.53429, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1113, 3.35137, 0, 9999, -9999, 1.0, 100, 1, 3.536361, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1114, 11.453643, 0, 9999, -9999, 1.0, 100, 1, 13.446889, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1115, 49.646632, 0, 9999, -9999, 1.0, 100, 1, 50.575278, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1116, 32.422475, 0, 9999, -9999, 1.0, 100, 1, 32.601142, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1117, 90.572213, 0, 9999, -9999, 1.0, 100, 1, 90.792541, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1118, 7.20873, 0, 9999, -9999, 1.0, 100, 1, 8.725012, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1119, 43.118994, 0, 9999, -9999, 1.0, 100, 1, 43.254023, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1120, 2.063216, 0, 9999, -9999, 1.0, 100, 1, 2.416001, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1121, 0.449377, 0, 9999, -9999, 1.0, 100, 1, 0.540589, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1122, 1.283134, 0, 9999, -9999, 1.0, 100, 1, 1.462883, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1123, 0.997732, 0, 9999, -9999, 1.0, 100, 1, 1.464336, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1124, 1.087464, 0, 9999, -9999, 1.0, 100, 1, 1.288283, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1125, 24.370614, 0, 9999, -9999, 1.0, 100, 1, 25.818899, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1126, 27.747657, 0, 9999, -9999, 1.0, 100, 1, 29.154893, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1127, 103.548812, 0, 9999, -9999, 1.0, 100, 1, 105.296621, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1128, 3.022664, 0, 9999, -9999, 1.0, 100, 1, 3.06139, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1129, 4.672624, 0, 9999, -9999, 1.0, 100, 1, 4.738747, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1130, 1.004021, 0, 9999, -9999, 1.0, 100, 1, 1.025754, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1131, 2.856115, 0, 9999, -9999, 1.0, 100, 1, 2.897078, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1132, 0.351349, 0, 9999, -9999, 1.0, 100, 1, 0.359497, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1133, 0.598181, 0, 9999, -9999, 1.0, 100, 1, 0.719597, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1134, 0.422665, 0, 9999, -9999, 1.0, 100, 1, 0.508453, 0.0, 0, 0, 0, 0, 0, 0, 0, | |
is None:
reward_macro = self.reward
else:
reward_macro += self.reward
return self.obs, reward_macro, self.reward_bounty_raw_to_return, self.done, self.info
def one_step(self):
'''as a environment, it has step method.
But the step method step forward for args.hierarchy_interval times,
as a macro action, this method is to step forward for a singel step'''
'''for each one_step, interact with env for one step'''
self.interact_one_step()
self.step_i += 1
if self.step_i==args.num_steps[self.hierarchy_id]:
'''if reach args.num_steps[self.hierarchy_id], update agent for one step with the experiences stored in rollouts'''
self.update_agent_one_step()
self.step_i = 0
    def specify_action(self):
        '''Interactively override the sampled action (debugging tool).

        When ``args.test_action`` is set, the action chosen by the policy is
        replaced by one typed at the console, so a human can drive the agent
        and inspect what is happening.  Layer 0 reads a one-key action
        (q/w/e/a/s/d); layer 2 reads a raw integer action index.
        '''
        if args.test_action:
            if self.hierarchy_id in [0]:
                human_action = 'nope'
                # Keep prompting until one of the six recognised keys is typed.
                while human_action not in ['q','w','e','a','s','d']:
                    human_action = input(
                        '[Macro Action {}, actual action {}], Act: '.format(
                            utils.onehot_to_index(input_actions_onehot_global[0][0].cpu().numpy()),
                            self.action[0,0].item(),
                        )
                    )
                if args.env_name in ['MontezumaRevengeNoFrameskip-v4']:
                    # Keyboard key -> Montezuma action-index mapping.
                    human_action_map = {
                        'd':0,
                        'a':1,
                        's':2,
                        'w':3,
                        'e':4,
                        'q':5,
                    }
                    self.action[0,0] = int(human_action_map[human_action])
                else:
                    # NOTE(review): for other envs the typed key is cast with
                    # int(), yet the filter above only admits letter keys --
                    # confirm which envs this branch is meant for.
                    self.action[0,0] = int(human_action)
            if self.hierarchy_id in [2]:
                # Top layer: read the macro-action index directly.
                self.action[0,0] = int(
                    input(
                        '[top Action {}], Act: '.format(
                            self.action[0,0].item(),
                        )
                    )
                )
        # # DEBUG: specify higher level actions
        # if args.summarize_one_episode.split('_')[0] in ['sub']:
        #     if self.hierarchy_id in [1]:
        #         self.action[0,0]=int(args.summarize_one_episode.split('_')[1])
        # print(self.action[:,0])
def log_for_specify_action(self):
if args.test_action and (self.hierarchy_id in [0]):
print_str = ''
print_str += '[reward {} ][done {}][masks {}]'.format(
self.reward_raw_OR_reward[0],
self.done[0],
self.masks[0].item(),
)
if args.reward_bounty > 0.0:
print_str += '[reward_bounty {}]'.format(
self.reward_bounty[0],
)
print(print_str)
def generate_actions_to_step(self):
'''this method generate actions_to_step controlled by many logic'''
self.actions_to_step = self.action.squeeze(1).cpu().numpy()
if self.hierarchy_id not in [0]:
self.actions_to_step = {
'actions_to_step': self.actions_to_step,
}
if (self.hierarchy_id not in [0]) and (args.reward_bounty > 0.0) and (not args.mutual_information):
'''predict states'''
self.transition_model.eval()
with torch.no_grad():
if len(self.rollouts.observations[self.step_i].size())==4:
'''state are represented in a image format'''
self.observation_predicted_from_to_downer_layer = self.rollouts.observations[self.step_i][:,-1:]
elif len(self.rollouts.observations[self.step_i].size())==2:
'''state are represented in a one-dimentional vector format'''
self.observation_predicted_from_to_downer_layer = self.rollouts.observations[self.step_i]
else:
raise NotImplemented
if len(self.rollouts.observations[self.step_i].size()) in [4]:
now_states = self.rollouts.observations[self.step_i].repeat(self.envs.action_space.n,1,1,1)
elif len(self.rollouts.observations[self.step_i].size()) in [2]:
now_states = self.rollouts.observations[self.step_i].repeat(self.envs.action_space.n,1)
else:
raise NotImplemented
self.predicted_next_observations_to_downer_layer, self.predicted_reward_bounty_to_downer_layer = self.transition_model(
inputs = now_states,
input_action = self.action_onehot_batch,
)
'''generate inverse mask'''
if self.args.inverse_mask:
inverse_mask_model.eval()
self.mask_of_predicted_observation_to_downer_layer = inverse_mask_model.get_mask(
# last_states = (now_states[:,-1:]+self.predicted_next_observations_to_downer_layer),
last_states = now_states[:,-1:], # only predict from real obs first
)
self.predicted_next_observations_to_downer_layer = self.predicted_next_observations_to_downer_layer.view(self.envs.action_space.n,args.num_processes,*self.predicted_next_observations_to_downer_layer.size()[1:])
if self.args.inverse_mask:
self.mask_of_predicted_observation_to_downer_layer = self.mask_of_predicted_observation_to_downer_layer.view(self.envs.action_space.n,args.num_processes,*self.mask_of_predicted_observation_to_downer_layer.size()[1:])
else:
self.mask_of_predicted_observation_to_downer_layer = None
self.predicted_reward_bounty_to_downer_layer = self.predicted_reward_bounty_to_downer_layer.view(self.envs.action_space.n,args.num_processes,*self.predicted_reward_bounty_to_downer_layer.size()[1:]).squeeze(2)
self.actions_to_step.update(
{
'predicted_next_observations_to_downer_layer': self.predicted_next_observations_to_downer_layer,
'mask_of_predicted_observation_to_downer_layer': self.mask_of_predicted_observation_to_downer_layer,
'observation_predicted_from_to_downer_layer': self.observation_predicted_from_to_downer_layer,
'predicted_reward_bounty_to_downer_layer': self.predicted_reward_bounty_to_downer_layer,
}
)
def generate_reward_bounty(self):
'''this method generate reward bounty'''
self.bounty_clip *= 0.0 # to record the clip value
self.reward_bounty_raw_to_return *= 0.0 # to be return and train the bounty prediction
self.reward_bounty *= 0.0 # bounty after clip
self.reward_final *= 0.0 # reward be used to update policy
'''START: computer normalized reward_bounty, EVERY T interval'''
if (args.reward_bounty>0) and (self.hierarchy_id not in [args.num_hierarchy-1]) and (self.is_final_step_by_upper_layer or self.is_extend_step):
'''START: compute none normalized reward_bounty_raw_to_return'''
action_rb = self.rollouts.input_actions[self.step_i].nonzero()[:,1]
obs_rb = self.obs.astype(float)-self.observation_predicted_from_by_upper_layer.cpu().numpy()
prediction_rb = self.predicted_next_observations_by_upper_layer.cpu().numpy()
if self.args.inverse_mask:
mask_rb = self.mask_of_predicted_observation_by_upper_layer.cpu().numpy()
for process_i in range(args.num_processes):
difference_list = []
for action_i in range(prediction_rb.shape[0]):
if action_i!=action_rb[process_i]:
'''compute difference'''
if args.distance in ['l2']:
if args.env_name in ['Explore2D']:
difference = np.linalg.norm(
x = (obs_rb[process_i][0,0]-prediction_rb[action_i,process_i][0,0]),
ord = 2,
)
elif ('MinitaurBulletEnv' in args.env_name) or ('AntBulletEnv' in args.env_name):
'''28:30 represents the position'''
difference = np.linalg.norm(
x = (obs_rb[process_i][28:30]-prediction_rb[action_i,process_i][28:30]),
ord = 2,
)/(obs_rb[process_i][28:30].shape[0]**0.5)
elif args.env_name in ['ReacherBulletEnv-v1','Explore2DContinuous']:
'''0:2 represents the position'''
difference = np.linalg.norm(
x = (obs_rb[process_i][0:2]-prediction_rb[action_i,process_i][0:2]),
ord = 2,
)/(obs_rb[process_i][0:2].shape[0]**0.5)
else:
raise NotImplemented
elif args.distance in ['mass_center']:
# mask here: *mask_rb[action_i,process_i][0]
difference = np.linalg.norm(
get_mass_center(obs_rb[process_i][0])-get_mass_center(prediction_rb[action_i,process_i][0])
)
else:
raise NotImplemented
difference_list += [difference*args.reward_bounty]
if args.diversity_driven_active_function in ['min']:
self.reward_bounty_raw_to_return[process_i] += float(np.amin(difference_list))
elif args.diversity_driven_active_function in ['sum']:
self.reward_bounty_raw_to_return[process_i] += float(np.sum(difference_list))
else:
raise NotImplemented
'''END: compute none normalized reward_bounty_raw_to_return'''
'''mask reward bounty, since the final state is start state,
and the estimation from transition model is not accurate'''
self.reward_bounty_raw_to_return *= self.masks.squeeze()
'''START: computer bounty after being clipped'''
if args.clip_reward_bounty:
for process_i in range(args.num_processes):
self.bounty_clip[process_i] = self.predicted_reward_bounty_by_upper_layer[action_rb[process_i]][process_i]
delta = (self.reward_bounty_raw_to_return[process_i]-self.bounty_clip[process_i])
if args.clip_reward_bounty_active_function in ['linear']:
self.reward_bounty[process_i] = delta
elif args.clip_reward_bounty_active_function in ['u']:
self.reward_bounty[process_i] = delta.sign().clamp(min=0.0,max=1.0)
elif args.clip_reward_bounty_active_function in ['relu']:
self.reward_bounty[process_i] = F.relu(delta)
elif args.clip_reward_bounty_active_function in ['shrink_relu']:
positive_active = delta.sign().clamp(min=0.0,max=1.0)
self.reward_bounty[process_i] = delta * positive_active + positive_active - 1
else:
raise Exception('No Supported')
else:
self.reward_bounty = self.reward_bounty_raw_to_return
'''END: end of computer bounty after being clipped'''
'''END: computer normalized reward_bounty'''
'''START: compute reward_final for updating the policy'''
self.reward_final += self.reward_bounty
'''rewards added to reward_final in following part will NOT be normalized'''
if args.reward_bounty>0:
if self.hierarchy_id in [args.num_hierarchy-1]:
'''top level only receive reward from env or nothing to observe unsupervised learning'''
if self.args.env_name in ['OverCooked','GridWorld'] or ('NoFrameskip-v4' in args.env_name):
'''top level only receive reward from env'''
self.reward_final += self.reward.cuda()
elif (self.args.env_name in ['MineCraft','Explore2D','Explore2DContinuous']) or ('Bullet' in args.env_name):
'''top level only receive nothing to observe unsupervised learning'''
pass
else:
raise NotImplemented
else:
'''other levels except top level'''
if (self.args.env_name in ['OverCooked','MineCraft','Explore2D','Explore2DContinuous']):
'''rewards occues less frequently or never occurs, down layers do not receive extrinsic reward'''
pass
elif self.args.env_name in ['GridWorld','AntBulletEnv-v1'] or ('NoFrameskip-v4' in args.env_name):
'''reward occurs more frequently and we want down layers to know it'''
if self.args.env_name in ['GridWorld'] or ('NoFrameskip-v4' in args.env_name):
self.reward_final += self.reward.cuda()
elif self.args.env_name in ['AntBulletEnv-v1']:
self.reward_final += (self.reward.cuda()*0.001)
else:
raise NotImplemented
else:
raise NotImplemented
'''END: compute reward_final for updating the policy'''
'''may mask to stop value function'''
if args.reward_bounty>0:
if not args.unmask_value_function:
if self.is_final_step_by_upper_layer:
'''mask it and stop reward function'''
self.masks = self.masks * 0.0
def interact_one_step(self):
'''interact with self.envs for one step and store experience into self.rollouts'''
self.rollouts.input_actions[self.step_i].copy_(input_actions_onehot_global[self.hierarchy_id])
'''Sample actions'''
with torch.no_grad():
self.value, self.action, self.action_log_prob, self.states = self.actor_critic.act(
inputs = self.rollouts.observations[self.step_i],
states = self.rollouts.states[self.step_i],
masks = self.rollouts.masks[self.step_i],
deterministic = self.deterministic,
input_action = self.rollouts.input_actions[self.step_i],
)
self.specify_action()
self.generate_actions_to_step()
'''Obser reward and next obs'''
fetched = self.envs.step(self.actions_to_step)
if self.hierarchy_id in [0]:
# print('====')
# print(self.obs[0])
self.obs, self.reward_raw_OR_reward, self.done, self.info = fetched
# print(self.obs[0])
# print(self.done[0])
# input('continue')
else:
self.obs, self.reward_raw_OR_reward, self.reward_bounty_raw_returned, self.done, self.info = fetched
if self.hierarchy_id in [0]:
if args.test_action:
win_dic['Obs'] = viz.images(
self.obs[0],
win=win_dic['Obs'],
opts=dict(title='obs')
)
self.masks = torch.FloatTensor([[0.0] if done_ else [1.0] for done_ in self.done]).cuda()
if self.hierarchy_id in [(args.num_hierarchy-1)]:
'''top hierarchy layer is responsible for reseting env if all env has done'''
if args.test_action:
if self.masks[0] == 0.0:
self.obs = self.reset()
else:
if self.masks.sum() == 0.0:
self.obs = self.reset()
if self.hierarchy_id in [0]:
'''only when hierarchy_id is 0, the envs is returning reward_raw from the basic game emulator'''
self.reward_raw = torch.from_numpy(self.reward_raw_OR_reward).float()
if args.env_name in ['OverCooked','MineCraft','GridWorld','Explore2D'] or ('NoFrameskip-v4' in args.env_name):
self.reward = self.reward_raw.sign()
elif ('Bullet' in args.env_name) or (args.env_name in ['Explore2DContinuous']):
self.reward = self.reward_raw
else:
raise NotImplemented
else:
'''otherwise, this is reward'''
self.reward = self.reward_raw_OR_reward
self.generate_reward_bounty()
self.log_for_specify_action()
env_0_sleeping = self.envs.get_sleeping(env_index=0)
if env_0_sleeping in [False]:
self.step_summarize_from_env_0()
elif env_0_sleeping in [True]:
pass
else:
raise NotImplementedError
'''If done then clean the history of observations'''
if self.current_obs.dim() == 4:
self.current_obs *= self.masks.unsqueeze(2).unsqueeze(2)
else:
self.current_obs *= self.masks
self.update_current_obs(self.obs)
if self.hierarchy_id not in [0]:
self.rollouts.reward_bounty_raw[self.rollouts.step].copy_(self.reward_bounty_raw_returned.unsqueeze(1))
self.rollouts.insert(
self.current_obs,
self.states,
self.action,
self.action_log_prob,
self.value,
self.reward_final.unsqueeze(1),
self.masks,
)
def refresh_update_type(self):
if args.reward_bounty > 0.0:
if args.train_mode in ['together']:
'''train_mode is together'''
self.update_type = 'both'
self.deterministic = False
elif args.train_mode in ['switch']:
'''train_mode is switch'''
'''switch training between actor_critic and transition_model'''
if self.update_i%2 == 1:
self.update_type = 'actor_critic'
self.deterministic = False
else:
self.update_type = 'transition_model'
self.deterministic = True
'''top layer do not have a transition_model'''
if self.hierarchy_id in [args.num_hierarchy-1]:
self.update_type = 'actor_critic'
self.deterministic = False
else:
'''there is no transition_model'''
self.update_type = 'actor_critic'
self.deterministic = False
'''overwrite if args.act_deterministically'''
if args.act_deterministically:
self.deterministic = True
def update_agent_one_step(self):
'''update the self.actor_critic with self.agent,
according to the experiences stored in self.rollouts'''
'''prepare rollouts for updating actor_critic'''
if self.update_type in ['actor_critic','both']:
with torch.no_grad():
self.next_value = self.actor_critic.get_value(
inputs=self.rollouts.observations[-1],
states=self.rollouts.states[-1],
masks=self.rollouts.masks[-1],
input_action=self.rollouts.input_actions[-1],
).detach()
self.rollouts.compute_returns(self.next_value, args.use_gae, args.gamma, args.tau)
'''update, either actor_critic or transition_model'''
epoch_loss = {}
epoch_loss.update(
self.agent.update(self.update_type)
)
if self.args.inverse_mask and (self.hierarchy_id in [0]):
epoch_loss.update(
update_inverse_mask_model(
bottom_layer=self,
)
)
self.num_trained_frames += (args.num_steps[self.hierarchy_id]*args.num_processes)
self.update_i += 1
'''prepare rollouts for new round of interaction'''
self.rollouts.after_update()
if (self.args.env_name in ['Explore2D']) and (self.hierarchy_id in [0]):
try:
terminal_states_f = tables.open_file(
'{}/terminal_states.h5'.format(
args.save_dir,
),
mode='a',
)
for t_s in self.terminal_states:
terminal_states_f.root.data.append(t_s)
self.terminal_states = []
terminal_states_f.close()
except Exception as e:
print('Skip appending | |
<gh_stars>10-100
# -*- coding: UTF-8 -*-
"""A class for converting ASCII inputs to JSON."""
import csv
import os
import re
from collections import Counter, OrderedDict
from decimal import Decimal
from itertools import chain, product
import inflect
import numpy as np
from astrocats.catalog.catalog import Catalog
from astrocats.catalog.entry import ENTRY, Entry
from astrocats.catalog.key import KEY_TYPES, Key
from astrocats.catalog.photometry import (PHOTOMETRY, set_pd_mag_from_counts,
set_pd_mag_from_flux_density)
from astrocats.catalog.quantity import QUANTITY
from astrocats.catalog.source import SOURCE
from astrocats.catalog.utils import jd_to_mjd
from astropy.io.ascii import Cds, Latex, read
from astropy.time import Time as astrotime
from six import string_types
from mosfit.constants import KS_DAYS
from mosfit.utils import (entabbed_json_dump, get_mosfit_hash, is_bibcode,
is_date, is_datum, is_number, listify, name_clean,
replace_multiple)
class Converter(object):
"""Convert ASCII formats to Open Catalog JSON schemas."""
_MONTH_IDS = OrderedDict(
(('January', '01'), ('February', '02'), ('March', '03'),
('April', '04'), ('June', '06'), ('July', '07'), ('August', '08'),
('September', '09'), ('October', '10'), ('November', '11'),
('December', '12'), ('Jan', '01'), ('Feb', '02'), ('Mar', '03'),
('Apr', '04'), ('May', '05'), ('Jun', '06'), ('Jul', '07'),
('Aug', '08'), ('Sep', '09'), ('Oct', '10'), ('Nov', '11'), ('Dec',
'12')))
_DEFAULT_SOURCE = 'MOSFiT paper'
_TRUE_VALS = ['t', 'true', 'T', 'True', '1', 'y', 'Y']
_FALSE_VALS = ['f', 'false', 'F', 'False', '0', 'n', 'N']
_EMPTY_VALS = ['nodata']
    def __init__(self, printer, require_source=False, guess=True, cache_path='',
                 **kwargs):
        """Initialize.

        Args:
            printer: Printer used for user-facing messages and prompts.
            require_source: If True, input data must carry a source/reference.
            guess: If True, column meanings may be guessed without prompting.
            cache_path: Optional directory for the header-key pickle cache;
                defaults to this module's own directory when empty.
        """
        import pickle
        self._cache_path = cache_path
        # Resolve the cache directory: the user-supplied path (created on
        # demand) or the directory containing this module.
        if self._cache_path:
            self._path = cache_path
            if not os.path.isdir(os.path.join(self._path,'cache')):
                os.makedirs(os.path.join(self._path,'cache'))
        else:
            self._path = os.path.dirname(os.path.realpath(__file__))
        self._inflect = inflect.engine()
        self._printer = printer
        self._guess = guess
        self._require_source = require_source
        # Header tokens that commonly mark generic error/uncertainty columns.
        self._estrs = [
            'err', '_err', 'err_', 'ERR', 'e_', '_e', '(err)', 'error',
            'uncertainty', 'sigma'
        ]
        # Magnitude-error and count-rate-error header tokens (extended below
        # with per-band combinations).
        self._emagstrs = self._estrs + [
            'magnitude error', 'e mag', 'e magnitude', 'dmag', 'mag err',
            'magerr', 'mag error'
        ]
        self._ecntstrs = self._estrs + [
            'flux error', 'e flux', 'e counts', 'count err', 'flux err',
            'countrate error', 'countrate err', 'e_flux'
        ]
        # Prefixes marking upper-limit columns.
        self._lmagstrs = ['l_']
        # Photometric band designations recognised in headers.
        self._band_names = [
            'U', 'B', 'V', 'R', 'I', 'J', 'H', 'K', 'K_s', "Ks", "K'", 'u',
            'g', 'r', 'i', 'z', 'y', 'W1', 'W2', 'M2', "u'", "g'", "r'", "i'",
            "z'", 'C', 'Y', 'I1', 'I2', 'I3', 'I4', 'Open'
        ]
        # All error-token/band combinations (both orderings for counts,
        # error-prefix-first for magnitudes).
        ebands = [
            a + b for a, b in chain(
                product(self._ecntstrs, self._band_names),
                product(self._band_names, self._estrs))
        ]
        lbands = [
            a + b for a, b in chain(product(self._lmagstrs, self._band_names))
        ]
        self._emagstrs += ebands
        self._ecntstrs += ebands
        self._lmagstrs += lbands
        # The header-key table is expensive to build, so it is cached in a
        # pickle keyed on the current mosfit hash.
        key_cache_path = os.path.join(
            self._path, 'cache',
            'key_cache_{}.pickle'.format(get_mosfit_hash()))
        hks_loaded = False
        if os.path.isfile(key_cache_path):
            try:
                self._header_keys = pickle.load(open(key_cache_path, 'rb'))
                hks_loaded = True
            except Exception:
                # Corrupt/stale cache: warn and rebuild from scratch.
                printer.message('bad_header_pickle', warning=True)
                if hasattr(self, '_header_keys'):
                    del (self._header_keys)
        if not hks_loaded:
            # Maps catalog keys to the header spellings that may denote them;
            # tuple entries carry an associated unit/format hint.
            # NOTE(review): PHOTOMETRY.UPPER_LIMIT appears twice below -- the
            # later entry overwrites the earlier one in the OrderedDict;
            # confirm whether the two alias lists should be merged.
            self._header_keys = OrderedDict((
                (PHOTOMETRY.TIME, [
                    'time', 'mjd', ('jd', 'jd'), ('julian date', 'jd'),
                    ('date', 'yyyy-mm-dd'), 'day',
                    ('kiloseconds', 'kiloseconds')
                ]),
                (PHOTOMETRY.SYSTEM, ['system', 'magsys', 'magnitude system']),
                (PHOTOMETRY.MAGNITUDE,
                 ['vega mag', 'ab mag', 'mag', 'magnitude']),
                (PHOTOMETRY.E_MAGNITUDE, self._emagstrs),
                (PHOTOMETRY.UPPER_LIMIT, self._lmagstrs),
                (PHOTOMETRY.TELESCOPE, ['tel', 'telescope']),
                (PHOTOMETRY.INSTRUMENT, ['inst', 'instrument']),
                (PHOTOMETRY.OBSERVER, ['observer']),
                (PHOTOMETRY.OBSERVATORY, ['observatory']),
                (PHOTOMETRY.BAND,
                 ['passband', 'band', 'filter', 'filt', 'flt']),
                (PHOTOMETRY.E_LOWER_MAGNITUDE, [
                    a + ' ' + b for a, b in chain(
                        product(self._emagstrs, ['minus', 'lower']),
                        product(['minus', 'lower'], self._emagstrs))
                ]),
                (PHOTOMETRY.E_UPPER_MAGNITUDE, [
                    a + ' ' + b for a, b in chain(
                        product(self._emagstrs, ['plus', 'upper']),
                        product(['plus', 'upper'], self._emagstrs))
                ]),
                (PHOTOMETRY.UPPER_LIMIT,
                 ['upper limit', 'upperlimit', 'l_mag', 'limit']),
                (PHOTOMETRY.COUNT_RATE,
                 ['count', 'counts', 'flux', 'count rate']),
                (PHOTOMETRY.E_COUNT_RATE, self._ecntstrs),
                (PHOTOMETRY.FLUX_DENSITY, ['flux density', 'fd', 'f_nu']),
                (PHOTOMETRY.E_FLUX_DENSITY,
                 ['e_flux_density', 'flux density error', 'e_fd', 'sigma_nu']),
                (PHOTOMETRY.U_FLUX_DENSITY, []),
                (PHOTOMETRY.ZERO_POINT, ['zero point', 'zp']),
                ('reference', ['reference', 'bibcode', 'source', 'origin']),
                (ENTRY.NAME, [
                    'event', 'transient', 'name', 'supernova', 'sne', 'id',
                    'identifier', 'object'
                ]),
                (ENTRY.REDSHIFT, ['redshift']),
                (ENTRY.HOST, ['host']),
                (ENTRY.LUM_DIST,
                 ['lumdist', 'luminosity distance', 'distance']),
                (ENTRY.COMOVING_DIST, ['comoving distance']),
                (ENTRY.RA, ['ra', 'right ascension', 'right_ascension']),
                (ENTRY.DEC, ['dec', 'declination']),
                (ENTRY.EBV, ['ebv', 'extinction']),
                # At the moment transient-specific keys are not in astrocats.
                (Key('claimedtype', KEY_TYPES.STRING),
                 ['claimedtype', 'type', 'claimed_type', 'claimed type'])))
        # Keys the converter must resolve before data can be ingested.
        self._critical_keys = [
            PHOTOMETRY.TIME, PHOTOMETRY.MAGNITUDE, PHOTOMETRY.COUNT_RATE,
            PHOTOMETRY.FLUX_DENSITY, PHOTOMETRY.BAND, PHOTOMETRY.E_COUNT_RATE,
            PHOTOMETRY.E_FLUX_DENSITY, PHOTOMETRY.ZERO_POINT
        ]
        # Keys that improve the data but are not strictly required.
        self._helpful_keys = [
            PHOTOMETRY.E_MAGNITUDE, PHOTOMETRY.INSTRUMENT, PHOTOMETRY.TELESCOPE
        ]
        self._optional_keys = [
            PHOTOMETRY.ZERO_POINT, PHOTOMETRY.E_MAGNITUDE,
            PHOTOMETRY.U_FLUX_DENSITY
        ]
        # Mutually-exclusive measurement columns (magnitude vs counts vs
        # flux density).
        self._mc_keys = [
            PHOTOMETRY.MAGNITUDE, PHOTOMETRY.COUNT_RATE,
            PHOTOMETRY.FLUX_DENSITY
        ]
        # Keys whose relevance depends on which measurement column is used.
        self._dep_keys = [
            PHOTOMETRY.E_MAGNITUDE, PHOTOMETRY.E_COUNT_RATE,
            PHOTOMETRY.E_FLUX_DENSITY, PHOTOMETRY.U_FLUX_DENSITY,
            PHOTOMETRY.BAND
        ]
        # Error-type keys whose values must be numeric to be kept.
        self._purge_non_numeric_keys = [
            PHOTOMETRY.E_MAGNITUDE, PHOTOMETRY.E_LOWER_MAGNITUDE,
            PHOTOMETRY.E_UPPER_MAGNITUDE, PHOTOMETRY.E_COUNT_RATE,
            PHOTOMETRY.E_LOWER_COUNT_RATE, PHOTOMETRY.E_UPPER_COUNT_RATE,
            PHOTOMETRY.E_FLUX, PHOTOMETRY.E_LOWER_FLUX,
            PHOTOMETRY.E_UPPER_FLUX, PHOTOMETRY.E_UNABSORBED_FLUX,
            PHOTOMETRY.E_LOWER_UNABSORBED_FLUX,
            PHOTOMETRY.E_UPPER_UNABSORBED_FLUX
        ]
        self._positive_keys = [PHOTOMETRY.MAGNITUDE
                               ] + self._purge_non_numeric_keys
        self._bool_keys = [PHOTOMETRY.UPPER_LIMIT]
        # Keys the user may be asked to specify explicitly.
        self._specify_keys = [
            PHOTOMETRY.BAND, PHOTOMETRY.INSTRUMENT, PHOTOMETRY.TELESCOPE
        ]
        # Keys that attach to the entry itself rather than to a photometry row.
        self._entry_keys = [
            ENTRY.COMOVING_DIST, ENTRY.REDSHIFT, ENTRY.LUM_DIST, ENTRY.RA,
            ENTRY.DEC, ENTRY.EBV, ENTRY.HOST,
            Key('claimedtype', KEY_TYPES.STRING)
        ]
        self._use_mc = False
        # Regex matching any month name for date parsing.
        self._month_rep = re.compile(r'\b(' +
                                     '|'.join(self._MONTH_IDS.keys()) + r')\b')
        self._converted = []
        if not hks_loaded:
            # Also accept underscore-joined and fully-joined variants of each
            # multi-word header spelling, then persist the table to the cache.
            for key in self._header_keys.keys():
                for val in self._header_keys[key]:
                    for i in range(val.count(' ')):
                        rep = val.replace(' ', '_', i + 1)
                        if rep not in self._header_keys[key]:
                            self._header_keys[key].append(rep)
                    for i in range(val.count(' ')):
                        rep = val.replace(' ', '', i + 1)
                        if rep not in self._header_keys[key]:
                            self._header_keys[key].append(rep)
            pickle.dump(self._header_keys, open(key_cache_path, 'wb'))
def generate_event_list(self, event_list):
"""Generate a list of events and/or convert events to JSON format."""
prt = self._printer
cidict = OrderedDict()
intro_shown = False
check_all_files = None
shared_sources = []
new_event_list = []
previous_file = None
for event in event_list:
rsource = {SOURCE.NAME: self._DEFAULT_SOURCE}
use_self_source = None
new_events = []
toffset = Decimal('0')
if ('.' in event and os.path.isfile(event)
and not event.endswith('.json')):
with open(event, 'r') as f:
ftxt = f.read()
# Try a couple of table formats from astropy.
table = None
try:
table = read(ftxt, Reader=Cds, guess=False)
except Exception:
pass
else:
prt.message('convert_cds')
flines = [table.colnames
] + [list(x) for x in np.array(table).tolist()]
for i in range(len(flines)):
flines[i] = [str(x) for x in flines[i]]
try:
table = read(ftxt, Reader=Latex, guess=False)
except Exception:
pass
else:
prt.message('convert_latex')
flines = [table.colnames
] + [list(x) for x in np.array(table).tolist()]
if table is None:
# Count to try and determine delimiter.
delims = [' ', '\t', ',', ';', '|', '&']
delimnames = [
'Space: ` `', 'Tab: `\t`', 'Comma: `,`',
'Semi-colon: `;`', 'Bar: `|`', 'Ampersand: `&`'
]
delim = None
delimcounts = [
re.sub(
re.escape(y) + '+', y,
re.sub(
' ?[' + ''.join(
[re.escape(x)
for x in delims if x != y]) + ']' + ' ?',
'', ftxt)).count(y) for y in delims
]
maxdelimcount = max(delimcounts)
# Make sure at least one delimeter per line.
maxdelimavg = delimcounts[delimcounts.index(
maxdelimcount)] / len(ftxt.splitlines())
if maxdelimavg >= 1.0:
delim = delims[delimcounts.index(maxdelimcount)]
# If two delimiter options are close in count, ask user.
for i, x in enumerate(delimcounts):
if x > 0.5 * maxdelimcount and delims[i] != delim:
delim = None
if delim is None and maxdelimavg >= 1.0:
odelims = list(
np.array(delimnames)[np.array(delimcounts) > 0])
dchoice = prt.prompt(
'delim',
kind='option',
options=odelims,
none_string=prt.text('no_delimiter'))
if is_number(dchoice):
delim = delims[dchoice - 1]
if delim is not None:
ad = list(delims)
ad.remove(delim)
ad = ''.join(ad)
fsplit = ftxt.splitlines()
# If none of the rows contain numeric data, the file
# is likely a list of transient names.
flines = list(fsplit)
if (len(flines) and (not any(
any([
is_datum(x.strip()) or x == ''
for x in (y.split(delim)
if delim is not None else listify(y))
]) for y in flines) or len(flines) == 1)):
new_events = [
it.strip() for s in flines
for it in (s.split(delim)
if delim is not None else listify(s))
]
new_event_list.extend(new_events)
continue
if delim is None:
raise ValueError(prt.text('delimiter_not_found'))
if not intro_shown:
prt.message('converter_info')
intro_shown = True
prt.message('converting_to_json', [event])
if table is None:
# See if we need to append blank errors to upper limits.
tsplit = [
replace_multiple(x, ['$', '\\pm', '±', '-or+'],
delim).strip(ad + '()# ').replace(
'′', "'") for x in fsplit
]
append_missing_errs = False
for fl in tsplit:
dfl = list(csv.reader([fl], delimiter=delim))[0]
if any([is_number(x.strip('(<>≤≥'))
for x in dfl]) and any([
any([
y in x
for y in ['(', '<', '>', '≥', '≤']
]) for x in dfl
]):
append_missing_errs = True
break
fsplit = [
replace_multiple(
x, ['$', '\\pm', '±', '-or+'], delim).replace(
'(', delim + '(').strip(ad + '()# ').replace(
'′', "'") for x in fsplit
]
flines = []
for fs in fsplit:
# Replace repeated spaces if fixed-width
if delim in [' ']:
fsn = re.sub(r'(\s)\1+', r'\1', fs)
else:
fsn = fs
flines.append(
list(csv.reader([fsn], delimiter=delim))[0])
flines = [[x.strip(ad + '#$()\\') for x in y]
for y in flines]
# Find band columns if they exist and insert error columns
# if they don't exist.
for fi, fl in enumerate(list(flines)):
flcopy = list(fl)
offset = 0
if not any([is_datum(x) for x in fl]):
for fci, fc in enumerate(fl):
if (fc in self._band_names and
(fci == len(fl) - | |
# -*- coding: utf-8 -*-
# Copyright 2015 <NAME>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import unicode_literals
from __future__ import print_function
import click
import os
import platform
import subprocess
import traceback
import webbrowser
from prompt_toolkit import AbortAction, Application, CommandLineInterface
from prompt_toolkit.enums import DEFAULT_BUFFER
from prompt_toolkit.filters import Always, HasFocus, IsDone
from prompt_toolkit.interface import AcceptAction
from prompt_toolkit.layout.processors import \
HighlightMatchingBracketProcessor, ConditionalProcessor
from prompt_toolkit.buffer import Buffer
from prompt_toolkit.shortcuts import create_default_layout, create_eventloop
from prompt_toolkit.history import FileHistory
from prompt_toolkit.key_binding.input_processor import KeyPress
from prompt_toolkit.keys import Keys
from prompt_toolkit.auto_suggest import AutoSuggestFromHistory
from awscli import completer as awscli_completer
from .completer import AwsCompleter
from .lexer import CommandLexer
from .config import Config
from .style import StyleFactory
from .keys import KeyManager
from .toolbar import Toolbar
from .commands import AwsCommands
from .logger import SawsLogger
from .__init__ import __version__
class Saws(object):
"""Encapsulates the Saws CLI.
Attributes:
* aws_cli: An instance of prompt_toolkit's CommandLineInterface.
* key_manager: An instance of KeyManager.
* config: An instance of Config.
* config_obj: An instance of ConfigObj, reads from ~/.sawsrc.
* theme: A string representing the lexer theme.
* logger: An instance of SawsLogger.
* all_commands: A list of all commands, sub_commands, options, etc
from data/SOURCES.txt.
* commands: A list of commands from data/SOURCES.txt.
* sub_commands: A list of sub_commands from data/SOURCES.txt.
* completer: An instance of AwsCompleter.
"""
PYGMENTS_CMD = ' | pygmentize -l json'
def __init__(self, refresh_resources=True):
"""Inits Saws.
Args:
* refresh_resources: A boolean that determines whether to
refresh resources.
Returns:
None.
"""
self.aws_cli = None
self.key_manager = None
self.config = Config()
self.config_obj = self.config.read_configuration()
self.theme = self.config_obj[self.config.MAIN][self.config.THEME]
self.logger = SawsLogger(
__name__,
self.config_obj[self.config.MAIN][self.config.LOG_FILE],
self.config_obj[self.config.MAIN][self.config.LOG_LEVEL]).logger
self.all_commands = AwsCommands().all_commands
self.commands = \
self.all_commands[AwsCommands.CommandType.COMMANDS.value]
self.sub_commands = \
self.all_commands[AwsCommands.CommandType.SUB_COMMANDS.value]
self.completer = AwsCompleter(
awscli_completer,
self.all_commands,
self.config,
self.config_obj,
self.log_exception,
fuzzy_match=self.get_fuzzy_match(),
shortcut_match=self.get_shortcut_match())
if refresh_resources:
self.completer.refresh_resources_and_options()
self._create_cli()
def log_exception(self, e, traceback, echo=False):
"""Logs the exception and traceback to the log file ~/.saws.log.
Args:
* e: A Exception that specifies the exception.
* traceback: A Traceback that specifies the traceback.
* echo: A boolean that specifies whether to echo the exception
to the console using click.
Returns:
None.
"""
self.logger.debug('exception: %r.', str(e))
self.logger.error("traceback: %r", traceback.format_exc())
if echo:
click.secho(str(e), fg='red')
def set_color(self, color):
"""Setter for color output mode.
Used by prompt_toolkit's KeyBindingManager.
KeyBindingManager expects this function to be callable so we can't use
@property and @attrib.setter.
Args:
* color: A boolean that represents the color flag.
Returns:
None.
"""
self.config_obj[self.config.MAIN][self.config.COLOR] = color
def get_color(self):
"""Getter for color output mode.
Used by prompt_toolkit's KeyBindingManager.
KeyBindingManager expects this function to be callable so we can't use
@property and @attrib.setter.
Args:
* None.
Returns:
A boolean that represents the color flag.
"""
return self.config_obj[self.config.MAIN].as_bool(self.config.COLOR)
def set_fuzzy_match(self, fuzzy):
"""Setter for fuzzy matching mode
Used by prompt_toolkit's KeyBindingManager.
KeyBindingManager expects this function to be callable so we can't use
@property and @attrib.setter.
Args:
* color: A boolean that represents the fuzzy flag.
Returns:
None.
"""
self.config_obj[self.config.MAIN][self.config.FUZZY] = fuzzy
self.completer.fuzzy_match = fuzzy
def get_fuzzy_match(self):
"""Getter for fuzzy matching mode
Used by prompt_toolkit's KeyBindingManager.
KeyBindingManager expects this function to be callable so we can't use
@property and @attrib.setter.
Args:
* None.
Returns:
A boolean that represents the fuzzy flag.
"""
return self.config_obj[self.config.MAIN].as_bool(self.config.FUZZY)
def set_shortcut_match(self, shortcut):
"""Setter for shortcut matching mode
Used by prompt_toolkit's KeyBindingManager.
KeyBindingManager expects this function to be callable so we can't use
@property and @attrib.setter.
Args:
* color: A boolean that represents the shortcut flag.
Returns:
None.
"""
self.config_obj[self.config.MAIN][self.config.SHORTCUT] = shortcut
self.completer.shortcut_match = shortcut
def get_shortcut_match(self):
"""Getter for shortcut matching mode
Used by prompt_toolkit's KeyBindingManager.
KeyBindingManager expects this function to be callable so we can't use
@property and @attrib.setter.
Args:
* None.
Returns:
A boolean that represents the shortcut flag.
"""
return self.config_obj[self.config.MAIN].as_bool(self.config.SHORTCUT)
def refresh_resources_and_options(self):
    """Force a refresh of AWS resources and CLI options for completion.

    Used by prompt_toolkit's KeyBindingManager.

    Returns:
        None.
    """
    # Delegate to the completer; force_refresh bypasses any cached data.
    self.completer.refresh_resources_and_options(force_refresh=True)
def handle_docs(self, text=None, from_fkey=False):
    """Displays contextual web docs for `F9` or the `docs` command.

    Displays the web docs specific to the currently entered:

        * (optional) command
        * (optional) subcommand

    If no command or subcommand is present, the docs index page is shown.

    Docs are only displayed if:

        * from_fkey is True
        * from_fkey is False and `docs` is found in text

    Args:
        * text: A string representing the input command text.
        * from_fkey: A boolean representing whether this function is
            being executed from an `F9` key press.

    Returns:
        A boolean representing whether the web docs were shown.
    """
    base_url = 'http://docs.aws.amazon.com/cli/latest/reference/'
    index_html = 'index.html'
    if text is None:
        text = self.aws_cli.current_buffer.document.text
    # If the user hit the F9 key, append 'docs' to the text
    if from_fkey:
        text = text.strip() + ' ' + AwsCommands.AWS_DOCS
    tokens = text.split()
    if len(tokens) > 2 and tokens[-1] == AwsCommands.AWS_DOCS:
        prev_word = tokens[-2]
        # If we have a command, build the url
        if prev_word in self.commands:
            prev_word = prev_word + '/'
            url = base_url + prev_word + index_html
            webbrowser.open(url)
            return True
        # if we have a command and subcommand, build the url
        elif prev_word in self.sub_commands:
            command_url = tokens[-3] + '/'
            sub_command_url = tokens[-2] + '.html'
            url = base_url + command_url + sub_command_url
            webbrowser.open(url)
            return True
    # BUG FIX: the docs index was previously opened here *unconditionally*,
    # launching a browser even when neither F9 was pressed nor `docs` typed
    # (and opening it twice when they were).  Only open it when requested.
    # If we still haven't opened the help doc at this point and the
    # user hit the F9 key or typed docs, just open the main docs index
    if from_fkey or AwsCommands.AWS_DOCS in tokens:
        webbrowser.open(base_url + index_html)
        return True
    return False
def _handle_cd(self, text):
"""Handles a `cd` shell command by calling python's os.chdir.
Simply passing in the `cd` command to subprocess.call doesn't work.
Note: Changing the directory within Saws will only be in effect while
running Saws. Exiting the program will return you to the directory
you were in prior to running Saws.
Attributes:
* text: A string representing the input command text.
Returns:
A boolean representing a `cd` command was found and handled.
"""
CD_CMD = 'cd'
stripped_text = text.strip()
if stripped_text.startswith(CD_CMD):
directory = ''
if stripped_text == CD_CMD:
# Treat `cd` as a change to the root directory.
# os.path.expanduser does this in a cross platform manner.
directory = os.path.expanduser('~')
else:
tokens = text.split(CD_CMD + ' ')
directory = tokens[-1]
try:
os.chdir(directory)
except OSError as e:
self.log_exception(e, traceback, echo=True)
return True
return False
def _colorize_output(self, text):
    """Append the pygments pipe command so the output gets highlighted.

    Highlighting only happens when ALL of the following hold:

        * The color option is enabled
        * The command will be handled by the `aws-cli`
        * The text does not contain the `configure` command
        * The text does not contain the `help` command (which already
          highlights its output) or an existing pipe

    Args:
        * text: A string that represents the input command text.

    Returns:
        A string: the original text when no highlighting applies,
        otherwise the stripped text with the pygments command appended.
    """
    command = text.strip()
    if not self.get_color() or command == '':
        return text
    if AwsCommands.AWS_COMMAND not in command.split():
        return text
    skip_markers = (AwsCommands.AWS_CONFIGURE,
                    AwsCommands.AWS_HELP,
                    '|')
    if any(marker in command for marker in skip_markers):
        return text
    return command + self.PYGMENTS_CMD
def _handle_keyboard_interrupt(self, e, platform):
    """Handle Control-C gracefully on Mac/Unix/Linux; re-raise on Windows.

    A user may interrupt a long-running AWS command with Control-C while
    wanting to keep working in Saws, so on POSIX platforms the shell is
    kept alive.  On Windows the "Terminate batch job (Y/N)" prompt makes
    graceful handling tricky, so the KeyboardInterrupt is re-raised.

    Args:
        * e: A KeyboardInterrupt.
        * platform: A string that denotes the platform such as
            'Windows', 'Darwin', etc.

    Returns:
        None

    Raises:
        KeyboardInterrupt: when running on Windows.
    """
    if platform == 'Windows':
        raise e
    # Clear the renderer and simulate an Enter key press so the
    # prompt is redrawn cleanly.
    self.aws_cli.renderer.clear()
    self.aws_cli.input_processor.feed(KeyPress(Keys.ControlM, u''))
    self.aws_cli.input_processor.process_keys()
def _process_command(self, | |
<filename>miri/datamodels/miri_spectral_spatial_resolution_model.py
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
An extension to the standard STScI data model, which defines the MIRI
spectral and spatial resolution models.
:Reference:
The STScI jwst_lib documentation. See
https://jwst-pipeline.readthedocs.io/en/latest/jwst/datamodels/index.html
:History:
30 Aug 2016: Created.
12 Oct 2016: Renamed class from MiriMrsSpectralResolutionModel to
MiriMrsResolutionModel and data type from SPECRES to
RESOL to reflect the fact that this data model
describes both the spectral and spatial resolution of
the MRS. psf_alpha extension renamed to psf_fwhm_alpha
and psf_beta extension renamed to psf_fwhm_beta.
17 Oct 2016: Data model schema modified to match above changes.
SUB_BAND column added to RESOLVING_POWER table.
Identical copies of the same CDP file generated for
MIRIFULONG and MIRIFUSHORT.
06 Dec 2016: SUB_BAND column added to YAML version of schema.
15 Jun 2017: meta.reffile schema level removed to match changes in the
JWST build 7.1 data models release. meta.reffile.type also
changed to meta.reftype. TYPE keyword replaced by DATAMODL.
26 Sep 2018: Major reorganisation of the data model by <NAME>. The code
supports both the old and new models but will give a warning if
data structured according to the old model is detected.
04 Oct 2018: Define exposure type.
08 Oct 2018: Added some reconstruction functions.
17 Oct 2018: The string 'ANY' is no longer recommended within CDP metadata.
'N/A' should be used instead.
14 Nov 2018: Explicitly set table column units based on the tunit definitions
in the schema. All units now defined in the schema and all
tables defined in the module test.
19 Nov 2018: Documentation updated from Jeb's model. RESOLVING_POWER marked
as obsolete (to be removed after CDP-7 release).
30 Jan 2019: self.meta.model_type now set to the name of the STScI data
model this model is designed to match (skipped if there isn't
a corresponding model defined in ancestry.py).
26 Mar 2020: Ensure the model_type remains as originally defined when saving
to a file.
09 Jun 2021: Reverted back to the old CDP-6 data model used before 26 Sep 2018.
@author: <NAME> (UKATC), <NAME> (CSIC)
"""
import warnings
import numpy as np
import scipy
# Import the MIRI base data model and utilities.
from miri.datamodels.ancestry import get_my_model_type
from miri.datamodels.miri_model_base import MiriDataModel
# List all classes and global functions here.
__all__ = ['MiriMrsResolutionModel', 'MAX_NELEM']
MAX_NELEM = 50 # Maximum size of the lcoeff arrays
class MiriMrsResolutionModel(MiriDataModel):
    """
    A generic data model for a MIRI spectral resolution table,
    based on the STScI base model, DataModel.

    See MIRI-RP-00514-NLC for a detailed description of the content
    of the data model.

    :Parameters:

    init: shape tuple, file path, file object, pyfits.HDUList, numpy array
        An optional initializer for the data model, which can have one
        of the following forms:

        * None: A default data model with no shape.
        * Shape tuple: Initialize with empty data of the given shape.
        * File path: Initialize from the given file.
        * Readable file object: Initialize from the given file object.
        * pyfits.HDUList: Initialize from the given pyfits.HDUList.

    resolving_power: list of tuples or numpy record array (optional)
        Either: A list of tuples containing columns in the spectral
        resolving power table;
        Or: A numpy record array containing the same information as above.
        A spectral resolution table must either be defined in the
        initializer or in this parameter. A blank table is not allowed.
    psf_fwhm_alpha: list of tuples or numpy record array (optional)
        Either: A list of tuples containing polynomial coefficients for
        alpha FWHM;
        Or: A numpy record array containing the same information as above.
        The table must either be defined in the
        initializer or in this parameter. A blank table is not allowed.
    psf_fwhm_beta: list of tuples or numpy record array (optional)
        Either: A list of tuples containing polynomial coefficients for
        beta FWHM;
        Or: A numpy record array containing the same information as above.
        The table must either be defined in the
        initializer or in this parameter. A blank table is not allowed.
    \*\*kwargs:
        All other keyword arguments are passed to the DataModel initialiser.
        See the jwst_lib documentation for the meaning of these keywords.

    """
    schema_url = "miri_spectral_spatial_resolution_mrs.schema"
    # Column names for the three tables, as declared in the schema.
    fieldnames_resolving = ('SUB_BAND', 'R_CENTRE', 'R_A_LOW', 'R_B_LOW', 'R_C_LOW',
                            'R_A_HIGH', 'R_B_HIGH', 'R_C_HIGH',
                            'R_A_AVG', 'R_B_AVG', 'R_C_AVG')
    fieldnames_alpha = ('A_CUTOFF', 'A_A_SHORT', 'A_B_SHORT',
                        'A_A_LONG', 'A_B_LONG')
    fieldnames_beta = ('B_CUTOFF', 'B_A_SHORT', 'B_B_SHORT',
                       'B_A_LONG', 'B_B_LONG')

    def __init__(self, init=None, resolving_power=None, psf_fwhm_alpha=None,
                 psf_fwhm_beta=None, **kwargs):
        """
        Initialises the MiriMrsResolutionModel class.

        Parameters: See class doc string.
        """
        super(MiriMrsResolutionModel, self).__init__(init=init, **kwargs)

        # Data type is spectral resolution.
        self.meta.reftype = 'RESOL'
        # Initialise the model type.
        self._init_data_type()
        # This is a reference data model.
        self._reference_model()

        # Assign any tables supplied by the caller.  The three tables share
        # identical assignment/error-handling logic, so it is factored into
        # a single helper (it was previously duplicated three times).
        self._assign_table('resolving_power', resolving_power)
        self._assign_table('psf_fwhm_alpha', psf_fwhm_alpha)
        self._assign_table('psf_fwhm_beta', psf_fwhm_beta)

        # Define the exposure type (if not already contained in the data model)
        # NOTE: This will only define an exposure type when a valid detector
        # is defined in the metadata.
        if not self.meta.exposure.type:
            self.set_exposure_type()

        # Copy the table column units from the schema, if defined.
        # set_table_units works by side effect; its return values were
        # previously bound to unused local variables.
        self.set_table_units('resolving_power')
        self.set_table_units('psf_fwhm_alpha')
        self.set_table_units('psf_fwhm_beta')

    def _assign_table(self, name, table):
        """
        Assign the named table attribute, converting a failed assignment
        into a TypeError with a clearer message (chained to the original
        exception).  A value of None leaves the attribute untouched.
        """
        if table is None:
            return
        try:
            setattr(self, name, table)
        except (ValueError, TypeError) as e:
            strg = "%s must be a numpy record array or list of records." % name
            strg += "\n   %s" % str(e)
            raise TypeError(strg) from e

    def _init_data_type(self):
        # Initialise the data model type to the name of the matching STScI
        # data model (None when no corresponding model exists in ancestry.py).
        model_type = get_my_model_type(self.__class__.__name__)
        self.meta.model_type = model_type

    def on_save(self, path):
        """Keep the model type as originally defined when saving to a file."""
        super(MiriMrsResolutionModel, self).on_save(path)
        # Re-initialise data type on save.
        self._init_data_type()

    # TODO: Is this function needed?
    def __str__(self):
        """
        Return the contents of the spectral-spatial resolution object
        as a readable string.
        """
        # Start with the data object title and metadata.
        strg = self.get_title_and_metadata()

        # Describe the spectral resolution tables.
        if self.resolving_power is not None:
            strg += self.get_data_str('resolving_power', underline=True, underchar="-")
        if self.psf_fwhm_alpha is not None:
            strg += self.get_data_str('psf_fwhm_alpha', underline=True, underchar="-")
        if self.psf_fwhm_beta is not None:
            strg += self.get_data_str('psf_fwhm_beta', underline=True, underchar="-")
        return strg

    def reconstruct_mlsf_model(self):
        """
        Reconstruct the best-fit MLSF profile using the code
        provided in the MRS spectral resolution model document (3.1.6)
        """
        raise NotImplementedError("Function reconstruct_mlsf_model not implemented yet")

    def regenerate_phase1_spline(self):
        """
        Regenerate the phase 1 spline using the formula
        provided in the MRS spectral resolution model document (3.1.7)

        :Parameters:

        None

        :Returns:

        polynomial: Legendre
            Legendre polynomial object
        """
        raise NotImplementedError("Function regenerate_phase1_spline is no longer available.")

    def regenerate_phase2_model(self, slice):
        """
        Regenerate the phase 2 model for the given slice using the formula
        provided in the MRS spectral resolution model document (3.1.8)

        :Parameters:

        slice: int
            The slice required

        :Returns:

        polynomial: Legendre
            Legendre polynomial object
        """
        # NOTE: the parameter name `slice` shadows the builtin, but renaming
        # it would break keyword callers, so it is kept for compatibility.
        raise NotImplementedError("Function regenerate_phase2_model is no longer available.")

    def regenerate_phase3_model(self, slice):
        """
        Regenerate the phase 3 model for the given slice using the formula
        provided in the MRS spectral resolution model document (3.1.9)

        :Parameters:

        slice: int
            The slice required

        :Returns:

        polynomial: Legendre
            Legendre polynomial object
        """
        raise NotImplementedError("Function regenerate_phase3_model is no longer available.")

    def reconstruct_etalon_model(self):
        """
        Reconstruct the etalon line fit using the code
        provided in the MRS spectral resolution model document (3.1.10)
        """
        raise NotImplementedError("Function reconstruct_etalon_model not available")
#
# A minimal test is run when this file is run as a main program.
# For a more substantial test see miri/datamodels/tests.
#
if __name__ == '__main__':
print("Testing the MiriMrsResolutionModel module.")
PLOTTING = False
SAVE_FILES = True
# Sub-bands
sb = ['1SHORT', '1MEDIUM', '1LONG',
'2SHORT', '2MEDIUM', '2LONG',
'3SHORT', '3MEDIUM', '3LONG',
'4SHORT', '4MEDIUM', '4LONG'
]
# Centros
cntr = [5.3, 6.15, 7.1, 8.15, 9.4, 10.9, 12.5, 14.5, 16.75, 19.2, 22.25, 26.05]
# Low limits
al = [2745.211671, 2737.58174, 2691.826643, 2716.566802, 2575.145064, 2563.664138, 2469.622611, 1864.309643, 2071.315475, 1899.989987, 1547.295843, 1220.329908]
bl = [541.3274075, 506.8427022, 393.1504096, 324.8195469, 280.4117705, 257.0795746, 128.1952696, -24.10526842, 268.5610664, -50.94618217, 12.23096891, -146.1678896]
cl = [310.8853084, | |
shuffle
self.to_fit = to_fit
self.stack_feats = stack_feats
self.indices = None
self.on_epoch_end()
def __len__(self):
return int(np.floor(len(self.data[0]) / self.batch_size))
def on_epoch_end(self):
    """Rebuild the sample index array, shuffling it when enabled.

    Called by Keras at the end of every epoch (and from __init__) so that
    batch composition changes between epochs when self.shuffle is True.
    """
    sample_count = len(self.data[0])
    self.indices = np.arange(sample_count)
    if self.shuffle:
        np.random.shuffle(self.indices)
def __getitem__(self, index):
indices = self.indices[index * self.batch_size: (index + 1) * self.batch_size]
X = self._generate_X(indices)
if self.to_fit:
y = self._generate_y(indices)
return X, y
else:
return X
def _get_img_features(self, cached_path):
with open(cached_path, 'rb') as fid:
try:
img_features = pickle.load(fid)
except:
img_features = pickle.load(fid, encoding='bytes')
if self.process:
if self.global_pooling == 'max':
img_features = np.squeeze(img_features)
img_features = np.amax(img_features, axis=0)
img_features = np.amax(img_features, axis=0)
elif self.global_pooling == 'avg':
img_features = np.squeeze(img_features)
img_features = np.average(img_features, axis=0)
img_features = np.average(img_features, axis=0)
else:
img_features = img_features.ravel()
return img_features
def _generate_X(self, indices):
    """Build the list of input-feature batches for the given sample indices.

    Args:
        indices: Sequence of dataset indices forming this batch.

    Returns:
        A list with one numpy array of shape (batch_size, *data_size)
        per input type in self.input_type_list.
    """
    X = []
    for input_type_idx, input_type in enumerate(self.input_type_list):
        features_batch = np.empty((self.batch_size, *self.data_sizes[input_type_idx]))
        # Channels contributed by each sequence step; only used when
        # stacking optical-flow frames along the channel axis below.
        num_ch = features_batch.shape[-1] // len(self.data[input_type_idx][0])
        for i, index in enumerate(indices):
            if isinstance(self.data[input_type_idx][index][0], str):
                # Entries are file paths: load cached features from disk.
                cached_path_list = self.data[input_type_idx][index]
                for j, cached_path in enumerate(cached_path_list):
                    if 'flow' in input_type:
                        img_features = read_flow_file(cached_path)
                    else:
                        img_features = self._get_img_features(cached_path)
                    if len(cached_path_list) == 1:
                        # for static model if only one image in the sequence
                        features_batch[i,] = img_features
                    else:
                        if self.stack_feats and 'flow' in input_type:
                            # Stack flow frames along the channel axis.
                            features_batch[i, ..., j * num_ch:j * num_ch + num_ch] = img_features
                        elif 'scene_context' in input_type:
                            features_batch[i, j,] = tf.reshape(img_features, self.data_sizes[input_type_idx][1:])  # todo fix this custom size
                        elif 'context_split' in input_type:
                            features_batch[i, j, ] = tf.reshape(img_features, self.data_sizes[input_type_idx][1:])
                        else:
                            features_batch[i, j,] = img_features
            else:
                # Entries are already numeric features: copy them directly.
                features_batch[i,] = self.data[input_type_idx][index]
        X.append(features_batch)
    return X
def _generate_y(self, indices):
return np.array(self.labels[indices])
class PCPA_TR(ActionPredict):
    """
    Transformer variant of PCPA ("hierfusion PCPA_MULTI"): the first two
    input modalities are encoded with multi-head self-attention stacks
    (ModelTrunk) preceded by batch normalization, the remaining modalities
    with hierarchically fused RNN branches, followed by modality attention
    and a sigmoid crossing/not-crossing output.
    """

    def __init__(self,
                 num_hidden_units=256,
                 cell_type='gru',
                 **kwargs):
        """
        Class init function

        Args:
            num_hidden_units: Number of recurrent hidden units.
            cell_type: Type of RNN cell ('gru' selects GRU, anything else LSTM).
            **kwargs: Passed through to the ActionPredict initializer.
        """
        super().__init__(**kwargs)
        # Network parameters
        self._num_hidden_units = num_hidden_units
        self._rnn = self._gru if cell_type == 'gru' else self._lstm
        self._rnn_cell = GRUCell if cell_type == 'gru' else LSTMCell
        # 3D conv backbone used for visual feature branches.
        self._3dconv = C3DNet if self._backbone == 'c3d' else I3DNet
        self._multi_self_attention = ModelTrunk
        self.normlayer = BatchNormalization

    def get_data(self, data_type, data_raw, model_opts):
        """Assemble features, labels and model parameters for one data split.

        Args:
            data_type: Split name (e.g. 'train', 'val', 'test').
            data_raw: Raw sequence data for the split.
            model_opts: Options dict; obs_length must be 16 and the
                transformer hyper-parameters (t_*) must be present.

        Returns:
            Dict with the (generator or array) data, pedestrian ids,
            time-to-event, image paths, model parameters and class counts.
        """
        assert model_opts['obs_length'] == 16
        model_opts['normalize_boxes'] = False
        self._generator = model_opts.get('generator', False)
        data_type_sizes_dict = {}
        process = model_opts.get('process', True)
        dataset = model_opts['dataset']
        data, neg_count, pos_count = self.get_data_sequence(data_type, data_raw, model_opts)
        data_type_sizes_dict['box'] = data['box'].shape[1:]
        if 'speed' in data.keys():
            data_type_sizes_dict['speed'] = data['speed'].shape[1:]
        # if 'context_cnn' in data.keys():
        #     data_type_sizes_dict['context_cnn'] = data['context_cnn'].shape[1:]
        # Store the type and size of each image
        _data = []
        data_sizes = []
        data_types = []
        model_opts_3d = model_opts.copy()
        for d_type in model_opts['obs_input_type']:
            if 'local' in d_type or 'context' in d_type or 'mask' in d_type:
                # Visual modality: cached backbone features.
                if self._backbone == 'c3d':
                    model_opts_3d['target_dim'] = (112, 112)
                model_opts_3d['process'] = False
                features, feat_shape = self.get_context_data(model_opts_3d, data, data_type, d_type)
            elif 'pose' in d_type:
                # Pose modality: precomputed poses loaded from disk.
                path_to_pose, _ = get_path(save_folder='poses',
                                           dataset=dataset,
                                           save_root_folder='data/features')
                features = get_pose(data['image'],
                                    data['ped_id'],
                                    data_type=data_type,
                                    file_path=path_to_pose,
                                    dataset=model_opts['dataset'])
                feat_shape = features.shape[1:]
            else:
                # Numeric modality (box, speed, ...): taken as-is.
                features = data[d_type]
                feat_shape = features.shape[1:]
            _data.append(features)
            data_sizes.append(feat_shape)
            data_types.append(d_type)
        # create the final data file to be returned
        if self._generator:
            _data = (DataGenerator(data=_data,
                                   labels=data['crossing'],
                                   data_sizes=data_sizes,
                                   process=process,
                                   global_pooling=None,
                                   input_type_list=model_opts['obs_input_type'],
                                   batch_size=model_opts['batch_size'],
                                   shuffle=data_type != 'test',
                                   to_fit=data_type != 'test'), data['crossing'])  # set y to None
            # global_pooling=self._global_pooling,
        else:
            _data = (_data, data['crossing'])
        return {'data': _data,
                'ped_id': data['ped_id'],
                'tte': data['tte'],
                'image': data['image'],
                'data_params': {'data_types': data_types,
                                'data_sizes': data_sizes,
                                'transformer_params': {'time2vec_dim': model_opts['t_time2vec_dim'],
                                                       'num_heads': model_opts['t_num_heads'],
                                                       'head_size': model_opts['t_head_size'],
                                                       'num_layers': model_opts['t_num_layers'],
                                                       'dropout': model_opts['t_dropout']}
                                },
                'count': {'neg_count': neg_count, 'pos_count': pos_count}}

    def get_model(self, data_params):
        """Build and return the Keras model described by data_params."""
        return_sequence = True
        data_sizes = data_params['data_sizes']
        data_types = data_params['data_types']
        transformer_params = data_params['transformer_params']
        network_inputs = []
        encoder_outputs = []
        core_size = len(data_sizes)
        attention_size = self._num_hidden_units
        # One symbolic input per modality.
        for i in range(0, core_size):
            network_inputs.append(Input(shape=data_sizes[i], name='input_' + data_types[i]))
        # Modality 0: batch-norm followed by a Transformer encoder.
        x = self.normlayer(name='norm0_' + data_types[0], axis=-1, momentum=0.99, epsilon=0.0001)(network_inputs[0])
        x = self._multi_self_attention(name='enc0_' + data_types[0], representation_size=attention_size, **transformer_params)(x)
        encoder_outputs.append(x)
        # Modality 1: batch-norm followed by a Transformer encoder.
        # NOTE(review): this norm layer is named 'norm1_' + data_types[0] but
        # is applied to network_inputs[1] -- it looks like it should use
        # data_types[1]. Cosmetic (layer naming only), so left unchanged.
        x = self.normlayer(name='norm1_' + data_types[0], axis=-1, momentum=0.99, epsilon=0.0001)(network_inputs[1])
        x = self._multi_self_attention(name='enc1_' + data_types[1], representation_size=attention_size, **transformer_params)(x)
        encoder_outputs.append(x)
        # Modalities 2-4: hierarchically fused RNN branches (each RNN output
        # is concatenated with the next raw input before the next RNN).
        x = self._rnn(name='enc2_' + data_types[2], r_sequence=return_sequence)(network_inputs[2])
        current = [x, network_inputs[3]]
        x = Concatenate(name='concat_early3', axis=2)(current)
        x = self._rnn(name='enc3_' + data_types[3], r_sequence=return_sequence)(x)
        current = [x, network_inputs[4]]
        x = Concatenate(name='concat_early4', axis=2)(current)
        x = self._rnn(name='enc4_' + data_types[4], r_sequence=return_sequence)(x)
        encoder_outputs.append(x)
        # Fuse all modality encodings with an attention block, then classify.
        x = Concatenate(name='concat_modalities', axis=1)(encoder_outputs)
        encodings = attention_3d_block(x, dense_size=attention_size, modality='_modality')
        model_output = Dense(1, activation='sigmoid',
                             name='output_dense',
                             activity_regularizer=regularizers.l2(0.001))(encodings)
        net_model = Model(inputs=network_inputs,
                          outputs=model_output)
        net_model.summary()
        plot_model(net_model, to_file='PCPA_ATTENTION.png')
        return net_model
class PCPA_TR_FIXTTE(ActionPredict):
    """
    Variant of PCPA_TR ("hierfusion PCPA_MULTI") without the batch-norm
    layers and without external transformer hyper-parameters: the first two
    modalities go through default-configured self-attention encoders, the
    remaining ones through hierarchically fused RNN branches, followed by
    modality attention and a sigmoid crossing/not-crossing output.
    """

    def __init__(self,
                 num_hidden_units=256,
                 cell_type='gru',
                 **kwargs):
        """
        Class init function

        Args:
            num_hidden_units: Number of recurrent hidden units.
            cell_type: Type of RNN cell ('gru' selects GRU, anything else LSTM).
            **kwargs: Passed through to the ActionPredict initializer.
        """
        super().__init__(**kwargs)
        # Network parameters
        self._num_hidden_units = num_hidden_units
        self._rnn = self._gru if cell_type == 'gru' else self._lstm
        self._rnn_cell = GRUCell if cell_type == 'gru' else LSTMCell
        # 3D conv backbone used for visual feature branches.
        self._3dconv = C3DNet if self._backbone == 'c3d' else I3DNet
        self._multi_self_attention = ModelTrunk

    def get_data(self, data_type, data_raw, model_opts):
        """Assemble features, labels and model parameters for one data split.

        Same pipeline as PCPA_TR.get_data, except that no transformer
        hyper-parameters are forwarded in data_params.

        Args:
            data_type: Split name (e.g. 'train', 'val', 'test').
            data_raw: Raw sequence data for the split.
            model_opts: Options dict; obs_length must be 16.

        Returns:
            Dict with the (generator or array) data, pedestrian ids,
            time-to-event, image paths, model parameters and class counts.
        """
        assert model_opts['obs_length'] == 16
        model_opts['normalize_boxes'] = False
        self._generator = model_opts.get('generator', False)
        data_type_sizes_dict = {}
        process = model_opts.get('process', True)
        dataset = model_opts['dataset']
        data, neg_count, pos_count = self.get_data_sequence(data_type, data_raw, model_opts)
        data_type_sizes_dict['box'] = data['box'].shape[1:]
        if 'speed' in data.keys():
            data_type_sizes_dict['speed'] = data['speed'].shape[1:]
        # if 'context_cnn' in data.keys():
        #     data_type_sizes_dict['context_cnn'] = data['context_cnn'].shape[1:]
        # Store the type and size of each image
        _data = []
        data_sizes = []
        data_types = []
        model_opts_3d = model_opts.copy()
        for d_type in model_opts['obs_input_type']:
            if 'local' in d_type or 'context' in d_type or 'mask' in d_type:
                # Visual modality: cached backbone features.
                if self._backbone == 'c3d':
                    model_opts_3d['target_dim'] = (112, 112)
                model_opts_3d['process'] = False
                features, feat_shape = self.get_context_data(model_opts_3d, data, data_type, d_type)
            elif 'pose' in d_type:
                # Pose modality: precomputed poses loaded from disk.
                path_to_pose, _ = get_path(save_folder='poses',
                                           dataset=dataset,
                                           save_root_folder='data/features')
                features = get_pose(data['image'],
                                    data['ped_id'],
                                    data_type=data_type,
                                    file_path=path_to_pose,
                                    dataset=model_opts['dataset'])
                feat_shape = features.shape[1:]
            else:
                # Numeric modality (box, speed, ...): taken as-is.
                features = data[d_type]
                feat_shape = features.shape[1:]
            _data.append(features)
            data_sizes.append(feat_shape)
            data_types.append(d_type)
        # create the final data file to be returned
        if self._generator:
            _data = (DataGenerator(data=_data,
                                   labels=data['crossing'],
                                   data_sizes=data_sizes,
                                   process=process,
                                   global_pooling=None,
                                   input_type_list=model_opts['obs_input_type'],
                                   batch_size=model_opts['batch_size'],
                                   shuffle=data_type != 'test',
                                   to_fit=data_type != 'test'), data['crossing'])  # set y to None
            # global_pooling=self._global_pooling,
        else:
            _data = (_data, data['crossing'])
        return {'data': _data,
                'ped_id': data['ped_id'],
                'tte': data['tte'],
                'image': data['image'],
                'data_params': {'data_types': data_types, 'data_sizes': data_sizes},
                'count': {'neg_count': neg_count, 'pos_count': pos_count}}

    def get_model(self, data_params):
        """Build and return the Keras model described by data_params."""
        return_sequence = True
        data_sizes = data_params['data_sizes']
        data_types = data_params['data_types']
        network_inputs = []
        encoder_outputs = []
        core_size = len(data_sizes)
        attention_size = self._num_hidden_units
        # One symbolic input per modality.
        for i in range(0, core_size):
            network_inputs.append(Input(shape=data_sizes[i], name='input_' + data_types[i]))
        # Modalities 0 and 1: Transformer encoders (default hyper-parameters).
        x = self._multi_self_attention(name='enc0_' + data_types[0], representation_size=attention_size)(network_inputs[0])
        encoder_outputs.append(x)
        x = self._multi_self_attention(name='enc1_' + data_types[1], representation_size=attention_size)(network_inputs[1])
        encoder_outputs.append(x)
        # Modalities 2-3: hierarchically fused RNN branches; the fourth
        # branch is disabled below (commented out in the original).
        x = self._rnn(name='enc2_' + data_types[2], r_sequence=return_sequence)(network_inputs[2])
        current = [x, network_inputs[3]]
        x = Concatenate(name='concat_early3', axis=2)(current)
        x = self._rnn(name='enc3_' + data_types[3], r_sequence=return_sequence)(x)
        # current = [x, network_inputs[4]]
        # x = Concatenate(name='concat_early4', axis=2)(current)
        # x = self._rnn(name='enc4_' + data_types[4], r_sequence=return_sequence)(x)
        encoder_outputs.append(x)
        # Fuse all modality encodings with an attention block, then classify.
        x = Concatenate(name='concat_modalities', axis=1)(encoder_outputs)
        encodings = attention_3d_block(x, dense_size=attention_size, modality='_modality')
        model_output = Dense(1, activation='sigmoid',
                             name='output_dense',
                             activity_regularizer=regularizers.l2(0.001))(encodings)
        net_model = Model(inputs=network_inputs,
                          outputs=model_output)
        net_model.summary()
        plot_model(net_model, to_file='PCPA_ATTENTION_FIXTTE.png')
        return net_model
class PCPA_TR_VISUAL(ActionPredict):
"""
hierfusion PCPA_MULTI
Class init function
Args:
num_hidden_units: Number of recurrent hidden layers
cell_type: Type of RNN cell
**kwargs: Description
"""
def __init__(self,
num_hidden_units=256,
cell_type='gru',
**kwargs):
"""
Class init function
Args:
num_hidden_units: Number of recurrent hidden layers
cell_type: Type of RNN cell
**kwargs: Description
"""
super().__init__(**kwargs)
# Network parameters
self._num_hidden_units = num_hidden_units
self._rnn = self._gru if cell_type == 'gru' else self._lstm
self._rnn_cell = GRUCell if cell_type == 'gru' else LSTMCell
self._3dconv = C3DNet if self._backbone == 'c3d' else I3DNet
self._multi_self_attention = ModelTrunk
def get_data(self, data_type, data_raw, model_opts):
assert model_opts['obs_length'] == 16
model_opts['normalize_boxes'] = False
self._generator = model_opts.get('generator', False)
data_type_sizes_dict = {}
process = model_opts.get('process', True)
dataset = model_opts['dataset']
data, neg_count, pos_count = self.get_data_sequence(data_type, data_raw, model_opts)
data_type_sizes_dict['box'] = data['box'].shape[1:]
if 'speed' in data.keys():
data_type_sizes_dict['speed'] = data['speed'].shape[1:]
# if 'context_cnn' in data.keys():
# data_type_sizes_dict['context_cnn'] = data['context_cnn'].shape[1:]
# Store the type and size of each image
_data = []
data_sizes = []
data_types = []
model_opts_3d = model_opts.copy()
for d_type in model_opts['obs_input_type']:
if 'local' in d_type or 'context' in d_type or 'mask' in d_type:
if self._backbone == 'c3d':
model_opts_3d['target_dim'] = (112, 112)
model_opts_3d['process'] = False
features, feat_shape = self.get_context_data(model_opts_3d, data, data_type, d_type)
elif 'pose' in d_type:
path_to_pose, _ = get_path(save_folder='poses',
dataset=dataset,
save_root_folder='data/features')
features = get_pose(data['image'],
data['ped_id'],
data_type=data_type,
file_path=path_to_pose,
dataset=model_opts['dataset'])
feat_shape = features.shape[1:]
else:
features = data[d_type]
feat_shape = features.shape[1:]
_data.append(features)
data_sizes.append(feat_shape)
data_types.append(d_type)
# create the final data file to be returned
if self._generator:
_data = (DataGenerator(data=_data,
labels=data['crossing'],
data_sizes=data_sizes,
process=process,
global_pooling=None,
input_type_list=model_opts['obs_input_type'],
batch_size=model_opts['batch_size'],
shuffle=data_type != 'test',
to_fit=data_type != | |
1:
if ftype == TType.STRUCT:
self.er = ExperimentRunEvent()
self.er.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
    """Serialize this Thrift argument struct to the given output protocol."""
    # Fast path: use the C-accelerated encoder when available.
    if oprot._fast_encode is not None and self.thrift_spec is not None:
        oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
        return
    oprot.writeStructBegin('storeExperimentRunEvent_args')
    if self.er is not None:
        # Field id 1: the ExperimentRunEvent payload.
        oprot.writeFieldBegin('er', TType.STRUCT, 1)
        self.er.write(oprot)
        oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
def validate(self):
    """Generated no-op: this struct declares no required fields to check."""
    return None
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class storeExperimentRunEvent_result(object):
    """
    Thrift-generated result wrapper for storeExperimentRunEvent.

    Attributes:
     - success: the ExperimentRunEventResponse returned on success.
     - svEx: a ServerLogicException thrown by the server, if any.
    """

    # (field id, type, name, type args, default) tuples consumed by the
    # (de)serializers; slot 0 is the return value by Thrift convention.
    thrift_spec = (
        (0, TType.STRUCT, 'success', (ExperimentRunEventResponse, ExperimentRunEventResponse.thrift_spec), None, ),  # 0
        (1, TType.STRUCT, 'svEx', (ServerLogicException, ServerLogicException.thrift_spec), None, ),  # 1
    )

    def __init__(self, success=None, svEx=None,):
        self.success = success
        self.svEx = svEx

    def read(self, iprot):
        """Deserialize this struct from the given input protocol."""
        # Fast path: use the C-accelerated decoder when available.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.STRUCT:
                    self.success = ExperimentRunEventResponse()
                    self.success.read(iprot)
                else:
                    # Type mismatch: skip the unexpected field payload.
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    self.svEx = ServerLogicException()
                    self.svEx.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                # Unknown field id: skip for forward compatibility.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to the given output protocol."""
        # Fast path: use the C-accelerated encoder when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('storeExperimentRunEvent_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.STRUCT, 0)
            self.success.write(oprot)
            oprot.writeFieldEnd()
        if self.svEx is not None:
            oprot.writeFieldBegin('svEx', TType.STRUCT, 1)
            self.svEx.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # Generated no-op: no required fields to check.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class storeLinearModel_args(object):
    """
    Thrift-generated argument wrapper for storeLinearModel.

    Attributes:
     - modelId: integer id of the model to store.
     - model: the LinearModel payload.
    """

    # (field id, type, name, type args, default) tuples consumed by the
    # (de)serializers; slot 0 is unused for argument structs.
    thrift_spec = (
        None,  # 0
        (1, TType.I32, 'modelId', None, None, ),  # 1
        (2, TType.STRUCT, 'model', (LinearModel, LinearModel.thrift_spec), None, ),  # 2
    )

    def __init__(self, modelId=None, model=None,):
        self.modelId = modelId
        self.model = model

    def read(self, iprot):
        """Deserialize this struct from the given input protocol."""
        # Fast path: use the C-accelerated decoder when available.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.I32:
                    self.modelId = iprot.readI32()
                else:
                    # Type mismatch: skip the unexpected field payload.
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRUCT:
                    self.model = LinearModel()
                    self.model.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                # Unknown field id: skip for forward compatibility.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to the given output protocol."""
        # Fast path: use the C-accelerated encoder when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('storeLinearModel_args')
        if self.modelId is not None:
            oprot.writeFieldBegin('modelId', TType.I32, 1)
            oprot.writeI32(self.modelId)
            oprot.writeFieldEnd()
        if self.model is not None:
            oprot.writeFieldBegin('model', TType.STRUCT, 2)
            self.model.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # Generated no-op: no required fields to check.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class storeLinearModel_result(object):
    """Thrift-generated result struct for the ``storeLinearModel`` RPC.

    Attributes:
     - success
     - rnfEx
     - svEx
    """
    # Wire metadata: field id 0 is the return value; 1 and 2 are the
    # declared exceptions the call may raise.
    thrift_spec = (
        (0, TType.BOOL, 'success', None, None, ),  # 0
        (1, TType.STRUCT, 'rnfEx', (ResourceNotFoundException, ResourceNotFoundException.thrift_spec), None, ),  # 1
        (2, TType.STRUCT, 'svEx', (ServerLogicException, ServerLogicException.thrift_spec), None, ),  # 2
    )
    def __init__(self, success=None, rnfEx=None, svEx=None,):
        self.success = success
        self.rnfEx = rnfEx
        self.svEx = svEx
    def read(self, iprot):
        """Deserialize this struct from the given input protocol."""
        # Fast path: delegate to the accelerated C decoder when available.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
            return
        # Slow path: walk the fields on the wire; unknown field ids or
        # mismatched types are skipped for schema forward-compatibility.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.BOOL:
                    self.success = iprot.readBool()
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    self.rnfEx = ResourceNotFoundException()
                    self.rnfEx.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRUCT:
                    self.svEx = ServerLogicException()
                    self.svEx.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        """Serialize this struct to the given output protocol."""
        # Fast path: delegate to the accelerated C encoder when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
            return
        # Slow path: emit each non-None field with its id and wire type.
        oprot.writeStructBegin('storeLinearModel_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.BOOL, 0)
            oprot.writeBool(self.success)
            oprot.writeFieldEnd()
        if self.rnfEx is not None:
            oprot.writeFieldBegin('rnfEx', TType.STRUCT, 1)
            self.rnfEx.write(oprot)
            oprot.writeFieldEnd()
        if self.svEx is not None:
            oprot.writeFieldBegin('svEx', TType.STRUCT, 2)
            self.svEx.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        """No required fields on this struct; generated as a no-op."""
        return
    def __repr__(self):
        """Debug representation: ClassName(attr1=..., attr2=...)."""
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
    def __eq__(self, other):
        """Equal iff same class and identical attribute dicts."""
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Logical inverse of __eq__."""
        return not (self == other)
class getDataFrameAncestry_args(object):
    """Thrift-generated argument struct for the ``getDataFrameAncestry`` RPC.

    Attributes:
     - dataFrameId
    """
    # Wire metadata: (id, wire type, name, nested spec, default) per field id.
    thrift_spec = (
        None,  # 0
        (1, TType.I32, 'dataFrameId', None, None, ),  # 1
    )
    def __init__(self, dataFrameId=None,):
        self.dataFrameId = dataFrameId
    def read(self, iprot):
        """Deserialize this struct from the given input protocol."""
        # Fast path: delegate to the accelerated C decoder when available.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
            return
        # Slow path: walk the fields on the wire; unknown field ids or
        # mismatched types are skipped for schema forward-compatibility.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.I32:
                    self.dataFrameId = iprot.readI32()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        """Serialize this struct to the given output protocol."""
        # Fast path: delegate to the accelerated C encoder when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('getDataFrameAncestry_args')
        if self.dataFrameId is not None:
            oprot.writeFieldBegin('dataFrameId', TType.I32, 1)
            oprot.writeI32(self.dataFrameId)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        """No required fields on this struct; generated as a no-op."""
        return
    def __repr__(self):
        """Debug representation: ClassName(attr1=..., attr2=...)."""
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
    def __eq__(self, other):
        """Equal iff same class and identical attribute dicts."""
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Logical inverse of __eq__."""
        return not (self == other)
class getDataFrameAncestry_result(object):
    """Thrift-generated result struct for the ``getDataFrameAncestry`` RPC.

    Attributes:
     - success
     - rnfEx
     - svEx
    """
    # Wire metadata: field id 0 is the return value; 1 and 2 are the
    # declared exceptions the call may raise.
    thrift_spec = (
        (0, TType.STRUCT, 'success', (DataFrameAncestry, DataFrameAncestry.thrift_spec), None, ),  # 0
        (1, TType.STRUCT, 'rnfEx', (ResourceNotFoundException, ResourceNotFoundException.thrift_spec), None, ),  # 1
        (2, TType.STRUCT, 'svEx', (ServerLogicException, ServerLogicException.thrift_spec), None, ),  # 2
    )
    def __init__(self, success=None, rnfEx=None, svEx=None,):
        self.success = success
        self.rnfEx = rnfEx
        self.svEx = svEx
    def read(self, iprot):
        """Deserialize this struct from the given input protocol."""
        # Fast path: delegate to the accelerated C decoder when available.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
            return
        # Slow path: walk the fields on the wire; unknown field ids or
        # mismatched types are skipped for schema forward-compatibility.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.STRUCT:
                    self.success = DataFrameAncestry()
                    self.success.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    self.rnfEx = ResourceNotFoundException()
                    self.rnfEx.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRUCT:
                    self.svEx = ServerLogicException()
                    self.svEx.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        """Serialize this struct to the given output protocol."""
        # Fast path: delegate to the accelerated C encoder when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
            return
        # Slow path: emit each non-None field with its id and wire type.
        oprot.writeStructBegin('getDataFrameAncestry_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.STRUCT, 0)
            self.success.write(oprot)
            oprot.writeFieldEnd()
        if self.rnfEx is not None:
            oprot.writeFieldBegin('rnfEx', TType.STRUCT, 1)
            self.rnfEx.write(oprot)
            oprot.writeFieldEnd()
        if self.svEx is not None:
            oprot.writeFieldBegin('svEx', TType.STRUCT, 2)
            self.svEx.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        """No required fields on this struct; generated as a no-op."""
        return
    def __repr__(self):
        """Debug representation: ClassName(attr1=..., attr2=...)."""
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
    def __eq__(self, other):
        """Equal iff same class and identical attribute dicts."""
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Logical inverse of __eq__."""
        return not (self == other)
class getCommonAncestor_args(object):
    """Thrift-generated argument struct for the ``getCommonAncestor`` RPC.

    Attributes:
     - dfId1
     - dfId2
    """
    # Wire metadata: (id, wire type, name, nested spec, default) per field id.
    thrift_spec = (
        None,  # 0
        (1, TType.I32, 'dfId1', None, None, ),  # 1
        (2, TType.I32, 'dfId2', None, None, ),  # 2
    )
    def __init__(self, dfId1=None, dfId2=None,):
        self.dfId1 = dfId1
        self.dfId2 = dfId2
    def read(self, iprot):
        """Deserialize this struct from the given input protocol."""
        # Fast path: delegate to the accelerated C decoder when available.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
            return
        # Slow path: walk the fields on the wire; unknown field ids or
        # mismatched types are skipped for schema forward-compatibility.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.I32:
                    self.dfId1 = iprot.readI32()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.I32:
                    self.dfId2 = iprot.readI32()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        """Serialize this struct to the given output protocol."""
        # Fast path: delegate to the accelerated C encoder when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('getCommonAncestor_args')
        if self.dfId1 is not None:
            oprot.writeFieldBegin('dfId1', TType.I32, 1)
            oprot.writeI32(self.dfId1)
            oprot.writeFieldEnd()
        if self.dfId2 is not None:
            oprot.writeFieldBegin('dfId2', TType.I32, 2)
            oprot.writeI32(self.dfId2)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        """No required fields on this struct; generated as a no-op."""
        return
    def __repr__(self):
        """Debug representation: ClassName(attr1=..., attr2=...)."""
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
    def __eq__(self, other):
        """Equal iff same class and identical attribute dicts."""
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Logical inverse of __eq__."""
        return not (self == other)
class getCommonAncestor_result(object):
"""
Attributes:
- success
- rnfEx
- svEx
"""
thrift_spec = (
(0, TType.STRUCT, 'success', (CommonAncestor, CommonAncestor.thrift_spec), None, ), # 0
(1, TType.STRUCT, 'rnfEx', (ResourceNotFoundException, ResourceNotFoundException.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'svEx', (ServerLogicException, ServerLogicException.thrift_spec), None, ), # 2
)
| |
<reponame>znes/angus-scenarios
import os
import pandas as pd
from cydets.algorithm import detect_cycles
## import numpy as np
# import plotly.io as pio
import plotly.offline as offline
import matplotlib.pyplot as plt
from matplotlib import colors
import seaborn as sns
from documentation.plotly_plots import (
filling_level_plot,
hourly_plot,
stacked_plot,
energy_plot,
)
# Display colour per carrier/technology name, shared by all plots below.
color = {
    "conventional": "dimgrey",
    "cavern-acaes": "crimson",
    "redox-battery": "violet",
    "lignite-st": "sienna",
    "coal-st": "dimgrey",
    "uranium-st": "yellow",
    "gas-ocgt": "gray",
    "gas-ccgt": "lightgray",
    "solar-pv": "lightyellow",
    "wind-onshore": "skyblue",
    "wind-offshore": "steelblue",
    "biomass-st": "yellowgreen",
    "hydro-ror": "aqua",
    "hydro-phs": "purple",
    "hydro-reservoir": "magenta",
    "hydro-rsv": "magenta",
    "hydrogen-storage": "pink",
    "lithium-battery": "salmon",
    "waste-st": "yellowgreen",
    "oil-ocgt": "black",
    "other": "red",
    "other-res": "orange",
    "electricity-load": "slategray",
    "import": "mediumpurple",
    "storage": "plum",
    "mixed-st": "chocolate",
    "decentral_heat-gshp": "darkcyan",
    "flex-decentral_heat-gshp": "darkcyan",
    "fossil": "darkgray",
}
# Hex codes for plotly/matplotlib.  Note the comprehension reuses the name
# `color` as its loop variable; that binding is local to the comprehension.
color_dict = {name: colors.to_hex(color) for name, color in color.items()}
# --- global configuration ----------------------------------------------------
# Root folder that contains one sub-directory per scenario run.
path = os.path.join(os.getcwd(), "results")
# Carrier/tech names used to classify the columns of the result files.
renewables = [
    "hydro-ror",
    "hydro-reservoir",
    "wind-offshore",
    "wind-onshore",
    "solar-pv",
    "other-res",
    "biomass-st",
]
storages = [
    "hydrogen-storage",
    "redox-battery",
    "hydro-phs",
    "cavern-acaes",
    "lithium-battery",
]
conventionals = [
    "lignite-st",
    "gas-ccgt",
    "mixed-st",
    "gas-ocgt",
    "coal-st",
    "oil-ocgt",
    "uranium-st",
    "waste-st",
    "chp-must-run",
]
# Electricity bus (country code) all plots below are built for.
bus = "DE"
# Scenarios shown in the comparison figures.
base_scenarios = ["2030NEPC", "2050REF", "2040DG", "2040GCA", "2030DG"]
# Scenario directories to ignore when scanning `path`.
exclude = []
# Target folder for the interactive (plotly) HTML figures.
interactive_figures = os.path.join("documentation", "figures", "interactive")
# exist_ok avoids the check-then-create race of the old exists()/makedirs pair.
os.makedirs(interactive_figures, exist_ok=True)
# emissions -------------------------------------------------------------------
# Collect the emission sums of every scenario into one frame
# (columns = scenario names, index = bus names).
emissions = pd.DataFrame()
for scenario in os.listdir(path):  # renamed from `dir` (shadowed the builtin)
    if scenario not in exclude:
        df = pd.read_csv(
            os.path.join(path, scenario, "emissions.csv"),
            index_col=0,
            parse_dates=True,
        )
        summ = df.sum()
        summ.name = scenario
        emissions = pd.concat([emissions, summ], axis=1, sort=False)
# /1e6 presumably converts t -> Mio. t (matches the axis label used below).
total_emissions = emissions.sum() / 1e6
bus_emissions = (emissions.loc[bus + "-electricity"] / 1e6).round(2)
bus_emissions.name = "CO2"
bus_emissions = bus_emissions.sort_index()
# re shares -------------------------------------------------------------------
# Per-scenario renewable shares, excess, imports and conventional supply for
# the selected bus.  Exported dict values are scaled /1e6 (MWh -> TWh).
electricity_demand = {}
shares_supply = {}
shares_demand = {}
excess = {}
imports = {}
conv_supply = {}
for scenario in os.listdir(path):  # renamed from `dir` (shadowed the builtin)
    if scenario not in exclude:
        df = pd.read_csv(
            os.path.join(path, scenario, "output", bus + "-electricity.csv"),
            index_col=0,
            parse_dates=True,
        )
        sums = df.clip(0).sum().to_dict()
        total_supply = sum(
            sums.get(bus + "-" + k, 0) for k in renewables + conventionals
        )
        conv_supply[scenario] = (
            sum(sums.get(bus + "-" + k, 0) for k in conventionals) / 1e6
        )
        imports[scenario] = df["import"].clip(0).sum() / 1e6
        re_supply = sum(sums.get(bus + "-" + k, 0) for k in renewables)
        # BUGFIX: the share formulas below used to subtract excess *after* it
        # had been scaled to TWh from the unscaled supply, mixing units.
        # Subtract the raw value and keep the scaled one only for export.
        raw_excess = df[bus + "-electricity-excess"].sum()
        excess[scenario] = raw_excess / 1e6
        shares_demand[scenario] = (re_supply - raw_excess) / df[
            [
                bus + "-flex-decentral_heat-gshp",
                bus + "-decentral_heat-gshp",
                bus + "-electricity-load",
            ]
        ].sum().sum()
        shares_supply[scenario] = (re_supply - raw_excess) / total_supply
        electricity_demand[scenario] = df[
            [
                bus + "-flex-decentral_heat-gshp",
                bus + "-decentral_heat-gshp",
                bus + "-electricity-load",
            ]
        ].sum()
# Scenario indicator figure: CO2 emissions (left axis) vs. RE share (right).
shares = pd.Series(shares_supply).sort_index()
indicators = pd.concat(
    [
        pd.Series(shares[base_scenarios], name="RES"),
        bus_emissions[base_scenarios],
    ],
    axis=1,
)
indicators = indicators.sort_values(by="CO2", ascending=False)
ax = indicators["CO2"].plot(linestyle="", marker="o", color="skyblue")
ax.set_ylabel("CO2 Emissions in Mio. tons")
ax.set_ylim(0, 210)
plt.xticks(rotation=45)
# Second y-axis for the RE share; the plot call draws on the current axes.
ax2 = ax.twinx()
indicators["RES"].plot(linestyle="", marker="o", color="darkred", label="RES")
ax2.set_ylim(0, 1.1)
ax2.set_ylabel("RE share")
# Merge the legend entries of both axes into a single legend box.
lines, labels = ax.get_legend_handles_labels()
lines2, labels2 = ax2.get_legend_handles_labels()
ax2.legend(
    lines + lines2,
    labels + labels2,
    loc="lower left",
    borderaxespad=0,
    frameon=False,
)
ax.grid(linestyle="--", color="lightgray")
plt.savefig("documentation/figures/scenario-indicators.pdf")
# filling levels --------------------------------------------------------------
# Interactive storage filling-level plot per base scenario.
for scenario in os.listdir(path):  # renamed from `dir` (shadowed the builtin)
    if scenario in base_scenarios:
        # NOTE(review): none of the base_scenarios names contains "flex",
        # so this guard looks like a leftover from an older scenario set.
        if "flex" not in scenario:
            df = pd.read_csv(
                os.path.join(path, scenario, "output", "filling_levels.csv"),
                index_col=[0],
                parse_dates=True,
            )
            offline.plot(
                filling_level_plot(
                    df,
                    scenario=scenario,
                    bus=bus,
                    storages=storages + ["hydro-reservoir"],
                    color_dict=color_dict,
                ),
                filename=os.path.join(
                    interactive_figures, scenario + "-filling-levels.html"
                ),
                auto_open=False,
            )
# hourly plots ---------------------------------------------------------------
# Interactive hourly dispatch plot per base scenario.
for scenario in os.listdir(path):  # renamed from `dir` (shadowed the builtin)
    if scenario in base_scenarios:
        supply_demand = pd.read_csv(
            os.path.join(
                path, scenario, "output", "-".join([bus, "electricity"]) + ".csv"
            ),
            index_col=[0],
            parse_dates=True,
        )
        offline.plot(
            hourly_plot(
                supply_demand,
                scenario=scenario,
                bus=bus,
                color_dict=color_dict,
                conventionals=conventionals,
                storages=storages,
            ),
            filename=os.path.join(
                interactive_figures, scenario + "-hourly-dispatch.html"
            ),
            auto_open=False,
        )
# stacked plot ---------------------------------------------------------------
# Interactive stacked capacity plot per base scenario.
for scenario in os.listdir(path):  # renamed from `dir` (shadowed the builtin)
    if scenario in base_scenarios:
        capacities = pd.read_csv(
            os.path.join(path, scenario, "output", "capacities.csv"), index_col=0
        )
        capacities.set_index("to", append=True, inplace=True)
        capacities = capacities.drop(
            index="DE-decentral_heat-bus", level=1
        ).reset_index(1)
        capacities = (
            capacities.groupby(["to", "carrier", "tech"]).sum().unstack("to")
        )
        capacities.index = ["-".join(i) for i in capacities.index]
        capacities.columns = capacities.columns.droplevel(0)
        offline.plot(
            stacked_plot(capacities, scenario=scenario, color_dict=color_dict),
            # Output filename typo fixed ("apacities" -> "capacities").
            filename=os.path.join(
                interactive_figures, scenario + "-installed-capacities.html"
            ),
            auto_open=False,
        )
# stacked capacities by bus --------------------------------------------------
# Collect installed capacity per technology for the selected bus across all
# base scenarios (columns = scenarios).
_df = pd.DataFrame()
for scenario in os.listdir(path):  # renamed from `dir` (shadowed the builtin)
    if scenario in base_scenarios:
        capacities = pd.read_csv(
            os.path.join(path, scenario, "output", "capacities.csv"), index_col=0
        )
        capacities.set_index("to", append=True, inplace=True)
        capacities = capacities.xs(bus + "-electricity", level=1)
        capacities.index = [i.replace(bus + "-", "") for i in capacities.index]
        value = capacities["value"]
        value = value.groupby(
            value.index
        ).sum()  # sum investment and existing capacity
        value.name = scenario
        _df = pd.concat([_df, value], axis=1, sort=False)
offline.plot(
    # NOTE(review): `scenario` is whatever the last loop iteration bound —
    # only the filename (per bus) identifies this aggregated figure.
    stacked_plot(_df, scenario=scenario, color_dict=color_dict),
    # Output filename typo fixed ("apacities" -> "capacities").
    filename=os.path.join(
        interactive_figures, bus + "-installed-capacities.html"
    ),
    auto_open=False,
)
# matplotlib static figure: scenarios ordered by ascending RE share.
_df = _df[base_scenarios]
aux = dict()
for x in shares[base_scenarios].sort_values().index:
    aux[x] = _df[x].values
_df = pd.DataFrame(aux, index=_df.index)
# Aggregate all conventional techs into "fossil" and all storages into
# "storage" to keep the bar chart readable.
conv = [c for c in conventionals if c in _df.index]
_df.loc["fossil"] = _df.loc[conv].sum()
_df = _df.drop(conv)
stor = [c for c in storages if c in _df.index]
_df.loc["storage"] = _df.loc[stor].sum()
_df = _df.drop(stor)
de = _df / 1000  # MW -> GW (axis label below)
ax = (de.T).plot(
    kind="bar", stacked=True, color=[color_dict.get(c) for c in de.index]
)
lgd = ax.legend(
    loc="lower left",
    bbox_to_anchor=(0, -0.5),
    shadow=False,
    frameon=False,
    ncol=3,
)
ax.set_ylabel("Installed capacity in GW")
ax.grid(linestyle="--", lw=0.2)
plt.xticks(rotation=45)
ax2 = ax.twinx()
ax2.set_ylim(0, 1)
plt.plot(
    shares[base_scenarios].sort_values().index,
    shares[base_scenarios].sort_values(),
    "o",
)
ax2.set_ylabel("RE share")
plt.savefig(
    "documentation/figures/" + bus + "-installed_capacities.pdf",
    bbox_extra_artists=(lgd,),
    # NOTE(review): `figsize` is not a savefig parameter — set it on the
    # figure instead; confirm before removing (kept for byte-compatibility).
    figsize=(15, 8),
    bbox_inches="tight",
)
# scenario data ---------------------------------------------------------------
# Aggregate yearly supply (positive) and demand/excess (negative, suffixed
# "-cos") per scenario; demand columns are sign-flipped first.
scenarios = pd.DataFrame()
for scenario in os.listdir(path):  # renamed from `dir` (shadowed the builtin)
    if scenario in base_scenarios:
        df = pd.read_csv(
            os.path.join(path, scenario, "output", bus + "-electricity.csv"),
            index_col=0,
            parse_dates=True,
        )
        cols = [
            ("-").join([bus, ct])
            for ct in [
                "electricity-load",
                "electricity-excess",
                "flex-decentral_heat-gshp",
                "decentral_heat-gshp",
            ]
            if ("-").join([bus, ct]) in df.columns
        ]
        df[cols] = df[cols] * -1
        pos = df.clip(lower=0).sum()
        neg = df.clip(upper=0).sum()
        neg = neg.loc[neg < 0]
        neg.index = [i + "-cos" for i in neg.index]
        df = pd.concat([pos, neg], sort=False)
        df.name = scenario
        scenarios = pd.concat([scenarios, df], axis=1, sort=False)
scenarios.fillna(0, inplace=True)
# (A bare no-op expression statement `scenarios` was removed here.)
scenarios = (scenarios / 1e6).round(2)  # MWh -> TWh
# Strip the bus prefix from all rows except the import column.
scenarios.index = [
    "-".join(i.split("-")[1:]) if "import" not in i else i
    for i in scenarios.index
]
storages_cos = [i + "-cos" for i in scenarios.index if i in storages]
# NOTE(review): this overwrites the module-level `storages` list for all
# code that runs after this section.
storages = [s for s in storages if s in scenarios.index]
scenarios.loc["storage"] = scenarios.loc[storages].sum()
scenarios.loc["storage-cos"] = scenarios.loc[storages_cos].sum()
scenarios.drop(storages, inplace=True)
scenarios.drop(storages_cos, inplace=True)
scenarios.sort_index(axis=1, inplace=True)
# energy plot ----------------------------------------------------------------
# (A dead assignment filtering "flex" columns was removed here — it was
# immediately overwritten by the base_scenarios selection.)
scenarios_plot = scenarios[base_scenarios]
scenarios_plot = scenarios_plot[scenarios_plot.columns.sort_values()]
ax = scenarios_plot.T.plot(
    kind="bar",
    stacked=True,
    # "-cos" rows reuse the colour of their positive counterpart.
    color=[color_dict.get(i.replace("-cos", "")) for i in scenarios.index],
    label=[i if "-cos" not in i else None for i in scenarios.index],
)
ax.legend()
# Deduplicate the legend: drop the mirrored "-cos" (demand) entries.
handles, labels = ax.get_legend_handles_labels()
lgd = {k: v for k, v in dict(zip(handles, labels)).items() if "-cos" not in v}
lgd = ax.legend(
    lgd.keys(),
    lgd.values(),
    loc="lower left",
    bbox_to_anchor=(-0.2, -0.65),
    ncol=4,
    borderaxespad=0,
    frameon=False,
)
ax.set_ylabel("Energy in TWh")
ax.grid(linestyle="--", lw=0.5)
plt.xticks(rotation=45)
plt.savefig(
    "documentation/figures/" + bus + "-aggregated_supply_demand.pdf",
    bbox_extra_artists=(lgd,),
    bbox_inches="tight",
)
offline.plot(
    energy_plot(scenarios_plot, color_dict=color_dict),
    image="svg",
    image_filename=os.path.join(
        interactive_figures, bus + "-aggregated_supply_demand"
    ),
    auto_open=True,
)
# literature scenarios comparison ---------------------------------------------
# Bar chart of installed capacities from literature scenarios, with the
# demand row shown as dots on a second y-axis.
scenarios = pd.read_csv(
    "documentation/data/scenarios-literature.csv", index_col=[0, 1]
)
scenarios.index = scenarios.index.droplevel(1)
demand = scenarios.loc["demand"]
# Demand is plotted separately; import/other-res are left out of the bars.
scenarios = scenarios.drop(["demand", "import", "other-res"])
ax = scenarios.T.plot(
    kind="bar", grid=True, color=[color_dict.get(c) for c in scenarios.index]
)
lgd = ax.legend(
    loc="lower left",
    bbox_to_anchor=(0.0, 1.02),
    ncol=2,
    borderaxespad=0,
    frameon=False,
)
ax.set_ylabel("Installed capacity in GW")
plt.xticks(rotation=45)
ax2 = ax.twinx()
ax2 = demand.plot(
    linestyle="", marker="o", color=color_dict.get("electricity-load")
)
ax2.set_ylabel("Demand in TWh")
ax2.set_ylim([0, 820])
ax2.set_xlim([-0.5, 5.5])
plt.savefig(
    "documentation/figures/scenario-comparison.pdf",
    bbox_extra_artists=(lgd,),
    bbox_inches="tight",
)
# shadow prices ---------------------------------------------------------------
# Electricity shadow prices per scenario, kept once sorted and once in
# chronological order.  Renamed from `sorted` — it shadowed the builtin.
sorted_prices = {}
unsorted = {}
for scenario in os.listdir(path):  # renamed from `dir` (shadowed the builtin)
    data_path = os.path.join(path, scenario, "output", "shadow_prices.csv")
    sprices = pd.read_csv(data_path, index_col=[0], parse_dates=True)[
        bus + "-electricity"
    ]
    sorted_prices[scenario] = sprices.sort_values().values
    unsorted[scenario] = sprices.values
# NOTE(review): this overwrites the module-level `renewables` list for the
# residual-load computation below (biomass/hydro-reservoir excluded here).
renewables = ["wind-onshore", "wind-offshore", "solar-pv", "hydro-ror"]
timestamps = {}
shadow_prices = {}
rload = {}
for scenario in os.listdir(path):
    data_path = os.path.join(path, scenario, "output", bus + "-electricity.csv")
    country_electricity_df = pd.read_csv(
        data_path, index_col=[0], parse_dates=True
    )
    # Residual load = load minus variable renewable feed-in.
    country_electricity_df["rload"] = country_electricity_df[
        ("-").join([bus, "electricity-load"])
    ] - country_electricity_df[[("-").join([bus, i]) for i in renewables]].sum(
        axis=1
    )
    rload[scenario] = country_electricity_df["rload"].values
    timestamps[scenario] = country_electricity_df.index
# residual load plot ---------------------------------------
rload_df = pd.DataFrame(rload)[base_scenarios] / 1e3
rload_df.sort_index(axis=1, inplace=True)
# Turn each column into its own duration curve (descending order).
for c in rload_df[base_scenarios].columns:
    rload_df[c] = rload_df[c].sort_values(ascending=False).values
ax = rload_df.plot(cmap="RdYlBu")
ax.grid(linestyle="--", lw="0.5")
ax.set_ylabel("Residual load in GW")  # label typo fixed ("Residualload")
ax.set_xlim(-50, 8860)
ax.set_xlabel("Hour")
plt.savefig("documentation/figures/rload.pdf", bbox_inches="tight")
# shadow prices vs. residual load: one scatter panel per scenario pair.
tuples = {
    (0, 0): ("2050REF-GS-flex0", "2050REF-GS"),
    (0, 1): ("2030DG-flex0", "2030DG"),
    (0, 2): ("2050REF-flex0", "2050REF"),
    (0, 3): ("2040DG-flex0", "2040DG"),
}
fig, axs = plt.subplots(1, 4, sharex=True, sharey=True, figsize=(15, 5))
for k, values in tuples.items():
    # k[1] selects the panel column; both scenarios of a pair share a panel.
    for v in values:
        axs[k[1]].scatter(
            rload[v] / 1e3, unsorted[v], s=1, marker="o", label=v
        )
    axs[k[1]].set_ylim(-10, 150)
    axs[k[1]].grid(True, linestyle="--", color="lightgray")
    axs[k[1]].set_ylabel("Shadow price in \n Euro / MWh")  # typo "Shadwo" fixed
    lgd = axs[k[1]].legend(
        title="",
        loc="upper left",
        ncol=2,
        borderaxespad=0,
        frameon=False,
    )
    axs[k[1]].set_xlabel("Residual load in GW")
axs[k[0]].set_xlabel("Residual load in GW")
plt.suptitle("Shadow prices vs. Residual load")
plt.savefig(
    "documentation/figures/shadow-prices-vs-rload.pdf", bbox_inches="tight"
)
# boxplot for prices --------------------------------------------------------
# Distribution of hourly shadow prices per base scenario.
df = pd.DataFrame(unsorted)[base_scenarios]
# NOTE(review): stripping "flex" looks like a leftover — none of the
# base_scenarios names contains "flex"; confirm before removing.
df.columns = [i.replace("flex", "") for i in df.columns]
df = df.sort_index(axis=1)
ax = df.boxplot(flierprops=dict(markerfacecolor="r", marker="+"))
ax.set_ylim(-10, 220)
ax.grid(True, linestyle="--", color="lightgray")
ax.set_ylabel("Shadow price in Euro / MWh")
plt.xticks(rotation=45)
# plt.suptitle("Shadow prices within different scenarios")
plt.savefig(
    "documentation/figures/boxplot-shadow-prices.pdf", bbox_inches="tight"
)
# comparison of transmission -------------------------------------------------
exchange_df = pd.DataFrame()
bus = "DE"
compare = ("2050REF-flex0", "2050REF")
for dir in os.listdir("results"):
if dir in compare:
tr = pd.read_csv(
os.path.join(path, dir, "output", "transmission.csv"),
sep=",",
index_col=0,
parse_dates=True,
header=[0, 1, 2],
)
imports = tr.loc[:, (slice(None), bus + "-electricity")]
imports.columns = imports.columns.droplevel(["type", "to"])
imports.columns = [
c.replace(bus + "-electricity", "") for c in imports
]
exports = tr.loc[:, (bus + "-electricity")]
exports.columns = exports.columns.droplevel("type")
exports.columns = [
c.replace(bus + "-electricity", "") for c in exports
| |
<filename>evaluate_models.py
import argparse
import tensorflow as tf
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve
from pathlib import Path
from joblib import load
from baselines.dagmm.dagmm import DAGMM
from baselines.devnet.devnet_kdd19 import predict_devnet
from libs.ExperimentWrapper import ExperimentConfig, ExperimentWrapper
from libs.DataHandler import MNIST, CreditCard, NSL_KDD, IDS
from libs.A3 import A3
from libs.Metrics import evaluate, evaluate_multiple
from libs.architecture import RandomNoise, VariationalAutoEncoder
from utils import BASE_PATH, N_ANOMALY_SAMPLES
# Saves the ROC and the metrics evaluated on the test data for all experiments specified in the configs below.
# Expects the model path to be 'BASE_PATH / "mnist_SEED" / "models" / "a3"' respectively.
# On a previous version, we used threshold-dependent metrics (e.g. precision). The threshold was determined on the ROC
# using the validation data. We switched to the AUC and the AP for the recent version of the paper. Please ignore the
# old metrics as they are now determined based on the test data which causes overoptimistic results.
def roc_to_threshold(tpr: np.ndarray, fpr: np.ndarray, thresholds: np.ndarray, max_fpr: float = .05) -> float:
    """
    Return the threshold that causes the highest tpr for the given maximum fpr
    :param tpr: true positive rate (non-decreasing, as returned by roc_curve)
    :param fpr: false positive rate (non-decreasing, as returned by roc_curve)
    :param thresholds: threshold at the given tpr/fpr
    :param max_fpr: maximum allowed fpr
    :return: threshold with highest tpr at the maximum fpr
    """
    # BUGFIX: the old code took np.argmax(fpr[fpr < max_fpr]) — the position
    # of the largest *FPR value* inside the filtered array — and then searched
    # the TPR only up to (excluding) that position.  Instead, consider all
    # operating points respecting the FPR constraint directly.
    valid = np.flatnonzero(fpr < max_fpr)
    if valid.size == 0:
        # Preserve the original fallback: no point qualifies -> first entry.
        print("No best FPR found!")
        return thresholds[0]
    # Among the allowed points pick the one with the highest TPR.
    best_idx = valid[np.argmax(tpr[valid])]
    return thresholds[best_idx]
def roc_to_pandas(fpr: np.ndarray, tpr: np.ndarray, suffix: str, decimals: int = 3) -> pd.DataFrame:
    """
    Thin out a ROC curve so TikZ can render it (the raw IDS curves are huge)
    :param fpr: false positive rate
    :param tpr: true positive rate
    :param suffix: string appended to the column names
    :param decimals: decimals kept
    :return: DataFrame with the rounded TPR&FPR values
    """
    fpr_col = f"fpr_{suffix}"
    tpr_col = f"tpr_{suffix}"
    # Pair the two curves column-wise, then round; after rounding, many
    # consecutive points collapse onto the same FPR value.
    roc_df = pd.DataFrame({fpr_col: fpr, tpr_col: tpr})
    roc_df = roc_df.round(decimals=decimals)
    # Keep only the first point of every distinct (rounded) FPR value.
    return roc_df.drop_duplicates(subset=fpr_col, ignore_index=True)
if __name__ == '__main__':
# Configuration
this_parse = argparse.ArgumentParser(description="Evaluate A^3 performance on all experiments")
this_parse.add_argument(
"random_seed", type=int, help="Seed to fix randomness"
)
this_parse.add_argument(
"--folder_suffix", default="", type=str, help="Suffix added to the foldernames (e.g., the random seed)"
)
this_parse.add_argument(
"--in_path", default=BASE_PATH / "models", type=Path, help="Base input path for the models"
)
this_parse.add_argument(
"--out_path", default=BASE_PATH / "results", type=Path, help="Base output path for the results"
)
this_parse.add_argument(
"--use_vae", default=False, type=bool, help="Use a VAE as anomaly network instead of noise (experiment 4)"
)
this_args = this_parse.parse_args()
# Config
RANDOM_SEED = this_args.random_seed
MAX_FPR = [0.000001, 0.00001, 0.0001, 0.001, 0.01]
# Data path
OUT_PATH = this_args.out_path
FOLDER_SUFFIX = this_args.folder_suffix
BASE_PATH = this_args.in_path
# Take the right setting
if this_args.use_vae:
# For the VAE, we only consider 0 anomaly samples
MODEL_N_ANOMALIES = [0]
MODEL_CONFIG = [
{
"conf": ExperimentConfig(MNIST(random_state=RANDOM_SEED), list(range(0, 6)), [6, 7], [6, 7]),
"prefix": f"MNIST_{RANDOM_SEED}_",
"path": BASE_PATH / f"mnist_vae{FOLDER_SUFFIX}" / "models" / "a3",
"vae_layers": [800, 400, 100, 25],
"thresh": True
},
{
"conf": ExperimentConfig(MNIST(random_state=RANDOM_SEED), list(range(0, 6)), [6, 7],
list(range(6, 10))),
"prefix": f"MNIST_{RANDOM_SEED}_",
"path": BASE_PATH / f"mnist_vae{FOLDER_SUFFIX}" / "models" / "a3",
"vae_layers": [800, 400, 100, 25],
"thresh": False
},
{
"conf": ExperimentConfig(MNIST(random_state=RANDOM_SEED), list(range(4, 10)), [0, 1], [0, 1]),
"prefix": f"MNIST_{RANDOM_SEED}_",
"path": BASE_PATH / f"mnist_vae{FOLDER_SUFFIX}" / "models" / "a3",
"vae_layers": [800, 400, 100, 25],
"thresh": True
},
{
"conf": ExperimentConfig(MNIST(random_state=RANDOM_SEED), list(range(4, 10)), [0, 1],
list(range(0, 4))),
"prefix": f"MNIST_{RANDOM_SEED}_",
"path": BASE_PATH / f"mnist_vae{FOLDER_SUFFIX}" / "models" / "a3",
"vae_layers": [800, 400, 100, 25],
"thresh": False
},
]
else:
# We'll reverse the order such that we can automatically determine the threshold on the maximum available anomalies
MODEL_N_ANOMALIES = list(reversed(sorted(N_ANOMALY_SAMPLES)))
this_ids = IDS(random_state=RANDOM_SEED)
MODEL_CONFIG = [
# MNIST experiments
{
"conf": ExperimentConfig(MNIST(random_state=RANDOM_SEED), list(range(0, 6)), [6, 7], [6, 7]),
"prefix": f"MNIST_{RANDOM_SEED}_",
"path": BASE_PATH / f"mnist{FOLDER_SUFFIX}" / "models" / "a3",
"thresh": True
},
{
"conf": ExperimentConfig(MNIST(random_state=RANDOM_SEED), list(range(0, 6)), [6, 7],
list(range(6, 10))),
"prefix": f"MNIST_{RANDOM_SEED}_",
"path": BASE_PATH / f"mnist{FOLDER_SUFFIX}" / "models" / "a3",
"thresh": False
},
{
"conf": ExperimentConfig(MNIST(random_state=RANDOM_SEED), list(range(4, 10)), [0, 1], [0, 1]),
"prefix": f"MNIST_{RANDOM_SEED}_",
"path": BASE_PATH / f"mnist{FOLDER_SUFFIX}" / "models" / "a3",
"thresh": True
},
{
"conf": ExperimentConfig(MNIST(random_state=RANDOM_SEED), list(range(4, 10)), [0, 1],
list(range(0, 4))),
"prefix": f"MNIST_{RANDOM_SEED}_",
"path": BASE_PATH / f"mnist{FOLDER_SUFFIX}" / "models" / "a3",
"thresh": False
},
# CC Experiments
{
"conf": ExperimentConfig(CreditCard(random_state=RANDOM_SEED), [0], [1], [1]),
"prefix": f"CC_{RANDOM_SEED}_",
"path": BASE_PATH / f"creditcard{FOLDER_SUFFIX}" / "models" / "a3",
"thresh": True
},
# KDD experiments
{
"conf": ExperimentConfig(NSL_KDD(
random_state=RANDOM_SEED), ["normal"], ["DoS", "Probe"],
["DoS", "Probe"]
),
"prefix": f"NSL_KDD_{RANDOM_SEED}_",
"path": BASE_PATH / f"nsl_kdd{FOLDER_SUFFIX}" / "models" / "a3",
"thresh": True
},
{
"conf": ExperimentConfig(NSL_KDD(
random_state=RANDOM_SEED), ["normal"], ["DoS", "Probe"],
["DoS", "Probe", "R2L", "U2R"]
),
"prefix": f"NSL_KDD_{RANDOM_SEED}_",
"path": BASE_PATH / f"nsl_kdd{FOLDER_SUFFIX}" / "models" / "a3",
"thresh": False
},
{
"conf": ExperimentConfig(NSL_KDD(
random_state=RANDOM_SEED), ["normal"], ["R2L", "U2R"],
["R2L", "U2R"]
),
"prefix": f"NSL_KDD_{RANDOM_SEED}_",
"path": BASE_PATH / f"nsl_kdd{FOLDER_SUFFIX}" / "models" / "a3",
"thresh": True
},
{
"conf": ExperimentConfig(NSL_KDD(
random_state=RANDOM_SEED), ["normal"], ["R2L", "U2R"],
["DoS", "Probe", "R2L", "U2R"]
),
"prefix": f"NSL_KDD_{RANDOM_SEED}_",
"path": BASE_PATH / f"nsl_kdd{FOLDER_SUFFIX}" / "models" / "a3",
"thresh": False
},
# EMNIST experiments
{
"conf": ExperimentConfig(MNIST(
random_state=RANDOM_SEED, enrich_mnist_by=[10, 11, 12, 13, 14, 31, 32, 33, 34, 35],
), list(range(0, 10)), [10, 11, 12, 13, 14], [10, 11, 12, 13, 14]),
"prefix": f"MNIST_{RANDOM_SEED}_",
"path": BASE_PATH / f"mnist_emnist{FOLDER_SUFFIX}" / "models" / "a3",
"thresh": True
},
{
"conf": ExperimentConfig(MNIST(
random_state=RANDOM_SEED, enrich_mnist_by=[10, 11, 12, 13, 14, 31, 32, 33, 34, 35],
), list(range(0, 10)), [10, 11, 12, 13, 14], [10, 11, 12, 13, 14, 31, 32, 33, 34, 35]),
"prefix": f"MNIST_{RANDOM_SEED}_",
"path": BASE_PATH / f"mnist_emnist{FOLDER_SUFFIX}" / "models" / "a3",
"thresh": False
},
{
"conf": ExperimentConfig(MNIST(
random_state=RANDOM_SEED, enrich_mnist_by=[10, 11, 12, 13, 14, 31, 32, 33, 34, 35],
), list(range(0, 10)), [31, 32, 33, 34, 35], [31, 32, 33, 34, 35]),
"prefix": f"MNIST_{RANDOM_SEED}_",
"path": BASE_PATH / f"mnist_emnist{FOLDER_SUFFIX}" / "models" / "a3",
"thresh": True
},
{
"conf": ExperimentConfig(MNIST(
random_state=RANDOM_SEED, enrich_mnist_by=[10, 11, 12, 13, 14, 31, 32, 33, 34, 35],
), list(range(0, 10)), [31, 32, 33, 34, 35], [10, 11, 12, 13, 14, 31, 32, 33, 34, 35]),
"prefix": f"MNIST_{RANDOM_SEED}_",
"path": BASE_PATH / f"mnist_emnist{FOLDER_SUFFIX}" / "models" / "a3",
"thresh": False
},
# IDS experiments
{
"conf": ExperimentConfig(this_ids, ["Benign"], ["BruteForce", "DoS", "WebAttacks", "Infiltration"],
["BruteForce", "DoS", "WebAttacks", "Infiltration"]
),
"prefix": f"IDS_{RANDOM_SEED}_",
"path": BASE_PATH / f"ids{FOLDER_SUFFIX}" / "models" / "a3",
"thresh": True
},
{
"conf": ExperimentConfig(this_ids, ["Benign"], ["BruteForce", "DoS", "WebAttacks", "Infiltration"],
["BruteForce", "DoS", "WebAttacks", "Infiltration", "Bot"]
),
"prefix": f"IDS_{RANDOM_SEED}_",
"path": BASE_PATH / f"ids{FOLDER_SUFFIX}" / "models" / "a3",
"thresh": False
},
{
"conf": ExperimentConfig(this_ids, ["Benign"], ["Bot", "Infiltration", "WebAttacks", "DoS"],
["Bot", "Infiltration", "WebAttacks", "DoS"]
),
"prefix": f"IDS_{RANDOM_SEED}_",
"path": BASE_PATH / f"ids{FOLDER_SUFFIX}" / "models" / "a3",
"thresh": True
},
{
"conf": ExperimentConfig(this_ids, ["Benign"], ["Bot", "Infiltration", "WebAttacks", "DoS"],
["Bot", "Infiltration", "WebAttacks", "DoS", "BruteForce"]
),
"prefix": f"IDS_{RANDOM_SEED}_",
"path": BASE_PATH / f"ids{FOLDER_SUFFIX}" / "models" / "a3",
"thresh": False
},
]
# We must determine a threshold on the first entry, otherwise we get some Nones
assert MODEL_CONFIG[0]["thresh"] is True
# As we loop through all data sets, we might as well evaluate them.
# Columns: overall ranking metrics plus, for every configured maximum
# false-positive rate, the thresholded classification metrics.
column_names = ["AUC-ROC", "AUC-PR"]
for cur_fpr in MAX_FPR:
    column_names.extend([f"F1_{cur_fpr}", f"Precision_{cur_fpr}", f"Recall_{cur_fpr}"])
# Accumulates one row of metrics per evaluated model configuration.
all_results = pd.DataFrame(columns=column_names)
for cur_conf in MODEL_CONFIG:
# Due to DAGMM (not TF2 compatible), we have to reset tensorflow after each iteration
tf.compat.v1.reset_default_graph()
tf.compat.v1.enable_v2_behavior()
print(f"Currently evaluating {ExperimentWrapper.parse_name(cur_conf['conf'])}")
# Get output name
out_path = OUT_PATH / f"{cur_conf['prefix']}{ExperimentWrapper.parse_name(cur_conf['conf'])}"
# # Check if exists
# if out_path.with_suffix(".csv").exists():
# print("This has already been evaluated. Please delete the old output first.")
# continue
# Prepare ROC plot and data output
fig = plt.figure()
# We need an x and y column for all n_anomalies as well as the AE
dat = pd.DataFrame()
# Request validation (for the threshold) and test data (for the evaluation)
this_data = cur_conf["conf"].to_data()
# Determine the anomaly network
if "vae_layers" in cur_conf:
anomaly_net = VariationalAutoEncoder(
input_shape=this_data.data_shape,
layer_dims=cur_conf["vae_layers"]
)
else:
anomaly_net = RandomNoise()
# Combine | |
<gh_stars>0
"""
MIT License
Copyright (c) 2020 <NAME> - dominik.kopczynski {at} isas.de
<NAME> - nils.hoffmann {at} isas.de
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import unittest
import os
# Optional speed-up: compile the Cython modules on the fly.  "--force"
# makes pyximport rebuild them unconditionally.
try:
    import pyximport
    pyximport.install(setup_args = {"script_args" : ["--force"]}, language_level = 3)
except Exception:
    # Best-effort fallback to the pure-Python implementation.  A bare
    # "except:" would also swallow SystemExit/KeyboardInterrupt, so catch
    # Exception instead.
    print("Warning: cython module is not installed, parsing performance will be lower since pure python code will be applied.")
from pygoslin.parser.Parser import *
from pygoslin.parser.GoslinParserEventHandler import GoslinParserEventHandler
from pygoslin.parser.GoslinFragmentParserEventHandler import GoslinFragmentParserEventHandler
from pygoslin.parser.LipidMapsParserEventHandler import LipidMapsParserEventHandler
from pygoslin.domain.LipidLevel import LipidLevel
from pygoslin.domain.LipidMolecularSubspecies import LipidMolecularSubspecies
from pygoslin.domain.FattyAcid import FattyAcid
from pygoslin.domain.LipidFaBondType import LipidFaBondType
from random import randint
# Module-level parser instances shared by the test cases below.  Each one is
# constructed once at import time; presumably construction loads and compiles
# a grammar, which would make per-test construction wasteful — TODO confirm.
lipid_parser = LipidParser()
swiss_lipids_parser = SwissLipidsParser()
goslin_parser = GoslinParser()
goslin_fragment_parser = GoslinFragmentParser()
lipid_maps_parser = LipidMapsParser()
hmdb_parser = HmdbParser()
class ParserTest(unittest.TestCase):
    """Unit tests for the pygoslin lipid-name parsers."""

    # Quote character handed to Parser() when a parser is built directly from
    # a .g4 grammar file.
    PARSER_QUOTE = '\''
def test_lipid_parser(self):
global lipid_parser
lipid_name = "PE 16:1-12:0[M+H]1+"
lipid = lipid_parser.parse(lipid_name)
assert lipid != None
assert lipid.get_lipid_string() == "PE 16:1-12:0[M+H]1+"
lipid_name = "PA 16:1-12:0 - fragment"
lipid = lipid_parser.parse(lipid_name)
assert lipid != None
assert lipid.get_lipid_string() == "PA 16:1-12:0"
assert lipid.get_lipid_fragment_string() == "PA 16:1-12:0 - fragment"
lipid_name = "PE O-16:1p/12:0"
lipid = lipid_parser.parse(lipid_name)
assert lipid != None
assert lipid.get_lipid_string() == "PE O-16:1p/12:0"
lipid_name = "PAT16 16:1/12:0/14:1/8:0"
lipid = lipid_parser.parse(lipid_name)
assert lipid != None
assert lipid.get_lipid_string() == "PAT16 16:1/12:0/14:1/8:0"
lipid_name = "SLBPA 16:1/12:0/14:1"
lipid = lipid_parser.parse(lipid_name)
assert lipid != None
assert lipid.get_lipid_string() == "SLBPA 16:1/12:0/14:1"
lipid_name = "MLCL 16:1/12:0/14:1"
lipid = lipid_parser.parse(lipid_name)
assert lipid != None
assert lipid.get_lipid_string() == "MLCL 16:1/12:0/14:1"
lipid_name = "DLCL 14:1/8:0"
lipid = lipid_parser.parse(lipid_name)
assert lipid != None
assert lipid.get_lipid_string() == "DLCL 14:1/8:0"
lipid_name = "PE O-12:1p/10:0"
lipid = lipid_parser.parse(lipid_name)
assert lipid != None
assert lipid.get_lipid_string() == "PE O-12:1p/10:0"
lipid_name = "PE 12:1/10:0"
lipid = lipid_parser.parse(lipid_name)
assert lipid != None
assert lipid.get_lipid_string() == "PE 12:1/10:0"
lipid_name = "PIP[3'] 17:0/20:4(5Z,8Z,11Z,14Z)"
lipid = lipid_parser.parse(lipid_name)
assert lipid != None
assert lipid.get_lipid_string() == "PIP[3'] 17:0/20:4(5Z,8Z,11Z,14Z)"
lipid_name = "AC2SGL 12:0-14:1"
lipid = lipid_parser.parse(lipid_name)
assert lipid != None
assert lipid.get_lipid_string() == "AC2SGL 12:0-14:1"
lipid_name = "NAPE 16:1(6Z)/12:0/14:1"
lipid = lipid_parser.parse(lipid_name)
assert lipid != None
assert lipid.get_lipid_string() == "NAPE 16:1(6Z)/12:0/14:1"
lipid_name = "PE-NMe 12:1(6Z)/10:0"
lipid = lipid_parser.parse(lipid_name)
assert lipid != None
assert lipid.get_lipid_string() == "PE-NMe 12:1(6Z)/10:0"
lipid_name = "PIMIP 12:0-14:1"
lipid = lipid_parser.parse(lipid_name)
assert lipid != None
assert lipid.get_lipid_string() == "PIMIP 12:0-14:1"
lipid_name = "LCDPDAG 24:1"
lipid = lipid_parser.parse(lipid_name)
assert lipid != None
assert lipid.get_lipid_string() == "LCDPDAG 24:1"
lipid_name = "LPIMIP 10:0"
lipid = lipid_parser.parse(lipid_name)
assert lipid != None
assert lipid.get_lipid_string() == "LPIMIP 10:0"
def test_extended_class(self):
global lipid_parser
lipid_name = "PE O-16:1-12:0"
lipid = lipid_parser.parse(lipid_name)
assert lipid != None
assert lipid.get_extended_class() == "PE-O"
lipid_name = "LPE O-16:2p"
lipid = lipid_parser.parse(lipid_name)
assert lipid != None
assert lipid.get_extended_class() == "LPE-p"
lipid_name = "PC O-16:1p/12:0"
lipid = lipid_parser.parse(lipid_name)
assert lipid != None
assert lipid.get_extended_class() == "PC-p"
lipid_name = "PC O-16:1p"
lipid = lipid_parser.parse(lipid_name)
assert lipid != None
assert lipid.get_extended_class() == "PC-p"
lipid_name = "LPC O-16:1a"
lipid = lipid_parser.parse(lipid_name)
assert lipid != None
assert lipid.get_extended_class() == "LPC-O"
lipid_name = "LPC O-16:1"
lipid = lipid_parser.parse(lipid_name)
assert lipid != None
assert lipid.get_extended_class() == "LPC-O"
lipid_name = "LPC 16:1"
lipid = lipid_parser.parse(lipid_name)
assert lipid != None
assert lipid.get_extended_class() == "LPC"
lipid_name = "PC 16:1/12:2"
lipid = lipid_parser.parse(lipid_name)
assert lipid != None
assert lipid.get_extended_class() == "PC"
def test_lipid_maps(self):
lipid_maps_parser = LipidMapsParser()
for lipid_name_input, lipid_name_output in [["PA(16:1/12:0)", "PA 16:1/12:0"],
["PC(O-14:0/0:0)", "LPC O-14:0a"],
["SQMG(16:1(11Z)/0:0)", "SQMG 16:1(11Z)"],
["TG(13:0/22:3(10Z,13Z,16Z)/22:5(7Z,10Z,13Z,16Z,19Z))[iso6]", "TAG 13:0/22:3(10Z,13Z,16Z)/22:5(7Z,10Z,13Z,16Z,19Z)"],
["13R-HODE", "13R-HODE"],
["CL(1'-[20:0/20:0],3'-[20:4(5Z,8Z,11Z,14Z)/18:2(9Z,12Z)])", "CL 20:0/20:0/20:4(5Z,8Z,11Z,14Z)/18:2(9Z,12Z)"],
["PA(P-20:0/18:3(6Z,9Z,12Z))", "PA O-20:1p/18:3(6Z,9Z,12Z)"],
["M(IP)2C(t18:0/20:0(2OH))", "M(IP)2C 18:0;3/20:0;1"],
["Cer(d16:2(4E,6E)/22:0(2OH))", "Cer 16:2(4E,6E);2/22:0;1"],
["MG(18:1(11E)/0:0/0:0)[rac]", "MAG 18:1(11E)"],
["PAT18(24:1(2E)(2Me,4Me[S],6Me[S])/25:1(2E)(2Me,4Me[S],6Me[S])/26:1(2E)(2Me,4Me[S],6Me[S])/24:1(2E)(2Me,4Me[S],6Me[S]))", "PAT18 24:1(2E)/25:1(2E)/26:1(2E)/24:1(2E)"],
["(3'-sulfo)Galbeta-Cer(d18:1/20:0)", "SHexCer 18:1;2/20:0"],
["GlcCer(d15:2(4E,6E)/22:0(2OH))", "HexCer 15:2(4E,6E);2/22:0;1"]
]:
lipid = lipid_maps_parser.parse(lipid_name_input)
assert lipid_maps_parser.word_in_grammar
assert lipid != None
assert lipid.get_lipid_string() == lipid_name_output
@unittest.expectedFailure
def test_LP(self):
global lipid_parser
lipid = lipid_parser.parse("LP 19:1p")
def test_hydroxyls(self):
global goslin_parser, swiss_lipids_parser, lipid_maps_parser, hmdb_parser
lipid = swiss_lipids_parser.parse("Cer(d18:1(4E)/24:0-2OH)")
assert lipid != None
assert lipid.get_lipid_string() == "Cer 18:1(4E);2/24:0;1"
assert lipid.get_sum_formula() == "C42H83NO4"
assert abs(lipid.get_mass() - 665.632209) < 1e-3
lipid = swiss_lipids_parser.parse("Cer(d18:1(4E)/24:0(2OH))")
assert lipid != None
assert lipid.get_lipid_string() == "Cer 18:1(4E);2/24:0;1"
assert lipid.get_sum_formula() == "C42H83NO4"
assert abs(lipid.get_mass() - 665.632209) < 1e-3
lipid = lipid_maps_parser.parse("Cer(d18:1(4E)/24:0(2OH))")
assert lipid != None
assert lipid.get_lipid_string() == "Cer 18:1(4E);2/24:0;1"
assert lipid.get_sum_formula() == "C42H83NO4"
assert abs(lipid.get_mass() - 665.632209) < 1e-3
lipid = goslin_parser.parse("Cer 18:1(4E);2/24:0;1")
assert lipid != None
assert lipid.get_lipid_string() == "Cer 18:1(4E);2/24:0;1"
assert lipid.get_sum_formula() == "C42H83NO4"
assert abs(lipid.get_mass() - 665.632209) < 1e-3
lipid = hmdb_parser.parse("SM(d18:1/16:1(9Z)(OH))")
assert lipid != None
assert lipid.get_lipid_string() == "SM 18:1;2/16:1(9Z);1"
assert lipid.get_sum_formula() == "C39H77N2O7P"
assert abs(lipid.get_mass() - 716.546841) < 1e-3
def test_lyso(self):
global lipid_parser
lipid_name = "LPA O-16:1a"
lipid = lipid_parser.parse(lipid_name)
assert lipid != None
assert lipid.get_lipid_string() == "LPA O-16:1a"
lipid_name = "LPC O-16:1a"
lipid = lipid_parser.parse(lipid_name)
assert lipid != None
assert lipid.get_lipid_string() == "LPC O-16:1a"
lipid_name = "LPE O-16:1p"
lipid = lipid_parser.parse(lipid_name)
assert lipid != None
assert lipid.get_lipid_string() == "LPE O-16:1p"
@unittest.expectedFailure
def test_lpe_fail(self):
global lipid_parser
lipid_name = "LPE O-16:1p/12:0"
lipid = lipid_parser.parse(lipid_name)
@unittest.expectedFailure
def test_lipid_parser_fail(self):
global lipid_parser
lipid_name = "fail"
lipid = lipid_parser.parse(lipid_name)
def test_species_level(self):
global goslin_parser
lipid_name = "PG 22:1(5Z)/12:0"
lipid = goslin_parser.parse(lipid_name)
assert lipid != None
assert lipid.get_lipid_string(LipidLevel.ISOMERIC_SUBSPECIES) == "PG 22:1(5Z)/12:0"
assert lipid.get_lipid_string(LipidLevel.STRUCTURAL_SUBSPECIES) == "PG 22:1(5Z)/12:0"
assert lipid.get_lipid_string(LipidLevel.MOLECULAR_SUBSPECIES) == "PG 22:1(5Z)-12:0"
assert lipid.get_lipid_string(LipidLevel.SPECIES) == "PG 34:1"
assert lipid.get_lipid_string(LipidLevel.CLASS) == "PG"
assert lipid.get_lipid_string(LipidLevel.CATEGORY) == "GP"
lipid_name = "Cer 28:1;2"
lipid = goslin_parser.parse(lipid_name)
assert lipid != None
assert lipid.get_lipid_string() == "Cer 28:1;2"
lipid_name = "DAG 38:1"
lipid = goslin_parser.parse(lipid_name)
assert lipid != None
assert lipid.get_lipid_string() == "DAG 38:1"
def test_info_level(self):
global swiss_lipids_parser
global lipid_maps_parser
lipid_name = "PG(22:1(5Z)/12:0)"
lipid = swiss_lipids_parser.parse(lipid_name)
assert lipid != None
assert lipid.get_lipid_string(LipidLevel.ISOMERIC_SUBSPECIES) == "PG 22:1(5Z)/12:0"
assert lipid.get_lipid_string(LipidLevel.STRUCTURAL_SUBSPECIES) == "PG 22:1(5Z)/12:0"
assert lipid.get_lipid_string(LipidLevel.MOLECULAR_SUBSPECIES) == "PG 22:1(5Z)-12:0"
assert lipid.get_lipid_string(LipidLevel.SPECIES) == "PG 34:1"
assert lipid.get_lipid_string(LipidLevel.CLASS) == "PG"
assert lipid.get_lipid_string(LipidLevel.CATEGORY) == "GP"
lipid_name = "PG(22:1(5Z)/12:0)"
lipid = swiss_lipids_parser.parse(lipid_name)
assert lipid != None
assert lipid.lipid.info.level == LipidLevel.ISOMERIC_SUBSPECIES
assert lipid.get_lipid_string() == "PG 22:1(5Z)/12:0"
lipid_name = "PG(22:1/12:0)"
lipid = swiss_lipids_parser.parse(lipid_name)
assert lipid
assert lipid.lipid.info.level == LipidLevel.STRUCTURAL_SUBSPECIES
assert lipid.get_lipid_string() == "PG 22:1/12:0"
lipid_name = "PG(22:1_12:0)"
lipid = swiss_lipids_parser.parse(lipid_name)
assert lipid
assert lipid.lipid.info.level == LipidLevel.MOLECULAR_SUBSPECIES
assert lipid.get_lipid_string() == "PG 22:1-12:0"
lipid_name = "LPG(O-22:1)"
lipid = swiss_lipids_parser.parse(lipid_name)
assert lipid
assert lipid.lipid.info.level == LipidLevel.SPECIES
assert lipid.get_lipid_string() == "LPG O-22:1a"
lipid_name = "PG(22:1(5Z)/12:0)"
lipid = lipid_maps_parser.parse(lipid_name)
assert lipid != None
assert lipid.get_lipid_string(LipidLevel.ISOMERIC_SUBSPECIES) == "PG 22:1(5Z)/12:0"
assert lipid.get_lipid_string(LipidLevel.STRUCTURAL_SUBSPECIES) == "PG 22:1(5Z)/12:0"
assert lipid.get_lipid_string(LipidLevel.MOLECULAR_SUBSPECIES) == "PG 22:1(5Z)-12:0"
assert lipid.get_lipid_string(LipidLevel.SPECIES) == "PG 34:1"
assert lipid.get_lipid_string(LipidLevel.CLASS) == "PG"
assert lipid.get_lipid_string(LipidLevel.CATEGORY) == "GP"
def test_mediators(self):
global goslin_parser
for lipid_name in ["10-HDoHE","11-HDoHE","11-HETE","11,12-DHET","11(12)-EET", "12-HEPE","12-HETE","12-HHTrE","12-OxoETE","12(13)-EpOME","13-HODE","13-HOTrE","14,15-DHET","14(15)-EET","14(15)-EpETE","15-HEPE","15-HETE","15d-PGJ2","16-HDoHE","16-HETE","18-HEPE","5-HEPE","5-HETE","5-HpETE","5-OxoETE","5,12-DiHETE","5,6-DiHETE","5,6,15-LXA4","5(6)-EET","8-HDoHE","8-HETE","8,9-DHET","8(9)-EET","9-HEPE","9-HETE","9-HODE","9-HOTrE","9(10)-EpOME","AA","alpha-LA","DHA","EPA","Linoleic acid","LTB4","LTC4","LTD4","Maresin 1","Palmitic acid","PGB2","PGD2","PGE2","PGF2alpha","PGI2","Resolvin D1","Resolvin D2","Resolvin D3","Resolvin D5","tetranor-12-HETE","TXB1","TXB2","TXB3"]:
lipid = goslin_parser.parse(lipid_name)
assert lipid != None
n = lipid.lipid.get_lipid_string()
assert lipid.get_lipid_string() == lipid_name
@unittest.expectedFailure
def test_lipid_fragment_fail(self):
goslin_parser = Parser(GoslinParserEventHandler(), "pygoslin/data/goslin/Goslin.g4", ParserTest.PARSER_QUOTE)
lipid_name = "PE 16:1-12:0 - -(H20)"
lipid = goslin_parser.parse(lipid_name)
assert lipid != None
def test_lipid_fragment_success(self):
goslin_fragment_parser = Parser(GoslinFragmentParserEventHandler(), "pygoslin/data/goslin/GoslinFragments.g4", ParserTest.PARSER_QUOTE)
lipid_name = "PE 16:1-12:0 - -(H20)"
lipid = goslin_fragment_parser.parse(lipid_name)
assert lipid != None
assert lipid.fragment != None
assert lipid.fragment.name | |
b"A"),
(b"16", 16, 132255, 0x0002049f, 0x08b80000, 0x8001024f88b80000, b"8001024f88b80000", b"C", b"T"),
(b"16", 16, 132256, 0x000204a0, 0x08c00000, 0x8001025008c00000, b"8001025008c00000", b"G", b"A"),
(b"16", 16, 132328, 0x000204e8, 0x08b80000, 0x8001027408b80000, b"8001027408b80000", b"C", b"T"),
(b"16", 16, 132378, 0x0002051a, 0x08900000, 0x8001028d08900000, b"8001028d08900000", b"A", b"G"),
(b"16", 16, 132402, 0x00020532, 0x08d80000, 0x8001029908d80000, b"8001029908d80000", b"G", b"T"),
(b"16", 16, 132430, 0x0002054e, 0x08e80000, 0x800102a708e80000, b"800102a708e80000", b"T", b"C"),
(b"16", 16, 132463, 0x0002056f, 0x08b80000, 0x800102b788b80000, b"800102b788b80000", b"C", b"T"),
(b"16", 16, 132464, 0x00020570, 0x08c00000, 0x800102b808c00000, b"800102b808c00000", b"G", b"A"),
(b"16", 16, 132504, 0x00020598, 0x08d80000, 0x800102cc08d80000, b"800102cc08d80000", b"G", b"T"),
(b"16", 16, 132509, 0x0002059d, 0x08c00000, 0x800102ce88c00000, b"800102ce88c00000", b"G", b"A"),
(b"16", 16, 132527, 0x000205af, 0x08b80000, 0x800102d788b80000, b"800102d788b80000", b"C", b"T"),
(b"16", 16, 132535, 0x000205b7, 0x08c00000, 0x800102db88c00000, b"800102db88c00000", b"G", b"A"),
(b"16", 16, 132559, 0x000205cf, 0x08d80000, 0x800102e788d80000, b"800102e788d80000", b"G", b"T"),
(b"16", 16, 132595, 0x000205f3, 0x08b80000, 0x800102f988b80000, b"800102f988b80000", b"C", b"T"),
(b"16", 16, 132627, 0x00020613, 0x08c00000, 0x8001030988c00000, b"8001030988c00000", b"G", b"A"),
(b"16", 16, 132648, 0x00020628, 0x08c00000, 0x8001031408c00000, b"8001031408c00000", b"G", b"A"),
(b"16", 16, 132654, 0x0002062e, 0x08b80000, 0x8001031708b80000, b"8001031708b80000", b"C", b"T"),
(b"17", 17, 66884, 0x00010544, 0x08c00000, 0x880082a208c00000, b"880082a208c00000", b"G", b"A"),
(b"17", 17, 66886, 0x00010546, 0x08900000, 0x880082a308900000, b"880082a308900000", b"A", b"G"),
(b"17", 17, 66887, 0x00010547, 0x08b80000, 0x880082a388b80000, b"880082a388b80000", b"C", b"T"),
(b"17", 17, 66901, 0x00010555, 0x08c00000, 0x880082aa88c00000, b"880082aa88c00000", b"G", b"A"),
(b"17", 17, 66928, 0x00010570, 0x08880000, 0x880082b808880000, b"880082b808880000", b"A", b"C"),
(b"17", 17, 66936, 0x00010578, 0x08e00000, 0x880082bc08e00000, b"880082bc08e00000", b"T", b"A"),
(b"17", 17, 66939, 0x0001057b, 0x08c80000, 0x880082bd88c80000, b"880082bd88c80000", b"G", b"C"),
(b"17", 17, 66970, 0x0001059a, 0x08b80000, 0x880082cd08b80000, b"880082cd08b80000", b"C", b"T"),
(b"17", 17, 66971, 0x0001059b, 0x08f00000, 0x880082cd88f00000, b"880082cd88f00000", b"T", b"G"),
(b"17", 17, 66988, 0x000105ac, 0x08900000, 0x880082d608900000, b"880082d608900000", b"A", b"G"),
(b"17", 17, 66995, 0x000105b3, 0x10c40000, 0x880082d990c40000, b"880082d990c40000", b"GA", b"G"),
(b"17", 17, 66997, 0x000105b5, 0x08b80000, 0x880082da88b80000, b"880082da88b80000", b"C", b"T"),
(b"17", 17, 67010, 0x000105c2, 0x08e80000, 0x880082e108e80000, b"880082e108e80000", b"T", b"C"),
(b"17", 17, 67028, 0x000105d4, 0x20de4000, 0x880082ea20de4000, b"880082ea20de4000", b"GTTA", b"G"),
(b"17", 17, 67036, 0x000105dc, 0x08c00000, 0x880082ee08c00000, b"880082ee08c00000", b"G", b"A"),
(b"17", 17, 67041, 0x000105e1, 0x08b00000, 0x880082f088b00000, b"880082f088b00000", b"C", b"G"),
(b"17", 17, 67065, 0x000105f9, 0x08c00000, 0x880082fc88c00000, b"880082fc88c00000", b"G", b"A"),
(b"17", 17, 67071, 0x000105ff, 0x08c00000, 0x880082ff88c00000, b"880082ff88c00000", b"G", b"A"),
(b"17", 17, 67119, 0x0001062f, 0x08e80000, 0x8800831788e80000, b"8800831788e80000", b"T", b"C"),
(b"17", 17, 67138, 0x00010642, 0x5df7000b, 0x880083215df7000b, b"880083215df7000b", b"CATTAAAATTGGCCTCACTGAAATCAGGACCCCCAAGGCATTTCGTTCCCATCTCGGGCAGTTATTACAGAGCCCTTCTTCTGTGCCAAAATTGGCCTCGCTGAAATCAGGACCCCCAAGGCATTTCATTCCCATCTGGGGCAGTTATTACAGAGCCCTTCTTCTGCGC", b"CAT"),
(b"17", 17, 67139, 0x00010643, 0x08900000, 0x8800832188900000, b"8800832188900000", b"A", b"G"),
(b"17", 17, 67140, 0x00010644, 0x08e80000, 0x8800832208e80000, b"8800832208e80000", b"T", b"C"),
(b"17", 17, 67158, 0x00010656, 0x08900000, 0x8800832b08900000, b"8800832b08900000", b"A", b"G"),
(b"17", 17, 67165, 0x0001065d, 0x09500000, 0x8800832e89500000, b"8800832e89500000", b"G", b"GA"),
(b"17", 17, 67165, 0x0001065d, 0x10c40000, 0x8800832e90c40000, b"8800832e90c40000", b"GA", b"G"),
(b"17", 17, 67181, 0x0001066d, 0x08b80000, 0x8800833688b80000, b"8800833688b80000", b"C", b"T"),
(b"17", 17, 67192, 0x00010678, 0x08b00000, 0x8800833c08b00000, b"8800833c08b00000", b"C", b"G"),
(b"18", 18, 85036, 0x00014c2c, 0x08b80000, 0x9000a61608b80000, b"9000a61608b80000", b"C", b"T"),
(b"18", 18, 85047, 0x00014c37, 0x08b80000, 0x9000a61b88b80000, b"9000a61b88b80000", b"C", b"T"),
(b"18", 18, 85068, 0x00014c4c, 0x08c00000, 0x9000a62608c00000, b"9000a62608c00000", b"G", b"A"),
(b"18", 18, 85092, 0x00014c64, 0x08e00000, 0x9000a63208e00000, b"9000a63208e00000", b"T", b"A"),
(b"18", 18, 85094, 0x00014c66, 0x08b80000, 0x9000a63308b80000, b"9000a63308b80000", b"C", b"T"),
(b"18", 18, 85112, 0x00014c78, 0x08b00000, 0x9000a63c08b00000, b"9000a63c08b00000", b"C", b"G"),
(b"18", 18, 85127, 0x00014c87, 0x08d80000, 0x9000a64388d80000, b"9000a64388d80000", b"G", b"T"),
(b"18", 18, 85135, 0x00014c8f, 0x08c00000, 0x9000a64788c00000, b"9000a64788c00000", b"G", b"A"),
(b"18", 18, 85144, 0x00014c98, 0x08b80000, 0x9000a64c08b80000, b"9000a64c08b80000", b"C", b"T"),
(b"18", 18, 85145, 0x00014c99, 0x08900000, 0x9000a64c88900000, b"9000a64c88900000", b"A", b"G"),
(b"18", 18, 85148, 0x00014c9c, 0x08b80000, 0x9000a64e08b80000, b"9000a64e08b80000", b"C", b"T"),
(b"18", 18, 85156, 0x00014ca4, 0x08b80000, 0x9000a65208b80000, b"9000a65208b80000", b"C", b"T"),
(b"18", 18, 85158, 0x00014ca6, 0x08b80000, 0x9000a65308b80000, b"9000a65308b80000", b"C", b"T"),
(b"18", 18, 85176, 0x00014cb8, 0x188e0000, 0x9000a65c188e0000, b"9000a65c188e0000", b"ACT", b"A"),
(b"18", 18, 85187, 0x00014cc3, 0x08b80000, 0x9000a66188b80000, b"9000a66188b80000", b"C", b"T"),
(b"18", 18, 85199, 0x00014ccf, 0x20a42000, 0x9000a667a0a42000, b"9000a667a0a42000", b"CAGA", b"C"),
(b"18", 18, 85206, 0x00014cd6, 0x10880000, 0x9000a66b10880000, b"9000a66b10880000", b"AC", b"A"),
(b"18", 18, 85208, 0x00014cd8, 0x08900000, 0x9000a66c08900000, b"9000a66c08900000", b"A", b"G"),
(b"18", 18, 85229, 0x00014ced, 0x08c00000, 0x9000a67688c00000, b"9000a67688c00000", b"G", b"A"),
(b"18", 18, 85234, 0x00014cf2, 0x08b80000, 0x9000a67908b80000, b"9000a67908b80000", b"C", b"T"),
(b"18", 18, 85236, 0x00014cf4, 0x08c80000, 0x9000a67a08c80000, b"9000a67a08c80000", b"G", b"C"),
(b"18", 18, 85238, 0x00014cf6, 0x0ad49800, 0x9000a67b0ad49800, b"9000a67b0ad49800", b"G", b"GGCAT"),
(b"18", 18, 85238, 0x00014cf6, 0x08c00000, 0x9000a67b08c00000, b"9000a67b08c00000", b"G", b"A"),
(b"18", 18, 85248, 0x00014d00, 0x08e80000, 0x9000a68008e80000, b"9000a68008e80000", b"T", b"C"),
(b"18", 18, 85272, 0x00014d18, 0x08a00000, 0x9000a68c08a00000, b"9000a68c08a00000", b"C", b"A"),
(b"18", 18, 85277, 0x00014d1d, 0x08b80000, 0x9000a68e88b80000, b"9000a68e88b80000", b"C", b"T"),
(b"18", 18, 85278, 0x00014d1e, 0x08c00000, 0x9000a68f08c00000, b"9000a68f08c00000", b"G", b"A"),
(b"18", 18, 85323, 0x00014d4b, 0x08b80000, 0x9000a6a588b80000, b"9000a6a588b80000", b"C", b"T"),
(b"19", 19, 281247, 0x00044a9f, 0x08c00000, 0x9802254f88c00000, b"9802254f88c00000", b"G", b"A"),
(b"19", 19, 281254, 0x00044aa6, 0x08c00000, 0x9802255308c00000, b"9802255308c00000", b"G", b"A"),
(b"19", 19, 281296, 0x00044ad0, 0x08d80000, 0x9802256808d80000, b"9802256808d80000", b"G", b"T"),
(b"19", 19, 281298, 0x00044ad2, 0x08c00000, 0x9802256908c00000, b"9802256908c00000", b"G", b"A"),
(b"19", 19, 281316, 0x00044ae4, 0x08c00000, 0x9802257208c00000, b"9802257208c00000", b"G", b"A"),
(b"19", 19, 281320, 0x00044ae8, 0x08e80000, 0x9802257408e80000, b"9802257408e80000", b"T", b"C"),
(b"19", 19, 281324, 0x00044aec, 0x08c80000, 0x9802257608c80000, b"9802257608c80000", b"G", b"C"),
(b"19", 19, 281336, 0x00044af8, 0x08c00000, 0x9802257c08c00000, b"9802257c08c00000", b"G", b"A"),
(b"19", 19, 281342, 0x00044afe, 0x08a00000, 0x9802257f08a00000, b"9802257f08a00000", b"C", b"A"),
(b"19", 19, 281354, 0x00044b0a, 0x08b80000, 0x9802258508b80000, b"9802258508b80000", b"C", b"T"),
(b"19", 19, 281356, 0x00044b0c, 0x08b80000, 0x9802258608b80000, b"9802258608b80000", b"C", b"T"),
(b"19", 19, 281359, 0x00044b0f, 0x08b80000, 0x9802258788b80000, b"9802258788b80000", b"C", b"T"),
(b"19", 19, 281362, 0x00044b12, 0x08b00000, 0x9802258908b00000, b"9802258908b00000", b"C", b"G"),
(b"19", 19, 281367, 0x00044b17, 0x08f00000, 0x9802258b88f00000, b"9802258b88f00000", b"T", b"G"),
(b"19", 19, 281371, 0x00044b1b, 0x08e80000, 0x9802258d88e80000, b"9802258d88e80000", b"T", b"C"),
(b"19", 19, 281375, 0x00044b1f, 0x08b80000, 0x9802258f88b80000, b"9802258f88b80000", b"C", b"T"),
(b"19", 19, 281382, 0x00044b26, 0x08b00000, 0x9802259308b00000, b"9802259308b00000", b"C", b"G"),
(b"19", 19, 281383, 0x00044b27, 0x08c00000, 0x9802259388c00000, b"9802259388c00000", b"G", b"A"),
(b"19", 19, 281394, 0x00044b32, 0x08c00000, 0x9802259908c00000, b"9802259908c00000", b"G", b"A"),
(b"19", 19, 281402, 0x00044b3a, 0x08b80000, 0x9802259d08b80000, b"9802259d08b80000", b"C", b"T"),
(b"19", 19, 281403, 0x00044b3b, 0x08c00000, 0x9802259d88c00000, b"9802259d88c00000", b"G", b"A"),
(b"19", 19, 281404, 0x00044b3c, 0x08c00000, 0x9802259e08c00000, b"9802259e08c00000", b"G", b"A"),
(b"19", 19, 281412, 0x00044b44, 0x08e80000, 0x980225a208e80000, b"980225a208e80000", b"T", b"C"),
(b"19", 19, 281414, 0x00044b46, 0x08c80000, 0x980225a308c80000, b"980225a308c80000", b"G", b"C"),
(b"19", 19, 281421, 0x00044b4d, 0x08e80000, 0x980225a688e80000, b"980225a688e80000", b"T", b"C"),
(b"19", 19, 281423, 0x00044b4f, 0x08d80000, 0x980225a788d80000, b"980225a788d80000", b"G", b"T"),
(b"19", 19, 281429, 0x00044b55, 0x08b80000, 0x980225aa88b80000, b"980225aa88b80000", b"C", b"T"),
(b"19", 19, 281432, 0x00044b58, 0x08c00000, 0x980225ac08c00000, b"980225ac08c00000", b"G", b"A"),
(b"19", 19, 281434, 0x00044b5a, 0x08b80000, 0x980225ad08b80000, b"980225ad08b80000", b"C", b"T"),
(b"20", 20, 152849, 0x00025511, 0x08b80000, 0xa0012a8888b80000, b"a0012a8888b80000", b"C", b"T"),
(b"20", 20, 152863, 0x0002551f, 0x08e00000, 0xa0012a8f88e00000, b"a0012a8f88e00000", b"T", b"A"),
(b"20", 20, 152869, 0x00025525, 0x08c00000, 0xa0012a9288c00000, b"a0012a9288c00000", b"G", b"A"),
(b"20", 20, 152883, 0x00025533, 0x08c00000, 0xa0012a9988c00000, b"a0012a9988c00000", b"G", b"A"),
(b"20", 20, 152898, 0x00025542, 0x08d80000, 0xa0012aa108d80000, b"a0012aa108d80000", b"G", b"T"),
(b"20", 20, 152907, 0x0002554b, 0x08c00000, 0xa0012aa588c00000, b"a0012aa588c00000", b"G", b"A"),
(b"20", 20, 152914, 0x00025552, 0x08b80000, 0xa0012aa908b80000, b"a0012aa908b80000", b"C", b"T"),
(b"20", 20, 152946, 0x00025572, 0x08c00000, 0xa0012ab908c00000, b"a0012ab908c00000", b"G", b"A"),
(b"20", 20, 152971, 0x0002558b, 0x08e80000, 0xa0012ac588e80000, b"a0012ac588e80000", b"T", b"C"),
(b"20", 20, 152983, 0x00025597, 0x08b80000, 0xa0012acb88b80000, b"a0012acb88b80000", b"C", b"T"),
(b"20", 20, 152991, 0x0002559f, 0x08b00000, 0xa0012acf88b00000, b"a0012acf88b00000", b"C", b"G"),
(b"20", 20, 152996, 0x000255a4, 0x08c00000, 0xa0012ad208c00000, b"a0012ad208c00000", b"G", b"A"),
(b"20", 20, 153004, 0x000255ac, 0x08c00000, 0xa0012ad608c00000, b"a0012ad608c00000", b"G", b"A"),
(b"20", 20, 153005, 0x000255ad, 0x08b80000, 0xa0012ad688b80000, b"a0012ad688b80000", b"C", b"T"),
(b"20", 20, 153006, 0x000255ae, 0x08c00000, 0xa0012ad708c00000, b"a0012ad708c00000", b"G", b"A"),
(b"20", 20, 153022, 0x000255be, 0x08c00000, 0xa0012adf08c00000, b"a0012adf08c00000", b"G", b"A"),
(b"20", 20, 153056, 0x000255e0, 0x08e80000, 0xa0012af008e80000, b"a0012af008e80000", b"T", b"C"),
(b"20", 20, 153098, 0x0002560a, 0x08b80000, 0xa0012b0508b80000, b"a0012b0508b80000", b"C", b"T"),
(b"20", 20, 153138, 0x00025632, 0x08b80000, 0xa0012b1908b80000, b"a0012b1908b80000", b"C", b"T"),
(b"20", 20, 153139, 0x00025633, 0x08f00000, 0xa0012b1988f00000, b"a0012b1988f00000", b"T", b"G"),
(b"20", 20, 153150, 0x0002563e, 0x08900000, 0xa0012b1f08900000, b"a0012b1f08900000", b"A", b"G"),
(b"20", 20, 153153, 0x00025641, 0x08900000, 0xa0012b2088900000, b"a0012b2088900000", b"A", b"G"),
(b"20", 20, 153168, 0x00025650, 0x08900000, 0xa0012b2808900000, b"a0012b2808900000", b"A", b"G"),
(b"20", 20, 153266, 0x000256b2, 0x08e80000, 0xa0012b5908e80000, b"a0012b5908e80000", b"T", b"C"),
(b"20", 20, 153284, 0x000256c4, 0x08b80000, 0xa0012b6208b80000, b"a0012b6208b80000", b"C", b"T"),
(b"20", 20, 153285, 0x000256c5, 0x08c00000, 0xa0012b6288c00000, b"a0012b6288c00000", b"G", b"A"),
(b"20", 20, 153291, 0x000256cb, 0x289fa000, 0xa0012b65a89fa000, b"a0012b65a89fa000", b"ATTTC", b"A"),
(b"20", 20, 153295, 0x000256cf, 0x30bfda00, 0xa0012b67b0bfda00, b"a0012b67b0bfda00", b"CTTTGT", b"C"),
(b"20", 20, 153297, 0x000256d1, 0x08e80000, 0xa0012b6888e80000, b"a0012b6888e80000", b"T", b"C"),
(b"20", 20, 153297, 0x000256d1, 0x097a0000, 0xa0012b68897a0000, b"a0012b68897a0000", b"T", b"TC"),
(b"21", 21, 10800149, 0x00a4cc15, 0x08c00000, 0xa852660a88c00000, b"a852660a88c00000", b"G", b"A"),
(b"21", 21, 10800159, 0x00a4cc1f, 0x08c00000, 0xa852660f88c00000, b"a852660f88c00000", b"G", b"A"),
(b"21", 21, 10800162, 0x00a4cc22, 0x08980000, 0xa852661108980000, b"a852661108980000", b"A", b"T"),
(b"21", 21, 10800175, 0x00a4cc2f, 0x08a00000, 0xa852661788a00000, b"a852661788a00000", b"C", b"A"),
(b"21", 21, 10800177, 0x00a4cc31, 0x08900000, 0xa852661888900000, b"a852661888900000", b"A", b"G"),
(b"21", 21, 10800179, 0x00a4cc33, 0x08c00000, 0xa852661988c00000, b"a852661988c00000", b"G", b"A"),
(b"21", 21, 10800200, 0x00a4cc48, 0x08c00000, 0xa852662408c00000, b"a852662408c00000", b"G", b"A"),
(b"21", 21, 10800209, 0x00a4cc51, 0x08c00000, 0xa852662888c00000, b"a852662888c00000", b"G", b"A"),
(b"21", 21, 10800214, 0x00a4cc56, 0x08b80000, 0xa852662b08b80000, b"a852662b08b80000", b"C", b"T"),
(b"21", 21, 10800225, 0x00a4cc61, 0x08c00000, 0xa852663088c00000, b"a852663088c00000", b"G", b"A"),
(b"21", 21, 10800242, 0x00a4cc72, 0x08c80000, 0xa852663908c80000, b"a852663908c80000", b"G", b"C"),
(b"21", | |
0.1840, 0.4687, 0.0446, 0.5272, 0.2382, -0.1124],
[-0.1656, -0.0105, 0.1899, 0.0590, 0.2222, 0.0170, -0.2002],
[-0.1969, -0.1753, -0.0796, -0.1840, -0.0689, -0.1621, -0.2494]],
[[-0.1602, -0.1039, -0.0176, -0.0856, 0.0039, -0.0726, -0.1536],
[-0.0923, 0.0348, 0.2366, 0.1703, 0.2698, 0.0873, -0.0800],
[-0.0294, 0.1735, 0.4822, 0.2155, 0.5432, 0.2428, -0.0207],
[-0.1548, 0.0148, 0.0938, -0.5813, 0.1622, 0.0895, -0.1405],
[-0.0420, 0.1480, 0.3519, -0.0164, 0.4030, 0.2037, -0.0245],
[-0.0833, 0.0013, 0.1317, 0.0175, 0.1614, 0.0347, -0.0946],
[-0.0872, -0.0965, -0.0377, -0.1285, -0.0278, -0.0795, -0.1208]],
[[-0.0992, -0.0845, -0.0436, -0.1067, -0.0231, -0.0653, -0.1092],
[-0.0554, -0.0030, 0.1150, 0.0344, 0.1440, 0.0310, -0.0667],
[-0.0189, 0.0792, 0.2779, 0.0239, 0.3277, 0.1258, -0.0357],
[-0.1332, -0.0789, -0.0770, -0.5205, -0.0130, -0.0233, -0.1462],
[-0.0319, 0.0514, 0.1537, -0.1904, 0.2039, 0.0922, -0.0436],
[-0.0469, -0.0360, 0.0212, -0.0998, 0.0549, -0.0134, -0.0840],
[-0.0216, -0.0706, -0.0475, -0.1278, -0.0355, -0.0638, -0.0796]]],
[[[-0.0101, -0.0666, -0.0535, -0.1586, -0.1162, -0.0506, 0.0681],
[-0.0261, -0.1030, -0.0093, -0.0851, -0.1213, -0.1032, 0.0767],
[-0.0571, -0.1893, 0.0737, 0.0151, -0.0537, -0.1927, 0.0625],
[-0.1086, -0.3115, -0.2670, 0.0720, -0.4147, -0.3294, 0.0200],
[-0.0146, -0.1566, -0.1850, -0.4709, -0.2806, -0.1781, 0.0895],
[-0.0134, -0.0923, -0.0948, -0.2101, -0.1173, -0.0734, 0.0592],
[0.0716, -0.0123, 0.0375, 0.0021, 0.0691, 0.0320, 0.1131]],
[[0.0499, 0.0655, 0.1020, 0.0020, 0.0364, 0.0478, 0.0807],
[0.1124, 0.1200, 0.2327, 0.1548, 0.1173, 0.0773, 0.1549],
[0.1250, 0.0855, 0.3481, 0.2514, 0.1976, 0.0147, 0.1544],
[0.0666, -0.0505, -0.0267, 0.2032, -0.1979, -0.1322, 0.1035],
[0.1716, 0.1224, 0.1037, -0.2229, -0.0135, 0.0513, 0.2036],
[0.1333, 0.1516, 0.1687, 0.0253, 0.1166, 0.1217, 0.1455],
[0.1263, 0.1361, 0.2108, 0.1505, 0.2050, 0.1324, 0.1161]],
[[0.0896, 0.0162, -0.0085, -0.0998, -0.0468, 0.0299, 0.1535],
[0.0868, -0.0380, -0.0203, -0.1008, -0.1079, -0.0548, 0.1513],
[0.0422, -0.1618, -0.0108, -0.0951, -0.1250, -0.1986, 0.0991],
[-0.0319, -0.3257, -0.4103, -0.1099, -0.5384, -0.3648, 0.0378],
[0.0707, -0.1412, -0.2830, -0.6167, -0.3756, -0.1858, 0.1257],
[0.0932, -0.0135, -0.0888, -0.2427, -0.1235, -0.0279, 0.1184],
[0.1653, 0.0948, 0.1230, 0.0737, 0.1317, 0.1026, 0.1608]]],
[[[0.0066, -0.0242, -0.0136, -0.1578, -0.0372, -0.0241, 0.0257],
[0.0047, -0.0665, 0.0376, -0.0989, 0.0304, -0.0713, 0.0001],
[0.0043, -0.0818, 0.2166, 0.0540, 0.2827, -0.0504, -0.0181],
[-0.1103, -0.2681, -0.1715, -0.2849, -0.0963, -0.2396, -0.1298],
[0.0359, -0.0476, 0.0448, -0.2982, 0.0705, -0.0529, 0.0294],
[0.0478, -0.0422, -0.0303, -0.2225, -0.0058, -0.0527, 0.0283],
[0.1137, 0.0410, 0.0547, -0.0451, 0.0908, 0.0469, 0.0959]],
[[-0.0201, -0.0532, -0.0532, -0.1929, -0.0654, -0.0540, -0.0149],
[-0.0293, -0.0954, 0.0042, -0.1147, 0.0154, -0.0939, -0.0423],
[-0.0428, -0.1076, 0.2131, 0.0799, 0.2977, -0.0665, -0.0695],
[-0.1807, -0.3099, -0.1839, -0.2975, -0.0973, -0.2698, -0.1969],
[-0.0568, -0.1277, -0.0191, -0.3510, 0.0117, -0.1214, -0.0503],
[-0.0585, -0.1401, -0.1356, -0.3301, -0.1168, -0.1496, -0.0669],
[0.0098, -0.0504, -0.0437, -0.1575, -0.0246, -0.0524, -0.0019]],
[[-0.1005, -0.0734, -0.0158, -0.1182, -0.0114, -0.0576, -0.0857],
[-0.0510, 0.0091, 0.2302, 0.1742, 0.2623, 0.0304, -0.0533],
[-0.0107, 0.1154, 0.6176, 0.5798, 0.7246, 0.1815, -0.0196],
[-0.1116, -0.0197, 0.3286, 0.3583, 0.4384, 0.0446, -0.1107],
[0.0117, 0.1432, 0.4441, 0.2151, 0.4902, 0.1610, 0.0262],
[-0.0278, 0.0297, 0.1646, 0.0401, 0.1932, 0.0166, -0.0420],
[-0.0255, -0.0216, 0.0491, -0.0242, 0.0681, -0.0390, -0.0538]]]]
# ssim v2
# Learned convolution kernel bank, nested list of shape [3][3][7][7]:
# [output channel][input channel][kernel row][kernel col].
# NOTE(review): each 7x7 kernel has a dominant center weight (~1.0), i.e.
# near-delta kernels -- presumably trained with an SSIM objective ("ssim v2"),
# but the provenance is not visible here; confirm before editing values.
window_v15 = [[[[-0.0243, -0.0190, 0.0012, 0.0023, -0.0078, -0.0269, -0.0307],
[-0.0295, -0.0055, 0.0324, 0.0380, 0.0221, -0.0148, -0.0343],
[-0.0163, 0.0287, 0.0926, 0.1078, 0.0855, 0.0208, -0.0182],
[-0.0111, 0.0429, 0.1166, 1.1134, 0.1136, 0.0373, -0.0098],
[-0.0104, 0.0364, 0.1037, 0.1255, 0.1022, 0.0302, -0.0091],
[-0.0184, 0.0088, 0.0473, 0.0584, 0.0429, 0.0012, -0.0200],
[-0.0075, -0.0021, 0.0158, 0.0207, 0.0140, -0.0064, -0.0124]],
[[-0.0424, -0.0503, -0.0392, -0.0388, -0.0420, -0.0462, -0.0339],
[-0.0647, -0.0643, -0.0423, -0.0393, -0.0471, -0.0610, -0.0538],
[-0.0621, -0.0502, -0.0091, 0.0016, -0.0121, -0.0482, -0.0521],
[-0.0603, -0.0438, 0.0036, 0.9954, 0.0032, -0.0418, -0.0493],
[-0.0584, -0.0491, -0.0087, 0.0042, -0.0091, -0.0480, -0.0475],
[-0.0555, -0.0584, -0.0437, -0.0399, -0.0457, -0.0596, -0.0491],
[-0.0266, -0.0400, -0.0377, -0.0384, -0.0390, -0.0409, -0.0265]],
[[-0.0241, -0.0319, -0.0228, -0.0208, -0.0239, -0.0275, -0.0145],
[-0.0453, -0.0521, -0.0391, -0.0379, -0.0420, -0.0480, -0.0332],
[-0.0451, -0.0481, -0.0232, -0.0157, -0.0246, -0.0460, -0.0351],
[-0.0464, -0.0493, -0.0194, 0.9754, -0.0196, -0.0487, -0.0376],
[-0.0479, -0.0572, -0.0350, -0.0266, -0.0356, -0.0584, -0.0414],
[-0.0465, -0.0630, -0.0595, -0.0589, -0.0620, -0.0655, -0.0446],
[-0.0187, -0.0394, -0.0416, -0.0423, -0.0430, -0.0413, -0.0237]]],
[[[-0.0472, -0.0503, -0.0450, -0.0488, -0.0512, -0.0498, -0.0409],
[-0.0564, -0.0525, -0.0416, -0.0391, -0.0467, -0.0544, -0.0519],
[-0.0604, -0.0495, -0.0211, -0.0008, -0.0238, -0.0509, -0.0561],
[-0.0619, -0.0469, 0.0025, 1.0544, 0.0026, -0.0431, -0.0523],
[-0.0642, -0.0571, -0.0285, -0.0014, -0.0226, -0.0500, -0.0509],
[-0.0584, -0.0553, -0.0459, -0.0390, -0.0448, -0.0503, -0.0473],
[-0.0464, -0.0493, -0.0449, -0.0409, -0.0425, -0.0448, -0.0398]],
[[-0.0184, -0.0108, 0.0014, -0.0031, -0.0064, -0.0121, -0.0136],
[-0.0112, 0.0098, 0.0316, 0.0343, 0.0244, 0.0059, -0.0071],
[-0.0007, 0.0312, 0.0688, 0.0854, 0.0622, 0.0236, -0.0028],
[0.0012, 0.0383, 0.0946, 1.1321, 0.0885, 0.0326, 0.0007],
[-0.0022, 0.0271, 0.0668, 0.0900, 0.0659, 0.0255, 0.0019],
[-0.0100, 0.0120, 0.0317, 0.0383, 0.0280, 0.0100, -0.0051],
[-0.0161, -0.0059, 0.0051, 0.0089, 0.0053, -0.0053, -0.0127]],
[[-0.0212, -0.0258, -0.0263, -0.0358, -0.0363, -0.0296, -0.0170],
[-0.0323, -0.0367, -0.0390, -0.0456, -0.0480, -0.0410, -0.0255],
[-0.0417, -0.0491, -0.0477, -0.0433, -0.0533, -0.0532, -0.0355],
[-0.0499, -0.0600, -0.0462, 0.9811, -0.0498, -0.0598, -0.0402],
[-0.0442, -0.0581, -0.0590, -0.0517, -0.0608, -0.0588, -0.0347],
[-0.0305, -0.0400, -0.0508, -0.0568, -0.0579, -0.0452, -0.0248],
[-0.0153, -0.0237, -0.0310, -0.0346, -0.0344, -0.0270, -0.0129]]],
[[[-0.0160, -0.0243, -0.0245, -0.0272, -0.0286, -0.0295, -0.0192],
[-0.0268, -0.0392, -0.0419, -0.0467, -0.0429, -0.0391, -0.0239],
[-0.0317, -0.0460, -0.0398, -0.0397, -0.0390, -0.0448, -0.0279],
[-0.0384, -0.0513, -0.0382, 0.9619, -0.0371, -0.0498, -0.0300],
[-0.0374, -0.0517, -0.0446, -0.0421, -0.0438, -0.0504, -0.0286],
[-0.0326, -0.0426, -0.0440, -0.0519, -0.0479, -0.0421, -0.0220],
[-0.0202, -0.0251, -0.0270, -0.0326, -0.0301, -0.0261, -0.0108]],
[[-0.0191, -0.0285, -0.0303, -0.0343, -0.0319, -0.0293, -0.0200],
[-0.0317, -0.0448, -0.0495, -0.0560, -0.0477, -0.0407, -0.0269],
[-0.0386, -0.0530, -0.0490, -0.0520, -0.0442, -0.0477, -0.0330],
[-0.0485, -0.0611, -0.0499, 0.9405, -0.0459, -0.0560, -0.0372],
[-0.0483, -0.0628, -0.0538, -0.0526, -0.0507, -0.0577, -0.0363],
[-0.0440, -0.0554, -0.0552, -0.0623, -0.0567, -0.0520, -0.0312],
[-0.0284, -0.0353, -0.0367, -0.0415, -0.0375, -0.0337, -0.0172]],
[[-0.0360, -0.0345, -0.0229, -0.0187, -0.0193, -0.0306, -0.0366],
[-0.0305, -0.0121, 0.0188, 0.0288, 0.0223, -0.0060, -0.0272],
[-0.0174, 0.0224, 0.0858, 0.1079, 0.0890, 0.0265, -0.0148],
[-0.0145, 0.0383, 0.1200, 1.1434, 0.1226, 0.0401, -0.0103],
[-0.0177, 0.0281, 0.1001, 0.1293, 0.1013, 0.0295, -0.0130],
[-0.0288, 0.0026, 0.0466, 0.0579, 0.0418, 0.0016, -0.0227],
[-0.0335, -0.0182, 0.0033, 0.0077, -0.0006, -0.0203, -0.0270]]]]
# Normalize: per-channel RGB mean/std, shaped (1, 1, 3) so they broadcast
# over an HxWx3 image (values applied after scaling pixels into [0, 1]).
mean = np.array([0.408, 0.447, 0.470], dtype=np.float32).reshape(1, 1, 3)
std = np.array([0.289, 0.274, 0.278], dtype=np.float32).reshape(1, 1, 3)
# Convolution
# NOTE(review): the triple-quoted block below is dead code kept as a bare
# string literal (a no-op expression statement) -- an earlier single-output
# variant that referenced window_v7, which is defined elsewhere in this file.
'''def privacy_third(img):
out = np.zeros((1200,1600,3))
image_data = ((img / 255. - mean) / std).astype(np.float32)
for j in range(3):
kernel = np.flip(np.flip(np.array(window_v7[0][j]), 0), 1)
tmp = cv2.filter2D(image_data[:,:,j],-1, kernel, borderType=cv2.BORDER_CONSTANT)
out[:,:,j] = tmp
return np.sum(out, axis = 2)'''
# Convolution
def privacy_conv(img):
    """Apply the learned privacy-convolution bank to a BGR image.

    The pixels are scaled to [0, 1] and normalized with the module-level
    ``mean``/``std``; each output channel is then the sum over input
    channels of 2-D correlations with the flipped ``window_v13`` kernels
    (flipping turns cv2's correlation into a true convolution).
    Returns an array with the same height/width as ``img`` and 3 channels.
    """
    height, width = img.shape[0], img.shape[1]
    normalized = ((img / 255. - mean) / std).astype(np.float32)
    result = np.zeros((height, width, 3))
    for out_ch in range(3):
        acc = None
        for in_ch in range(3):
            flipped = np.flip(np.flip(np.array(window_v13[out_ch][in_ch]), 0), 1)
            filtered = cv2.filter2D(normalized[:, :, in_ch], -1, flipped,
                                    borderType=cv2.BORDER_CONSTANT)
            acc = filtered if acc is None else acc + filtered
        result[:, :, out_ch] = acc
    return result
def privacy_conv_back(img):
    """Apply the back-projection privacy convolution (``window_v10_back``).

    Mirrors ``privacy_conv``: normalizes the image with the module-level
    ``mean``/``std`` and filters with the flipped kernels.  Only the first
    input channel feeds each output channel (the ``range(1)`` loop below
    matches the original; the back kernels appear to be one deep).

    Fix: the output buffer was hard-coded to (1200, 1600, 3); it now follows
    the input image's height/width, consistent with ``privacy_conv``.
    """
    H, W = img.shape[0], img.shape[1]
    out = np.zeros((H, W, 3))
    image_data = ((img / 255. - mean) / std).astype(np.float32)
    for i in range(3):
        channel = []
        for j in range(1):  # only input channel 0, as in the original
            kernel = np.flip(np.flip(np.array(window_v10_back[i][j]), 0), 1)
            tmp = cv2.filter2D(image_data[:, :, j], -1, kernel,
                               borderType=cv2.BORDER_CONSTANT)
            channel.append(tmp)
        out[:, :, i] = sum(channel)
    return out
# @jit(nopython=True)
def scipy_conv(img, kernel):
    """Convolve ``img`` with ``kernel`` via scipy.ndimage (zero-padded edges).

    Returns an array the same shape as ``img``.

    NOTE(review): the original also computed a mean/std-normalized copy of
    the image but convolved the *raw* ``img``; that unused computation has
    been removed here (no behavior change).  If normalization was intended,
    the normalized array should be passed to ``ndimage.convolve`` instead --
    confirm against callers.
    """
    return ndimage.convolve(img, kernel, mode='constant', cval=0.0)
# @vectorize(target="parallel")
def scipy_signal(img, kernel):
    """Full linear convolution of ``img`` with ``kernel`` via scipy.signal.

    Output size is the 'full' convolution size (img + kernel - 1 per axis).

    NOTE(review): as in ``scipy_conv``, a mean/std-normalized copy of the
    image was computed but never used; the dead computation has been removed
    (no behavior change).  Convolve the normalized data instead if that was
    the intent.
    """
    return signal.convolve(img, kernel)
def fft(array):
    """Centered n-dimensional FFT: shift to center, transform, unshift."""
    centered = np.fft.fftshift(array)
    spectrum = np.fft.fftn(centered)
    return np.fft.ifftshift(spectrum)
def ifft(array):
    """Inverse of ``fft``: centered n-dimensional inverse FFT."""
    uncentered = np.fft.ifftshift(array)
    spatial = np.fft.ifftn(uncentered)
    return np.fft.fftshift(spatial)
def fft_conv(img):
    """Experimental FFT-based 'convolution' with the ``window[2]`` kernel.

    NOTE(review): this looks broken and is likely dead/experimental code:
    ``image_data`` is computed but never used, and ``np.matmul`` of two
    spectra is a matrix product, not the element-wise product the
    convolution theorem requires -- confirm before relying on the output.
    ``window`` is defined elsewhere in this file.
    """
    image_data = ((img / 255. - mean) / std).astype(np.float32)
    kernel = np.flip(np.flip(np.array(window[2]), 0), 1)
    conv = np.abs(ifft(np.matmul(fft(img), fft(kernel))))
    return conv
def fft_conv2(img):
    """Full FFT-based convolution of ``img`` with the flipped ``window[2]`` kernel."""
    flipped_kernel = np.flip(np.flip(np.array(window[2]), 0), 1)
    return signal.fftconvolve(img, flipped_kernel, mode='full')
# @vectorize(target="parallel")
def my_convolution(img, kernel):
    """Thin wrapper delegating to the custom ``my_conv`` convolution routine."""
    return my_conv(img, kernel)
def privacy_encode(img):
    """JPEG-compress ``img`` and return the encoded byte stream as an ndarray."""
    encoded = cv2.imencode('.jpg', img)
    # cv2.imencode returns (success_flag, buffer); keep only the buffer.
    return np.array(encoded[1])
def sigmoid(X):
    """Element-wise logistic sigmoid, mapping any real input into (0, 1)."""
    exp_neg = np.exp(-X)
    return 1.0 / (1.0 + exp_neg)
def psnr(img1, img2):
    """Return the PSNR (dB) between two images, assuming a peak value of 1.

    Near-identical inputs (MSE < 1e-10) are capped at 100 dB to avoid a
    division by zero.  Inputs are compared as-is (no /255 rescaling).
    """
    peak = 1
    diff = img1 - img2
    mse = np.mean(diff * diff)
    if mse < 1.0e-10:
        return 100
    return 20 * math.log10(peak / math.sqrt(mse))
# --- Batch-processing configuration (machine-specific paths) ---
# Input image glob and output directory for the privacy-transformed images.
folder = '/home/kobe/CenterNet_Easy_Pytorch/data/Virtual_victor_v1/images/*.png' # '/home/henry/fisheye_human_pose/val/images/*.jpg'#'/media/henry/My Passport/0905_demo/demo_data/*.jpg'#
save_folder = '/home/kobe/CenterNet_Easy_Pytorch/data/Virtual_victor_v1/images_privacy/' # '/home/henry/fisheye_human_pose_privacy/privacy_edge/victor_rgb/images/'
# Create the output directory if missing (single level; parents must exist).
if not os.path.exists(save_folder):
    os.mkdir(save_folder)
file_list = glob.glob(folder)
# Post-processing switches for the loop below: optional tanh squashing,
# optional tanh recovery, and an optional back-convolution pass.
tanh_function = False
tanh_recover = False
conv_back = False
for i in range(len(file_list)):
name = file_list[i]
print(name)
img = cv2.imread(name) # cv2.imread("/home/henry/fisheye_victor/pic49.jpg")##
conv_img = privacy_conv(img)
if conv_back:
conv_img = privacy_conv_back(conv_img)
conv_img = sigmoid(conv_img)
if tanh_function:
result = np.tanh(conv_img) / 2 + 0.5 # (np.tanh(privacy_conv(img)) + 1) / 2
save_result = result * 255
if tanh_recover:
read_result = np.where(read_result == 255, 254.9999, read_result)
read_result = np.where(read_result == 0, 0.005, read_result)
recover = np.arctanh((result - 0.5) * 2)
# cv2.imshow('test', conv_img)
# cv2.waitKey(500)
file_list_temp = file_list[i].split('/')
save_name = file_list_temp[len(file_list_temp) - 1]
| |
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.clas_svm_C_horizontalSlider_16.sizePolicy().hasHeightForWidth())
self.clas_svm_C_horizontalSlider_16.setSizePolicy(sizePolicy)
self.clas_svm_C_horizontalSlider_16.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.clas_svm_C_horizontalSlider_16.setMinimum(1)
self.clas_svm_C_horizontalSlider_16.setMaximum(100)
self.clas_svm_C_horizontalSlider_16.setPageStep(1)
self.clas_svm_C_horizontalSlider_16.setProperty("value", 10)
self.clas_svm_C_horizontalSlider_16.setOrientation(QtCore.Qt.Horizontal)
self.clas_svm_C_horizontalSlider_16.setObjectName("clas_svm_C_horizontalSlider_16")
self.horizontalLayout_78.addWidget(self.clas_svm_C_horizontalSlider_16)
spacerItem50 = QtWidgets.QSpacerItem(5, 5, QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_78.addItem(spacerItem50)
self.clas_svm_C_label_16 = QtWidgets.QLabel(self.frame_17)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.clas_svm_C_label_16.sizePolicy().hasHeightForWidth())
self.clas_svm_C_label_16.setSizePolicy(sizePolicy)
self.clas_svm_C_label_16.setMinimumSize(QtCore.QSize(25, 0))
self.clas_svm_C_label_16.setObjectName("clas_svm_C_label_16")
self.horizontalLayout_78.addWidget(self.clas_svm_C_label_16)
self.gridLayout_55.addLayout(self.horizontalLayout_78, 3, 0, 1, 1)
self.verticalLayout_49 = QtWidgets.QVBoxLayout()
self.verticalLayout_49.setObjectName("verticalLayout_49")
spacerItem51 = QtWidgets.QSpacerItem(20, 10, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
self.verticalLayout_49.addItem(spacerItem51)
self.clas_svm_shirinking_checkBox_5 = QtWidgets.QCheckBox(self.frame_17)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.clas_svm_shirinking_checkBox_5.sizePolicy().hasHeightForWidth())
self.clas_svm_shirinking_checkBox_5.setSizePolicy(sizePolicy)
self.clas_svm_shirinking_checkBox_5.setChecked(True)
self.clas_svm_shirinking_checkBox_5.setObjectName("clas_svm_shirinking_checkBox_5")
self.verticalLayout_49.addWidget(self.clas_svm_shirinking_checkBox_5)
spacerItem52 = QtWidgets.QSpacerItem(20, 10, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
self.verticalLayout_49.addItem(spacerItem52)
self.gridLayout_55.addLayout(self.verticalLayout_49, 7, 0, 1, 1)
self.verticalLayout_46.addWidget(self.frame_17)
self.gridLayout_37.addLayout(self.verticalLayout_46, 0, 0, 1, 1)
self.classification_parameters_stackedWidget.addWidget(self.randomforest_classification_page)
self.gradientboosting_classification_page = QtWidgets.QWidget()
self.gradientboosting_classification_page.setObjectName("gradientboosting_classification_page")
self.gridLayout_38 = QtWidgets.QGridLayout(self.gradientboosting_classification_page)
self.gridLayout_38.setObjectName("gridLayout_38")
self.label_12 = QtWidgets.QLabel(self.gradientboosting_classification_page)
self.label_12.setObjectName("label_12")
self.gridLayout_38.addWidget(self.label_12, 0, 0, 1, 1)
self.classification_parameters_stackedWidget.addWidget(self.gradientboosting_classification_page)
self.knn_classification_page = QtWidgets.QWidget()
self.knn_classification_page.setObjectName("knn_classification_page")
self.gridLayout_39 = QtWidgets.QGridLayout(self.knn_classification_page)
self.gridLayout_39.setObjectName("gridLayout_39")
self.label_13 = QtWidgets.QLabel(self.knn_classification_page)
self.label_13.setObjectName("label_13")
self.gridLayout_39.addWidget(self.label_13, 0, 0, 1, 1)
self.classification_parameters_stackedWidget.addWidget(self.knn_classification_page)
self.horizontalLayout_10.addWidget(self.classification_parameters_stackedWidget)
self.gridLayout_21.addLayout(self.horizontalLayout_10, 1, 0, 1, 1)
self.regression_and_classification_stackedWidget.addWidget(self.page_classification)
self.verticalLayout_7.addWidget(self.regression_and_classification_stackedWidget)
self.gridLayout_20.addLayout(self.verticalLayout_7, 0, 1, 1, 1)
self.tabs_widget.addTab(self.model_selection_tab, "")
self.inputoutput_tab = QtWidgets.QWidget()
self.inputoutput_tab.setObjectName("inputoutput_tab")
self.gridLayout_28 = QtWidgets.QGridLayout(self.inputoutput_tab)
self.gridLayout_28.setObjectName("gridLayout_28")
self.horizontalLayout_5 = QtWidgets.QHBoxLayout()
self.horizontalLayout_5.setContentsMargins(-1, -1, 12, -1)
self.horizontalLayout_5.setObjectName("horizontalLayout_5")
self.verticalLayout_2 = QtWidgets.QVBoxLayout()
self.verticalLayout_2.setContentsMargins(12, -1, -1, 12)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.label_2 = QtWidgets.QLabel(self.inputoutput_tab)
self.label_2.setObjectName("label_2")
self.verticalLayout_2.addWidget(self.label_2)
self.available_columns_listWidget = QtWidgets.QListWidget(self.inputoutput_tab)
self.available_columns_listWidget.setSelectionMode(QtWidgets.QAbstractItemView.MultiSelection)
self.available_columns_listWidget.setObjectName("available_columns_listWidget")
self.verticalLayout_2.addWidget(self.available_columns_listWidget)
self.horizontalLayout_5.addLayout(self.verticalLayout_2)
self.gridLayout_26 = QtWidgets.QGridLayout()
self.gridLayout_26.setObjectName("gridLayout_26")
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setContentsMargins(0, 0, 0, -1)
self.horizontalLayout.setObjectName("horizontalLayout")
self.add_input_columns_pushButton = QtWidgets.QPushButton(self.inputoutput_tab)
self.add_input_columns_pushButton.setObjectName("add_input_columns_pushButton")
self.horizontalLayout.addWidget(self.add_input_columns_pushButton)
self.verticalLayout_3 = QtWidgets.QVBoxLayout()
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.label_4 = QtWidgets.QLabel(self.inputoutput_tab)
self.label_4.setObjectName("label_4")
self.verticalLayout_3.addWidget(self.label_4)
self.input_columns_listWidget = QtWidgets.QListWidget(self.inputoutput_tab)
self.input_columns_listWidget.setSelectionMode(QtWidgets.QAbstractItemView.MultiSelection)
self.input_columns_listWidget.setObjectName("input_columns_listWidget")
self.verticalLayout_3.addWidget(self.input_columns_listWidget)
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.remove_input_columns_pushButton = QtWidgets.QPushButton(self.inputoutput_tab)
self.remove_input_columns_pushButton.setObjectName("remove_input_columns_pushButton")
self.horizontalLayout_2.addWidget(self.remove_input_columns_pushButton)
self.clear_input_columns_pushButton = QtWidgets.QPushButton(self.inputoutput_tab)
self.clear_input_columns_pushButton.setObjectName("clear_input_columns_pushButton")
self.horizontalLayout_2.addWidget(self.clear_input_columns_pushButton)
self.verticalLayout_3.addLayout(self.horizontalLayout_2)
self.horizontalLayout.addLayout(self.verticalLayout_3)
self.gridLayout_26.addLayout(self.horizontalLayout, 0, 0, 1, 1)
self.output_selection_stackedWidget = QtWidgets.QStackedWidget(self.inputoutput_tab)
self.output_selection_stackedWidget.setMinimumSize(QtCore.QSize(0, 200))
self.output_selection_stackedWidget.setFrameShape(QtWidgets.QFrame.NoFrame)
self.output_selection_stackedWidget.setObjectName("output_selection_stackedWidget")
self.regression = QtWidgets.QWidget()
self.regression.setObjectName("regression")
self.gridLayout_25 = QtWidgets.QGridLayout(self.regression)
self.gridLayout_25.setObjectName("gridLayout_25")
self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.add_output_columns_pushButton = QtWidgets.QPushButton(self.regression)
self.add_output_columns_pushButton.setObjectName("add_output_columns_pushButton")
self.horizontalLayout_3.addWidget(self.add_output_columns_pushButton)
self.verticalLayout_6 = QtWidgets.QVBoxLayout()
self.verticalLayout_6.setObjectName("verticalLayout_6")
self.label_5 = QtWidgets.QLabel(self.regression)
self.label_5.setObjectName("label_5")
self.verticalLayout_6.addWidget(self.label_5)
self.output_columns_listWidget = QtWidgets.QListWidget(self.regression)
self.output_columns_listWidget.setSelectionMode(QtWidgets.QAbstractItemView.MultiSelection)
self.output_columns_listWidget.setObjectName("output_columns_listWidget")
self.verticalLayout_6.addWidget(self.output_columns_listWidget)
self.horizontalLayout_4 = QtWidgets.QHBoxLayout()
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
self.remove_output_columns_pushButton = QtWidgets.QPushButton(self.regression)
self.remove_output_columns_pushButton.setObjectName("remove_output_columns_pushButton")
self.horizontalLayout_4.addWidget(self.remove_output_columns_pushButton)
self.clear_output_columns_pushButton = QtWidgets.QPushButton(self.regression)
self.clear_output_columns_pushButton.setObjectName("clear_output_columns_pushButton")
self.horizontalLayout_4.addWidget(self.clear_output_columns_pushButton)
self.verticalLayout_6.addLayout(self.horizontalLayout_4)
self.horizontalLayout_3.addLayout(self.verticalLayout_6)
self.gridLayout_25.addLayout(self.horizontalLayout_3, 1, 0, 1, 1)
self.output_selection_stackedWidget.addWidget(self.regression)
self.classification = QtWidgets.QWidget()
self.classification.setObjectName("classification")
self.horizontalLayout_39 = QtWidgets.QHBoxLayout(self.classification)
self.horizontalLayout_39.setObjectName("horizontalLayout_39")
self.horizontalLayout_45 = QtWidgets.QHBoxLayout()
self.horizontalLayout_45.setObjectName("horizontalLayout_45")
spacerItem53 = QtWidgets.QSpacerItem(51, 20, QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_45.addItem(spacerItem53)
self.verticalLayout_4 = QtWidgets.QVBoxLayout()
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.label_14 = QtWidgets.QLabel(self.classification)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_14.sizePolicy().hasHeightForWidth())
self.label_14.setSizePolicy(sizePolicy)
self.label_14.setObjectName("label_14")
self.verticalLayout_4.addWidget(self.label_14)
self.clas_output_colum_comboBox = QtWidgets.QComboBox(self.classification)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.clas_output_colum_comboBox.sizePolicy().hasHeightForWidth())
self.clas_output_colum_comboBox.setSizePolicy(sizePolicy)
self.clas_output_colum_comboBox.setMinimumSize(QtCore.QSize(150, 0))
self.clas_output_colum_comboBox.setMaximumSize(QtCore.QSize(200, 16777215))
self.clas_output_colum_comboBox.setLayoutDirection(QtCore.Qt.LeftToRight)
self.clas_output_colum_comboBox.setObjectName("clas_output_colum_comboBox")
self.verticalLayout_4.addWidget(self.clas_output_colum_comboBox)
spacerItem54 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_4.addItem(spacerItem54)
self.horizontalLayout_45.addLayout(self.verticalLayout_4)
spacerItem55 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_45.addItem(spacerItem55)
self.horizontalLayout_39.addLayout(self.horizontalLayout_45)
self.output_selection_stackedWidget.addWidget(self.classification)
self.gridLayout_26.addWidget(self.output_selection_stackedWidget, 1, 0, 1, 1)
self.horizontalLayout_5.addLayout(self.gridLayout_26)
self.gridLayout_28.addLayout(self.horizontalLayout_5, 0, 0, 1, 1)
self.tabs_widget.addTab(self.inputoutput_tab, "")
self.train_tab = QtWidgets.QWidget()
self.train_tab.setObjectName("train_tab")
self.gridLayout_13 = QtWidgets.QGridLayout(self.train_tab)
self.gridLayout_13.setObjectName("gridLayout_13")
self.verticalLayout_15 = QtWidgets.QVBoxLayout()
self.verticalLayout_15.setContentsMargins(12, -1, -1, 12)
self.verticalLayout_15.setObjectName("verticalLayout_15")
self.label_33 = QtWidgets.QLabel(self.train_tab)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_33.sizePolicy().hasHeightForWidth())
self.label_33.setSizePolicy(sizePolicy)
self.label_33.setMaximumSize(QtCore.QSize(160, 16777215))
self.label_33.setBaseSize(QtCore.QSize(0, 0))
self.label_33.setObjectName("label_33")
self.verticalLayout_15.addWidget(self.label_33)
self.horizontalLayout_27 = QtWidgets.QHBoxLayout()
self.horizontalLayout_27.setObjectName("horizontalLayout_27")
self.train_percentage_horizontalSlider = QtWidgets.QSlider(self.train_tab)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.train_percentage_horizontalSlider.sizePolicy().hasHeightForWidth())
self.train_percentage_horizontalSlider.setSizePolicy(sizePolicy)
self.train_percentage_horizontalSlider.setMinimumSize(QtCore.QSize(0, 0))
self.train_percentage_horizontalSlider.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.train_percentage_horizontalSlider.setMinimum(5)
self.train_percentage_horizontalSlider.setMaximum(95)
self.train_percentage_horizontalSlider.setProperty("value", 85)
self.train_percentage_horizontalSlider.setOrientation(QtCore.Qt.Horizontal)
self.train_percentage_horizontalSlider.setObjectName("train_percentage_horizontalSlider")
self.horizontalLayout_27.addWidget(self.train_percentage_horizontalSlider)
self.train_percentage_label = QtWidgets.QLabel(self.train_tab)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.train_percentage_label.sizePolicy().hasHeightForWidth())
self.train_percentage_label.setSizePolicy(sizePolicy)
self.train_percentage_label.setMinimumSize(QtCore.QSize(35, 0))
self.train_percentage_label.setMaximumSize(QtCore.QSize(0, 16777215))
self.train_percentage_label.setBaseSize(QtCore.QSize(30, 0))
self.train_percentage_label.setObjectName("train_percentage_label")
self.horizontalLayout_27.addWidget(self.train_percentage_label)
self.verticalLayout_15.addLayout(self.horizontalLayout_27)
self.label_34 = QtWidgets.QLabel(self.train_tab)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_34.sizePolicy().hasHeightForWidth())
self.label_34.setSizePolicy(sizePolicy)
self.label_34.setObjectName("label_34")
self.verticalLayout_15.addWidget(self.label_34)
self.horizontalLayout_29 = QtWidgets.QHBoxLayout()
self.horizontalLayout_29.setObjectName("horizontalLayout_29")
self.test_percentage_horizontalSlider = QtWidgets.QSlider(self.train_tab)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.test_percentage_horizontalSlider.sizePolicy().hasHeightForWidth())
self.test_percentage_horizontalSlider.setSizePolicy(sizePolicy)
self.test_percentage_horizontalSlider.setMinimumSize(QtCore.QSize(0, 0))
self.test_percentage_horizontalSlider.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.test_percentage_horizontalSlider.setMinimum(5)
self.test_percentage_horizontalSlider.setMaximum(95)
self.test_percentage_horizontalSlider.setProperty("value", 15)
self.test_percentage_horizontalSlider.setOrientation(QtCore.Qt.Horizontal)
self.test_percentage_horizontalSlider.setObjectName("test_percentage_horizontalSlider")
self.horizontalLayout_29.addWidget(self.test_percentage_horizontalSlider)
self.test_percentage_label = QtWidgets.QLabel(self.train_tab)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.test_percentage_label.sizePolicy().hasHeightForWidth())
self.test_percentage_label.setSizePolicy(sizePolicy)
self.test_percentage_label.setMinimumSize(QtCore.QSize(35, 0))
self.test_percentage_label.setMaximumSize(QtCore.QSize(0, 16777215))
self.test_percentage_label.setBaseSize(QtCore.QSize(30, 0))
self.test_percentage_label.setObjectName("test_percentage_label")
self.horizontalLayout_29.addWidget(self.test_percentage_label)
self.verticalLayout_15.addLayout(self.horizontalLayout_29)
self.horizontalLayout_14 = QtWidgets.QHBoxLayout()
self.horizontalLayout_14.setObjectName("horizontalLayout_14")
self.label_8 = QtWidgets.QLabel(self.train_tab)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_8.sizePolicy().hasHeightForWidth())
self.label_8.setSizePolicy(sizePolicy)
self.label_8.setObjectName("label_8")
self.horizontalLayout_14.addWidget(self.label_8)
spacerItem56 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_14.addItem(spacerItem56)
self.shuffle_samples_checkBox = QtWidgets.QCheckBox(self.train_tab)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.shuffle_samples_checkBox.sizePolicy().hasHeightForWidth())
self.shuffle_samples_checkBox.setSizePolicy(sizePolicy)
self.shuffle_samples_checkBox.setMaximumSize(QtCore.QSize(30, 16777215))
self.shuffle_samples_checkBox.setText("")
self.shuffle_samples_checkBox.setObjectName("shuffle_samples_checkBox")
self.horizontalLayout_14.addWidget(self.shuffle_samples_checkBox)
self.verticalLayout_15.addLayout(self.horizontalLayout_14)
self.horizontalLayout_40 = QtWidgets.QHBoxLayout()
self.horizontalLayout_40.setObjectName("horizontalLayout_40")
self.train_model_pushButton = QtWidgets.QPushButton(self.train_tab)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.train_model_pushButton.sizePolicy().hasHeightForWidth())
self.train_model_pushButton.setSizePolicy(sizePolicy)
self.train_model_pushButton.setMinimumSize(QtCore.QSize(0, 0))
self.train_model_pushButton.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.train_model_pushButton.setLayoutDirection(QtCore.Qt.LeftToRight)
self.train_model_pushButton.setObjectName("train_model_pushButton")
self.horizontalLayout_40.addWidget(self.train_model_pushButton)
self.verticalLayout_15.addLayout(self.horizontalLayout_40)
self.line_9 = QtWidgets.QFrame(self.train_tab)
self.line_9.setFrameShape(QtWidgets.QFrame.HLine)
self.line_9.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_9.setObjectName("line_9")
self.verticalLayout_15.addWidget(self.line_9)
self.horizontalLayout_34 = QtWidgets.QHBoxLayout()
self.horizontalLayout_34.setObjectName("horizontalLayout_34")
self.label_11 = QtWidgets.QLabel(self.train_tab)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_11.sizePolicy().hasHeightForWidth())
self.label_11.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(13)
font.setBold(True)
font.setWeight(75)
self.label_11.setFont(font)
self.label_11.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.label_11.setObjectName("label_11")
self.horizontalLayout_34.addWidget(self.label_11)
self.train_dataset_shape_label = QtWidgets.QLabel(self.train_tab)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.train_dataset_shape_label.sizePolicy().hasHeightForWidth())
self.train_dataset_shape_label.setSizePolicy(sizePolicy)
self.train_dataset_shape_label.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.train_dataset_shape_label.setObjectName("train_dataset_shape_label")
self.horizontalLayout_34.addWidget(self.train_dataset_shape_label)
self.verticalLayout_15.addLayout(self.horizontalLayout_34)
self.horizontalLayout_35 = QtWidgets.QHBoxLayout()
self.horizontalLayout_35.setObjectName("horizontalLayout_35")
self.label_24 = QtWidgets.QLabel(self.train_tab)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_24.sizePolicy().hasHeightForWidth())
self.label_24.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(13)
font.setBold(True)
font.setWeight(75)
self.label_24.setFont(font)
self.label_24.setObjectName("label_24")
self.horizontalLayout_35.addWidget(self.label_24)
self.test_dataset_shape_label = QtWidgets.QLabel(self.train_tab)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.test_dataset_shape_label.sizePolicy().hasHeightForWidth())
self.test_dataset_shape_label.setSizePolicy(sizePolicy)
self.test_dataset_shape_label.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.test_dataset_shape_label.setObjectName("test_dataset_shape_label")
self.horizontalLayout_35.addWidget(self.test_dataset_shape_label)
self.verticalLayout_15.addLayout(self.horizontalLayout_35)
self.train_metrics_stackedWidget = QtWidgets.QStackedWidget(self.train_tab)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.train_metrics_stackedWidget.sizePolicy().hasHeightForWidth())
self.train_metrics_stackedWidget.setSizePolicy(sizePolicy)
self.train_metrics_stackedWidget.setMinimumSize(QtCore.QSize(0, 0))
self.train_metrics_stackedWidget.setMaximumSize(QtCore.QSize(320, 16777215))
self.train_metrics_stackedWidget.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.train_metrics_stackedWidget.setFrameShadow(QtWidgets.QFrame.Plain)
self.train_metrics_stackedWidget.setLineWidth(1)
self.train_metrics_stackedWidget.setObjectName("train_metrics_stackedWidget")
self.regression_metrics = QtWidgets.QWidget()
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.regression_metrics.sizePolicy().hasHeightForWidth())
self.regression_metrics.setSizePolicy(sizePolicy)
self.regression_metrics.setObjectName("regression_metrics")
self.gridLayout_3 = QtWidgets.QGridLayout(self.regression_metrics)
self.gridLayout_3.setObjectName("gridLayout_3")
self.gridLayout_2 = QtWidgets.QGridLayout()
self.gridLayout_2.setContentsMargins(10, 10, 10, 10)
self.gridLayout_2.setObjectName("gridLayout_2")
self.label_25 = QtWidgets.QLabel(self.regression_metrics)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_25.sizePolicy().hasHeightForWidth())
self.label_25.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(17)
font.setBold(True)
font.setWeight(75)
self.label_25.setFont(font)
self.label_25.setAlignment(QtCore.Qt.AlignCenter)
self.label_25.setObjectName("label_25")
self.gridLayout_2.addWidget(self.label_25, 0, 0, 1, 2)
self.line_10 = QtWidgets.QFrame(self.regression_metrics)
self.line_10.setFrameShape(QtWidgets.QFrame.HLine)
self.line_10.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_10.setObjectName("line_10")
self.gridLayout_2.addWidget(self.line_10, 1, 0, 1, 2)
self.label_29 = QtWidgets.QLabel(self.regression_metrics)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_29.sizePolicy().hasHeightForWidth())
self.label_29.setSizePolicy(sizePolicy)
self.label_29.setObjectName("label_29")
self.gridLayout_2.addWidget(self.label_29, 2, 0, 1, 1)
self.reg_mse_label = QtWidgets.QLabel(self.regression_metrics)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.reg_mse_label.sizePolicy().hasHeightForWidth())
self.reg_mse_label.setSizePolicy(sizePolicy)
self.reg_mse_label.setAlignment(QtCore.Qt.AlignCenter)
self.reg_mse_label.setObjectName("reg_mse_label")
self.gridLayout_2.addWidget(self.reg_mse_label, 2, 1, 1, 1)
self.label_31 = QtWidgets.QLabel(self.regression_metrics)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_31.sizePolicy().hasHeightForWidth())
self.label_31.setSizePolicy(sizePolicy)
self.label_31.setObjectName("label_31")
self.gridLayout_2.addWidget(self.label_31, 3, 0, 1, 1)
self.reg_rmse_label = QtWidgets.QLabel(self.regression_metrics)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.reg_rmse_label.sizePolicy().hasHeightForWidth())
self.reg_rmse_label.setSizePolicy(sizePolicy)
self.reg_rmse_label.setAlignment(QtCore.Qt.AlignCenter)
self.reg_rmse_label.setObjectName("reg_rmse_label")
self.gridLayout_2.addWidget(self.reg_rmse_label, 3, 1, 1, 1)
self.label_30 = QtWidgets.QLabel(self.regression_metrics)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_30.sizePolicy().hasHeightForWidth())
self.label_30.setSizePolicy(sizePolicy)
self.label_30.setObjectName("label_30")
self.gridLayout_2.addWidget(self.label_30, 4, 0, 1, 1)
self.reg_r2_label = QtWidgets.QLabel(self.regression_metrics)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.reg_r2_label.sizePolicy().hasHeightForWidth())
self.reg_r2_label.setSizePolicy(sizePolicy)
self.reg_r2_label.setAlignment(QtCore.Qt.AlignCenter)
self.reg_r2_label.setObjectName("reg_r2_label")
self.gridLayout_2.addWidget(self.reg_r2_label, 4, 1, 1, 1)
self.label_32 = QtWidgets.QLabel(self.regression_metrics)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_32.sizePolicy().hasHeightForWidth())
self.label_32.setSizePolicy(sizePolicy)
self.label_32.setObjectName("label_32")
self.gridLayout_2.addWidget(self.label_32, 5, 0, 1, 1)
self.reg_mea_label = QtWidgets.QLabel(self.regression_metrics)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.reg_mea_label.sizePolicy().hasHeightForWidth())
self.reg_mea_label.setSizePolicy(sizePolicy)
self.reg_mea_label.setAlignment(QtCore.Qt.AlignCenter)
self.reg_mea_label.setObjectName("reg_mea_label")
self.gridLayout_2.addWidget(self.reg_mea_label, 5, 1, 1, 1)
self.gridLayout_3.addLayout(self.gridLayout_2, 0, 0, 1, 1)
self.train_metrics_stackedWidget.addWidget(self.regression_metrics)
self.classification_metrics = QtWidgets.QWidget()
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.classification_metrics.sizePolicy().hasHeightForWidth())
self.classification_metrics.setSizePolicy(sizePolicy)
self.classification_metrics.setObjectName("classification_metrics")
self.gridLayout = QtWidgets.QGridLayout(self.classification_metrics)
self.gridLayout.setObjectName("gridLayout")
self.gridLayout_7 = QtWidgets.QGridLayout()
self.gridLayout_7.setSizeConstraint(QtWidgets.QLayout.SetMinimumSize)
self.gridLayout_7.setContentsMargins(10, 10, 10, 10)
self.gridLayout_7.setObjectName("gridLayout_7")
self.label_26 = QtWidgets.QLabel(self.classification_metrics)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_26.sizePolicy().hasHeightForWidth())
self.label_26.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(17)
font.setBold(True)
font.setWeight(75)
self.label_26.setFont(font)
self.label_26.setAlignment(QtCore.Qt.AlignCenter)
self.label_26.setObjectName("label_26")
self.gridLayout_7.addWidget(self.label_26, 0, 0, 1, 2)
self.line_8 = QtWidgets.QFrame(self.classification_metrics)
self.line_8.setFrameShape(QtWidgets.QFrame.HLine)
self.line_8.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_8.setObjectName("line_8")
self.gridLayout_7.addWidget(self.line_8, 1, 0, 1, 2)
self.label_38 = QtWidgets.QLabel(self.classification_metrics)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_38.sizePolicy().hasHeightForWidth())
self.label_38.setSizePolicy(sizePolicy)
self.label_38.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.label_38.setObjectName("label_38")
self.gridLayout_7.addWidget(self.label_38, 2, 0, 1, 1)
self.clas_accuracy_label = QtWidgets.QLabel(self.classification_metrics)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.clas_accuracy_label.sizePolicy().hasHeightForWidth())
self.clas_accuracy_label.setSizePolicy(sizePolicy)
self.clas_accuracy_label.setAlignment(QtCore.Qt.AlignCenter)
self.clas_accuracy_label.setObjectName("clas_accuracy_label")
self.gridLayout_7.addWidget(self.clas_accuracy_label, 2, 1, 1, 1)
self.label_39 = QtWidgets.QLabel(self.classification_metrics)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_39.sizePolicy().hasHeightForWidth())
self.label_39.setSizePolicy(sizePolicy)
self.label_39.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.label_39.setObjectName("label_39")
self.gridLayout_7.addWidget(self.label_39, 3, 0, 1, 1)
self.clas_recall_label = QtWidgets.QLabel(self.classification_metrics)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.clas_recall_label.sizePolicy().hasHeightForWidth())
self.clas_recall_label.setSizePolicy(sizePolicy)
self.clas_recall_label.setAlignment(QtCore.Qt.AlignCenter)
self.clas_recall_label.setObjectName("clas_recall_label")
self.gridLayout_7.addWidget(self.clas_recall_label, 3, 1, 1, 1)
self.label_37 = QtWidgets.QLabel(self.classification_metrics)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_37.sizePolicy().hasHeightForWidth())
self.label_37.setSizePolicy(sizePolicy)
self.label_37.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.label_37.setObjectName("label_37")
self.gridLayout_7.addWidget(self.label_37, 4, 0, 1, 1)
self.clas_precision_label = QtWidgets.QLabel(self.classification_metrics)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.clas_precision_label.sizePolicy().hasHeightForWidth())
self.clas_precision_label.setSizePolicy(sizePolicy)
self.clas_precision_label.setAlignment(QtCore.Qt.AlignCenter)
self.clas_precision_label.setObjectName("clas_precision_label")
self.gridLayout_7.addWidget(self.clas_precision_label, 4, 1, 1, 1)
self.label_36 = QtWidgets.QLabel(self.classification_metrics)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_36.sizePolicy().hasHeightForWidth())
self.label_36.setSizePolicy(sizePolicy)
self.label_36.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.label_36.setObjectName("label_36")
self.gridLayout_7.addWidget(self.label_36, 5, 0, 1, 1)
self.clas_f1_score_label = QtWidgets.QLabel(self.classification_metrics)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.clas_f1_score_label.sizePolicy().hasHeightForWidth())
self.clas_f1_score_label.setSizePolicy(sizePolicy)
self.clas_f1_score_label.setAlignment(QtCore.Qt.AlignCenter)
self.clas_f1_score_label.setObjectName("clas_f1_score_label")
self.gridLayout_7.addWidget(self.clas_f1_score_label, 5, 1, 1, 1)
self.gridLayout.addLayout(self.gridLayout_7, 0, 0, 1, 1)
self.train_metrics_stackedWidget.addWidget(self.classification_metrics)
self.verticalLayout_15.addWidget(self.train_metrics_stackedWidget)
spacerItem57 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_15.addItem(spacerItem57)
self.gridLayout_13.addLayout(self.verticalLayout_15, 0, 0, 2, 1)
self.model_train_widget = MplWidget(self.train_tab)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(100)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.model_train_widget.sizePolicy().hasHeightForWidth())
self.model_train_widget.setSizePolicy(sizePolicy)
self.model_train_widget.setMinimumSize(QtCore.QSize(0, 0))
self.model_train_widget.setAutoFillBackground(False)
self.model_train_widget.setObjectName("model_train_widget")
self.gridLayout_13.addWidget(self.model_train_widget, 0, 1, 1, 1)
self.tabs_widget.addTab(self.train_tab, "")
self.gridLayout_5.addWidget(self.tabs_widget, 0, 0, 1, 1)
MainWindow.setCentralWidget(self.centralwidget)
self.retranslateUi(MainWindow)
self.tabs_widget.setCurrentIndex(3)
self.pre_process_tabWidget.setCurrentIndex(1)
self.pre_process_filtering_stackedWidget.setCurrentIndex(1)
self.pre_process_replacing_stackedWidget.setCurrentIndex(0)
self.regression_and_classification_stackedWidget.setCurrentIndex(0)
self.regression_parameters_stackedWidget.setCurrentIndex(1)
self.classification_parameters_stackedWidget.setCurrentIndex(1)
self.output_selection_stackedWidget.setCurrentIndex(0)
self.train_metrics_stackedWidget.setCurrentIndex(1)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "Modelling ML"))
self.label_53.setText(_translate("MainWindow", "Load a Dataset: "))
self.load_file_pushButton.setText(_translate("MainWindow", "Import file"))
self.label_54.setText(_translate("MainWindow", "or"))
self.label_42.setText(_translate("MainWindow", "Select existing dataset"))
self.tabs_widget.setTabText(self.tabs_widget.indexOf(self.dataset_load_tab), _translate("MainWindow", "Dataset Load"))
self.label_44.setText(_translate("MainWindow", "Variable"))
self.plot_radioButton.setText(_translate("MainWindow", "Plot"))
self.boxplot_radioButton.setText(_translate("MainWindow", "Boxplot"))
self.histogram_radioButton.setText(_translate("MainWindow", "Histogram"))
self.label_43.setText(_translate("MainWindow", "Variable Summary"))
self.tabs_widget.setTabText(self.tabs_widget.indexOf(self.visualise_tab), _translate("MainWindow", "Visualise"))
self.label_27.setText(_translate("MainWindow", "Pre-Processing Sequence"))
self.remove_preprocessing_rule_pushButton.setText(_translate("MainWindow", "Remove Selected"))
self.clear_preprocessing_rule_pushButton.setText(_translate("MainWindow", "Clear"))
self.remove_constant_variables_pushButton.setText(_translate("MainWindow", "Remove Constant Variables"))
self.remove_duplicates_pushButton.setText(_translate("MainWindow", "Remove Dulpicate Rows"))
self.numeric_scaling_pushButton.setText(_translate("MainWindow", "Numeric Scaling "))
self.label_51.setText(_translate("MainWindow", "Cut-off: "))
self.outliers_treshold_label.setText(_translate("MainWindow", "2.0"))
self.treshold_sigma_label_2.setText(_translate("MainWindow", " σ"))
self.remove_outliers_pushButton.setText(_translate("MainWindow", "Remove Outliers"))
self.pre_process_tabWidget.setTabText(self.pre_process_tabWidget.indexOf(self.tab), _translate("MainWindow", "Basic"))
self.label_28.setText(_translate("MainWindow", "Filter Out"))
self.label_45.setText(_translate("MainWindow", "Variable"))
self.filter_operator_comboBox.setItemText(0, _translate("MainWindow", "Equal to"))
self.filter_operator_comboBox.setItemText(1, _translate("MainWindow", "Not equal to"))
self.filter_operator_comboBox.setItemText(2, _translate("MainWindow", "Less than"))
self.filter_operator_comboBox.setItemText(3, _translate("MainWindow", "Less than or equal to"))
self.filter_operator_comboBox.setItemText(4, _translate("MainWindow", "Greater than"))
self.filter_operator_comboBox.setItemText(5, _translate("MainWindow", "Greater than or equal to"))
self.label_55.setText(_translate("MainWindow", "Value"))
self.addrule_filter_value_pushButton.setText(_translate("MainWindow", "Add Filtering Rule"))
self.pre_process_tabWidget.setTabText(self.pre_process_tabWidget.indexOf(self.tab_2), _translate("MainWindow", "Filtering"))
self.label_35.setText(_translate("MainWindow", "Replace Values"))
self.label_52.setText(_translate("MainWindow", "Values"))
self.label_in.setText(_translate("MainWindow", "in"))
self.label_with.setText(_translate("MainWindow", "with"))
self.addrule_replace_value_pushButton.setText(_translate("MainWindow", "Add Replacing Rule"))
self.pre_process_tabWidget.setTabText(self.pre_process_tabWidget.indexOf(self.tab_3), _translate("MainWindow", | |
# Source: pybind/slxos/v16r_1_00b/igmp_snooping_state/igmp_l3_interfaces/igmp_l3_interfaces_/__init__.py (repo: shivharis/pybind)
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class igmp_l3_interfaces(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-mc-hms-operational - based on the path /igmp-snooping-state/igmp-l3-interfaces/igmp-l3-interfaces. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Igmp L3 interface Information
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__interface_name','__is_igmp_enabled','__query_interval','__other_querier_interval','__query_reponse_time','__last_member_query_interval','__immediate_leave','__igmp_querier','__is_igmp_querier_local',)
_yang_name = 'igmp-l3-interfaces'
_rest_name = 'igmp-l3-interfaces'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
    """Build the container, wiring each YANG leaf into a YANGDynClass member.

    Accepts at most one positional argument: an object carrying the same
    pyangbind elements, whose changed values are copied in via the
    generated ``_set_*`` methods.  Keyword arguments ``path_helper``,
    ``extmethods`` and ``load`` are consumed here and not passed on.
    """
    # NOTE(review): auto-generated by pyangbind.  The kwargs.pop() calls must
    # run before the YANGDynClass constructions below, because every member
    # captures self._path_helper / self._extmethods at build time.
    path_helper_ = kwargs.pop("path_helper", None)
    if path_helper_ is False:
        self._path_helper = False
    elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
        self._path_helper = path_helper_
    elif hasattr(self, "_parent"):
        # Fall back to the parent container's helper when one exists.
        path_helper_ = getattr(self._parent, "_path_helper", False)
        self._path_helper = path_helper_
    else:
        self._path_helper = False
    extmethods = kwargs.pop("extmethods", None)
    if extmethods is False:
        self._extmethods = False
    elif extmethods is not None and isinstance(extmethods, dict):
        self._extmethods = extmethods
    elif hasattr(self, "_parent"):
        # Same inheritance rule as path_helper above.
        extmethods = getattr(self._parent, "_extmethods", None)
        self._extmethods = extmethods
    else:
        self._extmethods = False
    # Default (unset) instances for every leaf; all are operational state
    # (is_config=False).  uint8 leaves are restricted 0..255, uint32 leaves
    # 0..4294967295, per the brocade-mc-hms-operational YANG module.
    self.__is_igmp_enabled = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="is-igmp-enabled", rest_name="is-igmp-enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mc-hms-operational', defining_module='brocade-mc-hms-operational', yang_type='uint8', is_config=False)
    self.__last_member_query_interval = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="last-member-query-interval", rest_name="last-member-query-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mc-hms-operational', defining_module='brocade-mc-hms-operational', yang_type='uint32', is_config=False)
    self.__igmp_querier = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="igmp-querier", rest_name="igmp-querier", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mc-hms-operational', defining_module='brocade-mc-hms-operational', yang_type='uint32', is_config=False)
    self.__other_querier_interval = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="other-querier-interval", rest_name="other-querier-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mc-hms-operational', defining_module='brocade-mc-hms-operational', yang_type='uint32', is_config=False)
    self.__immediate_leave = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="immediate-leave", rest_name="immediate-leave", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mc-hms-operational', defining_module='brocade-mc-hms-operational', yang_type='uint8', is_config=False)
    self.__is_igmp_querier_local = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="is-igmp-querier-local", rest_name="is-igmp-querier-local", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mc-hms-operational', defining_module='brocade-mc-hms-operational', yang_type='uint8', is_config=False)
    self.__query_interval = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="query-interval", rest_name="query-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mc-hms-operational', defining_module='brocade-mc-hms-operational', yang_type='uint32', is_config=False)
    # interface-name is the list key (is_keyval=True).
    self.__interface_name = YANGDynClass(base=unicode, is_leaf=True, yang_name="interface-name", rest_name="interface-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mc-hms-operational', defining_module='brocade-mc-hms-operational', yang_type='uint32', is_config=False) if False else YANGDynClass(base=unicode, is_leaf=True, yang_name="interface-name", rest_name="interface-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mc-hms-operational', defining_module='brocade-mc-hms-operational', yang_type='string', is_config=False)
    # NOTE(review): the "reponse" spelling below matches the YANG leaf name
    # ("query-reponse-time") in the source model; do not "fix" it here.
    self.__query_reponse_time = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="query-reponse-time", rest_name="query-reponse-time", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mc-hms-operational', defining_module='brocade-mc-hms-operational', yang_type='uint32', is_config=False)
    load = kwargs.pop("load", None)
    if args:
        if len(args) > 1:
            raise TypeError("cannot create a YANG container with >1 argument")
        # The positional argument must expose every pyangbind element.
        all_attr = True
        for e in self._pyangbind_elements:
            if not hasattr(args[0], e):
                all_attr = False
                break
        if not all_attr:
            raise ValueError("Supplied object did not have the correct attributes")
        # Copy only values that were explicitly changed on the source object.
        for e in self._pyangbind_elements:
            nobj = getattr(args[0], e)
            if nobj._changed() is False:
                continue
            setmethod = getattr(self, "_set_%s" % e)
            if load is None:
                setmethod(getattr(args[0], e))
            else:
                setmethod(getattr(args[0], e), load=load)
def _path(self):
    """Return this node's absolute YANG schema path as a list of components."""
    if not hasattr(self, "_parent"):
        # Detached from any tree: fall back to the statically generated path.
        return [u'igmp-snooping-state', u'igmp-l3-interfaces', u'igmp-l3-interfaces']
    # Otherwise the path is the parent's path extended with this node's name.
    return self._parent._path() + [self._yang_name]
def _rest_path(self):
    """Return this node's REST URI path as a list of components."""
    if not hasattr(self, "_parent"):
        # Detached from any tree: fall back to the statically generated path.
        return [u'igmp-snooping-state', u'igmp-l3-interfaces', u'igmp-l3-interfaces']
    base = self._parent._rest_path()
    # Nodes with an empty REST name do not contribute a path component.
    return base + [self._rest_name] if self._rest_name else base
def _get_interface_name(self):
    """Return the ``interface_name`` leaf (YANG type: string).

    Maps to /igmp-snooping-state/igmp-l3-interfaces/igmp-l3-interfaces/
    interface-name — the IGMP L3 interface name.  This leaf is the list
    key and is operational state (config false).
    """
    return self.__interface_name
def _set_interface_name(self, v, load=False):
    """
    Setter method for interface_name, mapped from YANG variable /igmp_snooping_state/igmp_l3_interfaces/igmp_l3_interfaces/interface_name (string)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_interface_name is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_interface_name() directly.

    YANG Description: Igmp L3 interface name
    """
    # This leaf is the list key (is_keyval=True): once the object lives
    # inside an instantiated list, rewriting the key directly would corrupt
    # the parent's index, so it is only permitted during a load.
    parent = getattr(self, "_parent", None)
    if parent is not None and load is False:
        raise AttributeError("Cannot set keys directly when" +
            " within an instantiated list")
    # Values coming from another pyangbind node carry a _utype hook that
    # first converts them to their underlying native type.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Re-wrap the value in the generated dynamic type; a failed coercion
        # means the supplied value is not compatible with a YANG string.
        t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="interface-name", rest_name="interface-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mc-hms-operational', defining_module='brocade-mc-hms-operational', yang_type='string', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """interface_name must be of a type compatible with string""",
            'defined-type': "string",
            'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="interface-name", rest_name="interface-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mc-hms-operational', defining_module='brocade-mc-hms-operational', yang_type='string', is_config=False)""",
        })
    self.__interface_name = t
    # Propagate the change upward when a change-notification hook exists.
    if hasattr(self, '_set'):
        self._set()
def _unset_interface_name(self):
    # Reset the leaf to a fresh, unset instance of its generated default
    # (string list key; same construction as in __init__).
    self.__interface_name = YANGDynClass(base=unicode, is_leaf=True, yang_name="interface-name", rest_name="interface-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mc-hms-operational', defining_module='brocade-mc-hms-operational', yang_type='string', is_config=False)
def _get_is_igmp_enabled(self):
    """Return the ``is_igmp_enabled`` leaf (YANG type: uint8).

    Operational-state leaf (config false) mapped from /igmp-snooping-state/
    igmp-l3-interfaces/igmp-l3-interfaces/is-igmp-enabled; the YANG model
    describes it as "IS IGMP Enabled on an Interface".
    """
    return self.__is_igmp_enabled
def _set_is_igmp_enabled(self, v, load=False):
    """
    Setter method for is_igmp_enabled, mapped from YANG variable /igmp_snooping_state/igmp_l3_interfaces/igmp_l3_interfaces/is_igmp_enabled (uint8)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_is_igmp_enabled is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_is_igmp_enabled() directly.

    YANG Description: IS IGMP Enabled on an Interface
    """
    # Values coming from another pyangbind node carry a _utype hook that
    # first converts them to their underlying native type.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Re-wrap the value in the restricted uint8 (0..255) dynamic type;
        # a failed coercion means the value is out of range or wrong type.
        t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="is-igmp-enabled", rest_name="is-igmp-enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mc-hms-operational', defining_module='brocade-mc-hms-operational', yang_type='uint8', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """is_igmp_enabled must be of a type compatible with uint8""",
            'defined-type': "uint8",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="is-igmp-enabled", rest_name="is-igmp-enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mc-hms-operational', defining_module='brocade-mc-hms-operational', yang_type='uint8', is_config=False)""",
        })
    self.__is_igmp_enabled = t
    # Propagate the change upward when a change-notification hook exists.
    if hasattr(self, '_set'):
        self._set()
def _unset_is_igmp_enabled(self):
    # Reset the leaf to a fresh, unset instance of its generated default
    # (restricted uint8; same construction as in __init__).
    self.__is_igmp_enabled = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="is-igmp-enabled", rest_name="is-igmp-enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mc-hms-operational', defining_module='brocade-mc-hms-operational', yang_type='uint8', is_config=False)
def _get_query_interval(self):
    """Return the ``query_interval`` leaf (YANG type: uint32).

    Operational-state leaf (config false) mapped from /igmp-snooping-state/
    igmp-l3-interfaces/igmp-l3-interfaces/query-interval; the YANG model
    describes it as the IGMP query interval.
    """
    return self.__query_interval
def _set_query_interval(self, v, load=False):
    """
    Setter method for query_interval, mapped from YANG variable /igmp_snooping_state/igmp_l3_interfaces/igmp_l3_interfaces/query_interval (uint32)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_query_interval is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_query_interval() directly.

    YANG Description: Igmp query interval
    """
    # Values coming from another pyangbind node carry a _utype hook that
    # first converts them to their underlying native type.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Re-wrap the value in the restricted uint32 (0..4294967295) dynamic
        # type; a failed coercion means the value is out of range or wrong type.
        t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="query-interval", rest_name="query-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mc-hms-operational', defining_module='brocade-mc-hms-operational', yang_type='uint32', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """query_interval must be of a type compatible with uint32""",
            'defined-type': "uint32",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="query-interval", rest_name="query-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mc-hms-operational', defining_module='brocade-mc-hms-operational', yang_type='uint32', is_config=False)""",
        })
    self.__query_interval = t
    # Propagate the change upward when a change-notification hook exists.
    if hasattr(self, '_set'):
        self._set()
def _unset_query_interval(self):
    # Reset the leaf to a fresh, unset instance of its generated default
    # (restricted uint32; same construction as in __init__).
    self.__query_interval = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="query-interval", rest_name="query-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mc-hms-operational', defining_module='brocade-mc-hms-operational', yang_type='uint32', is_config=False)
def _get_other_querier_interval(self):
    """Return the ``other_querier_interval`` leaf (YANG type: uint32).

    Operational-state leaf (config false) mapped from /igmp-snooping-state/
    igmp-l3-interfaces/igmp-l3-interfaces/other-querier-interval; the YANG
    model describes it as the IGMP other-querier interval.
    """
    return self.__other_querier_interval
def _set_other_querier_interval(self, v, load=False):
    """
    Setter method for other_querier_interval, mapped from YANG variable /igmp_snooping_state/igmp_l3_interfaces/igmp_l3_interfaces/other_querier_interval (uint32)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_other_querier_interval is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_other_querier_interval() directly.

    YANG Description: Igmp other querier interval
    """
    # Unwrap values that carry their own YANG type converter before
    # re-validating them below.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Coerce/validate v against the uint32 restriction; raises
        # TypeError/ValueError on incompatible input.
        t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="other-querier-interval", rest_name="other-querier-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mc-hms-operational', defining_module='brocade-mc-hms-operational', yang_type='uint32', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """other_querier_interval must be of a type compatible with uint32""",
            'defined-type': "uint32",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="other-querier-interval", rest_name="other-querier-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mc-hms-operational', defining_module='brocade-mc-hms-operational', yang_type='uint32', is_config=False)""",
        })

    self.__other_querier_interval = t
    # Notify the containing object (if it supports change callbacks).
    if hasattr(self, '_set'):
        self._set()
def _unset_other_querier_interval(self):
    # Reset other_querier_interval to a fresh default YANGDynClass wrapper
    # for the read-only uint32 leaf "other-querier-interval".
    self.__other_querier_interval = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="other-querier-interval", rest_name="other-querier-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mc-hms-operational', defining_module='brocade-mc-hms-operational', yang_type='uint32', is_config=False)
def _get_query_reponse_time(self):
    """
    Getter method for query_reponse_time, mapped from YANG variable /igmp_snooping_state/igmp_l3_interfaces/igmp_l3_interfaces/query_reponse_time (uint32)

    YANG Description: Igmp query response time
    """
    # NOTE: "reponse" (sic) is the identifier used by the source YANG
    # model; it must not be corrected here or the mapping breaks.
    return self.__query_reponse_time
def _set_query_reponse_time(self, v, load=False):
    """
    Setter method for query_reponse_time, mapped from YANG variable /igmp_snooping_state/igmp_l3_interfaces/igmp_l3_interfaces/query_reponse_time (uint32)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_query_reponse_time is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_query_reponse_time() directly.

    YANG Description: Igmp query response time
    """
    # Unwrap values that carry their own YANG type converter.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Validate against the uint32 range; "reponse" (sic) matches the
        # YANG node name and must be preserved.
        t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="query-reponse-time", rest_name="query-reponse-time", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mc-hms-operational', defining_module='brocade-mc-hms-operational', yang_type='uint32', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """query_reponse_time must be of a type compatible with uint32""",
            'defined-type': "uint32",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="query-reponse-time", rest_name="query-reponse-time", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mc-hms-operational', defining_module='brocade-mc-hms-operational', yang_type='uint32', is_config=False)""",
        })

    self.__query_reponse_time = t
    # Notify the containing object (if it supports change callbacks).
    if hasattr(self, '_set'):
        self._set()
def _unset_query_reponse_time(self):
    # Reset query_reponse_time to a fresh default YANGDynClass wrapper for
    # the read-only uint32 leaf "query-reponse-time" (sic, per the model).
    self.__query_reponse_time = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="query-reponse-time", rest_name="query-reponse-time", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mc-hms-operational', defining_module='brocade-mc-hms-operational', yang_type='uint32', is_config=False)
def _get_last_member_query_interval(self):
    """
    Getter method for last_member_query_interval, mapped from YANG variable /igmp_snooping_state/igmp_l3_interfaces/igmp_l3_interfaces/last_member_query_interval (uint32)

    YANG Description: Igmp last_member_query_interval
    """
    # Returns the YANGDynClass-wrapped uint32 leaf, not a plain int.
    return self.__last_member_query_interval
def _set_last_member_query_interval(self, | |
except exception.EntityNotFound:
LOG.debug("instance is not exist, no need to delete!",
instance=instance)
return
if provider_server:
try:
self.os_novaclient(context).delete(provider_server)
self.os_novaclient(context).check_delete_server_complete(
provider_server)
except exception.ResourceInError:
import time; time.sleep(30)
self.os_novaclient(context).check_delete_server_complete(
provider_server)
else:
LOG.error('Can not found server to delete.')
# raise exception_ex.ServerNotExistException(server_name=instance.display_name)
try:
# delete instance mapper
self.caa_db_api.instance_mapper_delete(context,
instance.uuid,
instance.project_id)
except Exception as ex:
LOG.error(_LE("instance_mapper_delete failed! ex = %s"), ex)
LOG.debug('success to delete instance: %s' % instance.uuid)
def _detach_volume(self, context, provider_volume):
    """Detach ``provider_volume`` from its server and wait until done.

    No-op when the volume is already in the "available" (detached)
    state.

    :param context: request context used for the nova/cinder clients
    :param provider_volume: provider-side volume object (has ``status``
        and ``id`` attributes)
    """
    if provider_volume.status == "available":
        LOG.debug("provider volume(%s) has been detach", provider_volume.id)
        return
    # Only the server id is needed here; the attachment id returned by
    # the helper is intentionally discarded.
    _, server_id = self._get_attachment_id_for_volume(provider_volume)
    LOG.debug('server_id: %s' % server_id)
    LOG.debug('submit detach task')
    self.os_novaclient(context).detach_volume(server_id, provider_volume.id)
    LOG.debug('wait for volume in available status.')
    # Poll cinder until the volume leaves the "in-use" state.
    self.os_cinderclient(context).check_detach_volume_complete(
        provider_volume)
def detach_volume(self, connection_info, instance, mountpoint,
                  encryption=None):
    """Detach the provider volume described by ``connection_info``.

    :param connection_info: dict whose ``data.volume_id`` identifies the
        cascading volume to detach
    :param instance: nova instance the volume is attached to
    :param mountpoint: unused, kept for driver-interface compatibility
    :param encryption: unused, kept for driver-interface compatibility
    """
    LOG.debug('start to detach volume.')
    LOG.debug('instance: %s' % instance)
    LOG.debug('connection_info: %s' % connection_info)
    # Operate with an admin context scoped to the instance's project.
    admin_context = req_context.RequestContext(
        is_admin=True, project_id=instance.project_id)
    volume_id = connection_info['data']['volume_id']
    backing_volume = self._get_provider_volume(admin_context, volume_id)
    self._detach_volume(admin_context, backing_volume)
    LOG.debug("detach volume success!", instance=instance)
def get_available_nodes(self, refresh=False):
    """Return the nodenames of all nodes managed by this compute service.

    Drivers that support multiple compute nodes return one entry per
    node; this driver manages exactly one node, identified by the local
    hostname, so a single-element list is returned.

    :param refresh: unused, kept for driver-interface compatibility
    """
    return [socket.gethostname()]
def _get_host_stats(self, hostname):
    """Return static capacity statistics for this fusionsphere node.

    The figures are oversized placeholders: this proxy driver does not
    track real hardware capacity, so every resource is reported as
    effectively unlimited.
    """
    cpu_info = ('{"model": ["Intel(R) Xeon(R) CPU E5-2670 0 @ 2.60GHz"],'
                '"vendor": ["Huawei Technologies Co., Ltd."], '
                '"topology": {"cores": 16, "threads": 32}}')
    supported = jsonutils.dumps(
        [["i686", "xen", "uml"], ["x86_64", "xen", "uml"]])
    stats = {
        'vcpus': 999999,
        'vcpus_used': 0,
        'memory_mb': 999999,
        'memory_mb_used': 0,
        'local_gb': 99999999,
        'local_gb_used': 0,
        'host_memory_total': 99999999,
        'host_memory_free': 99999999,
        'disk_total': 99999999,
        'disk_used': 0,
        'hypervisor_type': 'fusionsphere',
        'hypervisor_version': '5005000',
        'hypervisor_hostname': hostname,
        'cpu_info': cpu_info,
        'supported_instances': supported,
        'numa_topology': None,
    }
    return stats
def get_available_resource(self, nodename):
    """Return the resource-tracker dict for *nodename*.

    Values are derived from the static placeholder stats produced by
    _get_host_stats(); only 'supported_instances' is reshaped (JSON
    string -> list of (arch, hv_type, vm_mode) tuples).
    """
    host_stats = self._get_host_stats(nodename)
    supported_instances = list()
    for one in jsonutils.loads(host_stats['supported_instances']):
        supported_instances.append((one[0], one[1], one[2]))
    # NOTE(review): host_stats['cpu_info'] is already a JSON-formatted
    # string, so jsonutils.dumps() below double-encodes it into a quoted
    # string — confirm this is what the resource tracker expects.
    return {'vcpus': host_stats['vcpus'],
            'memory_mb': host_stats['host_memory_total'],
            'local_gb': host_stats['disk_total'], 'vcpus_used': 0,
            'memory_mb_used': host_stats['host_memory_total'] - host_stats[
                'host_memory_free'],
            'local_gb_used': host_stats['disk_used'],
            'hypervisor_type': host_stats['hypervisor_type'],
            'hypervisor_version': host_stats['hypervisor_version'],
            'hypervisor_hostname': host_stats['hypervisor_hostname'],
            'cpu_info': jsonutils.dumps(host_stats['cpu_info']),
            'supported_instances': supported_instances,
            'numa_topology': None,}
def get_info(self, instance):
    """Return an InstanceInfo with the power state of the backing server.

    Falls back to power_state.NOSTATE when no provider server is found.
    Memory/CPU figures are hard-coded placeholders.
    """
    STATUS = power_state.NOSTATE
    context = req_context.RequestContext(is_admin=True,
                                         project_id=instance.project_id)
    server = self._get_provider_instance(context, instance)
    LOG.debug('server: %s' % server)
    if server:
        # Map the provider's extended power-state attribute to the local
        # FS_POWER_STATE table.
        instance_power_state = getattr(server, 'OS-EXT-STS:power_state')
        STATUS = FS_POWER_STATE[instance_power_state]
    LOG.debug('end to get_info: %s' % STATUS)
    return hardware.InstanceInfo(
        state=STATUS,
        max_mem_kb=0,
        mem_kb=0,
        num_cpu=1)
def get_instance_macs(self, instance):
    """Intentionally unimplemented; MAC discovery is not needed here.

    :param instance: nova instance object (ignored)
    :return: None
    """
    return None
def get_volume_connector(self, instance):
    """Return placeholder volume-connector info for this host.

    Only the configured block-storage IP is real; initiator and host are
    fixed fake values.
    """
    connector = {
        'ip': CONF.my_block_storage_ip,
        'initiator': 'fake',
        'host': 'fakehost',
    }
    return connector
def init_host(self, host):
    """No per-host initialization is required for this driver."""
    return None
def power_off(self, instance, timeout=0, retry_interval=0):
    """Stop the provider server that backs *instance*.

    :param instance: nova instance whose backing server is stopped
    :param timeout: unused, kept for driver-interface compatibility
    :param retry_interval: unused, kept for driver-interface compatibility
    :raises exception_ex.ServerNotExistException: no backing server found
    :raises exception_ex.ServerStatusException: server is neither ACTIVE
        nor SHUTOFF, so a stop request cannot be issued
    """
    LOG.debug('start to stop server: %s' % instance.uuid)
    context = req_context.RequestContext(is_admin=True,
                                         project_id=instance.project_id)
    # Consistency fix: look the server up the same way power_on(),
    # reboot() and get_info() do — (context, instance) — instead of the
    # keyword-only _get_provider_instance(hybrid_instance=...) form,
    # which skipped the context this method builds anyway.
    server = self._get_provider_instance(context, instance)
    if not server:
        LOG.debug('can not find sub os server for '
                  'instance: %s' % instance.uuid)
        raise exception_ex.ServerNotExistException(
            server_name=instance.display_name)
    LOG.debug('server: %s status is: %s' % (server.id, server.status))
    if server.status == vm_states.ACTIVE.upper():
        LOG.debug('start to add stop task')
        server.stop()
        LOG.debug('submit stop task')
        # Block until the provider reports the server stopped.
        self.os_novaclient(context).check_stop_server_complete(server)
        LOG.debug('stop server: %s success' % instance.uuid)
    elif server.status == 'SHUTOFF':
        # Already stopped: treat as success.
        LOG.debug('sub instance status is already STOPPED.')
        LOG.debug('stop server: %s success' % instance.uuid)
        return
    else:
        LOG.warning('server status is not in ACTIVE OR STOPPED,'
                    'can not do POWER_OFF operation')
        raise exception_ex.ServerStatusException(status=server.status)
def power_on(self, context, instance, network_info,
             block_device_info=None):
    """Start the provider server that backs *instance*.

    :param context: request context
    :param instance: nova instance whose backing server is started
    :param network_info: unused, kept for driver-interface compatibility
    :param block_device_info: unused, kept for driver-interface
        compatibility
    :raises exception_ex.ServerNotExistException: no backing server found
    :raises exception_ex.ServerStatusException: server is neither ACTIVE
        nor SHUTOFF, so a start request cannot be issued
    """
    LOG.debug('start to start server: %s' % instance.uuid)
    server = self._get_provider_instance(context, instance)
    if not server:
        LOG.debug('can not find sub os server for '
                  'instance: %s' % instance.uuid)
        # Consistency fix: pass display_name via the server_name keyword
        # as power_off()/reboot() do; the previous positional form would
        # be taken as a different constructor argument.
        raise exception_ex.ServerNotExistException(
            server_name=instance.display_name)
    LOG.debug('server: %s status is: %s' % (server.id, server.status))
    if server.status == 'SHUTOFF':
        LOG.debug('start to add start task')
        server.start()
        LOG.debug('submit start task')
        # Block until the provider reports the server running.
        self.os_novaclient(context).check_start_server_complete(server)
        LOG.debug('start server: %s success' % instance.uuid)
    elif server.status == vm_states.ACTIVE.upper():
        # Already running: treat as success.
        LOG.debug('sub instance status is already ACTIVE.')
        return
    else:
        LOG.warning('server status is not in ACTIVE OR STOPPED,'
                    'can not do POWER_ON operation')
        raise exception_ex.ServerStatusException(status=server.status)
def reboot(self, context, instance, network_info, reboot_type,
           block_device_info=None, bad_volumes_callback=None):
    """Reboot (if ACTIVE) or start (if SHUTOFF) the backing server.

    :param context: request context
    :param instance: nova instance whose backing server is rebooted
    :param network_info: unused, kept for driver-interface compatibility
    :param reboot_type: passed through to the provider ('SOFT'/'HARD')
    :param block_device_info: unused
    :param bad_volumes_callback: unused
    :raises exception_ex.ServerNotExistException: no backing server found
    :raises exception_ex.ServerStatusException: server is neither ACTIVE
        nor SHUTOFF
    """
    LOG.debug('start to reboot server: %s' % instance.uuid)
    server = self._get_provider_instance(context, instance)
    if not server:
        LOG.debug('can not find sub os server for '
                  'instance: %s' % instance.uuid)
        raise exception_ex.ServerNotExistException(
            server_name=instance.display_name)
    LOG.debug('server: %s status is: %s' % (server.id, server.status))
    if server.status == vm_states.ACTIVE.upper():
        server.reboot(reboot_type)
        self.os_novaclient(context).check_reboot_server_complete(server)
        LOG.debug('reboot server: %s success' % instance.uuid)
    elif server.status == 'SHUTOFF':
        # A stopped server cannot be rebooted; starting it gives the
        # caller the expected "running again" outcome.
        server.start()
        self.os_novaclient(context).check_start_server_complete(server)
        LOG.debug('reboot server: %s success' % instance.uuid)
    else:
        # Bug fix: this warning previously said "POWER_OFF" — a
        # copy-paste from power_off(); this method performs REBOOT.
        LOG.warning('server status is not in ACTIVE OR STOPPED,'
                    'can not do REBOOT operation')
        raise exception_ex.ServerStatusException(status=server.status)
def provider_create_image(self, context, instance, image, metadata):
    """Create a snapshot image of *instance* on the provider cloud.

    Waits for both the snapshot task and the resulting glance image to
    become active; on failure the provider image is deleted and the
    exception re-raised.

    :returns: (provider_instance, provider_image_id)
    """
    provider_instance = self._get_provider_instance(context,
                                                    instance)
    provider_metadata = {
        "disk_format": metadata.get("disk_format", "raw"),
        "container_format": metadata.get("container_format", "bare")}
    # provider create image; the returned location is currently unused.
    location, provider_image_id = self.os_novaclient(
        context).create_image(
        provider_instance, image['name'], provider_metadata)
    try:
        # wait create image success
        self.os_novaclient(context).check_create_image_server_complete(
            provider_instance)
        # wait image status is active
        self.os_glanceclient(context).check_image_active_complete(
            provider_image_id)
    except Exception as ex:
        LOG.exception(_LE("create image failed! ex = %s"), ex)
        # Roll back the half-created provider image, then re-raise.
        with excutils.save_and_reraise_exception():
            self.os_glanceclient(context).delete(provider_image_id)
    return provider_instance, provider_image_id
def snapshot(self, context, instance, image_id, update_task_state):
    """Snapshot *instance* into the image registered as *image_id*.

    First tries a zero-copy path: if the provider image exposes an
    http/swift ``direct_url``, only the location is recorded locally.
    Otherwise the image bytes are downloaded from the provider glance
    and uploaded to the local image service.  On any failure the
    provider-side image is deleted and the exception re-raised.
    """
    snapshot = self._image_api.get(context, image_id)
    image_format = None
    metadata = self._create_snapshot_metadata(instance.image_meta,
                                              instance,
                                              image_format,
                                              snapshot['name'])
    update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)
    # Create the snapshot on the provider cloud and wait until active.
    provider_instance, provider_image_id = self.provider_create_image(
        context, instance, snapshot, metadata)
    try:
        update_task_state(task_state=task_states.IMAGE_UPLOADING,
                          expected_state=task_states.IMAGE_PENDING_UPLOAD)
        try:
            image = self.os_glanceclient(context).get_image(
                provider_image_id)
            LOG.debug("+++hw, image = %s", image)
            if hasattr(image, "direct_url"):
                direct_url = image.direct_url
                if direct_url.startswith("swift+http://") or \
                        direct_url.startswith("http://") or \
                        direct_url.startswith("https://"):
                    metadata["location"] = direct_url
                    self._image_api.update(context, image_id, metadata,
                                           purge_props=False)
                else:
                    # NOTE(review): bare Exception() is used purely as a
                    # jump into the download fallback below.
                    raise Exception()
            else:
                raise Exception()
        except Exception:
            # Fallback: no usable direct_url — stream the bytes instead.
            metadata.pop("location", None)
            # download from provider glance
            LOG.debug("+++hw, begin to download image(%s)",
                      provider_image_id)
            image_data = self.os_glanceclient(context).data(
                provider_image_id)
            LOG.debug("+++hw, image length = %s", len(image_data))
            self._image_api.update(context,
                                   image_id,
                                   metadata,
                                   image_data)
        # create image mapper
        values = {"provider_image_id": provider_image_id}
        self.caa_db_api.image_mapper_create(context, image_id,
                                            context.project_id,
                                            values)
    except Exception as ex:
        LOG.exception(_LE("create image failed! ex = %s"), ex)
        # Roll back the provider image, then re-raise the original error.
        with excutils.save_and_reraise_exception():
            self.os_glanceclient(context).delete(provider_image_id)
def get_provider_lxc_volume_id(self, context, instance, index):
    """Return the provider volume id at *index* for the LXC instance.

    A value cached in the instance's system metadata wins; otherwise the
    provider server's attached volumes are listed, sorted by device
    name, and the one at *index* is used.  Returns None when nothing is
    found.
    """
    cached_id = instance.system_metadata.get('provider_lxc_volume_id',
                                             None)
    if cached_id:
        return cached_id
    provider_uuid = self._get_provider_instance_id(context, instance.uuid)
    if provider_uuid is None:
        return
    attached = self.os_novaclient(context).get_server_volumes(
        provider_uuid)
    # Device order (e.g. /dev/vda, /dev/vdb, ...) defines the index.
    attached = sorted(attached, key=lambda volume: volume.device)
    LOG.debug("+++hw, volumes = %s", attached)
    if index < len(attached):
        return attached[index].volumeId
def _spawn(self, context, instance, image_meta, injected_files,
           admin_password, network_info=None, block_device_info=None):
    """Create the provider server backing *instance* and record mappings.

    Resolves the provider flavor/base image, builds metadata and block
    device mappings, boots the server, waits for it to become ACTIVE,
    writes the instance mapper entry, and stashes the provider server's
    id and fixed IPs into the instance's system metadata.  Any failure
    after server creation rolls the server back before re-raising.
    """
    try:
        LOG.debug('instance: %s' % instance)
        LOG.debug('block device info: %s' % block_device_info)
        flavor = instance.get_flavor()
        LOG.debug('flavor: %s' % flavor)
        sub_flavor_id = self._get_provider_flavor_id(context,
                                                     flavor.flavorid)
        name = self._generate_provider_instance_name(instance.display_name,
                                                     instance.uuid)
        LOG.debug('name: %s' % name)
        image_ref = None
        if instance.image_ref:
            # Image-backed boot: resolve the provider-side base image.
            sub_image_id = self._get_provider_base_image_id(context)
            try:
                image_ref = self.os_glanceclient(context).get_image(
                    sub_image_id)
            except Exception as ex:
                LOG.exception(_LE("get image(%(image_id)s) failed, "
                                  "ex = %(ex)s"), image_id=sub_image_id,
                              ex=ex)
                raise
        else:
            # Volume-backed boot: no image reference is needed.
            image_ref = None
        if instance.metadata:
            # Deep-copy so the tag added below never mutates the
            # instance's own metadata dict.
            metadata = copy.deepcopy(instance.metadata)
        else:
            metadata = {}
        metadata = self._add_tag_to_metadata(metadata, instance.uuid)
        LOG.debug('metadata: %s' % metadata)
        app_security_groups = instance.security_groups
        LOG.debug('app_security_groups: %s' % app_security_groups)
        agent_inject_files = self._get_agent_inject_file(instance,
                                                         injected_files)
        sub_bdm = self._transfer_to_sub_block_device_mapping_v2(
            context, instance, block_device_info)
        LOG.debug('sub_bdm: %s' % sub_bdm)
        project_mapper = self._get_project_mapper(context,
                                                  context.project_id)
        security_groups = self._get_provider_security_groups_list(
            context, project_mapper)
        nics = self._get_provider_nics(context, project_mapper)
        provider_server = self.os_novaclient(context).create_server(
            name, image_ref, sub_flavor_id, meta=metadata,
            files=agent_inject_files,
            reservation_id=instance.reservation_id,
            security_groups=security_groups,
            nics=nics,
            availability_zone=project_mapper.get("availability_zone", None),
            block_device_mapping_v2=sub_bdm)
        LOG.debug('wait for server active')
        try:
            self.os_novaclient(context).check_create_server_complete(
                provider_server)
        except Exception as ex:
            # rollback
            with excutils.save_and_reraise_exception():
                provider_server.delete()
        LOG.debug('create server success.............!!!')
        try:
            # instance mapper
            values = {'provider_instance_id': provider_server.id}
            self.caa_db_api.instance_mapper_create(context,
                                                   instance.uuid,
                                                   instance.project_id,
                                                   values)
        except Exception as ex:
            # Mapping is mandatory: delete the server and fail the spawn.
            LOG.exception(_LE("instance_mapper_create failed! ex = %s"), ex)
            provider_server.delete()
            raise
        # Record the provider server's fixed IPs and id on the instance.
        interface_list = self.os_novaclient(context).interface_list(
            provider_server)
        ips = []
        for interface in interface_list:
            ip = interface.fixed_ips[0].get('ip_address')
            ips.append(ip)
        instance_ips = ','.join(ips)
        LOG.debug('luorui debug instance_ips %s' % instance_ips)
        instance.system_metadata['instance_ips'] = instance_ips
        instance.system_metadata['instance_id'] = provider_server.id
        try:
            instance.save()
        except Exception:
            # Best-effort save; failure is deliberately ignored.
            pass
            # raise exception_ex.InstanceSaveFailed(
            #     instance_uuid=instance.uuid)
    except exception_ex.InstanceSaveFailed:
        raise
    except Exception as e:
        # NOTE(review): traceback.format_exc() takes an optional limit,
        # not an exception — passing `e` here is a likely misuse; confirm
        # before relying on the formatted output.
        LOG.error(
            'Exception when spawn, exception: %s' % traceback.format_exc(e))
        raise Exception(
            'Exception when spawn, exception: %s' % traceback.format_exc(e))
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
"""Create a new instance/VM/domain on the virtualization platform.
Once this successfully completes, the instance should be
running (power_state.RUNNING).
If this fails, any partial instance should be completely
cleaned up, and the virtualization platform should be in the state
that it was before this call began.
:param context: security context
:param instance: nova.objects.instance.Instance
This function should use the data there to guide
the creation of the new instance.
Instance(
access_ip_v4=None,
access_ip_v6=None,
architecture=None,
auto_disk_config=False,
availability_zone='az31.shenzhen--aws',
cell_name=None,
cleaned=False,
config_drive='',
created_at=2015-08-31T02:44:36Z,
default_ephemeral_device=None,
default_swap_device=None,
deleted=False,
deleted_at=None,
disable_terminate=False,
display_description='server@daa5e17c-cb2c-4014-9726-b77109380ca6',
display_name='server@daa5e17c-cb2c-4014-9726-b77109380ca6',
ephemeral_gb=0,
ephemeral_key_uuid=None,
fault=<?>,
host='42085B38-683D-7455-A6A3-52F35DF929E3',
hostname='serverdaa5e17c-cb2c-4014-9726-b77109380ca6',
id=49,
image_ref='6004b47b-d453-4695-81be-cd127e23f59e',
info_cache=InstanceInfoCache,
instance_type_id=2,
kernel_id='',
key_data=None,
key_name=None,
launch_index=0,
launched_at=None,
launched_on='42085B38-683D-7455-A6A3-52F35DF929E3',
locked=False,
locked_by=None,
memory_mb=512,
metadata={},
node='h',
numa_topology=None,
os_type=None,
pci_devices=<?>,
power_state=0,
progress=0,
project_id='52957ad92b2146a0a2e2b3279cdc2c5a',
ramdisk_id='',
reservation_id='r-d1dkde4x',
root_device_name='/dev/sda',
root_gb=1,
scheduled_at=None,
security_groups=SecurityGroupList,
shutdown_terminate=False,
system_metadata={
image_base_image_ref='6004b47b-d453-4695-81be-cd127e23f59e',
image_container_format='bare',
image_disk_format='qcow2',
image_min_disk='1',
image_min_ram='0',
instance_type_ephemeral_gb='0',
instance_type_flavorid='1',
instance_type_id='2',
instance_type_memory_mb='512',
instance_type_name='m1.tiny',
instance_type_root_gb='1',
instance_type_rxtx_factor='1.0',
instance_type_swap='0',
instance_type_vcpu_weight=None,
instance_type_vcpus='1'
},
task_state='spawning',
terminated_at=None,
updated_at=2015-08-31T02:44:38Z,
user_data=u'<SANITIZED>,
user_id='ea4393b196684c8ba907129181290e8d',
uuid=92d22a62-c364-4169-9795-e5a34b5f5968,
vcpus=1,
vm_mode=None,
vm_state='building')
:param image_meta: image object returned by nova.image.glance that
defines the image from which to boot this instance
e.g.
{
u'status': u'active',
| |
' / ' + \
# var_prefix + 'DATA' + str(idx) + '_FC_SIMD_FACTOR;\n')
code.append(indent(1) + 'data1_buf_size = LAYER_IN_NUM_T * LAYER_ROW_IL_FACTOR * LAYER_FILTER_S * LAYER_FILTER_S / %sDATA%d_FC_SIMD_FACTOR;\n' % (var_prefix, idx))
code.append(indent(1) + 'local_transfer_size = data' + str(idx) + '_buf_size * (' + str(desp['OP_ENGINE_NUM'][idx]) + ' / ' + var_prefix + 'DATA' + str(idx) + '_FC_SPLIT_FACTOR - engine_id) * ' + var_prefix + 'DATA' + str(idx) + '_FC_GROUP_FACTOR;\n\n')
code.append(indent(1) + 'while(more_to_forward){\n')
code.append('#pragma HLS PIPELINE II=1\n')
code.append(indent(2) + var_prefix + 'Data' + str(idx) + 'TransferChannelType data_read_from_fifo = fifo_transfer_in.read();\n')
code.append(indent(2) + 'bool data_is_to_buffer;\n')
code.append(indent(2) + 'bool data_is_to_forward;\n')
code.append(indent(2) + 'unsigned int feeder_id = data_read_from_fifo.feeder_id;\n')
code.append(indent(2) + 'data_is_to_buffer = LAST_ENGINE || (!LAST_ENGINE && feeder_id == engine_id);\n');
code.append(indent(2) + 'data_is_to_forward = !LAST_ENGINE && (feeder_id != engine_id);\n')
# code.append(indent(2) + 'if (!LAST_ENGINE){\n')
# code.append(indent(3) + 'if (data_is_to_forward){\n')
# code.append(indent(4) + 'fifo_transfer_out.write(data_read_from_fifo);\n')
# code.append(indent(3) + '}\n')
# code.append(indent(2) + '}\n')
buf_size = desp['DFC_BUF_SIZE'][idx] / desp['FC_SIMD_FACTOR'][idx]
width = cal_width(buf_size)
code.append(indent(2) + 'ap_uint<' + str(width) + '> buffer_ind_to_write_to_buffer = buffer_write_counter;\n\n')
code.append(indent(2) + 'if (data_is_to_buffer){\n')
code.append(indent(3) + 'buffer[buffer_gs_id][buffer_ind_to_write_to_buffer] = data_read_from_fifo;\n')
code.append(indent(3) + 'buffer_write_counter++;\n')
code.append(indent(3) + 'if (buffer_write_counter == data' + str(idx) + '_buf_size){\n')
code.append(indent(4) + 'buffer_write_counter = 0;\n')
code.append(indent(4) + 'buffer_gs_id++;\n')
code.append(indent(4) + 'if (buffer_gs_id == ' + var_prefix + 'DATA' + str(idx) + '_FC_GROUP_FACTOR){\n')
code.append(indent(5) + 'buffer_gs_id = 0;\n')
code.append(indent(5) + 'more_to_write_to_buffer = false;\n')
code.append(indent(4) + '}\n')
code.append(indent(3) + '}\n')
code.append(indent(2) + '}\n')
code.append(indent(2) + 'transfer_counter++;\n')
code.append(indent(2) + 'if (transfer_counter == local_transfer_size){\n')
code.append(indent(3) + 'transfer_counter = 0;\n')
code.append(indent(3) + 'more_to_forward = false;\n')
code.append(indent(2) + '}\n')
code.append(indent(1) + '}\n\n')
code.append('}\n\n')
idx += 1
return code
def df_feed(desp, config):
code = []
var_prefix = 'U%s' %(desp['KERNEL_ID']) + '_'
idx = 0
for op_name in desp['OP_NAME']:
if idx == 0:
code.append('void ' + var_prefix + 'Data' + str(idx) + 'FeedData0(\n')
code.append(indent(1) + var_prefix + 'Data' + str(idx) + 'TransferChannelType buffer[' + \
var_prefix + 'DATA' + str(idx) + '_FC_GROUP_FACTOR][' + var_prefix + 'DATA' + str(idx) + \
'_BUF_SIZE/' + var_prefix + 'DATA' + str(idx) + '_FC_SIMD_FACTOR],\n')
for gs in range(desp['FC_GROUP_FACTOR'][idx]):
code.append(indent(1) + 'stream<' + var_prefix + 'Data' + str(idx) + 'PEChannelType> &fifo_feed_' + str(gs) + ',\n')
#code.append(indent(1) + 'unsigned int initial_round,\n')
code.append(indent(1) + 'uint LAYER_IN_NUM_T,\n')
code.append(indent(1) + 'uint LAYER_IN_IMG_H_T,\n')
code.append(indent(1) + 'uint LAYER_FILTER_S,\n')
code.append(indent(1) + 'uint LAYER_STRIDE,\n')
code.append(indent(1) + 'uint LAYER_ROW_IL_FACTOR,\n')
code.append(indent(1) + 'uint LAYER_COL_IL_FACTOR\n')
code.append('){\n')
code.append('#pragma HLS INLINE off\n')
code.append(indent(1) + 'bool more_to_feed_to_sys_arr = true;\n\n')
for counter in desp['DF_FEED_COUNTER']:
width = counter['WIDTH']
var = counter['VARIABLE']
bound_lower = counter['BOUND'][0]
bound_upper = counter['BOUND'][1]
str_tmp = 'ap_uint<' + str(width) + '> ' + var + ' = ' + str(bound_lower) + ';'
code.append(indent(1) + str_tmp + '\n')
code.append('\n')
# code.append(indent(1) + var_prefix + 'Data' + str(idx) + 'TransferChannelType tmp = buffer[0][0];\n')
# code.append(indent(1) + 'unsigned int FILTER_S = tmp.FILTER_S;\n\n')
val = desp['PARAMETERS']['IN_IMG_H_T']
w = cal_width(val)
code.append(indent(1) + 'ap_uint<%d> c0_counter_bound;\n' % (w))
code.append(indent(1) + 'if (LAYER_STRIDE == 1){\n')
code.append(indent(2) + 'c0_counter_bound = LAYER_IN_IMG_H_T;\n')
code.append(indent(1) + '} else if (LAYER_STRIDE == 2){\n')
code.append(indent(2) + 'c0_counter_bound = LAYER_IN_IMG_H_T / 2;\n')
code.append(indent(1) + '}\n\n')
for gs in range(desp['FC_GROUP_FACTOR'][idx]):
code.append(indent(1) + 'ap_uint<' + var_prefix + 'DATA' + str(idx) + '_WIDTH*' + var_prefix + \
'SIMD_FACTOR> sel_tmp_' + str(gs) + '[' + var_prefix + 'DATA' + str(idx) + '_FC_SIMD_FACTOR/' + \
var_prefix + 'SIMD_FACTOR];\n')
code.append('#pragma HLS ARRAY_PARTITION variable=sel_tmp_' + str(gs) + ' complete dim=1\n')
code.append('\n')
code.append(indent(1) + 'while(more_to_feed_to_sys_arr){\n')
code.append('#pragma HLS PIPELINE II=1\n')
buf_size = desp['DFC_BUF_SIZE'][idx]
buf_size_width = int(cal_width(buf_size))
code.append(indent(2) + 'ap_uint<' + str(buf_size_width) + '> buffer_ind_to_feed_to_sys_arr;\n')
code.append(indent(2) + 'ap_uint<%d> w_idx, h_idx;\n' % (buf_size_width))
code.append(indent(2) + 'if (LAYER_STRIDE == 1){\n')
code.append(indent(3) + 'w_idx = c2_counter + c4_counter;\n')
code.append(indent(3) + 'h_idx = c0_counter + c3_counter;\n')
code.append(indent(2) + '} else if (LAYER_STRIDE == 2){\n')
code.append(indent(3) + 'w_idx = c2_counter * 2 + 1 + c4_counter;\n')
code.append(indent(3) + 'h_idx = c0_counter * 2 + 1 + c3_counter;\n')
code.append(indent(2) + '}\n')
code.append(indent(2) + 'ap_uint<%d> w_bound = LAYER_COL_IL_FACTOR * LAYER_STRIDE + LAYER_FILTER_S - 1;\n' % (buf_size_width))
code.append(indent(2) + 'ap_uint<%d> h_bound = LAYER_IN_IMG_H_T + LAYER_FILTER_S - 1;\n' % (buf_size_width))
code.append(indent(2) + 'buffer_ind_to_feed_to_sys_arr = (w_idx + h_idx * w_bound + c5_counter * %sSIMD_FACTOR / %sDATA0_FC_SIMD_FACTOR * h_bound * w_bound) * %sDATA0_FC_SIMD_FACTOR + c5_counter * %sSIMD_FACTOR %% %sDATA0_FC_SIMD_FACTOR;\n' % (var_prefix, var_prefix, var_prefix, var_prefix, var_prefix))
# code.append(indent(2) + 'buffer_ind_to_feed_to_sys_arr = (c2_counter * LAYER_STRIDE + (LAYER_STRIDE - 1) + c4_counter) * %sDATA0_FC_SIMD_FACTOR + (c0_counter * LAYER_STRIDE + (LAYER_STRIDE - 1) + c3_counter) * (LAYER_COL_IL_FACTOR * LAYER_STRIDE + LAYER_FILTER_S - 1) * %sDATA0_FC_SIMD_FACTOR + c5_counter * %sSIMD_FACTOR / %sDATA0_FC_SIMD_FACTOR * (LAYER_IN_IMG_H_T + LAYER_FILTER_S - 1) * (LAYER_COL_IL_FACTOR * LAYER_STRIDE + LAYER_FILTER_S - 1) * %sDATA0_FC_SIMD_FACTOR + c5_counter * %sSIMD_FACTOR %% %sDATA0_FC_SIMD_FACTOR;\n' % (var_prefix, var_prefix, var_prefix, var_prefix, var_prefix, var_prefix, var_prefix))
code.append('\n')
code.append(indent(2) + 'ap_uint<%d> wide_index = buffer_ind_to_feed_to_sys_arr / ' % (buf_size_width) + var_prefix + 'DATA' + str(idx) + '_FC_SIMD_FACTOR;\n')
code.append(indent(2) + 'ap_uint<%d> wide_offset = buffer_ind_to_feed_to_sys_arr %% ' % (buf_size_width) + var_prefix + 'DATA' + str(idx) + '_FC_SIMD_FACTOR;\n\n')
for gs in range(desp['FC_GROUP_FACTOR'][idx]):
code.append(indent(2) + var_prefix + 'Data' + str(idx) + 'TransferChannelType buf_data_' + str(gs) + ' = buffer[' + str(gs) + '][wide_index];\n')
code.append(indent(2) + 'ap_uint<' + var_prefix + 'DATA' + str(idx) + '_WIDTH*' + var_prefix + \
'DATA' + str(idx) + '_FC_SIMD_FACTOR> wide_data_' + str(gs) + ' = buf_data_' + str(gs) + \
'.data;\n')
code.append(indent(2) + 'ap_uint<' + var_prefix + 'DATA' + str(idx) + '_WIDTH*' + var_prefix + \
'SIMD_FACTOR> data_to_feed_' + str(gs) + ';\n')
code.append(indent(2) + 'for (int s = 0; s < ' + var_prefix + 'DATA' + str(idx) + '_FC_SIMD_FACTOR / ' + var_prefix + 'SIMD_FACTOR; s++){\n')
code.append('#pragma HLS UNROLL\n')
code.append(indent(3) + 'sel_tmp_' + str(gs) + '[s] = wide_data_' + str(gs) + '(' + var_prefix + 'DATA' + str(idx) + '_WIDTH * ' + var_prefix + 'SIMD_FACTOR-1, 0);\n')
code.append(indent(3) + 'wide_data_' + str(gs) + ' = wide_data_' + str(gs) + ' >> (' + var_prefix + 'DATA' + str(idx) + '_WIDTH * ' + var_prefix + 'SIMD_FACTOR);\n')
code.append(indent(2) + '}\n')
code.append(indent(2) + 'data_to_feed_' + str(gs) + ' = sel_tmp_' + str(gs) + '[wide_offset / ' + var_prefix + 'SIMD_FACTOR];\n')
code.append('\n')
code.append(indent(2) + var_prefix + 'Data' + str(idx) + 'PEChannelType fifo_data_to_feed_' + str(gs) + ';\n')
code.append(indent(2) + 'fifo_data_to_feed_' + str(gs) + ' = ' + var_prefix + 'Data' + str(idx) + \
'PEChannelType(data_to_feed_' + str(gs) + ', buf_data_' + str(gs) + '.new_pair, buf_data_' + str(gs) + '.last_pair, buf_data_' + str(gs) + '.FILTER_S);\n')
code.append(indent(2) + 'fifo_feed_' + str(gs) + '.write(fifo_data_to_feed_' + str(gs) + ');\n\n')
code.append(indent(2) + '// counter logic\n')
# code_block = generate_df_counter_loops(desp, config)
# for codeline in code_block:
# code.append(indent(2) + codeline)
code.append(indent(2) + 'c0_counter++;\n')
# code.append(indent(2) + 'if (c0_counter == LAYER_IN_IMG_H_T / LAYER_STRIDE){\n')
code.append(indent(2) + 'if (c0_counter == c0_counter_bound){\n')
code.append(indent(3) + 'c0_counter = 0;\n')
code.append(indent(3) + 'c1_counter++;\n')
code.append(indent(3) + 'if (c1_counter == LAYER_ROW_IL_FACTOR){\n')
code.append(indent(4) + 'c1_counter = 0;\n')
code.append(indent(4) + 'c2_counter++;\n')
code.append(indent(4) + 'if (c2_counter == LAYER_COL_IL_FACTOR){\n')
code.append(indent(5) + 'c2_counter = 0;\n')
code.append(indent(5) + 'c3_counter++;\n')
code.append(indent(5) + 'if (c3_counter == LAYER_FILTER_S){\n')
code.append(indent(6) + 'c3_counter = 0;\n')
code.append(indent(6) + 'c4_counter++;\n')
code.append(indent(6) + 'if (c4_counter == LAYER_FILTER_S){\n')
code.append(indent(7) + 'c4_counter = 0;\n')
code.append(indent(7) + 'c5_counter++;\n')
code.append(indent(7) + 'if (c5_counter == LAYER_IN_NUM_T / %sSIMD_FACTOR){\n' % (var_prefix))
code.append(indent(8) + 'c5_counter = 0;\n')
code.append(indent(8) + 'more_to_feed_to_sys_arr = false;\n')
code.append(indent(7) + '}\n')
code.append(indent(6) + '}\n')
code.append(indent(5) + '}\n')
code.append(indent(4) + '}\n')
code.append(indent(3) + '}\n')
code.append(indent(2) + '}\n')
code.append(indent(1) + '}\n')
code.append('}\n\n')
elif idx == 1:
code.append('void ' + var_prefix + 'Data' + str(idx) + 'FeedData0(\n')
code.append(indent(1) + var_prefix + 'Data' + str(idx) + 'TransferChannelType buffer[' + \
var_prefix + 'DATA' + str(idx) + '_FC_GROUP_FACTOR][' + var_prefix + 'DATA' + str(idx) + \
'_BUF_SIZE/' + var_prefix + 'DATA' + str(idx) + '_FC_SIMD_FACTOR],\n')
for gs in range(desp['FC_GROUP_FACTOR'][idx]):
code.append(indent(1) + 'stream<' + var_prefix + 'Data' + str(idx) + 'PEChannelType> &fifo_feed_' + str(gs) + ',\n')
# code.append(indent(1) + 'unsigned int initial_round,\n')
code.append(indent(1) + 'uint LAYER_IN_NUM_T,\n')
code.append(indent(1) + 'uint LAYER_IN_IMG_H_T,\n')
code.append(indent(1) + 'uint LAYER_FILTER_S,\n')
code.append(indent(1) + 'uint LAYER_STRIDE,\n')
code.append(indent(1) + 'uint LAYER_ROW_IL_FACTOR,\n')
code.append(indent(1) + 'uint LAYER_COL_IL_FACTOR\n')
code.append('){\n')
code.append('#pragma HLS INLINE off\n')
code.append(indent(1) + 'bool more_to_feed_to_sys_arr = true;\n\n')
for counter in desp['DF_FEED_COUNTER']:
width = | |
#!/usr/bin/env python
# coding=utf-8
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for pfif_validator.py"""
import unittest
from StringIO import StringIO
import os
import sys
from pfif_validator import PfifValidator
import pfif_validator # to test main
import datetime
import utils
from utils import Message
import tests.pfif_xml as PfifXml
class ValidatorTests(unittest.TestCase):
"""Tests each validation function in pfif_validator.py"""
EXPIRED_TIME = datetime.datetime(1999, 3, 1)
PRINT_VALIDATOR_OUTPUT = True
# Set Up
def setUp(self): # pylint: disable=C0103
"""Some of the tests will run code that prints stuff out. This prevents it
from printing next to the clean dots from the unit tests."""
if not ValidatorTests.PRINT_VALIDATOR_OUTPUT:
sys.stdout = open(os.devnull, "w")
@staticmethod
def set_up_validator(xml):
"""Creates a PFIF validator from XML"""
pfif_file = StringIO(xml)
return PfifValidator(pfif_file)
# printing
  def test_printing(self):
    """Tests that each of the printing options in set_printing_options changes
    the behavior of print_errors"""
    # set up the messages to be printed; the XML file here will not be used for
    # any tests. It's just to get the validator initialized properly.
    validator = self.set_up_validator(PfifXml.XML_11_SMALL)
    # 11 numbered dummy lines so that xml_line_number=11 below can resolve to a
    # recognizable full line ("ZZZ 11") when show_full_line is enabled.
    lines = []
    for i in range(1, 12):
      lines.append('ZZZ ' + str(i))
    messages = []
    messages.append(Message("Message 1", is_error=True, xml_line_number=11,
                            xml_text="Text", person_record_id="Person",
                            note_record_id="Note"))
    messages.append(Message("Message 2", is_error=False))
    messages.append(Message("Message 3"))
    # With no errors or warnings, nothing should print
    output = validator.validator_messages_to_str(messages, show_errors=False,
                                                 show_warnings=False)
    self.assertEqual(len(output), 0)
    # with only errors on, only errors should print
    output = validator.validator_messages_to_str(messages, show_warnings=False,
                                                 show_line_numbers=False,
                                                 show_record_ids=False,
                                                 show_xml_text=False,
                                                 show_full_line=False)
    self.assertNotEqual(output.find("Message 1"), -1)
    self.assertEqual(output.find("Message 2"), -1)
    # the default value of is_error should be True, so Message 3 should print
    self.assertNotEqual(output.find("Message 3"), -1)
    # with warnings on, warnings should print
    output = validator.validator_messages_to_str(
        messages, show_line_numbers=False, show_record_ids=False,
        show_xml_text=False, show_full_line=False)
    self.assertNotEqual(output.find("Message 2"), -1)
    # line numbers, xml text, and record IDs should not print with them off and
    # should print with them on
    self.assertEqual(output.find("11"), -1)
    output = validator.validator_messages_to_str(
        messages, show_line_numbers=True, show_record_ids=False,
        show_xml_text=False, show_full_line=False)
    self.assertNotEqual(output.find("11"), -1)
    self.assertEqual(output.find("Text"), -1)
    output = validator.validator_messages_to_str(
        messages, show_record_ids=False, show_xml_text=True,
        show_full_line=False)
    self.assertNotEqual(output.find("Text"), -1)
    self.assertEqual(output.find("Person"), -1)
    self.assertEqual(output.find("Note"), -1)
    output = validator.validator_messages_to_str(
        messages, show_record_ids=True, show_full_line=False)
    self.assertNotEqual(output.find("Person"), -1)
    self.assertNotEqual(output.find("Note"), -1)
    # full-line display presumably uses xml_line_number to index xml_lines --
    # "ZZZ 11" only appears once show_full_line is on with xml_lines supplied
    self.assertEqual(output.find("ZZZ 11"), -1)
    output = validator.validator_messages_to_str(
        messages, show_full_line=True, xml_lines=lines)
    self.assertNotEqual(output.find("ZZZ 11"), -1)
    # is_html should output a div somewhere
    self.assertEqual(output.find("div"), -1)
    output = validator.validator_messages_to_str(
        messages, is_html=True, xml_lines=lines)
    self.assertNotEqual(output.find("div"), -1)
# validate_root_has_child
def test_root_has_child(self):
"""validate_root_has_child should return an empty list if the root node has
at least one child"""
validator = self.set_up_validator(PfifXml.XML_11_SMALL)
self.assertEqual(len(validator.validate_root_has_child()), 0)
def test_root_lacks_child(self):
"""validate_root_has_child should return a list with a message if the root
node does not have at least one child"""
validator = self.set_up_validator(PfifXml.XML_ROOT_LACKS_CHILD)
self.assertNotEqual(len(validator.validate_root_has_child()), 0)
# validate_root_has_mandatory_children
def test_root_has_mandatory_children(self):
"""validate_root_has_mandatory_children should return an empty list if one
of the children is a person"""
validator = self.set_up_validator(PfifXml.XML_11_SMALL)
self.assertEqual(len(validator.validate_root_has_mandatory_children()), 0)
def test_root_lacks_mandatory_children(self):
"""validate_root_has_mandatory_children should return a list with a message
if the only children are not notes or persons"""
validator = self.set_up_validator(PfifXml.XML_ROOT_HAS_BAD_CHILD)
self.assertNotEqual(
len(validator.validate_root_has_mandatory_children()), 0)
def test_root_has_note_child_11(self):
"""validate_root_has_mandatory_children should return a list with a message
if the only children are notes and the version is 1.1"""
validator = self.set_up_validator(PfifXml.XML_TOP_LEVEL_NOTE_11)
self.assertNotEqual(
len(validator.validate_root_has_mandatory_children()), 0)
def test_root_has_note_child_12(self):
"""validate_root_has_mandatory_children should return an empty list if the
only children are notes and the version is greater than 1.1"""
validator = self.set_up_validator(PfifXml.XML_TOP_LEVEL_NOTE_12)
self.assertEqual(len(validator.validate_root_has_mandatory_children()), 0)
# validate_has_mandatory_children
def test_note_has_mandatory_children(self):
"""validate_has_mandatory_children should return an empty list if it is
given notes with all mandatory children"""
validator = self.set_up_validator(PfifXml.XML_NOTES_WITH_CHILDREN)
self.assertEqual(len(validator.validate_note_has_mandatory_children()), 0)
def test_note_has_no_mandatory_children(self):
"""validate_has_mandatory_children should return a list with nine missing
children when given one child of a person with no children and one top level
note (which also must have a person_record_id) with no children."""
validator = self.set_up_validator(PfifXml.XML_NOTES_NO_CHILDREN)
self.assertEqual(len(validator.validate_note_has_mandatory_children()), 9)
def test_person_has_mandatory_children_11(self):
"""validate_has_mandatory_children should return an empty list if it is
given a version 1.1 person with all mandatory children"""
validator = self.set_up_validator(PfifXml.XML_PERSON_WITH_CHILDREN_11)
self.assertEqual(len(validator.validate_person_has_mandatory_children()), 0)
def test_person_has_mandatory_children_13(self):
"""validate_has_mandatory_children should return an empty list if it is
given a version 1.3 person with all mandatory children"""
validator = self.set_up_validator(PfifXml.XML_PERSON_WITH_CHILDREN_13)
self.assertEqual(len(validator.validate_person_has_mandatory_children()), 0)
def test_person_has_no_mandatory_children_11(self):
"""validate_has_mandatory_children should return a list with three missing
children when given a version 1.1 person with no children"""
validator = self.set_up_validator(PfifXml.XML_11_SMALL)
self.assertEqual(len(validator.validate_person_has_mandatory_children()), 3)
def test_person_has_no_mandatory_children_13(self):
"""validate_has_mandatory_children should return a list with three missing
children when given a version 1.3 person with no children"""
validator = self.set_up_validator(PfifXml.XML_PERSON_NO_CHILDREN_13)
self.assertEqual(len(validator.validate_person_has_mandatory_children()), 3)
# validate_fields_have_correct_format
def test_no_fields_exist(self):
"""validate_fields_have_correct_format should return an empty list when
passed a tree with no subelements of person or note because no nodes are
improperly formatted."""
validator = self.set_up_validator(PfifXml.XML_PERSON_NO_CHILDREN_13)
self.assertEqual(len(validator.validate_fields_have_correct_format()), 0)
validator = self.set_up_validator(PfifXml.XML_NOTES_NO_CHILDREN)
self.assertEqual(len(validator.validate_fields_have_correct_format()), 0)
def test_all_11_fields_have_correct_format(self):
"""validate_fields_have_correct_format should return an empty list when
passed a tree with all 1.1 elements in the correct formats."""
validator = self.set_up_validator(PfifXml.XML_11_FULL)
self.assertEqual(len(validator.validate_fields_have_correct_format()), 0)
#TODO(samking): test that non-ascii characters should be rejected
def test_no_11_fields_have_correct_format(self):
"""validate_fields_have_correct_format should return a list with every
subnode of person and note when every such subnode is of an incorrect
format. This tests all fields in version 1.1 for which incorrect input is
possible."""
validator = self.set_up_validator(PfifXml.XML_INCORRECT_FORMAT_11)
self.assertEqual(len(validator.validate_fields_have_correct_format()), 23)
def test_all_12_fields_have_correct_format(self):
"""validate_fields_have_correct_format should return an empty list when
presented with a document where all fields have the correct format. This
tests all fields introduced or changed in 1.2; it does not test fields that
were unchanged from 1.1."""
validator = self.set_up_validator(PfifXml.XML_FULL_12)
self.assertEqual(len(validator.validate_fields_have_correct_format()), 0)
def test_no_12_fields_have_correct_format(self):
"""validate_fields_have_correct_format should return a list with every
element presented to it when all fields have an incorrect format. This
tests all fields introduced or changed in 1.2, except ones that are always
accepted; it does not test fields that were unchanged from 1.1."""
validator = self.set_up_validator(PfifXml.XML_INCORRECT_FORMAT_12)
self.assertEqual(len(validator.validate_fields_have_correct_format()), 12)
def test_all_13_fields_have_correct_format(self):
"""validate_fields_have_correct_format should return an empty list when
presented with a document where all fields have the correct format. This
tests all fields introduced or changed in 1.3; it does not test fields that
were unchanged from 1.1 and 1.2."""
validator = self.set_up_validator(PfifXml.XML_CORRECT_FORMAT_13)
self.assertEqual(len(validator.validate_fields_have_correct_format()), 0)
def test_no_13_fields_have_correct_format(self):
"""validate_fields_have_correct_format should return a list with every
element presented to it when all fields have an incorrect format. This
tests all fields introduced or changed in 1.3, except ones that are always
accepted; it does not test fields that were unchanged from 1.1 and 1.2."""
validator = self.set_up_validator(PfifXml.XML_INCORRECT_FORMAT_13)
self.assertEqual(len(validator.validate_fields_have_correct_format()), 1)
# validate_unique_id
def test_person_ids_are_unique(self):
"""validate_person_ids_are_unique should return an empty list when all
person ids are unique"""
validator = self.set_up_validator(PfifXml.XML_UNIQUE_PERSON_IDS)
self.assertEqual(len(validator.validate_person_ids_are_unique()), 0)
def test_note_ids_are_unique(self):
"""validate_note_ids_are_unique should return an empty list when all note
ids are unique"""
validator = self.set_up_validator(PfifXml.XML_UNIQUE_NOTE_IDS)
self.assertEqual(len(validator.validate_note_ids_are_unique()), 0)
def test_person_ids_are_not_unique(self):
"""validate_person_ids_are_unique should return a list with all non-unique
person ids when there are non-unique person ids"""
validator = self.set_up_validator(PfifXml.XML_DUPLICATE_PERSON_IDS)
self.assertEqual(len(validator.validate_person_ids_are_unique()), 2)
def test_note_ids_are_not_unique(self):
"""validate_person_ids_are_unique should return a list with all non-unique
note ids when there are non-unique note ids"""
validator = self.set_up_validator(PfifXml.XML_DUPLICATE_NOTE_IDS)
self.assertEqual(len(validator.validate_note_ids_are_unique()), 2)
# validate_notes_belong_to_persons
def test_notes_belong_to_people(self):
"""validate_notes_belong_to_persons should return an empty list if all top
level notes have a person_record_id and all notes inside persons have no
person_record_id or the same person_record_id as the person."""
validator = self.set_up_validator(PfifXml.XML_NOTES_BELONG_TO_PEOPLE)
self.assertEqual(len(validator.validate_notes_belong_to_persons()), 0)
def test_notes_do_not_belong_to_people(self):
"""validate_notes_belong_to_persons should return a list with all top level
notes without a person_record_id and person_record_ids for notes that are
under a person with a person_record_id that doesn't match the person"""
validator = self.set_up_validator(PfifXml.XML_NOTES_WITHOUT_PEOPLE)
self.assertEqual(len(validator.validate_notes_belong_to_persons()), 2)
# validate_field_order
def test_correct_field_order_11(self):
"""validate_person_field_order and validate_note_field_order should return
a empty lists if all elements in all persons and notes are in the correct
order"""
validator = self.set_up_validator(PfifXml.XML_11_FULL)
self.assertEqual(len(validator.validate_person_field_order()), 0)
self.assertEqual(len(validator.validate_note_field_order()), 0)
def test_omitting_fields_is_okay_11(self):
"""validate_person_field_order and validate_note_field_order should return
a empty lists if all elements in all persons and notes are in the correct
order, even if some elements are omitted (ie, 1,2,4 is in order even though
3 is omitted)"""
validator = self.set_up_validator(PfifXml.XML_MISSING_FIELDS_11)
self.assertEqual(len(validator.validate_person_field_order()), 0)
self.assertEqual(len(validator.validate_note_field_order()), 0)
def test_incorrect_field_order_11(self):
"""validate_person_field_order and validate_note_field_order should return
the | |
#!/usr/bin/env python
#-*-coding:utf8-*-
# This script aggregates SNPs into haplotype blocks to produce per-cell BAF (AD/DP) matrices.
#Author: <NAME>
# TODO: output region file to out_dir
import os
import sys
import getopt
import gzip
from ..utils.base import assert_e, assert_n, log
from ..utils.region import load_regions
from .config import APP
def __format_chrom(chrom):
    """
    @abstract Normalize a chrom name by stripping a leading "chr" prefix, so
              all chroms handled in this script share one naming style
    @param chrom Chrom name [str]
    @return Formatted chrom name [str]
    """
    if chrom.startswith("chr"):
        return chrom[3:]
    return chrom
def __load_snp_mtx(fn):
    """
    @abstract Load data from SNP AD/DP mtx
    @param fn Path to mtx file [str]
    @return A tuple of four elements if success, None otherwise [tuple]:
              - number of snps [int]
              - number of cells [int]
              - number of records [int]
              - a dict of {snp_idx:{cell_idx:depth, }} pairs [dict]
    @note Lines 0-1 are treated as the mtx header, line 2 as the dimension
          line, and data records start at line 3; records with depth <= 0 are
          dropped.
    """
    assert_e(fn, "snp mtx")
    if os.path.splitext(fn)[1] in (".gz", ".gzip"):
        fields = [line[:-1].split("\t")[:3] for line in gzip.open(fn, "rt")]
    else:
        fields = [line[:-1].split("\t")[:3] for line in open(fn, "r")]
    if len(fields) < 3 or len(fields[2]) < 3:
        return None
    nsnp, ncell, nrecord = [int(x) for x in fields[2][:3]]
    snp_cell = {}
    for rec in fields[3:]:
        assert len(rec) >= 3, "too few columns in snp mtx"
        depth = int(rec[2])
        if depth <= 0:
            continue
        snp_cell.setdefault(rec[0], {})[rec[1]] = depth
    return (nsnp, ncell, nrecord, snp_cell)
def __load_phase_from_tsv(fn):
    """
    @abstract Load data from phase file of tsv format
    @param fn Path to phase file [str]
    @return A tuple of three elements if success, None otherwise [tuple]:
              - number of total snps [int]
              - number of valid snps whose one allele is 0 and the other is 1 [int]
              - a dict of {chrom:[(pos, allele1, allele2, snp_idx),]} pairs [dict]
    """
    assert_e(fn, "phase file")
    cols = []
    if os.path.splitext(fn)[1] in (".gz", ".gzip"):
        cols = [line[:-1].split("\t")[:4] for line in gzip.open(fn, "rt")]
    else:
        cols = [line[:-1].split("\t")[:4] for line in open(fn, "rt")]
    phases = {}
    i, j = 0, 0
    for c in cols:
        i += 1
        assert len(c) >= 3, "too few columns in phase file '%s'" % fn
        sep = ""
        if "|" in c[2]: sep = "|"
        elif "/" in c[2]: sep = "/"
        else: return None
        a1, a2 = c[2].split(sep)[:2]
        # BUG FIX: the second clause used to be `(a2 == "1" and a1 == "0")`,
        # a duplicate of the first, so "1|0" heterozygous SNPs were silently
        # dropped.  Accept both 0/1 and 1/0, matching __load_phase_from_vcf().
        if (a1 == "0" and a2 == "1") or (a1 == "1" and a2 == "0"):
            chrom = __format_chrom(c[0])
            phases.setdefault(chrom, []).append((int(c[1]), a1, a2, str(i)))
            j += 1
    return (i, j, phases)
def __load_phase_from_vcf(fn):
    """
    @abstract Load data from phase file of vcf format
    @param fn Path to phase file [str]
    @return A tuple of three elements if success, None otherwise [tuple]:
              - number of total snps [int]
              - number of valid snps whose one allele is 0 and the other is 1 [int]
              - a dict of {chrom:[(pos, allele1, allele2, snp_idx),]} pairs [dict]
    """
    assert_e(fn, "phase file")
    is_gzipped = os.path.splitext(fn)[1] in (".gz", ".gzip")
    fp = gzip.open(fn, "rt") if is_gzipped else open(fn, "r")
    phases = {}
    total, valid = 0, 0
    for line in fp:
        # skip header lines and empty lines
        if line[0] in ("#", "\n"):
            continue
        total += 1
        parts = line[:-1].split("\t")
        assert len(parts) >= 10, "too few columns in vcf '%s'" % fn
        fields = parts[8].split(":")
        assert "GT" in fields, "GT not in vcf '%s'" % fn
        values = parts[9].split(":")
        assert len(values) == len(fields), \
            "length of fields should be the same with length of values in vcf '%s'" % fn
        gt = values[fields.index("GT")]
        if "|" in gt:
            sep = "|"
        elif "/" in gt:
            sep = "/"
        else:
            return None
        a1, a2 = gt.split(sep)[:2]
        # keep only heterozygous SNPs (0/1 or 1/0)
        if (a1 == "0" and a2 == "1") or (a1 == "1" and a2 == "0"):
            chrom = __format_chrom(parts[0])
            phases.setdefault(chrom, []).append(
                (int(parts[1]), a1, a2, str(total)))
            valid += 1
    fp.close()
    return (total, valid, phases)
def __load_phase(fn):
    """
    @abstract Load data from phase file, dispatching on the file extension
    @param fn Path to phase file [str]
    @return A tuple of three elements if success, None otherwise [tuple]:
              - number of total snps [int]
              - number of valid snps whose one allele is 0 and the other is 1 [int]
              - a dict of {chrom:[(pos, allele1, allele2, snp_idx),]} pairs [dict]
    """
    assert_e(fn, "phase file")
    # ".vcf"/".vcf.gz" go through the VCF parser; everything else is tsv.
    if fn.endswith((".vcf", ".vcf.gz")):
        return __load_phase_from_vcf(fn)
    return __load_phase_from_tsv(fn)
def __load_region(fn):
    """
    @abstract Load data from region file.
    @param fn Path to region file [str]
    @return A tuple of two elements if success, None otherwise [tuple]:
              - number of blocks [int]
              - a dict of {chrom:[(start, end, reg_idx),]} pairs [dict]
    """
    assert_e(fn, "region file")
    # strip a trailing ".gz" before inferring the region type from the suffix
    suffix = os.path.splitext(fn[:-3])[1] if fn.endswith(".gz") else \
             os.path.splitext(fn)[1]
    assert suffix.lower() in (".bed", ".tsv", ".gff"), "region type should be bed|tsv|gff"
    reg_type = suffix[1:].lower()
    reg_list = load_regions(fn, reg_type)
    if reg_list is None:
        return None
    regions = {}
    # BUG FIX: the loop variable was previously unbound when reg_list was an
    # empty (non-None) list, making `return (i + 1, regions)` raise NameError.
    nblock = 0
    for nblock, r in enumerate(reg_list, start=1):
        # here enumerate is efficient as region file is usually small.
        chrom = __format_chrom(r.chrom)
        regions.setdefault(chrom, []).append((r.start, r.end, nblock))
    return (nblock, regions)
def __get_block_cell(snp_ad, snp_dp, phase, blocks):
    """
    @abstract Get block-cell AD & DP matrices.
    @param snp_ad SNP AD mtx, A dict of {snp_idx:{cell_idx:depth, }} pairs [dict]
    @param snp_dp SNP DP mtx, A dict of {snp_idx:{cell_idx:depth, }} pairs [dict]
    @param phase A dict of {chrom:[(pos, allele1, allele2, snp_idx),]} pairs,
                 every array of chr should be sorted by pos already [dict]
    @param blocks A dict of {chrom:[(start, end, reg_idx),]} pairs,
                 every array of chr should be sorted by start pos already [dict]
    @return A dict of {reg_idx:{cell_idx:{ad:ad_depth, dp:dp_depth}, }} pairs if success, None otherwise [dict]
    @note 1. SNP AD & DP mtx should have been checked to make sure each snp-cell
             record whose depth > 0 in AD should also exist in DP.
          2. The two alleles of each snp should be one is 0 and the other is 1.
          3. ph_idx advances monotonically across regions of one chrom; the
             backward scan below re-visits already-consumed SNPs so that
             regions overlapping the previous region still count them
             (an overlapping SNP contributes to both regions).
    """
    if not (snp_ad and snp_dp and phase and blocks):
        return None
    block_cell = {}
    for chrom, reg_dat in blocks.items():
        ph_dat = phase.get(chrom, [])
        if not ph_dat:
            continue
        ph_idx = 0
        nph = len(ph_dat)
        for r in reg_dat:
            start, end, reg_idx = r[:3]
            # Backward scan: walk left from the resume point to pick up SNPs
            # already consumed by an earlier (overlapping) region that also
            # fall inside [start, end]; stop once positions drop below start.
            _idx = ph_idx - 1
            while _idx >= 0:
                pos, allele1, allele2, snp_idx = ph_dat[_idx][:4]
                if pos < start:
                    break
                elif pos <= end:
                    _idx -= 1
                    dp_dat = snp_dp.get(snp_idx, {})
                    if not dp_dat:
                        continue
                    ad_dat = snp_ad.get(snp_idx, {})
                    for cell_idx, dp_depth in dp_dat.items():
                        ad_depth = ad_dat.get(cell_idx, 0)
                        block_cell.setdefault(reg_idx, {}).setdefault(cell_idx, {"ad":0, "dp":0})
                        # AD counts the "1" haplotype: use ad_depth directly
                        # when allele1 is "1", otherwise its complement.
                        block_cell[reg_idx][cell_idx]["ad"] += ad_depth if allele1 == "1" else dp_depth - ad_depth
                        block_cell[reg_idx][cell_idx]["dp"] += dp_depth
                else:
                    # pos > end: skip and keep walking left
                    _idx -= 1
            # Forward scan: consume SNPs from the resume point up to `end`.
            while ph_idx < nph: # donot use range() here to save memory
                pos, allele1, allele2, snp_idx = ph_dat[ph_idx][:4]
                if pos > end:
                    break
                elif pos < start:
                    ph_idx += 1
                    continue
                else: # this snp belongs to the block
                    ph_idx += 1
                    dp_dat = snp_dp.get(snp_idx, {})
                    if not dp_dat:
                        continue
                    ad_dat = snp_ad.get(snp_idx, {})
                    for cell_idx, dp_depth in dp_dat.items():
                        ad_depth = ad_dat.get(cell_idx, 0)
                        block_cell.setdefault(reg_idx, {}).setdefault(cell_idx, {"ad":0, "dp":0})
                        block_cell[reg_idx][cell_idx]["ad"] += ad_depth if allele1 == "1" else dp_depth - ad_depth
                        block_cell[reg_idx][cell_idx]["dp"] += dp_depth
    return block_cell
def __output_block_mtx(block_cell, nblock, ncell, block_ad_file, block_dp_file, _gzip = 0):
"""
@abstract Output block-cell AD & DP matrices to mtx file
@param block_cell A dict of {reg_idx:{cell_idx:{ad:ad_depth, dp:dp_depth}, }} pairs [dict]
@param nblock Number of total blocks [int]
@param ncell Number of total cells [int]
@param block_ad_file Path to block AD mtx file [str]
@param block_dp_file Path to block DP mtx file [str]
@param _gzip If the output files need to be gziped: 0, no; 1, yes [int]
@return 0 if success, -1 otherwise [int]
"""
if not (block_cell and block_ad_file and block_dp_file):
return -1
# count number of total records in block AD & DP matrices
nrec_ad, nrec_dp = 0, 0
for reg_idx, reg_dat in block_cell.items():
for cell_idx, cell_dat in reg_dat.items():
if cell_dat["ad"] > 0: nrec_ad += 1
if cell_dat["dp"] > 0: nrec_dp += 1
# output mtx header
ad_fp = gzip.open(block_ad_file, "wb") if _gzip else open(block_ad_file, "w")
dp_fp = gzip.open(block_dp_file, "wb") if _gzip else open(block_dp_file, "w")
header = "%%MatrixMarket matrix coordinate integer general\n%"
ad_fp.write("%s\n%d\t%d\t%d\n" % (header, nblock, ncell, nrec_ad))
dp_fp.write("%s\n%d\t%d\t%d\n" | |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: define activation functions of neural network
from ...fluid.layers import brelu #DEFINE_ALIAS
# from ...fluid.layers import erf #DEFINE_ALIAS
from ...fluid.layers import maxout #DEFINE_ALIAS
# from ...fluid.layers import soft_relu #DEFINE_ALIAS
from ...fluid.layers import swish #DEFINE_ALIAS
from ...fluid.layers import sigmoid #DEFINE_ALIAS
from ...tensor.math import tanh #DEFINE_ALIAS
from ...tensor.math import tanh_ #DEFINE_ALIAS
from ...tensor.manipulation import _print_warning_in_static_mode
from ...tensor.manipulation import chunk
from ...tensor.math import multiply
__all__ = [
'brelu',
'elu',
'elu_',
'gelu',
'hardshrink',
'hardtanh',
'hardsigmoid',
'hardswish',
'leaky_relu',
'log_sigmoid',
'maxout',
'prelu',
'relu',
'relu_',
'relu6',
'selu',
'softmax',
'softmax_',
'softplus',
'softshrink',
'softsign',
'sigmoid',
'swish',
'tanh',
'tanh_',
'tanhshrink',
'thresholded_relu',
'log_softmax',
'glu',
]
import warnings
from ...fluid.layer_helper import LayerHelper
from ...fluid.framework import in_dygraph_mode, convert_np_dtype_to_dtype_
from ...fluid import core
from ...fluid.data_feeder import check_variable_and_dtype, check_dtype
import paddle
def elu(x, alpha=1.0, name=None):
    r"""
    elu activation.
    .. math::
        elu(x) = max(0, x) + min(0, \\alpha * (e^{x}-1))
    Parameters:
        x (Tensor): The input Tensor with data type float32, float64.
        alpha (float, optional): The 'alpha' value of the ELU formulation. Default is 1.0.
        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.
    Returns:
        A Tensor with the same data type and shape as ``x`` .
    Examples:
        .. code-block:: python
            import paddle
            import paddle.nn.functional as F
            x = paddle.to_tensor([[-1., 6.], [1., 15.6]])
            out = F.elu(x, alpha=0.2)
            # [[-0.12642411  6.        ]
            # [ 1.          15.6      ]]
    """
    # Imperative (dygraph) fast path: dispatch straight to the C++ operator.
    if in_dygraph_mode():
        return core.ops.elu(x, 'alpha', alpha)
    # Static-graph path: validate dtype, then append an `elu` op to the program.
    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'elu')
    # NOTE: LayerHelper consumes **locals() (x, alpha, name); do not introduce
    # or rename locals above this call.
    helper = LayerHelper("elu", **locals())
    out = helper.create_variable_for_type_inference(x.dtype)
    helper.append_op(
        type='elu',
        inputs={'X': x},
        outputs={'Out': out},
        attrs={'alpha': alpha})
    return out
def elu_(x, alpha=1.0, name=None):
    r"""
    Inplace version of ``elu`` API, the output Tensor will be inplaced with input ``x``.
    Please refer to :ref:`api_nn_cn_elu`.
    """
    # In dygraph mode, run the true inplace kernel.
    if in_dygraph_mode():
        return core.ops.elu_(x, 'alpha', alpha)
    # Static graph has no inplace op: warn and fall back to the out-of-place elu.
    _print_warning_in_static_mode("elu")
    return elu(x, alpha, name)
def gelu(x, approximate=False, name=None):
    r"""
    gelu activation.
    if approximate is True
    .. math::
        gelu(x) = 0.5 * x * (1 + tanh(\\sqrt{\\frac{2}{\\pi}} * (x + 0.044715x^{3})))
    else
    .. math::
        gelu(x) = 0.5 * x * (1 + erf(\\frac{x}{\\sqrt{2}}))
    Parameters:
        x (Tensor): The input Tensor with data type float32, float64.
        approximate (bool, optional): Wether to enable approximation. Default is False.
        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.
    Returns:
        A Tensor with the same data type and shape as ``x`` .
    Examples:
        .. code-block:: python
            import paddle
            import paddle.nn.functional as F
            x = paddle.to_tensor([[-1, 0.5], [1, 1.5]])
            out1 = F.gelu(x)
            # [[-0.15865529,  0.34573123],
            #  [ 0.84134471,  1.39978933]]
            out2 = F.gelu(x, True)
            # [[-0.15880799,  0.34571400],
            #  [ 0.84119201,  1.39957154]]
    """
    # Imperative (dygraph) fast path: dispatch straight to the C++ operator.
    if in_dygraph_mode():
        return core.ops.gelu(x, 'approximate', approximate)
    # Static-graph path: validate dtype, then append a `gelu` op to the program.
    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'gelu')
    # NOTE: LayerHelper consumes **locals(); do not introduce or rename locals
    # above this call.
    helper = LayerHelper("gelu", **locals())
    out = helper.create_variable_for_type_inference(x.dtype)
    helper.append_op(
        type='gelu',
        inputs={'X': x},
        outputs={'Out': out},
        attrs={'approximate': approximate})
    return out
def hardshrink(x, threshold=0.5, name=None):
    r"""
    hard shrinkage activation
    .. math::
        hardshrink(x)=
            \\left\\{
            \\begin{aligned}
            &x, & & if \\ x > threshold \\\\
            &x, & & if \\ x < -threshold \\\\
            &0, & & if \\ others
            \\end{aligned}
            \\right.
    Args:
        x (Tensor): The input Tensor with data type float32, float64.
        threshold (float, optional): The value of threshold for hardthrink. Default is 0.5
        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.
    Returns:
        A Tensor with the same data type and shape as ``x`` .
    Examples:
        .. code-block:: python
            import paddle
            import paddle.nn.functional as F
            x = paddle.to_tensor([-1, 0.3, 2.5])
            out = F.hardshrink(x) # [-1., 0., 2.5]
    """
    # Imperative (dygraph) fast path: dispatch straight to the C++ operator
    # (registered under the legacy name `hard_shrink`).
    if in_dygraph_mode():
        return core.ops.hard_shrink(x, 'threshold', threshold)
    # Static-graph path: validate dtype, then append the op to the program.
    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                             'hardshrink')
    # NOTE: LayerHelper consumes **locals(); do not introduce or rename locals
    # above this call.
    helper = LayerHelper('hardshrink', **locals())
    out = helper.create_variable_for_type_inference(x.dtype)
    helper.append_op(
        type='hard_shrink',
        inputs={'X': x},
        outputs={'Out': out},
        attrs={'threshold': threshold})
    return out
def hardtanh(x, min=-1.0, max=1.0, name=None):
    r"""
    hardtanh activation
    .. math::
        hardtanh(x)= \\begin{cases}
                        max, \\text{if } x > max \\\\
                        min, \\text{if } x < min \\\\
                        x,  \\text{otherwise}
                      \\end{cases}
    Parameters:
        x (Tensor): The input Tensor with data type float32, float64.
        min (float, optional): The minimum value of the linear region range. Default is -1.
        max (float, optional): The maximum value of the linear region range. Default is 1.
        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.
    Returns:
        A Tensor with the same data type and shape as ``x`` .
    Examples:
        .. code-block:: python
            import paddle
            import paddle.nn.functional as F
            import numpy as np
            x = paddle.to_tensor(np.array([-1.5, 0.3, 2.5]))
            out = F.hardtanh(x) # [-1., 0.3, 1.]
    """
    # hardtanh is implemented by the bounded-relu (`brelu`) operator, with
    # min/max mapped to its t_min/t_max attributes.
    if in_dygraph_mode():
        return core.ops.brelu(x, 't_min', min, 't_max', max)
    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                             'hardtanh')
    # NOTE: LayerHelper consumes **locals() (including the builtin-shadowing
    # `min`/`max` parameters); do not introduce or rename locals above this call.
    helper = LayerHelper('hardtanh', **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='brelu',
        inputs={'X': x},
        outputs={'Out': out},
        attrs={'t_min': min,
               't_max': max})
    return out
def hardsigmoid(x, slope=0.1666667, offset=0.5, name=None):
    r"""
    hardsigmoid activation.
    A 3-part piecewise linear approximation of sigmoid(https://arxiv.org/abs/1603.00391),
    which is much faster than sigmoid.
    .. math::
        hardsigmoid(x)=
            \\left\\{
            \\begin{aligned}
            &0, & & \\text{if } x \\leq -3 \\\\
            &1, & & \\text{if } x \\geq 3 \\\\
            &slope * x + offset, & & \\text{otherwise}
            \\end{aligned}
            \\right.
    Parameters:
        x (Tensor): The input Tensor with data type float32, float64.
        slope (float, optional): The slope of hardsigmoid function. Default is 0.1666667.
        offset (float, optional): The offset of hardsigmoid function. Default is 0.5.
        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.
    Returns:
        A Tensor with the same data type and shape as ``x`` .
    Examples:
        .. code-block:: python
            import paddle
            import paddle.nn.functional as F
            x = paddle.to_tensor([-4., 5., 1.])
            out = F.hardsigmoid(x) # [0., 1., 0.666667]
    """
    # Imperative (dygraph) fast path: dispatch straight to the C++ operator.
    if in_dygraph_mode():
        return core.ops.hard_sigmoid(x, 'slope', slope, 'offset', offset)
    # Static-graph path: validate dtype, then append the op to the program.
    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                             'hardsigmoid')
    # NOTE: LayerHelper consumes **locals(); do not introduce or rename locals
    # above this call.
    helper = LayerHelper('hardsigmoid', **locals())
    out = helper.create_variable_for_type_inference(x.dtype)
    helper.append_op(
        type='hard_sigmoid',
        inputs={'X': x},
        outputs={'Out': out},
        attrs={'slope': slope,
               'offset': offset})
    return out
def hardswish(x, name=None):
    r"""
    hardswish activation
    hardswish is proposed in MobileNetV3, and performs better in computational stability
    and efficiency compared to swish function. For more details please refer
    to: https://arxiv.org/pdf/1905.02244.pdf
    .. math::
        hardswish(x)=
            \\left\\{
            \\begin{aligned}
            &0, & & \\text{if } x \\leq -3 \\\\
            &x, & & \\text{if } x \\geq 3 \\\\
            &\\frac{x(x+3)}{6}, & & \\text{otherwise}
            \\end{aligned}
            \\right.
    Parameters:
        x (Tensor): The input Tensor with data type float32, float64.
        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.
    Returns:
        A Tensor with the same data type and shape as ``x`` .
    Examples:
        .. code-block:: python
            import paddle
            import paddle.nn.functional as F
            x = paddle.to_tensor([-4., 5., 1.])
            out = F.hardswish(x) # [0., 5., 0.666667]
    """
    # Imperative (dygraph) fast path: dispatch straight to the C++ operator.
    if in_dygraph_mode():
        return core.ops.hard_swish(x)
    # Static-graph path: validate dtype, then append the op to the program.
    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                             'hardswish')
    # NOTE: LayerHelper consumes **locals(); do not introduce or rename locals
    # above this call.
    helper = LayerHelper('hardswish', **locals())
    out = helper.create_variable_for_type_inference(x.dtype)
    helper.append_op(type='hard_swish', inputs={'X': x}, outputs={'Out': out})
    return out
def leaky_relu(x, negative_slope=0.01, name=None):
    r"""
    leaky_relu activation
    .. math::
        leaky\\_relu(x)=
        \\left\\{
        \\begin{aligned}
        &x, & & if \\ x >= 0 \\\\
        &negative\_slope * x, & & otherwise \\\\
        \\end{aligned}
        \\right. \\\\
    Args:
        x (Tensor): The input Tensor with data type float32, float64.
        negative_slope (float, optional): Slope of the activation function at
            :math:`x < 0` . Default is 0.01.
        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.
    Returns:
        A Tensor with the same data type and shape as ``x`` .
    Examples:
        .. code-block:: python
            import paddle
            import paddle.nn.functional as F
            x = paddle.to_tensor([-2., 0., 1.])
            out = F.leaky_relu(x) # [-0.02, 0., 1.]
    """
    # Imperative (dygraph) fast path; the op exposes the slope as attr 'alpha'.
    if in_dygraph_mode():
        return core.ops.leaky_relu(x, 'alpha', negative_slope)
    # Static-graph path: validate dtype, then append the op to the program.
    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                             'leaky_relu')
    # NOTE: LayerHelper consumes **locals(); do not introduce or rename locals
    # above this call.
    helper = LayerHelper('leaky_relu', **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='leaky_relu',
        inputs={'X': x},
        outputs={'Out': out},
        attrs={'alpha': negative_slope})
    return out
def prelu(x, weight, name=None):
"""
prelu activation.
.. math::
prelu(x) = max(0, x) + | |
Affairs, including -relations and communications with overseas governments and United Nations agencies;treaties, including trade agreements; bilateral, regional and multilateral trade policy; international trade and commodity negotiations; market development, including market access; trade and international business development; investment promotion; international development co-operation; diplomatic and consular missions; international security issues, including disarmament, arms control and nuclear non-proliferation; public diplomacy, including information and cultural programs',
'Facilitation of the development of service industries generally',
'Family relationship services',
'Family relationship, Family and Children\'s Support Services',
'Financial sector policy',
'Food industry policy',
'Food policy, processing and exports',
'Food processing industry policy',
'Food security policy and programs',
'Foreign exchange',
'Foreign investment in Australia',
'Foundation skills for adults',
'Fraud and anti-corruption policy',
'Freedom of Information',
'Gene technology regulation',
'General policy guidelines for Commonwealth statutory authorities',
'Geoscience research and information services including geodesy, mapping, remote sensing and land information co-ordination',
'Geoscience research and information services including geodesy, mapping, remote sensing, groundwater and spatial data co-ordination',
'Government ceremonial and hospitality',
'Government financial accountability, efficiency, governance and financial management frameworks, including grants and procurement policy and services',
'Government financial accountability, efficiency, governance and financial management frameworks, including grants and procurement policy and services (excluding information and communications technology procurement policy and services)',
'Government financial accountability, governance and financial management frameworks, including grants and procurement policy and services',
'Government financial accountability, governance and financial management frameworks, including procurement policy and services',
'Government on-line delivery and information technology and communications management',
'Greenhouse emissions and energy consumption reporting',
'Greenhouse gas abatement programs',
'Greenhouse mitigation and adaptation',
'Health and ageing research',
'Health benefits schemes',
'Health promotion and disease prevention',
'Health provider compliance',
'Health research',
'Health workforce capacity',
'Hearing services policy and funding',
'Higher education policy, regulation and programs',
'Higher education, skills and vocational education policy and programs',
'Higher education, skills and vocational education policy, regulation and programs',
'Hospitals funding and policy, including relationships and linkages within the continuum of health care',
'Hospitals funding, including relationship with primary health care',
'Housing affordability',
'Housing policy co-ordination, welfare housing and rent assistance',
'Housing supply policy',
'Immigration and migration, including - border security; entry, stay and departure arrangements for non-citizens; customs and border control other than quarantine and inspection',
'Implementation of the National Health and Hospitals Network',
'Income security and support policies and programs for families with children, carers, the aged, people with disabilities and people in hardship',
'Income security policies and programs for families with children, carers, the aged and people in hardship',
'Income security policies and programs for families with children, carers, the aged, people with disabilities and people in hardship',
'Income support and participation policy for people of working age',
'Income support policies and programs for students and apprentices',
'Income support policies for students and apprentices',
'Indigenous higher education and vocational training',
'Indigenous policy co-ordination and the promotion of reconciliation',
'Indigenous policy co-ordination, programs and the promotion of reconciliation',
'Industrial energy efficiency',
'Industrial research and development, and commercialisation',
'Industry innovation policy and technology diffusion',
'Information and communications technology industry development',
'Information and communications technology procurement policy and services',
'Infrastructure and project financing',
'Infrastructure planning and co-ordination',
'Infrastructure planning and co-ordination',
'Intergovernmental relations and communications with State and Territory Governments',
'International climate change negotiations',
'International development and aid',
'International expositions',
'International finance',
'International science engagement',
'Investment promotion',
'Ionospheric prediction',
'Job Network',
'Job Services Australia',
'Jobactive',
'Labour market and income support policies and programs for people of working age',
'Labour market programs for people of working age',
'Land contamination',
'Land transport',
'Law and justice including - Administrative law; Alternative dispute resolution; Bankruptcy; Censorship; Constitutional law; Copyright; Courts and tribunals; Human rights; Indigenous law and justice programs; International law; Law reform; Legal assistance; Legislative drafting; Marriage and family law; Native Title',
'Law and justice including - Administrative law; Alternative dispute resolution; Bankruptcy; Censorship; Constitutional law; Copyright; Courts and tribunals; Human rights; Indigenous law and justice programs; International law; Law reform; Legal assistance; Legislative drafting; Marriage and family law; Native Title',
'Law and justice including - Administrative law; Alternative dispute resolution; Bankruptcy; Censorship; Constitutional law; Copyright; Courts and tribunals; Human rights; Indigenous law and justice programs; International law; Law reform; Legal assistance; Legislative drafting; Marriage and family law; Native Title; Personal property securities',
'Law and justice including - Administrative law; Alternative dispute resolution; Bankruptcy; Censorship; Constitutional law; Copyright; Courts and tribunals; Human rights; Indigenous law and justice; International law; Law reform; Legal assistance; Legislative drafting; Marriage and family law; Native Title; Personal property securities',
'Law and justice including - Administrative law; Alternative dispute resolution; Bankruptcy; Censorship; Constitutional law; Copyright; Courts and tribunals; Human rights; International law; Law reform; Legal assistance; Legislative drafting; Marriage and family law; Personal property securities',
'Law and justice including - Administrative law; Alternative dispute resolution; Bankruptcy; Constitutional law; Courts and tribunals; Human rights; International law; Law reform; Legal assistance; Legislative drafting; Marriage and family law; Personal property securities',
'Law enforcement policy and operations',
'Legal services to the Commonwealth',
'Low emissions fossil fuel energy',
'Major projects facilitation',
'Major projects office, including facilitation and implementation of all non-Defence development projects',
'Major projects, including implementation of all non-Defence development projects',
'Management of government records',
'Management of non-Defence Commonwealth property in Australia, including construction, major refurbishment, sustainability, acquisition, ownership and disposal of real property',
'Mandatory renewable energy target policy, regulation and co-ordination',
'Manufacturing and commerce including industry and market development',
'Maritime transport including shipping',
'Marketing of manufactures and services',
'Marketing, including export promotion, of manufactures and services',
'Matters relating to local government',
'Medical indemnity insurance issues',
'Medicare provider compliance',
'Mental health policy and primary mental health care',
'Meteorology',
'Migrant adult education',
'Mineral and energy industries, including oil and gas, and electricity',
'Mineral and energy resources, including oil and gas, extraction and upstream processing',
'Minerals and energy resources research, science and technology',
'Monitoring and management of service delivery and purchaser/provider relationships involving Centrelink, Medicare Australia, the Child Support Agency, Australian Hearing, Health Services Australia and CRS Australia',
'Monitoring and management of service delivery arrangements involving Centrelink, Medicare Australia, the Child Support Agency, Australian Hearing, and CRS Australia',
'Monitoring and management of service delivery arrangements involving social security, child support, students, families, aged care, health programs, disability employment services, superannuation release and Australian Hearing Services',
'Multicultural affairs',
'National drug strategy',
'National energy market',
'National energy market, including electricity, gas and liquid fuels',
'National fuel quality standards',
'National policy issues relating to the digital economy',
'National policy on cities',
'National security policy and operations',
'National security policy co-ordination',
'National security, protective security policy and co-ordination',
'Native Title',
'Natural disaster relief and mitigation in the form of financial assistance to the States and Territories',
'Natural disaster relief, recovery and mitigation policy and financial assistance including payments to the States and Territories and the Australian Government Disaster Recovery Payment',
'Natural, built and cultural heritage',
'Natural, built and movable cultural heritage',
'Non-profit sector and volunteering',
'Non-profit sector and volunteering',
'Northern Australia policy and co-ordination',
'Notification and assessment of industrial chemicals',
'Occupational health and safety, rehabilitation and compensation',
'Official Establishments, ownership and property management',
'Official Establishments, ownership and property management of the Prime Minister\'s official residences',
'Old Parliament House',
'Overseas property management, including acquisition, ownership and disposal of real property',
'Participation, activity test and compliance policy for participation payment recipients',
'Pharmaceutical benefits',
'Planning and land management in the Australian Capital Territory',
'Policy advice and administrative support to the Prime Minister',
'Policy advice on the Future Fund and Nation-building Funds',
'Policy advice on the Future Fund and Nation-building Funds and authorisation of payments from the Nation-building Funds to Agencies',
'Policy advice on the Future Fund, Nation-building Funds and the DisabilityCare Australia Fund; and authorisation of payments from the Nation-building Funds and the DisabilityCare Australia Fund recommended by relevant Agencies',
'Policy for and promotion of active ageing, other than | |
<filename>meerk40t/balor/sender.py
# Balor Galvo Laser Control Module
# Copyright (C) 2021-2022 Gnostic Instruments, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import threading
import time
import usb.core
import usb.util
from meerk40t.balor.command_list import CommandList, CommandSource
class BalorException(Exception):
    """Base class for all errors raised by the Balor galvo driver."""
    pass
class BalorConfigException(BalorException):
    """Raised for configuration problems (e.g. a missing .cor file path)."""
    pass
class BalorMachineException(BalorException):
    """Raised for machine-level failures."""
    pass
class BalorCommunicationException(BalorException):
    """Raised for communication problems, e.g. opening an already-open connection."""
    pass
class BalorDataValidityException(BalorException):
    """Raised for invalid data exchanged with the device."""
    pass
# Marked with ? - currently not seen in the wild
# USB control opcodes for the BJJCZ LMCV4-FIBER-M (and compatible) board.
# Most annotations record behaviour observed in captured EzCAD traffic
# rather than any official vendor documentation.
DISABLE_LASER = 0x0002
RESET = 0x0003
ENABLE_LASER = 0x0004
EXECUTE_LIST = 0x0005
SET_PWM_PULSE_WIDTH = 0x0006  # ?
GET_REGISTER = 0x0007
GET_SERIAL_NUMBER = 0x0009  # In EzCAD mine is 32012LI43405B, Version 4.02, LMC V4 FIB
GET_LIST_STATUS = 0x000A
GET_XY_POSITION = 0x000C  # Get current galvo position
SET_XY_POSITION = 0x000D  # Travel the galvo xy to specified position
LASER_SIGNAL_OFF = 0x000E  # ?
LASER_SIGNAL_ON = 0x000F  # ?
WRITE_CORRECTION_LINE = 0x0010  # ?
RESET_LIST = 0x0012
RESTART_LIST = 0x0013
WRITE_CORRECTION_TABLE = 0x0015
SET_CONTROL_MODE = 0x0016
SET_DELAY_MODE = 0x0017
SET_MAX_POLY_DELAY = 0x0018
SET_END_OF_LIST = 0x0019
SET_FIRST_PULSE_KILLER = 0x001A
SET_LASER_MODE = 0x001B
SET_TIMING = 0x001C
SET_STANDBY = 0x001D
SET_PWM_HALF_PERIOD = 0x001E
STOP_EXECUTE = 0x001F  # Since observed in the wild
STOP_LIST = 0x0020  # ?
WRITE_PORT = 0x0021
WRITE_ANALOG_PORT_1 = 0x0022  # At end of cut, seen writing 0x07FF
WRITE_ANALOG_PORT_2 = 0x0023  # ?
WRITE_ANALOG_PORT_X = 0x0024  # ?
READ_PORT = 0x0025
SET_AXIS_MOTION_PARAM = 0x0026
SET_AXIS_ORIGIN_PARAM = 0x0027
GO_TO_AXIS_ORIGIN = 0x0028
MOVE_AXIS_TO = 0x0029
GET_AXIS_POSITION = 0x002A
GET_FLY_WAIT_COUNT = 0x002B  # ?
GET_MARK_COUNT = 0x002D  # ?
SET_FPK_2E = 0x002E  # First pulse killer related, SetFpkParam2
# My ezcad lists 40 microseconds as FirstPulseKiller
# EzCad sets it 0x0FFB, 1, 0x199, 0x64
FIBER_CONFIG_1 = 0x002F  #
FIBER_CONFIG_2 = 0x0030  #
LOCK_INPUT_PORT = 0x0031  # ?
SET_FLY_RES = 0x0032  # Unknown fiber laser parameter being set
# EzCad sets it: 0x0000, 0x0063, 0x03E8, 0x0019
FIBER_OPEN_MO = 0x0033  # "IPG (i.e. fiber) Open MO" - MO is probably Master Oscillator
# (In BJJCZ documentation, the pin 18 on the IPG connector is
# called "main oscillator"; on the raycus docs it is "emission enable.")
# Seen at end of marking operation with all
# zero parameters. My Ezcad has an "open MO delay"
# of 8 ms
FIBER_GET_StMO_AP = 0x0034  # Unclear what this means; there is no
# corresponding list command. It might be to
# get a status register related to the source.
# It is called IPG_GETStMO_AP in the dll, and the abbreviations
# MO and AP are used for the master oscillator and power amplifier
# signal lines in BJJCZ documentation for the board; LASERST is
# the name given to the error code lines on the IPG connector.
GET_USER_DATA = 0x0036  # ?
GET_FLY_PULSE_COUNT = 0x0037  # ?
GET_FLY_SPEED = 0x0038  # ?
ENABLE_Z_2 = 0x0039  # ? AutoFocus on/off
ENABLE_Z = 0x003A  # AutoFocus on/off
SET_Z_DATA = 0x003B  # ?
SET_SPI_SIMMER_CURRENT = 0x003C  # ?
IS_LITE_VERSION = 0x0040  # Tell laser to nerf itself for ezcad lite apparently
GET_MARK_TIME = (
    0x0041  # Seen at end of cutting, only and always called with param 0x0003
)
SET_FPK_PARAM = 0x0062  # Probably "first pulse killer" = fpk
class Sender:
"""This is a simplified control class for the BJJCZ (Golden Orange,
Beijing JCZ) LMCV4-FIBER-M and compatible boards. All operations are blocking
so, it should probably run in its own thread for nontrivial applications.
It does have an .abort() method that it is expected will be called
asynchronously from another thread."""
sleep_time = 0.001
# We include this "blob" here (the contents of which are all well-understood) to
# avoid introducing a dependency on job generation from within the sender.
# It just consists of the new job command followed by a bunch of NOPs.
_abort_list_chunk = bytearray([0x51, 0x80] + [0x00] * 10) + bytearray( # New job
([0x02, 0x80] + [0x00] * 10) * 255
) # NOP
_packet_size = 256 * 12
def get_packet_size(self):
return (
self._packet_size
) # TODO maybe this should get it from the usb connection class,
# n.b. not instance which will not exist at the time it's needed necessarily
def __init__(self, service, debug=False):
self.service = service
self._lock = threading.Lock()
self._terminate_execution = False
self._footswitch_callback = None
self._usb_connection = None
self._write_port = 0x0000
self._debug = debug
def is_open(self):
return self._usb_connection is not None
def open(self):
mock = self.service.mock
machine_index = self.service.machine_index
if self._usb_connection is not None:
raise BalorCommunicationException("Attempting to open an open connection.")
if not mock:
connection = UsbConnection(machine_index, debug=self._debug)
else:
connection = MockConnection(machine_index, debug=self._debug)
connection.open()
self._usb_connection = connection
self._init_machine()
time.sleep(
0.05
) # We sacrifice this time at the altar of the unknown race condition
return True
def close(self):
self.abort()
if self._usb_connection is not None:
self._usb_connection.close()
self._usb_connection = None
    def job(self, *args, **kwargs):
        """Create a new CommandList bound to this sender.

        All positional and keyword arguments are forwarded to CommandList.
        """
        return CommandList(*args, **kwargs, sender=self)
    def command(self, *args, **kwargs):
        """Public wrapper around _send_command; opens the connection on demand."""
        self._send_command(*args, **kwargs)
    def _send_command(self, *args, **kwargs):
        """Send one command over USB, opening the connection first if needed."""
        if self._usb_connection is None:
            self.open()
        return self._usb_connection.send_command(*args, **kwargs)
    def _send_correction_entry(self, *args):
        """Send one correction-table entry, opening the connection if needed."""
        if self._usb_connection is None:
            self.open()
        self._usb_connection.send_correction_entry(*args)
    def _send_list_chunk(self, *args):
        """Send one chunk of list commands, opening the connection if needed."""
        if self._usb_connection is None:
            self.open()
        self._usb_connection.send_list_chunk(*args)
def _init_machine(self):
"""Initialize the machine."""
self.serial_number = self.raw_get_serial_no()
self.version = self.raw_get_version()
self.raw_get_st_mo_ap()
cor_file = self.service.corfile if self.service.corfile_enabled else None
first_pulse_killer = self.service.first_pulse_killer
pwm_pulse_width = self.service.pwm_pulse_width
pwm_half_period = self.service.pwm_half_period
standby_param_1 = self.service.standby_param_1
standby_param_2 = self.service.standby_param_2
timing_mode = self.service.timing_mode
delay_mode = self.service.delay_mode
laser_mode = self.service.laser_mode
control_mode = self.service.control_mode
fpk2_p1 = self.service.fpk2_p1
fpk2_p2 = self.service.fpk2_p2
fpk2_p3 = self.service.fpk2_p3
fpk2_p4 = self.service.fpk2_p3
fly_res_p1 = self.service.fly_res_p1
fly_res_p2 = self.service.fly_res_p2
fly_res_p3 = self.service.fly_res_p3
fly_res_p4 = self.service.fly_res_p4
# Unknown function
self.raw_reset()
# Load in-machine correction table
cor_table = None
if cor_file is not None:
try:
cor_table = self._read_correction_file(cor_file)
except FileNotFoundError:
raise BalorConfigException(".cor file location did not exist")
self._send_correction_table(cor_table)
self.raw_enable_laser()
self.raw_set_control_mode(control_mode, 0)
self.raw_set_laser_mode(laser_mode, 0)
self.raw_set_delay_mode(delay_mode, 0)
self.raw_set_timing(timing_mode, 0)
self.raw_set_standby(standby_param_1, standby_param_2, 0, 0)
self.raw_set_first_pulse_killer(first_pulse_killer, 0)
self.raw_set_pwm_half_period(pwm_half_period, 0)
# unknown function
self.raw_set_pwm_pulse_width(pwm_pulse_width, 0)
# "IPG_OpenMO" (main oscillator?)
self.raw_fiber_open_mo(0, 0)
# Unclear if used for anything
self._send_command(GET_REGISTER, 0)
# 0x0FFB is probably a 12 bit rendering of int12 -5
# Apparently some parameters for the first pulse killer
self.raw_set_fpk_param_2(fpk2_p1, fpk2_p2, fpk2_p3, fpk2_p4)
# Unknown fiber laser related command
self.raw_set_fly_res(fly_res_p1, fly_res_p2, fly_res_p3, fly_res_p4)
# Is this appropriate for all laser engraver machines?
self.raw_write_port(self._write_port)
# Conjecture is that this puts the output port out of a
# high impedance state (based on the name in the DLL,
# ENABLEZ)
# Based on how it's used, it could also be about latching out
# of the data that has been set up.
self.raw_enable_z()
# We don't know what this does, since this laser's power is set
# digitally
self.raw_write_analog_port_1(0x07FF, 0)
self.raw_enable_z()
def _read_correction_file(self, filename):
table = []
with open(filename, "rb") as f:
f.seek(0x24)
for j in range(65):
for k in range(65):
dx = int.from_bytes(f.read(4), "little", signed=True)
dx = dx if dx >= 0 else -dx + 0x8000
dy = int.from_bytes(f.read(4), "little", signed=True)
dy = dy if dy >= 0 else -dy + 0x8000
table.append([dx & 0xFFFF, dy & 0xFFFF])
return table
def _send_correction_table(self, table=None):
"""Send the onboard correction table to the machine."""
self.raw_write_correction_table(True)
if table is None:
for n in range(65**2):
self.raw_write_correction_line(0, 0, 0 if n == 0 else 1)
else:
for n in range(65**2):
self.raw_write_correction_line(
table[n][0], table[n][1], 0 if n == 0 else 1
)
def is_ready(self):
"""Returns true if the laser is ready for more data, false otherwise."""
self._send_command(GET_REGISTER, 0x0001)
return bool(self._usb_connection.status & 0x20)
def is_busy(self):
"""Returns true if the machine is busy, false otherwise;
Note that running a lighting job counts as being busy."""
self._send_command(GET_REGISTER, 0x0001)
return bool(self._usb_connection.status & 0x04)
def is_ready_and_not_busy(self):
self._send_command(GET_REGISTER, 0x0001)
return bool(self._usb_connection.status & 0x20) and not bool(
self._usb_connection.status & 0x04
)
def wait_finished(self):
while not self.is_ready_and_not_busy():
time.sleep(self.sleep_time)
if self._terminate_execution:
return
def execute(
self, command_list: CommandSource, loop_count=1, callback_finished=None
):
"""Run a job. loop_count is the number of times to repeat the
job; if it is inf, it repeats until aborted. If there is a job
already running, it will be aborted and replaced. Optionally,
calls | |
<reponame>albertvisser/filefindr
"""Gui-toolkit onafhankelijke code t.b.v. Afrift applicaties
opgezet als classes die de toolkit-dependent code aanroepen als methodes op een attribuut ervan
ze worden geïmporteerd via een aparte module die bepaalt welke toolkit er gebruikt wordt
"""
import os
import collections
import subprocess
import json
import logging
import pathlib
from .gui import SelectNamesGui, ResultsGui, MainFrameGui
from .findr_files import Finder, format_result
# Per-user settings live under ~/.afrift; created on first import.
BASE = pathlib.Path.home() / '.afrift'
if not BASE.exists():
    BASE.mkdir()
HERE = pathlib.Path(__file__).parent  # os.path.dirname(__file__)
LOGFILE = HERE.parent / 'logs' / 'afrift.log'
# Logging is opt-in via the DEBUG environment variable (any value except "0").
WANT_LOGGING = 'DEBUG' in os.environ and os.environ["DEBUG"] != "0"
if WANT_LOGGING:
    # Make sure the log directory and file exist before configuring logging.
    if not LOGFILE.parent.exists():
        LOGFILE.parent.mkdir()
    if not LOGFILE.exists():
        LOGFILE.touch()
    logging.basicConfig(filename=str(LOGFILE), level=logging.DEBUG,
                        format='%(asctime)s %(message)s')
# User-facing (Dutch) template: "the files are all in or under directory {}".
common_path_txt = 'De bestanden staan allemaal in of onder de directory "{}"'
iconame = str(HERE / "find.ico")  # os.path.join(HERE, "find.ico")
def log(message):
    """Write *message* to the module log when logging is enabled."""
    if not WANT_LOGGING:
        return
    logging.info(message)
def get_iniloc(path=None):
    """Determine the location & filenames for stored settings.

    If given, *path* should be an absolute path; it defaults to the current
    working directory. The path is flattened (with '~' standing in for the
    home directory and '/' replaced by '_') into a subdirectory name under
    BASE.

    Returns a tuple (settings dir, mru-items file, options file).
    """
    target = pathlib.Path(path) if path else pathlib.Path.cwd()
    home = pathlib.Path.home()
    if target == home:
        location = str(target)[1:]
    else:
        try:
            location = '~' + str(target.relative_to(home))
        except ValueError:
            # Not under the home directory: use the absolute path instead.
            location = str(target.resolve())[1:]
    iniloc = BASE / location.replace('/', '_')
    return iniloc, iniloc / 'mru_items.json', iniloc / 'options.json'
class SelectNames():
    """Intermediate dialog for choosing the files (or directories) to process.

    This class contains the methods that are independent of the chosen
    GUI toolkit; widget work is delegated to SelectNamesGui.
    """
    def __init__(self, parent, files=True):
        # files=True: select files to skip; files=False: select directories.
        self.do_files = files
        self.parent = parent
        self.title = self.parent.title + " - file list"
        self.iconame = iconame
        if files:
            text = "Selecteer de bestanden die je *niet* wilt verwerken"
            # Map display string -> original path object.
            self.names = {str(x): x for x in self.parent.names}
        else:
            text = "Selecteer de directories die je *niet* wilt verwerken"
            # NOTE(review): self.names is not set on this branch, yet show()
            # returns it — presumably it is filled in elsewhere; verify.
        self.gui = SelectNamesGui(parent, self)
        captions = {'heading': text, 'sel_all': 'Select/Unselect All', 'invert': 'Invert selection',
                    'exit': "&Terug", 'execute': "&Klaar"}
        self.gui.setup_screen(captions)

    def show(self):
        """Run the dialog; return (dialog result, names mapping)."""
        return self.gui.go(), self.names
class Results():
    """Show search results on screen.

    This class contains the methods that are independent of the chosen
    GUI toolkit; widget work is delegated to ResultsGui.
    """
    def __init__(self, parent, common_path=''):
        self.parent = parent
        self.common = common_path
        self.show_context = self.parent.p["context"]
        self.results = []
        # Column header: line number only in single-file mode, else file/line.
        self.titel = 'Regel' if self.parent.apptype == "single" else 'File/Regel'
        self.iconame = iconame
        self.gui = ResultsGui(parent, self)
        # Replace mode on a single file only shows a summary label.
        self.label_only = self.parent.p['vervang'] and self.parent.apptype == 'single'
        if self.label_only:
            aantal = self.parent.zoekvervang.rpt[1].split(None, 1)[1]
            label_txt = self.parent.zoekvervang.rpt[0]
            label_txt = label_txt.replace('vervangen', aantal + ' vervangen')
        else:
            label_txt = "{} ({} items)".format(self.parent.zoekvervang.rpt[0],
                                               len(self.parent.zoekvervang.rpt) - 1)
        if self.parent.apptype == "multi":
            label_txt += '\n' + common_path_txt.format(self.common.rstrip(os.sep))
        captions = {'heading': label_txt, 'ctxt': 'Context', 'txt': 'Tekst', 'hlp': 'Help',
                    'rslt': '&Goto Result', 'exit': "&Klaar", 'rpt': "&Repeat Search",
                    'cpy': "Copy to &File", 'clp': "Copy to &Clipboard",
                    'alt': '&Zoek anders', 'sel': 'Vervang in &Selectie', 'all': 'Vervang &Alles',
                    'fmt': 'Formatteer output:',
                    'pth': "toon directorypad", 'dlm': "comma-delimited", 'sum': "summarized"}
        self.build_list()
        self.gui.setup_screen(captions)

    def build_list(self):
        "construct list of results from the report lines in zoekvervang.rpt"
        for ix, line in enumerate(self.parent.zoekvervang.rpt):
            if ix == 0:
                # First report line is the heading.
                kop = line
            elif line != "":
                # Each result line looks like "<where>: <what>".
                where, what = line.split(": ", 1)
                if self.parent.apptype == "single":
                    # Keep only the line number part after "r. ".
                    if "r. " in where:
                        where = where.split("r. ", 1)[1]
                    else:
                        where = ""
                if self.common and self.common != '/':
                    # Strip the common path prefix from the location.
                    where = where.replace(str(self.common), "")
                if self.show_context:
                    # Location ends in " (<context>)"; split it off.
                    where, rest = where.rsplit(' (', 1)
                    context = rest.split(')')[0]
                    self.results.append((where, context, what))
                else:
                    self.results.append((where, what))
        self.results.insert(0, kop)

    def show(self):
        """show the dialog screen
        """
        self.gui.go()

    def get_results(self):
        """format output according to the chosen output options
        """
        toonpad = self.gui.get_pth()   # show full directory path
        comma = self.gui.get_csv()     # comma-delimited output
        context = self.gui.get_sum()   # summarized output
        text = ["{}".format(self.results[0])]
        if self.parent.apptype == "multi" and not toonpad:
            text.append(common_path_txt.format(self.common))
        text.append("")
        if comma:
            import io
            import csv
            textbuf = io.StringIO()
            writer = csv.writer(textbuf, dialect='unix')
            header = [('Path/file' if toonpad else 'File'), 'Line', 'Context', 'Result']
        for item in self.results[1:]:
            result = list(item)
            if self.parent.apptype == 'single':
                result[0] = ' r. ' + result[0]
            if toonpad and (self.parent.apptype == 'multi' or comma):
                result[0] = self.common + result[0]
            if comma:
                # Split "<location> r. <line>" into separate csv columns.
                loc, line = result[0].rsplit(' r. ', 1)
                result[:1] = [loc, line]
                if header and len(header) > len(result):
                    # No context column in this result: drop it from the header.
                    header[2:] = header[3:]
                if self.parent.apptype == 'single' and not toonpad:
                    result = result[1:]
                    if header:
                        header = header[1:]
                if header:
                    # Write the header once, before the first data row.
                    writer.writerow(header)
                    header = None
                writer.writerow(result)
            else:
                text.append(" ".join(result).strip())
        if comma:
            text += textbuf.getvalue().split("\n")
            textbuf.close()
        if context:
            context = 'py' if self.show_context else None
            if self.parent.apptype == 'single':
                text = ['{} {}'.format(self.parent.p['filelist'][0], x) if x else '' for x in text]
            text = format_result(text, context)
            if self.parent.apptype == 'single' and not toonpad:
                text = [x.replace(str(self.parent.p['filelist'][0]), '', 1).strip() for x in text]
        return text

    def refresh(self, *args, **kwargs):
        """repeat search and show new results
        """
        self.results = []
        self.gui.clear_contents()
        self.parent.zoekvervang.rpt = ["".join(self.parent.zoekvervang.specs)]
        self.parent.gui.set_waitcursor(True)
        self.parent.zoekvervang.go()
        self.parent.gui.set_waitcursor(False)
        if len(self.parent.zoekvervang.rpt) == 1:
            # Only the heading was produced: nothing found.
            self.gui.breekaf("Niks gevonden", done=False)
            return
        elif len(self.parent.zoekvervang.rpt) == 2 and self.parent.zoekvervang.p['wijzig']:
            count_txt = self.parent.zoekvervang.rpt.pop().split(': ')[-1]
        else:
            count_txt = '{} items'.format(len(self.parent.zoekvervang.rpt) - 1)
        label_txt = ''
        replcount = kwargs.get('replace_count', '')
        if replcount:
            srch = self.parent.zoekvervang.p['zoek']
            repl = kwargs.get('replace_text', '')
            label_txt = '`{}` with `{}` replaced {} in lines\n'.format(srch, repl, replcount)
        label_txt += "{} ({})".format(self.parent.zoekvervang.rpt[0], count_txt)
        if self.parent.apptype == "multi":
            label_txt += '\n' + common_path_txt.format(self.common)
        self.gui.set_header(label_txt)
        self.build_list()
        self.gui.populate_list()

    def check_option_combinations_ok(self):
        """rule out nonsensical option combination(s)
        """
        title, msg = (self.parent.title,
                      "Summarize to comma delimited is not a sensible option, request denied")
        if self.gui.get_sum() and self.gui.get_csv():
            self.gui.meld(title, msg)
            return False
        return True

    def kopie(self, *args):
        """callback for button 'Copy to file'
        """
        if not self.check_option_combinations_ok():
            return
        # Derive a filesystem-safe filename from the search term.
        f_nam = self.parent.p["zoek"]
        for char in '/\\?%*:|"><.':
            if char in f_nam:
                f_nam = f_nam.replace(char, "~")
        if self.gui.get_csv():
            ext = '.csv'
        else:
            ext = '.txt'
        # Produces "files-containing-<search term><ext>".
        f_nam = f_nam.join(("files-containing-", ext))
        savename = self.gui.get_savefile(f_nam, ext)
        if savename:
            self.gui.remember_settings()
            with open(savename, "w") as f_out:
                for line in self.get_results():
                    f_out.write(line + "\n")

    def help(self):
        """show instructions
        """
        self.gui.meld('info', "Select a line and doubleclick or press Ctrl-G to open the"
                      " indicated file\nat the indicated line (not in single file mode)")

    def to_clipboard(self, *args):
        """callback for button 'Copy to clipboard'
        """
        if self.check_option_combinations_ok():
            self.gui.remember_settings()
            self.gui.copy_to_clipboard('\n'.join(self.get_results()) + '\n')

    def goto_result(self, row, col):
        """open the file containing the selected item in the configured editor
        """
        if self.parent.apptype == 'single':
            self.gui.meld('ahem', 'Not in single file mode')
            return
        selected = self.results[row + 1]
        target, line = selected[0].split(' r. ')
        target = self.common + target
        prog, fileopt, lineopt = self.parent.editor_option
        subprocess.run([prog, fileopt.format(target), lineopt.format(line)])

    def vervang_in_sel(self, *args):
        "replace afterwards, in the selected result lines only"
        # determine which result lines are selected
        items = self.gui.get_selection()
        if not items:
            self.gui.meld(self.parent.resulttitel, 'Geen regels geselecteerd om in te vervangen')
            return
        lines_to_replace = [x.split(' r. ') for x in items]
        prompt = 'vervang `{}` in geselecteerde regels door:'.format(self.parent.p['zoek'])
        text, ok = self.gui.get_text_from_user(self.parent.resulttitel, prompt)
        if ok:
            replaced = self.parent.zoekvervang.replace_selected(text, lines_to_replace)
            # self.parent.zoekvervang.setup_search() -- is this needed if nothing changed?
            self.refresh(replace_text=text, replace_count=replaced)

    def vervang_alles(self, *args):
        "replace afterwards, in all result lines"
        prompt = 'vervang `{}` in alle regels door:'.format(self.parent.p['zoek'])
        text, ok = self.gui.get_text_from_user(self.parent.resulttitel, prompt)
        if ok:
            self.parent.zoekvervang.p['vervang'] = text
            self.parent.zoekvervang.p['wijzig'] = True
            self.parent.zoekvervang.setup_search()
            self.refresh()

    def zoek_anders(self, *args):
        "search for something else within the same selection"
        origzoek = self.parent.zoekvervang.p['zoek']
        prompt = 'zoek in dezelfde selectie naar:'
        text, ok = self.gui.get_text_from_user(self.parent.resulttitel, prompt)
        if ok:
            self.parent.zoekvervang.p['zoek'] = text
            self.parent.zoekvervang.setup_search()
            self.refresh()
            # restore the original search term afterwards
            print('In zoek_anders: origzoek terugzetten naar', origzoek)
            self.parent.zoekvervang.p['zoek'] = origzoek
            self.parent.zoekvervang.setup_search()
class MainFrame():
"""Hoofdscherm van de applicatie
deze class bevat methoden die onafhankelijk zijn van de gekozen
GUI-toolkit
"""
def __init__(self, **kwargs):
"""attributen die altijd nodig zijn
"""
log('in MainFrame.init: cwd is {}'.format(pathlib.Path.cwd()))
log(' kwargs is {}'.format(kwargs))
self.apptype = kwargs.pop('apptype', '')
fnaam = kwargs.pop('fnaam', '')
flist = kwargs.pop('flist', None)
self.title = "Albert's find-replace in files tool"
self.iconame = iconame
self.fouttitel = self.title + "- fout"
self.resulttitel = self.title + " - Resultaten"
# self.apptype = apptype
self.hier = pathlib.Path.cwd() # os.getcwd()
self.mru_items = {"zoek": [], "verv": [], "types": [], "dirs": []}
self.save_options_keys = (("case", 'case_sensitive'), ("woord", 'whole_words'),
("subdirs", 'recursive'), ("context", 'python_context'),
("negeer", 'ignore_comments'))
self.outopts = {'full_path': False, 'as_csv': False, 'summarize': False}
self.screen_choices = {'regex': False, 'case': False, 'woord': False,
'subdirs': False, 'follow_symlinks': False, 'select_subdirs': False,
'select_files': False, 'context': False, 'negeer': False,
'dont_save': False, 'no_gui': False,
'output_file': False, 'full_path': False, 'as_csv': False,
'summarize': False}
# het idee achter bovenstaande dict is om alle keuzes op het scherm te verzamelen
# en ze eerst vanuit de opgeslagen waarden en daarna vanuit de
# opgegeven startup-opties te vullen - zie ook onderstaande captions en read_kwargs()
fnpath = pathlib.Path(fnaam).expanduser().resolve()
if self.apptype == "" and fnpath.exists() and not | |
1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0]),
)
### IDL colormap 20 :: Hue Sat Lightness 2 ###
# 256-entry colour lookup table for IDL palette 20 ("Hue Sat Lightness 2"):
# a tuple of four parallel 256-element arrays of channel values normalized
# to [0, 1] (presumably red, green, blue, alpha in that order, matching the
# other entries in this table -- confirm against the consumer of
# color_map_luts).  Machine-generated data; do not edit values by hand.
color_map_luts['idl20'] = \
    (
array([ 0.9882812, 0.9882812, 0.9804688, 0.9765625, 0.9765625, 0.9726562,
        0.9726562, 0.9687500, 0.9687500, 0.9648438, 0.9648438, 0.9609375,
        0.9609375, 0.9609375, 0.9570312, 0.9570312, 0.9531250, 0.9531250,
        0.9531250, 0.9492188, 0.9492188, 0.9453125, 0.9453125, 0.9453125,
        0.9414062, 0.9414062, 0.9375000, 0.9375000, 0.9375000, 0.9335938,
        0.9335938, 0.9335938, 0.9296875, 0.9296875, 0.9296875, 0.9257812,
        0.9257812, 0.9218750, 0.9218750, 0.9218750, 0.9179688, 0.9179688,
        0.9179688, 0.9179688, 0.9140625, 0.9101562, 0.9101562, 0.9062500,
        0.9062500, 0.9023438, 0.9023438, 0.8984375, 0.8945312, 0.8945312,
        0.8906250, 0.8867188, 0.8828125, 0.8828125, 0.8789062, 0.8750000,
        0.8710938, 0.8710938, 0.8671875, 0.8632812, 0.8593750, 0.8554688,
        0.8515625, 0.8476562, 0.8437500, 0.8398438, 0.8359375, 0.8320312,
        0.8281250, 0.8242188, 0.8203125, 0.8125000, 0.8085938, 0.8046875,
        0.8007812, 0.7968750, 0.7890625, 0.7812500, 0.7773438, 0.7734375,
        0.7656250, 0.7617188, 0.7578125, 0.7500000, 0.7500000, 0.7460938,
        0.7460938, 0.7421875, 0.7382812, 0.7343750, 0.7304688, 0.7226562,
        0.7226562, 0.7187500, 0.7148438, 0.7148438, 0.7109375, 0.7070312,
        0.7031250, 0.6992188, 0.6953125, 0.6914062, 0.6875000, 0.6835938,
        0.6796875, 0.6757812, 0.6718750, 0.6718750, 0.6679688, 0.6640625,
        0.6601562, 0.6562500, 0.6523438, 0.6484375, 0.6445312, 0.6445312,
        0.6406250, 0.6367188, 0.6328125, 0.6289062, 0.6250000, 0.6210938,
        0.6171875, 0.6132812, 0.6093750, 0.6054688, 0.6015625, 0.5976562,
        0.5937500, 0.5898438, 0.5820312, 0.5781250, 0.5742188, 0.5703125,
        0.5664062, 0.5625000, 0.5585938, 0.5546875, 0.5468750, 0.5429688,
        0.5390625, 0.5390625, 0.5351562, 0.5312500, 0.5273438, 0.5234375,
        0.5156250, 0.5117188, 0.5078125, 0.5039062, 0.5000000, 0.4960938,
        0.4921875, 0.4882812, 0.4843750, 0.4765625, 0.4726562, 0.4687500,
        0.4648438, 0.4609375, 0.4609375, 0.4531250, 0.4453125, 0.4414062,
        0.4375000, 0.4335938, 0.4335938, 0.4257812, 0.4218750, 0.4257812,
        0.4335938, 0.4453125, 0.4492188, 0.4570312, 0.4609375, 0.4687500,
        0.4804688, 0.4882812, 0.4960938, 0.5039062, 0.5117188, 0.5234375,
        0.5351562, 0.5429688, 0.5507812, 0.5625000, 0.5742188, 0.5859375,
        0.5937500, 0.6054688, 0.6171875, 0.6289062, 0.6406250, 0.6523438,
        0.6679688, 0.6796875, 0.6914062, 0.7070312, 0.7187500, 0.7343750,
        0.7500000, 0.7617188, 0.7773438, 0.7929688, 0.8085938, 0.8242188,
        0.8437500, 0.8593750, 0.8750000, 0.8906250, 0.9101562, 0.9257812,
        0.9296875, 0.9335938, 0.9335938, 0.9375000, 0.9375000, 0.9375000,
        0.9414062, 0.9414062, 0.9414062, 0.9453125, 0.9453125, 0.9492188,
        0.9492188, 0.9492188, 0.9531250, 0.9531250, 0.9570312, 0.9570312,
        0.9570312, 0.9609375, 0.9609375, 0.9648438, 0.9648438, 0.9648438,
        0.9687500, 0.9687500, 0.9726562, 0.9726562, 0.9765625, 0.9765625,
        0.9804688, 0.9804688, 0.9843750, 0.9843750, 0.9882812, 0.9882812,
        0.9921875, 0.9921875, 0.9921875, 0.9921875]),
array([ 0.9843750, 0.9843750, 0.9765625, 0.9765625, 0.9726562, 0.9726562,
        0.9687500, 0.9687500, 0.9648438, 0.9648438, 0.9609375, 0.9609375,
        0.9570312, 0.9570312, 0.9531250, 0.9531250, 0.9492188, 0.9453125,
        0.9453125, 0.9414062, 0.9414062, 0.9375000, 0.9335938, 0.9335938,
        0.9296875, 0.9296875, 0.9257812, 0.9218750, 0.9218750, 0.9179688,
        0.9179688, 0.9140625, 0.9101562, 0.9101562, 0.9062500, 0.9062500,
        0.9023438, 0.8984375, 0.8984375, 0.8945312, 0.8906250, 0.8906250,
        0.8867188, 0.8828125, 0.8828125, 0.8789062, 0.8750000, 0.8710938,
        0.8710938, 0.8671875, 0.8632812, 0.8632812, 0.8593750, 0.8554688,
        0.8515625, 0.8515625, 0.8476562, 0.8476562, 0.8437500, 0.8398438,
        0.8359375, 0.8359375, 0.8320312, 0.8281250, 0.8242188, 0.8242188,
        0.8164062, 0.8164062, 0.8125000, 0.8125000, 0.8046875, 0.8046875,
        0.8007812, 0.8007812, 0.7968750, 0.7890625, 0.7890625, 0.7851562,
        0.7851562, 0.7812500, 0.7773438, 0.7695312, 0.7695312, 0.7656250,
        0.7656250, 0.7617188, 0.7617188, 0.7578125, 0.7578125, 0.7578125,
        0.7578125, 0.7578125, 0.7578125, 0.7578125, 0.7617188, 0.7578125,
        0.7617188, 0.7617188, 0.7617188, 0.7656250, 0.7656250, 0.7656250,
        0.7695312, 0.7695312, 0.7734375, 0.7734375, 0.7773438, 0.7773438,
        0.7812500, 0.7812500, 0.7851562, 0.7890625, 0.7929688, 0.7968750,
        0.7968750, 0.8007812, 0.8046875, 0.8085938, 0.8125000, 0.8203125,
        0.8242188, 0.8281250, 0.8320312, 0.8398438, 0.8437500, 0.8476562,
        0.8554688, 0.8593750, 0.8671875, 0.8671875, 0.8671875, 0.8671875,
        0.8671875, 0.8671875, 0.8710938, 0.8710938, 0.8710938, 0.8710938,
        0.8710938, 0.8710938, 0.8710938, 0.8710938, 0.8710938, 0.8710938,
        0.8710938, 0.8710938, 0.8710938, 0.8710938, 0.8710938, 0.8710938,
        0.8750000, 0.8750000, 0.8750000, 0.8750000, 0.8750000, 0.8750000,
        0.8750000, 0.8750000, 0.8750000, 0.8789062, 0.8789062, 0.8789062,
        0.8789062, 0.8789062, 0.8789062, 0.8789062, 0.8828125, 0.8828125,
        0.8828125, 0.8828125, 0.8828125, 0.8867188, 0.8867188, 0.8867188,
        0.8867188, 0.8867188, 0.8867188, 0.8906250, 0.8906250, 0.8906250,
        0.8906250, 0.8945312, 0.8945312, 0.8945312, 0.8945312, 0.8945312,
        0.8984375, 0.8984375, 0.8984375, 0.8984375, 0.9023438, 0.9023438,
        0.9023438, 0.9023438, 0.9062500, 0.9062500, 0.9062500, 0.9062500,
        0.9101562, 0.9101562, 0.9101562, 0.9140625, 0.9140625, 0.9140625,
        0.9140625, 0.9179688, 0.9179688, 0.9179688, 0.9218750, 0.9218750,
        0.9218750, 0.9257812, 0.9257812, 0.9257812, 0.9296875, 0.9296875,
        0.9179688, 0.9023438, 0.8867188, 0.8710938, 0.8515625, 0.8359375,
        0.8203125, 0.8007812, 0.7851562, 0.7656250, 0.7460938, 0.7304688,
        0.7109375, 0.6914062, 0.6718750, 0.6523438, 0.6328125, 0.6132812,
        0.5898438, 0.5703125, 0.5468750, 0.5273438, 0.5039062, 0.4804688,
        0.4570312, 0.4335938, 0.4140625, 0.3867188, 0.3632812, 0.3320312,
        0.3046875, 0.2890625, 0.2617188, 0.2382812, 0.2109375, 0.1835938,
        0.1562500, 0.1289062, 0.1054688, 0.1054688]),
array([ 0.9843750, 0.9843750, 0.9765625, 0.9765625, 0.9726562, 0.9726562,
        0.9687500, 0.9687500, 0.9648438, 0.9648438, 0.9609375, 0.9609375,
        0.9570312, 0.9570312, 0.9531250, 0.9531250, 0.9492188, 0.9492188,
        0.9492188, 0.9453125, 0.9453125, 0.9414062, 0.9414062, 0.9414062,
        0.9375000, 0.9375000, 0.9335938, 0.9335938, 0.9335938, 0.9296875,
        0.9296875, 0.9296875, 0.9257812, 0.9257812, 0.9257812, 0.9257812,
        0.9218750, 0.9218750, 0.9218750, 0.9218750, 0.9179688, 0.9179688,
        0.9179688, 0.9179688, 0.9140625, 0.9140625, 0.9140625, 0.9140625,
        0.9101562, 0.9101562, 0.9101562, 0.9062500, 0.9062500, 0.9062500,
        0.9062500, 0.9023438, 0.9023438, 0.9023438, 0.8984375, 0.8984375,
        0.8984375, 0.8984375, 0.8945312, 0.8945312, 0.8945312, 0.8945312,
        0.8945312, 0.8906250, 0.8906250, 0.8906250, 0.8906250, 0.8867188,
        0.8867188, 0.8867188, 0.8867188, 0.8867188, 0.8828125, 0.8828125,
        0.8828125, 0.8828125, 0.8828125, 0.8828125, 0.8789062, 0.8828125,
        0.8789062, 0.8789062, 0.8789062, 0.8789062, 0.8750000, 0.8789062,
        0.8750000, 0.8750000, 0.8750000, 0.8750000, 0.8750000, 0.8750000,
        0.8750000, 0.8750000, 0.8750000, 0.8710938, 0.8710938, 0.8710938,
        0.8710938, 0.8710938, 0.8710938, 0.8710938, 0.8710938, 0.8710938,
        0.8710938, 0.8710938, 0.8710938, 0.8671875, 0.8671875, 0.8671875,
        0.8671875, 0.8671875, 0.8671875, 0.8671875, 0.8671875, 0.8671875,
        0.8671875, 0.8671875, 0.8671875, 0.8671875, 0.8671875, 0.8671875,
        0.8671875, 0.8671875, 0.8671875, 0.8671875, 0.8593750, 0.8515625,
        0.8476562, 0.8398438, 0.8320312, 0.8242188, 0.8203125, 0.8125000,
        0.8046875, 0.7968750, 0.7890625, 0.7812500, 0.7695312, 0.7617188,
        0.7539062, 0.7460938, 0.7343750, 0.7265625, 0.7148438, 0.7070312,
        0.6953125, 0.6875000, 0.6757812, 0.6640625, 0.6562500, 0.6445312,
        0.6328125, 0.6210938, 0.6093750, 0.5976562, 0.5859375, 0.5703125,
        0.5585938, 0.5468750, 0.5351562, 0.5195312, 0.5039062, 0.4921875,
        0.4765625, 0.4609375, 0.4531250, 0.4375000, 0.4179688, 0.4140625,
        0.4101562, 0.4101562, 0.4062500, 0.3984375, 0.3906250, 0.3867188,
        0.3867188, 0.3789062, 0.3750000, 0.3671875, 0.3632812, 0.3632812,
        0.3554688, 0.3515625, 0.3437500, 0.3437500, 0.3359375, 0.3320312,
        0.3242188, 0.3242188, 0.3164062, 0.3125000, 0.3046875, 0.3046875,
        0.2968750, 0.2929688, 0.2851562, 0.2812500, 0.2773438, 0.2734375,
        0.2695312, 0.2617188, 0.2578125, 0.2539062, 0.2460938, 0.2421875,
        0.2421875, 0.2304688, 0.2265625, 0.2265625, 0.2148438, 0.2109375,
        0.2109375, 0.1992188, 0.1953125, 0.1914062, 0.1835938, 0.1796875,
        0.1757812, 0.1679688, 0.1640625, 0.1601562, 0.1523438, 0.1484375,
        0.1406250, 0.1367188, 0.1328125, 0.1250000, 0.1210938, 0.1171875,
        0.1093750, 0.1054688, 0.0976562, 0.0937500, 0.0898438, 0.0859375,
        0.0781250, 0.0703125, 0.0664062, 0.0585938, 0.0546875, 0.0507812,
        0.0312500, 0.0507812, 0.0312500, 0.0273438, 0.0195312, 0.0156250,
        0.0078125, 0.0039062, 0.0000000, 0.0000000]),
array([ 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
        1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
        1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
        1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
        1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
        1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
        1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
        1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
        1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
        1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
        1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
        1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
        1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
        1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
        1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
        1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
        1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
        1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
        1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
        1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
        1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
        1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
        1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
        1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
        1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
        1.0, 1.0, 1.0, 1.0, 1.0, 1.0]),
    )
### IDL colormap 21 :: Hue Sat Value 1 ###
color_map_luts['idl21'] = \
(
array([ 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9843750, 0.9648438, 0.9453125, 0.9257812,
0.9101562, 0.8906250, 0.8710938, 0.8554688, 0.8359375, 0.8203125,
0.8007812, 0.7851562, 0.7656250, 0.7500000, 0.7343750, 0.7187500,
0.6992188, 0.6835938, 0.6679688, 0.6523438, 0.6367188, 0.6210938,
0.6054688, 0.5898438, 0.5742188, 0.5625000, 0.5468750, 0.5312500,
0.5195312, 0.5039062, 0.4882812, 0.4765625, 0.4609375, 0.4492188,
0.4375000, 0.4218750, 0.4101562, 0.3984375, 0.3867188, 0.3710938,
0.3593750, 0.3476562, 0.3359375, 0.3320312, 0.3359375, 0.3398438,
0.3437500, 0.3476562, 0.3515625, 0.3554688, 0.3593750, 0.3632812,
0.3671875, 0.3710938, 0.3750000, 0.3789062, 0.3828125, 0.3867188,
0.3906250, 0.3945312, 0.3984375, 0.4023438, 0.4062500, 0.4101562,
0.4140625, 0.4179688, 0.4218750, 0.4257812, 0.4296875, 0.4335938,
0.4375000, 0.4414062, 0.4453125, 0.4492188, 0.4531250, 0.4570312,
0.4609375, 0.4648438, 0.4687500, 0.4726562, 0.4765625, 0.4804688,
0.4843750, 0.4882812, 0.4921875, 0.4960938, 0.5000000, 0.5039062,
0.5078125, 0.5117188, 0.5156250, 0.5195312, 0.5234375, 0.5273438,
0.5312500, 0.5351562, 0.5390625, 0.5429688, 0.5468750, 0.5507812,
0.5546875, 0.5585938, 0.5625000, 0.5664062, 0.5703125, 0.5742188,
0.5781250, 0.5820312, 0.5859375, 0.5898438, 0.5937500, 0.5976562,
0.6015625, 0.6054688, 0.6093750, 0.6132812, 0.6171875, 0.6210938,
0.6250000, 0.6289062, 0.6328125, 0.6367188, 0.6406250, 0.6445312,
0.6484375, 0.6523438, 0.6562500, 0.6601562, 0.6640625, | |
import gc
import os
import platform
import tempfile
import time
import sys
import functools
import pytest
from testing.testifycompat import (
assert_equal,
assert_raises,
mock,
)
from staticconf import (
config,
errors,
proxy,
schema,
testing,
validation,
)
import staticconf
class TestRemoveByKeys(object):
    """Tests for config.remove_by_keys filtering of mapping items."""

    def test_empty_dict(self):
        # an empty mapping yields an empty result, whatever the keys are
        assert_equal([], config.remove_by_keys({}, range(3)))

    def test_no_keys(self):
        source = dict(enumerate(range(3)))
        # with no keys to drop, every item survives
        assert_equal(list(source.items()), config.remove_by_keys(source, []))

    def test_overlap(self):
        source = dict(enumerate(range(8)))
        remaining = [(0, 0), (2, 2), (4, 4), (6, 6)]
        # odd keys are removed, even keys remain
        assert_equal(remaining, config.remove_by_keys(source, [1, 3, 5, 7]))
class TestConfigMap(object):
    """Tests for the ConfigMap mapping type."""

    @pytest.fixture(autouse=True)
    def setup_config_map(self):
        # three-entry map shared by every test in this class
        self.config_map = config.ConfigMap(one=1, three=3, seven=7)

    def test_no_iteritems(self):
        # the Python-2-era iteritems API must not be exposed
        assert not hasattr(self.config_map, 'iteritems')

    def test_getitem(self):
        assert_equal(self.config_map['one'], 1)
        assert_equal(self.config_map['seven'], 7)

    def test_get(self):
        # present key returns its value; absent key falls back to default
        assert_equal(self.config_map.get('three'), 3)
        assert_equal(self.config_map.get('four', 0), 0)

    def test_contains(self):
        assert 'one' in self.config_map
        assert 'two' not in self.config_map

    def test_len(self):
        assert_equal(len(self.config_map), 3)
class TestConfigurationNamespace(object):
    """Tests for config.ConfigNamespace: proxy registration, stored values,
    key validation and clearing.
    """

    @pytest.fixture(autouse=True)
    def setup_namespace(self):
        # fresh namespace per test; config_data maps 1..3 to 'one'..'three'
        self.name = 'the_name'
        self.namespace = config.ConfigNamespace(self.name)
        self.config_data = dict(enumerate(['one', 'two', 'three'], 1))

    def test_register_get_value_proxies(self):
        proxies = [mock.Mock(), mock.Mock()]
        for mock_proxy in proxies:
            self.namespace.register_proxy(mock_proxy)
        assert_equal(self.namespace.get_value_proxies(), proxies)

    @pytest.mark.skipif('PyPy' in platform.python_implementation(), reason="Fails on PyPy")
    def test_get_value_proxies_does_not_contain_out_of_scope_proxies(self):
        # Proxies registered inside a_scope() become unreachable when it
        # returns; after a gc pass the namespace must no longer list them
        # (presumably held via weak references -- the test only asserts the
        # observable behaviour, which is also why it is skipped on PyPy's
        # non-refcounting gc).
        assert not self.namespace.get_value_proxies()

        def a_scope():
            mock_proxy = mock.create_autospec(proxy.ValueProxy)
            self.namespace.register_proxy(mock_proxy)

        a_scope()
        a_scope()
        gc.collect()
        assert_equal(len(self.namespace.get_value_proxies()), 0)

    def test_update_values(self):
        values = dict(one=1, two=2)
        self.namespace.update_values(values)
        assert 'one' in self.namespace
        assert 'two' in self.namespace

    def test_get_config_values(self):
        self.namespace['stars'] = 'foo'
        values = self.namespace.get_config_values()
        assert_equal(values, {'stars': 'foo'})

    def test_get_config_dict(self):
        # dotted keys are exploded into a nested dict structure
        self.namespace['one.two.three.four'] = 5
        self.namespace['one.two.three.five'] = 'six'
        self.namespace['one.b.cats'] = [1, 2, 3]
        self.namespace['a.two'] = 'c'
        self.namespace['first'] = True
        d = self.namespace.get_config_dict()
        assert_equal(d, {
            'one': {
                'b': {
                    'cats': [1, 2, 3],
                },
                'two': {
                    'three': {
                        'four': 5,
                        'five': 'six',
                    },
                },
            },
            'a': {
                'two': 'c',
            },
            'first': True,
        })

    def test_get_known_keys(self):
        proxies = [mock.Mock(), mock.Mock()]
        for mock_proxy in proxies:
            self.namespace.register_proxy(mock_proxy)
        expected = set([mock_proxy.config_key for mock_proxy in proxies])
        assert_equal(self.namespace.get_known_keys(), expected)

    def test_validate_keys_no_unknown_keys(self):
        # every config key has a registered proxy, so nothing is logged
        proxies = [mock.Mock(config_key=i) for i in self.config_data]
        for mock_proxy in proxies:
            self.namespace.register_proxy(mock_proxy)
        with mock.patch('staticconf.config.log') as mock_log:
            self.namespace.validate_keys(self.config_data, True)
            self.namespace.validate_keys(self.config_data, False)
        assert not mock_log.warn.mock_calls

    def test_validate_keys_unknown_log(self):
        # unknown keys with the second argument False are only logged
        with mock.patch('staticconf.config.log') as mock_log:
            self.namespace.validate_keys(self.config_data, False)
        assert_equal(len(mock_log.info.mock_calls), 1)

    def test_validate_keys_unknown_log_keys_only(self):
        # with log_keys_only the message names the keys but not the values
        with mock.patch('staticconf.config.log') as mock_log:
            self.namespace.validate_keys(
                self.config_data,
                False,
                log_keys_only=True,
            )
        assert_equal(len(mock_log.info.mock_calls), 1)
        log_msg = mock_log.info.call_args[0][0]
        unknown = config.remove_by_keys(
            self.config_data,
            self.namespace.get_known_keys(),
        )
        for k, v in unknown:
            # Have to cast to strings here, since log_msg is a string
            key_string, val_string = str(k), str(v)
            assert key_string in log_msg
            assert val_string not in log_msg

    def test_validate_keys_unknown_raise(self):
        # unknown keys with the second argument True raise ConfigurationError
        assert_raises(errors.ConfigurationError,
                      self.namespace.validate_keys, self.config_data, True)

    def test_clear(self):
        self.namespace.apply_config_data(self.config_data, False, False)
        assert self.namespace.get_config_values()
        self.namespace.clear()
        assert_equal(self.namespace.get_config_values(), {})
class TestGetNamespace(object):
    """Tests for config.get_namespace creation and lookup semantics."""

    @pytest.yield_fixture(autouse=True)
    def mock_namespaces(self):
        # isolate the global namespace registry for each test
        with mock.patch.dict(config.configuration_namespaces):
            yield

    def test_get_namespace_new(self):
        # an unknown name is created on first access
        name = 'some_unlikely_name'
        assert name not in config.configuration_namespaces
        config.get_namespace(name)
        assert name in config.configuration_namespaces

    def test_get_namespace_existing(self):
        # a second lookup must hand back the very same namespace
        name = 'the_common_name'
        first = config.get_namespace(name)
        assert_equal(first, config.get_namespace(name))
class TestReload(object):
    """Tests for staticconf.reload: value proxies must pick up newly loaded
    configuration for the reloaded namespace(s) only.
    """

    @pytest.yield_fixture(autouse=True)
    def mock_namespaces(self):
        # keep each test's namespaces from leaking into the global registry
        with mock.patch.dict(config.configuration_namespaces):
            yield

    def test_reload_default(self):
        # reload() with no arguments refreshes the DEFAULT namespace
        staticconf.DictConfiguration(dict(one='three', seven='nine'))
        one, seven = staticconf.get('one'), staticconf.get('seven')
        staticconf.DictConfiguration(dict(one='ten', seven='el'))
        staticconf.reload()
        assert_equal(one, 'ten')
        assert_equal(seven, 'el')

    def test_reload_all(self):
        # all_names=True refreshes every namespace, not just DEFAULT
        name = 'another_one'
        staticconf.DictConfiguration(dict(one='three'))
        staticconf.DictConfiguration(dict(two='three'), namespace=name)
        one, two = staticconf.get('one'), staticconf.get('two', namespace=name)
        # access the values to set the value_proxy cache
        one.value, two.value
        staticconf.DictConfiguration(dict(one='four'))
        staticconf.DictConfiguration(dict(two='five'), namespace=name)
        staticconf.reload(all_names=True)
        assert_equal(one, 'four')
        assert_equal(two, 'five')

    def test_reload_single(self):
        # plain reload() leaves other namespaces' cached values untouched
        name = 'another_one'
        staticconf.DictConfiguration(dict(one='three'))
        staticconf.DictConfiguration(dict(two='three'), namespace=name)
        one, two = staticconf.get('one'), staticconf.get('two', namespace=name)
        # access the values to set the value_proxy cache
        one.value, two.value
        staticconf.DictConfiguration(dict(one='four'))
        staticconf.DictConfiguration(dict(two='five'), namespace=name)
        staticconf.reload()
        assert_equal(one, 'four')
        assert_equal(two, 'three')
class TestValidateConfig(object):
    """Tests for config.validate against registered value proxies."""

    @pytest.yield_fixture(autouse=True)
    def patch_config(self):
        # isolated namespace registry plus a mocked configuration backend
        with mock.patch.dict(config.configuration_namespaces, clear=True):
            with testing.MockConfiguration():
                yield

    def test_validate_single_passes(self):
        # validation succeeds before and after the proxied key gets a value
        staticconf.DictConfiguration({})
        config.validate()
        _ = staticconf.get_string('one.two')
        staticconf.DictConfiguration({'one.two': 'nice'})
        config.validate()

    def test_validate_single_fails(self):
        # a registered proxy without a backing value fails validation
        _ = staticconf.get_int('one.two')
        assert_raises(errors.ConfigurationError, config.validate)

    def test_validate_all_passes(self):
        # all_names=True validates DEFAULT plus every named namespace
        name = 'yan'
        staticconf.DictConfiguration({}, namespace=name)
        staticconf.DictConfiguration({})
        config.validate(all_names=True)
        staticconf.get_string('one.two')
        staticconf.get_string('foo', namespace=name)
        staticconf.DictConfiguration({'one.two': 'nice'})
        staticconf.DictConfiguration({'foo': 'nice'}, namespace=name)
        config.validate(all_names=True)

    def test_validate_all_fails(self):
        # a missing value in a named namespace also fails the global check
        name = 'yan'
        _ = staticconf.get_string('foo', namespace=name)  # flake8: noqa
        assert_raises(errors.ConfigurationError,
                      config.validate,
                      all_names=True)

    def test_validate_value_token(self):
        # schema-declared values register proxies and are validated too
        class ExampleSchema(schema.Schema):
            namespace = 'DEFAULT'

            thing = schema.int()

        assert_raises(errors.ConfigurationError,
                      config.validate,
                      all_names=True)
class TestConfigHelp(object):
    """Tests for config.ConfigHelp.view_help output formatting."""

    @pytest.fixture(autouse=True)
    def setup_config_help(self):
        # register help entries across three namespaces, then split the
        # rendered help text into lines for positional inspection
        self.config_help = config.ConfigHelp()
        self.config_help.add('one',
                             validation.validate_any, None, 'DEFAULT', "the one")
        self.config_help.add('when',
                             validation.validate_time, 'NOW', 'DEFAULT', "The time")
        self.config_help.add('you sure',
                             validation.validate_bool, 'No', 'DEFAULT', "Are you?")
        self.config_help.add('one',
                             validation.validate_any, None, 'Beta', "the one")
        self.config_help.add('one',
                             validation.validate_any, None, 'Alpha', "the one")
        self.config_help.add('two',
                             validation.validate_any, None, 'Alpha', "the two")
        self.lines = self.config_help.view_help().split('\n')

    def test_view_help_format(self):
        # an entry renders as a header line followed by its help text
        line, help = self.lines[4:6]
        assert_equal(help, 'The time')
        assert_equal(line, 'when (Type: time, Default: NOW)')

    def test_view_help_format_namespace(self):
        # a non-default namespace gets its own section ending in a blank line
        namespace, one, _, two, _, blank = self.lines[9:15]
        assert_equal(namespace, 'Namespace: Alpha')
        assert one.startswith('one')
        assert two.startswith('two')
        assert_equal(blank, '')

    def test_view_help_namespace_sort(self):
        # DEFAULT renders first; remaining namespaces follow alphabetically
        lines = list(filter(lambda l: l.startswith('Namespace'), self.lines))
        expected = ['Namespace: DEFAULT', 'Namespace: Alpha', 'Namespace: Beta']
        assert_equal(lines, expected)
class TestHasDuplicateKeys(object):
    """Tests for config.has_duplicate_keys.

    Fix: corrected the misspelled test name ("dupliacte" -> "duplicate");
    pytest still discovers it via the test_ prefix, so nothing else changes.
    """

    @pytest.fixture(autouse=True)
    def setup_base_conf(self):
        self.base_conf = {'fear': 'is_the', 'mind': 'killer'}

    def test_has_duplicate_keys_false(self):
        # disjoint keys: no duplicates whatever the raise flag is
        config_data = dict(unique_keys=123)
        assert not config.has_duplicate_keys(config_data, self.base_conf, True)
        assert not config.has_duplicate_keys(config_data, self.base_conf, False)

    def test_has_duplicate_keys_raises(self):
        # duplicate key with the raise flag set -> ConfigurationError
        config_data = dict(fear=123)
        assert_raises(
            errors.ConfigurationError,
            config.has_duplicate_keys,
            config_data,
            self.base_conf,
            True)

    def test_has_duplicate_keys_no_raise(self):
        # duplicate key without the raise flag -> simply reports True
        config_data = dict(mind=123)
        assert config.has_duplicate_keys(config_data, self.base_conf, False)
class TestConfigurationWatcher(object):
    """Tests for config.ConfigurationWatcher: polling config files for
    changes and triggering reloads.
    """

    @pytest.yield_fixture(autouse=True)
    def setup_mocks_and_config_watcher(self):
        # Build the watcher against a real temporary file while time,
        # os.stat and os.path are patched, so mtime/inode checks are fully
        # controlled by the test.
        self.loader = mock.Mock()
        with mock.patch('staticconf.config.time') as self.mock_time:
            with mock.patch('staticconf.config.os.stat') as self.mock_stat:
                with tempfile.NamedTemporaryFile() as file:
                    with mock.patch('staticconf.config.os.path') as self.mock_path:
                        file.flush()
                        self.mtime = 234
                        self.mock_path.getmtime.return_value = self.mtime
                        self.mock_stat.return_value.st_ino = 1
                        self.mock_stat.return_value.st_dev = 2
                        self.filename = file.name
                        self.watcher = config.ConfigurationWatcher(
                            self.loader, self.filename)
                        yield

    def test_get_filename_list_from_string(self):
        # a single filename is normalized into a one-element list
        self.mock_path.abspath.side_effect = lambda p: p
        filename = 'thefilename.yaml'
        filenames = self.watcher.get_filename_list(filename)
        assert_equal(filenames, [filename])

    def test_get_filename_list_from_list(self):
        # a list of filenames comes back sorted
        self.mock_path.abspath.side_effect = lambda p: p
        filenames = ['b', 'g', 'z', 'a']
        expected = ['a', 'b', 'g', 'z']
        assert_equal(self.watcher.get_filename_list(filenames), expected)

    def test_should_check(self):
        self.watcher.last_check = 123456789
        self.mock_time.time.return_value = 123456789
        # Still current, but no min_interval
        assert self.watcher.should_check
        # With a min interval the same instant is too soon to check again
        self.watcher.min_interval = 3
        assert not self.watcher.should_check
        # Time has passed
        self.mock_time.time.return_value = 123456794
        assert self.watcher.should_check

    def test_file_modified_not_modified(self):
        # mtime unchanged -> no modification, but last_check is updated
        self.mock_time.time.return_value = 123460
        assert not self.watcher.file_modified()
        assert_equal(self.watcher.last_check, self.mock_time.time.return_value)

    def test_file_modified(self):
        # bump mtime past the comparator's recorded maximum
        self.watcher.comparators[0].last_max_mtime = 123456
        self.mock_path.getmtime.return_value = 123460
        assert self.watcher.file_modified()
        assert_equal(self.watcher.last_check, self.mock_time.time.return_value)

    def test_reload_default(self):
        self.watcher.reload()
        self.loader.assert_called_with()

    def test_reload_custom(self):
        # an explicit reloader callback is invoked on reload()
        reloader = mock.Mock()
        watcher = config.ConfigurationWatcher(
            self.loader, self.filename, reloader=reloader)
        watcher.reload()
        reloader.assert_called_with()
class TestInodeComparator(object):
    """Tests for config.InodeComparator inode collection."""

    def test_get_inodes_empty(self):
        # no files means no inodes
        assert config.InodeComparator([]).get_inodes() == []

    @mock.patch('staticconf.config.os.stat', autospec=True)
    def test_get_inodes(self, mock_stat):
        # each file maps to its (device, inode) pair from os.stat
        comparator = config.InodeComparator(['./one.file'])
        expected = [(mock_stat.return_value.st_dev, mock_stat.return_value.st_ino)]
        assert_equal(comparator.get_inodes(), expected)
class TestMTimeComparator(object):
    """Tests for config.MTimeComparator change detection via mtimes."""

    @mock.patch('staticconf.config.os.path.getmtime', autospec=True, return_value=1)
    def test_no_change(self, mock_mtime):
        # a constant mtime is never reported as a change
        comparator = config.MTimeComparator(['./one.file'])
        assert not comparator.has_changed()
        assert not comparator.has_changed()

    @mock.patch('staticconf.config.os.path.getmtime', autospec=True, side_effect=[0, 1, 1, 2])
    def test_changes(self, mock_mtime):
        comparator = config.MTimeComparator(['./one.file'])
        assert comparator.has_changed()      # mtime moved on
        assert not comparator.has_changed()  # mtime stayed put
        assert comparator.has_changed()      # mtime moved again

    @mock.patch('staticconf.config.os.path.getmtime', autospec=True, side_effect=[1, 2, 1])
    def test_change_when_newer_time_before_older_time(self, mock_mtime):
        comparator = config.MTimeComparator(['./one.file'])
        assert comparator.has_changed()
        # mtime moving backwards (can happen as a result of a revert)
        # still counts as a change
        assert comparator.has_changed()
class TestMTimeComparatorWithCompareFunc(object):
    """Tests for MTimeComparator built with a compare_func that routes
    failures to an error logger instead of raising.
    """

    @pytest.fixture(autouse=True)
    def setup_comparator(self):
        # comparator factory whose compare_func reports failures through
        # self._err_logger
        self._LoggingMTimeComparator = functools.partial(
            config.MTimeComparator,
            compare_func=config.build_compare_func(self._err_logger))

    @pytest.fixture(autouse=True)
    def _reset_err_logger(self):
        # captured error state, reset before every test
        self._err_filename = None
        self._exc_info = (None, None, None)

    def _err_logger(self, filename):
        # record which file failed along with the active exception
        self._err_filename = filename
        self._exc_info = sys.exc_info()

    def test_logs_error(self):
        # a missing file routes the failure to the error logger
        comparator = self._LoggingMTimeComparator(['./not.a.file'])
        assert self._err_filename == "./not.a.file"
        assert all(x is not None for x in self._exc_info)

    def test_get_most_recent_empty(self):
        # no files, no errors
        comparator = self._LoggingMTimeComparator([])
        assert self._err_filename is None
        assert all(x is None for x in self._exc_info)

    @mock.patch('staticconf.config.os.path.getmtime', autospec=True, return_value=1)
    def test_no_change(self, mock_mtime):
        comparator = self._LoggingMTimeComparator(['./one.file'])
        assert not comparator.has_changed()
        assert not comparator.has_changed()
        assert self._err_filename is None
        assert all(x is None for x in self._exc_info)

    @mock.patch('staticconf.config.os.path.getmtime', autospec=True, side_effect=[0, 1, 1, 2])
    def test_changes(self, mock_mtime):
        comparator = self._LoggingMTimeComparator(['./one.file'])
        assert comparator.has_changed()
        assert not comparator.has_changed()
        assert comparator.has_changed()
        assert self._err_filename is None
        assert all(x is None for x in self._exc_info)
class TestMD5Comparator(object):
    """Tests for config.MD5Comparator (content-hash based change detection)."""

    @pytest.yield_fixture()
    def comparator(self):
        # back the comparator with a real temp file the test can rewrite
        self.original_contents = b"abcdefghijkabcd"
        with tempfile.NamedTemporaryFile() as self.file:
            self.write_contents(self.original_contents)
            yield config.MD5Comparator([self.file.name])

    def write_contents(self, contents):
        # overwrite the file from the start and force it to disk
        self.file.seek(0)
        self.file.write(contents)
        self.file.flush()

    def test_get_hashes_empty(self):
        # no files means no hashes
        comparator = config.MD5Comparator([])
        assert comparator.get_hashes() == []

    def test_has_changed_no_changes(self, comparator):
        assert not comparator.has_changed()
        # rewriting identical bytes must not register as a change
        self.write_contents(self.original_contents)
        assert not comparator.has_changed()

    def test_has_changed_with_changes(self, comparator):
        assert not comparator.has_changed()
        self.write_contents(b"this is the new content")
        assert comparator.has_changed()
class TestReloadCallbackChain(object):
    """Tests for config.ReloadCallbackChain: a callable registry of
    callbacks that also reloads its namespace when invoked.
    """

    @pytest.fixture(autouse=True)
    def setup_callback_chain(self):
        # callbacks is a list of (key, mock) pairs
        self.callbacks = list(enumerate([mock.Mock(), mock.Mock()]))
        self.callback_chain = config.ReloadCallbackChain(callbacks=self.callbacks)

    def test_init_with_callbacks(self):
        assert_equal(self.callback_chain.callbacks, dict(self.callbacks))

    def test_add_remove(self):
        callback = mock.Mock()
        self.callback_chain.add('one', callback)
        assert_equal(self.callback_chain.callbacks['one'], callback)
        self.callback_chain.remove('one')
        assert 'one' not in self.callback_chain.callbacks

    def test_call(self):
        # invoking the chain runs every callback, then reloads its namespace
        self.callback_chain.namespace = 'the_namespace'
        with mock.patch('staticconf.config.reload') as mock_reload:
            self.callback_chain()
            for _, callback in self.callbacks:
                callback.assert_called_with()
            mock_reload.assert_called_with(name='the_namespace', all_names=False)
class TestConfigFacade(object):
    """Tests for config.ConfigFacade wiring between the watcher and the
    reload callback chain.
    """

    @pytest.fixture(autouse=True)
    def setup_facade(self):
        # facade built on a fully mocked watcher/reloader pair
        self.mock_watcher = mock.create_autospec(config.ConfigurationWatcher)
        self.mock_watcher.get_reloader.return_value = mock.create_autospec(
            config.ReloadCallbackChain)
        self.facade = config.ConfigFacade(self.mock_watcher)

    def test_load(self):
        # load() builds a watcher, loads config, and adopts its reloader
        filename, namespace = "filename", "namespace"
        loader = mock.Mock()
        with mock.patch(
                'staticconf.config.ConfigurationWatcher',
                autospec=True) as mock_watcher_class:
            facade = config.ConfigFacade.load(filename, namespace, loader)
            facade.watcher.load_config.assert_called_with()
            assert_equal(facade.watcher, mock_watcher_class.return_value)
            reloader = facade.callback_chain
            assert_equal(reloader, facade.watcher.get_reloader())

    def test_load_passes_comparators_to_configuration_watcher(self):
        # the comparators kwarg must be forwarded verbatim to the watcher
        filename, namespace = "filename", "namespace"
        loader = mock.Mock()
        comparator = mock.Mock(name='MockComparator')
        with mock.patch(
                'staticconf.config.ConfigurationWatcher',
                autospec=True
        ) as mock_watcher_class:
            config.ConfigFacade.load(filename, namespace, loader, comparators=[comparator])
            mock_watcher_class.assert_called_with(mock.ANY, filename, min_interval=mock.ANY, reloader=mock.ANY, comparators=[comparator])

    def test_add_callback(self):
        name, func = 'name', mock.Mock()
        self.facade.add_callback(name, func)
        self.facade.callback_chain.add.assert_called_with(name, func)

    def test_reload_if_changed(self):
        self.facade.reload_if_changed()
        self.mock_watcher.reload_if_changed.assert_called_with(force=False)
@pytest.mark.acceptance
class TestConfigFacadeAcceptance(object):
@pytest.fixture(autouse=True)
def setup_env(self):
self.file = tempfile.NamedTemporaryFile()
self.write(b"one: A")
def write(self, content, mtime_seconds=0):
time.sleep(0.03)
self.file.file.seek(0)
self.file.write(content)
self.file.flush()
tstamp = time.time() | |
######################################################################################################################
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License Version 2.0 (the "License"). You may not use this file except in compliance #
# with the License. A copy of the License is located at #
# #
# http://www.apache.org/licenses/ #
# #
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES #
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions #
# and limitations under the License. #
######################################################################################################################
import os
import types
from datetime import datetime
import actions
import boto_retry
import handlers
import handlers.task_tracking_table
import services
from handlers.task_tracking_table import TaskTrackingTable
from helpers import safe_dict, safe_json, full_stack
from helpers.dynamodb import unpack_record
from main import lambda_handler
from outputs.queued_logger import QueuedLogger
from outputs.result_notifications import ResultNotifications
# Attribute in the concurrency table holding the number of active instances per key
ACTIVE_INSTANCES = "InstanceCount"
# Key attribute name of the concurrency table
CONCURRENCY_ID = handlers.TASK_TR_CONCURRENCY_ID
# Environment variable that enables debug logging for this handler.
# NOTE: only this Python identifier carries the historical "TACKING" typo;
# it is kept because renaming it would break existing references.
ENV_DEBUG_TASK_TACKING_HANDLER = "DEBUG_TASK_TRACKING_HANDLER"

# Task update action codes; indexes into TASK_ACTION_STRINGS below
NEW_TASK = 0
FINISHED_TASK = 1
FINISHED_CONCURRENCY_TASK = 2
CHECK_COMPLETION = 3
DELETE_ITEM = 4
START_WAITING_ACTION = 5

# Human readable names for the task action codes above (see task_string)
TASK_ACTION_STRINGS = [
    "New task",
    "Finished task",
    "Finished task with concurrency handling",
    "Check task completion",
    "Delete task item",
    "Start waiting task"
]

# Log / error message templates
WARN_DELETING_RESOURCES = "Error deleting resources from bucket {} with key {}"
DEBUG_ACTION = "Action is \"{}\" for task \"{}\", task-id is {}"
# Fixed duplicated word ("in in") in the original message text
DEBUG_DRYRUN = "Action will be executed in dry-run mode"
DEBUG_LAMBDA = "Lambda function invoked {}"
DEBUG_ACTION_PARAMETERS = "Action parameters are {}"
DEBUG_RUNNING_ECS_TASK = "Running {} step of task {} as ECS job"
DEBUG_RESULT = "Handling actions tracking update took {:>.3f} seconds"
DEBUG_MEMORY_SIZE = "Task memory allocation for executing lambda is {}"
DEBUG_LAMBDA_FUNCTION_ = "Executing action with Lambda function {}, payload is {}"
DEBUG_START_WAITING = "Waiting list count for ConcurrencyId \"{}\" is {}, action is \"{}\", starting waiting " \
                      "task \"{}\" with id {}"
DEBUG_WAITING = "The waiting list for action \"{}\" with concurrency key \"{}\" is {}, the maximum number of concurrent " \
                "running actions for this key is {}, action with id \"{}\" has been put in waiting state"
DEBUG_DELETING_RESOURCES_FROM_S3 = "Deleting resource object {} from bucket {}, {}"

ERR_RUNNING_TASK = "Error running task {}, {}, {}"

# Per-day log stream name: <classname>-YYYYMMDD
LOG_STREAM = "{}-{:0>4d}{:0>2d}{:0>2d}"

SCHEDULER_LAMBDA_FUNCTION_DEFAULT = "SchedulerDefault"
SIZED_SCHEDULER_NAME_TEMPLATE = "Scheduler{:0>04d}"
class TaskTrackingHandler(object):
    """
    Class to handle events triggered by inserting new items in the actions tracking table.
    Also handles updates to the concurrency table (see is_handling_request, which accepts
    DynamoDB stream events for either table).
    """
def __init__(self, event, context):
"""
Initializes the instance.
:param event: Handled event
:param context: Context if running in Lambda
"""
self._context = context
self._event = event
self._tracking_table = None
self._concurrency_table = None
self.started_tasks = 0
self.started_waiting_tasks = 0
self.waiting_for_execution_tasks = 0
self.started_completion_checks = 0
self.finished_concurrency_tasks = 0
self.done_work = False
self.invoked_lambda_functions = []
self.events_client = None
self._s3_client = None
self._db_client = None
# setup logging
classname = self.__class__.__name__
dt = datetime.utcnow()
logstream = LOG_STREAM.format(classname, dt.year, dt.month, dt.day)
self._logger = QueuedLogger(logstream=logstream,
context=self._context,
buffersize=20,
debug=os.getenv(ENV_DEBUG_TASK_TACKING_HANDLER, "false").lower() == "true")
@classmethod
def is_handling_request(cls, event, context):
# In simulation the handler is called directly when inserting or updating items in the table
if handlers.running_local(context):
return False
if event.get("Records", [{}])[0].get("eventSource", "") != "aws:dynamodb":
return False
source_arn = event["Records"][0]["eventSourceARN"]
table_name = source_arn.split("/")[1]
return table_name in [os.getenv(handlers.ENV_ACTION_TRACKING_TABLE), os.getenv(handlers.ENV_CONCURRENCY_TABLE)]
@classmethod
def task_string(cls, action):
return TASK_ACTION_STRINGS[action] if 0 <= action < len(TASK_ACTION_STRINGS) else "Unknown"
@property
def tracking_table(self):
"""
Gets an instance of the tracking table and use it in subsequent calls
:return: Instance tracking table
"""
if self._tracking_table is None:
self._tracking_table = TaskTrackingTable(self._context, self._logger)
return self._tracking_table
@property
def s3_client(self):
if self._s3_client is None:
self._s3_client = boto_retry.get_client_with_retries("s3", ["delete_item"], logger=self._logger)
return self._s3_client
@property
def db_client(self):
if self._db_client is None:
self._db_client = boto_retry.get_client_with_retries("dynamodb", ["delete_item"], logger=self._logger)
return self._db_client
def _get_action_concurrency_key(self, item):
"""
Gets the concurrency key for a tasks action
:param item: The task item
:return: The concurrency key for the tasks action
"""
action = item[handlers.TASK_TR_ACTION]
# get the name of the optional method to return the concurrency key
action_class = actions.get_action_class(action)
concurrency_key_method = getattr(action_class, handlers.ACTION_CONCURRENCY_KEY_METHOD, None)
# prepare parameters for calling static function that returns the concurrency key
if concurrency_key_method is not None:
get_key_params = {
actions.ACTION_PARAM_RESOURCES: handlers.get_item_resource_data(item, self._context),
actions.ACTION_PARAM_ACCOUNT: item[handlers.TASK_TR_ACCOUNT],
actions.ACTION_PARAM_STACK: os.getenv(handlers.ENV_STACK_NAME),
actions.ACTION_PARAM_STACK_ID: os.getenv(handlers.ENV_STACK_ID),
actions.ACTION_PARAM_TASK_ID: item[handlers.TASK_TR_ID],
actions.ACTION_PARAM_TASK: item[handlers.TASK_TR_NAME]
}
get_key_params.update(item.get(handlers.TASK_TR_PARAMETERS))
return concurrency_key_method(get_key_params)
else:
# if this method is not available for action then use the name of the action as the key
return action
def _enter_waiting_list(self, concurrency_key):
"""
Adds 1 to waiting list counter for the specified concurrency key and returns new value
:param concurrency_key: Concurrency key for counter
:return: Updated counter
"""
# update/read counter for the concurrency key
if not handlers.running_local(self._context):
resp = self.concurrency_table.update_item_with_retries(Key={CONCURRENCY_ID: concurrency_key},
UpdateExpression="ADD InstanceCount :one SET RunNext=:run",
ExpressionAttributeValues={":one": 1, ":run": False},
ReturnValues="UPDATED_NEW")
return int(resp["Attributes"].get("InstanceCount", 0))
else:
resp = self.concurrency_table.get_item_with_retries(Key={CONCURRENCY_ID: concurrency_key})
return resp.get("Item", {}).get(ACTIVE_INSTANCES, 0)
    def _leave_waiting_list(self, task_id, concurrency_key):
        """
        Subtracts 1 from waiting list counter for the specified concurrency key and returns new value. If the counter reaches 0
        then the entry for the concurrency key is removed
        :param task_id: Id of the task item that is leaving the waiting list
        :param concurrency_key: Concurrency key for counter
        :return: Updated counter
        """
        # make a consistent read of the task
        self.tracking_table.get_task_item(task_id)
        if not handlers.running_local(self._context):
            # Atomically decrement the counter and flag that the next waiting task may run.
            resp = self.concurrency_table.update_item_with_retries(Key={CONCURRENCY_ID: concurrency_key},
                                                                   UpdateExpression="ADD InstanceCount :min_one SET RunNext=:run",
                                                                   ExpressionAttributeValues={":min_one": -1, ":run": True},
                                                                   ReturnValues="UPDATED_NEW")
            # Clamp at 0 to guard against the counter ever going negative.
            count = max(0, int(resp["Attributes"].get(ACTIVE_INSTANCES, 0)))
            # remove entry if no more waiting items for this key
            if count == 0:
                self.concurrency_table.delete_item_with_retries(Key={CONCURRENCY_ID: concurrency_key})
        else:
            # Local simulation: read the counter, then emit a simulated DynamoDB
            # stream UPDATE event (new image first, old image second).
            resp = self.concurrency_table.get_item_with_retries(Key={CONCURRENCY_ID: concurrency_key})
            count = resp.get("Item", {}).get(ACTIVE_INSTANCES, 0)
            TaskTrackingTable._run_local_stream_event(os.getenv(handlers.ENV_CONCURRENCY_TABLE), "UPDATE",
                                                      {"ConcurrencyId": concurrency_key, "InstanceCount": count},
                                                      {"ConcurrencyId": concurrency_key, "InstanceCount": count + 1},
                                                      self._context)
        return count
    @property
    def concurrency_table(self):
        """
        Returns the DynamoDB table holding the waiting-list counters per concurrency key,
        created on first access and reused afterwards.
        (The original docstring described a "last execution time" table, which does not
        match what this property builds or how it is used by the waiting-list methods.)
        :return: concurrency (waiting list) table
        """
        if self._concurrency_table is None:
            tablename = os.getenv(handlers.ENV_CONCURRENCY_TABLE)
            self._logger.debug("Using concurrency table {}", tablename)
            self._concurrency_table = services.get_session().resource("dynamodb").Table(tablename)
            # Add *_with_retries variants for the methods used by the waiting-list logic.
            boto_retry.add_retry_methods_to_resource(self._concurrency_table, ["update_item", "get_item", "delete_item"],
                                                     context=self._context)
        return self._concurrency_table
def _is_wait_listed(self, item):
"""
Test if there is a max concurrency level for the tasks action. If this is the case then a concurrency key is retrieved
from the action and it is used to update the counter in the concurrency table for that key. The updated counter is tested
against the max concurrency level for the tasks action
:param item: task item
:return: True if counter for tasks action concurrency key > mac concurrency level, False if it is less or equal or the
action has no max concurrency level
"""
action = item.get(handlers.TASK_TR_ACTION, None)
if action is None:
return False
action_properties = actions.get_action_properties(action)
# test if there are concurrency restrictions
max_action_concurrency = action_properties.get(actions.ACTION_MAX_CONCURRENCY)
# no maximum
if max_action_concurrency in [None, 0]:
return False
# property may be a lambda function, call the function with parameters of task as lambda parameters
if types.FunctionType == type(max_action_concurrency):
parameters = item[handlers.TASK_TR_PARAMETERS]
max_action_concurrency = max_action_concurrency(parameters)
if max_action_concurrency in [None, 0]:
return False
# get the key for the tasks action
concurrency_key = self._get_action_concurrency_key(item)
# enter the waiting list for that key
count = int(self._enter_waiting_list(concurrency_key))
# set status to waiting if count > max concurrency level
status = handlers.STATUS_WAITING if count >= int(max_action_concurrency) else None
# store the concurrency key twice, the concurrency id is used for the index in the GSI and is removed after the
# action is handled so it does not longer show in the GSI, but we keep another copy in the task tracking table that
# we need to decrement the counter in the waiting list and possible start waiting instances with the same key
self.tracking_table.update_task(item[handlers.TASK_TR_ID],
task=item[handlers.TASK_TR_NAME],
task_metrics=item.get(handlers.TASK_TR_METRICS, False),
status=status,
status_data={
handlers.TASK_TR_CONCURRENCY_KEY: concurrency_key,
handlers.TASK_TR_CONCURRENCY_ID: concurrency_key
})
if count > max_action_concurrency:
self._logger.debug(DEBUG_WAITING, item[handlers.TASK_TR_ACTION], concurrency_key, count,
max_action_concurrency, item[handlers.TASK_TR_ID])
return True
return False
def _start_task_execution(self, task_item, action=handlers.HANDLER_ACTION_EXECUTE):
"""
Creates an instance of the lambda function that executes the tasks action. It first checks is the action has specific memory
requirements and based on this it creates a copy of this instance or one configured for the required memory. All
information for executing the action is passed in the event.
:param task_item: Task item for which action is executed
:return:
"""
try:
self._logger.debug("Entering start_task_execution ({}) with task {}", action, safe_json(task_item, indent=3))
# Create event for execution of the action and set its action so that is picked up by the execution handler
event = {i: task_item.get(i) | |
import argparse
import random
import numpy as np
import torch
import spacy
import scispacy
import json
import os
import logging
import pandas as pd
import sys
from tqdm import tqdm
from datasets import Dataset
from functools import partial
from dataclasses import dataclass, field
from custom_trainer import CustomTrainer
import ipdb
from collections import defaultdict
from scipy.special import softmax
from typing import Optional, Union
from datasets import load_dataset, load_metric, concatenate_datasets
from generate_claim_variants import kbin
from transformers import pipeline
import transformers
from transformers import (
AutoModelForSeq2SeqLM,
AutoTokenizer,
Seq2SeqTrainingArguments,
HfArgumentParser,
set_seed,
PreTrainedTokenizerBase,
PreTrainedModel,
DataCollatorForSeq2Seq,
AutoModelForCausalLM
)
from ParagraphJointModel.paragraph_model_dynamic import JointParagraphClassifier
from ParagraphJointModel.dataset import SciFactParagraphBatchDataset
from ParagraphJointModel.scifact_joint_paragraph_dynamic_prediction import predict, post_process_stance
from ParagraphJointModel.util import stance2json, rationale2json, merge_json
# Module-level logger; level and handlers are configured in the __main__ block.
logger = logging.getLogger(__name__)
@dataclass
class DataAndModelArguments:
    """
    Arguments for setting up training/evaluation which aren't captured by huggingface.
    Parsed together with Seq2SeqTrainingArguments via HfArgumentParser in __main__.
    """
    # Required: generation model to fine-tune / generate with.
    model_name: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    output_claim_dir: Optional[str] = field(
        default=None, metadata={"help": "Head directory to output all generated files"}
    )
    # Fact-checking model inputs.
    external_corpus_file: Optional[str] = field(
        default=None, metadata={"help": "Corpus for fact checking model"}
    )
    internal_corpus_file: Optional[str] = field(
        default=None, metadata={"help": "Other paragraphs in citance documents"}
    )
    fc_model_checkpoint: Optional[str] = field(
        default=None, metadata={"help": "Location of pretrained fact checking model"}
    )
    fc_model_name: Optional[str] = field(
        default=None, metadata={"help": "Name of fact checking base model"}
    )
    num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": "Number of beams to use for evaluation. This argument will be passed to ``model.generate``, "
                    "which is used during ``evaluate`` and ``predict``."
        },
    )
    # Dataset selection (paths/names for the datasets library).
    train_dset: str = field(
        default='squad', metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    val_dset: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use for validation."}
    )
    predict_dset: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use for final generation."}
    )
    test_dset: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use for generating claims for human evaluation."}
    )
    should_log: bool = field(
        default=True,
        metadata={"help": "Whether or not to log"},
    )
def enforce_reproducibility(seed=1000):
    """Seed every RNG in use (torch CPU/CUDA, cuDNN flags, python, numpy, transformers)."""
    # Torch: CPU and all CUDA devices. Atomic CUDA operations can still be
    # nondeterministic because the order of parallel operations is unknown.
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # cuDNN: prefer deterministic kernels, disable auto-tuning.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    # System RNGs and the transformers helper.
    random.seed(seed)
    np.random.seed(seed)
    set_seed(seed)
def data_preprocess(tokenizer, dset, examples):
    """
    Tokenize a batch of examples for seq2seq training/generation.

    :param tokenizer: Huggingface tokenizer for the model
    :param dset: Name of the dataset being processed (bound via functools.partial; unused here)
    :param examples: Batch dict with at least 'context' (inputs) and 'claims' (targets)
    :return: Tokenized model inputs with 'labels' set, plus default metadata columns
             ('doc_id', 'paper_id', 'evidence', 'score', 'orig_context', 'citance',
             'num_return_sequences') when they are absent from the batch
    """
    # list() instead of a copying comprehension — same result, clearer intent.
    inputs = list(examples['context'])
    targets = list(examples['claims'])
    model_inputs = tokenizer(inputs, max_length=tokenizer.model_max_length, truncation=True)

    # Setup the tokenizer for targets (required by some seq2seq tokenizers).
    with tokenizer.as_target_tokenizer():
        labels = tokenizer(targets, max_length=tokenizer.model_max_length, truncation=True)
    model_inputs["labels"] = labels["input_ids"]

    n = len(examples['context'])
    if 'doc_id' not in examples:
        # Fill metadata columns with defaults so downstream code can rely on them.
        model_inputs['doc_id'] = [''] * n
        model_inputs['paper_id'] = [''] * n
        # Fresh inner list per row: [['']] * n would alias one shared list object.
        model_inputs['evidence'] = [[''] for _ in range(n)]
        model_inputs['score'] = [0.0] * n
        model_inputs['orig_context'] = [''] * n
        model_inputs['citance'] = [''] * n
    if 'num_return_sequences' not in examples:
        model_inputs['num_return_sequences'] = [0] * n
    return model_inputs
def sort_fc_claims(preds, original_claims):
    """
    Scores each claim using the formula:
    $$ s = p[support] - p[contradict] $$
    Returns the claims sorted by this score in descending order
    :param preds: The raw logits from ParagraphJointModel for each evidence sample for each claim
    :param original_claims: The original generated claims
    :return: Sorted claims with their fact checking score
    """
    claims_by_id = {claim['id']: claim for claim in original_claims}
    for pred in preds:
        # Softmax each evidence sample's logits, then keep the best
        # support-minus-contradict margin over all evidence samples.
        prob_dists = (softmax(pred['evidence'][key]['score']) for key in pred['evidence'])
        best_margin = max(dist[1] - dist[2] for dist in prob_dists)
        claims_by_id[pred['id']]['score'] = best_margin
    return sorted(claims_by_id.values(), key=lambda claim: claim['score'], reverse=True)
def compute_metrics(tokenizer, metric, eval_preds):
    """
    Decode predictions/labels and compute ROUGE, flattened into scalar metrics.

    :param tokenizer: Tokenizer used to decode token ids back to text
    :param metric: A loaded rouge metric (e.g. datasets.load_metric('rouge'))
    :param eval_preds: (predictions, labels) pair of id arrays; -100 marks ignored positions
    :return: Dict mapping '{rougeN}_{low|mid|high}_{p|r|fmeasure}' to floats rounded to 6 places
    """
    preds, labels = eval_preds
    if isinstance(preds, tuple):
        preds = preds[0]
    # Replace the -100 ignore index with the pad token so decoding works.
    preds = np.where(preds != -100, preds, tokenizer.pad_token_id)
    decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True)
    labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
    decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)

    # Some simple post-processing
    decoded_preds = [pred.strip() for pred in decoded_preds]
    decoded_labels = [lab.strip() for lab in decoded_labels]

    rouge = metric.compute(predictions=decoded_preds, references=decoded_labels)
    # Flatten the AggregateScore objects (low/mid/high x precision/recall/fmeasure)
    # with loops instead of spelling out all 36 keys by hand; the generated keys
    # and values are identical to the previous explicit dict.
    result = {}
    for rouge_type in ('rouge1', 'rouge2', 'rougeL', 'rougeLsum'):
        for level in ('low', 'mid', 'high'):
            score = getattr(rouge[rouge_type], level)
            result[f'{rouge_type}_{level}_p'] = score.precision
            result[f'{rouge_type}_{level}_r'] = score.recall
            result[f'{rouge_type}_{level}_fmeasure'] = score.fmeasure
    return {k: round(v, 6) for k, v in result.items()}
def generate_claims(model, gen_dset, dl, tokenizer, device):
    """
    Run generation using the given model on the given dataset
    :param model: BART model to use for generation
    :param gen_dset: The original dataset
    :param dl: A dataloader to use for generation
    :param tokenizer: A tokenizer for the given model
    :param device: What device to run on
    :return: The set of generated claims to use for the generative model, and the same claims formatted
    for input to the fact checking model
    """
    # Predict
    all_samples = []
    # Running offset into gen_dset, advanced by each batch size so that dataset
    # rows stay aligned with dataloader batches (assumes dl iterates gen_dset in
    # order without shuffling -- confirm against the dataloader construction).
    j = 0
    for b in tqdm(dl):
        input_ids = b['input_ids'].to(device)
        # Get the number of return sequences for this batch
        n_return_sequences = gen_dset['num_return_sequences'][j:j+input_ids.shape[0]]
        n_gen_seq = max(n_return_sequences)
        j += input_ids.shape[0]
        # Generate the max number of needed sequences for the batch
        # NOTE(review): top_k is set to n_gen_seq -- verify this sampling setting is intended
        samples = model.generate(
            input_ids,
            max_length=tokenizer.model_max_length,
            early_stopping=True,
            do_sample=True,
            num_return_sequences=n_gen_seq,
            top_k=n_gen_seq
        )
        # Regroup the flat (batch * n_gen_seq, len) output to one row of
        # n_gen_seq sequences per input sample
        samples = samples.reshape((input_ids.shape[0], n_gen_seq, -1))
        # Just get the number of sequences needed for each sample
        all_samples.extend([s[:n_seq] for s,n_seq in zip(list(samples.detach().cpu().numpy()), n_return_sequences)])
    fc_claim_inputs = []
    generated_claims = []
    # Claims emitted so far per document id; used to build unique "<doc_id>_<n>" claim ids
    count = defaultdict(int)
    for id, context, claims, evidence, orig_context, citance, paper_id, num_return_sequences in zip(gen_dset['doc_id'],
                                                                                                    gen_dset['context'],
                                                                                                    all_samples,
                                                                                                    gen_dset['evidence'],
                                                                                                    gen_dset['orig_context'],
                                                                                                    gen_dset['citance'],
                                                                                                    gen_dset['paper_id'],
                                                                                                    gen_dset['num_return_sequences']):
        # Decoding into a set removes duplicate generations, so a document can end
        # up with fewer than num_return_sequences distinct claims
        gen_claims = set([tokenizer.decode(c, skip_special_tokens=True, clean_up_tokenization_spaces=False) for c in claims])
        for c in gen_claims:
            n = count[id]
            # Entry consumed by the generation pipeline
            generated_claims.append(
                {'id': f"{id}_{n}", 'context': context,
                 'generated_claim': c, 'evidence': evidence, 'orig_context': orig_context, 'citance': citance,
                 'paper_id': paper_id, 'num_return_sequences': num_return_sequences})
            # Same claim formatted as input for the fact-checking model
            fc_claim_inputs.append({'id': f"{id}_{n}", 'claim': c, 'evidence': {}, 'cited_doc_ids': evidence,
                                    'retrieved_doc_ids': evidence})
            count[id] += 1
    return generated_claims, fc_claim_inputs
if __name__ == '__main__':
parser = HfArgumentParser((DataAndModelArguments, Seq2SeqTrainingArguments))
dm_args, training_args = parser.parse_args_into_dataclasses()
# See if CUDA available
device = torch.device("cpu")
if torch.cuda.is_available():
print("Training on GPU")
device = torch.device("cuda:0")
seed = training_args.seed
model_name = dm_args.model_name
n_gpu = training_args.n_gpu
should_log = dm_args.should_log
train_dset_name = dm_args.train_dset
val_dset_name = dm_args.val_dset
enforce_reproducibility(seed)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
logger.setLevel(logging.INFO if should_log else logging.WARN)
# Set the verbosity to info of the Transformers logger (on main process only):
if should_log:
transformers.utils.logging.set_verbosity_info()
logger.info(f"Training/evaluation parameters {training_args}")
# FC model setup
fc_tokenizer = AutoTokenizer.from_pretrained(dm_args.fc_model_name)
fc_model = JointParagraphClassifier(dm_args.fc_model_name, 1024,
0.0)
state_dict = torch.load(dm_args.fc_model_checkpoint)
fc_model.load_state_dict(state_dict, strict=False)
# NLI model for negative claim generation
nli = pipeline('sentiment-analysis', model='roberta-large-mnli', return_all_scores=True, device=0)
# Language model for negative claim generation
lm = AutoModelForCausalLM.from_pretrained('gpt2')
lm_tk = AutoTokenizer.from_pretrained('gpt2')
nlp = spacy.load('en_core_sci_md')
# Load data
# Create train/val datasets and processor
tokenizer = AutoTokenizer.from_pretrained(model_name)
train_preprocessor = partial(data_preprocess, tokenizer, train_dset_name)
train_dset_base = load_dataset('json', data_files=[train_dset_name])
train_dset = train_dset_base.map(train_preprocessor, batched=True)['train']
val_preprocessor = partial(data_preprocess, tokenizer, val_dset_name)
val_dset_base = load_dataset('json', data_files=[val_dset_name])
val_dset = val_dset_base.map(val_preprocessor, batched=True)['train']
metric = load_metric('rouge')
with open(dm_args.predict_dset) as f:
citances = [json.loads(l) for l in f]
# Prepare prediction input
prediction_data = defaultdict(list)
for citance in tqdm(citances):
prediction_data['doc_id'].append(citance['doc_id'])
prediction_data['paper_id'].append(citance['paper_id'])
prediction_data['orig_context'].append(citance['context'])
prediction_data['citance'].append(citance['text'])
prediction_data['context'].append(citance['generation_context'])
prediction_data['claims'].append("")
prediction_data['evidence'].append(citance['evidence'])
prediction_data['num_return_sequences'].append(citance['num_return_sequences'])
# Create predict dset
pred_preprocessor = partial(data_preprocess, tokenizer, 'citeworth')
gen_dset_base = Dataset.from_dict(prediction_data)
gen_dset = gen_dset_base.map(pred_preprocessor, batched=True)
# Run on the final claims for annotation
with open(dm_args.test_dset) as f:
test_citances = [json.loads(l) for l in f]
# Prepare prediction input
test_data = defaultdict(list)
for citance in tqdm(test_citances):
test_data['doc_id'].append(citance['doc_id'])
test_data['paper_id'].append(citance['paper_id'])
test_data['orig_context'].append(citance['context'])
test_data['citance'].append(citance['text'])
test_data['context'].append(citance['generation_context'])
test_data['claims'].append("")
test_data['evidence'].append(citance['evidence'])
test_data['num_return_sequences'].append(citance['num_return_sequences'])
# Create test dset
test_preprocessor = partial(data_preprocess, tokenizer, 'citeworth')
test_dset_base = Dataset.from_dict(test_data)
test_dset = test_dset_base.map(test_preprocessor, batched=True)
final_claims = []
if len(gen_dset) == 0:
break
if not os.path.exists(f"{dm_args.output_claim_dir}"):
os.makedirs(f"{dm_args.output_claim_dir}")
save_dir = f"{dm_args.output_claim_dir}/{epoch}"
# Create the model
model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
data_collator = DataCollatorForSeq2Seq(
tokenizer,
model=model,
label_pad_token_id=-100,
padding='longest'
)
trainer = CustomTrainer(
model=model,
args=training_args,
| |
font-size: 32px !important;
line-height: 32px !important;
}
}
/* ANDROID CENTER FIX */
div[style*="margin: 16px 0;"] { margin: 0 !important; }
</style>
</head>
<body style="background-color: #f4f4f4; margin: 0 !important; padding: 0 !important;">
<!-- HIDDEN PREHEADER TEXT -->
<div style="display: none; font-size: 1px; color: #fefefe; line-height: 1px; font-family: 'Lato', Helvetica, Arial, sans-serif; max-height: 0px; max-width: 0px; opacity: 0; overflow: hidden;">
Latest Progress.
</div>
<table border="0" cellpadding="0" cellspacing="0" width="100%">
<!-- LOGO -->
<tr>
<td bgcolor="#FFA73B" align="center">
<!--[if (gte mso 9)|(IE)]>
<table align="center" border="0" cellspacing="0" cellpadding="0" width="600">
<tr>
<td align="center" valign="top" width="600">
<![endif]-->
<table border="0" cellpadding="0" cellspacing="0" width="100%" style="max-width: 600px;" >
<tr>
<td align="center" valign="top" style="padding: 40px 10px 40px 10px;">
<a href="http://litmus.com" target="_blank">
</a>
</td>
</tr>
</table>
<!--[if (gte mso 9)|(IE)]>
</td>
</tr>
</table>
<![endif]-->
</td>
</tr>
<!-- HERO -->
<tr>
<td bgcolor="#FFA73B" align="center" style="padding: 0px 10px 0px 10px;">
<!--[if (gte mso 9)|(IE)]>
<table align="center" border="0" cellspacing="0" cellpadding="0" width="600">
<tr>
<td align="center" valign="top" width="600">
<![endif]-->
<table border="0" cellpadding="0" cellspacing="0" width="100%" style="max-width: 600px;" >
<tr>
<td bgcolor="#ffffff" align="center" valign="top" style="padding: 40px 20px 20px 20px; border-radius: 4px 4px 0px 0px; color: #111111; font-family: 'Lato', Helvetica, Arial, sans-serif; font-size: 48px; font-weight: 400; letter-spacing: 4px; line-height: 48px;">
<h1 style="font-size: 48px; font-weight: 400; margin: 0;">Experiment Complete</h1>
</td>
</tr>
</table>
<!--[if (gte mso 9)|(IE)]>
</td>
</tr>
</table>
<![endif]-->
</td>
</tr>
<!-- COPY BLOCK -->
<tr>
<td bgcolor="#f4f4f4" align="center" style="padding: 0px 10px 0px 10px;">
<!--[if (gte mso 9)|(IE)]>
<table align="center" border="0" cellspacing="0" cellpadding="0" width="600">
<tr>
<td align="center" valign="top" width="600">
<![endif]-->
<table border="0" cellpadding="0" cellspacing="0" width="100%" style="max-width: 600px;" >
<!-- COPY -->
<tr>
<td bgcolor="#ffffff" align="left" style="padding: 20px 30px 40px 30px; color: #666666; font-family: 'Lato', Helvetica, Arial, sans-serif; font-size: 18px; font-weight: 400; line-height: 25px;" >
<p style="margin: 0;">An experiment has been completed. New experiment options. </p>
</td>
</tr>
<!-- BULLETPROOF BUTTON -->
<!-- <tr>
<td bgcolor="#ffffff" align="left">
<table width="100%" border="0" cellspacing="0" cellpadding="0">
<tr>
<td bgcolor="#ffffff" align="center" style="padding: 20px 30px 60px 30px;">
<table border="0" cellspacing="0" cellpadding="0">
<tr>
<td align="center" style="border-radius: 3px;" bgcolor="#ec6d64"><a href="https://litmus.com" target="_blank" style="font-size: 20px; font-family: Helvetica, Arial, sans-serif; color: #ffffff; text-decoration: none; color: #ffffff; text-decoration: none; padding: 15px 25px; border-radius: 2px; border: 1px solid #ec6d64; display: inline-block;">Update Payment Info</a></td>
</tr>
</table>
</td>
</tr>
</table>
</td>
</tr> -->
</table>
<!--[if (gte mso 9)|(IE)]>
</td>
</tr>
</table>
<![endif]-->
</td>
</tr>
<!-- COPY CALLOUT -->
<tr>
<td bgcolor="#f4f4f4" align="center" style="padding: 0px 10px 0px 10px;">
<!--[if (gte mso 9)|(IE)]>
<table align="center" border="0" cellspacing="0" cellpadding="0" width="600">
<tr>
<td align="center" valign="top" width="600">
<![endif]-->
<table border="0" cellpadding="0" cellspacing="0" width="100%" style="max-width: 600px;" >
<!-- HEADLINE -->
<tr>
<td bgcolor="#111111" align="left" style="padding: 40px 30px 20px 30px; color: #ffffff; font-family: 'Lato', Helvetica, Arial, sans-serif; font-size: 18px; font-weight: 400; line-height: 25px;" >
<h2 style="font-size: 24px; font-weight: 400; margin: 0;">Experiment Results</h2>
</td>
</tr>
<!-- COPY -->
<tr>
<td bgcolor="#111111" align="left" style="padding: 0px 30px 20px 30px; color: #666666; font-family: 'Lato', Helvetica, Arial, sans-serif; font-size: 18px; font-weight: 400; line-height: 25px;" >
<p style="margin: 0;">
"""
# Second half of the HTML body for the "experiment complete" e-mail.
# It begins by closing the results paragraph opened by the preceding template
# fragment; presumably the experiment results text is inserted between the two
# fragments when the message is assembled (confirm against the sender code).
exp_complete_2_eng = \
"""
</p>
</td>
</tr>
<!-- COPY -->
<!-- <tr>
<td bgcolor="#111111" align="left" style="padding: 0px 30px 40px 30px; border-radius: 0px 0px 4px 4px; color: #666666; font-family: 'Lato', Helvetica, Arial, sans-serif; font-size: 18px; font-weight: 400; line-height: 25px;" >
<p style="margin: 0;"><a href="http://litmus.com" target="_blank" style="color: #ec6d64;">Learn more about annual accounts</a></p>
</td>
</tr> -->
</table>
<!--[if (gte mso 9)|(IE)]>
</td>
</tr>
</table>
<![endif]-->
</td>
</tr>
<!-- SUPPORT CALLOUT -->
<tr>
<td bgcolor="#f4f4f4" align="center" style="padding: 30px 10px 0px 10px;">
<!--[if (gte mso 9)|(IE)]>
<table align="center" border="0" cellspacing="0" cellpadding="0" width="600">
<tr>
<td align="center" valign="top" width="600">
<![endif]-->
<table border="0" cellpadding="0" cellspacing="0" width="100%" style="max-width: 600px;" >
<!-- HEADLINE -->
<tr>
<td bgcolor="#FFECD1" align="center" style="padding: 30px 30px 30px 30px; border-radius: 4px 4px 4px 4px; color: #666666; font-family: 'Lato', Helvetica, Arial, sans-serif; font-size: 18px; font-weight: 400; line-height: 25px;" >
<h2 style="font-size: 20px; font-weight: 400; color: #111111; margin: 0;">Spotting the programmer? </h2>
<!-- <p style="margin: 0;"><a href="http://litmus.com" target="_blank" style="color: #ec6d64;">We’re here, ready to talk</a></p> -->
<p style="margin: 0;">Tell him/her to run new experiments :-)</p>
</td>
</tr>
</table>
<!--[if (gte mso 9)|(IE)]>
</td>
</tr>
</table>
<![endif]-->
</td>
</tr>
<!-- FOOTER -->
<tr>
<td bgcolor="#f4f4f4" align="center" style="padding: 0px 10px 0px 10px;">
<!--[if (gte mso 9)|(IE)]>
<table align="center" border="0" cellspacing="0" cellpadding="0" width="600">
<tr>
<td align="center" valign="top" width="600">
<![endif]-->
<table border="0" cellpadding="0" cellspacing="0" width="100%" style="max-width: 600px;" >
<!-- NAVIGATION -->
<tr>
<td bgcolor="#f4f4f4" align="left" style="padding: 30px 30px 30px 30px; color: #666666; font-family: 'Lato', Helvetica, Arial, sans-serif; font-size: 14px; font-weight: 400; line-height: 18px;" >
<p style="margin: 0;">
<!-- <a href="http://litmus.com" target="_blank" style="color: #111111; font-weight: 700;">Dashboard</a> - -->
<!-- <a href="http://litmus.com" target="_blank" style="color: #111111; font-weight: 700;">Billing</a> - -->
<!-- <a href="http://litmus.com" target="_blank" style="color: #111111; font-weight: 700;">Help</a> -->
</p>
</td>
</tr>
<!-- PERMISSION REMINDER -->
<!-- <tr>
<td bgcolor="#f4f4f4" align="left" style="padding: 0px 30px 30px 30px; color: #666666; font-family: 'Lato', Helvetica, Arial, sans-serif; font-size: 14px; font-weight: 400; line-height: 18px;" >
<p style="margin: 0;">You received this email because we had problems billing your account. If it looks weird, <a href="http://litmus.com" target="_blank" style="color: #111111; font-weight: 700;">view it in your browser</a>.</p>
</td>
</tr> -->
<!-- UNSUBSCRIBE -->
<!-- <tr>
<td bgcolor="#f4f4f4" align="left" style="padding: 0px 30px 30px 30px; color: #666666; font-family: 'Lato', Helvetica, Arial, sans-serif; font-size: 14px; font-weight: 400; line-height: 18px;" >
<p style="margin: 0;">If these emails get annoying, please feel free to <a href="http://litmus.com" target="_blank" style="color: #111111; font-weight: 700;">unsubscribe</a>.</p>
</td>
</tr> -->
<!-- ADDRESS -->
<tr>
<td bgcolor="#f4f4f4" align="center" style="padding: 0px 30px 30px 30px; color: #666666; font-family: 'Lato', Helvetica, Arial, sans-serif; font-size: 14px; font-weight: 400; line-height: 18px;" >
<p style="margin: 0;">Heimdall Watchtower<br>Continuous monitoring at your service</p>
</td>
</tr>
</table>
<!--[if (gte mso 9)|(IE)]>
</td>
</tr>
</table>
<![endif]-->
</td>
</tr>
</table>
</body>
</html>
"""
exp_progress_1_eng = \
"""
<!DOCTYPE html>
<html>
<head>
<title></title>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta http-equiv="X-UA-Compatible" content="IE=edge" />
<style type="text/css">
/* FONTS */
@media screen {
@font-face {
font-family: 'Lato';
font-style: normal;
font-weight: 400;
src: local('Lato Regular'), local('Lato-Regular'), url(https://fonts.gstatic.com/s/lato/v11/qIIYRU-oROkIk8vfvxw6QvesZW2xOQ-xsNqO47m55DA.woff) format('woff');
}
@font-face {
font-family: 'Lato';
font-style: normal;
font-weight: 700;
src: local('Lato Bold'), local('Lato-Bold'), url(https://fonts.gstatic.com/s/lato/v11/qdgUG4U09HnJwhYI-uK18wLUuEpTyoUstqEm5AMlJo4.woff) format('woff');
}
@font-face {
font-family: 'Lato';
font-style: italic;
font-weight: 400;
src: local('Lato Italic'), local('Lato-Italic'), url(https://fonts.gstatic.com/s/lato/v11/RYyZNoeFgb0l7W3Vu1aSWOvvDin1pK8aKteLpeZ5c0A.woff) format('woff');
}
@font-face {
font-family: 'Lato';
font-style: italic;
font-weight: 700;
src: local('Lato Bold Italic'), local('Lato-BoldItalic'), url(https://fonts.gstatic.com/s/lato/v11/HkF_qI1x_noxlxhrhMQYELO3LdcAZYWl9Si6vvxL-qU.woff) format('woff');
}
}
/* CLIENT-SPECIFIC STYLES */
body, table, td, a { -webkit-text-size-adjust: 100%; -ms-text-size-adjust: 100%; }
table, td { mso-table-lspace: 0pt; mso-table-rspace: 0pt; }
img { -ms-interpolation-mode: bicubic; }
/* RESET STYLES */
img { border: 0; height: auto; line-height: 100%; outline: none; text-decoration: none; }
table { border-collapse: collapse !important; }
body { height: 100% !important; margin: 0 !important; padding: 0 !important; width: 100% !important; }
/* iOS BLUE LINKS */
a[x-apple-data-detectors] {
color: inherit !important;
text-decoration: none !important;
font-size: inherit !important;
font-family: inherit !important;
font-weight: inherit !important;
line-height: inherit !important;
}
/* MOBILE STYLES */
@media screen and (max-width:600px){
h1 {
font-size: 32px !important;
line-height: 32px !important;
}
}
/* ANDROID CENTER FIX */
div[style*="margin: 16px 0;"] { margin: 0 !important; }
</style>
</head>
<body style="background-color: #f4f4f4; margin: 0 !important; padding: 0 !important;">
<!-- HIDDEN PREHEADER TEXT -->
<div style="display: none; font-size: 1px; color: #fefefe; line-height: 1px; font-family: 'Lato', Helvetica, Arial, sans-serif; max-height: 0px; max-width: 0px; opacity: 0; overflow: hidden;">
Latest Progress.
</div>
<table border="0" cellpadding="0" cellspacing="0" width="100%">
<!-- LOGO -->
<tr>
<td bgcolor="#7c72dc" align="center">
<!--[if (gte mso 9)|(IE)]>
<table align="center" border="0" cellspacing="0" cellpadding="0" width="600">
<tr>
<td align="center" valign="top" width="600">
<![endif]-->
<table border="0" cellpadding="0" cellspacing="0" width="100%" style="max-width: 600px;" >
<tr>
<td align="center" valign="top" style="padding: 40px 10px 40px 10px;">
<a href="http://litmus.com" target="_blank">
</a>
</td>
</tr>
</table>
<!--[if (gte mso 9)|(IE)]>
</td>
</tr>
</table>
<![endif]-->
</td>
</tr>
<!-- HERO -->
<tr>
<td bgcolor="#7c72dc" align="center" style="padding: 0px 10px 0px 10px;">
<!--[if (gte mso 9)|(IE)]>
<table align="center" border="0" cellspacing="0" cellpadding="0" width="600">
<tr>
<td align="center" valign="top" width="600">
<![endif]-->
<table border="0" cellpadding="0" cellspacing="0" width="100%" style="max-width: 600px;" >
<tr>
<td bgcolor="#ffffff" align="center" valign="top" style="padding: 40px 20px 20px 20px; border-radius: 4px 4px 0px 0px; color: #111111; font-family: 'Lato', Helvetica, Arial, sans-serif; font-size: 48px; font-weight: 400; letter-spacing: 4px; line-height: 48px;">
<h1 style="font-size: 48px; font-weight: 400; margin: 0;">Experiment Progress</h1>
</td>
</tr>
</table>
<!--[if (gte mso 9)|(IE)]>
</td>
</tr>
| |
"""JASMIN Cloud
JASMIN Cloud Provider Interface package - module for handling networking
functionality
"""
__author__ = "<NAME>"
__date__ = "24/03/14"
__copyright__ = "(C) 2014 Science and Technology Facilities Council"
__license__ = "BSD - see LICENSE file in top-level directory"
__revision__ = "$Id$"
from os import path
import logging
import xml.etree.ElementTree as ET
import iptools
from libcloud.compute.providers import Provider, DRIVERS, get_driver
from libcloud.compute.drivers.vcloud import (get_url_path, fixxpath,
DEFAULT_TASK_COMPLETION_TIMEOUT)
from jasmincloud.provider.vcloud.network.edge_gateway import (NatRule,
ETreeNatRule)
from jasmincloud.provider import utils
import jasmincloud.provider.utils.etree as et_utils
log = logging.getLogger(__name__)
def _log_etree_elem(elem, level=logging.DEBUG):
    '''Helper function - Log serialisation of an ElementTree Element.

    :param elem: ElementTree Element to serialise and log
    :param level: logging level at which the serialisation is emitted

    The serialisation is skipped entirely when the logger would discard
    the message, avoiding the cost of ``ET.tostring`` for disabled levels.
    '''
    if log.isEnabledFor(level):
        # Emit at the requested level.  The previous implementation always
        # called log.debug(), which silently dropped the message whenever a
        # higher level (e.g. INFO) was requested but DEBUG was disabled.
        log.log(level, ET.tostring(elem))
class EdgeGatewayClientError(Exception):
    '''Generic exception class for EdgeGatewayClient.

    Base class for all errors raised by this module; catch this to handle
    any EdgeGatewayClient failure.
    '''
class EdgeGatewayClientConfigError(EdgeGatewayClientError):
    '''Error with configuration of client request (e.g. a VDC name that
    cannot be resolved).'''
class EdgeGatewayResponseParseError(EdgeGatewayClientError):
    '''Error parsing XML response from the vCD web server.'''
class EdgeGatewayRequestedResourcesInUseError(EdgeGatewayClientError):
    '''A resource such as an IP address has been requested which is unavailable
    because it is already in use.'''
class EdgeGatewayClientResourceNotFound(EdgeGatewayClientError):
    '''Requested resource not found in the Edge Gateway configuration
    retrieved from the server.'''
class EdgeGatewayClient(object):
    '''Retrieve, parse and update vCloud Edge Device configuration.

    Edge Gateways provide organisational VDCs with routed connections to the
    outside.

    :cvar SETTINGS_SECTION_NAMES: sections in the config file to read
    parameters from - applies to the from_settings_file classmethod only
    '''
    # Default XML namespace used in vCloud Director API documents
    VCD_XML_NS = et_utils.VCD_XML_NS

    # Config-file section names recognised by parse_settings_file; each
    # corresponds to one client operation
    SETTINGS_GLOBAL = 'EdgeGatewayClient'
    SETTINGS_ADD_FIREWALL_RULES = 'EdgeGatewayClient.add_firewall_rules'
    SETTINGS_RM_FIREWALL_RULES = 'EdgeGatewayClient.remove_firewall_rules'
    SETTINGS_ROUTE_HOST = 'EdgeGatewayClient.set_host_routing'
    SETTINGS_RM_NAT_RULES = 'EdgeGatewayClient.remove_nat_rules'
    SETTINGS_CANCEL_TASKS = 'EdgeGatewayClient.cancel_tasks'
    # NOTE(review): SETTINGS_CANCEL_TASKS is absent from this tuple -
    # confirm whether that omission is intentional
    SETTINGS_SECTION_NAMES = (
        SETTINGS_GLOBAL,
        SETTINGS_ADD_FIREWALL_RULES,
        SETTINGS_RM_FIREWALL_RULES,
        SETTINGS_ROUTE_HOST,
        SETTINGS_RM_NAT_RULES
    )
    # Default vCloud Director API version and HTTPS port
    VCD_API_VERS = '5.5'
    DEFAULT_PORT = 443
    # VDC Edge Gateway discovery related terms
    LINK_TAG = 'Link'
    LINK_ATTR_TAG = 'href'
    REL_ATTR_TAG = 'rel'
    EDGE_GATEWAYS_LINK_REL = 'edgeGateways'
    EDGE_GATEWAY_REC_TAG = 'EdgeGatewayRecord'
    CONFIG_EDGE_GATEWAY_REL = 'edgeGateway:configureServices'
    GATEWAY_IFACE_TAG = 'GatewayInterface'
def __init__(self):
self.driver = None
self.settings = {}
@classmethod
def from_connection(cls, *arg, **kwarg):
'''Instantiate and make a connection to the vCD API'''
obj_ = cls()
obj_.connect(*arg, **kwarg)
return obj_
@classmethod
def from_settings_file(cls, settings_filepath):
'''Instantiate from settings in a configuration file
'''
obj_ = cls()
obj_.parse_settings_file(settings_filepath)
return obj_
def connect(self, username, password, hostname, port=DEFAULT_PORT,
api_version=VCD_API_VERS):
'''Create vCloud driver and authenticated connection'''
# Need this explicit check to workaround bug in libcloud vCD driver
# If no password is set, it's omitted from the argument list such
# the argument order is shuffled up. The hostname gets set to the port
# number!
if password is None:
raise TypeError('Password not set')
driver_cls = get_driver(Provider.VCLOUD)
self.driver = driver_cls(username, password, host=hostname,
api_version=api_version, port=port)
def connect_from_settings(self):
'''Connect using settings read from config file'''
settings = self.settings[self.__class__.SETTINGS_GLOBAL]
self.connect(settings['username'], settings['password'],
settings['hostname'], port=settings['port'],
api_version=settings['api_version'])
def parse_settings_file(self, settings_filepath):
'''Get settings needed for initialising the vCD driver from a config
file
'''
cls = self.__class__
cfg = utils.CaseSensitiveConfigParser()
if not path.exists(settings_filepath):
raise IOError('Configuration file not found %r' % settings_filepath)
cfg.read(settings_filepath)
for section_name in cfg.sections():
if section_name == cls.SETTINGS_GLOBAL:
if cfg.has_option(section_name, 'driver_path'):
driver_path = cfg.get(section_name, 'driver_path')
else:
driver_path = None
if cfg.has_option(section_name, 'password'):
password = cfg.get(section_name, 'password')
else:
password = None
self.settings[section_name] = {
'driver_path': driver_path,
'username': cfg.get(section_name, 'username'),
'password': password,
'hostname': cfg.get(section_name, 'hostname'),
'port': cfg.getint(section_name, 'port'),
'api_version': cfg.get(section_name, 'api_version'),
'cacert_filepath': cfg.get(section_name, 'cacert_filepath'),
'verify_ssl_certs': cfg.getboolean(section_name,
'verify_ssl_certs'),
'vdc_name': cfg.get(section_name, 'vdc_name'),
'edgegateway_name': cfg.get(section_name,
'edgegateway_name')
}
if self.settings[section_name]['driver_path']:
driver_path = self.settings[section_name]['driver_path']
DRIVERS[Provider.VCLOUD] = tuple(driver_path.rsplit('.', 1))
if self.settings[section_name]['verify_ssl_certs'] == False:
# This will switch off verification of the server's identity
# potentially allowing credentials to be passed to an
# unauthenticated 3rd party. Make sure you know what you
# doing!
from libcloud import security
security.VERIFY_SSL_CERT = False
elif section_name == cls.SETTINGS_ADD_FIREWALL_RULES:
firewall_settings = {}
if cfg.has_option(section_name, 'rule_id'):
firewall_settings['rule_id'] = cfg.getint(section_name,
'rule_id')
if cfg.has_option(section_name, 'rule_is_enabled'):
firewall_settings['rule_is_enabled'] = cfg.getboolean(
section_name,
'rule_is_enabled')
if cfg.has_option(section_name, 'match_on_translate'):
firewall_settings['match_on_translate'] = cfg.getboolean(
section_name,
'match_on_translate')
if cfg.has_option(section_name, 'description'):
firewall_settings['description'] = cfg.get(section_name,
'description')
if cfg.has_option(section_name, 'policy'):
firewall_settings['policy'] = cfg.get(section_name,
'policy')
if cfg.has_option(section_name, 'protocols'):
protocols = cfg.get(section_name, 'protocols').split()
for protocol in protocols:
k, v = protocol.split(':')
firewall_settings['protocols'][k.strip()
] = v.strip().strip(',')
if cfg.has_option(section_name, 'port'):
firewall_settings['port'] = cfg.getint(section_name,
'port')
if cfg.has_option(section_name, 'dest_port_range'):
firewall_settings['dest_port_range'] = cfg.getint(
section_name,
'dest_port_range')
if cfg.has_option(section_name, 'dest_ip'):
firewall_settings['dest_ip'] = cfg.get(section_name,
'dest_ip')
if cfg.has_option(section_name, 'src_port'):
firewall_settings['src_port'] = cfg.getint(section_name,
'src_port')
if cfg.has_option(section_name, 'src_port_range'):
firewall_settings['src_port_range'] = cfg.getint(
section_name,
'src_port_range')
if cfg.has_option(section_name, 'src_ip'):
firewall_settings['src_ip'] = cfg.get(section_name,
'src_ip')
if cfg.has_option(section_name, 'direction'):
firewall_settings['direction'] = cfg.get(section_name,
'direction')
if cfg.has_option(section_name, 'enable_logging'):
firewall_settings['enable_logging'] = cfg.getboolean(
section_name,
'enable_logging')
self.settings[section_name] = firewall_settings
elif section_name == cls.SETTINGS_RM_FIREWALL_RULES:
self.settings[section_name] = {
'firewall_rule_ids': [
int(i.strip())
for i in cfg.get(section_name,
'firewall_rule_ids').split(',')
]
}
elif section_name == cls.SETTINGS_ROUTE_HOST:
self.settings[section_name] = {
'iface_name': cfg.get(section_name, 'iface_name'),
'internal_ip': cfg.get(section_name, 'internal_ip'),
'external_ip': cfg.get(section_name, 'external_ip'),
}
elif section_name == cls.SETTINGS_RM_NAT_RULES:
self.settings[section_name] = {
'nat_rule_ids': [
int(i.strip())
for i in cfg.get(section_name,
'nat_rule_ids').split(',')
]
}
elif section_name == cls.SETTINGS_CANCEL_TASKS:
if cfg.has_option(section_name, 'task_uris'):
task_uris_ = cfg.get(section_name, 'task_uris').split(',')
self.settings[section_name] = {
'task_uris': [i.strip() for i in task_uris_]
}
else:
self.settings[section_name] = {'task_uris': None}
def get_config(self, vdc_name=None, names=None):
'''Retrieve configurations for each Edge Gateway in a given
Organisational VDC
:param vdc_name: name of VDC to retrieve Edge Gateway configurations for
:param names: names of the Edge Gateway configurations to retrieve. If
none given, retrieve all the ones found
'''
if vdc_name is not None:
vdc_id = self.get_vdc_uri(vdc_name)
if vdc_id is None:
raise EdgeGatewayClientConfigError('No VDC found with requested'
' name %r' % vdc_name)
else:
# Default to the first ID found in the returned list
vdc_id = self.driver.vdcs[0].id
# Find out the Edge Gateway URIs for this VDC
edgegateway_uri = self.get_vdc_edgegateways_uri(vdc_id)
# Resolve to retrieve the Edge Gateway Records
edgegateway_recs = self.get_edgegateway_recs(edgegateway_uri)
edgegateway_configs = []
if names is None:
edgegateway_configs = [self._get_edgegateway_from_uri(
edgegateway_rec.href)
for edgegateway_rec in edgegateway_recs]
else:
edgegateway_configs = [self._get_edgegateway_from_uri(
edgegateway_rec.href)
for edgegateway_rec in edgegateway_recs
if edgegateway_rec.name in names]
return edgegateway_configs
    def post_config(self, gateway, timeout=DEFAULT_TASK_COMPLETION_TIMEOUT,
                    cancel_after_timeout=False):
        '''Despatch updated Edge Gateway configuration.

        :param gateway: new configuration to be posted to the Edge Gateway
        :param timeout: seconds to wait for the resulting vCD task to
            complete
        :param cancel_after_timeout: if True, log that the task was
            cancelled after the wait times out
        :return: object parsed from the server's XML response
        '''
        update_uri = self._get_edgegateway_update_uri(gateway)
        # Serialise the service configuration sub-tree for the POST body
        gateway_service_config_xml = ET.tostring(
            gateway.configuration.edge_gateway_service_configuration._elem)
        res = self.driver.connection.request(get_url_path(update_uri),
                                             method='POST',
                                             data=gateway_service_config_xml)
        # NOTE(review): a non-2xx status is only logged, not raised -
        # confirm callers are expected to inspect the returned response
        if res.status < 200 or res.status >= 300:
            log.error('Error sending Edge Gateway configuration to %r: %r:',
                      update_uri, ET.tostring(res.object))
        response = et_utils.obj_from_elem_walker(res.object)
        # Block until the asynchronous vCD task finishes (or times out)
        self.driver._wait_for_task_completion(response.href,
                                              timeout=timeout)
        # NOTE(review): this branch only logs; no cancel request is sent
        # here - confirm cancellation is handled elsewhere (cancel_tasks)
        if cancel_after_timeout:
            log.info('Task cancelled following timeout')
        return response
    def cancel_tasks(self, gateway, task_uris=None):
        '''Cancel queued tasks.

        :param gateway: gateway object whose queued tasks should be
            cancelled
        :param task_uris: explicit list of task URIs to cancel; when None,
            all tasks attached to *gateway* are cancelled
        :return: list of task URIs for which cancellation was requested
            (empty list if the gateway has no ``tasks`` attribute)
        '''
        if not hasattr(gateway, 'tasks'):
            return []
        if task_uris is None:
            task_uris = [task.href for task in gateway.tasks.task]
        try:
            for task_uri in task_uris:
                self.driver.connection.request(task_uri + '/action/cancel',
                                               method='POST')
        except Exception as e:
            # task_uri is the URI whose cancel request failed
            log.error('Error cancelling task %r:', task_uri)
            # NOTE(review): assumes e.args[0] is an ElementTree element
            # (serialisable with tostringlist) - confirm for all exception
            # types the driver can raise here
            for line in ET.tostringlist(e.args[0]):
                log.error(line)
            raise
        return task_uris
def _get_elems(self, uri, xpath):
'''Helper method - Get XML elements from a given URI and XPath search
over returned XML content
:var uri: URI to retrieve XML response from
:var xpath: XPath to search returned XML content with. It can contain
the {} delimited namespace or else the default vCloud one is assumed
:return: ElementTree Element contain search results
'''
res = self.driver.connection.request(get_url_path(uri))
_log_etree_elem(res.object)
if xpath.startswith(et_utils.NS_START_DELIM):
return res.object.findall(xpath)
else:
return res.object.findall(fixxpath(res.object, xpath))
def get_vdc_uri(self, vdc_name):
'''Match VDC URI to input name
:return: VDC URI or None if not found
'''
for vdc in self.driver.vdcs:
if vdc.name == vdc_name:
return vdc.id
def get_vdc_edgegateways_uri(self, vdc_uri):
'''Get VDC Edge Gateways query URI for the Given VDC URI'''
for link in self._get_elems(vdc_uri, self.__class__.LINK_TAG):
rel_tag = link.get(self.__class__.REL_ATTR_TAG)
if rel_tag == self.__class__.EDGE_GATEWAYS_LINK_REL:
return link.get(self.__class__.LINK_ATTR_TAG)
def get_edgegateway_recs(self, edgegateway_uri):
'''Retrieve Edge Gateway Records from the Edge Gateway query URI
'''
res = self.driver.connection.request(get_url_path(edgegateway_uri))
_log_etree_elem(res.object)
edgegateway_rec_elems = res.object.findall(
fixxpath(res.object, self.__class__.EDGE_GATEWAY_REC_TAG))
edgegateway_recs = [et_utils.obj_from_elem_walker(edgegateway_rec_elem)
for edgegateway_rec_elem in edgegateway_rec_elems]
return edgegateway_recs
def _get_edgegateway_from_uri(self, edgegateway_rec_uri):
res = self.driver.connection.request(get_url_path(edgegateway_rec_uri))
_log_etree_elem(res.object)
gateway = et_utils.obj_from_elem_walker(res.object)
# Augment gateway object with explicit reference to ElementTree elem
#gateway._elem = res.object
return gateway
@staticmethod
| |
'json')
path_params = {}
if 'conversation_id' in params:
path_params['conversationId'] = params['conversation_id']
if 'message_id' in params:
path_params['messageId'] = params['message_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='WebChatMessage',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def get_conversations_chat_messages(self, conversation_id, **kwargs):
"""
Get the messages of a chat conversation.
The current user must be involved with the conversation to get its messages.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_conversations_chat_messages(conversation_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str conversation_id: conversationId (required)
:param str after: If specified, get the messages chronologically after the id of this message
:param str before: If specified, get the messages chronologically before the id of this message
:param str sort_order: Sort order
:param int max_results: Limit the returned number of messages, up to a maximum of 100
:return: WebChatMessageEntityList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['conversation_id', 'after', 'before', 'sort_order', 'max_results']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_conversations_chat_messages" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'conversation_id' is set
if ('conversation_id' not in params) or (params['conversation_id'] is None):
raise ValueError("Missing the required parameter `conversation_id` when calling `get_conversations_chat_messages`")
resource_path = '/api/v2/conversations/chats/{conversationId}/messages'.replace('{format}', 'json')
path_params = {}
if 'conversation_id' in params:
path_params['conversationId'] = params['conversation_id']
query_params = {}
if 'after' in params:
query_params['after'] = params['after']
if 'before' in params:
query_params['before'] = params['before']
if 'sort_order' in params:
query_params['sortOrder'] = params['sort_order']
if 'max_results' in params:
query_params['maxResults'] = params['max_results']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='WebChatMessageEntityList',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def get_conversations_chat_participant_wrapup(self, conversation_id, participant_id, **kwargs):
"""
Get the wrap-up for this conversation participant.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_conversations_chat_participant_wrapup(conversation_id, participant_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str conversation_id: conversationId (required)
:param str participant_id: participantId (required)
:param bool provisional: Indicates if the wrap-up code is provisional.
:return: AssignedWrapupCode
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['conversation_id', 'participant_id', 'provisional']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_conversations_chat_participant_wrapup" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'conversation_id' is set
if ('conversation_id' not in params) or (params['conversation_id'] is None):
raise ValueError("Missing the required parameter `conversation_id` when calling `get_conversations_chat_participant_wrapup`")
# verify the required parameter 'participant_id' is set
if ('participant_id' not in params) or (params['participant_id'] is None):
raise ValueError("Missing the required parameter `participant_id` when calling `get_conversations_chat_participant_wrapup`")
resource_path = '/api/v2/conversations/chats/{conversationId}/participants/{participantId}/wrapup'.replace('{format}', 'json')
path_params = {}
if 'conversation_id' in params:
path_params['conversationId'] = params['conversation_id']
if 'participant_id' in params:
path_params['participantId'] = params['participant_id']
query_params = {}
if 'provisional' in params:
query_params['provisional'] = params['provisional']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='AssignedWrapupCode',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def get_conversations_chat_participant_wrapupcodes(self, conversation_id, participant_id, **kwargs):
"""
Get list of wrapup codes for this conversation participant
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_conversations_chat_participant_wrapupcodes(conversation_id, participant_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str conversation_id: conversationId (required)
:param str participant_id: participantId (required)
:return: list[WrapupCode]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['conversation_id', 'participant_id']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_conversations_chat_participant_wrapupcodes" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'conversation_id' is set
if ('conversation_id' not in params) or (params['conversation_id'] is None):
raise ValueError("Missing the required parameter `conversation_id` when calling `get_conversations_chat_participant_wrapupcodes`")
# verify the required parameter 'participant_id' is set
if ('participant_id' not in params) or (params['participant_id'] is None):
raise ValueError("Missing the required parameter `participant_id` when calling `get_conversations_chat_participant_wrapupcodes`")
resource_path = '/api/v2/conversations/chats/{conversationId}/participants/{participantId}/wrapupcodes'.replace('{format}', 'json')
path_params = {}
if 'conversation_id' in params:
path_params['conversationId'] = params['conversation_id']
if 'participant_id' in params:
path_params['participantId'] = params['participant_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[WrapupCode]',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def get_conversations_chats(self, **kwargs):
"""
Get active chat conversations for the logged in user
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_conversations_chats(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: ChatConversationEntityListing
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_conversations_chats" % key
)
params[key] = val
del params['kwargs']
resource_path = '/api/v2/conversations/chats'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ChatConversationEntityListing',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def get_conversations_cobrowsesession(self, conversation_id, **kwargs):
"""
Get cobrowse conversation
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_conversations_cobrowsesession(conversation_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str conversation_id: conversationId (required)
:return: CobrowseConversation
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['conversation_id']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_conversations_cobrowsesession" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'conversation_id' is set
if ('conversation_id' not in params) or (params['conversation_id'] is None):
raise ValueError("Missing the required parameter `conversation_id` when calling `get_conversations_cobrowsesession`")
resource_path = '/api/v2/conversations/cobrowsesessions/{conversationId}'.replace('{format}', 'json')
path_params = {}
if 'conversation_id' in params:
path_params['conversationId'] = params['conversation_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# | |
the key associated
with an application. See :meth:`change_key` for details."""
#: the maximum age of a key, which is the number of times the key
#: can be changed before the original key is considered too old to
#: be used for decryption.
MAX_AGE = 100
    def __init__(self, key_num, key, key_set, when=None):
        """Initialise the cipher with the current application key.

        :param key_num: number identifying the current key
        :param key: binary string containing the current application key
        :param key_set: entity set used to persist (encrypted) key records
        :param when: optional TimePoint; when given, the first unexpired
            key record is loaded from *key_set* so that strings encrypted
            with it remain decryptable
        :raises RuntimeError: if *when* is given but no unexpired key
            record is found in *key_set*
        """
        # Serialises access to the cipher/key state
        self.lock = threading.RLock()
        self.key_set = key_set
        self.key_num = key_num
        self.key = key
        # Cache of cipher objects keyed by key number
        self.ciphers = {key_num: self.new_cipher(key)}
        if when:
            # we need to find a key that hasn't expired
            with key_set.open() as keys:
                t = edm.EDMValue.from_type(edm.SimpleType.DateTime)
                t.set_from_value(time.time())
                # NOTE(review): 'filter' shadows the builtin of the same
                # name; harmless here but worth renaming in a later change
                filter = odata.CommonExpression.from_str(
                    "Expires gte :t", {'t': t})
                keys.set_filter(filter)
                # Only interested in keys that haven't expired
                old_keys = keys.values()
                if not old_keys:
                    raise RuntimeError("AppCipher: no current key")
                # First unexpired record found becomes the "old" key
                old_key = old_keys[0]
                self.old_num = old_key['KeyNum'].value
                # NOTE(review): KeyNum is read via .value but KeyString is
                # passed to decrypt() directly - confirm decrypt() accepts
                # the EDM value rather than a plain string
                self.old_key = self.decrypt(old_key['KeyString'])
                self.old_expires = when.get_unixtime()
                self.ciphers[self.old_num] = self.new_cipher(self.old_key)
        else:
            # No expiry handling requested: only the current key is usable
            self.old_num = None
            self.old_key = None
            self.old_expires = None
    def new_cipher(self, key):
        """Returns a new cipher object with the given key.

        :param key: binary key string used to initialise the cipher
        :return: a :class:`PlainTextCipher` instance

        The default implementation creates a plain-text 'cipher' and is
        not suitable for secure use of encrypt/decrypt but, with a
        sufficiently good key, may still be used for hashing.  Override
        this method to supply a real cipher implementation."""
        return PlainTextCipher(key)
def change_key(self, key_num, key, when):
    """Changes the key of this application.

    key_num
        The number given to the new key, must differ from the last
        :attr:`MAX_AGE` key numbers.

    key
        A binary string containing the new application key.

    when
        A fully specified :class:`pyslet.iso8601.TimePoint` at which
        point the new key will come into effect.

    Many organizations have a policy of changing keys on a routine
    basis, for example, to ensure that people who have had temporary
    access to the key only have temporary access to the data it
    protects.  This method makes it easier to implement such a
    policy for applications that use the AppCipher class.

    The existing key is encrypted with the new key and a record is
    written to the :attr:`key_set` to record the *existing* key
    number, the encrypted key string and the *when* time, which is
    treated as an expiry time in this context.

    This procedure ensures that strings encrypted with an old key
    can always be decrypted because the value of the old key can be
    looked up.  Although it is encrypted, it will be encrypted with
    a new(er) key and the procedure can be repeated as necessary
    until a key encrypted with the newest key is found.

    The key change process then becomes:

    1.  Start a utility process connected to the application's
        entity container using the existing key and then call the
        change_key method.  Pass a value for *when* that will give
        you time to reconfigure all AppCipher clients.  Assuming the
        key change is planned, a time in hours or even days ahead
        can be used.

    2.  Update or reconfigure all existing applications so that they
        will be initialised with the new key and the same value for
        *when* next time they are restarted.

    3.  Restart/refresh all running applications before the change
        over time.  As this does not need to be done simultaneously,
        a load balanced set of application servers can be cycled on
        a schedule to ensure continuous running).

    Following a key change the entity container will still contain
    data encrypted with old keys and the architecture is such that
    compromise of a key is sufficient to read all encrypted data
    with that key and all previous keys.  Therefore, changing the
    key only protects new data.

    In situations where policy dictates a key change it might make
    sense to add a facility to the application for re-encrypting
    data in the data store by going through a
    read-decrypt/encrypt-write cycle with each protected data field.
    Of course, the old key could still be used to decrypt this
    information from archived backups of the data store.
    Alternatively, if the protected data is itself subject to change
    on a routine basis you may simply rely on the natural turnover
    of data in the application.  The strategy you choose will depend
    on your application.

    The :attr:`MAX_AGE` attribute determines the maximum number of
    keys that can be in use in the data set simultaneously.
    Eventually you will have to update encrypted data in the data
    store."""
    with self.lock:
        # demote the current key to "old" status with the given expiry
        self.old_num = self.key_num
        self.old_key = self.key
        self.old_expires = when.get_unixtime()
        # we should already have a cipher for this key
        self.key_num = key_num
        self.key = key
        cipher = self.ciphers[key_num] = self.new_cipher(key)
        # we can't use the encrypt method here as we want to force
        # use of the new key
        old_key_encrypted = "%i:%s" % (
            key_num, force_ascii(base64.b64encode(cipher.encrypt(
                self.old_key))))
        # persist the old key (encrypted with the new one) so that
        # _get_cipher can resolve it later
        with self.key_set.open() as keys:
            e = keys.new_entity()
            e.set_key(self.old_num)
            e['KeyString'].set_from_value(old_key_encrypted)
            e['Expires'].set_from_value(when)
            try:
                keys.insert_entity(e)
            except edm.ConstraintError:
                # Presumably this entity already exists, possible race
                # condition on change_key - load the entity from the old
                # key number to raise KeyError if not
                e = keys[self.old_num]
def _get_current_cipher(self):
if self.old_expires:
if time.time() > self.old_expires:
# the old key has finally expired
self.old_num = None
self.old_key = None
self.old_expires = None
else:
# use the old key
return self.old_num, self.ciphers[self.old_num]
return self.key_num, self.ciphers[self.key_num]
def _get_cipher(self, num):
    """Return the cipher for key number *num*, resolving old keys.

    If *num* is not already cached in :attr:`ciphers`, walks the chain
    of persisted key records: each record stores the old key encrypted
    with a newer key, so the stack grows toward newer keys until a
    cached cipher is found, then unwinds, decrypting and caching each
    intermediate key.  Raises RuntimeError when the chain exceeds
    :attr:`MAX_AGE` or a record is missing."""
    # stack entries are (key_num, encrypted_key_data, target_num):
    # decrypting encrypted_key_data with key_num's cipher yields the
    # key for target_num
    stack = [(num, None, None)]
    while stack:
        key_num, key_data, cipher_num = stack.pop()
        cipher = self.ciphers.get(key_num, None)
        if cipher is None:
            # unknown key: re-queue this entry and look up the record
            # that tells us which newer key encrypts it
            stack.append((key_num, key_data, cipher_num))
            with self.key_set.open() as collection:
                try:
                    e = collection[key_num]
                    old_key_num, old_key_data = self._split_data(
                        e['KeyString'].value)
                    # bound the chain length by MAX_AGE
                    if len(stack) > self.MAX_AGE:
                        raise KeyError
                    stack.append((old_key_num, old_key_data, key_num))
                except KeyError:
                    raise RuntimeError("AppCipher: key too old")
        elif key_data:
            # we can now decrypt this intermediate key and cache its cipher
            with self.lock:
                new_data = cipher.decrypt(key_data)
                if cipher_num is not None:
                    self.ciphers[cipher_num] = self.new_cipher(new_data)
        else:
            # reached the originally requested key
            return cipher
def encrypt(self, data):
    """Encrypt *data* (a binary string) with the current key.

    Returns an ASCII character string of the form "<num>:<base64>"
    suitable for storage."""
    with self.lock:
        num, cipher = self._get_current_cipher()
        payload = base64.b64encode(cipher.encrypt(data))
        return "%i:%s" % (num, force_ascii(payload))
def decrypt(self, data):
    """Decrypt *data* produced by :meth:`encrypt`.

    data
        A character string of the form "<num>:<base64>".

    Returns the decrypted binary string."""
    num, payload = self._split_data(data)
    return self._get_cipher(num).decrypt(payload)
def sign(self, message):
    """Sign a binary *message* with the current key.

    Returns an ASCII string "<num>-<hex salt>-<hex hash>".  It is
    recommended that character strings are encoded using UTF-8
    before signing."""
    with self.lock:
        num, cipher = self._get_current_cipher()
        # 4 random salt bytes make each signature of the same message unique
        salt = os.urandom(4)
        digest = cipher.hash(salt + message)
        return "%i-%s-%s" % (num,
                             force_ascii(binascii.hexlify(salt)),
                             force_ascii(binascii.hexlify(digest)))
def check_signature(self, signature, message=None):
    """Check a signature returned by :meth:`sign`.

    signature
        The ASCII signature to be checked for validity.

    message
        A binary message string.  This is optional; if None the
        message will be extracted from the signature string
        (reversing ascii_sign).

    On success the method returns the validated message (a binary
    string) and on failure it raises ValueError."""
    num, salt, hash, smessage = self._split_signature(signature)
    try:
        num = int(num)
        salt = binascii.unhexlify(salt)
        hash = binascii.unhexlify(hash)
        if smessage:
            smessage = unescape_data(smessage)
        # NOTE(review): a falsy message (e.g. b"") is treated as absent
        # here — presumably intentional, but confirm against callers
        if message:
            # must match exactly!
            if message != smessage:
                raise ValueError
        else:
            message = smessage
        with self.lock:
            cipher = self._get_cipher(num)
            if cipher is None:
                # BUG FIX: this previously *returned* the ValueError
                # class instead of raising it, so an unknown key made
                # the check appear to succeed with a bogus result.
                raise ValueError
            # NOTE(review): '==' is not a constant-time comparison;
            # consider hmac.compare_digest if signatures can come from
            # untrusted input
            if cipher.hash(salt + message) == hash:
                return message
            else:
                raise ValueError
    except TypeError:
        # e.g. non-string inputs to unhexlify; normalise to ValueError
        raise ValueError
def ascii_sign(self, message):
    """Sign a binary *message* with the current key.

    Unlike :meth:`sign`, the returned value contains the entire
    message as well as the signature, as a URI-encoded character
    string suitable for storage and/or transmission.

    The message is %-encoded (as implemented by
    :func:`pyslet.rfc2396.escape_data`); applying the corresponding
    unescape function to the whole string therefore yields a binary
    string that *contains* an exact copy of the original data."""
    signature = self.sign(message)
    return "%s-%s" % (signature, escape_data(message))
def _split_data(self, data):
data = data.split(':')
if len(data) != 2 or not data[0].isdigit():
raise ValueError
key_num = int(data[0])
try:
data = base64.b64decode(data[1])
except TypeError:
raise ValueError
return key_num, data
def _split_signature(self, signature):
result = []
pos = 0
while True:
if len(result) == 3:
result.append(signature[pos:])
return result
new_pos = signature.find('-', pos)
if new_pos < 0:
result.append(signature[pos:])
while len(result) < 4:
result.append('')
return result
result.append(signature[pos:new_pos])
pos | |
object follow the student progression
"""
# The ability label is the objective's human-readable identity; save()
# enforces its uniqueness and derives the slug from it.
ability = models.CharField(
    max_length=255,
    verbose_name=_("Ability"),
    help_text=_("A label that indicates the abilities validated by the learner."),
    blank=False,
    null=False
)
# Language the objective is written in, restricted to translated languages.
language = models.CharField(
    max_length=20,
    choices=get_translated_languages(),
    verbose_name=_("Language"),
    help_text=_("The language in which the course_objective is written in.")
)
# The user who created the objective; deleting the user deletes it too.
author = models.ForeignKey(
    get_user_model(),
    on_delete=models.CASCADE,
    related_name="created_objectives",
    verbose_name=_("Author"),
    help_text=_("The course_objective’s author.")
)
# Students that validated the objective, via the ValidationOnObjective
# through-model (which records when each validation happened).
validators = models.ManyToManyField(
    get_user_model(),
    through="ValidationOnObjective",
    related_name="validation_on_objective",
    verbose_name=_("Students validators"),
    help_text=_("The user that can validate the course_objective.")
)
objects = ObjectiveManager()
# Auto-generated: slug built from the ability label (see slug_generator/save).
slug = models.SlugField(unique=True)
created = models.DateTimeField(auto_now_add=True, auto_now=False, verbose_name=_("Published the…"))
updated = models.DateTimeField(auto_now_add=False, auto_now=True, verbose_name=_("Last updated the…"))
def slug_generator(self):
    """Return the value the slug is derived from: the ability label."""
    return self.ability
def add_validator(self, student: get_user_model()) -> None:
    """
    Add a new student in the list of students that validated the course_objective.

    :param student: the student that wants to set this course_objective as validated
    :type student: get_user_model()

    :raises learning.exc.ObjectiveIsAlreadyValidated: when the student already validated this course_objective.
    """
    if student in self.validators.all():
        # BUG FIX: the message previously used a "{student}" (str.format)
        # placeholder with the '%' operator, so the student's name was
        # never interpolated; use the %(student)s mapping key like
        # remove_validator does.
        raise learning.exc.ObjectiveIsAlreadyValidated(
            _("The student %(student)s has already validated this course_objective.") % {"student": student}
        )
    self.validations.create(student=student)
def remove_validator(self, student: get_user_model()) -> None:
    """
    Remove the given student from the list of students that validated the course_objective.

    :param student: the user student to remove
    :type student: get_user_model()

    :raises learning.exc.ObjectiveIsNotValidated: when the student did not validate the course_objective.
    """
    # renamed from the misleading "student_did_not_already_validate":
    # the expression is True when the student *has* validated
    has_validated = student in self.validators.all()
    if not has_validated:
        raise learning.exc.ObjectiveIsNotValidated(
            _("The student %(student)s has not validated this course_objective yet.It cannot be removed "
              "from students that validated the course_objective.") % {"student": student}
        )
    self.validations.filter(student=student).delete()
def save(self, force_insert=False, force_update=False, using=None, update_fields=None) -> None:
    """
    save() method is overridden to enforce ability uniqueness and to
    generate the slug field.

    :raises learning.exc.ObjectiveAlreadyExists: when another objective
        with the same ability already exists.
    """
    # BUG FIX: the old check matched this instance's own row, so saving
    # an *existing* objective always raised ObjectiveAlreadyExists and
    # updates were impossible.  Excluding our pk (None for unsaved
    # instances, which matches nothing) restricts the check to other rows.
    duplicates = Objective.objects.filter(ability=self.ability).exclude(pk=self.pk)
    if duplicates.exists():
        raise learning.exc.ObjectiveAlreadyExists(
            _("The course_objective that you are trying to create already exists.")
        )
    self.slug = generate_slug_for_model(Objective, self)
    super().save(force_insert, force_update, using, update_fields)
def __str__(self):
    """Human-readable representation: the ability label."""
    return self.ability
def clean(self):
    """Model validation: reject an empty ability label."""
    if len(self.ability) == 0:
        message = _("The course_objective ability label cannot be empty.")
        raise learning.exc.ObjectiveAbilityCannotBeEmpty(message)
class ValidationOnObjective(models.Model):
    """Through-model recording which student validated which
    course_objective, and when."""

    # The validated course_objective.
    objective = models.ForeignKey(
        Objective,
        on_delete=models.CASCADE,
        verbose_name=_("Objective"),
        related_name="validations",
    )
    # The student who validated it.
    student = models.ForeignKey(
        get_user_model(),
        on_delete=models.CASCADE,
        verbose_name=_("Student"),
        related_name="validations",
    )
    # Auto-generated from the objective's ability and the student name.
    slug = models.SlugField(
        unique=True,
    )
    validated_the = models.DateTimeField(
        auto_now_add=True,
        auto_now=False,
        verbose_name=_("Validated the…"),
    )

    def slug_generator(self):
        """Return the string the slug is derived from."""
        return "%s-%s" % (self.objective.ability, self.student)

    def save(self, force_insert=False, force_update=False, using=None, update_fields=None) -> None:
        """Generate the slug, then delegate to the standard save()."""
        self.slug = generate_slug_for_model(ValidationOnObjective, self)
        super().save(force_insert, force_update, using, update_fields)
# noinspection PyAbstractClass
class BasicModelManager(models.Manager):
    """
    This is the basic manager used in CourseManager, ResourceManager and ActivityManager.

    Subclasses must implement :meth:`public` and :meth:`recommendations_for`;
    the concrete query helpers below are shared as-is.
    """

    @abc.abstractmethod
    def public(self, **kwargs) -> QuerySet:
        """
        The BasicModel instances that are known to be public, so, that can be displayed to anyone without any required
        permission right.

        :param kwargs: kwargs that can contain a key “query” to filter name and description
        :type kwargs: dict

        :return: the public BasicModel’s instances
        :rtype: QuerySet
        """

    @abc.abstractmethod
    def recommendations_for(self, user: get_user_model(), **kwargs) -> QuerySet:
        """
        The BasicModel instance that are recommended for a specific user.

        :param kwargs: kwargs that can contain a key “query” to filter name and description
        :type kwargs: dict
        :param user: the user for which to get recommended entities.
        :type: get_user_model()

        :return: the recommended instances for the user.
        :rtype: QuerySet
        """

    # noinspection PyMethodMayBeStatic
    def _filter_with_query(self, queryset: QuerySet, query: str) -> QuerySet:
        """
        Filter a queryset with a free-text query matched (case-insensitively)
        against the name and description fields.  When *queryset* or *query*
        is empty the queryset is returned unchanged.

        .. note:: FIXME: this should maybe return an exception if it’s not working properly.

        :param queryset: the original queryset to filter
        :type queryset: QuerySet
        :param query: the query string
        :type query: str

        :return: a new queryset based on the original but filtered using the query parameter
        :rtype: QuerySet
        """
        if queryset and query:
            queryset = queryset.filter(Q(name__icontains=query) | Q(description__icontains=query))
        return queryset

    def written_by(self, author: get_user_model(), **kwargs) -> QuerySet:
        """
        Get all objects written by the author. It sorts the results according to the updated property.

        :param author: a user that wrote courses
        :type author: get_user_model()
        :param kwargs: kwargs that can contain a key “query” to filter name and description
        :type kwargs: dict

        :return: all objects written by the author given in parameter
        :rtype: QuerySet
        """
        # NOTE(review): the author's favourites are excluded here —
        # presumably they appear in a separate favourites list; confirm.
        qs = super().get_queryset().filter(author=author).exclude(favourite_for=author)
        return self._filter_with_query(qs, kwargs.get("query", "")).order_by("-updated")

    def taught_by(self, teacher: get_user_model(), **kwargs) -> QuerySet:
        """
        Get all objects taught by (as author or collaborator) a teacher.

        :param teacher: a user that teachers in objects
        :type teacher: get_user_model()
        :param kwargs: kwargs that can contain a key “query” to filter name and description
        :type kwargs: dict

        :return: all objects taught by the teacher
        :rtype: QuerySet
        """
        qs = super().get_queryset().filter(Q(author=teacher) | Q(collaborators=teacher))
        return self._filter_with_query(qs, kwargs.get("query", "")).distinct()

    def favourites_for(self, user: get_user_model()) -> QuerySet:
        """
        Get a Queryset object that includes all the entities set a favourite for a specific user.

        :param user: the user for which to get favourite entities
        :type user: get_user_model()

        :return: the QuerySet of favourite entities
        :rtype: QuerySet
        """
        return super().get_queryset().filter(favourite_for=user)

    def teacher_favourites_for(self, user: get_user_model(), **kwargs) -> QuerySet:
        """
        Get the favourite entities that the user also teaches (as author or
        collaborator), optionally filtered by a “query” kwarg matched against
        name and description.

        :param user: the user for which to get favourite taught entities
        :type user: get_user_model()
        :param kwargs: kwargs that can contain a key “query” to filter name and description
        :type kwargs: dict

        :return: the QuerySet of favourite entities taught by the user
        :rtype: QuerySet
        """
        qs = super().get_queryset().filter(Q(author=user) | Q(collaborators=user), favourite_for=user)
        return self._filter_with_query(qs, kwargs.get("query", "")).distinct()

    def student_favourites_for(self, user: get_user_model()) -> QuerySet:
        """
        Get the favourite entities in which the user is enrolled as a student.

        :param user: the user for which to get favourite followed entities
        :type user: get_user_model()

        :return: the QuerySet of favourite entities the user studies
        :rtype: QuerySet
        """
        return super().get_queryset().filter(favourite_for=user, students=user)
class BasicModelMixin(ObjectPermissionManagerMixin, models.Model):
"""
This is the basic model used in Course, Resource and Activity. This groups fields in common.
"""
@property
@abc.abstractmethod
def author(self) -> get_user_model():
    """
    Get the entity author.  Concrete subclasses define this explicitly
    (usually as a foreign key); declaring it here ensures that calling
    it from this class will not raise any syntax error.

    :return: the entity author
    :rtype: get_user_model()
    """
@property
@abc.abstractmethod
def collaborators(self) -> QuerySet:
    """
    Get the entity collaborators.  Concrete subclasses define this
    explicitly (usually via foreign keys); declaring it here ensures
    that calling it from this class will not raise any syntax error.

    :return: the entity collaborators
    :rtype: QuerySet
    """
# Display title of the entity.
name = models.CharField(
    max_length=255,
    verbose_name=_("Name"),
    help_text=_("A title that clearly indicates the theme you are writing about.")
)
description = models.TextField(
    blank=True,
    verbose_name=_("Description")
)
# NOTE(review): default=get_language() is evaluated once at import time,
# not per-instance — confirm this is the intended behaviour.
language = models.CharField(
    max_length=20,
    choices=get_translated_languages(),
    verbose_name=_("Language"),
    help_text=_("The language in which the entity is written in."),
    default=get_language()
)
# Keyword tagging via django-taggit.
tags = TaggableManager(
    help_text=_("A set of coma separated keywords that describe the theme and permits this content to be found by "
                "browsing or searching.")
)
# Users who bookmarked this entity; no reverse accessor is created.
favourite_for = models.ManyToManyField(
    get_user_model(),
    related_name="+",  # no reverse relation
    verbose_name=_("Favorite for users"),
)
"""
Auto-generated fields
"""
# Slug derived from the name (see slug_generator()).
slug = models.SlugField(unique=True)
# noinspection PyArgumentEqualDefault
published = models.DateTimeField(auto_now_add=True, auto_now=False, verbose_name=_("Published the…"))
# noinspection PyArgumentEqualDefault
updated = models.DateTimeField(auto_now_add=False, auto_now=True, verbose_name=_("Last updated the…"))
def slug_generator(self) -> str:
    """
    Get the attribute used to generate this entity's slug: its name.

    .. note:: See the generate_slug_for_model() function to know how it
        works; it is used in the save() method of models to dynamically
        generate their slugs.

    :return: the slug generator
    :rtype: str
    """
    return self.name
@property
@abc.abstractmethod
def object_collaborators(self):
    """
    The collaborators related name (as declared in Course, Activity or
    Resource).

    :return: The RelatedManager that references the object collaborators
    :rtype: RelatedManager
    """
    raise NotImplementedError()
@property
@abc.abstractmethod
def object_objectives(self):
    """
    The objectives related name (as declared in Course, Activity or
    Resource).

    :return: The RelatedManager that references the object objectives
    :rtype: RelatedManager
    """
    raise NotImplementedError()
@property
@abc.abstractmethod
def linked_objects(self) -> Generator["BasicModelMixin", None, None]:
    """
    The objects included in this one: a course has activities, while an
    activity has resources.

    :return: The Generator that references the included objects
    :rtype: Generator
    """
    raise NotImplementedError()
def add_collaborator(self, collaborator: get_user_model(), role: CollaboratorRole) -> "ObjectCollaboratorMixin":
"""
Add a collaborator on the object
:raises UserIsAlreadyAuthor: when the user is already the author on the object
:raises UserIsAlreadyCollaborator: when the user is already a collaborator on the object
:param collaborator: the collaborator to | |
+ str(e)
# prop = p_qdata_from_Q(round_count, q_num, author, Q, filename)
# size, block_num, _, _, _, num_lines = Q
# # 既存のエンティティを取り出す
# query = query_q_data(round_count=round_count, q_num=q_num, author=author)
# num = 0
# for ent in query.fetch(): # 存在するなら、高々、1個
# ent.update(prop)
# client.put(ent)
# num += 1
# if num == 0:
# msg = 'Not updated. You should have used POST instead of PUT?'
# elif num == 1:
# msg = 'Update OK'
# else:
# msg = 'Updated %d entities. May be internal system error' % num
# return True, msg, size, block_num, num_lines
def delete_user_Q_data(round_count: int, q_num: int, author: str) -> str:
    """Delete the question data identified by round, number and author.

    Parameters
    ----------
    round_count : int
        Round number.
    q_num : int
        Question number.
    author : str
        Author of the question.

    Returns
    -------
    str
        A human-readable message describing what was deleted.
    """
    query = query_q_data(round_count=round_count, q_num=q_num, author=author)
    deleted = 0
    # at most one matching entity should exist
    for entity in query.fetch():
        client.delete(entity.key)
        deleted += 1
    if deleted == 0:
        return 'DELETE None'
    if deleted == 1:
        return "DELETE /user/%s/Q/%d" % (author, q_num)
    return "DELETE /user/%s/Q/%d * %d times" % (author, q_num, deleted)
def get_Q_data(round_count: int, q_num: int) -> dict:
    """Fetch question *q_num* of round *round_count* from the datastore.

    Parameters
    ----------
    round_count : int
        Round number.
    q_num : int
        Question number (1-based).

    Returns
    -------
    dict or None
        The question entity as a dict, or None when the round has no
        question list, the number is out of range, or the entity is
        missing.
    """
    qla = admin_Q_list_get(round_count)
    if qla is None:
        return None
    # q_num is 1-based, list indices are 0-based
    index = q_num - 1
    q_key_list = qla['q_key_list']
    if index < 0 or len(q_key_list) <= index:
        return None
    entity = client.get(q_key_list[index])
    if entity is None:
        # BUG FIX: previously dict(None) raised TypeError when the
        # entity had been deleted out from under the question list.
        return None
    return dict(entity)
def create_all_Q_in_one(round_count: int) -> datastore.entity.Entity:
    """Bundle all question (Q) data of a round into one zip archive.

    The archive is stored as a 'q_zip' entity ({'date', 'zip'}) keyed by
    the round number, replacing any previous archive.

    Parameters
    ----------
    round_count : int
        Round number.

    Returns
    -------
    datastore.entity.Entity
        The stored 'q_zip' entity.
    """
    qla = admin_Q_list_get(round_count)
    buf = io.BytesIO()
    # context manager guarantees the archive's central directory is
    # written even if writestr raises (idiom fix: previously the file
    # and archive were closed manually)
    with zipfile.ZipFile(buf, mode='w', compression=zipfile.ZIP_DEFLATED) as archive:
        for i, qnum in enumerate(qla['qnum_list']):
            q = client.get(qla['q_key_list'][i])
            # q holds the normalised question text plus metadata such as
            # 'qnum', 'author', 'rows', 'cols' (see p_qdata_from_Q)
            archive.writestr(f'Q{qnum}.txt', q['text'])
    bin_data = buf.getvalue()
    buf.close()
    key = client.key('q_zip', round_count)
    qzip = datastore.Entity(key, exclude_from_indexes=['date', 'zip'])
    qzip.update({'date': datetime.utcnow(),
                 'zip': bin_data})
    client.put(qzip)
    return qzip
def get_all_Q_in_one(round_count: int) -> datastore.entity.Entity:
    """Return the zip archive entity containing all Q data of a round.

    Parameters
    ----------
    round_count : int
        Round number.

    Returns
    -------
    datastore.entity.Entity
        {'date': datetime, 'zip': bytes}, or None when no archive has
        been registered yet.
    """
    return client.get(client.key('q_zip', round_count))
def delete_all_Q_in_one(round_count: int):
    """Delete the zip archive entity holding all Q data of a round.

    Parameters
    ----------
    round_count : int
        Round number.
    """
    client.delete(client.key('q_zip', round_count))
def p_qdata_from_Q(round_count: int, q_num: int, author: str, Q: tuple, filename: str = '') -> dict:
    """Build the datastore property dict for a question.

    Parameters
    ----------
    round_count : int
        Round number.
    q_num : int
        Question number.
    author : str
        Name of the question's author.
    Q : tuple
        Return value of adc2019.read_Q().
    filename : str, default ''
        File name of the question data.

    Returns
    -------
    dict
        Property dict ready to be stored.
    """
    # Q = (size, block_num, block_size, block_data, block_type, num_lines)
    size, block_num, _, _, _, num_lines = Q
    # normalise the question text (line endings etc.)
    normalised_text = adc2019.generate_Q_data(Q)
    return {
        'round': round_count,        # int
        'qnum': q_num,               # int
        'text': normalised_text,     # string
        'blocknum': block_num,       # int
        'cols': size[0],             # int
        'rows': size[1],             # int
        'linenum': num_lines,        # int
        'author': author,            # string
        'filename': filename,        # string
        'date': datetime.utcnow(),
    }
def p_adata_from_A(round_count: int, a_num: int, owner: str, A: tuple, a_text: str, check_result: bool, quality: float, ainfo: dict):
    """Build the datastore property dict for an answer.

    Parameters
    ----------
    round_count : int
        Round number.
    a_num : int
        Question number the answer is for.
    owner : str
        User name.
    A : tuple or None
        Return value of adc2019.read_A(); None when there is no answer.
    a_text : str
        Raw answer text.
    check_result : bool
        True when the answer is correct.
    quality : float
        Quality of the solution.
    ainfo : dict
        Supplementary info; keys 'cpu_sec', 'mem_byte', 'misc_text'.

    Returns
    -------
    dict
        Property dict ready to be stored.
    """
    if A is None:
        size, ban_data, block_pos = [], [[]], []
    else:
        size, ban_data, block_pos = A
    size2 = list(size)
    # BUG FIX: when A is None, ban_data is a plain list and calling
    # .ravel() on it raised AttributeError; np.asarray accepts both the
    # ndarray and the placeholder list (and is a no-copy pass-through
    # for ndarrays).
    ban_data2 = np.asarray(ban_data).ravel().tolist()  # list (flatten)
    # block_pos[0] is a placeholder (blocks are 1-based); skip it
    block_pos2 = np.array(block_pos[1:]).ravel().tolist()  # list (flatten)
    return {'round': round_count,    # int
            'anum': a_num,           # int
            'text': a_text,          # string
            'owner': owner,          # string
            'size': size2,           # [int, int]
            'ban_data': ban_data2,   # flattened board cells
            'block_pos': block_pos2, # flattened block positions
            'judge': check_result,   # bool
            'quality': quality,      # float
            'ainfo': ainfo,          # dict
            'date': datetime.utcnow()}
# class QuestionListAll():
# """
# コンテスト用の、出題問題リスト。Repeated Propetiyにしてみた
# qs = ndb.KeyProperty(kind=Question, repeated=True)
# text_admin = ndb.StringProperty('a', indexed=False)
# text_user = ndb.StringProperty('u', indexed=False)
# date = ndb.DateTimeProperty(auto_now_add=True)
# """
# class Answer():
# """
# 回答データ
# anum = ndb.IntegerProperty(indexed=True)
# text = ndb.StringProperty(indexed=False)
# owner = ndb.StringProperty(indexed=True)
# date = ndb.DateTimeProperty(auto_now_add=True)
# # 回答データの補足情報
# cpu_sec = ndb.FloatProperty(indexed=False)
# mem_byte = ndb.IntegerProperty(indexed=False)
# misc_text = ndb.StringProperty(indexed=False)
# result = ndb.StringProperty() # 採点結果
# judge = ndb.IntegerProperty() # True=1=正解, False=0=不正解
# q_factor = ndb.FloatProperty() # 解の品質
# """
def log(username: str, what: str):
    """Record a log entry in the datastore.

    Parameters
    ----------
    username : str
        User name.
    what : str
        Log message.
    """
    entity = datastore.Entity(key=client.key('log'),
                              exclude_from_indexes=['what'])
    entity.update({'username': username,
                   'what': what,
                   'timestamp': datetime.utcnow()})
    client.put(entity)
def log_get_or_delete(username=None, fetch_num=100, when=None, delete=False):
    """Fetch — and optionally delete — log entries, newest first.

    Parameters
    ----------
    username : str, optional
        Restrict to entries of this user.
    fetch_num : int, default 100
        Maximum number of entries to fetch.
    when : timedelta, optional
        Restrict to entries newer than now - when.
    delete : bool, default False
        When True, delete every fetched entry.

    Returns
    -------
    list of dict
        Each with keys 'date' (JST string), 'username' and 'what'.
    """
    query = client.query(kind='log')
    query.order = ['-timestamp']
    if username:
        query.add_filter('username', '=', username)
    if when:
        cutoff = datetime.utcnow() - when
        query.add_filter('timestamp', '>', cutoff)
    results = []
    for entity in query.fetch(limit=fetch_num):
        results.append({'date': gae_datetime_JST(entity['timestamp']),
                        'username': entity['username'],
                        'what': entity['what']})
        if delete:
            client.delete(entity.key)
    return results
"""
Time keeper
時計の時刻に基づいて、状態遷移させる。
"""
def timekeeper_key() -> datastore.key.Key:
    """Return the Cloud Datastore key of the time keeper entity.

    - kind: 'clock'
    - name/id: 1 (arbitrary — there is only one time keeper)
    """
    return client.key('clock', 1)
def timekeeper_prop(dt: datetime = None, state: str = 'init', enabled: int = 1, round_counter: int = 1, test_mode: bool = True, view_score_mode: bool = True, log_to_datastore: bool = False) -> dict:
    """Build the property dict of the time keeper entity (kind 'clock').

    Parameters
    ----------
    dt : datetime, default None
        Last update time; datetime.utcnow() when None.
    state : str, default 'init'
        One of ['init', 'im0', 'Qup', 'im1', 'Aup', 'im2'].
    enabled : int, default 1
        0 = disabled, 1 = enabled.
    round_counter : int, default 1
        Round counter (1, 2, ...).
    test_mode : bool, default True
        appconfig.TEST_MODE.
    view_score_mode : bool, default True
        appconfig.VIEW_SCORE_MODE.
    log_to_datastore : bool, default False
        appconfig.LOG_TO_DATASTORE.

    Returns
    -------
    dict
    """
    assert adcutil.valid_state(state)
    return {'lastUpdate': dt if dt is not None else datetime.utcnow(),
            'state': state,
            'enabled': enabled,
            'round': round_counter,
            'test_mode': test_mode,
            'view_score_mode': view_score_mode,
            'log_to_datastore': log_to_datastore}
def timekeeper_clk() -> datastore.entity.Entity:
    """Return the 'clock' entity, creating it with defaults when absent.

    The read-check-create sequence runs inside a transaction so two
    concurrent callers cannot both create the entity.

    Returns
    -------
    datastore.entity.Entity
    """
    key = timekeeper_key()
    with client.transaction():
        clk = client.get(key)
        if clk is None:
            clk = datastore.Entity(key=key)
            clk.update(timekeeper_prop())
            client.put(clk)
    return clk
def timekeeper_check() -> (str, str):
    """Advance the time-keeper state machine based on the wall clock.

    When the time keeper is enabled, compares the previous update time
    with the current time and performs a state transition when the time
    slot changed.

    Returns
    -------
    new_state : str
    old_state : str
    """
    clk = timekeeper_clk()
    with client.transaction():
        if clk['enabled'] == 0:
            # frozen: report the current state unchanged
            return clk['state'], clk['state']
        now = datetime.utcnow()
        same_slot, new_state = timekeeper_transition(clk['lastUpdate'], now, clk['state'])
        old_state = clk['state']
        if not same_slot or old_state != new_state:
            clk['lastUpdate'] = now
            clk['state'] = new_state
            client.put(clk)
            logging.debug('TimeKeeper: state change: %s', str(clk))
    return new_state, old_state
def timekeeper_enabled(new_value: int = None) -> int:
    """Get or set the time keeper's 'enabled' flag.

    Parameters
    ----------
    new_value : int, default None
        None to read the current value; otherwise the value to store
        (any non-zero value is normalised to 1).

    Returns
    -------
    int
        The enabled value now in effect.
    """
    clk = timekeeper_clk()
    if new_value is None:
        return clk['enabled']
    enabled = 0 if new_value == 0 else 1
    if enabled != clk['enabled']:
        clk['enabled'] = enabled
        clk['lastUpdate'] = datetime.utcnow()
        client.put(clk)
    return enabled
def timekeeper_state(new_value: str = None) -> str:
    """Get or set the time keeper's state.

    Parameters
    ----------
    new_value : str, default None
        None to read the current value; otherwise the state to store.
        Invalid states are silently ignored.

    Returns
    -------
    str
        The state now in effect.
    """
    clk = timekeeper_clk()
    if new_value is None:
        return clk['state']
    if adcutil.valid_state(new_value) and new_value != clk['state']:
        clk['state'] = new_value
        clk['lastUpdate'] = datetime.utcnow()
        client.put(clk)
    return clk['state']
def timekeeper_round(new_value: int = None) -> int:
    """Get or set the time keeper's round counter.

    Parameters
    ----------
    new_value : int, default None
        None to read the current value; otherwise the value to store.

    Returns
    -------
    int
        The round counter now in effect.
    """
    clk = timekeeper_clk()
    if new_value is None:
        return clk.get('round')
    if new_value != clk.get('round'):
        clk['round'] = new_value
        clk['lastUpdate'] = datetime.utcnow()
        client.put(clk)
    return clk.get('round')
def timekeeper_mode_common(key: str, new_value: bool = None) -> bool:
    """Get or set one of the time keeper's boolean flags.

    Parameters
    ----------
    key : str
        One of 'test_mode', 'view_score_mode', 'log_to_datastore'.
    new_value : bool, default None
        None to read the current value; otherwise the value to store.

    Returns
    -------
    bool
        The value now stored under *key*.
    """
    clk = timekeeper_clk()
    if new_value is None:
        return clk.get(key)
    if new_value != clk.get(key):
        clk[key] = new_value
        clk['lastUpdate'] = datetime.utcnow()
        client.put(clk)
    return clk.get(key)
def timekeeper_test_mode(new_value: bool = None) -> bool:
    """Get or set the time keeper's test_mode flag.

    Parameters
    ----------
    new_value : bool, default None
        None to read the current value; otherwise the value to store.

    Returns
    -------
    bool
        The test_mode value now in effect.
    """
    return timekeeper_mode_common('test_mode', new_value)
def timekeeper_view_score_mode(new_value: bool = None) -> bool:
"""
timekeeperのview_score_modeの値を、取得する、または、設定する。
Parameters
----------
new_value : bool, default None
Noneのときは、値を取得する。
Noneでないときは、値を設定する。
| |
[ \
[[0, 0], [0, 1], [0, 2], [0, 3], [0, 4]], \
[[0, 0], [0, 1], [0, 2], [1, 2], [1, 3]], \
[[0, 0], [0, 1], [1, 1], [1, 2], [1, 3], [2, 3]], \
[[0, 0], [0, 1], [1, 1], [1, 2], [2, 2], [2, 3]], \
[[0, 0], [1, 1], [2, 2]], \
[[0, 0], [1, 0], [1, 1], [2, 1], [2, 2], [3, 2]], \
[[0, 0], [1, 0], [1, 1], [2, 1], [3, 1], [3, 2]], \
[[0, 0], [1, 0], [2, 0], [2, 1], [3, 1]], \
[[0, 0], [1, 0], [2, 0], [3, 0], [4, 0]], \
[[0, 0], [1, 0], [2, 0], [2,-1], [3,-1]], \
[[0, 0], [1, 0], [1,-1], [2,-1], [3,-1], [3,-2]], \
[[0, 0], [1, 0], [1,-1], [2,-1], [3,-1], [3,-2]], \
[[0, 0], [1,-1], [2,-2]], \
[[0, 0], [0,-1], [1,-1], [1,-2], [2,-2], [2,-3]], \
[[0, 0], [0,-1], [1,-1], [1,-2], [1,-3], [2,-3]], \
[[0, 0], [0,-1], [0,-2], [1,-2], [1,-3]], \
[[0, 0], [0,-1], [0,-2], [0,-3], [0,-4]], \
[[0, 0], [0,-1], [0,-2], [-1,-2], [-1,-3]], \
[[0, 0], [0,-1], [-1,-1], [-1,-2], [-1,-3], [-2, -3]], \
[[0, 0], [0,-1], [-1,-1], [-1,-2], [-2,-2], [-2, -3]], \
[[0, 0], [-1,-1], [-2,-2]], \
[[0, 0], [-1, 0], [-1,-1], [-2,-1], [-2,-2], [-3,-2]], \
[[0, 0], [-1, 0], [-1,-1], [-2,-1], [-3,-1], [-3,-2]], \
[[0, 0], [-1, 0], [-2, 0], [-2,-1], [-3,-1]], \
[[0, 0], [-1, 0], [-2, 0], [-3, 0], [-4, 0]], \
[[0, 0], [-1, 0], [-2, 0], [-2, 1], [-3, 1]], \
[[0, 0], [-1, 0], [-1, 1], [-2, 1], [-3, 1], [-3, 2]], \
[[0, 0], [-1, 0], [-1, 1], [-2, 1], [-2, 2], [-3, 2]], \
[[0, 0], [-1, 1], [-2, 2]], \
[[0, 0], [0, 1], [-1, 1], [-1, 2], [-2, 2], [-2, 3]], \
[[0, 0], [0, 1], [-1, 1], [-1, 2], [-1, 3], [-2, 3]], \
[[0, 0], [0, 1], [ 0, 2], [-1, 2], [-1, 3]], \
]
for (O,E), ID, P in zip(origin_and_end, cell_indices, cell_points):
for i, p in zip(ID, P):
computed = sqc.active_pixel_index(O, p, E)
message = 'Point ' + str(p) + ' in ' + str((O, E)) + \
' expected at ' + str(i) + ' but computed at ' + \
str(computed) + ': ' + str(zip(P, ID))
assert i == computed, message
def test_visibility_basic_array(self):
return
DEM_size = 31
elevation = np.zeros((DEM_size, DEM_size))
nodata = -1
viewpoint = (DEM_size/2, DEM_size/2)
elevation[viewpoint[0]+1, viewpoint[1]+1] = 2.
obs_elev = 1.0
tgt_elev = 0.0
max_dist = 4
cell_size = 5.0
refraction_coeff = 0.13
#alg_version = 'python'
alg_version = 'cython'
visibility = sqc.compute_viewshed(elevation, nodata, viewpoint, \
obs_elev, tgt_elev, max_dist, cell_size, refraction_coeff, \
alg_version)
visibility[visibility > 0] = 1
visibility[visibility < 0] = 0
visibility[DEM_size/2, DEM_size/2] = 2
#print(visibility.astype(int))
def test_cython_vs_python_on_default_1_pt_data(self):
return
args_uri = "../../ScenicQuality/tests/default-1-pt/run_parameters_default-1-pt.json"
with open(args_uri) as args_file:
args = json.load(args_file)
sq.execute(args)
reference_uri = "../../ScenicQuality/tests/default-1-pt/python/output/vshed.tif"
reference_raster = gdal.Open(reference_uri)
message = "Cannot open " + reference_uri
assert reference_raster is not None, message
reference_band = reference_raster.GetRasterBand(1)
reference_array = reference_band.ReadAsArray()
computed_uri = "../../ScenicQuality/tests/default-1-pt/cython/output/vshed.tif"
computed_raster = gdal.Open(computed_uri)
message = "Cannot open " + computed_uri
assert computed_raster is not None, message
computed_band = computed_raster.GetRasterBand(1)
computed_array = computed_band.ReadAsArray()
difference = np.sum(np.absolute(reference_array - computed_array))
message = "Computed viewshed " + computed_uri + \
" doesn't correspond to " + reference_uri + '. diff = ' + \
str(difference)
assert difference == 0.0, message
def test_cython_vs_python_on_default_data(self):
return
args_uri = "../../ScenicQuality/tests/default-data/run_parameters_default-data.json"
with open(args_uri) as args_file:
args = json.load(args_file)
sq.execute(args)
reference_uri = "../../ScenicQuality/tests/default-data/python/output/vshed.tif"
reference_raster = gdal.Open(reference_uri)
message = "Cannot open " + reference_uri
assert reference_raster is not None, message
reference_band = reference_raster.GetRasterBand(1)
reference_array = reference_band.ReadAsArray()
computed_uri = "../../ScenicQuality/tests/default-data/cython/output/vshed.tif"
computed_raster = gdal.Open(computed_uri)
message = "Cannot open " + computed_uri
assert computed_raster is not None, message
computed_band = computed_raster.GetRasterBand(1)
computed_array = computed_band.ReadAsArray()
difference = np.sum(np.absolute(reference_array - computed_array))
message = "Computed viewshed " + computed_uri + \
" doesn't correspond to " + reference_uri + '. diff = ' + \
str(difference)
assert difference == 0.0, message
def test_cython_vs_python_on_block_island(self):
return
#args_uri = "../../ScenicQuality/tests/block-island/run_parameters_block-island_10m_1pt.json"
#args_uri = "../../ScenicQuality/tests/block-island/run_parameters_block-island_10m.json"
#args_uri = "../../ScenicQuality/tests/block-island/run_parameters_block-island_50m_year_round_houses.json"
args_uri = "../../ScenicQuality/tests/block-island/run_parameters_block-island.json"
with open(args_uri) as args_file:
args = json.load(args_file)
for entry in args:
print('entry', entry, args[entry], type(args[entry]))
sq.execute(args)
reference_uri = "../../ScenicQuality/tests/block-island/python/output/vshed.tif"
reference_raster = gdal.Open(reference_uri)
message = "Cannot open " + reference_uri
assert reference_raster is not None, message
reference_band = reference_raster.GetRasterBand(1)
reference_array = reference_band.ReadAsArray()
computed_uri = "../../ScenicQuality/tests/block-island/cython/output/vshed.tif"
computed_raster = gdal.Open(computed_uri)
message = "Cannot open " + computed_uri
assert computed_raster is not None, message
computed_band = computed_raster.GetRasterBand(1)
computed_array = computed_band.ReadAsArray()
difference = np.sum(np.absolute(reference_array - computed_array))
message = "Computed viewshed " + computed_uri + \
" doesn't correspond to " + reference_uri + '. diff = ' + \
str(difference)
assert difference == 0.0, message
    def test_distance_on_block_island(self):
        """Compare a distance-weighted viewshed against the stored
        cython_distance reference, tolerating relative differences
        below 1e-7.
        """
        # Disabled: remove this return to run the comparison.
        return
        args_uri = "../../ScenicQuality/tests/block-island/run_parameters_block-island_distance.json"
        with open(args_uri) as args_file:
            args = json.load(args_file)
        # Echo the run parameters for debugging before launching the model.
        for entry in args:
            print('entry', entry, args[entry], type(args[entry]))
        sq.execute(args)
        reference_uri = "../../ScenicQuality/tests/block-island/cython_distance/output/vshed.tif"
        reference_raster = gdal.Open(reference_uri)
        message = "Cannot open " + reference_uri
        assert reference_raster is not None, message
        reference_band = reference_raster.GetRasterBand(1)
        reference_array = reference_band.ReadAsArray()
        computed_uri = "../../ScenicQuality/tests/block-island/cython/output/vshed.tif"
        # NOTE(review): opened with GA_Update although nothing is written
        # back here — presumably left over from debugging; confirm.
        computed_raster = gdal.Open(computed_uri, gdal.GA_Update)
        message = "Cannot open " + computed_uri
        assert computed_raster is not None, message
        computed_band = computed_raster.GetRasterBand(1)
        computed_array = computed_band.ReadAsArray()
        # Per-pixel absolute difference, normalized in place to a relative
        # difference at exactly the pixels that differ at all.
        difference = np.absolute(reference_array - computed_array)
        differences = np.where(difference)
        difference[differences] /= reference_array[differences]
        # Keep only the relative differences above the 1e-7 tolerance.
        significant = np.where(difference[differences] > 1e-7)
        differences = (differences[0][significant], differences[1][significant])
        print('significant', significant[0].size, difference[differences])
        # Sum of the significant relative differences must be exactly zero.
        difference = np.sum(difference[differences])
        ref_sum = np.sum(reference_array)
        comp_sum = np.sum(computed_array)
        message = "Computed viewshed " + computed_uri + ' ' + str(comp_sum) + \
            " doesn't correspond to " + reference_uri + ' ' + str(ref_sum) + \
            '. ' + str(differences) + ' differences = ' + str(difference)
        assert difference == 0.0, message
def test_polynomial_valuation_on_block_island(self):
#return
args_uri = "../../ScenicQuality/tests/block-island/run_parameters_block-island_polynomial.json"
with open(args_uri) as args_file:
args = json.load(args_file)
for entry in args:
print('entry', entry, args[entry], type(args[entry]))
sq.execute(args)
reference_uri = "../../ScenicQuality/tests/block-island/cython_polynomial/output/vshed.tif"
reference_raster = gdal.Open(reference_uri)
message = "Cannot open " + reference_uri
assert reference_raster is not None, message
reference_band = reference_raster.GetRasterBand(1)
reference_array = reference_band.ReadAsArray()
computed_uri = "../../ScenicQuality/tests/block-island/cython/output/vshed.tif"
computed_raster = gdal.Open(computed_uri, gdal.GA_Update)
message = "Cannot open " + computed_uri
assert computed_raster is not None, message
computed_band = computed_raster.GetRasterBand(1)
computed_array = computed_band.ReadAsArray()
difference = np.sum(np.absolute(reference_array - computed_array))
#if difference:
# computed_band.WriteArray(reference_array - computed_array)
message = "Computed viewshed " + computed_uri + \
" doesn't correspond to " + reference_uri + '. diff = ' + \
str(difference)
assert difference == 0.0, message
def test_logarithmic_valuation_on_block_island(self):
return
args_uri = "../../ScenicQuality/tests/block-island/run_parameters_block-island_log.json"
with open(args_uri) as args_file:
args = json.load(args_file)
for entry in args:
print('entry', entry, args[entry], type(args[entry]))
sq.execute(args)
reference_uri = "../../ScenicQuality/tests/block-island/cython_log/output/vshed.tif"
reference_raster = gdal.Open(reference_uri)
message = "Cannot open " + reference_uri
assert reference_raster is not None, message
reference_band = reference_raster.GetRasterBand(1)
reference_array = reference_band.ReadAsArray()
computed_uri = "../../ScenicQuality/tests/block-island/cython/output/vshed.tif"
computed_raster = gdal.Open(computed_uri)
message = "Cannot open " + computed_uri
assert computed_raster is not None, message
computed_band = computed_raster.GetRasterBand(1)
computed_array = computed_band.ReadAsArray()
difference = np.sum(np.absolute(reference_array - computed_array))
message = "Computed viewshed " + computed_uri + \
" doesn't correspond to " + reference_uri + '. diff = ' + \
str(difference)
assert difference == 0.0, message
def test_visibility_simple_obstacles(self):
return
obs_elev = 1.0
tgt_elev = 0.0
max_dist = -1.0
coefficient = 1.0
height = 0.0
refraction_coeff = 0.13
base_dem_uri = "../../AQ_Rob/Block_Island_fast_alg/SQ/bi_100meters/hdr.adf"
base_dem_nodata = raster_utils.get_nodata_from_uri(base_dem_uri)
raster = gdal.Open(base_dem_uri)
band = raster.GetRasterBand(1)
base_array = band.ReadAsArray()
(rows, cols) = base_array.shape
band = None
raster = None
cell_size = raster_utils.get_cell_size_from_uri(base_dem_uri)
GT = raster_utils.get_geotransform_uri(base_dem_uri)
iGT = gdal.InvGeoTransform(GT)[1]
flat_dem_uri = "flat_dem.tif"
structure_uri = "../../AQ_Rob/Block_Island_fast_alg/SQ/1_pt/e911_132.shp"
shapefile = ogr.Open(structure_uri)
assert shapefile is not None
layer = shapefile.GetLayer(0)
assert layer is not None
feature = layer.GetFeature(0)
field_count = feature.GetFieldCount()
# Check for feature information (radius, coeff, height)
for field in range(field_count):
field_def = feature.GetFieldDefnRef(field)
field_name = field_def.GetNameRef()
if (field_name.upper() == 'RADIUS2') or \
(field_name.upper() == 'RADIUS'):
max_dist = abs(int(feature.GetField(field)))
assert max_dist is not None, "max distance can't be None"
max_dist = int(max_dist/cell_size)
if field_name.lower() == 'coeff':
coefficient = float(feature.GetField(field))
assert coefficient is not None, "feature coeff can't be None"
if field_name.lower() == 'offseta':
obs_elev = float(feature.GetField(field))
assert obs_elev is | |
<gh_stars>0
__author__ = '<NAME>'
"""
Intended for processing of 80s monosome-seq data from defined RNA pools
Based on <NAME>'s original RBNS pipeline, available on github
"""
import matplotlib.pyplot as plt
plt.rcParams['pdf.fonttype'] = 42 #leaves most text as actual text in PDFs, not outlines
import os
import argparse
import subprocess
import ms_settings
import ms_utils
import ms_lib
import ms_qc
import ms_plotting
from collections import defaultdict
import numpy as np
import scipy.stats as stats
class mse:
    """Driver for the monosome-seq analysis pipeline.

    Construction runs the heavy stages eagerly and in order: adaptor
    trimming, reference-pool trimming, bowtie2 index build, read mapping,
    and library initialization.  Table, plot, and QC generation are then
    invoked explicitly via make_tables()/make_plots()/perform_qc().
    """
    def __init__(self, settings, threads):
        # settings: an ms_settings object; threads: max worker processes.
        self.threads = threads
        self.settings = settings
        # Pipeline stages, in dependency order.
        self.remove_adaptor()
        self.trim_reference_pool_fasta()
        self.build_bowtie_index()
        self.map_reads()
        self.initialize_libs()
    def initialize_libs(self):
        """Count reads per library and bucket libraries by sample role."""
        self.settings.write_to_log('initializing libraries, counting reads')
        ms_utils.make_dir(self.rdir_path('sequence_counts'))
        self.libs = []
        ms_utils.parmap(lambda lib_settings: ms_lib.initialize_pool_sequence_mappings(self.settings, lib_settings),
                        self.settings.iter_lib_settings(), nprocs=self.threads)
        # NOTE(review): map() is lazy under Python 3, so this line would be a
        # no-op there — written for Python 2 semantics; confirm before porting.
        map(lambda lib_settings: self.initialize_lib(lib_settings), self.settings.iter_lib_settings())
        self.settings.write_to_log('initializing libraries, counting reads, done')
        self.monosome_libs = [self.find_lib_by_sample_name(sample_name) for
                              sample_name in self.settings.get_property('monosome_libraries')]
        self.mrnp_libs = [self.find_lib_by_sample_name(sample_name) for
                          sample_name in self.settings.get_property('mrnp_libraries')]
        self.total_libs = [self.find_lib_by_sample_name(sample_name) for
                           sample_name in self.settings.get_property('total_libraries')]
        # NOTE(review): input_libs is built from 'total_libraries', identical
        # to total_libs above — looks like a copy/paste slip; should this read
        # 'input_libraries'?  Confirm against the settings schema.
        self.input_libs = [self.find_lib_by_sample_name(sample_name) for
                           sample_name in self.settings.get_property('total_libraries')]
    def find_lib_by_sample_name(self, sample_name):
        """Return the library whose settings carry this sample name."""
        for lib in self.libs:
            if lib.lib_settings.sample_name == sample_name:
                return lib
        assert False #if this triggers, your settings file is broken.
    def initialize_lib(self, lib_settings):
        # Wrap one lib_settings in an ms_Lib and register it.
        lib = ms_lib.ms_Lib(self.settings, lib_settings)
        self.libs.append(lib)
    def needs_calculation(self, lib_settings, count_type, k):
        """True when counts for (count_type, k) are missing or a recount is forced."""
        if self.settings.get_force_recount(count_type):
            return True
        return not lib_settings.counts_exist(count_type, k)
    def make_tables(self):
        """Write all output tables (counts, recruitment, matched-set changes)."""
        ms_utils.make_dir(self.rdir_path('tables'))
        self.make_counts_table()
        self.make_counts_table(fractional=True)
        self.make_monosome_recruitment_table()
        # Fasta subsets at several recruitment-score cutoffs.
        self.write_sequence_subset(0, read_cutoff=128)
        self.write_sequence_subset(0.8, read_cutoff=128)
        self.write_sequence_subset(0.7, read_cutoff=128)
        for anno_filename in self.settings.get_property('matched_set_annotations'):
            self.make_matched_recruitment_change_table(anno_filename,
                                                       read_cutoff=self.settings.get_property('comparison_read_cutoff'))
    def make_plots(self):
        """Render the recruitment plots (most plot types currently disabled)."""
        ms_utils.make_dir(self.rdir_path('plots'))
        #ms_plotting.all_library_rpm_scatter(self)
        #ms_plotting.monosome_over_mrnp_reproducibility(self)
        #ms_plotting.monosome_over_total_reproducibility(self)
        #ms_plotting.monosome_over_mrnp_plus_monosome_reproducibility(self)
        for anno_filename in self.settings.get_property('matched_set_annotations'):
            ms_plotting.plot_recruitment_violins(self, anno_filename,
                                                 read_cutoff=self.settings.get_property('comparison_read_cutoff'))
        '''
        ms_plotting.recruitment_change_rank_value_plot_static(self, anno_filename,
                                                              read_cutoff=self.settings.get_property('comparison_read_cutoff'))
        ms_plotting.reverse_recruitment_change_rank_value_plot_static(self, anno_filename,
                                                                      read_cutoff=self.settings.get_property('comparison_read_cutoff'))
        if self.settings.get_property('make_interactive_plots'):
            ms_plotting.recruitment_change_rank_value_plot_interactive(self, anno_filename,
                                                                       read_cutoff=self.settings.get_property('comparison_read_cutoff'))
            ms_plotting.recruitment_fold_change_rank_value_plot_interactive(self, anno_filename,
                                                                            read_cutoff=self.settings.get_property(
                                                                                'comparison_read_cutoff'))
        '''
    def remove_adaptor(self):
        """Trim adaptors from all libraries, skipping when outputs exist."""
        if not self.settings.get_property('force_retrim'):
            # for/else: return early only when EVERY library already has
            # adaptorless reads (loop completes without break).
            for lib_settings in self.settings.iter_lib_settings():
                if not lib_settings.adaptorless_reads_exist():
                    break
            else:
                return
        if self.settings.get_property('trim_adaptor'):
            ms_utils.make_dir(self.rdir_path('adaptor_removed'))
            ms_utils.parmap(lambda lib_setting: self.remove_adaptor_one_lib(lib_setting), self.settings.iter_lib_settings(), nprocs = self.threads)
    def remove_adaptor_one_lib(self, lib_settings):
        """Run cutadapt on one library's paired fastq.gz files."""
        lib_settings.write_to_log('adaptor trimming')
        """
        -a specifies the 3' adaptor to trim from the forawrd read (read1)
        -G specifies the 5' adaptor to trim from the reverse read (read2)
        -o is the read1 output file
        -p is the read2 output file
        """
        # Include -G only when a read2 5' adaptor was configured.
        if not self.settings.get_property('read2_5p_adaptor_sequence').strip()=='':
            command_to_run = 'cutadapt -a %s -G %s --overlap 5 -u %d -U %d -q %d --trim-n --minimum-length %d --pair-filter=both -o %s -p %s %s %s 1>>%s 2>>%s' % (
                self.settings.get_property('read1_3p_adaptor_sequence'), self.settings.get_property('read2_5p_adaptor_sequence'),
                self.settings.get_property('read1_5p_bases_to_trim'), self.settings.get_property('read2_5p_bases_to_trim'),
                self.settings.get_property('quality_cutoff'), self.settings.get_property('min_post_adaptor_length'),
                lib_settings.get_adaptor_trimmed_reads()[0], lib_settings.get_adaptor_trimmed_reads()[1],
                lib_settings.get_paired_fastq_gz_files()[0], lib_settings.get_paired_fastq_gz_files()[1],
                lib_settings.get_log(), lib_settings.get_log())
        else:
            command_to_run = 'cutadapt -a %s --overlap 5 -u %d -U %d -q %d --trim-n --minimum-length %d --pair-filter=both -o %s -p %s %s %s 1>>%s 2>>%s' % (
                self.settings.get_property('read1_3p_adaptor_sequence'),
                self.settings.get_property('read1_5p_bases_to_trim'), self.settings.get_property('read2_5p_bases_to_trim'),
                self.settings.get_property('quality_cutoff'), self.settings.get_property('min_post_adaptor_length'),
                lib_settings.get_adaptor_trimmed_reads()[0], lib_settings.get_adaptor_trimmed_reads()[1],
                lib_settings.get_paired_fastq_gz_files()[0], lib_settings.get_paired_fastq_gz_files()[1],
                lib_settings.get_log(), lib_settings.get_log())
        subprocess.Popen(command_to_run, shell=True).wait()
        lib_settings.write_to_log('adaptor trimming done')
    def build_bowtie_index(self):
        """
        builds a bowtie 2 index from the input fasta file
        recommend including barcode+PCR sequences just in case of some no-insert amplicons
        """
        self.settings.write_to_log('building bowtie index')
        if self.settings.get_property('force_index_rebuild') or not self.settings.bowtie_index_exists():
            ms_utils.make_dir(self.rdir_path('bowtie_indices'))
            # NOTE(review): 'index' is assigned but never used below.
            index = self.settings.get_bowtie_index()
            subprocess.Popen('bowtie2-build -f --offrate 0 %s %s 1>>%s 2>>%s' % (self.settings.get_trimmed_pool_fasta(),
                                                                                 self.settings.get_bowtie_index(), self.settings.get_log()+'.bwt',
                                                                                 self.settings.get_log()+'.bwt'), shell=True).wait()
        self.settings.write_to_log('building bowtie index complete')
    def trim_reference_pool_fasta(self):
        '''
        Trims the reference sequences to the length of the trimmed reads + a buffer
        '''
        trim_5p = self.settings.get_property('pool_5p_bases_to_trim') #nucleotides to cut from 5' end
        trim_3p = self.settings.get_property('pool_3p_bases_to_trim') #nucleotides to cut from 3' end
        f = open(self.settings.get_property('pool_fasta'))
        g = open(self.settings.get_trimmed_pool_fasta(), 'w')
        for line in f:
            if not line.strip() == '' and not line.startswith('#'):#ignore empty lines and commented out lines
                if line.startswith('>'):#> marks the start of a new sequence
                    g.write(line)
                else:
                    # Trim both ends, then re-wrap with the configured
                    # prepend/append sequences (e.g. adapter context).
                    g.write(self.settings.get_property('pool_prepend')+line.strip()[trim_5p:len(line.strip())-trim_3p]+self.settings.get_property('pool_append')+'\n')
        f.close()
        g.close()
    def map_reads(self):
        """
        map all reads using bowtie
        :return:
        """
        self.settings.write_to_log('mapping reads')
        if not self.settings.get_property('force_remapping'):
            # for/else: skip mapping only when every library already has
            # mapped reads on disk.
            for lib_settings in self.settings.iter_lib_settings():
                if not lib_settings.mapped_reads_exist():
                    break
            else:
                return
        ms_utils.make_dir(self.rdir_path('mapped_reads'))
        ms_utils.make_dir(self.rdir_path('mapping_stats'))
        ms_utils.make_dir(self.rdir_path('unmapped_reads'))
        ms_utils.parmap(lambda lib_setting: self.map_one_library(lib_setting), self.settings.iter_lib_settings(),
                        nprocs = self.threads)
        self.settings.write_to_log( 'finished mapping reads')
    def map_one_library(self, lib_settings):
        """Map one library with bowtie2, then sort/index the BAM via samtools."""
        lib_settings.write_to_log('mapping_reads')
        subprocess.Popen('bowtie2 -q --very-sensitive-local --norc --no-mixed --no-overlap --no-discordant -t -x %s -p %d -1 %s -2 %s --un-conc-gz %s -S %s 1>> %s 2>>%s' % (self.settings.get_bowtie_index(), self.threads,
                                                                                                                                                                            lib_settings.get_adaptor_trimmed_reads()[0], lib_settings.get_adaptor_trimmed_reads()[1], lib_settings.get_unmappable_reads_prefix(), lib_settings.get_mapped_reads_sam(),
                                                                                                                                                                            lib_settings.get_log(), lib_settings.get_pool_mapping_stats()), shell=True).wait()
        #subprocess.Popen('samtools view -b -h -o %s %s 1>> %s 2>> %s' % (lib_settings.get_mapped_reads(), lib_settings.get_mapped_reads_sam(), lib_settings.get_log(), lib_settings.get_log()), shell=True).wait()
        #also, sort bam file, and make an index
        #samtools view -uS myfile.sam | samtools sort - myfile.sorted
        subprocess.Popen('samtools view -uS %s | samtools sort - %s.temp_sorted 1>>%s 2>>%s' % (lib_settings.get_mapped_reads_sam(), lib_settings.get_mapped_reads_sam(),
                                                                                                lib_settings.get_log(), lib_settings.get_log()), shell=True).wait()
        #subprocess.Popen('samtools sort %s %s.temp_sorted 1>>%s 2>>%s' % (lib_settings.get_mapped_reads_sam(), lib_settings.get_mapped_reads_sam(),
        #                                                                  lib_settings.get_log(), lib_settings.get_log()), shell=True).wait()
        subprocess.Popen('mv %s.temp_sorted.bam %s' % (lib_settings.get_mapped_reads_sam(),
                                                       lib_settings.get_mapped_reads()), shell = True).wait()
        subprocess.Popen('samtools index %s' % (lib_settings.get_mapped_reads()), shell = True).wait()
        subprocess.Popen('rm %s' % (lib_settings.get_mapped_reads_sam()), shell = True).wait()
        lib_settings.write_to_log('mapping_reads done')
    def rdir_path(self, *args):
        # Join path components under the results directory.
        return os.path.join(self.settings.get_rdir(), *args)
    def get_rdir_fhandle(self, *args):
        """
        returns a filehandle to the fname in the rdir
        """
        out_path = self.rdir_path(*args)
        out_dir = os.path.dirname(out_path)
        if not os.path.exists(out_dir):
            os.makedirs(out_dir)
        return ms_utils.aopen(out_path, 'w')
    def perform_qc(self):
        """Run the QC engine and emit its summary tables and plots."""
        qc_engine = ms_qc.ms_qc(self, self.settings, self.threads)
        qc_engine.write_mapping_summary(self.settings.get_overall_mapping_summary())
        qc_engine.print_library_count_concordances()
        qc_engine.plot_average_read_positions()
        qc_engine.plot_fragment_length_distributions()
        qc_engine.plot_count_distributions()
        qc_engine.read_cutoff_choice_plot()
    def make_counts_table(self, fractional=False):
        """
        write out number of fragments mapping to each TL in each dataset
        :param fractional: if True, replace raw counts with library fraction in reads per million
        :return:
        """
        if fractional:
            summary_file = open(os.path.join(
                self.rdir_path('tables'),
                'rpm.txt'), 'w')
        else:
            summary_file = open(os.path.join(
                self.rdir_path('tables'),
                'raw_counts.txt'), 'w')
        header = 'sequence name\t' + '\t'.join([lib.lib_settings.sample_name for lib in self.libs]) + '\n'
        summary_file.write(header)
        if fractional:
            for sequence_name in self.libs[0].pool_sequence_mappings:
                # reads-per-million: counts scaled by 1e6 / total mapped.
                out_line = '%s\t%s\n' % (sequence_name,
                                         '\t'.join(['%f' % ((10**6)*lib.pool_sequence_mappings[sequence_name].fragment_count/float(lib.total_mapped_fragments)) for lib in self.libs]))
                summary_file.write(out_line)
        else:
            for sequence_name in self.libs[0].pool_sequence_mappings:
                out_line = '%s\t%s\n' % (sequence_name,
                                         '\t'.join(['%f' %
                                                    lib.pool_sequence_mappings[sequence_name].fragment_count
                                                    for lib in self.libs]))
                summary_file.write(out_line)
        summary_file.close()
    def make_monosome_recruitment_table(self, read_cutoff=128):
        """
        write out 80S recruitment metric for each TL in each replicate
        :param read_cutoff: require this many read between mRNP and monosome to include this TL.
        :return:
        """
        output_file = open(os.path.join(
            self.rdir_path('tables'),
            'monosome_recruitment.txt'), 'w')
        trimmed_sequences = ms_utils.convertFastaToDict(self.settings.get_trimmed_pool_fasta())
        header = 'sequence name\tsequence\t' + '\t'.join(['%s/(%s+%s)' % (self.monosome_libs[i].lib_settings.sample_name,
                                                                          self.monosome_libs[i].lib_settings.sample_name,
                                                                          self.mrnp_libs[i].lib_settings.sample_name)
                                                          for i in range(len(self.monosome_libs))]) + '\n'
        output_file.write(header)
        for sequence_name in self.monosome_libs[0].pool_sequence_mappings:
            # Recruitment score = monosome RPM / (monosome RPM + mRNP RPM);
            # blank when the combined raw counts fall below read_cutoff.
            out_line = '%s\t%s\t%s\n' % (sequence_name, trimmed_sequences[sequence_name],
                                         '\t'.join(['%f' %
                                                    (self.monosome_libs[i].get_rpm(sequence_name)/
                                                     (self.monosome_libs[i].get_rpm(sequence_name)+
                                                      self.mrnp_libs[i].get_rpm(sequence_name)))
                                                    if (self.monosome_libs[i].get_counts(sequence_name) +
                                                        self.mrnp_libs[i].get_counts(sequence_name)) >= read_cutoff else ''
                                                    for i in range(len(self.monosome_libs)) ]))
            output_file.write(out_line)
        output_file.close()
    def write_sequence_subset(self, recruitment_cutoff, read_cutoff=128, as_RNA=True):
        """
        write out fasta of all sequences that pass a certain recruitment cutoff in all libraries
        :return:
        """
        output_file = open(os.path.join(
            self.rdir_path('tables'),
            'recruitment_above_%f.fasta' % recruitment_cutoff), 'w')
        trimmed_sequences = ms_utils.convertFastaToDict(self.settings.get_trimmed_pool_fasta())
        for sequence_name in self.monosome_libs[0].pool_sequence_mappings:
            # Per-replicate recruitment scores; only replicates that clear
            # the read cutoff contribute.
            rec_scores = [(self.monosome_libs[i].get_rpm(sequence_name) / (self.monosome_libs[i].get_rpm(sequence_name) +
                                                                           self.mrnp_libs[i].get_rpm(sequence_name)))
                          for i in range(len(self.monosome_libs)) if (self.monosome_libs[i].get_counts(sequence_name) +
                                                                      self.mrnp_libs[i].get_counts(sequence_name)) >= read_cutoff]
            # Require every replicate to pass the read cutoff.
            if (len(rec_scores) == len(self.monosome_libs)):
                average_score = np.average(rec_scores)
                if average_score >=recruitment_cutoff:
                    output_file.write('>%s_rec_%f\n' % (sequence_name, average_score))
                    seq = trimmed_sequences[sequence_name]
                    if as_RNA:
                        seq = ms_utils.rna(seq)
                    output_file.write('%s\n' % (seq))
        output_file.close()
    def make_matched_recruitment_change_table(self, annotation_file, read_cutoff=128):
        """
        write out number of fragments mapping to each TL in each dataset
        :param read_cutoff: require this many read between mRNP and monosome to include this TL.
        :return:
        """
        set_name1, set_name2, matched_set = self.parse_matched_set_annotation(annotation_file)
        output_file = open(os.path.join(
            self.rdir_path('tables'),
            '%s_%s_matched_monosome_recruitment_change.txt' % (set_name1, set_name2)), 'w')
        header = '%s\t%s\t' % (set_name1, set_name2) + '\t'.join(['%s %s-%s recruitment score' % (self.monosome_libs[i].lib_settings.sample_name,
                                                                                                  set_name1, set_name2)
                                                                  for i in range(len(self.monosome_libs))]) + '\t'+\
                 '\t'.join(['%s %s/%s recruitment score' % (self.monosome_libs[i].lib_settings.sample_name,
                                                            set_name1, set_name2)
                            for i in range(len(self.monosome_libs))])+'\tttest p\n'
        output_file.write(header)
        for matched_pool_seqs in matched_set:
            set1_scores = []
            set2_scores = []
            for i in range(len(self.monosome_libs)):
                set_1_counts = self.monosome_libs[i].get_counts(matched_pool_seqs[0])\
                               + self.mrnp_libs[i].get_counts(matched_pool_seqs[0])
                set_2_counts = self.monosome_libs[i].get_counts(matched_pool_seqs[1]) \
                               + self.mrnp_libs[i].get_counts(matched_pool_seqs[1])
                # include only comparisons where the average number of reads is high enough
                if set_1_counts >= read_cutoff and set_2_counts >= read_cutoff:
                    set1_score = self.monosome_libs[i].get_rpm(matched_pool_seqs[0]) / \
                                 (self.monosome_libs[i].get_rpm(matched_pool_seqs[0]) +
                                  self.mrnp_libs[i].get_rpm(matched_pool_seqs[0]))
                    set2_score = self.monosome_libs[i].get_rpm(matched_pool_seqs[1]) / \
                                 (self.monosome_libs[i].get_rpm(matched_pool_seqs[1]) +
                                  self.mrnp_libs[i].get_rpm(matched_pool_seqs[1]))
                else:
                    # NaN marks replicates below the read cutoff.
                    set1_score = float('nan')
                    set2_score = float('nan')
                set1_scores.append(set1_score)
                set2_scores.append(set2_score)
            recruitment_changes = np.array(set1_scores)-np.array(set2_scores)
            recruitment_fold_changes = np.array(set1_scores)/np.array(set2_scores)
            # Drop NaN pairs before the t-test.
            scores_1_filtered, scores_2_filtered = ms_utils.filter_x_y_pairs(set1_scores, set2_scores)
            if len(scores_1_filtered)>0 and len(scores_2_filtered)>0:
                t, p = stats.ttest_ind(scores_1_filtered, scores_2_filtered)
            else:
                p = float('nan')
            out_line = '%s\t%s\t%s\t%s\t%f\n' % (matched_pool_seqs[0], matched_pool_seqs[1],
                                                 '\t'.join(['%f' % score_change for score_change in recruitment_changes]),
                                                 '\t'.join(['%f' % score_change for score_change in recruitment_fold_changes]),
                                                 p)
            output_file.write(out_line)
        output_file.close()
    def parse_matched_set_annotation(self, filename):
        """Parse a 2-column TSV of matched sequence-name pairs.

        Returns (set_name1, set_name2, matched_set) where the set names come
        from the header row.
        """
        matched_set = set()# a set of tuples matching a sequence name to one matched to it. sequences cana ppear multiple times, but the pairs ought to be unique
        f = open(filename)
        lines = f.readlines()
        header = lines[0]
        set_name1, set_name2 = header.strip().split('\t')
        for line in lines[1:]:
            seq_name1, seq_name2 = line.strip().split('\t')
            matched_set.add((seq_name1, seq_name2))
        f.close()
        return set_name1, set_name2, matched_set
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("settings_file")
parser.add_argument("--make-tables",
help="Makes tables.",
action='store_true')
parser.add_argument("--perform-qc",
help="performs quality control analysis.",
action='store_true')
parser.add_argument("--make-plots",
help="Makes plots.",
action='store_true')
parser.add_argument("--comparisons",
help="Does comparisons to other experiments",
action='store_true')
parser.add_argument("--all-tasks",
help="Makes plots, tables, folding and comparisons",
action='store_true')
parser.add_argument("--threads",
help="Max number of processes to use",
type | |
0),
(1, ints[1], None),
(2, None, 2),
(3, None, None),
]
self.cursor.executemany("insert into t1(a, b, c) values (?,?,?)",
params)
self.cursor.execute("select a, b, c from t1 order by a")
rows = self.cursor.fetchdictarray()
# row 0
self.assertEqual(rows['b'][0], ints[0])
self.assertEqual(rows['c'][0], 0)
# row 1
self.assertEqual(rows['b'][1], ints[1])
self.assertEqual(rows['c'][1], -2**15)
# row 2
self.assertEqual(rows['b'][2], -2**15)
self.assertEqual(rows['c'][2], 2)
# row 3
self.assertEqual(rows['b'][3], -2**15)
self.assertEqual(rows['c'][3], -2**15)
def test_null_ints(self):
ints = [i for i in range(1,3)]
self.cursor.execute("create table t1(a int, b int, c int)")
params = [(0, ints[0], 0),
(1, ints[1], None),
(2, None, 2),
(3, None, None),
]
self.cursor.executemany("insert into t1(a, b, c) values (?,?,?)",
params)
self.cursor.execute("select a, b, c from t1 order by a")
rows = self.cursor.fetchdictarray()
# row 0
self.assertEqual(rows['b'][0], ints[0])
self.assertEqual(rows['c'][0], 0)
# row 1
self.assertEqual(rows['b'][1], ints[1])
self.assertEqual(rows['c'][1], -2**31)
# row 2
self.assertEqual(rows['b'][2], -2**31)
self.assertEqual(rows['c'][2], 2)
# row 3
self.assertEqual(rows['b'][3], -2**31)
self.assertEqual(rows['c'][3], -2**31)
def test_null_bigints(self):
ints = [i for i in range(1,3)]
self.cursor.execute("create table t1(a int, b bigint, c bigint)")
params = [(0, ints[0], 0),
(1, ints[1], None),
(2, None, 2),
(3, None, None),
]
self.cursor.executemany("insert into t1(a, b, c) values (?,?,?)",
params)
self.cursor.execute("select a, b, c from t1 order by a")
rows = self.cursor.fetchdictarray()
# row 0
self.assertEqual(rows['b'][0], ints[0])
self.assertEqual(rows['c'][0], 0)
# row 1
self.assertEqual(rows['b'][1], ints[1])
self.assertEqual(rows['c'][1], -2**63)
# row 2
self.assertEqual(rows['b'][2], -2**63)
self.assertEqual(rows['c'][2], 2)
# row 3
self.assertEqual(rows['b'][3], -2**63)
self.assertEqual(rows['c'][3], -2**63)
def test_null_floats(self):
floats = [float(i) for i in range(1,3)]
self.cursor.execute("create table t1(a int, b float, c float)")
params = [(0, floats[0], 0),
(1, floats[1], None),
(2, None, 2),
(3, None, None),
]
self.cursor.executemany("insert into t1(a, b, c) values (?,?,?)",
params)
self.cursor.execute("select a, b, c from t1 order by a")
rows = self.cursor.fetchdictarray()
# row 0
self.assertEqual(rows['b'][0], floats[0])
self.assertEqual(rows['c'][0], 0)
# row 1
self.assertEqual(rows['b'][1], floats[1])
# The line below does not work without the str() representation
self.assertEqual(str(rows['c'][1]), str(np.nan))
# row 2
# The line below does not work without the str() representation
self.assertEqual(str(rows['b'][2]), str(np.nan))
self.assertEqual(rows['c'][2], 2)
# row 3
# The line below does not work without the str() representation
self.assertEqual(str(rows['b'][3]), str(np.nan))
self.assertEqual(str(rows['c'][3]), str(np.nan))
def test_null_doubles(self):
floats = [float(i) for i in range(1,3)]
self.cursor.execute("create table t1(a int, b float8, c float8)")
params = [(0, floats[0], 0),
(1, floats[1], None),
(2, None, 2),
(3, None, None),
]
self.cursor.executemany("insert into t1(a, b, c) values (?,?,?)",
params)
self.cursor.execute("select a, b, c from t1 order by a")
rows = self.cursor.fetchdictarray()
# row 0
self.assertEqual(rows['b'][0], floats[0])
self.assertEqual(rows['c'][0], 0)
# row 1
self.assertEqual(rows['b'][1], floats[1])
# The line below does not work without the str() representation
self.assertEqual(str(rows['c'][1]), str(np.nan))
# row 2
# The line below does not work without the str() representation
self.assertEqual(str(rows['b'][2]), str(np.nan))
self.assertEqual(rows['c'][2], 2)
# row 3
# The line below does not work without the str() representation
self.assertEqual(str(rows['b'][3]), str(np.nan))
self.assertEqual(str(rows['c'][3]), str(np.nan))
def test_null_strings(self):
strings = [str(i) for i in range(1,3)]
self.cursor.execute("create table t1(a int, b varchar(2), c varchar(2))")
params = [(0, strings[0], ''),
(1, strings[1], None),
(2, None, '2'),
(3, None, None),
]
self.cursor.executemany("insert into t1(a, b, c) values (?,?,?)",
params)
self.cursor.execute("select a, b, c from t1 order by a")
rows = self.cursor.fetchdictarray()
# row 0
self.assertEqual(rows['b'][0], strings[0])
self.assertEqual(rows['c'][0], '')
# row 1
self.assertEqual(rows['b'][1], strings[1])
self.assertEqual(rows['c'][1], 'NA')
# row 2
self.assertEqual(rows['b'][2], 'NA')
self.assertEqual(rows['c'][2], '2')
# row 3
# The line below does not work without the str() representation
self.assertEqual(rows['b'][3], 'NA')
self.assertEqual(rows['c'][3], 'NA')
def test_null_timestamp(self):
if np.__version__ < "1.7": return
dates = [datetime.strptime("2008-04-%02d 00:01:02"%i,
"%Y-%m-%d %H:%M:%S")
for i in range(1,3)]
self.cursor.execute("create table t1(a int, b timestamp, c int)")
params = [(0, dates[0], 0),
(1, dates[1], None),
(2, None, 2),
(3, None, None),
]
self.cursor.executemany("insert into t1(a, b, c) values (?,?,?)",
params)
self.cursor.execute("select a, b, c from t1 order by a")
rows = self.cursor.fetchdictarray()
# row 0
self.assertEqual(rows['b'][0], np.datetime64(dates[0]))
self.assertEqual(rows['c'][0], 0)
# row 1
self.assertEqual(rows['b'][1], np.datetime64(dates[1]))
self.assertEqual(rows['c'][1], -2147483648)
# row 2
# The line below does not work without the str() representation
self.assertEqual(str(rows['b'][2]), str(np.datetime64('NaT')))
self.assertEqual(rows['c'][2], 2)
# row 3
self.assertEqual(str(rows['b'][3]), str(np.datetime64('NaT')))
self.assertEqual(rows['c'][3], -2147483648)
def test_null_date(self):
if np.__version__ < "1.7": return
dates = [date(2008, 4, i) for i in range(1,3)]
npdates = np.array(dates, dtype="datetime64[D]")
self.cursor.execute("create table t1(a int, b date, c int)")
params = [(0, dates[0], 0),
(1, dates[1], None),
(2, None, 2),
(3, None, None),
]
self.cursor.executemany("insert into t1(a, b, c) values (?,?,?)",
params)
self.cursor.execute("select a, b, c from t1 order by a")
rows = self.cursor.fetchdictarray()
# row 0
self.assertEqual(rows['b'][0], npdates[0])
self.assertEqual(rows['c'][0], 0)
# row 1
self.assertEqual(rows['b'][1], npdates[1])
self.assertEqual(rows['c'][1], -2147483648)
# row 2
# The line below does not work without the str() representation
self.assertEqual(str(rows['b'][2]), str(np.datetime64('NaT')))
self.assertEqual(rows['c'][2], 2)
# row 3
self.assertEqual(str(rows['b'][3]), str(np.datetime64('NaT')))
self.assertEqual(rows['c'][3], -2147483648)
def test_null_time(self):
if np.__version__ < "1.7": return
dates = [time(0, 0, i) for i in range(1,3)]
npdates = np.array(range(1,3), dtype="timedelta64[s]")
self.cursor.execute("create table t1(a int, b time, c int)")
params = [(0, dates[0], 0),
(1, dates[1], None),
(2, None, 2),
(3, None, None),
]
self.cursor.executemany("insert into t1(a, b, c) values (?,?,?)",
params)
self.cursor.execute("select a, b, c from t1 order by a")
rows = self.cursor.fetchdictarray()
# row 0
self.assertEqual(rows['b'][0], npdates[0])
self.assertEqual(rows['c'][0], 0)
# row 1
self.assertEqual(rows['b'][1], npdates[1])
self.assertEqual(rows['c'][1], -2147483648)
# row 2
# The line below does not work without the str() representation
self.assertEqual(str(rows['b'][2]), str(np.timedelta64('NaT')))
self.assertEqual(rows['c'][2], 2)
# row 3
self.assertEqual(str(rows['b'][3]), str(np.timedelta64('NaT')))
self.assertEqual(rows['c'][3], -2147483648)
#
    # partial fetches
#
def test_partial_fetch_dict(self):
self.cursor.execute("create table t1(a real, b int)")
params = [ (i, i) for i in range(6) ]
self.cursor.executemany("insert into t1(a, b) values (?,?)", params)
self.cursor.execute("select * from t1 order by a")
# Row 0
rows = self.cursor.fetchdictarray(1)
self.assertEqual(len(rows['a']), 1)
self.assertEqual(params[0][0], rows['a'][0])
self.assertEqual(params[0][1], rows['b'][0])
# Rows 1,2
rows = self.cursor.fetchdictarray(2)
self.assertEqual(len(rows['a']), 2)
self.assertEqual(params[1][0], rows['a'][0])
self.assertEqual(params[1][1], rows['b'][0])
self.assertEqual(params[2][0], rows['a'][1])
self.assertEqual(params[2][1], rows['b'][1])
# Rows 3,4,5
rows = self.cursor.fetchdictarray(3)
self.assertEqual(len(rows['a']), 3)
self.assertEqual(params[3][0], rows['a'][0])
self.assertEqual(params[3][1], rows['b'][0])
self.assertEqual(params[4][0], rows['a'][1])
self.assertEqual(params[4][1], rows['b'][1])
self.assertEqual(params[5][0], rows['a'][2])
self.assertEqual(params[5][1], rows['b'][2])
# A new fetch should return a length 0 container
rows = self.cursor.fetchdictarray(1)
self.assertEqual(len(rows['a']), 0)
def test_partial_fetch_sarray(self):
self.cursor.execute("create table t1(a real, b int)")
params = [ (i, i) for i in range(6) ]
self.cursor.executemany("insert into t1(a, b) values (?,?)", params)
self.cursor.execute("select * from t1 order by a")
# Row 0
rows = self.cursor.fetchsarray(1)
self.assertEqual(len(rows['a']), 1)
self.assertEqual(params[0][0], rows['a'][0])
self.assertEqual(params[0][1], rows['b'][0])
# Rows 1,2
rows = self.cursor.fetchsarray(2)
self.assertEqual(len(rows['a']), 2)
self.assertEqual(params[1][0], rows['a'][0])
self.assertEqual(params[1][1], rows['b'][0])
self.assertEqual(params[2][0], rows['a'][1])
self.assertEqual(params[2][1], rows['b'][1])
# Rows 3,4,5
rows = self.cursor.fetchsarray(3)
self.assertEqual(len(rows['a']), 3)
self.assertEqual(params[3][0], rows['a'][0])
self.assertEqual(params[3][1], rows['b'][0])
self.assertEqual(params[4][0], rows['a'][1])
self.assertEqual(params[4][1], rows['b'][1])
self.assertEqual(params[5][0], rows['a'][2])
self.assertEqual(params[5][1], rows['b'][2])
# A new fetch should return a length 0 container
rows = self.cursor.fetchsarray(1)
self.assertEqual(len(rows['a']), 0)
def test_partial_fetch_dict2(self):
self.cursor.execute("create table t1(a real, b int)")
params = [ (i, i) for i in range(6) ]
self.cursor.executemany("insert into t1(a, b) values (?,?)", params)
self.cursor.execute("select * from t1 order by a")
# Rows 0,1,2
rows = self.cursor.fetchdictarray(3)
self.assertEqual(len(rows['a']), 3)
self.assertEqual(params[0][0], rows['a'][0])
self.assertEqual(params[0][1], rows['b'][0])
self.assertEqual(params[1][0], rows['a'][1])
self.assertEqual(params[1][1], rows['b'][1])
self.assertEqual(params[2][0], rows['a'][2])
self.assertEqual(params[2][1], rows['b'][2])
# Row 3
rows = self.cursor.fetchdictarray(1)
self.assertEqual(len(rows['a']), 1)
self.assertEqual(params[3][0], rows['a'][0])
self.assertEqual(params[3][1], rows['b'][0])
# Rows 4,5
rows = self.cursor.fetchdictarray()
self.assertEqual(len(rows['a']), 2)
self.assertEqual(params[4][0], rows['a'][0])
self.assertEqual(params[4][1], rows['b'][0])
self.assertEqual(params[5][0], rows['a'][1])
self.assertEqual(params[5][1], rows['b'][1])
# A new fetch should return a length 0 container
rows = self.cursor.fetchdictarray(1)
self.assertEqual(len(rows['a']), 0)
#
# Unsupported data types
#
def test_unsupported_decimal(self):
self.cursor.execute("create table t1(a int, b decimal)")
params = [(0, 2.32)]
self.cursor.executemany("insert into t1(a, b) values (?,?)",
params)
self.cursor.execute("select * from t1")
self.assertRaises(TypeError, self.cursor.fetchdictarray, ())
def test_unsupported_binary(self):
if backend == "postgresql":
# Postgres does not have support for a binary datatype
return
self.cursor.execute("create table t1(a int, b binary)")
params = [(0, "2.32")]
self.cursor.executemany("insert into t1(a, b) values (?,?)",
params)
self.cursor.execute("select * from t1")
self.assertRaises(TypeError, self.cursor.fetchdictarray, ())
def test_unsupported_timestamp(self):
if np.__version__ >= '1.7':
# This is supported when using NumPy >= 1.7
return
self.cursor.execute("create table t1(a int, b timestamp)")
params = [(0, "2008-08-08 08:08:08")]
self.cursor.executemany("insert into t1(a, b) values (?,?)",
params)
self.cursor.execute("select * from t1")
self.assertRaises(TypeError, self.cursor.fetchdictarray, ())
#
# misc
#
def test_varchar(self):
self.cursor.execute("create table t1(a int, b varchar(4), c int)")
# Generate strings below and above 4 char long
params = [ (i, str(16**i), i) for i in range(6) ]
# PostgreSQL | |
<filename>tests/test_stability.py
import pytest
import attr
import numpy as np
from thermo import Chemical
from pytest_lazyfixture import lazy_fixture
from gibbs.models.ceos import PengRobinson78, SoaveRedlichKwong
from gibbs.minimization import PygmoSelfAdaptiveDESettings
from gibbs.mixture import Mixture
from gibbs.stability_analysis import stability_test
from gibbs.utilities import convert_bar_to_Pa
seed = 1234
@pytest.fixture
def methane():
    """Methane pure-component data from the `thermo` database."""
    return Chemical('methane')


@pytest.fixture
def ethane():
    """Ethane pure-component data from the `thermo` database."""
    return Chemical('ethane')


@pytest.fixture
def propane():
    """Propane pure-component data from the `thermo` database."""
    return Chemical('propane')


@pytest.fixture
def hydrogen_sulfide():
    """Hydrogen sulfide (H2S) pure-component data from the `thermo` database."""
    return Chemical('H2S')


@pytest.fixture
def nitrogen():
    """Nitrogen (N2) pure-component data from the `thermo` database."""
    return Chemical('N2')


@pytest.fixture
def carbon_dioxide():
    """Carbon dioxide (CO2) pure-component data from the `thermo` database."""
    return Chemical('CO2')
@attr.s(auto_attribs=True)
class InputModel:
    """Feed specification (z, P, T, critical data, BIPs) plus a PR78 model.

    `z`, `Tc`, `Pc` and `acentric_factor` are per-component arrays; `bip`
    is the binary-interaction matrix handed to the equation of state.
    """
    z: np.ndarray
    P: float
    T: float
    Tc: np.ndarray
    Pc: np.ndarray
    acentric_factor: np.ndarray
    bip: np.ndarray

    @property
    def input_mixture(self):
        """Mixture assembled from the stored composition and properties."""
        return Mixture(
            z=self.z,
            Tc=self.Tc,
            Pc=self.Pc,
            acentric_factor=self.acentric_factor
        )

    @property
    def model(self):
        """Peng-Robinson-78 model built for this mixture and BIP matrix."""
        return PengRobinson78(mixture=self.input_mixture, bip=self.bip)

    @property
    def number_of_components(self):
        """Number of components in the feed."""
        return len(self.z)

    def fugacity(self, P, T, z):
        """Fugacities at (P, T, z) evaluated at the minimal-energy Z root."""
        return self.model.calculate_fugacity(P, T, z, self.calculate_Z(P, T, z))

    def calculate_Z(self, P, T, z):
        """Compressibility factor selected by the minimal-energy criterion."""
        return self.model.calculate_Z_minimal_energy(P, T, z)
@attr.s(auto_attribs=True)
class NichitaPR:
    """Benchmark wrapper binding a mixture and BIPs to a PR78 model."""
    mixture: Mixture
    bip: np.ndarray

    @property
    def model(self):
        """Peng-Robinson-78 model for the stored mixture and BIP matrix."""
        return PengRobinson78(mixture=self.mixture, bip=self.bip)

    @property
    def number_of_components(self):
        """Number of species in the mixture."""
        return len(self.mixture.z)

    def fugacity(self, P, T, z):
        """Fugacities at (P, T, z) evaluated at the minimal-energy Z root."""
        return self.model.calculate_fugacity(P, T, z, self.calculate_Z(P, T, z))

    def calculate_Z(self, P, T, z):
        """Compressibility factor selected by the minimal-energy criterion."""
        return self.model.calculate_Z_minimal_energy(P, T, z)
@attr.s(auto_attribs=True)
class NichitaSRK:
    """Benchmark wrapper binding a mixture and BIPs to an SRK model."""
    mixture: Mixture
    bip: np.ndarray

    @property
    def model(self):
        """Soave-Redlich-Kwong model for the stored mixture and BIP matrix."""
        return SoaveRedlichKwong(mixture=self.mixture, bip=self.bip)

    @property
    def number_of_components(self):
        """Number of species in the mixture."""
        return len(self.mixture.z)

    def fugacity(self, P, T, z):
        """Fugacities at (P, T, z) evaluated at the minimal-energy Z root."""
        return self.model.calculate_fugacity(P, T, z, self.calculate_Z(P, T, z))

    def calculate_Z(self, P, T, z):
        """Compressibility factor selected by the minimal-energy criterion."""
        return self.model.calculate_Z_minimal_energy(P, T, z)
@pytest.fixture
def sample_model():
    """Three-component InputModel with hard-coded feed and critical data."""
    return InputModel(
        z=np.array([0.5, 0.42, 0.08]),
        P=3.447e6,
        T=410.928,
        Tc=np.array([190.556, 425.16667, 617.666667]),
        Pc=np.array([4604318.9, 3796942.8, 2.096e6]),
        acentric_factor=np.array([0.0115, 0.1928, 0.4902]),
        bip=np.zeros((3, 3)),
    )
def _c1_h2s_srk(z0, z1, methane, hydrogen_sulfide):
    """Build the SRK benchmark model for a methane/H2S mixture (k_ij = 0.08)."""
    mixture = Mixture(
        np.array([z0, z1]),
        np.array([methane.Tc, hydrogen_sulfide.Tc]),
        np.array([methane.Pc, hydrogen_sulfide.Pc]),
        np.array([methane.omega, hydrogen_sulfide.omega]),
    )
    kijs = np.array([
        [0.000, 0.080],
        [0.080, 0.000]
    ])
    return NichitaSRK(mixture=mixture, bip=kijs)


@pytest.fixture
def model_problem_1_1(methane, hydrogen_sulfide):
    """Problem 1, case 1: equimolar methane/H2S."""
    return _c1_h2s_srk(0.5, 0.5, methane, hydrogen_sulfide)


@pytest.fixture
def model_problem_1_2(methane, hydrogen_sulfide):
    """Problem 1, case 2: z = (0.9885, 0.0115)."""
    return _c1_h2s_srk(0.9885, 0.0115, methane, hydrogen_sulfide)


@pytest.fixture
def model_problem_1_3(methane, hydrogen_sulfide):
    """Problem 1, case 3: z = (0.9813, 0.0187)."""
    return _c1_h2s_srk(0.9813, 0.0187, methane, hydrogen_sulfide)


@pytest.fixture
def model_problem_1_4(methane, hydrogen_sulfide):
    """Problem 1, case 4: z = (0.112, 0.888)."""
    return _c1_h2s_srk(0.112, 0.888, methane, hydrogen_sulfide)


@pytest.fixture
def model_problem_1_5(methane, hydrogen_sulfide):
    """Problem 1, case 5: z = (0.11, 0.89)."""
    return _c1_h2s_srk(0.11, 0.89, methane, hydrogen_sulfide)
def _c1_c3_srk_50bar(z0, z1, methane, propane):
    """Build the SRK benchmark model for methane/propane (k_ij = 0.029)."""
    mixture = Mixture(
        np.array([z0, z1]),
        np.array([methane.Tc, propane.Tc]),
        np.array([methane.Pc, propane.Pc]),
        np.array([methane.omega, propane.omega]),
    )
    kijs = np.array([
        [0.000, 0.029],
        [0.029, 0.000]
    ])
    return NichitaSRK(mixture=mixture, bip=kijs)


@pytest.fixture
def model_problem_2_1_50_bar(methane, propane):
    """Problem 2 @ 50 bar, case 1: z = (0.10, 0.90)."""
    return _c1_c3_srk_50bar(0.10, 0.90, methane, propane)


@pytest.fixture
def model_problem_2_2_50_bar(methane, propane):
    """Problem 2 @ 50 bar, case 2: z = (0.40, 0.60)."""
    return _c1_c3_srk_50bar(0.40, 0.60, methane, propane)


@pytest.fixture
def model_problem_2_3_50_bar(methane, propane):
    """Problem 2 @ 50 bar, case 3: z = (0.60, 0.40)."""
    return _c1_c3_srk_50bar(0.60, 0.40, methane, propane)


@pytest.fixture
def model_problem_2_4_50_bar(methane, propane):
    """Problem 2 @ 50 bar, case 4: z = (0.90, 0.10)."""
    return _c1_c3_srk_50bar(0.90, 0.10, methane, propane)
def _c1_c3_srk_100bar(z0, z1, methane, propane):
    """Build the SRK benchmark model for methane/propane (k_ij = 0.029)."""
    mixture = Mixture(
        np.array([z0, z1]),
        np.array([methane.Tc, propane.Tc]),
        np.array([methane.Pc, propane.Pc]),
        np.array([methane.omega, propane.omega]),
    )
    kijs = np.array([
        [0.000, 0.029],
        [0.029, 0.000]
    ])
    return NichitaSRK(mixture=mixture, bip=kijs)


@pytest.fixture
def model_problem_2_1_100_bar(methane, propane):
    """Problem 2 @ 100 bar, case 1: z = (0.40, 0.60)."""
    return _c1_c3_srk_100bar(0.40, 0.60, methane, propane)


# Bug fix: `model_problem_2_2_100_bar` used to be defined twice.  The first
# definition (z = 0.40/0.60, a copy-paste of case 1) was shadowed by the
# second and therefore dead code; only the effective definition is kept.
@pytest.fixture
def model_problem_2_2_100_bar(methane, propane):
    """Problem 2 @ 100 bar, case 2: z = (0.68, 0.32)."""
    return _c1_c3_srk_100bar(0.68, 0.32, methane, propane)


@pytest.fixture
def model_problem_2_3_100_bar(methane, propane):
    """Problem 2 @ 100 bar, case 3: z = (0.73, 0.27)."""
    return _c1_c3_srk_100bar(0.73, 0.27, methane, propane)


@pytest.fixture
def model_problem_2_4_100_bar(methane, propane):
    """Problem 2 @ 100 bar, case 4: z = (0.90, 0.10)."""
    return _c1_c3_srk_100bar(0.90, 0.10, methane, propane)
def _c2_n2_pr(z0, z1, ethane, nitrogen):
    """Build the PR78 benchmark model for ethane/N2 (k_ij = 0.08)."""
    mixture = Mixture(
        np.array([z0, z1]),
        np.array([ethane.Tc, nitrogen.Tc]),
        np.array([ethane.Pc, nitrogen.Pc]),
        np.array([ethane.omega, nitrogen.omega]),
    )
    kijs = np.array([
        [0.000, 0.080],
        [0.080, 0.000]
    ])
    return NichitaPR(mixture=mixture, bip=kijs)


@pytest.fixture
def model_problem_3_1(ethane, nitrogen):
    """Problem 3, case 1: z = (0.90, 0.10)."""
    return _c2_n2_pr(0.90, 0.10, ethane, nitrogen)


@pytest.fixture
def model_problem_3_2(ethane, nitrogen):
    """Problem 3, case 2: z = (0.82, 0.18)."""
    return _c2_n2_pr(0.82, 0.18, ethane, nitrogen)


@pytest.fixture
def model_problem_3_3(ethane, nitrogen):
    """Problem 3, case 3: z = (0.70, 0.30)."""
    return _c2_n2_pr(0.70, 0.30, ethane, nitrogen)


@pytest.fixture
def model_problem_3_4(ethane, nitrogen):
    """Problem 3, case 4: z = (0.56, 0.44)."""
    return _c2_n2_pr(0.56, 0.44, ethane, nitrogen)


@pytest.fixture
def model_problem_3_5(ethane, nitrogen):
    """Problem 3, case 5: z = (0.40, 0.60)."""
    return _c2_n2_pr(0.40, 0.60, ethane, nitrogen)
def _c1_co2_pr(z0, z1, methane, carbon_dioxide):
    """Build the PR78 benchmark model for methane/CO2 (k_ij = 0.095)."""
    mixture = Mixture(
        np.array([z0, z1]),
        np.array([methane.Tc, carbon_dioxide.Tc]),
        np.array([methane.Pc, carbon_dioxide.Pc]),
        np.array([methane.omega, carbon_dioxide.omega]),
    )
    kijs = np.array([
        [0.000, 0.095],
        [0.095, 0.000]
    ])
    return NichitaPR(mixture=mixture, bip=kijs)


@pytest.fixture
def model_problem_4_1(methane, carbon_dioxide):
    """Problem 4, case 1: z = (0.90, 0.10)."""
    return _c1_co2_pr(0.90, 0.10, methane, carbon_dioxide)


@pytest.fixture
def model_problem_4_2(methane, carbon_dioxide):
    """Problem 4, case 2: z = (0.80, 0.20)."""
    return _c1_co2_pr(0.80, 0.20, methane, carbon_dioxide)


@pytest.fixture
def model_problem_4_3(methane, carbon_dioxide):
    """Problem 4, case 3: z = (0.70, 0.30)."""
    return _c1_co2_pr(0.70, 0.30, methane, carbon_dioxide)


@pytest.fixture
def model_problem_4_4(methane, carbon_dioxide):
    """Problem 4, case 4: z = (0.57, 0.43)."""
    return _c1_co2_pr(0.57, 0.43, methane, carbon_dioxide)


@pytest.fixture
def model_problem_4_5(methane, carbon_dioxide):
    """Problem 4, case 5: z = (0.40, 0.60)."""
    return _c1_co2_pr(0.40, 0.60, methane, carbon_dioxide)
def _c1_c2_n2_pr(z, methane, ethane, nitrogen):
    """Build the PR78 benchmark model for a methane/ethane/N2 ternary feed."""
    mixture = Mixture(
        np.array(z),
        np.array([methane.Tc, ethane.Tc, nitrogen.Tc]),
        np.array([methane.Pc, ethane.Pc, nitrogen.Pc]),
        np.array([methane.omega, ethane.omega, nitrogen.omega]),
    )
    kijs = np.array([
        [0.000, 0.021, 0.038],
        [0.021, 0.000, 0.080],
        [0.038, 0.080, 0.000]
    ])
    return NichitaPR(mixture=mixture, bip=kijs)


@pytest.fixture
def model_problem_5_1(methane, ethane, nitrogen):
    """Problem 5, case 1: z = (0.10, 0.60, 0.30)."""
    return _c1_c2_n2_pr([0.10, 0.60, 0.30], methane, ethane, nitrogen)


@pytest.fixture
def model_problem_5_2(methane, ethane, nitrogen):
    """Problem 5, case 2: z = (0.30, 0.55, 0.15)."""
    return _c1_c2_n2_pr([0.30, 0.55, 0.15], methane, ethane, nitrogen)


@pytest.fixture
def model_problem_5_3(methane, ethane, nitrogen):
    """Problem 5, case 3: z = (0.38, 0.54, 0.08)."""
    return _c1_c2_n2_pr([0.38, 0.54, 0.08], methane, ethane, nitrogen)
@pytest.fixture
def model_problem_5_4(methane, ethane, nitrogen):
| |
keys are factor column names (
i.e. "paper"/"section"/"paragraph") and whose values are counts of
unique factor instances (e.g. total number of papers/sections/
paragraphs in the dataset)
"""
result_data, counts = prepare_occurrence_data(data)
result_data = result_data.reset_index()
result_data["paper_frequency"] = result_data[
"paper"].transform(lambda x: len([str(p).split(":")[0] for p in x]))
result_data["paper"] = result_data[
"paper"].transform(lambda x: list(x))
result_data["paragraph"] = result_data[
"paragraph"].transform(lambda x: list(x))
result_data["section"] = result_data[
"section"].transform(lambda x: list(x))
result_data["raw_entity_types"] = result_data[
"entity_type"].transform(list)
result_data["raw_frequency"] = result_data["entity_type"].apply(len)
result_data["entity_type"] = result_data[
"entity_type"].transform(
lambda x: ", ".join(list(set(x))))
return result_data, counts
def merge_with_ontology_linking(occurence_data,
                                factor_columns,
                                linking_df=None,
                                linking_path=None,
                                linked_occurrence_data_path=None):
    """Merge occurrence data with ontology linking data.

    Parameters
    ----------
    occurence_data : pd.DataFrame
        Occurrence table indexed by entity mention; the `factor_columns`
        hold collections of factor instances.  Mentions are expected to be
        lower-cased already (the linking side is lower-cased here).
    factor_columns : list of str or None
        Factor column names; defaults to ["paper", "section", "paragraph"].
    linking_df : pd.DataFrame, optional
        In-memory linking table with "mention" and "concept" columns.
    linking_path : str, optional
        Path to a linking table (un-pickled when the path contains "pkl",
        otherwise read as CSV).  Takes precedence over `linking_df`.
    linked_occurrence_data_path : str, optional
        When linking data is available, the merged result is pickled here;
        when no linking data is given, a pre-computed result is loaded
        from this path instead.

    Returns
    -------
    pd.DataFrame
        Occurrence data re-indexed by linked concept (index "entity"),
        with original mentions collected in "aggregated_entities".

    Raises
    ------
    ValueError
        If neither linking data nor a pre-computed result is provided.
    """
    if factor_columns is None:
        factor_columns = ["paper", "section", "paragraph"]
    # Ontology linking
    linking = None
    if linking_path:
        # Open ontology linking files
        print("Loading the ontology linking...")
        if "pkl" in linking_path:
            with open(linking_path, "rb") as f:
                linking = pickle.load(f)
        else:
            linking = pd.read_csv(linking_path)
    elif linking_df is not None:
        linking = linking_df
    if linking is not None:
        # Normalize the linking side of the join to lower case; the merge
        # below is performed on the 'entity' column.
        linking = linking.rename(columns={"mention": "entity"})
        linking["concept"] = linking["concept"].apply(lambda x: x.lower())
        linking["entity"] = linking["entity"].apply(lambda x: x.lower())
        print("Merging the occurrence data with the ontology linking...")
        # Merge occurrence data with the linking data
        occurence_data = occurence_data.reset_index()
        merged_data = occurence_data.merge(
            linking, on="entity", how="left")
        # Mentions with no linked concept fall back to the raw mention.
        merged_data.loc[
            merged_data["concept"].isna(), "concept"] = merged_data[
                merged_data["concept"].isna()]["entity"]
        for col in factor_columns:
            merged_data[col] = merged_data[col].apply(lambda x: list(x))

        def aggregate_linking_data(x, factors):
            # Column-wise aggregation when collapsing rows by concept:
            # mentions are collected as a list, factor columns are unioned,
            # per-concept metadata keeps its first value, and any other
            # column is concatenated.
            if x.name == "entity":
                return list(x)
            elif x.name in factors:
                return set(sum(x, []))
            elif x.name in ["uid", "definition", "taxonomy", "semantic_type"]:
                return list(x)[0]
            return sum(x, [])

        occurrence_data_linked = merged_data.groupby("concept").aggregate(
            lambda x: aggregate_linking_data(x, factor_columns))
        occurrence_data_linked = occurrence_data_linked.reset_index()
        occurrence_data_linked = occurrence_data_linked.rename(
            columns={
                "concept": "entity",
                "entity": "aggregated_entities"
            })
        occurrence_data_linked = occurrence_data_linked.set_index("entity")
        if linked_occurrence_data_path:
            with open(linked_occurrence_data_path, "wb") as f:
                print("Saving pre-calculated linked occurrence data....")
                pickle.dump(occurrence_data_linked, f)
        return occurrence_data_linked
    elif linked_occurrence_data_path:
        print("Loading linked occurrence data...")
        with open(linked_occurrence_data_path, "rb") as f:
            occurrence_data_linked = pickle.load(f)
    else:
        raise ValueError(
            "Neither linking data nor pre-computed linked occurrence "
            "data has been specified"
        )
    return occurrence_data_linked
def _configure_backends(backend_configs, graph):
    """Instantiate the analysis backends for `graph`.

    The original implementation repeated the same construction logic three
    times (metrics / communities / paths); it is factored into one helper.

    Parameters
    ----------
    backend_configs : dict or None
        Optional mapping from component name ("metrics", "communities",
        "paths") to backend name; defaults to "networkx" per component.
        For the "neo4j" backend it must also carry "driver", "node_label"
        and "edge_label".
    graph : PGFrame-like
        Graph object handed to each backend constructor.

    Returns
    -------
    tuple
        (metrics processor, community detector, path finder,
        PGFrame converter for the paths backend).
    """
    if backend_configs is None:
        backend_configs = dict()

    def _build(component):
        # Create the backend object for a single analysis component.
        backend = backend_configs.get(component, "networkx")
        factory = BACKEND_MAPPING[backend][component]
        if backend == "neo4j":
            # The neo4j backend needs connection details from the config.
            return factory(
                graph,
                backend_configs["driver"],
                backend_configs["node_label"],
                backend_configs["edge_label"],
                directed=False)
        return factory(graph, directed=False)

    processor = _build("metrics")
    com_detector = _build("communities")
    path_finder = _build("paths")
    paths_backend = backend_configs.get("paths", "networkx")
    pgframe_converter = BACKEND_MAPPING[paths_backend]["to_pgframe"]
    return processor, com_detector, path_finder, pgframe_converter
def generate_cooccurrence_analysis(occurrence_data, factor_counts,
                                   type_data=None, min_occurrences=1,
                                   n_most_frequent=None, keep=None,
                                   factors=None, cores=8,
                                   graph_dump_prefix=None,
                                   communities=True, remove_zero_mi=False,
                                   backend_configs=None,
                                   community_strategy="louvain"):
    """Generate co-occurrence analysis.

    This utility executes the entire pipeline of the co-occurrence analysis:
    it generates co-occurrence networks based on the input factors, yields
    various co-occurrence statistics (frequency, mutual-information-based
    scores) as edge attributes, computes various node centrality
    measures, node communities (and attaches them to the node attributes of
    the generated networks). Finally, it computes minimum spanning trees
    given the mutual-information-based distance scores (1 / NPMI). The function
    allows to dump the resulting graph objects using a pickle representation.

    Parameters
    ----------
    occurrence_data : pd.DataFrame
        Input occurrence data table. Rows represent unique entities (indexed
        by entity names), columns contain sets of aggregated occurrence factors
        (e.g. sets of papers/sections/paragraphs where the given term occurs).
    factor_counts : dict
        Dictionary whose keys are factor column names (
        i.e. "paper"/"section"/"paragraph") and whose values are counts of
        unique factor instances (e.g. total number of papers/sections/
        paragraphs in the dataset)
    type_data : pd.DataFrame, optional
        Table containing node types (these types are saved as node attributes)
    min_occurrences : int, optional
        Minimum co-occurrence frequency to consider (add as an edge to the co-
        occurrence network). By default every non-zero co-occurrence frequency
        yields an edge in the resulting network.
    n_most_frequent : int, optional
        Number of most frequent entitites to include in the co-occurrence
        network. By default is not set, therefore, all the terms from the
        occurrence table are included.
    keep : iterable
        Collection of entities to keep even if they are not included in N most
        frequent entities.
    factors : iterable, optional
        Set of factors to use for constructing co-occurrence networks
        (a network per factor is produced). Defaults to the keys of
        `factor_counts`.
    cores : int, optional
        Number of cores to use during the parallel network generation.
    graph_dump_prefix : str
        Path prefix for dumping the generated networks (the edge
        list, edge attributes, node list and node attributes are saved).
    communities : bool, optional
        Flag indicating whether the community detection should be included
        in the analysis. By default True.
    remove_zero_mi : bool, optional
        Flag indicating whether edges with zero mutual-information scores
        (PPMI and NPMI) should be removed from the network (helps to sparsify
        the network, however may result in isolated nodes of high occurrence
        frequency).
    backend_configs : dict, optional
        Per-component backend selection ("metrics"/"communities"/"paths");
        may also carry "driver"/"node_label"/"edge_label" for the neo4j
        backend. Defaults to networkx for every component.
    community_strategy : str, optional
        Community-detection strategy passed to the detector
        ("louvain" by default).

    Returns
    -------
    graphs : dict
        Dictionary whose keys are factor names and whose values are
        generated co-occurrence networks (PGFrames).
    trees : dict
        Dictionary whose keys are factor names and whose values are
        minimum spanning trees of generated co-occurrence networks.
    """
    def compute_distance(x):
        # 1 / NPMI used as an edge distance; non-positive scores -> inf.
        return 1 / x if x > 0 else math.inf

    # Bug fix: `factors` is documented as optional, but passing None used
    # to crash on the membership tests below.  Default to every factor for
    # which a total count was supplied.
    if factors is None:
        factors = list(factor_counts.keys())

    # Filter entities that occur only once (only in one paragraph, usually
    # represent noisy terms)
    if "paragraph" in factors:
        occurrence_data = occurrence_data[occurrence_data["paragraph"].apply(
            lambda x: len(x) >= min_occurrences)]
        occurrence_data["paragraph_frequency"] = occurrence_data[
            "paragraph"].apply(lambda x: len(x))
    if "section" in factors:
        occurrence_data["section_frequency"] = occurrence_data[
            "section"].apply(lambda x: len(x))
    if "paper" in factors:
        occurrence_data["paper_frequency"] = occurrence_data[
            "paper"].apply(lambda x: len(x))

    graphs = {}
    trees = {}
    for f in factors:
        print("-------------------------------")
        print("Factor: {}".format(f))
        print("-------------------------------")
        # Build a PGFrame from the occurrence data
        graph = PandasPGFrame()
        entity_nodes = occurrence_data.index
        graph.add_nodes(entity_nodes)
        graph.add_node_types({n: "Entity" for n in entity_nodes})
        graph.add_node_properties(occurrence_data[f], prop_type="category")
        graph.add_node_properties(
            occurrence_data["{}_frequency".format(f)], prop_type="numeric")

        # Select most frequent nodes
        nodes_to_include = None
        if n_most_frequent:
            nodes_to_include = graph._nodes.nlargest(
                n_most_frequent, "{}_frequency".format(f)).index
            # Bug fix: honor `keep` (documented but previously ignored) by
            # retaining the requested entities alongside the most frequent.
            if keep is not None:
                nodes_to_include = nodes_to_include.union(
                    [n for n in keep if n in graph._nodes.index])
            graph = graph.subgraph(nodes=nodes_to_include)

        # Generate co-occurrence edges
        generator = CooccurrenceGenerator(graph)
        edges = generator.generate_from_nodes(
            f, total_factor_instances=factor_counts[f],
            compute_statistics=["frequency", "ppmi", "npmi"],
            parallelize=True, cores=cores)

        # Remove edges with zero mutual information
        if remove_zero_mi:
            edges = edges[edges["ppmi"] > 0]
        graph._edges = edges.drop(columns=["common_factors"])
        graph.edge_prop_as_numeric("frequency")
        graph.edge_prop_as_numeric("ppmi")
        graph.edge_prop_as_numeric("npmi")

        npmi_distance = edges["npmi"].apply(compute_distance)
        npmi_distance.name = "distance_npmi"
        graph.add_edge_properties(npmi_distance, "numeric")

        # Set entity types
        if type_data is not None:
            graph.add_node_properties(
                type_data.reset_index().rename(
                    columns={
                        "entity": "@id",
                        "type": "entity_type"
                    }).set_index("@id"),
                prop_type="category")

        # Set papers as props
        graph.remove_node_properties(f)
        if nodes_to_include is not None:
            paper_data = occurrence_data.loc[nodes_to_include, "paper"]
        else:
            paper_data = occurrence_data["paper"]
        graph.add_node_properties(
            paper_data.apply(lambda x: list(x)),
            prop_type="category")

        graphs[f] = graph

        processor, com_detector, path_finder, pgframe_converter =\
            _configure_backends(backend_configs, graph)

        # Compute centralities
        all_metrics = processor.compute_all_node_metrics(
            degree_weights=["frequency"],
            pagerank_weights=["frequency"])
        for metrics, data in all_metrics.items():
            for weight, values in data.items():
                prop = pd.DataFrame(
                    values.items(),
                    columns=["@id", "{}_{}".format(metrics, weight)])
                graph.add_node_properties(prop, prop_type="numeric")

        # Compute communitites
        # Bug fix: the `communities` flag was documented but ignored;
        # community detection now runs only when it is requested.
        if communities:
            frequency_partition = com_detector.detect_communities(
                strategy=community_strategy, weight="frequency")
            prop = pd.DataFrame(
                frequency_partition.items(),
                columns=["@id", "community_frequency"])
            graph.add_node_properties(prop, prop_type="numeric")
            npmi_partition = com_detector.detect_communities(
                strategy=community_strategy, weight="npmi")
            prop = pd.DataFrame(
                npmi_partition.items(), columns=["@id", "community_npmi"])
            graph.add_node_properties(prop, prop_type="numeric")

        # Compute minimum spanning tree
        tree = path_finder.minimum_spanning_tree(distance="distance_npmi")
        tree_pgframe = pgframe_converter(tree)
        trees[f] = tree_pgframe

        # Dump the generated PGFrame
        if graph_dump_prefix:
            graph.export_json("{}_{}_graph.json".format(graph_dump_prefix, f))
            tree_pgframe.export_json(
                "{}_{}_tree.json".format(graph_dump_prefix, f))
    return graphs, trees
def assign_raw_type(x):
    """Return the most frequent element of *x* (majority vote).

    Ties are broken in favour of the element that appears first in *x*,
    matching the original manual max-over-counts scan.

    Parameters
    ----------
    x : iterable
        Raw entity types to vote over. Must be non-empty.

    Returns
    -------
    The element of *x* with the highest occurrence count.
    """
    from collections import Counter

    # Counter preserves insertion order and most_common() sorts stably,
    # so ties resolve to the first-seen element.
    return Counter(x).most_common(1)[0][0]
def resolve_taxonomy_to_types(occurrence_data, mapping):
"""Assign entity types from hierarchies of NCIT classes.
This function assigns a unique entity type to every entity
using the ontology linking data (hierarchy, or taxonomy,
of NCIT classes) according to the input type mapping. If
a term was not linked, i.e. does not have such a taxonomy
attached, raw entity types from the NER model are using (
a unique entity type is chosen by the majority vote).
Parameters
----------
occurrence_data : pd.DataFrame
Input occurrence data table. Rows represent unique entities (indexed
by entity names), columns contain the following columns: `taxonomy`
list containing a hierarchy of NCIT ontology classes of the given
entity, `raw_entity_types` list of raw entity types provided by
the NER model.
mapping : | |
try:
# Has to be a number, has to be negative
assert -float(arg) > 0
except (ValueError, AssertionError):
newargs.append(arg)
continue
newargs.append(" " + arg)
return newargs
def interpret_bintime(bintime):
    """If bin time is negative, interpret as power of two.

    Examples
    --------
    >>> interpret_bintime(2)
    2
    >>> interpret_bintime(-2) == 0.25
    True
    >>> interpret_bintime(0)
    Traceback (most recent call last):
    ...
    ValueError: Bin time cannot be = 0
    """
    # Guard clause first: zero is the only invalid value.
    if bintime == 0:
        raise ValueError("Bin time cannot be = 0")
    # Negative values are shorthand for 2**bintime (e.g. -2 -> 0.25).
    return 2 ** bintime if bintime < 0 else bintime
@njit(nogil=True, parallel=False)
def _get_bin_edges(a, bins, a_min, a_max):
    # numba kernel: build `bins` + 1 uniformly spaced edges over
    # [a_min, a_max]. Note `a` itself is unused here, only its range.
    bin_edges = np.zeros(bins + 1, dtype=np.float64)
    delta = (a_max - a_min) / bins
    for i in range(bin_edges.size):
        bin_edges[i] = a_min + i * delta
    bin_edges[-1] = a_max  # Avoid roundoff error on last point
    return bin_edges
def get_bin_edges(a, bins):
    """Return ``bins + 1`` uniform bin edges spanning the range of *a*.

    Examples
    --------
    >>> array = np.array([0., 10.])
    >>> bins = 2
    >>> np.allclose(get_bin_edges(array, bins), [0, 5, 10])
    True
    """
    lo, hi = np.min(a), np.max(a)
    return _get_bin_edges(a, bins, lo, hi)
@njit(nogil=True, parallel=False)
def compute_bin(x, bin_edges):
    """
    Return the index of the uniform bin that `x` falls into, or None if
    `x` lies outside [bin_edges[0], bin_edges[-1]].

    Examples
    --------
    >>> bin_edges = np.array([0, 5, 10])
    >>> compute_bin(1, bin_edges)
    0
    >>> compute_bin(5, bin_edges)
    1
    >>> compute_bin(10, bin_edges)
    1
    """
    # assuming uniform bins for now
    n = bin_edges.shape[0] - 1
    a_min = bin_edges[0]
    a_max = bin_edges[-1]
    # special case to mirror NumPy behavior for last bin
    if x == a_max:
        return n - 1  # a_max always in last bin
    bin = int(n * (x - a_min) / (a_max - a_min))
    if bin < 0 or bin >= n:
        return None
    else:
        return bin
@njit(nogil=True, parallel=False)
def _hist1d_numba_seq(H, tracks, bins, ranges):
    # numba kernel: accumulate 1-D histogram counts of `tracks` into H.
    # `delta` is the inverse bin width; samples outside `ranges` are dropped.
    delta = 1 / ((ranges[1] - ranges[0]) / bins)
    for t in range(tracks.size):
        i = (tracks[t] - ranges[0]) * delta
        if 0 <= i < bins:
            H[int(i)] += 1
    return H
def hist1d_numba_seq(a, bins, ranges, use_memmap=False, tmp=None):
    """Compute a 1-D histogram of `a`, optionally backed by a memory map.

    Parameters
    ----------
    a : array-like
        Input samples.
    bins : int
        Number of uniform bins over `ranges`.
    ranges : [min, max]
        Histogram range.
    use_memmap : bool
        If True and ``bins > 10**7``, back the output array with a
        memory-mapped ``.npy`` file instead of RAM.
    tmp : str, optional
        File name for the memory map; a temporary file is used if None.

    Examples
    --------
    >>> if os.path.exists('out.npy'): os.unlink('out.npy')
    >>> x = np.random.uniform(0., 1., 100)
    >>> H, xedges = np.histogram(x, bins=5, range=[0., 1.])
    >>> Hn = hist1d_numba_seq(x, bins=5, ranges=[0., 1.], tmp='out.npy',
    ...                       use_memmap=True)
    >>> assert np.all(H == Hn)
    >>> # The number of bins is small, memory map was not used!
    >>> assert not os.path.exists('out.npy')
    >>> H, xedges = np.histogram(x, bins=10**8, range=[0., 1.])
    >>> Hn = hist1d_numba_seq(x, bins=10**8, ranges=[0., 1.], tmp='out.npy',
    ...                       use_memmap=True)
    >>> assert np.all(H == Hn)
    >>> assert os.path.exists('out.npy')
    """
    if bins > 10 ** 7 and use_memmap:
        if tmp is None:
            # Bug fix: open_memmap needs a file *name*; NamedTemporaryFile
            # returns a file object, so take its .name.
            tmp = tempfile.NamedTemporaryFile("w+").name
        hist_arr = np.lib.format.open_memmap(
            tmp, mode="w+", dtype=a.dtype, shape=(bins,)
        )
    else:
        hist_arr = np.zeros((bins,), dtype=a.dtype)
    return _hist1d_numba_seq(hist_arr, a, bins, np.asarray(ranges))
@njit(nogil=True, parallel=False)
def _hist2d_numba_seq(H, tracks, bins, ranges):
    # numba kernel: accumulate 2-D histogram counts into H.
    # tracks has shape (2, nsamples); `delta` holds per-axis inverse bin
    # widths; samples outside either range are dropped.
    delta = 1 / ((ranges[:, 1] - ranges[:, 0]) / bins)
    for t in range(tracks.shape[1]):
        i = (tracks[0, t] - ranges[0, 0]) * delta[0]
        j = (tracks[1, t] - ranges[1, 0]) * delta[1]
        if 0 <= i < bins[0] and 0 <= j < bins[1]:
            H[int(i), int(j)] += 1
    return H
def hist2d_numba_seq(x, y, bins, ranges):
    """Compute a 2-D histogram of the samples (x, y).

    Examples
    --------
    >>> x = np.random.uniform(0., 1., 100)
    >>> y = np.random.uniform(2., 3., 100)
    >>> H, xedges, yedges = np.histogram2d(x, y, bins=(5, 5),
    ...                                    range=[(0., 1.), (2., 3.)])
    >>> Hn = hist2d_numba_seq(x, y, bins=(5, 5),
    ...                       ranges=[[0., 1.], [2., 3.]])
    >>> assert np.all(H == Hn)
    """
    counts = np.zeros((bins[0], bins[1]), dtype=np.uint64)
    samples = np.array([x, y])
    return _hist2d_numba_seq(
        counts, samples, np.asarray(list(bins)), np.asarray(ranges)
    )
@njit(nogil=True, parallel=False)
def _hist3d_numba_seq(H, tracks, bins, ranges):
    # numba kernel: accumulate 3-D histogram counts into H.
    # tracks has shape (3, nsamples); `delta` holds per-axis inverse bin widths.
    delta = 1 / ((ranges[:, 1] - ranges[:, 0]) / bins)
    for t in range(tracks.shape[1]):
        i = (tracks[0, t] - ranges[0, 0]) * delta[0]
        j = (tracks[1, t] - ranges[1, 0]) * delta[1]
        k = (tracks[2, t] - ranges[2, 0]) * delta[2]
        # Bug fix: the third axis must be bounds-checked too; without the
        # `k` test, z values outside ranges[2] index out of bounds or
        # corrupt neighbouring counts.
        if 0 <= i < bins[0] and 0 <= j < bins[1] and 0 <= k < bins[2]:
            H[int(i), int(j), int(k)] += 1
    return H
def hist3d_numba_seq(tracks, bins, ranges):
    """Compute a 3-D histogram of `tracks`, an (x, y, z) triple of samples.

    Examples
    --------
    >>> x = np.random.uniform(0., 1., 100)
    >>> y = np.random.uniform(2., 3., 100)
    >>> z = np.random.uniform(4., 5., 100)
    >>> H, _ = np.histogramdd((x, y, z), bins=(5, 6, 7),
    ...                       range=[(0., 1.), (2., 3.), (4., 5)])
    >>> Hn = hist3d_numba_seq((x, y, z), bins=(5, 6, 7),
    ...                       ranges=[[0., 1.], [2., 3.], [4., 5.]])
    >>> assert np.all(H == Hn)
    """
    counts = np.zeros((bins[0], bins[1], bins[2]), dtype=np.uint64)
    samples = np.asarray(tracks)
    return _hist3d_numba_seq(
        counts, samples, np.asarray(list(bins)), np.asarray(ranges)
    )
@njit(nogil=True, parallel=False)
def _hist2d_numba_seq_weight(H, tracks, weights, bins, ranges):
    # numba kernel: accumulate a weighted 2-D histogram into H.
    # Each in-range sample t contributes weights[t] instead of 1.
    delta = 1 / ((ranges[:, 1] - ranges[:, 0]) / bins)
    for t in range(tracks.shape[1]):
        i = (tracks[0, t] - ranges[0, 0]) * delta[0]
        j = (tracks[1, t] - ranges[1, 0]) * delta[1]
        if 0 <= i < bins[0] and 0 <= j < bins[1]:
            H[int(i), int(j)] += weights[t]
    return H
def hist2d_numba_seq_weight(x, y, weights, bins, ranges):
    """Compute a weighted 2-D histogram of the samples (x, y).

    Examples
    --------
    >>> x = np.random.uniform(0., 1., 100)
    >>> y = np.random.uniform(2., 3., 100)
    >>> weight = np.random.uniform(0, 1, 100)
    >>> H, xedges, yedges = np.histogram2d(x, y, bins=(5, 5),
    ...                                    range=[(0., 1.), (2., 3.)],
    ...                                    weights=weight)
    >>> Hn = hist2d_numba_seq_weight(x, y, bins=(5, 5),
    ...                              ranges=[[0., 1.], [2., 3.]],
    ...                              weights=weight)
    >>> assert np.all(H == Hn)
    """
    acc = np.zeros((bins[0], bins[1]), dtype=np.double)
    samples = np.array([x, y])
    return _hist2d_numba_seq_weight(
        acc, samples, weights, np.asarray(list(bins)), np.asarray(ranges)
    )
@njit(nogil=True, parallel=False)
def _hist3d_numba_seq_weight(H, tracks, weights, bins, ranges):
    # numba kernel: accumulate a weighted 3-D histogram into H.
    # Each in-range sample t contributes weights[t] instead of 1.
    delta = 1 / ((ranges[:, 1] - ranges[:, 0]) / bins)
    for t in range(tracks.shape[1]):
        i = (tracks[0, t] - ranges[0, 0]) * delta[0]
        j = (tracks[1, t] - ranges[1, 0]) * delta[1]
        k = (tracks[2, t] - ranges[2, 0]) * delta[2]
        # Bug fix: the third axis must be bounds-checked too; without the
        # `k` test, z values outside ranges[2] index out of bounds or
        # corrupt neighbouring weights.
        if 0 <= i < bins[0] and 0 <= j < bins[1] and 0 <= k < bins[2]:
            H[int(i), int(j), int(k)] += weights[t]
    return H
def hist3d_numba_seq_weight(tracks, weights, bins, ranges):
    """Compute a weighted 3-D histogram of `tracks`, an (x, y, z) triple.

    Examples
    --------
    >>> x = np.random.uniform(0., 1., 100)
    >>> y = np.random.uniform(2., 3., 100)
    >>> z = np.random.uniform(4., 5., 100)
    >>> weights = np.random.uniform(0, 1., 100)
    >>> H, _ = np.histogramdd((x, y, z), bins=(5, 6, 7),
    ...                       range=[(0., 1.), (2., 3.), (4., 5)],
    ...                       weights=weights)
    >>> Hn = hist3d_numba_seq_weight(
    ...     (x, y, z), weights, bins=(5, 6, 7),
    ...     ranges=[[0., 1.], [2., 3.], [4., 5.]])
    >>> assert np.all(H == Hn)
    """
    acc = np.zeros((bins[0], bins[1], bins[2]), dtype=np.double)
    samples = np.asarray(tracks)
    return _hist3d_numba_seq_weight(
        acc, samples, weights, np.asarray(list(bins)), np.asarray(ranges)
    )
@njit(nogil=True, parallel=False)
def index_arr(a, ix_arr):
    # numba kernel: read a[tuple(ix_arr)] by converting the n-dimensional
    # index into a flat offset via the element strides (numba cannot index
    # with a runtime-length tuple).
    strides = np.array(a.strides) / a.itemsize
    ix = int((ix_arr * strides).sum())
    return a.ravel()[ix]
@njit(nogil=True, parallel=False)
def index_set_arr(a, ix_arr, val):
    # numba kernel: write a[tuple(ix_arr)] = val via the same flat-offset
    # trick as index_arr.
    strides = np.array(a.strides) / a.itemsize
    ix = int((ix_arr * strides).sum())
    a.ravel()[ix] = val
@njit(nogil=True, parallel=False)
def _histnd_numba_seq(H, tracks, bins, ranges, slice_int):
    # numba kernel: accumulate an n-dimensional histogram into H.
    # `slice_int` is a preallocated integer index buffer (one slot per dim)
    # reused across samples.
    delta = 1 / ((ranges[:, 1] - ranges[:, 0]) / bins)
    for t in range(tracks.shape[1]):
        slicearr = np.array(
            [
                (tracks[dim, t] - ranges[dim, 0]) * delta[dim]
                for dim in range(tracks.shape[0])
            ]
        )
        # keep the sample only if every coordinate is inside its range
        good = np.all((slicearr < bins) & (slicearr >= 0))
        slice_int[:] = slicearr
        if good:
            curr = index_arr(H, slice_int)
            index_set_arr(H, slice_int, curr + 1)
    return H
def histnd_numba_seq(tracks, bins, ranges):
    """Compute an n-dimensional histogram of `tracks`.

    Parameters
    ----------
    tracks : (ndim, nsamples) array
        Samples, one row per dimension.
    bins : (ndim,) int array
        Number of bins per dimension.
    ranges : (ndim, 2) array
        (min, max) per dimension.

    Examples
    --------
    >>> x = np.random.uniform(0., 1., 100)
    >>> y = np.random.uniform(2., 3., 100)
    >>> z = np.random.uniform(4., 5., 100)
    >>> # 2d example
    >>> H, _, _ = np.histogram2d(x, y, bins=np.array((5, 5)),
    ...                          range=[(0., 1.), (2., 3.)])
    >>> alldata = np.array([x, y])
    >>> Hn = histnd_numba_seq(alldata, bins=np.array([5, 5]),
    ...                       ranges=np.array([[0., 1.], [2., 3.]]))
    >>> assert np.all(H == Hn)
    >>> # 3d example
    >>> H, _ = np.histogramdd((x, y, z), bins=np.array((5, 6, 7)),
    ...                       range=[(0., 1.), (2., 3.), (4., 5)])
    >>> alldata = np.array([x, y, z])
    >>> Hn = histnd_numba_seq(alldata, bins=np.array((5, 6, 7)),
    ...                       ranges=np.array([[0., 1.], [2., 3.], [4., 5.]]))
    >>> assert np.all(H == Hn)
    """
    # Doc fix: the 3-D example previously demonstrated hist3d_numba_seq
    # (copy-paste slip); it now exercises this function.
    H = np.zeros(tuple(bins), dtype=np.uint64)
    slice_int = np.zeros(len(bins), dtype=np.uint64)
    return _histnd_numba_seq(H, tracks, bins, ranges, slice_int)
# Pick histogram backends once at import time: numba-accelerated versions
# when available, numpy fallbacks otherwise.
if HAS_NUMBA:
    def histogram2d(*args, **kwargs):
        # Accept numpy-style `range` kwarg; the numba implementations
        # name the same parameter `ranges`.
        if "range" in kwargs:
            kwargs["ranges"] = kwargs.pop("range")
        return hist2d_numba_seq(*args, **kwargs)

    def histogram(*args, **kwargs):
        # Same `range` -> `ranges` kwarg translation as histogram2d.
        if "range" in kwargs:
            kwargs["ranges"] = kwargs.pop("range")
        return hist1d_numba_seq(*args, **kwargs)
else:
    def histogram2d(*args, **kwargs):
        # numpy fallback: keep only the counts, drop the bin edges.
        return histogram2d_np(*args, **kwargs)[0]

    def histogram(*args, **kwargs):
        # numpy fallback: keep only the counts, drop the bin edges.
        return histogram_np(*args, **kwargs)[0]
def touch(fname):
    """Mimick the same shell command.

    Examples
    --------
    >>> touch('bububu')
    >>> os.path.exists('bububu')
    True
    >>> os.unlink('bububu')
    """
    target = Path(fname)
    target.touch()
def log_x(a, base):
    """Return the logarithm of *a* in the given *base*.

    Uses the change-of-base identity log_b(x) = log(x) / log(b), so it
    works element-wise on arrays as well as scalars.
    """
    numerator = np.log(a)
    denominator = np.log(base)
    return numerator / denominator
def get_list_of_small_powers(maxno=100000000000):
powers_of_two = 2 ** np.arange(0, np.ceil(np.log2(maxno)))
powers_of_three = 3 ** np.arange(0, np.ceil(log_x(maxno, 3)))
powers_of_five = 5 ** np.arange(0, np.ceil(log_x(maxno, 5)))
list_of_powers = []
for p2 in powers_of_two:
for p3 in powers_of_three:
for p5 in powers_of_five:
newno = p2 | |
Zebra
🦌 Deer
🐮 Cow Face
🐂 Ox
🐃 Water Buffalo
🐄 Cow
🐷 Pig Face
🐖 Pig
🐗 Boar
🐽 Pig Nose
🐏 Ram
🐑 Ewe
🐐 Goat
🐪 Camel
🐫 Two-Hump Camel
🦙 Llama
🦒 Giraffe
🐘 Elephant
🦏 Rhinoceros
🦛 Hippopotamus
🐭 Mouse Face
🐁 Mouse
🐀 Rat
🐹 Hamster Face
🐰 Rabbit Face
🐇 Rabbit
🐿️ Chipmunk
🦔 Hedgehog
🦇 Bat
🐻 Bear Face
🐨 Koala
🐼 Panda Face
🦘 Kangaroo
🦡 Badger
🐾 Paw Prints
🦃 Turkey
🐔 Chicken
🐓 Rooster
🐣 Hatching Chick
🐤 Baby Chick
🐥 Front-Facing Baby Chick
🐦 Bird
🐧 Penguin
🕊️ Dove
🦅 Eagle
🦆 Duck
🦢 Swan
🦉 Owl
🦚 Peacock
🦜 Parrot
🐸 Frog Face
🐊 Crocodile
🐢 Turtle
🦎 Lizard
🐍 Snake
🐲 Dragon Face
🐉 Dragon
🦕 Sauropod
🦖 T-Rex
🐳 Spouting Whale
🐋 Whale
🐬 Dolphin
🐟 Fish
🐠 Tropical Fish
🐡 Blowfish
🦈 Shark
🐙 Octopus
🐚 Spiral Shell
🦀 Crab
🦞 Lobster
🦐 Shrimp
🦑 Squid
🐌 Snail
🦋 Butterfly
🐛 Bug
🐜 Ant
🐝 Honeybee
🐞 Lady Beetle
🦗 Cricket
🕷️ Spider
🕸️ Spider Web
🦂 Scorpion
🦟 Mosquito
🦠 Microbe
💐 Bouquet
🌸 Cherry Blossom
💮 White Flower
🏵️ Rosette
🌹 Rose
🥀 Wilted Flower
🌺 Hibiscus
🌻 Sunflower
🌼 Blossom
🌷 Tulip
🌱 Seedling
🌲 Evergreen Tree
🌳 Deciduous Tree
🌴 Palm Tree
🌵 Cactus
🌾 Sheaf of Rice
🌿 Herb
☘️ Shamrock
🍀 Four Leaf Clover
🍁 Maple Leaf
🍂 Fallen Leaf
🍃 Leaf Fluttering in Wind
🍇 Grapes
🍈 Melon
🍉 Watermelon
🍊 Tangerine
🍋 Lemon
🍌 Banana
🍍 Pineapple
🥭 Mango
🍎 Red Apple
🍏 Green Apple
🍐 Pear
🍑 Peach
🍒 Cherries
🍓 Strawberry
🥝 Kiwi Fruit
🍅 Tomato
🥥 Coconut
🥑 Avocado
🍆 Eggplant
🥔 Potato
🥕 Carrot
🌽 Ear of Corn
🌶️ Hot Pepper
🥒 Cucumber
🥬 Leafy Green
🥦 Broccoli
🍄 Mushroom
🥜 Peanuts
🌰 Chestnut
🍞 Bread
🥐 Croissant
🥖 Baguette Bread
🥨 Pretzel
🥯 Bagel
🥞 Pancakes
🧀 Cheese Wedge
🍖 Meat on Bone
🍗 Poultry Leg
🥩 Cut of Meat
🥓 Bacon
🍔 Hamburger
🍟 French Fries
🍕 Pizza
🌭 Hot Dog
🥪 Sandwich
🌮 Taco
🌯 Burrito
🥙 Stuffed Flatbread
🥚 Egg
🍳 Cooking
🥘 Shallow Pan of Food
🍲 Pot of Food
🥣 Bowl With Spoon
🥗 Green Salad
🍿 Popcorn
🧂 Salt
🥫 Canned Food
🍱 Bento Box
🍘 Rice Cracker
🍙 Rice Ball
🍚 Cooked Rice
🍛 Curry Rice
🍜 Steaming Bowl
🍝 Spaghetti
🍠 Roasted Sweet Potato
🍢 Oden
🍣 Sushi
🍤 Fried Shrimp
🍥 Fish Cake With Swirl
🥮 Moon Cake
🍡 Dango
🥟 Dumpling
🥠 Fortune Cookie
🥡 Takeout Box
🍦 Soft Ice Cream
🍧 Shaved Ice
🍨 Ice Cream
🍩 Doughnut
🍪 Cookie
🎂 Birthday Cake
🍰 Shortcake
🧁 Cupcake
🥧 Pie
🍫 Chocolate Bar
🍬 Candy
🍭 Lollipop
🍮 Custard
🍯 Honey Pot
🍼 Baby Bottle
🥛 Glass of Milk
☕ Hot Beverage
🍵 Teacup Without Handle
🍶 Sake
🍾 Bottle With Popping Cork
🍷 Wine Glass
🍸 Cocktail Glass
🍹 Tropical Drink
🍺 Beer Mug
🍻 Clinking Beer Mugs
🥂 Clinking Glasses
🥃 Tumbler Glass
🥤 Cup With Straw
🥢 Chopsticks
🍽️ Fork and Knife With Plate
🍴 Fork and Knife
🥄 Spoon
🔪 Kitchen Knife
🏺 Amphora
🌍 Globe Showing Europe-Africa
🌎 Globe Showing Americas
🌏 Globe Showing Asia-Australia
🌐 Globe With Meridians
🗺️ World Map
🗾 Map of Japan
🧭 Compass
🏔️ Snow-Capped Mountain
⛰️ Mountain
🌋 Volcano
🗻 Mount Fuji
🏕️ Camping
🏖️ Beach With Umbrella
🏜️ Desert
🏝️ Desert Island
🏞️ National Park
🏟️ Stadium
🏛️ Classical Building
🏗️ Building Construction
🏘️ Houses
🏚️ Derelict House
🏠 House
🏡 House With Garden
🧱 Brick
🏢 Office Building
🏤 Post Office
🏥 Hospital
🏦 Bank
🏨 Hotel
🏩 Love Hotel
🏪 Convenience Store
🏫 School
🏬 Department Store
🏭 Factory
🏰 Castle
💒 Wedding
🗼 Tokyo Tower
🗽 Statue of Liberty
⛪ Church
🕌 Mosque
🕍 Synagogue
⛩️ Shinto Shrine
🕋 Kaaba
⛲ Fountain
⛺ Tent
🌁 Foggy
🌃 Night With Stars
🏙️ Cityscape
🌄 Sunrise Over Mountains
🌅 Sunrise
🌆 Cityscape at Dusk
🌇 Sunset
🌉 Bridge at Night
♨️ Hot Springs
🌌 Milky Way
🎠 Carousel Horse
🎡 Ferris Wheel
🎢 Roller Coaster
💈 Barber Pole
🎪 Circus Tent
🚂 Locomotive
🚃 Railway Car
🚄 High-Speed Train
🚅 Bullet Train
🚆 Train
🚇 Metro
🚈 Light Rail
🚉 Station
🚊 Tram
🚝 Monorail
🚞 Mountain Railway
🚋 Tram Car
🚌 Bus
🚍 Oncoming Bus
🚎 Trolleybus
🚐 Minibus
🚑 Ambulance
🚒 Fire Engine
🚓 Police Car
🚔 Oncoming Police Car
🚕 Taxi
🚖 Oncoming Taxi
🚗 Automobile
🚘 Oncoming Automobile
🚙 Sport Utility Vehicle
🚚 Delivery Truck
🚛 Articulated Lorry
🚜 Tractor
🚲 Bicycle
🛴 Kick Scooter
🛹 Skateboard
🛵 Motor Scooter
🚏 Bus Stop
🛣️ Motorway
🛤️ Railway Track
🛢️ Oil Drum
⛽ Fuel Pump
🚨 Police Car Light
🚥 Horizontal Traffic Light
🚦 Vertical Traffic Light
🛑 Stop Sign
🚧 Construction
⚓ Anchor
⛵ Sailboat
🛶 Canoe
🚤 Speedboat
🛳️ Passenger Ship
⛴️ Ferry
🛥️ Motor Boat
🚢 Ship
✈️ Airplane
🛩️ Small Airplane
🛫 Airplane Departure
🛬 Airplane Arrival
💺 Seat
🚁 Helicopter
🚟 Suspension Railway
🚠 Mountain Cableway
🚡 Aerial Tramway
🛰️ Satellite
🚀 Rocket
🛸 Flying Saucer
🛎️ Bellhop Bell
🧳 Luggage
⌛ Hourglass Done
⏳ Hourglass Not Done
⌚ Watch
⏰ Alarm Clock
⏱️ Stopwatch
⏲️ Timer Clock
🕰️ Mantelpiece Clock
🕛 Twelve O’clock
🕧 Twelve-Thirty
🕐 One O’clock
🕜 One-Thirty
🕑 Two O’clock
🕝 Two-Thirty
🕒 Three O’clock
🕞 Three-Thirty
🕓 Four O’clock
🕟 Four-Thirty
🕔 Five O’clock
🕠 Five-Thirty
🕕 Six O’clock
🕡 Six-Thirty
🕖 Seven O’clock
🕢 Seven-Thirty
🕗 Eight O’clock
🕣 Eight-Thirty
🕘 Nine O’clock
🕤 Nine-Thirty
🕙 Ten O’clock
🕥 Ten-Thirty
🕚 Eleven O’clock
🕦 Eleven-Thirty
🌑 New Moon
🌒 Waxing Crescent Moon
🌓 First Quarter Moon
🌔 Waxing Gibbous Moon
🌕 Full Moon
🌖 Waning Gibbous Moon
🌗 Last Quarter Moon
🌘 Waning Crescent Moon
🌙 Crescent Moon
🌚 New Moon Face
🌛 First Quarter Moon Face
🌜 Last Quarter Moon Face
🌡️ Thermometer
☀️ Sun
🌝 Full Moon Face
🌞 Sun With Face
⭐ White Medium Star
🌟 Glowing Star
🌠 Shooting Star
☁️ Cloud
⛅ Sun Behind Cloud
⛈️ Cloud With Lightning and Rain
🌤️ Sun Behind Small Cloud
🌥️ Sun Behind Large Cloud
🌦️ Sun Behind Rain Cloud
🌧️ Cloud With Rain
🌨️ Cloud With Snow
🌩️ Cloud With Lightning
🌪️ Tornado
🌫️ Fog
🌬️ Wind Face
🌀 Cyclone
🌈 Rainbow
🌂 Closed Umbrella
☂️ Umbrella
☔ Umbrella With Rain Drops
⛱️ Umbrella on Ground
⚡ High Voltage
❄️ Snowflake
☃️ Snowman
⛄ Snowman Without Snow
☄️ Comet
🔥 Fire
💧 Droplet
🌊 Water Wave
🎃 Jack-O-Lantern
🎄 Christmas Tree
🎆 Fireworks
🎇 Sparkler
🧨 Firecracker
✨ Sparkles
🎈 Balloon
🎉 Party Popper
🎊 Confetti Ball
🎋 Tanabata Tree
🎍 Pine Decoration
🎏 Carp Streamer
🎐 Wind Chime
🎑 Moon Viewing Ceremony
🧧 Red Gift Envelope
🎀 Ribbon
🎁 Wrapped Gift
🎗️ Reminder Ribbon
🎟️ Admission Tickets
🎫 Ticket
🎖️ Military Medal
🏆 Trophy
🏅 Sports Medal
🥇 1st Place Medal
🥈 2nd Place Medal
🥉 3rd Place Medal
⚽ Soccer Ball
⚾ Baseball
🥎 Softball
🏀 Basketball
🏐 Volleyball
🏈 American Football
🏉 Rugby Football
🎾 Tennis
🥏 Flying Disc
🎳 Bowling
🏏 Cricket Game
🏑 Field Hockey
🏒 Ice Hockey
🥍 Lacrosse
🏓 Ping Pong
🏸 Badminton
🥊 Boxing Glove
🥋 Martial Arts Uniform
🥅 Goal Net
⛳ Flag in Hole
⛸️ Ice Skate
🎣 Fishing Pole
🎽 Running Shirt
🎿 Skis
🛷 Sled
🥌 Curling Stone
🎯 Direct Hit
🎱 Pool 8 Ball
🔮 Crystal Ball
🧿 Nazar Amulet
🎮 Video Game
🕹️ Joystick
🎰 Slot Machine
🎲 Game Die
🧩 Jigsaw
🧸 Teddy Bear
♠️ Spade Suit
♥️ Heart Suit
♦️ Diamond Suit
♣️ Club Suit
♟️ Chess Pawn
🃏 Joker
🀄 Mahjong Red Dragon
🎴 Flower Playing Cards
🎭 Performing Arts
🖼️ Framed Picture
🎨 Artist Palette
🔇 Muted Speaker
🔈 Speaker Low Volume
🔉 Speaker Medium Volume
🔊 Speaker High Volume
📢 Loudspeaker
📣 Megaphone
📯 Postal Horn
🔔 Bell
🔕 Bell With Slash
🎼 Musical Score
🎵 Musical Note
🎶 Musical Notes
🎙️ Studio Microphone
🎚️ Level Slider
🎛️ Control Knobs
🎤 Microphone
🎧 Headphone
📻 Radio
🎷 Saxophone
🎸 Guitar
🎹 Musical Keyboard
🎺 Trumpet
🎻 Violin
🥁 Drum
📱 Mobile Phone
📲 Mobile Phone With Arrow
☎️ Telephone
📞 Telephone Receiver
📟 Pager
📠 Fax Machine
🔋 Battery
🔌 Electric Plug
💻 Laptop Computer
🖥️ Desktop Computer
🖨️ Printer
⌨️ Keyboard
🖱️ Computer Mouse
🖲️ Trackball
💽 Computer Disk
💾 Floppy Disk
💿 Optical Disk
📀 DVD
🧮 Abacus
🎥 Movie Camera
🎞️ Film Frames
📽️ Film Projector
🎬 Clapper Board
📺 Television
📷 Camera
📸 Camera With Flash
📹 Video Camera
📼 Videocassette
🔍 Magnifying Glass Tilted Left
🔎 Magnifying Glass Tilted Right
🕯️ Candle
💡 Light Bulb
🔦 Flashlight
🏮 Red Paper Lantern
📔 Notebook With Decorative Cover
📕 Closed Book
📖 Open Book
📗 Green Book
📘 Blue Book
📙 Orange Book
📚 Books
📓 Notebook
📒 Ledger
📃 Page With Curl
📜 Scroll
📄 Page Facing Up
📰 Newspaper
🗞️ Rolled-Up Newspaper
📑 Bookmark Tabs
🔖 Bookmark
🏷️ Label
💰 Money Bag
💴 Yen Banknote
💵 Dollar Banknote
💶 Euro Banknote
💷 Pound Banknote
💸 Money With Wings
💳 Credit Card
🧾 Receipt
💹 Chart Increasing With Yen
💱 Currency Exchange
💲 Heavy Dollar Sign
✉️ Envelope
📧 E-Mail
📨 Incoming Envelope
📩 Envelope With Arrow
📤 Outbox Tray
📥 Inbox Tray
📦 Package
📫 Closed Mailbox With Raised Flag
📪 Closed Mailbox With Lowered Flag
📬 Open Mailbox With Raised Flag
📭 Open Mailbox With Lowered Flag
📮 Postbox
🗳️ Ballot Box With Ballot
✏️ Pencil
✒️ Black Nib
🖋️ Fountain Pen
🖊️ Pen
🖌️ Paintbrush
🖍️ Crayon
📝 Memo
💼 Briefcase
📁 File Folder
📂 Open File Folder
🗂️ Card Index Dividers
📅 Calendar
📆 Tear-Off Calendar
🗒️ Spiral Notepad
🗓️ Spiral Calendar
📇 Card Index
📈 Chart Increasing
📉 Chart Decreasing
📊 Bar Chart
📋 Clipboard
📌 Pushpin
📍 Round Pushpin
📎 Paperclip
🖇️ Linked Paperclips
📏 Straight Ruler
📐 Triangular Ruler
✂️ Scissors
🗃️ Card File Box
🗄️ File Cabinet
🗑️ Wastebasket
🔒 Locked
🔓 Unlocked
🔏 Locked With Pen
🔐 Locked With Key
🔑 Key
🗝️ Old Key
🔨 Hammer
⛏️ Pick
⚒️ Hammer and Pick
🛠️ Hammer and Wrench
🗡️ Dagger
⚔️ Crossed Swords
🔫 Pistol
🏹 Bow and Arrow
🛡️ Shield
🔧 Wrench
🔩 Nut and Bolt
⚙️ Gear
🗜️ Clamp
⚖️ Balance Scale
🔗 Link
⛓️ Chains
🧰 Toolbox
🧲 Magnet
⚗️ Alembic
🧪 Test Tube
🧫 Petri Dish
🧬 DNA
🧯 Fire Extinguisher
🔬 Microscope
🔭 Telescope
📡 Satellite Antenna
💉 Syringe
💊 Pill
🚪 Door
🛏️ Bed
🛋️ Couch and Lamp
🚽 Toilet
🚿 Shower
🛁 Bathtub
🧴 Lotion Bottle
🧵 Thread
🧶 Yarn
🧷 Safety Pin
🧹 Broom
🧺 Basket
🧻 Roll of Toilet Paper
🧼 Soap
🧽 Sponge
🛒 Shopping Cart
🚬 Cigarette
⚰️ Coffin
⚱️ Funeral Urn
🗿 Moai
🏧 ATM Sign
🚮 Litter in Bin Sign
🚰 Potable Water
♿ Wheelchair Symbol
🚹 Men’s Room
🚺 Women’s Room
🚻 Restroom
🚼 Baby Symbol
🚾 Water Closet
🛂 Passport Control
🛃 Customs
🛄 Baggage Claim
🛅 Left Luggage
⚠️ Warning
🚸 Children Crossing
⛔ No Entry
🚫 Prohibited
🚳 No Bicycles
🚭 No Smoking
🚯 No Littering
🚱 Non-Potable Water
🚷 No Pedestrians
📵 No Mobile Phones
🔞 No One Under Eighteen
☢️ Radioactive
☣️ Biohazard
⬆️ Up Arrow
↗️ Up-Right Arrow
➡️ Right Arrow
↘️ Down-Right Arrow
⬇️ Down Arrow
↙️ Down-Left Arrow
⬅️ Left Arrow
↖️ Up-Left Arrow
↕️ Up-Down Arrow
↔️ Left-Right Arrow
↩️ Right Arrow Curving Left
↪️ Left Arrow Curving Right
⤴️ Right Arrow Curving Up
⤵️ Right Arrow Curving Down
🔃 Clockwise Vertical Arrows
🔄 Counterclockwise Arrows Button
🔙 Back Arrow
🔚 End Arrow
🔛 On! Arrow
🔜 Soon Arrow
🔝 Top Arrow
🛐 Place of Worship
⚛️ Atom Symbol
♾️ Infinity
🕉️ Om
✡️ Star of David
☸️ Wheel of Dharma
☯️ Yin Yang
✝️ Latin Cross
☦️ Orthodox Cross
☪️ Star and | |
<filename>final_code/packages/cct.py
"""
CCT 建模优化代码
CCT
作者:赵润晓
日期:2021年5月1日
"""
import multiprocessing # since v0.1.1 多线程计算
import time # since v0.1.1 统计计算时长
from typing import Callable, Dict, Generic, Iterable, List, NoReturn, Optional, Tuple, TypeVar, Union
import matplotlib.pyplot as plt
import math
import random # since v0.1.1 随机数
import sys
import os # since v0.1.1 查看CPU核心数
import numpy
from scipy.integrate import solve_ivp # since v0.1.1 ODE45
import warnings # since v0.1.1 提醒方法过时
from packages.point import *
from packages.constants import *
from packages.base_utils import BaseUtils
from packages.local_coordinate_system import LocalCoordinateSystem
from packages.line2s import *
from packages.line3s import *
from packages.trajectory import Trajectory
from packages.particles import *
from packages.magnets import *
class CCT(Magnet, ApertureObject):
"""
表示一层弯曲 CCT 线圈
"""
def __init__(
    self,
    # local coordinate system of the CCT
    local_coordinate_system: LocalCoordinateSystem,
    # major radius: the bending radius
    big_r: float,
    # minor radius (aperture / 2)
    small_r: float,
    # bending angle, i.e. phi0 * winding_number; typical value 67.5
    bending_angle: float,  # must be positive
    # tilt angle of each multipole component; typical value [30, 90, 90, 90]
    tilt_angles: List[float],
    # number of windings
    winding_number: int,
    # current
    current: float,
    # starting point of the CCT path in the 2-D ξ-φ coordinate system
    starting_point_in_ksi_phi_coordinate: P2,
    # end point of the CCT path in the 2-D ξ-φ coordinate system
    end_point_in_ksi_phi_coordinate: P2,
    # number of discrete current elements per winding; larger means
    # higher computational accuracy
    disperse_number_per_winding: int = 120,
):
    """
    Construct one layer of a bent CCT coil.

    Note that ``bending_angle`` is redundant: it equals
    abs(end_point_in_ksi_phi_coordinate.y-starting_point_in_ksi_phi_coordinate.y).to_angle()
    """
    if bending_angle < 0:
        raise ValueError(f"CCT 偏转角度应为正数,不能是 {bending_angle},需要反向偏转的 CCT," +
                         "应通过 starting_point_in_ksi_phi_coordinate,和 end_point_in_ksi_phi_coordinate 控制偏转方向"
                         )
    if big_r < 0:
        raise ValueError(f"big_r = {big_r} 为负数,非法")
    if small_r < 0:
        raise ValueError(f"small_r = {small_r} 为负数,非法")
    if small_r >= big_r:
        raise ValueError(f"small_r {small_r} >= big_r {big_r},非法")
    self.local_coordinate_system = local_coordinate_system
    self.big_r = float(big_r)
    self.small_r = float(small_r)
    self.bending_angle = float(bending_angle)
    self.tilt_angles = [float(e) for e in tilt_angles]
    self.winding_number = int(winding_number)
    self.current = float(current)
    self.starting_point_in_ksi_phi_coordinate = starting_point_in_ksi_phi_coordinate
    self.end_point_in_ksi_phi_coordinate = end_point_in_ksi_phi_coordinate
    self.disperse_number_per_winding = int(disperse_number_per_winding)
    # bending angle, in radians
    self.bending_radian = BaseUtils.angle_to_radian(self.bending_angle)
    # tilt angles, in radians
    self.tilt_radians = BaseUtils.angle_to_radian(self.tilt_angles)
    # advance in φ per winding
    self.phi0 = self.bending_radian / self.winding_number
    # bipolar pole position a
    self.a = math.sqrt(self.big_r ** 2 - self.small_r ** 2)
    # the other bipolar-coordinate constant η
    self.eta = 0.5 * \
        math.log((self.big_r + self.a) / (self.big_r - self.a))
    # converter from ξ-φ coordinates to 3-D xyz coordinates
    self.bipolar_toroidal_coordinate_system = CCT.BipolarToroidalCoordinateSystem(
        self.a, self.eta, self.big_r, self.small_r
    )
    # CCT path expressed in ξ-φ coordinates: the function φ(ξ)
    def phi_ksi_function(ksi): return self.phi_ksi_function(ksi)
    # CCT path in ξ-φ coordinates: P(ξ) = (ξ, φ(ξ))
    def p2_function(ksi): return P2(ksi, phi_ksi_function(ksi))
    # CCT path in xyz coordinates: P(ξ) = (x(ξ), y(ξ), z(ξ))
    def p3_function(ksi): return self.bipolar_toroidal_coordinate_system.convert(
        p2_function(ksi)
    )
    # self.phi_ksi_function = phi_ksi_function
    # self.p2_function = p2_function
    # self.p3_function = p3_function
    # total number of segments / current elements
    self.total_disperse_number = self.winding_number * self.disperse_number_per_winding
    dispersed_path2: List[List[float]] = [
        p2_function(ksi).to_list()
        for ksi in BaseUtils.linspace(
            self.starting_point_in_ksi_phi_coordinate.x,
            self.end_point_in_ksi_phi_coordinate.x,
            self.total_disperse_number + 1,
        )  # m windings need m+1 sample points for correct segmentation
    ]
    self.dispersed_path3_points: List[P3] = [
        p3_function(ksi)
        for ksi in BaseUtils.linspace(
            self.starting_point_in_ksi_phi_coordinate.x,
            self.end_point_in_ksi_phi_coordinate.x,
            self.total_disperse_number + 1,
        )  # +1 for correct segmentation, see above
    ]
    dispersed_path3: List[List[float]] = [
        p.to_list() for p in self.dispersed_path3_points
    ]
    # converted to numpy for speed
    self.dispersed_path2: numpy.ndarray = numpy.array(dispersed_path2)
    self.dispersed_path3: numpy.ndarray = numpy.array(dispersed_path3)
    # current elements (miu0/4pi) * current * (p[i+1] - p[i])
    # refactor v0.1.1
    # syntax note, example:
    # a = array([1, 2, 3, 4])
    # a[1:] = array([2, 3, 4])
    # a[:-1] = array([1, 2, 3])
    self.elementary_current = 1e-7 * current * (
        self.dispersed_path3[1:] - self.dispersed_path3[:-1]
    )
    # positions of the current elements (p[i+1] + p[i]) / 2
    self.elementary_current_position = 0.5 * (
        self.dispersed_path3[1:] + self.dispersed_path3[:-1]
    )
def phi_ksi_function(self, ksi: float) -> float:
    """
    Map ξ to φ along the CCT winding path.

    The path is the straight line through the start and end points in the
    ξ-φ plane, plus one sinusoidal correction term per tilt angle.
    """
    start = self.starting_point_in_ksi_phi_coordinate
    end = self.end_point_in_ksi_phi_coordinate
    # Linear baseline through (start, end) in the ξ-φ plane.
    slope = (end.y - start.y) / (end.x - start.x)
    intercept = start.y - slope * start.x
    phi = slope * ksi + intercept
    sinh_eta = math.sinh(self.eta)
    for order, tilt_radian in enumerate(self.tilt_radians, start=1):
        # A 90° tilt contributes nothing (cot 90° = 0): skip it.
        if BaseUtils.equal(self.tilt_angles[order - 1], 90.0):
            continue
        phi += (
            math.sin(order * ksi)
            / (math.tan(tilt_radian) * order * sinh_eta)
        )
    return phi
class BipolarToroidalCoordinateSystem:
    """
    Bipolar toroidal coordinate system, mapping (ξ, φ) to (x, y, z).

    a is the pole position, eta the bipolar constant; big_r / small_r are
    the major / minor radii of the torus they describe.
    """

    def __init__(self, a: float, eta: float, big_r: float, small_r: float):
        self.a = a
        self.eta = eta
        self.big_r = big_r
        self.small_r = small_r
        # Consistency checks: R = sqrt(a² / (1 - 1/cosh²η)) and r = R/coshη.
        BaseUtils.equal(
            big_r,
            math.sqrt(a * a / (1 - 1 / math.pow(math.cosh(eta), 2))),
            msg=f"BipolarToroidalCoordinateSystem:init 错误1 a({a})eta({eta})R({big_r})r({small_r})",
        )
        BaseUtils.equal(
            small_r,
            big_r / math.cosh(eta),
            msg=f"BipolarToroidalCoordinateSystem:init 错误2 a({a})eta({eta})R({big_r})r({small_r})",
        )

    def convert(self, p: P2) -> P3:
        """
        Convert 2-D coordinates (ξ, φ) to 3-D coordinates (x, y, z).
        """
        ksi = p.x
        phi = p.y
        temp = self.a / (math.cosh(self.eta) - math.cos(ksi))
        return P3(
            temp * math.sinh(self.eta) * math.cos(phi),
            temp * math.sinh(self.eta) * math.sin(phi),
            temp * math.sin(ksi),
        )

    def main_normal_direction_at(self, p: P2) -> P3:
        """
        Return the normal of the torus surface at the 3-D point (x, y, z)
        that the 2-D coordinates (ξ, φ) map to, i.e. the returned P3 is
        perpendicular to the torus surface at that point.
        Note: the vector is normalized.
        """
        phi = p.y
        # Center of the torus tube cross-section at angle φ.
        center = P3(self.big_r * math.cos(phi),
                    self.big_r * math.sin(phi), 0)
        face_point = self.convert(p)
        return (face_point - center).normalize()

    def __str__(self):
        return f"BipolarToroidalCoordinateSystem a({self.a})eta({self.eta})R({self.big_r})r({self.small_r})"

    def __repr__(self) -> str:
        return self.__str__()
def magnetic_field_at(self, point: P3) -> P3:
    """
    Compute the magnetic field of this CCT at `point`, given in the
    global coordinate system.

    Implemented with numpy (vectorized Biot–Savart sum over the discrete
    current elements) for efficiency.
    """
    # A (numerically) zero current produces no field.
    if BaseUtils.equal(self.current, 0, err=1e-6):
        return P3.zeros()
    # Convert `point` to local coordinates as a numpy vector.
    p = numpy.array(
        self.local_coordinate_system.point_to_local_coordinate(
            point).to_list()
    )
    # Vectors from each current-element midpoint to p.
    r = p - self.elementary_current_position
    # 1 / |r|³ for each element, shaped for broadcasting.
    rr = (numpy.linalg.norm(r, ord=2, axis=1)
          ** (-3)).reshape((r.shape[0], 1))
    # Field contribution of every current element at p.
    dB = numpy.cross(self.elementary_current, r) * rr
    # Sum the contributions to get the field.
    # (No extra μ0/4π = 1e-7 factor here: it is already folded into
    # elementary_current in __init__.)
    # refactor v0.1.1
    B = numpy.sum(dB, axis=0)
    # Back to a P3.
    B_P3: P3 = P3.from_numpy_ndarry(B)
    # Transform the vector from local back to global coordinates.
    B_P3: P3 = self.local_coordinate_system.vector_to_global_coordinate(
        B_P3)
    return B_P3
# from ApertureObject
def is_out_of_aperture(self, point: P3) -> bool:
    """
    Return True iff `point` (global coordinates) lies outside the CCT
    aperture.

    The check is only performed when the point's axial projection falls
    within the element's angular span; otherwise the point is reported
    as inside the aperture even if it is far from the axis, because such
    a point is most likely inside another, larger-aperture element, and
    judging it here would produce false positives.
    """
    # Convert to local coordinates and project onto the bending plane.
    local_point = self.local_coordinate_system.point_to_local_coordinate(
        point)
    projection = local_point.to_p2()
    phi = projection.angle_to_x_axis()

    def beyond_bore() -> bool:
        # Outside the torus tube: too far axially, or radially outside /
        # inside the annulus of radius big_r ± small_r.
        radial = projection.length()
        return (
            abs(local_point.z) > self.small_r
            or radial > (self.big_r + self.small_r)
            or radial < (self.big_r - self.small_r)
        )

    # A negative end-point φ means the CCT bends clockwise.
    clockwise = self.end_point_in_ksi_phi_coordinate.y < 0
    if clockwise:
        # Clockwise: the CCT occupies φ ∈ (2π - bending_radian, 2π).
        if phi > (2 * math.pi - self.bending_radian):
            return beyond_bore()
        return False
    # Counter-clockwise: the CCT occupies φ ∈ (0, bending_radian).
    if phi < self.bending_radian:
        return beyond_bore()
    return False
def __str__(self):
    # One implicit-concatenation f-string; field order and formatting
    # reproduce the original concatenated form exactly.
    return (
        f"CCT: local_coordinate_system({self.local_coordinate_system})"
        f"big_r({self.big_r})"
        f"small_r({self.small_r})"
        f"bending_angle({self.bending_angle})"
        f"tilt_angles({self.tilt_angles})"
        f"winding_number({self.winding_number})"
        f"current({self.current})"
        f"starting_point_in_ksi_phi_coordinate({self.starting_point_in_ksi_phi_coordinate})"
        f"end_point_in_ksi_phi_coordinate({self.end_point_in_ksi_phi_coordinate})"
        f"disperse_number_per_winding({self.disperse_number_per_winding})"
    )
def __repr__(self) -> str:
    # repr simply delegates to __str__.
    return str(self)
@staticmethod
def create_cct_along(
    # the design orbit
    trajectory: Line2,
    # position s along the design orbit where this CCT starts
    s: float,
    # major radius: the bending radius
    big_r: float,
    # minor radius (aperture / 2)
    small_r: float,
    # bending angle, i.e. phi0 * winding_number; typical value 67.5
    bending_angle: float,
    # tilt angle of each multipole component; typical value [30, 90, 90, 90]
    tilt_angles: List[float],
    # number of windings
    winding_number: int,
    # current
    current: float,
    # starting point of the CCT path in the 2-D ξ-φ coordinate system
    starting_point_in_ksi_phi_coordinate: P2,
    # end point of the CCT path in the 2-D ξ-φ coordinate system
    end_point_in_ksi_phi_coordinate: P2,
    # number of discrete current elements per winding; larger means
    # higher computational accuracy
    disperse_number_per_winding: int = 120,
) -> "CCT":
    """
    Create a CCT positioned at location s along the design orbit
    `trajectory`.
    """
    start_point: P2 = trajectory.point_at(s)
    arc_length: float = big_r * BaseUtils.angle_to_radian(bending_angle)
    end_point: P2 = trajectory.point_at(
        s + arc_length)  # 2021-01-15 bug fixed
    midpoint0: P2 = trajectory.point_at(s + arc_length / 3 * 1)
    midpoint1: P2 = trajectory.point_at(s + arc_length / 3 * 2)
    # Fit the bending-circle center twice, from two point triples, as a
    # consistency check on the design orbit.
    c1, r1 = BaseUtils.circle_center_and_radius(
        start_point, midpoint0, midpoint1)
    c2, r2 = BaseUtils.circle_center_and_radius(
        midpoint0, midpoint1, end_point)
    BaseUtils.equal(
        c1, c2, msg=f"构建 CCT 存在异常,通过设计轨道判断 CCT 圆心不一致,c1{c1},c2{c2}")
    BaseUtils.equal(
        r1, r2, msg=f"构建 CCT 存在异常,通过设计轨道判断 CCT 半径不一致,r1{r1},r2{r2}")
    center: P2 = (c1 + c2) * 0.5
    start_direct: P2 = trajectory.direct_at(s)
    pos: int = StraightLine2(
        # position_of gives the position of point p relative to the segment
        # return value:
        # 1 on the right # -1 on the left # 0 on the line containing the segment
        1.0, start_direct, start_point).position_of(center)
    lcs = None
    if pos == 0:
        raise ValueError(f"错误:圆心{center}在设计轨道{trajectory}上")
    elif pos == 1:  # center on the right of (start_direct, start_point): clockwise bend
        lcs = LocalCoordinateSystem.create_by_y_and_z_direction(
            location=center.to_p3(),
            y_direction=-start_direct.to_p3(),  # diff
            z_direction=P3.z_direct(),
        )
    # pos == -1: center on the left of (start_direct, start_point): counter-clockwise bend
    else:
        lcs = LocalCoordinateSystem.create_by_y_and_z_direction(
            location=center.to_p3(),
            y_direction=start_direct.to_p3(),  # diff
            z_direction=P3.z_direct(),
        )
    return CCT(
        local_coordinate_system=lcs,
        big_r=big_r,
        small_r=small_r,
        bending_angle=bending_angle,
        tilt_angles=tilt_angles,
        winding_number=winding_number,
        current=current,
        starting_point_in_ksi_phi_coordinate=starting_point_in_ksi_phi_coordinate,
        end_point_in_ksi_phi_coordinate=end_point_in_ksi_phi_coordinate,
        disperse_number_per_winding=disperse_number_per_winding,
    )
def global_path3(self) -> List[P3]:
    """
    Return the CCT path points expressed in the global coordinate system.
    Mainly intended for CUDA computation.
    since v0.1.1
    """
    to_global = self.local_coordinate_system.point_to_global_coordinate
    points: List[P3] = []
    for local_point in self.dispersed_path3_points:
        points.append(to_global(local_point))
    return points
def global_current_elements_and_elementary_current_positions(self, numpy_dtype=numpy.float64) -> Tuple[numpy.ndarray, numpy.ndarray]:
    """
    Return, in the global coordinate system,
    the current elements (miu0/4pi) * current * (p[i+1] - p[i])
    and the current-element positions (p[i+1] + p[i]) / 2,
    both as flattened numpy arrays.
    Mainly intended for CUDA computation.
    since v0.1.1
    """
    path_points = numpy.array(
        [p.to_list() for p in self.global_path3()], dtype=numpy_dtype)
    heads = path_points[1:]
    tails = path_points[:-1]
    # (μ0/4π) · I · Δl for every segment.
    current_elements = 1e-7 * self.current * (heads - tails)
    # Segment midpoints.
    midpoints = 0.5 * (heads + tails)
    return (
        current_elements.flatten(),
        midpoints.flatten()
    )
def p2_function(self, ksi) -> P2:
    """Point (ksi, phi) in the two-dimensional (ksi, phi) coordinate system.

    since v0.1.1
    """
    phi = self.phi_ksi_function(ksi)
    return P2(ksi, phi)
def p3_function(self, ksi) -> P3:
    """Path equation of the CCT in the local coordinate system.

    since v0.1.1
    """
    point_2d = self.p2_function(ksi)
    return self.bipolar_toroidal_coordinate_system.convert(point_2d)
def conductor_length(self, line_number: int = 2*7, disperse_number_per_winding: int = 360) -> float:
"""
计算导线长度
line_number 导线数目
since v0.1.1
"""
ksi0 = self.starting_point_in_ksi_phi_coordinate.x
| |
7 required positional arguments: 'd', 'e', 'f', 'g', 'h', 'i', and 'j'", lambda:f(1, 2, 3))
self.assertRaisesMessage(TypeError, "f() got an unexpected keyword argument 'dummy'", lambda:f(dummy=2))
self.assertRaisesMessage(TypeError, "f() got an unexpected keyword argument 'dummy'", lambda:f(1, dummy=2))
def f(a, b=2): pass
if is_cli:
self.assertRaisesMessage(TypeError, "f() takes at least 1 argument (0 given)", f)
self.assertRaisesMessage(TypeError, "f() takes at most 2 arguments (3 given)", f, 1, 2, 3)
else:
self.assertRaisesMessage(TypeError, "f() missing 1 required positional argument: 'a'", f)
self.assertRaisesMessage(TypeError, "f() takes from 1 to 2 positional arguments but 3 were given", f, 1, 2, 3)
if is_cli: #CPython bug 9326
self.assertRaisesMessage(TypeError, "f() takes at least 1 non-keyword argument (0 given)", f, b=2)
else:
self.assertRaisesMessage(TypeError, "f() missing 1 required positional argument: 'a'", f, b=2)
self.assertRaisesMessage(TypeError, "f() got an unexpected keyword argument 'dummy'", f, dummy=3)
self.assertRaisesMessage(TypeError, "f() got an unexpected keyword argument 'dummy'", f, b=2, dummy=3)
self.assertRaisesMessage(TypeError, "f() got an unexpected keyword argument 'dummy'", f, 1, dummy=3)
if is_cli:
self.assertRaisesMessage(TypeError, "f() takes at least 1 argument (0 given)", lambda:f())
self.assertRaisesMessage(TypeError, "f() takes at most 2 arguments (3 given)", lambda:f(1, 2, 3))
else:
self.assertRaisesMessage(TypeError, "f() missing 1 required positional argument: 'a'", lambda:f())
self.assertRaisesMessage(TypeError, "f() takes from 1 to 2 positional arguments but 3 were given", lambda:f(1, 2, 3))
if is_cli: #CPython bug 9326
self.assertRaisesMessage(TypeError, "f() takes at least 1 non-keyword argument (0 given)", lambda:f(b=2))
else:
self.assertRaisesMessage(TypeError, "f() missing 1 required positional argument: 'a'", lambda:f(b=2))
self.assertRaisesMessage(TypeError, "f() got an unexpected keyword argument 'dummy'", lambda:f(dummy=3))
self.assertRaisesMessage(TypeError, "f() got an unexpected keyword argument 'dummy'", lambda:f(b=2, dummy=3))
self.assertRaisesMessage(TypeError, "f() got an unexpected keyword argument 'dummy'", lambda:f(1, dummy=3))
def f(a, *argList): pass
if is_cli:
self.assertRaisesMessage(TypeError, "f() takes at least 1 argument (0 given)", f)
else:
self.assertRaisesMessage(TypeError, "f() missing 1 required positional argument: 'a'", f)
self.assertRaisesMessage(TypeError, "f() got an unexpected keyword argument 'dummy'", f, dummy=2)
self.assertRaisesMessage(TypeError, "f() got an unexpected keyword argument 'dummy'", f, 1, dummy=2)
if is_cli:
self.assertRaisesMessage(TypeError, "f() takes at least 1 argument (0 given)", lambda:f())
else:
self.assertRaisesMessage(TypeError, "f() missing 1 required positional argument: 'a'", lambda:f())
self.assertRaisesMessage(TypeError, "f() got an unexpected keyword argument 'dummy'", lambda:f(dummy=2))
self.assertRaisesMessage(TypeError, "f() got an unexpected keyword argument 'dummy'", lambda:f(1, dummy=2))
def f(a, **keywordDict): pass
if is_cli:
self.assertRaisesMessage(TypeError, "f() takes exactly 1 argument (0 given)", f)
self.assertRaisesMessage(TypeError, "f() takes exactly 1 argument (3 given)", f, 1, 2, 3)
else:
self.assertRaisesMessage(TypeError, "f() missing 1 required positional argument: 'a'", f)
self.assertRaisesMessage(TypeError, "f() takes 1 positional argument but 3 were given", f, 1, 2, 3)
if is_cli: #CPython bug 9326
self.assertRaisesMessage(TypeError, "f() takes exactly 1 non-keyword argument (0 given)", f, dummy=2)
self.assertRaisesMessage(TypeError, "f() takes exactly 1 non-keyword argument (0 given)", f, dummy=2, dummy2=3)
else:
self.assertRaisesMessage(TypeError, "f() missing 1 required positional argument: 'a'", f, dummy=2)
self.assertRaisesMessage(TypeError, "f() missing 1 required positional argument: 'a'", f, dummy=2, dummy2=3)
if is_cli:
self.assertRaisesMessage(TypeError, "f() takes exactly 1 argument (0 given)", lambda:f())
self.assertRaisesMessage(TypeError, "f() takes exactly 1 argument (3 given)", lambda:f(1, 2, 3))
else:
self.assertRaisesMessage(TypeError, "f() missing 1 required positional argument: 'a'", lambda:f())
self.assertRaisesMessage(TypeError, "f() takes 1 positional argument but 3 were given", lambda:f(1, 2, 3))
if is_cli: #CPython bug 9326
self.assertRaisesMessage(TypeError, "f() takes exactly 1 non-keyword argument (0 given)", lambda:f(dummy=2))
self.assertRaisesMessage(TypeError, "f() takes exactly 1 non-keyword argument (0 given)", lambda:f(dummy=2, dummy2=3))
else:
self.assertRaisesMessage(TypeError, "f() missing 1 required positional argument: 'a'", lambda:f(dummy=2))
self.assertRaisesMessage(TypeError, "f() missing 1 required positional argument: 'a'", lambda:f(dummy=2, dummy2=3))
if is_cli:
self.assertRaisesMessage(TypeError, "abs() takes exactly 1 argument (0 given)", abs)
self.assertRaisesMessage(TypeError, "abs() takes exactly 1 argument (3 given)", abs, 1, 2, 3)
self.assertRaisesMessage(TypeError, "abs() got an unexpected keyword argument 'dummy'", abs, dummy=2)
self.assertRaisesMessage(TypeError, "abs() takes exactly 1 argument (2 given)", abs, 1, dummy=2)
self.assertRaisesMessage(TypeError, "abs() takes exactly 1 argument (0 given)", lambda:abs())
self.assertRaisesMessage(TypeError, "abs() takes exactly 1 argument (3 given)", lambda:abs(1, 2, 3))
self.assertRaisesMessage(TypeError, "abs() got an unexpected keyword argument 'dummy'", lambda:abs(dummy=2))
self.assertRaisesMessage(TypeError, "abs() takes exactly 1 argument (2 given)", lambda:abs(1, dummy=2))
else:
self.assertRaisesMessage(TypeError, "abs() takes exactly one argument (0 given)", abs)
self.assertRaisesMessage(TypeError, "abs() takes exactly one argument (3 given)", abs, 1, 2, 3)
self.assertRaisesMessage(TypeError, "abs() takes no keyword arguments", abs, dummy=2)
self.assertRaisesMessage(TypeError, "abs() takes no keyword arguments", abs, 1, dummy=2)
self.assertRaisesMessage(TypeError, "abs() takes exactly one argument (0 given)", lambda:abs())
self.assertRaisesMessage(TypeError, "abs() takes exactly one argument (3 given)", lambda:abs(1, 2, 3))
self.assertRaisesMessage(TypeError, "abs() takes no keyword arguments", lambda:abs(dummy=2))
self.assertRaisesMessage(TypeError, "abs() takes no keyword arguments", lambda:abs(1, dummy=2))
# list([m]) has one default argument (built-in type)
#self.assertRaisesMessage(TypeError, "list() takes at most 1 argument (2 given)", list, 1, 2)
#self.assertRaisesMessage(TypeError, "'dummy' is an invalid keyword argument for this function", apply, list, [], dict({"dummy":2}))
#======== BUG 697 ===========
#self.assertRaisesMessage(TypeError, "'dummy' is an invalid keyword argument for this function", apply, list, [1], dict({"dummy":2}))
# complex([x,y]) has two default argument (OpsReflectedType type)
#self.assertRaisesMessage(TypeError, "complex() takes at most 2 arguments (3 given)", complex, 1, 2, 3)
#self.assertRaisesMessage(TypeError, "'dummy' is an invalid keyword argument for this function", apply, complex, [], dict({"dummy":2}))
#self.assertRaisesMessage(TypeError, "'dummy' is an invalid keyword argument for this function", apply, complex, [1], dict({"dummy":2}))
# bool([x]) has one default argument (OpsReflectedType and valuetype type)
#self.assertRaisesMessage(TypeError, "bool() takes at most 1 argument (2 given)", bool, 1, 2)
#self.assertRaisesMessage(TypeError, "'dummy' is an invalid keyword argument for this function", apply, bool, [], dict({"dummy":2}))
#self.assertRaisesMessage(TypeError, "'dummy' is an invalid keyword argument for this function", apply, bool, [1], dict({"dummy":2}))
class UserClass(object): pass
if is_cli:
self.assertRaisesMessage(TypeError, "object.__new__() takes no parameters", UserClass, 1)
with self.assertRaisesMessage(TypeError, "object.__new__() takes no parameters"):
UserClass(*[], **dict({"dummy":2}))
else:
self.assertRaisesMessage(TypeError, "object() takes no parameters", UserClass, 1)
with self.assertRaisesMessage(TypeError, "object() takes no parameters"):
UserClass(*[], **dict({"dummy":2}))
class OldStyleClass: pass
if is_cli:
self.assertRaisesMessage(TypeError, "object.__new__() takes no parameters", OldStyleClass, 1)
with self.assertRaisesMessage(TypeError, "object.__new__() takes no parameters"):
OldStyleClass(*[], **dict({"dummy":2}))
else:
self.assertRaisesMessage(TypeError, "object() takes no parameters", OldStyleClass, 1)
with self.assertRaisesMessage(TypeError, "object() takes no parameters"):
OldStyleClass(*[], **dict({"dummy":2}))
@skipUnlessIronPython()
def test_runtime_type_checking(self):
    """accepts / returns runtype type checking tests.

    Exercises IronPython's clr.accepts / clr.returns decorators, which
    raise AssertionError when an argument or return value has the wrong
    type at call time.
    """
    import clr
    # accepts(object): every value is an object, so all calls succeed
    @clr.accepts(object)
    def foo(x):
        return x

    self.assertEqual(foo('abc'), 'abc')
    self.assertEqual(foo(2), 2)
    self.assertEqual(foo(long(2)), long(2))
    self.assertEqual(foo(2.0), 2.0)
    self.assertEqual(foo(True), True)

    # accepts(str): any non-string argument raises AssertionError
    @clr.accepts(str)
    def foo(x):
        return x

    self.assertEqual(foo('abc'), 'abc')
    self.assertRaises(AssertionError, foo, 2)
    self.assertRaises(AssertionError, foo, long(2))
    self.assertRaises(AssertionError, foo, 2.0)
    self.assertRaises(AssertionError, foo, True)

    # accepts with one declared type per parameter
    @clr.accepts(str, bool)
    def foo(x, y):
        return x, y

    self.assertEqual(foo('abc', True), ('abc', True))
    # NOTE(review): each call below passes a single tuple argument to foo
    # rather than two arguments — verify this is the intended failure mode
    # under clr.accepts.
    self.assertRaises(AssertionError, foo, ('abc',2))
    self.assertRaises(AssertionError, foo, ('abc',long(2)))
    self.assertRaises(AssertionError, foo, ('abc',2.0))

    # clr.Self() stands in for the instance parameter of a bound method
    class bar:
        @clr.accepts(clr.Self(), str)
        def foo(self, x):
            return x

    a = bar()
    self.assertEqual(a.foo('xyz'), 'xyz')
    self.assertRaises(AssertionError, a.foo, 2)
    self.assertRaises(AssertionError, a.foo, long(2))
    self.assertRaises(AssertionError, a.foo, 2.0)
    self.assertRaises(AssertionError, a.foo, True)

    # returns(str): a non-string return value raises AssertionError
    @clr.returns(str)
    def foo(x):
        return x

    self.assertEqual(foo('abc'), 'abc')
    self.assertRaises(AssertionError, foo, 2)
    self.assertRaises(AssertionError, foo, long(2))
    self.assertRaises(AssertionError, foo, 2.0)
    self.assertRaises(AssertionError, foo, True)

    # accepts and returns stacked on the same function
    @clr.accepts(bool)
    @clr.returns(str)
    def foo(x):
        if x: return str(x)
        else: return 0

    self.assertEqual(foo(True), 'True')
    self.assertRaises(AssertionError, foo, 2)
    self.assertRaises(AssertionError, foo, long(2))
    # foo(False) returns 0, which violates returns(str)
    self.assertRaises(AssertionError, foo, False)

    # returns(None): only a None return value is accepted
    @clr.returns(None)
    def foo(): pass

    self.assertEqual(foo(), None)
def test_error_message(self):
try:
repr()
except TypeError as e:
# make sure we get the right type name when calling w/ wrong # of args
self.assertTrue(str(e).startswith("repr()"))
def test_caller_context(self):
# access a method w/ caller context w/ an args parameter.
def foo(*args):
return hasattr(*args)
self.assertEqual(foo('', 'index'), True)
@skipUnlessIronPython()
def test_dispatch_to_ReflectOptimized(self):
    """dispatch to a ReflectOptimized method.

    Spawns a fresh IronPython console process and checks that a .NET
    method with default parameters dispatches correctly when called
    with a mix of positional and keyword arguments.
    """
    from iptest.console_util import IronPythonInstance
    from System import Environment
    from sys import executable
    wkdir = self.test_dir
    # Propagate -X:LightweightScopes from the current run so the child
    # console uses the same scope mode as the parent process.
    if "-X:LightweightScopes" in Environment.GetCommandLineArgs():
        ipi = IronPythonInstance(executable, wkdir, "-X:LightweightScopes", "-X:BasicConsole")
    else:
        ipi = IronPythonInstance(executable, wkdir, "-X:BasicConsole")

    if (ipi.Start()):
        try:
            result = ipi.ExecuteLine("from iptest.ipunittest import load_ironpython_test")
            result = ipi.ExecuteLine("load_ironpython_test()")
            result = ipi.ExecuteLine("from IronPythonTest import DefaultParams")
            # expected value is defined by the DefaultParams test fixture
            response = ipi.ExecuteLine("DefaultParams.FuncWithDefaults(1100, z=82)")
            self.assertEqual(response, '1184')
        finally:
            # always shut the child console down, even on assertion failure
            ipi.End()
def test_zip(self):
p = ((1, 2),)
self.assertEqual(list(zip(*(p * 10))), [(1, 1, 1, 1, 1, 1, 1, 1, 1, 1), (2, 2, 2, 2, 2, 2, 2, 2, 2, 2)])
self.assertEqual(list(zip(*(p * 10))), [(1, 1, 1, 1, 1, 1, 1, 1, 1, 1), (2, 2, 2, 2, 2, 2, 2, 2, 2, 2)])
def test_super(self):
    """Check the introspection attributes (__thisclass__, __self__,
    __self_class__) of super objects created in each supported form."""
    class A(object): pass
    class B(A): pass

    # unbound super: no instance is bound, so both self attributes are None
    # NOTE(review): super(B, None) is accepted here; CPython 3 rejects a
    # None second argument — presumably IronPython-specific, confirm.
    for x in [super(B), super(B,None)]:
        self.assertEqual(x.__thisclass__, B)
        self.assertEqual(x.__self__, None)
        self.assertEqual(x.__self_class__, None)

    # super w/ both types: the class passed as second argument becomes
    # both __self__ and __self_class__
    x = super(B,B)
    self.assertEqual(x.__thisclass__,B)
    self.assertEqual(x.__self_class__, B)
    self.assertEqual(x.__self__, B)

    # super w/ type and instance: the instance is bound as __self__
    b = B()
    x = super(B, b)
    self.assertEqual(x.__thisclass__,B)
    self.assertEqual(x.__self_class__, B)
    self.assertEqual(x.__self__, b)

    # super w/ mixed types: first argument an ancestor of the second
    x = super(A,B)
    self.assertEqual(x.__thisclass__,A)
    self.assertEqual(x.__self_class__, B)
| |
<reponame>pllim/halotools
"""
This module contains the `NFWProfile` class,
which is used to model the spatial distribution of mass and/or galaxies
inside dark matter halos according to the fitting function introduced in
Navarro, Frenk and White (1995), `arXiv:9508025 <http://arxiv.org/abs/astro-ph/9508025/>`_.
The `NFWProfile` class is a sub-class of `~halotools.empirical_models.AnalyticDensityProf`.
"""
from __future__ import division, print_function, absolute_import, unicode_literals
import numpy as np
from .conc_mass import direct_from_halo_catalog, dutton_maccio14
from .kernels import nfw_dimensionless_mass_density, nfw_cumulative_mass_PDF
from .kernels import standalone_mc_generate_nfw_radial_positions
from ...profile_model_template import AnalyticDensityProf
from ..... import model_defaults
from ......sim_manager import sim_defaults
__all__ = ("NFWProfile",)
__author__ = ("<NAME>", "<NAME>")
class NFWProfile(AnalyticDensityProf):
r""" Model for the spatial distribution of mass
and/or galaxies residing in an NFW halo profile,
based on Navarro, Frenk and White (1995),
`arXiv:9508025 <http://arxiv.org/abs/astro-ph/9508025/>`_.
For a review of the mathematics underlying the NFW profile,
including descriptions of how the relevant equations are
implemented in the Halotools code base, see :ref:`nfw_profile_tutorial`.
"""
def __init__(
    self,
    cosmology=sim_defaults.default_cosmology,
    redshift=sim_defaults.default_redshift,
    mdef=model_defaults.halo_mass_definition,
    conc_mass_model=model_defaults.conc_mass_model,
    concentration_key=model_defaults.concentration_key,
    halo_boundary_key=None,
    **kwargs
):
    r"""
    Parameters
    ----------
    cosmology : object, optional
        Instance of an astropy `~astropy.cosmology`. Default is set in
        `~halotools.sim_manager.sim_defaults`.

    redshift : float, optional
        Default is set in `~halotools.sim_manager.sim_defaults`.

    mdef : str, optional
        String specifying the halo mass definition, e.g., 'vir' or '200m'.
        Default is set in `~halotools.empirical_models.model_defaults`.

    halo_boundary_key : str, optional
        Default behavior is to use the column associated with the input mdef.

    conc_mass_model : string or callable, optional
        Relation between NFW concentration and halo mass: either a
        custom-built callable, or one of the strings ``dutton_maccio14``
        or ``direct_from_halo_catalog``.

    concentration_key : string, optional
        Column name of the halo catalog storing NFW concentration. Only
        relevant when ``conc_mass_model`` is ``direct_from_halo_catalog``;
        the default, ``halo_nfw_conc``, is consistent with all halo
        catalogs provided by Halotools but may differ in custom catalogs.

    Examples
    --------
    >>> nfw = NFWProfile()
    """
    super(NFWProfile, self).__init__(
        cosmology, redshift, mdef, halo_boundary_key=halo_boundary_key
    )

    # A single profile parameter, the concentration, characterizes the
    # model for both galaxies and halos (two independent list objects).
    profile_param_keys = ["conc_NFWmodel"]
    self.gal_prof_param_keys = list(profile_param_keys)
    self.halo_prof_param_keys = list(profile_param_keys)

    self.publications = ["arXiv:9611107", "arXiv:0002395"]

    self._initialize_conc_mass_behavior(
        conc_mass_model, concentration_key=concentration_key
    )
def _initialize_conc_mass_behavior(self, conc_mass_model, **kwargs):
    # Configure the concentration-mass relation used by `conc_NFWmodel`.
    # For the tabulated option, remember which halo-catalog column stores
    # the concentration; in every case store the model itself (a string
    # name or a callable) for later dispatch.
    if conc_mass_model == "direct_from_halo_catalog":
        self.concentration_key = kwargs.get(
            "concentration_key", model_defaults.concentration_key
        )

    self.conc_mass_model = conc_mass_model
def conc_NFWmodel(self, *args, **kwargs):
    r""" NFW concentration as a function of halo mass.

    Parameters
    ----------
    prim_haloprop : array, optional
        Array storing the mass-like variable, e.g., ``halo_mvir``.

        If ``prim_haloprop`` is not passed,
        then ``table`` keyword argument must be passed.

    table : object, optional
        `~astropy.table.Table` storing the halo catalog.

        If your NFW model is based on the virial definition,
        then ``halo_mvir`` must appear in the input table,
        and likewise for other halo mass definitions.

        If ``table`` is not passed,
        then ``prim_haloprop`` keyword argument must be passed.

    Returns
    -------
    conc : array_like
        Concentrations of the input halos.

        Note that concentrations will be clipped to their min/max permitted
        values set in the `~halotools.empirical_models.model_defaults` module.
        The purpose of this clipping is to ensure stable results during
        mock galaxy population. Due to this clipping,
        the behavior of the `conc_NFWmodel` function
        is different from the concentration-mass relation that underlies it.

    Examples
    ---------
    In the examples below, we'll demonstrate the various ways to use the
    `~halotools.empirical_models.NFWProfile.conc_NFWmodel` function, depending
    on the initial choice for the ``conc_mass_model``.

    >>> fake_masses = np.logspace(12, 15, 10)

    If you use the ``direct_from_halo_catalog`` option, you must pass a
    ``table`` argument storing a `~astropy.table.Table` with a column name
    for the halo mass that is consistent with your chosen halo mass definition:

    >>> from astropy.table import Table
    >>> nfw = NFWProfile(conc_mass_model='direct_from_halo_catalog', mdef='vir')
    >>> fake_conc = np.zeros_like(fake_masses) + 5.
    >>> fake_halo_table = Table({'halo_mvir': fake_masses, 'halo_nfw_conc': fake_conc})
    >>> model_conc = nfw.conc_NFWmodel(table=fake_halo_table)

    In case your halo catalog uses a different keyname from the Halotools
    default ``halo_nfw_conc``:

    >>> nfw = NFWProfile(conc_mass_model='direct_from_halo_catalog', mdef='vir', concentration_key='my_conc_keyname')
    >>> fake_halo_table = Table({'halo_mvir': fake_masses, 'my_conc_keyname': fake_conc})
    >>> model_conc = nfw.conc_NFWmodel(table=fake_halo_table)

    One of the available options provided by Halotools is ``dutton_maccio14``.
    With this option, you can either pass in a ``table`` argument, or alternatively
    an array of masses via the ``prim_haloprop`` argument:

    >>> nfw = NFWProfile(conc_mass_model='dutton_maccio14')
    >>> fake_halo_table = Table({'halo_mvir': fake_masses, 'halo_nfw_conc': fake_conc})
    >>> model_conc = nfw.conc_NFWmodel(table=fake_halo_table)
    >>> model_conc = nfw.conc_NFWmodel(prim_haloprop=fake_masses)

    Finally, you may also have chosen to define your own concentration-mass relation.
    If so, your function must at a minimum accept a ``table`` keyword argument.
    Below we give a trivial example of using the identity function:

    >>> def identity_func(*args, **kwargs): return kwargs['table']['halo_mvir']
    >>> nfw = NFWProfile(conc_mass_model=identity_func, mdef='vir')
    >>> fake_halo_table = Table({'halo_mvir': fake_masses})
    >>> model_conc = nfw.conc_NFWmodel(table=fake_halo_table)
    """
    if self.conc_mass_model == "direct_from_halo_catalog":
        try:
            table = kwargs["table"]
        except KeyError:
            msg = (
                "Must pass ``table`` argument to the ``conc_NFWmodel`` function\n"
                "when ``conc_mass_model`` is set to ``direct_from_halo_catalog``\n"
            )
            raise KeyError(msg)
        result = direct_from_halo_catalog(
            table=table, concentration_key=self.concentration_key
        )

    elif self.conc_mass_model == "dutton_maccio14":
        msg = (
            "Must either pass a ``prim_haloprop`` argument, \n"
            "or a ``table`` argument with an astropy Table that has the ``{0}`` key"
        )
        # Catch only the failures we expect here: a missing ``table`` kwarg
        # or column (KeyError), or an unset ``prim_haloprop_key`` attribute
        # (AttributeError). The previous bare ``except:`` clauses also
        # swallowed unrelated errors such as KeyboardInterrupt.
        try:
            mass = kwargs["table"][self.prim_haloprop_key]
        except (KeyError, AttributeError):
            try:
                mass = kwargs["prim_haloprop"]
            except KeyError:
                raise KeyError(msg.format(self.prim_haloprop_key))
        result = dutton_maccio14(mass, self.redshift)

    else:
        # Custom callable supplied by the user at construction time.
        result = self.conc_mass_model(*args, **kwargs)

    # Clip to the globally permitted concentration range so that
    # downstream mock population remains numerically stable.
    cmin = model_defaults.min_permitted_conc
    cmax = model_defaults.max_permitted_conc
    result = np.where(result < cmin, cmin, result)
    result = np.where(result > cmax, cmax, result)
    return result
def dimensionless_mass_density(self, scaled_radius, conc):
    r"""
    Physical density of the NFW halo scaled by the density threshold of the mass definition.

    The `dimensionless_mass_density` is defined as
    :math:`\tilde{\rho}_{\rm prof}(\tilde{r}) \equiv \rho_{\rm prof}(\tilde{r}) / \rho_{\rm thresh}`,
    where :math:`\tilde{r}\equiv r/R_{\Delta}`.

    For an NFW halo,
    :math:`\tilde{\rho}_{\rm NFW}(\tilde{r}, c) = \frac{c^{3}/3g(c)}{c\tilde{r}(1 + c\tilde{r})^{2}},`

    where :math:`g(x) \equiv \log(1+x) - x / (1+x)` is computed using the `g` function.

    The quantity :math:`\rho_{\rm thresh}` is a function of
    the halo mass definition, cosmology and redshift,
    and is computed via the
    `~halotools.empirical_models.profile_helpers.density_threshold` function.
    The quantity :math:`\rho_{\rm prof}` is the physical mass density of the
    halo profile and is computed via the `mass_density` function.
    See :ref:`nfw_spatial_profile_derivations` for a derivation of this expression.

    Parameters
    -----------
    scaled_radius : array_like
        Halo-centric distance *r* scaled by the halo boundary :math:`R_{\Delta}`, so that
        :math:`0 <= \tilde{r} \equiv r/R_{\Delta} <= 1`. Can be a scalar or numpy array.

    conc : array_like
        Value of the halo concentration. Can either be a scalar, or a numpy array
        of the same dimension as the input ``scaled_radius``.

    Returns
    -------
    dimensionless_density: array_like
        Dimensionless density of a dark matter halo
        at the input ``scaled_radius``, normalized by the
        `~halotools.empirical_models.profile_helpers.density_threshold`
        :math:`\rho_{\rm thresh}` for the
        halo mass definition, cosmology, and redshift.
        Result is an array of the dimension as the input ``scaled_radius``.
    """
    # Stateless computation: delegate directly to the vectorized NFW kernel.
    return nfw_dimensionless_mass_density(scaled_radius, conc)
def mass_density(self, radius, mass, conc):
    r"""
    Physical density of the halo at the input radius,
    given in units of :math:`h^{3}/{\rm Mpc}^{3}`.

    Parameters
    -----------
    radius : array_like
        Halo-centric distance in Mpc/h units; can be a scalar or numpy array

    mass : array_like
        Total mass of the halo; can be a scalar or numpy array of the same
        dimension as the input ``radius``.

    conc : array_like
        Value of the halo concentration. Can either be a scalar, or a numpy array
        of the same dimension as the input ``radius``.

    Returns
    -------
    density: array_like
        Physical density of a dark matter halo of the input ``mass``
        at the input ``radius``. Result is an array of the
        dimension as the input ``radius``, reported in units of :math:`h^{3}/Mpc^{3}`.

    Examples
    --------
    >>> model = NFWProfile()
    >>> Npts = 100
    >>> radius = np.logspace(-2, -1, Npts)
    >>> mass = np.zeros(Npts) + 1e12
    >>> conc = 5
    >>> result = model.mass_density(radius, mass, conc)
    >>> concarr = np.linspace(1, 100, Npts)
    >>> result = model.mass_density(radius, mass, concarr)

    Notes
    ------
    See :ref:`halo_profile_definitions` for derivations and implementation details.
    """
    # Explicit delegation to the base-class implementation; presumably the
    # override exists only to attach the NFW-specific docstring above.
    return AnalyticDensityProf.mass_density(self, radius, mass, conc)
def cumulative_mass_PDF(self, scaled_radius, conc):
r"""
Analytical result for the fraction of the total mass
enclosed within dimensionless radius of an NFW halo,
:math:`P_{\rm NFW}(<\tilde{r}) \equiv M_{\Delta}(<\tilde{r}) / M_{\Delta} = g(c\tilde{r})/g(\tilde{r}),`
where :math:`g(x) \equiv \int_{0}^{x}dy\frac{y}{(1+y)^{2}} = \log(1+x) - x / (1+x)` is computed
using `g`, and where :math:`\tilde{r} \equiv r / R_{\Delta}`.
See :ref:`nfw_cumulative_mass_pdf_derivation` for a derivation of this expression.
Parameters
-------------
scaled_radius : array_like
Halo-centric distance *r* scaled by the halo boundary :math:`R_{\Delta}`, so that
:math:`0 <= \tilde{r} \equiv r/R_{\Delta} <= 1`. Can be | |
# Copyright (c) Microsoft Corporation. All rights reserved.
import numpy as np
import pandas as pd
import copy
"""
This file contains miscellaneous helper classes and functions for the data-assertion routines
"""
# ---------------------------------------------------------------
# Class _AssertionType
# ---------------------------------------------------------------
class _AssertionType:
    """Label constants naming the supported kinds of data assertion."""
    PCA_ASSERTION = "PCA Assertion"
    DISJUNCTIVE_ASSERTION = "Disjunctive Assertion"
    DECISION_TREE_ASSERTION = "Decision Tree Assertion"
    MIXED = "Mixed Assertion"
# ---------------------------------------------------------------
# Class _RelationalOperators: Static class for various relational operators
# ---------------------------------------------------------------
class _RelationalOperators:
    """Static namespace of relational-operator codes and their display strings."""
    EQUAL = 1
    LESS_THAN = 2
    LESS_THAN_EQUAL_TO = 3
    GREATER_THAN = 4
    GREATER_THAN_EQUAL_TO = 5
    NOT_EQUAL = 6
    INCLUSIVE_RANGE = 7  # value in [low, high]
    EXCLUSIVE_RANGE = 8  # value in (low, high)

    # Human-readable operator symbols, used when rendering constraints
    # (see SingleConstraint.__repr__).
    OPERATOR_STRING = {
        EQUAL: "=",
        LESS_THAN: "<",
        LESS_THAN_EQUAL_TO: "<=",
        GREATER_THAN: ">",
        GREATER_THAN_EQUAL_TO: ">=",
        NOT_EQUAL: "!=",
        INCLUSIVE_RANGE: "in=",
        EXCLUSIVE_RANGE: "in",
    }
# ---------------------------------------------------------------
# Class SingleConstraint
# A single constraint requires
# (1) column_name: name of a column of the data frame.
# (2) column_value: values of that column that defines the constraint.
# This is either a single value or a two elements list representing a range.
# (3) relational_op: defines the relation between column_name and column_value.
# Example:
# Constraint "make = Toyota":
# column_name = 'make',
# column_value = 'Toyota',
# relational_op = _RelationalOperators.EQUAL
#
# Constraint: "30 <= mpg <= 35"
# column_name = 'mpg',
# column_value = [30, 35],
# relational_op = _RelationalOperators.INCLUSIVE_RANGE
# ---------------------------------------------------------------
class SingleConstraint:
    """A single relational predicate over one dataframe column.

    A constraint consists of:
      (1) column_name: name of a column of the data frame.
      (2) column_value: value(s) defining the constraint -- a scalar for
          simple comparisons, or a two-element list [low, high] for ranges.
      (3) relational_op: a _RelationalOperators code relating column to value.

    Example:
        "make = Toyota"     -> SingleConstraint('make', 'Toyota',
                                                _RelationalOperators.EQUAL)
        "30 <= mpg <= 35"   -> SingleConstraint('mpg', [30, 35],
                                                _RelationalOperators.INCLUSIVE_RANGE)
    """

    def __init__(self, column_name, column_value, relational_op):
        self.column_name = column_name
        self.column_value = column_value
        self.relational_op = relational_op
        # Range operators require column_value to be a list; every other
        # operator requires a scalar.
        is_range_op = self.relational_op in [
            _RelationalOperators.INCLUSIVE_RANGE,
            _RelationalOperators.EXCLUSIVE_RANGE,
        ]
        # BUG FIX: the length clause previously tested ``column_name`` for
        # list-ness (always false in practice), so a malformed range value
        # such as [1, 2, 3] was silently accepted. Test ``column_value``.
        assert isinstance(self.column_value, list) == is_range_op and (
            not isinstance(self.column_value, list) or len(self.column_value) == 2
        ), "Invalid operator and value combination"
        assert self.relational_op is not None, "Operator not supported"

    def apply(self, df, drop_column=True):
        """ Returns the transformed dataframe after the constraint is applied

        :param df: source dataframe
        :param drop_column: boolean flag. denotes whether to drop the column, on which constraint is applied, or not
        :return: df with the constraint applied
        """
        new_df = df
        if self.relational_op == _RelationalOperators.EQUAL:
            new_df = df[df[self.column_name] == self.column_value]
        elif self.relational_op == _RelationalOperators.LESS_THAN:
            new_df = df[df[self.column_name] < self.column_value]
        elif self.relational_op == _RelationalOperators.LESS_THAN_EQUAL_TO:
            new_df = df[df[self.column_name] <= self.column_value]
        elif self.relational_op == _RelationalOperators.GREATER_THAN:
            new_df = df[df[self.column_name] > self.column_value]
        elif self.relational_op == _RelationalOperators.GREATER_THAN_EQUAL_TO:
            new_df = df[df[self.column_name] >= self.column_value]
        elif self.relational_op == _RelationalOperators.NOT_EQUAL:
            new_df = df[df[self.column_name] != self.column_value]
        elif self.relational_op == _RelationalOperators.INCLUSIVE_RANGE:
            new_df = df[
                (df[self.column_name] >= self.column_value[0])
                & (df[self.column_name] <= self.column_value[1])
            ]
        elif self.relational_op == _RelationalOperators.EXCLUSIVE_RANGE:
            new_df = df[
                (df[self.column_name] > self.column_value[0])
                & (df[self.column_name] < self.column_value[1])
            ]
        if drop_column:
            return new_df.loc[:, new_df.columns != self.column_name]
        else:
            return new_df

    def get_name(self):
        return repr(self)

    def __repr__(self):
        if isinstance(self.column_value, list):
            return (
                str(self.column_value[0])
                + _RelationalOperators.OPERATOR_STRING[self.relational_op]
                + str(self.column_name)
                + _RelationalOperators.OPERATOR_STRING[self.relational_op]
                + str(self.column_value[1])
            )
        else:
            return (
                str(self.column_name)
                + _RelationalOperators.OPERATOR_STRING[self.relational_op]
                + str(self.column_value)
            )
# ---------------------------------------------------------------
# Class DisjunctiveConstraint: A disjunctive constraint is disjunction of multiple _SingleConstraints
# ---------------------------------------------------------------
class DisjunctiveConstraint:
    """Disjunction (logical OR) of several single constraints, all of which
    must refer to the same dataframe column."""

    def __init__(self, single_constraints):
        self.single_constraints = single_constraints
        self.column_name = single_constraints[0].column_name
        # Only one distinct column may appear in a disjunction for now.
        distinct_columns = set(c.column_name for c in single_constraints)
        assert len(distinct_columns) == 1

    def apply(self, df, drop_column=True):
        """ Returns the transformed dataframe after the constraint is applied

        :param df: the source data frame
        :param drop_column: boolean flag. denotes whether to drop the column, on which constraint is applied, or not
        :return: new_df: with the constraint applied
        """
        # Concatenate the per-constraint row selections.
        partitions = [c.apply(df, drop_column) for c in self.single_constraints]
        return pd.concat(partitions)

    def get_name(self):
        return self.__repr__()

    def __repr__(self):
        return " || ".join("{0}".format(c) for c in self.single_constraints)
# ---------------------------------------------------------------
# Class ConjunctiveConstraint: A conjunctive constraint is conjunction of multiple _SingleConstraints
# ---------------------------------------------------------------
class ConjunctiveConstraint:
    """Conjunction (logical AND) of several single constraints, each of
    which must refer to a distinct dataframe column."""

    def __init__(self, single_constraints):
        self.single_constraints = single_constraints
        # Duplicated columns would make the conjunction ambiguous once the
        # constrained column is dropped, so each member must be unique.
        distinct_columns = set(c.column_name for c in single_constraints)
        assert len(distinct_columns) == len(single_constraints)

    def apply(self, df, drop_column=True):
        """ Returns the transformed dataframe after the constraint is applied

        :param df: the source data frame
        :param drop_column: boolean flag. denotes whether to drop the column, on which constraint is applied, or not
        :return: new_df: with the constraint applied
        """
        result = df
        for member in self.single_constraints:
            result = member.apply(result, drop_column)
        return result

    def get_name(self):
        return self.__repr__()

    def __repr__(self):
        if not self.single_constraints:
            return "None"
        return " && ".join("{0}".format(c) for c in self.single_constraints)
# ---------------------------------------------------------------
# Class ConstrainedInv
# A constrained invariant consists of
# -- constraint: the constraint of the invariant
# -- data_assertion: a DataAssertion object which encodes invariant(s)
# that only apply on rows where the constraint holds
# ---------------------------------------------------------------
class ConstrainedInv:
    """A data assertion that only applies to rows satisfying a constraint.

    Attributes:
        id: optional identifier for this constrained invariant.
        constraint: predicate selecting the rows the assertion applies to.
        data_assertion: DataAssertion evaluated on the selected rows.
    """

    def __init__(self, constraint, data_assertion, id=None):
        self.id = id
        self.constraint = constraint
        self.data_assertion = data_assertion
        # Usable only when the underlying assertion exists and is valid.
        self._valid = data_assertion is not None and data_assertion.is_valid()

    def is_valid(self):
        return self._valid

    def evaluate(self, df, options, apply_constraint=True):
        """Evaluate the assertion on the constrained subset of ``df``.

        The result's ``fraction_of_rows_tested`` is rescaled by the fraction
        of input rows that satisfied the constraint.
        """
        subset = self.constraint.apply(df) if apply_constraint else df
        result = self.data_assertion.evaluate(subset, options)
        total_rows = df.shape[0]
        if total_rows > 0:
            result.fraction_of_rows_tested *= float(subset.shape[0]) / total_rows
        return result

    def _get_name(self):
        return self.__repr__()

    def __repr__(self):
        assertion_names = " && ".join(
            self.data_assertion._get_name(i)
            for i in range(self.data_assertion.number_of_invs)
        )
        return (
            "Constraint: "
            + self.constraint.get_name()
            + " --> "
            + "Number of assertions: "
            + str(self.data_assertion.number_of_invs)
            + ", Detailed assertions: "
            + assertion_names
        )
# ---------------------------------------------------------------
# Class ViolationResult: class for storing violation results when checked against assertions
# ---------------------------------------------------------------
class ViolationResult:
def __init__(
    self,
    train_df=None,
    test_df=None,
    assertions=None,
    worst_violation=0.0,
    violation_name=None,
    worst_row=None,
    avg_violation=0.0,
    fraction_of_rows_tested=0.0,
    level_wise_violation=None,
    row_wise_violation_summary=None,
    row_wise_per_attribute_violation_contribution=None,
    compute_explanation=False,
):
    """Store the outcome of checking ``test_df`` against a set of assertions.

    :param train_df: data frame the assertions were learned from
    :param test_df: data frame that was checked
    :param assertions: the assertion collection that was evaluated
        (``preview`` reads its ``constrained_invariants``)
    :param worst_violation: largest single violation observed
    :param violation_name: name/label of the worst violation
        (presumably the assertion that produced it — TODO confirm)
    :param worst_row: row associated with the worst violation
    :param avg_violation: mean violation over the tested rows
    :param fraction_of_rows_tested: fraction of rows any assertion applied to
    :param level_wise_violation: per-level breakdown of violations
        (structure not visible here — verify against the producer)
    :param row_wise_violation_summary: frame indexed like ``test_df`` with a
        "violation" column (used by ``get_most_violating_indices``/``preview``)
    :param row_wise_per_attribute_violation_contribution: frame of per-row,
        per-attribute violation contributions (used by ``preview``)
    :param compute_explanation: whether explanation data was collected;
        ``preview`` refuses to run when False
    """
    self.train_df = train_df
    self.test_df = test_df
    self.assertions = assertions
    self.worst_violation = worst_violation
    self.violation_name = violation_name
    self.worst_row = worst_row
    self.avg_violation = avg_violation
    self.fraction_of_rows_tested = fraction_of_rows_tested
    self.level_wise_violation = level_wise_violation
    self.row_wise_violation_summary = row_wise_violation_summary
    self.row_wise_per_attribute_violation_contribution = (
        row_wise_per_attribute_violation_contribution
    )
    self.compute_explanation = compute_explanation
    # Lazily-populated caches; None until first computed.
    self._row_wise_inv_violation = None
    self._row_wise_inv_compatibility = None
    self._baseline = None
def _heatmap_highlighter(self, s, reference=None):
    """Pandas Styler helper: shade a row red according to ``reference``.

    The row is looked up by ``s.name`` in ``reference``; larger reference
    values produce a more intense red (smaller green/blue channels).
    Returns one ``background-color`` CSS string per cell.
    """
    row = reference.loc[s.name, :].copy()
    # Clamp the span to >= 1 so the division below never divides by zero.
    span = max(1, float(max(row) - min(row)))
    shades = 229 - row * 229 / span
    css = []
    for shade in shades:
        channel = str(hex(int(shade)))[2:].zfill(2)
        css.append("background-color: #ff%s%s" % (channel, channel))
    return css
def _get_sampled_indexes_and_baseline_df(self, violation_threshold, sample_only=True):
    """Collect violating row indexes and a matching baseline frame.

    For every constrained invariant (iterated in reverse), selects rows of
    ``self.test_df`` matching its constraint, sorted by violation. With
    ``sample_only`` True, one representative worst row per partition is
    sampled; otherwise all (new) rows are taken. A baseline row is built
    per partition from the train data: column means for numeric columns,
    modes for the rest.

    :param violation_threshold: rows with violation below this are skipped
        (sampling mode only)
    :param sample_only: sample one representative row per partition instead
        of keeping every violating row
    :return: (sampled_indexes, baseline_df) — baseline_df is None when no
        partition produced rows
    """
    sampled_indexes = []
    baseline_df = None
    for k in reversed(self.assertions.constrained_invariants):
        # Violations of the test rows selected by this partition's
        # constraint, worst first.
        cur_df = pd.DataFrame(
            self.row_wise_violation_summary.loc[
                list(k.constraint.apply(self.test_df).index)
            ]["violation"]
        ).sort_values(by=["violation"], ascending=False)
        valid = False
        if cur_df.empty:
            continue
        if sample_only:
            # Pick a random sample as a representative row from each decision tree partition
            for idx in list(
                cur_df[cur_df["violation"] == max(cur_df["violation"])]
                .sample(frac=1)
                .index
            ):
                # Rows are sorted descending, so the first below the
                # threshold means none qualify.
                if cur_df.loc[idx]["violation"] < violation_threshold:
                    break
                if idx not in sampled_indexes:
                    sampled_indexes.append(idx)
                    valid = True
                    break
            if not valid:
                continue
        else:
            # Keep every not-yet-seen violating row of this partition.
            newly_added = 0
            for i in cur_df.index:
                if i not in sampled_indexes:
                    sampled_indexes.append(i)
                    newly_added += 1
        # Split the constrained train data into numeric vs. other columns.
        cur_train_df_numeric = k.constraint.apply(self.train_df)._get_numeric_data()
        cur_train_df_categorical = k.constraint.apply(
            self.train_df, drop_column=False
        )[
            [
                col
                for col in self.train_df.columns
                if col not in cur_train_df_numeric.columns
            ]
        ]
        # Compute a baseline dataframe to visually contrast with the representative violating rows
        current_mean_row = pd.DataFrame(
            np.array(cur_train_df_numeric.mean()).reshape(
                (1, len(cur_train_df_numeric.columns))
            ),
            columns=cur_train_df_numeric.columns,
        )
        if len(cur_train_df_categorical.columns) > 0:
            # mode() can return several rows; keep only the first.
            current_mode_row = pd.DataFrame(
                np.array(cur_train_df_categorical.mode()).reshape(
                    (-1, len(cur_train_df_categorical.columns))
                ),
                columns=cur_train_df_categorical.columns,
            )[:1]
        # NOTE(review): current_mode_row is only defined when categorical
        # columns exist; the else branch below relies on that invariant.
        cur_row = pd.DataFrame(columns=self.train_df.columns)
        for col in self.train_df.columns:
            if col in cur_train_df_numeric.columns:
                cur_row[col] = current_mean_row[col]
            else:
                cur_row[col] = current_mode_row[col]
        if not sample_only:
            # Repeat the baseline row once per newly added violating row,
            # then restore float dtype lost by np.tile's object array.
            cur_row = pd.DataFrame(np.tile(np.array(cur_row), (newly_added, 1)),
                                   columns=self.train_df.columns)
            for col in self.train_df.columns:
                if col in cur_train_df_numeric.columns:
                    cur_row[col] = cur_row[col].apply(float)
        if baseline_df is None:
            baseline_df = cur_row
        else:
            baseline_df = pd.concat([baseline_df, cur_row], ignore_index=True)
    return sampled_indexes, baseline_df
def get_most_violating_indices(self, num_most_violating_indices):
    """Return the positions of the ``num_most_violating_indices`` most
    violating rows.

    The indices are positions, not locations, i.e., they are to be used
    with ``DataFrame.iloc`` instead of ``DataFrame.loc``.

    :param num_most_violating_indices: how many of the worst rows to
        return; capped at the number of summarized rows.
    :return: integer positions of the worst rows (empty list when the test
        frame is empty or zero rows are requested).
    """
    if self.test_df.empty:
        return []
    num_most_violating_indices = min(
        num_most_violating_indices, self.row_wise_violation_summary.shape[0]
    )
    # BUG FIX: with n == 0 the slice below would be [-0:] == [0:], which
    # silently returned *every* row instead of none.
    if num_most_violating_indices <= 0:
        return []
    violations = self.row_wise_violation_summary.loc[self.test_df.index][
        "violation"
    ]
    # argpartition places the n largest violations at the tail in O(N).
    indices = np.argpartition(violations, -num_most_violating_indices)[
        -num_most_violating_indices:
    ]
    return indices
def preview(self, violation_threshold=0.0, sample_only=True):
if not self.compute_explanation:
return "Explanation not requested while evaluation. Try evaluation with explanation=True."
if self.test_df.empty:
return "Cannot generate preview when test data frame is empty."
# Now compute responsibility of each attribute within the representative sample rows
self.train_df = self.train_df.dropna()
self.test_df = self.test_df.dropna()
sampled_indexes, baseline = self._get_sampled_indexes_and_baseline_df(
violation_threshold,
sample_only,
)
if len(sampled_indexes) == 0 or baseline is None:
return "No violation to preview with violation threshold: " + str(
violation_threshold
)
numeric_columns = baseline._get_numeric_data().columns
expected = np.array(baseline._get_numeric_data())
found = np.array(
self.row_wise_violation_summary.loc[sampled_indexes][
baseline.columns
]._get_numeric_data()
)
violation_amount = pd.DataFrame(
np.abs(found - expected), columns=numeric_columns, index=sampled_indexes
)
coeffs = self.row_wise_per_attribute_violation_contribution.loc[
sampled_indexes
][numeric_columns]
self.reference = np.multiply(
violation_amount, | |
For each node, return the nsi clustering coefficient with respect to
the out motif.
If a link attribute key is specified, return the associated link
weighted version
Reference: [Zemp2014]_
**Examples:**
>>> net = Network.SmallDirectedTestNetwork()
>>> r(net.nsi_local_outmotif_clustering())
Calculating local nsi out motif clustering coefficient...
array([ 0.67 , 0.6693, 1. , 0.7528, 0.5839, 0.7656])
>>> r(net.splitted_copy(node=0).nsi_local_outmotif_clustering())
Calculating local nsi out motif clustering coefficient...
array([ 0.67 , 0.6693, 1. , 0.7528, 0.5839, 0.7656, 0.67 ])
as compared to the unweighted version:
>>> net = Network.SmallDirectedTestNetwork()
>>> r(net.local_outmotif_clustering())
Calculating local out motif clustering coefficient...
array([ 0.5, 0.5, 0. , 0. , 0. , 0. ])
>>> r(net.splitted_copy(node=0).local_outmotif_clustering())
Calculating local out motif clustering coefficient...
array([ 0.5 , 0.5 , 0. , 0. , 0.3333, 1. , 0.5 ])
:arg str key: link attribute key (optional)
"""
def t_func(x, xT):
return x * x * xT
T = self.nsi_outdegree()**2
return self._motif_clustering_helper(t_func, T, key=key, nsi=True)
@cached_const('base', 'transitivity', 'transitivity coefficient (C_1)')
def transitivity(self):
    """
    Return the transitivity (coefficient).

    Defined as three times the number of triangles divided by the number
    of connected triples of vertices; [Newman2003]_ refers to this
    measure as C_1.

    **Example:**

    >>> r(Network.SmallTestNetwork().transitivity())
    Calculating transitivity coefficient (C_1)...
    0.2727

    :rtype: float between 0 and 1
    """
    # Delegate to igraph's implementation on the underlying graph.
    graph = self.graph
    return graph.transitivity_undirected()
def higher_order_transitivity(self, order, estimate=False):
"""
Return transitivity of a certain order.
The transitivity of order n is defined as:
- (n x Number of cliques of n nodes) / (Number of stars of n nodes)
It is a generalization of the standard network transitivity, which is
included as a special case for n = 3.
:arg int order: The order (number of nodes) of cliques to be
considered.
:arg bool estimate: Toggles random sampling for estimating higher order
transitivity (much faster than exact calculation).
:rtype: number (float) between 0 and 1
"""
if self.silence_level <= 1:
print "Calculating transitivity of order", order, "..."
if order == 0 or order == 1 or order == 2:
raise NetworkError("Higher order transitivity is not defined " +
"for orders 0, 1 and 2.")
elif order == 3:
return self.transitivity()
elif order == 4:
# Gathering
# N = self.N
# A = self.adjacency
# T = _higher_order_transitivity4(N, A)
# return T
if estimate:
motif_counts = self.graph.motifs_randesu(
size=4, cut_prob=[0.5, 0.5, 0.5, 0.5])
else:
motif_counts = self.graph.motifs_randesu(size=4)
# Sum over all motifs that contain a star
n_stars = motif_counts[4] + motif_counts[7] + \
2 * motif_counts[9] + 4 * motif_counts[10]
n_cliques = motif_counts[10]
# print motif_counts
if n_stars != 0:
return 4 * n_cliques / float(n_stars)
else:
return 0.
elif order == 5:
pass
elif order > 5:
raise NotImplementedError("Higher order transitivity is not yet " +
"implemented for orders larger than 5.")
else:
raise ValueError("Order has to be a positive integer.")
def local_cliquishness(self, order):
"""
Return local cliquishness of a certain order.
The local cliquishness measures the relative number of cliques (fully
connected subgraphs) of a certain order that a node participates in.
Local cliquishness is not defined for orders 1 and 2. For order 3,
it is equivalent to the local clustering coefficient
:meth:`local_clustering`, since cliques of order 3 are triangles.
Local cliquishness is always bounded by 0 and 1 and set to zero for
nodes with degree smaller than order - 1.
:type order: number (int)
:arg order: The order (number of nodes) of cliques to be considered.
:rtype: 1d numpy array [node] of floats between 0 and 1
"""
if self.directed:
raise NetworkError("Not implemented yet...")
if self.silence_level <= 1:
print "Calculating local cliquishness of order", order, "..."
if order == 0 or order == 1 or order == 2:
raise NetworkError(
"Local cliquishness is not defined for orders 0, 1 and 2.")
elif order == 3:
return self.local_clustering()
elif order == 4:
return _local_cliquishness_4thorder(self.N,
self.adjacency.astype(int),
self.degree())
elif order == 5:
return _local_cliquishness_5thorder(self.N,
self.adjacency.astype(int),
self.degree())
elif order > 5:
raise NotImplementedError("Local cliquishness is not yet " +
"implemented for orders larger than 5.")
else:
raise ValueError("Order has to be a positive integer.")
@staticmethod
def weighted_local_clustering(weighted_A):
"""
For each node, return its weighted clustering coefficient,
given a weighted adjacency matrix.
This follows [Holme2007]_.
**Example:**
>>> print r(Network.weighted_local_clustering(weighted_A=[
... [ 0. , 0. , 0. , 0.55, 0.65, 0.75],
... [ 0. , 0. , 0.63, 0.77, 0.91, 0. ],
... [ 0. , 0.63, 0. , 0. , 1.17, 0. ],
... [ 0.55, 0.77, 0. , 0. , 0. , 0. ],
... [ 0.65, 0.91, 1.17, 0. , 0. , 0. ],
... [ 0.75, 0. , 0. , 0. , 0. , 0. ]]))
Calculating local weighted clustering coefficient...
[ 0. 0.2149 0.3539 0. 0.1538 0. ]
as compared to the unweighted version:
>>> print r(Network.SmallTestNetwork().local_clustering())
Calculating local clustering coefficients...
[ 0. 0.3333 1. 0. 0.3333 0. ]
:type weighted_A: square numpy array [node,node] of floats >= 0
:arg weighted_A: Entry [i,j] is the link weight from i to j.
A value of 0 means there is no link.
:rtype: 1d numpy array [node] of floats between 0 and 1
"""
# TODO: must be symmetric? directed version?
print "Calculating local weighted clustering coefficient..."
wA = np.array(weighted_A)
max_w = np.ones_like(wA).dot(wA.max())
return (np.linalg.matrix_power(wA, 3).diagonal()
/ (wA.dot(max_w).dot(wA)).diagonal())
def nsi_twinness(self):
    """
    For each pair of nodes, return an n.s.i. measure of 'twinness'.

    Ranges from 0.0 for unlinked pairs to 1.0 for linked nodes that share
    exactly the same neighbors (twins). Computed as the node-weighted
    common-neighbor count, normalized by the larger of the two n.s.i.
    degrees, masked to linked pairs.

    :rtype: square array [node,node] of floats between 0 and 1
    """
    # TODO: implement other versions as well
    n = self.N
    nsi_deg = self.nsi_degree()
    Aplus = self.sp_Aplus()
    # Weighted number of common neighbors (nodes count as their own
    # neighbors via A+).
    shared = Aplus * self.sp_diag_w() * Aplus
    deg_rows = np.repeat([nsi_deg], n, axis=0)
    return Aplus.A * shared.A / np.maximum(deg_rows, deg_rows.T)
#
# Measure Assortativity coefficient
#
def assortativity(self):
    """
    Return the assortativity coefficient.

    Degree-degree correlation over all links, following [Newman2002]_.

    **Example:**

    >>> r(Network.SmallTestNetwork().assortativity())
    -0.4737

    :rtype: float between 0 and 1
    """
    deg = self.graph.degree()
    deg_sq = [d * d for d in deg]
    n_links = float(self.graph.ecount())
    # Accumulate Newman's three sums over the edge list.
    prod_sum = 0
    pair_sum = 0
    sq_sum = 0
    for u, v in self.graph.get_edgelist():
        prod_sum += deg[u] * deg[v]
        pair_sum += deg[u] + deg[v]
        sq_sum += deg_sq[u] + deg_sq[v]
    mean_sq = (pair_sum / (2 * n_links)) ** 2
    return (prod_sum / n_links - mean_sq) / (sq_sum / (2 * n_links) - mean_sq)
@cached_const('nsi', 'local clustering')
def nsi_local_clustering_uncorr(self):
    """
    For each node, return its uncorrected n.s.i. clustering coefficient
    (between 0 and 1).

    (not yet implemented for directed networks)

    :rtype: array([float])
    """
    if self.directed:
        raise NotImplementedError("Not implemented for directed networks.")
    weights = self.node_weights
    nsi_deg = self.nsi_degree()
    # Node-weighted adjacency; its weighted closed triangles form the
    # numerator of the clustering coefficient.
    weighted_adj = self.sp_A * self.sp_diag_w()
    closed = (weighted_adj * self.sp_Aplus() * weighted_adj.T).diagonal()
    return (closed + 2 * nsi_deg * weights - weights**2) / nsi_deg**2
def nsi_local_clustering(self, typical_weight=None):
"""
For each node, return its uncorrected (between 0 and 1) or corrected
(at most 1 / negative / NaN) n.s.i. clustering coefficient.
(not yet implemented for directed networks)
**Example:**
>>> net = Network.SmallTestNetwork()
>>> r(net.nsi_local_clustering())
Calculating n.s.i. degree...
array([ 0.5513, 0.7244, 1. , 0.8184, 0.8028, 1. ])
>>> r(net.splitted_copy().nsi_local_clustering())
Calculating n.s.i. degree...
array([ 0.5513, 0.7244, 1. , 0.8184, 0.8028, 1. , 1. ])
as compared to the unweighted version:
>>> net = Network.SmallTestNetwork()
>>> | |
local and
arg.get_local_parent(node.children[0], pos-.5) == node and
node.children[1] in local and
arg.get_local_parent(node.children[1], pos-.5) == node and
node.children[0] != node.children[1])
def walk_up(arg, leaves, time, pos, ignore=None):
    """Find the ancestor of `leaves` in the marginal tree of `arg` at
    site `pos`, walking up to (at most) age `time`.

    `ignore`, if given, names a lineage whose non-coalescing ancestors are
    excluded from the local node set before the walk.
    Asserts (with debug output) if the walk overshoots `time`.
    """
    # Postorder index of every node in the marginal tree at this site;
    # used as the heap priority so children pop before their ancestors.
    order = dict((node, i) for i, node in enumerate(
        arg.postorder_marginal_tree(pos-.5)))
    local = set(order.keys())
    if ignore is not None and ignore in arg:
        ptr = arg[ignore]
        if ptr in local:
            local.remove(ptr)
            ptr = arg.get_local_parent(ptr, pos-.5)
        else:
            ptr = None
        # Strip the ignored lineage upward until a genuine local
        # coalescent (a node keeping one *other* local child) is reached.
        while ptr and ptr in local:
            if (len(ptr.children) == 2 and
                ((ptr.children[0] in local and
                  arg.get_local_parent(ptr.children[0], pos-.5) == ptr) or
                 (ptr.children[1] in local and
                  arg.get_local_parent(ptr.children[1], pos-.5) == ptr))):
                break
            local.remove(ptr)
            ptr = arg.get_local_parent(ptr, pos-.5)
    # Min-heap of (postorder index, node): repeatedly replace the
    # shallowest node by its local parent until one common ancestor remains.
    queue = [(order[arg[x]], arg[x]) for x in leaves]
    seen = set(x[1] for x in queue)
    heapq.heapify(queue)
    while len(queue) > 1:
        i, node = heapq.heappop(queue)
        parent = arg.get_local_parent(node, pos-.5)
        if parent and parent not in seen:
            seen.add(parent)
            heapq.heappush(queue, (order[parent], parent))
    node = queue[0][1]
    # Continue climbing until `time`, stopping early at a local coalescent.
    parent = arg.get_local_parent(node, pos-.5)
    while parent and parent.age <= time:
        if is_local_coal(arg, parent, pos, local):
            break
        node = parent
        parent = arg.get_local_parent(node, pos-.5)
    # Sanity check: the next parent must not be *younger* than the target
    # time; dump the marginal tree for debugging if it is.
    if parent:
        if parent.age < time:
            print leaves, parent.age, time, ignore
            tree = arg.get_marginal_tree(pos-.5).get_tree()
            tree.write()
            treelib.draw_tree_names(tree, maxlen=8, minlen=8)
            assert False
    return node
def add_node(arg, node, time, pos, event):
    """Insert a new `event` node of age `time` above `node` at site `pos`.

    The new node is spliced between `node` and its current local parent
    (if any) and returned.
    """
    node2 = arg.new_node(event=event, age=time, children=[node], pos=pos)
    if event == "coal":
        # Coalescent nodes carry no recombination position.
        node2.pos = 0
    parent = arg.get_local_parent(node, pos-.5)
    if parent:
        # Rewire both directions: node -> node2 -> parent.
        node.parents[node.parents.index(parent)] = node2
        parent.children[parent.children.index(node)] = node2
        node2.parents.append(parent)
    else:
        # node was a local root; node2 becomes its (only) parent.
        node.parents.append(node2)
    return node2
arg_recomb = dict((x.pos, x) for x in iter_visible_recombs(arg))
recomb_clades = [
(pos-1, None) + get_clade_point(arg, rnode, rtime, pos-1)
for pos, rnode, rtime in recombs] + [
(node.pos, node.name) +
get_clade_point(arg, node.name, node.age, node.pos)
for node in iter_visible_recombs(arg)]
recomb_clades.sort()
# make initial tree
arg2 = arg.get_marginal_tree(-1)
arglib.remove_single_lineages(arg2)
start = get_clade_point(arg, thread[0][0], thread[0][1], 0)
node = walk_up(arg2, start[0], start[1], -1)
node2 = add_node(arg2, node, start[1], -1, "coal")
leaf = arg2.new_node(name=new_name, event="gene", age=0)
leaf.parents.append(node2)
node2.children.append(leaf)
# add each recomb and re-coal
for rpos, rname, rleaves, rtime in recomb_clades:
if rpos in arg_recomb:
# find re-coal for existing recomb
if thread[rpos][1] != thread[rpos+1][1]:
if rtime > min(thread[rpos][1], thread[rpos+1][1]):
print ">>", rtime, thread[rpos], thread[rpos+1]
treelib.draw_tree_names(
arg.get_marginal_tree(rpos-.5).get_tree(),
maxlen=8, minlen=8)
treelib.draw_tree_names(
arg.get_marginal_tree(rpos+.5).get_tree(),
maxlen=8, minlen=8)
assert False
node = arg_recomb[rpos]
#local1 = set(arg.postorder_marginal_tree(rpos-.5))
local2 = set(arg.postorder_marginal_tree(rpos+.5))
last = node
node = arg.get_local_parent(node, rpos+.5)
while (not is_local_coal(arg, node, rpos+1, local2)):
last = node
node = arg.get_local_parent(node, rpos+.5)
c = node.children
child = c[0] if c[1] == last else c[1]
#recoal = node
cleaves, ctime = get_clade_point(
arg, child.name, node.age, rpos-.5)
# get local tree T^{n-1}_i and add new branch
tree = arg.get_marginal_tree(rpos+.5)
arglib.remove_single_lineages(tree)
node_name, time = thread[rpos+1]
node = tree[node_name]
# add new branch
node2 = add_node(tree, node, time, rpos+1, "coal")
if not node2.parents:
tree.root = node2
leaf = tree.new_node(name=new_name, event="gene", age=0)
leaf.parents.append(node2)
node2.children.append(leaf)
recomb = walk_up(tree, rleaves, rtime, rpos+1, new_name)
if recomb == node2 and rtime == node2.age:
# recomb and new coal-state are near each other
# we must decide if recomb goes above or below coal-state
# if this is a mediated SPR, then recomb goes below.
# otherwise it goes above.
# SPR is mediated if previous coal state is not recomb branch
node_name, time = thread[rpos]
if node2.children[0].name != node_name:
# this is a mediated coal
recomb = node2.children[0]
coal = recomb.parents[0]
c = coal.children
child = c[0] if c[1] == recomb else c[1]
# get coal point in T^n_i
rleaves, rtime = get_clade_point(
tree, recomb.name, rtime, rpos+1)
cleaves, ctime = get_clade_point(
tree, child.name, coal.age, rpos+1)
node1 = walk_up(arg2, rleaves, rtime, rpos+1)
node2 = walk_up(arg2, cleaves, ctime, rpos+1, node1.name)
else:
# find re-coal for new recomb
assert rtime <= thread[rpos][1], (rtime, thread[rpos][1])
if rleaves == [new_name]:
# recomb on new branch, coal given thread
cleaves, ctime = get_clade_point(
arg, thread[rpos+1][0], thread[rpos+1][1], rpos+.5)
assert ctime >= rtime, (rtime, ctime)
node1 = walk_up(arg2, rleaves, rtime, rpos+1)
node2 = walk_up(arg2, cleaves, ctime, rpos+1, new_name)
else:
# recomb in ARG, coal on new branch
cleaves = [new_name]
ctime = thread[rpos+1][1]
assert ctime >= rtime, (rtime, ctime)
# NOTE: new_name is not ignored for walk_up on rleaves
# because I do not want the recombination to be higher
# than the coal point, which could happen if the recomb time
# is the same as the current coal time.
node1 = walk_up(arg2, rleaves, rtime, rpos+1)
node2 = walk_up(arg2, cleaves, ctime, rpos+1, node1.name)
assert node1.parents
assert rtime <= ctime
recomb = add_node(arg2, node1, rtime, rpos, "recomb")
if node1 == node2:
node2 = recomb
coal = add_node(arg2, node2, ctime, rpos, "coal")
recomb.parents.append(coal)
coal.children.append(recomb)
node, time = get_coal_point(arg2, arg2[new_name], rpos+1)
assert time == thread[rpos+1][1], (time, thread[rpos+1][1])
return arg2
def arg_lca(arg, leaves, time, pos, ignore=None):
    """Returns Least Common Ancestor for leaves in an ARG at position 'pos'

    If `time` is given, continues climbing above the LCA until that age,
    stopping early at a local coalescent node. `ignore`, if given, names a
    lineage whose non-coalescing ancestors are excluded from the local set.
    """
    def is_local_coal(arg, node, pos, local):
        # True when `node` is a genuine binary coalescent of the local
        # tree at this site: both children are local, both attach to
        # `node` locally, and they are distinct.
        return (len(node.children) == 2 and
                node.children[0] in local and
                arg.get_local_parent(node.children[0], pos-.5) == node and
                node.children[1] in local and
                arg.get_local_parent(node.children[1], pos-.5) == node and
                node.children[0] != node.children[1])

    # Postorder index of each marginal-tree node; heap priority below.
    order = dict((node, i) for i, node in enumerate(
        arg.postorder_marginal_tree(pos-.5)))
    local = set(order.keys())
    if ignore is not None and ignore in arg:
        ptr = arg[ignore]
        local.remove(ptr)
        ptr = arg.get_local_parent(ptr, pos-.5)
        # Strip the ignored lineage upward until a node that still
        # coalesces with another local child is found.
        while ptr and ptr in local:
            if (len(ptr.children) == 2 and
                ((ptr.children[0] in local and
                  arg.get_local_parent(ptr.children[0], pos-.5) == ptr) or
                 (ptr.children[1] in local and
                  arg.get_local_parent(ptr.children[1], pos-.5) == ptr))):
                break
            local.remove(ptr)
            ptr = arg.get_local_parent(ptr, pos-.5)
    # Min-heap on postorder index: pop the shallowest node and push its
    # local parent until all lineages merge into one node (the LCA).
    queue = [(order[arg[x]], arg[x]) for x in leaves]
    seen = set(x[1] for x in queue)
    heapq.heapify(queue)
    while len(queue) > 1:
        i, node = heapq.heappop(queue)
        parent = arg.get_local_parent(node, pos-.5)
        if parent and parent not in seen:
            seen.add(parent)
            heapq.heappush(queue, (order[parent], parent))
    node = queue[0][1]
    parent = arg.get_local_parent(node, pos-.5)
    # walk up appropriate time if given
    if time is not None:
        while parent and parent.age <= time:
            if is_local_coal(arg, parent, pos, local):
                break
            node = parent
            parent = arg.get_local_parent(node, pos-.5)
        # Sanity check: overshooting `time` indicates an inconsistent ARG;
        # dump the marginal tree for debugging.
        if parent:
            if parent.age < time:
                print (leaves, parent.age, time)
                tree = arg.get_marginal_tree(pos-.5).get_tree()
                tree.write()
                treelib.draw_tree_names(tree, maxlen=8, minlen=8)
                assert False
    return node
def find_recomb_coal(tree, last_tree, recomb_name=None, pos=None):
    """
    Find the recomb and coal points for the SPR between two trees

    Returns ((recomb_node_name, recomb_time), (coal_node_name, coal_time))

    When `recomb_name` is not given, the next visible recombination after
    `pos`-1 in `last_tree` is used.
    """
    if recomb_name is None:
        recomb = find_tree_next_recomb(last_tree, pos-1, tree=True)
        recomb_name = recomb.name
    # Locate the recombination node in the new tree.
    recomb_node = tree[recomb_name]
    recomb_time = recomb_node.age
    # Climb to the re-coalescence point: the first ancestor that also
    # exists in the previous tree.
    coal = recomb_node.parents[0]
    while coal.name not in last_tree and coal.parents:
        coal = coal.parents[0]
    coal_time = coal.age
    # Resolve the coal branch in the previous tree, skipping any
    # single-lineage (intermediary) nodes; above-root coalescence maps to
    # the previous root.
    if coal.name in last_tree:
        ptr = last_tree[coal.name]
        while len(ptr.children) == 1:
            ptr = ptr.children[0]
        coal_branch = ptr.name
    else:
        coal_branch = last_tree.root.name
    # Resolve the recomb branch in the new tree the same way.
    branch_ptr = tree[recomb_name]
    while len(branch_ptr.children) == 1:
        branch_ptr = branch_ptr.children[0]
    return (branch_ptr.name, recomb_time), (coal_branch, coal_time)
def iter_arg_sprs(arg, start=None, end=None):
    """
    Iterates through the SPRs of an ARG

    Yields (block, tree, last_tree, spr)
    where spr = (recomb_node, recomb_time, coal_node, coal_time)

    `tree`/`last_tree` are simplified copies (single lineages removed);
    `spr` is None for the first block.
    """
    if start is None:
        start = arg.start
    if end is None:
        end = arg.end
    last_tree_full = None
    last_tree = None
    for block, tree_full in arglib.iter_local_trees(arg, start, end):
        if last_tree_full:
            # The recombination that starts this block sits at the block's
            # left edge.  (.next() — this module targets Python 2.)
            recomb = (x for x in tree_full if x.pos == block[0]).next()
            spr = find_recomb_coal(tree_full, last_tree_full,
                                   recomb_name=recomb.name)
        else:
            spr = None
        # get tree with only leaves and coalescent nodes
        tree = tree_full.copy()
        tree = arglib.remove_single_lineages(tree)
        yield block, tree, last_tree, spr
        # Keep the full tree for SPR detection, the simplified one to yield.
        last_tree_full = tree_full
        last_tree = tree
def get_local_node_mapping(tree, last_tree, spr):
    """
    Determine the mapping between nodes in local trees across ARG.

    A node maps across local trees until its branch is broken (it is the
    parent of the recomb node). Assumes `tree` and `last_tree` share node
    naming and contain no intermediary (single-lineage) nodes.

    Returns a dict {last_tree node name: tree node name or None}, or None
    when there is no previous tree.
    """
    if last_tree is None:
        # First local tree: nothing to map from.
        return None
    (rname, rtime), (cname, ctime) = spr
    # An SMC-style ARG never recombines and re-coalesces on one branch.
    assert rname != cname
    # Every node maps to its namesake except the broken recomb parent.
    broken = last_tree[rname].parents[0]
    mapping = dict((node.name, node.name) for node in last_tree)
    mapping[broken.name] = None
    return mapping
#=============================================================================
# probabilities
def prob_recomb(tree, state, nlineages, times, time_steps, rho, recomb_time):
nbranches, |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.