| file_name (large_string, 4–140 chars) | prefix (large_string, 0–12.1k chars) | suffix (large_string, 0–12k chars) | middle (large_string, 0–7.51k chars) | fim_type (large_string, 4 classes) |
|---|---|---|---|---|
Lt.ts | {
public static Lt: Lang = {
// Side menu
menuMain: 'Pagrindinis',
menuOrganizations: 'Organizacijos',
menuProjects: 'Projektai',
menuSavedProjects: 'Išsaugoti projektai',
menuSelectedProjects: 'Pasirinkti projektai',
menuCreatedProjects: 'Sukurti projektai',
menuNewProject: 'Naujas projektas',
menuCalendar: 'Kalendorius',
menuAboutUs: 'Apie mus',
menuPrivacyPolicy: 'Privatumo politika',
menuLogin: 'Prisijungti',
menuRegistration: 'Registracija',
menuSettings: 'Parametrai',
menuMenu: 'Meniu',
menuLogout: 'Atsijungti',
menuExit: 'Išeiti',
// Main page
mainTitle: 'Savanoriavimo sistema',
mainEvents: 'Artimiausi projektai',
mainWantedOrganizations: 'Populiariausios organizacijos',
mainStart: 'Pradžia',
mainIntroVideo: 'Apie sistemą',
// About us page
aboutTitle: 'Trumpai apie mus',
aboutParag1: 'Platypus kiekvienam suteikia galimybę pasinerti į bendruomenę, dirbti kartu su vietos lyderiais ir spręsti svarbiausius iššūkius Lietuvoje. Mūsų misija - padėti organizacijoms visoje Lietuvoje, suteikiant jiems darbo jėgos ir lėšų, reikalingų kovoti su iššūkiais, su kuriais susiduria mūsų apskrities bendruomenės. Mes tikimės:',
aboutParag2: 'padėti organizacijoms, kurios tiesiogiai ar netiesiogiai paveikė žmonių gyvenimus',
aboutParag3: 'prisidėti prie kitų žmonių gyvenimo gerinimo',
aboutParag4: 'padėti gerinti aplinką',
aboutParag5: 'padėti mažiau pasisekusiems žmonėms ir mažumoms jaustis komandos dalimi ',
aboutParag6: 'Kai pažiūrime į priežastis, kodėl savanoriavimas yra svarbus, kalbame ne tik apie poveikį, kurį galime padaryti mažiau pasisekusių žmonių gyvenime, bet ir apie vaidmenį, kurį savanoriavimas gali atlikti, kad taptume tolerantiškesni ir bendruomeniškesni asmenys. Kiekvienas iš mūsų turėtų būti pasiruošęs pasiūlyti savo laiką ir nieko nesitikėti atgal.',
aboutTitle2: 'Kodėl?',
aboutVisit: 'Apsilankyk ir sužinosi daugiau!',
aboutOpinion: 'Mums svarbu ką jūs manote!',
aboutButtonComment: 'Palikite komentarą!',
aboutButtonVideo: 'Žiūrėti vaizdo įrašą',
aboutEnterEmail: 'Įveskite el paštą',
aboutFeelFree: 'Laukiame jūsų nuomonės',
aboutComment: 'Jūsų komentaras',
aboutRequired: 'Šis laukas yra privalomas užpildyti',
aboutAlertCommentHeader: 'Ačiū',
aboutAlertCommentMessage: 'Jūsų laiškas sėkmingai išsiųstas',
aboutSourceHeader: 'Norite sužinoti daugiau?',
aboutSourceMessage: 'Paspauskite \'sutinku\' ir nuoroda atsidarys naujame lange',
// Alert cancel and comfirm
alertCancel: 'Atšaukti',
alertConfirm: 'Sutikti',
toastClose: 'Uždaryti',
// Login page
loginWelcome: 'Sveiki!',
loginFieldset: 'Prisijunkite',
loginUsername: 'Vartotojo vardas',
loginPassword: 'Slaptažodis',
loginForgot: 'Pamiršote slaptažodį?',
loginButton: 'Prisijungti',
loginNewHere: 'Pirmas kartas?',
loginSignUp: ' Registruokitės',
loginResetPasswordMessage: 'Įveskite savo el-pašto adresą ir mes jums atsiųsime slaptažodžio keitimo nuorodą ',
loginRequiredField: 'Šis laukas privalomas ir turi turėti bent 5 simbolius',
loginSuccessfulEmail: 'Laiškas buvo išsiųstas',
loginUnSuccessfulEmail: 'Laiško nepavyko išsiųsti',
loginWrongHeader: 'Prisijungti nepavyko',
loginWrongMessage: 'Neteisingi prisijungimo vardas ir slaptažodis!',
loginSuccessLogin: 'Sėkmingai prisijungėte svetainėje',
// Registration page
registrationTitle: 'Susikurkite vartotojo paskyrą',
registrationInformation: 'Jūsų informacija',
registrationUsername: 'Vartotojo vardas',
registrationEmail: 'El-paštas',
registrationType: 'Vartotojo tipas',
registrationVolunteer: 'Savanoris',
registrationOrganization: 'Organizacija',
registrationPassword: 'Slaptažodis',
registrationConfirm: 'Patvirtinkit slaptažodį',
registrationTerms: 'Sutinku su sąlygomis!',
registrationSingUp: 'Registruotis',
registrationRequiredMessage: 'Šis laukas privalomas ir turi turėti bent 5 simbolius',
registrationPasswordMisMatch: 'Slaptažodžiai nesutampa',
registrationHaveAnAccount: 'Aš jau prisiregistravęs!',
registrationValidEmail: 'Validus el-paštas yra būtinas',
registrationNotRegisteredHeader: 'Registracija nesėkminga',
registrationNotRegisteredMessage: 'Įvesti duomenys neteisingi',
registrationSuccess: 'Vartotojas sėkmingai priregistruotas',
registrationEmailTaken: 'Šis el. paštas jau paimtas!',
registrationUsernameTaken: 'Šis vartotojo vardas jau paimtas!',
// Projects page
projectsSearch: 'Paieška',
projectsArchive: 'ARCHYVAS',
projectsNew: 'NAUJI PROJEKTAI',
projectsUntil: 'until',
// Project page
projectEdit: 'Redaguoti projektą',
projectVolunteers: 'Savanoriai',
projectFind: 'Mus rasite',
projectSave: 'Išsaugoti',
projectsToVolunteer: 'Aš noriu savanoriauti',
projectSaved: 'Išsaugotas',
projectsYouVolunteer: 'Jūs esate savanoris',
projectStart: 'Projektas prasidės',
projectEnd: 'Projektas baigsis',
projectGoBack: 'Grįžti',
projectHeader: 'Projekto puslapis',
projectOpenInNewTab: 'Atidaryti naujame lange',
projectClipBoard: 'Nukopijuoti į iškarpinę',
// Volunteers page
volunteersHeader: 'Savanorių puslapis',
volunteersYourVolunteers: 'Jūsų projekto savanoriai',
volunteersAll: 'Visi užsiregistravę vartotojai',
volunteersNone: 'Nėra informacijos',
volunteersAnonymousName: 'Anoniminis ',
volunteersAnonymousLast: 'vartotojas',
volunteersGoBack: 'Grįžti',
// Modal volunteer
modalVAnonymous: 'Anoniminis vartotojas',
modalVContatInfo: 'Kontaktinė informacija',
modalNone: 'Nėra informacijos',
modalVDescription: 'Aprašymas apie veiklas',
modalClose: 'Uždaryti',
// New project
newPojectHeader: 'Naujas projektas',
newPojectImage: 'Projekto paveikslėlis',
newPojectTitle: 'Pavadinimas',
newPojectEmail: 'El-pašto adresas',
newPojectPhone: 'Telefono numeris',
| Lt | identifier_name | |
Lt.ts | ugoti',
projectsToVolunteer: 'Aš noriu savanoriauti',
projectSaved: 'Išsaugotas',
projectsYouVolunteer: 'Jūs esate savanoris',
projectStart: 'Projektas prasidės',
projectEnd: 'Projektas baigsis',
projectGoBack: 'Grįžti',
projectHeader: 'Projekto puslapis',
projectOpenInNewTab: 'Atidaryti naujame lange',
projectClipBoard: 'Nukopijuoti į iškarpinę',
// Volunteers page | volunteersAll: 'Visi užsiregistravę vartotojai',
volunteersNone: 'Nėra informacijos',
volunteersAnonymousName: 'Anoniminis ',
volunteersAnonymousLast: 'vartotojas',
volunteersGoBack: 'Grįžti',
// Modal volunteer
modalVAnonymous: 'Anoniminis vartotojas',
modalVContatInfo: 'Kontaktinė informacija',
modalNone: 'Nėra informacijos',
modalVDescription: 'Aprašymas apie veiklas',
modalClose: 'Uždaryti',
// New project
newPojectHeader: 'Naujas projektas',
newPojectImage: 'Projekto paveikslėlis',
newPojectTitle: 'Pavadinimas',
newPojectEmail: 'El-pašto adresas',
newPojectPhone: 'Telefono numeris',
newPojectWebsite: 'Internetinis tinklapis',
newPojectDescription: 'Aprašymas',
newPojectStart: 'Pradžios data',
newPojectEnd: 'Pabaigos data',
newPojectLocation: 'Vietovės adresas',
newPojectCreate: 'Sukurti',
newPojectAlertNotHeader: 'Projektas nebuvo sukurtas',
newPojectAlertNotMessage: 'Prašome užpildyti tuščius laukus',
newPojectAlertOk: 'Gerai',
newProjectFailedDates: 'Jūsų projekto pabaigos data yra ankstesnė nei pradžios',
// Edit project
editProjectDelete: 'Ištrinti',
editProjectAlertEditHeader: 'Atnaujinimas nepavyko',
editProjectAlertEditMessage: 'Prašome užpildyti tuščius laukelius',
editHeader: 'Koreguoti projektą',
editTitle: 'Redaguoti pavadinimą',
editDescription: 'Redaguoti aprašą',
editChangeStart: 'Keisti pradžios laiką',
editChangeEnd: 'Keisti pabaigos laiką',
changeLocation: 'Keisti vietovę',
editSave: 'Išsaugoti pakeitimus',
editSucesfull: 'Sėkmingai išsaugota',
editConfirmDelete: 'Ar tikrai norite ištrinti šį projektą?',
yes: 'Taip',
no: 'Ne',
// registrationSuccess: 'Vartotojas sėkmingai priregistruotas',
// Copyright
cr1: 'Privatumo politika',
cr2: 'Ši privatumo politika apibūdina, kaip volunteering.ga naudoja ir saugo bet kokią mums suteiktą informaciją. Esame įsipareigoję užtikrinti, kad jūsų privatumas būtų apsaugotas. Jei pateikiate mums asmeninę informaciją per volunteering.ga, galite būti tikri, kad ji bus naudojama tik pagal šį privatumo pareiškimą.',
cr3: 'Svetainės lankytojai',
cr4: 'Kaip ir dauguma interneto svetainių operatorių, volunteering.ga renka ne asmeniškai identifikuojančią informaciją volunteering.ga taip pat renka potencialiai asmeniškai identifikuojančią informaciją, pvz., interneto protokolo (IP) adresus. volunteering.ga nenaudoja IP adresų, kad nustatytų savo lankytojus, ir neatskleidžia tokios informacijos, išskyrus tas pačias aplinkybes, kuriomis jis naudojasi ir atskleidžia asmeninę informaciją, kaip aprašyta toliau.',
cr5: 'Asmens identifikavimo informacijos rinkimas',
cr6: 'Tam tikri volunteering.ga lankytojai pasirenka sąveikauti su volunteering.ga tokiu būdu, kuris reikalauja volunteering.ga informacijos rinkimo. Informacijos, kurią volunteering.ga renka, kiekis ir rūšis priklauso nuo sąveikos pobūdžio. Pvz., prašome lankytojų, kurie naudojasi mūsų paslaugomis, pateikti naudotojo vardą ir el. pašto adresą.',
cr7: 'Kiekvienu atveju volunteering.ga renka tokią informaciją tik tiek, kiek būtina ar tikslinga, siekiant įvykdyti lankytojo sąveiką su volunteering.ga. volunteering.ga neatskleidžia asmeniškai identifikuojančios informacijos, išskyrus žemiau aprašytą informaciją. Be to, lankytojai visada gali atsisakyti pateikti asmeniškai identifikuojančią informaciją, tačiau įspėjimas, kad jis gali užkirsti kelią tam tikrai su svetaine susijusiai veiklai, pavyzdžiui, peržiūrėti naujausius svetainės pasiūlymus.',
cr8: 'Visa informacija, surinkta apie savanorišką veiklą, bus tvarkoma pagal GDPR teisės aktus.',
cr9: 'Tam tikros asmeniškai identifikuojančios informacijos apsauga',
cr10: 'volunteering.ga atskleidžia potencialiai asmeniškai identifikuojančią ir asmeniškai identifikuojančią informaciją tik tiems projektų administratoriams, darbuotojams, rangovams ir susijusioms organizacijoms, kurie (i) turi žinoti šią informaciją, kad galėtų ją apdoroti savanoriškoje veikloje, ir (ii) kurie sutiko neatskleisti jos kitiems.',
cr11: 'volunteering.ga niekam neišnuomos ir neparduos potencialiai asmeniškai identifikuojančios ir asmeniškai identifikuojančios informacijos. Išskyrus projekto administratorius, darbuotojus, rangovus ir susijusias organizacijas, kaip aprašyta aukščiau, volunteering.ga atskleidžia potencialiai asmeniškai identifikuojančią ir asmeniškai identifikuojančią informaciją tik tada, kai to reikalauja įstatymas, jei suteikiate leidimą bendrai naudoti informaciją, arba kai volunteering.ga pagrįstai mano, kad atskleidimas yra būtinas volunteering.ga, trečiųjų šalių ar plačiosios visuomenės turtui ar teisėms apsaugoti.',
cr12: 'Jei esate registruotas volunteering.ga interneto svetainės vartotojas ir pateikėte savo el. pašto adresą, volunteering.ga kartais gali atsiųsti jums el. laišką, kuriame papasakos apie naujas funkcijas, paprašys atsiliepimų arba tiesiog atnaujins informaciją apie tai, | volunteersHeader: 'Savanorių puslapis',
volunteersYourVolunteers: 'Jūsų projekto savanoriai', | random_line_split |
card.go |
func GetHand(n int, deck Deck) (Hand, Deck) {
var hand Hand
cards := deck[:n]
hand = append(hand, cards...)
return hand, shuffle(deck[n:])
}
func Less(cards []Card) func(i, j int) bool {
return func(i, j int) bool {
return cards[i].Rank < cards[j].Rank
}
}
func areConsecutive(cards []Card) bool {
for i := 0; i < len(cards)-1; i++ {
if cards[i+1].Rank-cards[i].Rank != 1 {
return false
}
}
return true
}
func intersection(a, b []Card) []Card { // NOTE: despite the name, this returns the cards of `b` that are not in `a` (a set difference); pass the slice to subtract in `a`
m := make(map[Card]bool)
c := []Card(nil)
for _, item := range a {
m[item] = true
}
for _, item := range b {
if _, ok := m[item]; !ok {
c = append(c, item)
}
}
return c
}
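// Illustrative note (not in the original source): with distinct cards,
// intersection([]Card{c1}, []Card{c1, c2, c3}) returns []Card{c2, c3},
// which is why callers use it to remove an already-matched group from the hand.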
func nOfSameSuit(h Hand, n int) ([]Card, bool) {
m := make(map[Suit][]Card)
for i := len(h) - 1; i >= 0; i-- {
m[h[i].Suit] = append(m[h[i].Suit], h[i])
if len(m[h[i].Suit]) == n {
sort.Slice(m[h[i].Suit], Less(m[h[i].Suit]))
return m[h[i].Suit], true
}
}
return nil, false
}
func nOfSameRank(h Hand, n int) ([]Card, bool) {
m := make(map[Rank][]Card)
for i := len(h) - 1; i >= 0; i-- {
m[h[i].Rank] = append(m[h[i].Rank], h[i])
if len(m[h[i].Rank]) == n {
sort.Slice(m[h[i].Rank], Less(m[h[i].Rank]))
return m[h[i].Rank], true
}
}
return nil, false
}
func nPair(h Hand, n int) ([]Card, bool) {
ret := []Card(nil)
cnt := 0
for i := len(h) - 1; i >= 1; i-- {
if h[i].Rank == h[i-1].Rank {
cnt++
ret = append(ret, h[i], h[i-1])
i--
if cnt == n {
return ret, true
}
} else {
cnt = 0
ret = nil
}
}
return nil, false
}
func normalizedScore(cards []Card, n int) float64 {
cardScore, normalizer := float64(0), float64(0)
for i, c := range cards {
cardScore += (math.Pow(13, float64(i)) * (float64(c.Rank)))
normalizer += math.Pow(13, float64(i)) * 14
}
return float64((cardScore * float64(n)) / normalizer)
}
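// Worked example (illustrative, assuming ranks Two..Ace map to 2..14): for cards
// with ranks [Three, King] and n = 90, cardScore = 3 + 13*13 = 172 and
// normalizer = 14 + 13*14 = 196, giving 172*90/196 ≈ 78.98. Cards later in the
// sorted slice carry base-13 positional weight, and the result never exceeds n.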
func (h Hand) contains(card Card) bool {
for _, c := range h {
if (c.Suit == card.Suit) && (c.Rank == card.Rank) {
return true
}
}
return false
}
func nonRepeatingCards(h Hand) Hand {
var ret Hand
m := make(map[Rank]bool)
for _, card := range h {
if _, ok := m[card.Rank]; !ok {
ret = append(ret, card)
m[card.Rank] = true
}
}
return ret
}
func checkRoyalFlushForSuit(h Hand, s Suit) ([]Card, bool) {
if h.contains(Card{Suit: s, Rank: Ten}) && h.contains(Card{Suit: s, Rank: Jack}) && h.contains(Card{Suit: s, Rank: Queen}) && h.contains(Card{Suit: s, Rank: King}) && h.contains(Card{Suit: s, Rank: Ace}) {
return []Card{
Card{Suit: s, Rank: Ten},
Card{Suit: s, Rank: Jack},
Card{Suit: s, Rank: Queen},
Card{Suit: s, Rank: King},
Card{Suit: s, Rank: Ace},
}, true
}
return []Card(nil), false
}
//900-RoyalFlush
func isRoyalFlush(h Hand) ([]Card, bool) {
for suit := minSuit; suit < maxSuit; suit++ {
if cards, ok := checkRoyalFlushForSuit(h, suit); ok {
return cards, true
}
}
return nil, false
}
//800-StraightFlush
func isStraightFlush(h Hand) ([]Card, bool) {
if cards, ok := nOfSameSuit(h, 5); ok {
if areConsecutive(cards) {
return cards, true
} else if cards[len(cards)-1].Rank == Ace && cards[0].Rank == Two && areConsecutive(cards[0:4]) {
return cards, true
}
}
return nil, false
}
//700-FourOfAKind
func isFourOfAKind(h Hand) ([]Card, bool) {
return nOfSameRank(h, 4)
}
//600-FullHouse
func isFullHouse(h Hand) ([]Card, []Card, bool) {
cards3, ok3 := nOfSameRank(h, 3)
h = intersection(cards3, h)
cards2, ok2 := nOfSameRank(h, 2)
if ok2 && ok3 {
return cards3, cards2, true
}
return nil, nil, false
}
//500-Flush
func isFlush(h Hand) ([]Card, bool) {
return nOfSameSuit(h, 5)
}
//400-Straight
func isStraight(h Hand) ([]Card, bool) {
//first form slice of non-repeating cards
h = nonRepeatingCards(h)
//check size >= 5; if yes take the last 5 elements
if len(h) < 5 {
return nil, false
}
//check for Ace-case
if h[len(h)-1].Rank == Ace && h[0].Rank == Two && areConsecutive((h[:4])) {
return append([]Card{h[len(h)-1]}, h[0:4]...), true
}
//return the max possible hand
h = h[len(h)-5:]
//check if areConsecutive
if areConsecutive(h) {
return h, true
}
return nil, false
}
//300-ThreeOfAKind
func isThreeOfAKind(h Hand) ([]Card, bool) {
return nOfSameRank(h, 3)
}
//200-TwoPair
func isTwoPair(h Hand) ([]Card, []Card, bool) {
cards1, ok1 := nPair(h, 1)
h = intersection(cards1, h)
cards2, ok2 := nPair(h, 1)
if ok1 && ok2 {
return cards1, cards2, true
}
return nil, nil, false
}
//100-OnePair
func isOnePair(h Hand) ([]Card, bool) {
if cards, ok := nPair(h, 1); ok {
return cards, true
}
return nil, false
}
func Score(p Player) []Result {
sort.Slice(p.PocketCards, Less(p.PocketCards))
sort.Slice(p.CommunityCards, Less(p.CommunityCards))
ans := float64(0)
result := []Result(nil)
if cards, ok := isRoyalFlush(p.CommunityCards); ok {
ans = valuehandType[RoyalFlush]
result = append(result, Result{
HandType: symbolhandType[RoyalFlush],
Score: ans,
Cards: cards,
})
}
if cards, ok := isStraightFlush(p.CommunityCards); ok {
ans = valuehandType[StraightFlush] + normalizedScore(cards, 90)
result = append(result, Result{
HandType: symbolhandType[StraightFlush],
Score: ans,
Cards: cards,
})
}
if cards, ok := isFourOfAKind(p.CommunityCards); ok {
ans = valuehandType[FourOfAKind] + normalizedScore(cards, 90)
result = append(result, Result{
HandType: symbolhandType[FourOfAKind],
Score: ans,
Cards: cards,
})
}
if cards3, cards2, ok := isFullHouse(p.CommunityCards); ok {
ans = valuehandType[FullHouse] + normalizedScore(cards3, 60) + normalizedScore(cards2, 30)
handCards := []Card(nil)
handCards = append(handCards, cards3...)
handCards = append(handCards, cards2...)
result = append(result, Result{
HandType: symbolhandType[FullHouse],
Score: ans,
Cards: handCards,
})
}
if cards | for i, v := range perm {
newD[v] = d[i]
}
return newD
} | random_line_split | |
card.go | Hand) contains(card Card) bool {
for _, c := range h {
if (c.Suit == card.Suit) && (c.Rank == card.Rank) {
return true
}
}
return false
}
func nonRepeatingCards(h Hand) Hand {
var ret Hand
m := make(map[Rank]bool)
for _, card := range h {
if _, ok := m[card.Rank]; !ok {
ret = append(ret, card)
m[card.Rank] = true
}
}
return ret
}
func checkRoyalFlushForSuit(h Hand, s Suit) ([]Card, bool) {
if h.contains(Card{Suit: s, Rank: Ten}) && h.contains(Card{Suit: s, Rank: Jack}) && h.contains(Card{Suit: s, Rank: Queen}) && h.contains(Card{Suit: s, Rank: King}) && h.contains(Card{Suit: s, Rank: Ace}) {
return []Card{
Card{Suit: s, Rank: Ten},
Card{Suit: s, Rank: Jack},
Card{Suit: s, Rank: Queen},
Card{Suit: s, Rank: King},
Card{Suit: s, Rank: Ace},
}, true
}
return []Card(nil), false
}
//900-RoyalFlush
func isRoyalFlush(h Hand) ([]Card, bool) {
for suit := minSuit; suit < maxSuit; suit++ {
if cards, ok := checkRoyalFlushForSuit(h, suit); ok {
return cards, true
}
}
return nil, false
}
//800-StraightFlush
func isStraightFlush(h Hand) ([]Card, bool) {
if cards, ok := nOfSameSuit(h, 5); ok {
if areConsecutive(cards) {
return cards, true
} else if cards[len(cards)-1].Rank == Ace && cards[0].Rank == Two && areConsecutive(cards[0:4]) {
return cards, true
}
}
return nil, false
}
//700-FourOfAKind
func isFourOfAKind(h Hand) ([]Card, bool) {
return nOfSameRank(h, 4)
}
//600-FullHouse
func isFullHouse(h Hand) ([]Card, []Card, bool) {
cards3, ok3 := nOfSameRank(h, 3)
h = intersection(cards3, h)
cards2, ok2 := nOfSameRank(h, 2)
if ok2 && ok3 {
return cards3, cards2, true
}
return nil, nil, false
}
//500-Flush
func isFlush(h Hand) ([]Card, bool) {
return nOfSameSuit(h, 5)
}
//400-Straight
func isStraight(h Hand) ([]Card, bool) {
//first form slice of non-repeating cards
h = nonRepeatingCards(h)
//check size >= 5; if yes take the last 5 elements
if len(h) < 5 {
return nil, false
}
//check for Ace-case
if h[len(h)-1].Rank == Ace && h[0].Rank == Two && areConsecutive((h[:4])) {
return append([]Card{h[len(h)-1]}, h[0:4]...), true
}
//return the max possible hand
h = h[len(h)-5:]
//check if areConsecutive
if areConsecutive(h) {
return h, true
}
return nil, false
}
//300-ThreeOfAKind
func isThreeOfAKind(h Hand) ([]Card, bool) {
return nOfSameRank(h, 3)
}
//200-TwoPair
func isTwoPair(h Hand) ([]Card, []Card, bool) {
cards1, ok1 := nPair(h, 1)
h = intersection(cards1, h)
cards2, ok2 := nPair(h, 1)
if ok1 && ok2 {
return cards1, cards2, true
}
return nil, nil, false
}
//100-OnePair
func isOnePair(h Hand) ([]Card, bool) {
if cards, ok := nPair(h, 1); ok {
return cards, true
}
return nil, false
}
func Score(p Player) []Result {
sort.Slice(p.PocketCards, Less(p.PocketCards))
sort.Slice(p.CommunityCards, Less(p.CommunityCards))
ans := float64(0)
result := []Result(nil)
if cards, ok := isRoyalFlush(p.CommunityCards); ok {
ans = valuehandType[RoyalFlush]
result = append(result, Result{
HandType: symbolhandType[RoyalFlush],
Score: ans,
Cards: cards,
})
}
if cards, ok := isStraightFlush(p.CommunityCards); ok {
ans = valuehandType[StraightFlush] + normalizedScore(cards, 90)
result = append(result, Result{
HandType: symbolhandType[StraightFlush],
Score: ans,
Cards: cards,
})
}
if cards, ok := isFourOfAKind(p.CommunityCards); ok {
ans = valuehandType[FourOfAKind] + normalizedScore(cards, 90)
result = append(result, Result{
HandType: symbolhandType[FourOfAKind],
Score: ans,
Cards: cards,
})
}
if cards3, cards2, ok := isFullHouse(p.CommunityCards); ok {
ans = valuehandType[FullHouse] + normalizedScore(cards3, 60) + normalizedScore(cards2, 30)
handCards := []Card(nil)
handCards = append(handCards, cards3...)
handCards = append(handCards, cards2...)
result = append(result, Result{
HandType: symbolhandType[FullHouse],
Score: ans,
Cards: handCards,
})
}
if cards, ok := isFlush(p.CommunityCards); ok {
ans = valuehandType[Flush] + normalizedScore(cards, 90)
result = append(result, Result{
HandType: symbolhandType[Flush],
Score: ans,
Cards: cards,
})
}
if cards, ok := isStraight(p.CommunityCards); ok {
ans = valuehandType[Straight] + normalizedScore(cards, 90)
result = append(result, Result{
HandType: symbolhandType[Straight],
Score: ans,
Cards: cards,
})
}
if cards, ok := isThreeOfAKind(p.CommunityCards); ok {
ans = valuehandType[ThreeOfAKind] + normalizedScore(cards, 90)
result = append(result, Result{
HandType: symbolhandType[ThreeOfAKind],
Score: ans,
Cards: cards,
})
}
if cards1, cards2, ok := isTwoPair(p.CommunityCards); ok {
ans = valuehandType[TwoPair] + normalizedScore(cards1, 45) + normalizedScore(cards2, 45)
handCards := []Card(nil)
handCards = append(handCards, cards1...)
handCards = append(handCards, cards2...)
result = append(result, Result{
HandType: symbolhandType[TwoPair],
Score: ans,
Cards: handCards,
})
}
if cards, ok := isOnePair(p.CommunityCards); ok {
ans = valuehandType[OnePair] + normalizedScore(cards, 90)
result = append(result, Result{
HandType: symbolhandType[OnePair],
Score: ans,
Cards: cards,
})
}
// ans += normalizedScore(p.PocketCards, 10)
ans = normalizedScore(p.PocketCards, 10) //Change of approach, might be reverted
result = append(result, Result{
HandType: symbolhandType[HighCard],
Score: ans,
Cards: []Card{p.PocketCards[1]},
})
result = append(result, Result{
HandType: symbolhandType[HighCard],
Score: ans,
Cards: []Card{p.PocketCards[0]},
})
return result
}
func GetWinner(p1, p2 Player, table Hand) (int, string, []Card) {
p1.CommunityCards = append(p1.PocketCards, table...)
p2.CommunityCards = append(p2.PocketCards, table...)
r1 := Score(p1)
r2 := Score(p2)
for len(r1) > 0 && len(r2) > 0 && r1[0].Score == r2[0].Score {
r1 = r1[1:]
r2 = r2[1:]
}
if r1[0].Score > r2[0].Score {
retu | rn 1, r1[0].HandType, r1[0].Cards
} else {
| conditional_block | |
card.go | ...)
return hand, shuffle(deck[n:])
}
func Less(cards []Card) func(i, j int) bool {
return func(i, j int) bool {
return cards[i].Rank < cards[j].Rank
}
}
func areConsecutive(cards []Card) bool {
for i := 0; i < len(cards)-1; i++ {
if cards[i+1].Rank-cards[i].Rank != 1 {
return false
}
}
return true
}
func intersection(a, b []Card) []Card { // NOTE: despite the name, this returns the cards of `b` that are not in `a` (a set difference); pass the slice to subtract in `a`
m := make(map[Card]bool)
c := []Card(nil)
for _, item := range a {
m[item] = true
}
for _, item := range b {
if _, ok := m[item]; !ok {
c = append(c, item)
}
}
return c
}
func nOfSameSuit(h Hand, n int) ([]Card, bool) {
m := make(map[Suit][]Card)
for i := len(h) - 1; i >= 0; i-- {
m[h[i].Suit] = append(m[h[i].Suit], h[i])
if len(m[h[i].Suit]) == n {
sort.Slice(m[h[i].Suit], Less(m[h[i].Suit]))
return m[h[i].Suit], true
}
}
return nil, false
}
func nOfSameRank(h Hand, n int) ([]Card, bool) {
m := make(map[Rank][]Card)
for i := len(h) - 1; i >= 0; i-- {
m[h[i].Rank] = append(m[h[i].Rank], h[i])
if len(m[h[i].Rank]) == n {
sort.Slice(m[h[i].Rank], Less(m[h[i].Rank]))
return m[h[i].Rank], true
}
}
return nil, false
}
func nPair(h Hand, n int) ([]Card, bool) {
ret := []Card(nil)
cnt := 0
for i := len(h) - 1; i >= 1; i-- {
if h[i].Rank == h[i-1].Rank {
cnt++
ret = append(ret, h[i], h[i-1])
i--
if cnt == n {
return ret, true
}
} else {
cnt = 0
ret = nil
}
}
return nil, false
}
func normalizedScore(cards []Card, n int) float64 {
cardScore, normalizer := float64(0), float64(0)
for i, c := range cards {
cardScore += (math.Pow(13, float64(i)) * (float64(c.Rank)))
normalizer += math.Pow(13, float64(i)) * 14
}
return float64((cardScore * float64(n)) / normalizer)
}
func (h Hand) contains(card Card) bool {
for _, c := range h {
if (c.Suit == card.Suit) && (c.Rank == card.Rank) {
return true
}
}
return false
}
func nonRepeatingCards(h Hand) Hand {
var ret Hand
m := make(map[Rank]bool)
for _, card := range h {
if _, ok := m[card.Rank]; !ok {
ret = append(ret, card)
m[card.Rank] = true
}
}
return ret
}
func checkRoyalFlushForSuit(h Hand, s Suit) ([]Card, bool) {
if h.contains(Card{Suit: s, Rank: Ten}) && h.contains(Card{Suit: s, Rank: Jack}) && h.contains(Card{Suit: s, Rank: Queen}) && h.contains(Card{Suit: s, Rank: King}) && h.contains(Card{Suit: s, Rank: Ace}) {
return []Card{
Card{Suit: s, Rank: Ten},
Card{Suit: s, Rank: Jack},
Card{Suit: s, Rank: Queen},
Card{Suit: s, Rank: King},
Card{Suit: s, Rank: Ace},
}, true
}
return []Card(nil), false
}
//900-RoyalFlush
func isRoyalFlush(h Hand) ([]Card, bool) {
for suit := minSuit; suit < maxSuit; suit++ {
if cards, ok := checkRoyalFlushForSuit(h, suit); ok {
return cards, true
}
}
return nil, false
}
//800-StraightFlush
func isStraightFlush(h Hand) ([]Card, bool) {
if cards, ok := nOfSameSuit(h, 5); ok {
if areConsecutive(cards) {
return cards, true
} else if cards[len(cards)-1].Rank == Ace && cards[0].Rank == Two && areConsecutive(cards[0:4]) {
return cards, true
}
}
return nil, false
}
//700-FourOfAKind
func isFourOfAKind(h Hand) ([]Card, bool) {
return nOfSameRank(h, 4)
}
//600-FullHouse
func isFullHo | ([]Card, []Card, bool) {
cards3, ok3 := nOfSameRank(h, 3)
h = intersection(cards3, h)
cards2, ok2 := nOfSameRank(h, 2)
if ok2 && ok3 {
return cards3, cards2, true
}
return nil, nil, false
}
//500-Flush
func isFlush(h Hand) ([]Card, bool) {
return nOfSameSuit(h, 5)
}
//400-Straight
func isStraight(h Hand) ([]Card, bool) {
//first form slice of non-repeating cards
h = nonRepeatingCards(h)
//check size >= 5; if yes take the last 5 elements
if len(h) < 5 {
return nil, false
}
//check for Ace-case
if h[len(h)-1].Rank == Ace && h[0].Rank == Two && areConsecutive((h[:4])) {
return append([]Card{h[len(h)-1]}, h[0:4]...), true
}
//return the max possible hand
h = h[len(h)-5:]
//check if areConsecutive
if areConsecutive(h) {
return h, true
}
return nil, false
}
//300-ThreeOfAKind
func isThreeOfAKind(h Hand) ([]Card, bool) {
return nOfSameRank(h, 3)
}
//200-TwoPair
func isTwoPair(h Hand) ([]Card, []Card, bool) {
cards1, ok1 := nPair(h, 1)
h = intersection(cards1, h)
cards2, ok2 := nPair(h, 1)
if ok1 && ok2 {
return cards1, cards2, true
}
return nil, nil, false
}
//100-OnePair
func isOnePair(h Hand) ([]Card, bool) {
if cards, ok := nPair(h, 1); ok {
return cards, true
}
return nil, false
}
func Score(p Player) []Result {
sort.Slice(p.PocketCards, Less(p.PocketCards))
sort.Slice(p.CommunityCards, Less(p.CommunityCards))
ans := float64(0)
result := []Result(nil)
if cards, ok := isRoyalFlush(p.CommunityCards); ok {
ans = valuehandType[RoyalFlush]
result = append(result, Result{
HandType: symbolhandType[RoyalFlush],
Score: ans,
Cards: cards,
})
}
if cards, ok := isStraightFlush(p.CommunityCards); ok {
ans = valuehandType[StraightFlush] + normalizedScore(cards, 90)
result = append(result, Result{
HandType: symbolhandType[StraightFlush],
Score: ans,
Cards: cards,
})
}
if cards, ok := isFourOfAKind(p.CommunityCards); ok {
ans = valuehandType[FourOfAKind] + normalizedScore(cards, 90)
result = append(result, Result{
HandType: symbolhandType[FourOfAKind],
Score: ans,
Cards: cards,
})
}
if cards3, cards2, ok := isFullHouse(p.CommunityCards); ok {
ans = valuehandType[FullHouse] + normalizedScore(cards3, 60) + normalizedScore(cards2, 30)
handCards := []Card(nil)
handCards = append(handCards, cards3...)
handCards = append(handCards, cards2...)
result = append(result, Result{
HandType: symbolhandType[FullHouse],
Score: ans,
Cards: handCards,
})
}
if cards, ok := isFlush(p.CommunityCards); ok {
ans = valuehandType[Flush] + normalizedScore(cards, 90)
result = append(result, Result{
HandType: symbolhandType[Flush],
Score: ans,
| use(h Hand) | identifier_name |
card.go | ...)
return hand, shuffle(deck[n:])
}
func Less(cards []Card) func(i, j int) bool {
return func(i, j int) bool {
return cards[i].Rank < cards[j].Rank
}
}
func areConsecutive(cards []Card) bool {
for i := 0; i < len(cards)-1; i++ {
if cards[i+1].Rank-cards[i].Rank != 1 {
return false
}
}
return true
}
func intersection(a, b []Card) []Card { // NOTE: despite the name, this returns the cards of `b` that are not in `a` (a set difference); pass the slice to subtract in `a`
m := make(map[Card]bool)
c := []Card(nil)
for _, item := range a {
m[item] = true
}
for _, item := range b {
if _, ok := m[item]; !ok {
c = append(c, item)
}
}
return c
}
func nOfSameSuit(h Hand, n int) ([]Card, bool) {
m := | OfSameRank(h Hand, n int) ([]Card, bool) {
m := make(map[Rank][]Card)
for i := len(h) - 1; i >= 0; i-- {
m[h[i].Rank] = append(m[h[i].Rank], h[i])
if len(m[h[i].Rank]) == n {
sort.Slice(m[h[i].Rank], Less(m[h[i].Rank]))
return m[h[i].Rank], true
}
}
return nil, false
}
func nPair(h Hand, n int) ([]Card, bool) {
ret := []Card(nil)
cnt := 0
for i := len(h) - 1; i >= 1; i-- {
if h[i].Rank == h[i-1].Rank {
cnt++
ret = append(ret, h[i], h[i-1])
i--
if cnt == n {
return ret, true
}
} else {
cnt = 0
ret = nil
}
}
return nil, false
}
func normalizedScore(cards []Card, n int) float64 {
cardScore, normalizer := float64(0), float64(0)
for i, c := range cards {
cardScore += (math.Pow(13, float64(i)) * (float64(c.Rank)))
normalizer += math.Pow(13, float64(i)) * 14
}
return float64((cardScore * float64(n)) / normalizer)
}
func (h Hand) contains(card Card) bool {
for _, c := range h {
if (c.Suit == card.Suit) && (c.Rank == card.Rank) {
return true
}
}
return false
}
func nonRepeatingCards(h Hand) Hand {
var ret Hand
m := make(map[Rank]bool)
for _, card := range h {
if _, ok := m[card.Rank]; !ok {
ret = append(ret, card)
m[card.Rank] = true
}
}
return ret
}
func checkRoyalFlushForSuit(h Hand, s Suit) ([]Card, bool) {
if h.contains(Card{Suit: s, Rank: Ten}) && h.contains(Card{Suit: s, Rank: Jack}) && h.contains(Card{Suit: s, Rank: Queen}) && h.contains(Card{Suit: s, Rank: King}) && h.contains(Card{Suit: s, Rank: Ace}) {
return []Card{
Card{Suit: s, Rank: Ten},
Card{Suit: s, Rank: Jack},
Card{Suit: s, Rank: Queen},
Card{Suit: s, Rank: King},
Card{Suit: s, Rank: Ace},
}, true
}
return []Card(nil), false
}
//900-RoyalFlush
func isRoyalFlush(h Hand) ([]Card, bool) {
for suit := minSuit; suit < maxSuit; suit++ {
if cards, ok := checkRoyalFlushForSuit(h, suit); ok {
return cards, true
}
}
return nil, false
}
//800-StraightFlush
func isStraightFlush(h Hand) ([]Card, bool) {
if cards, ok := nOfSameSuit(h, 5); ok {
if areConsecutive(cards) {
return cards, true
} else if cards[len(cards)-1].Rank == Ace && cards[0].Rank == Two && areConsecutive(cards[0:4]) {
return cards, true
}
}
return nil, false
}
//700-FourOfAKind
func isFourOfAKind(h Hand) ([]Card, bool) {
return nOfSameRank(h, 4)
}
//600-FullHouse
func isFullHouse(h Hand) ([]Card, []Card, bool) {
cards3, ok3 := nOfSameRank(h, 3)
h = intersection(cards3, h)
cards2, ok2 := nOfSameRank(h, 2)
if ok2 && ok3 {
return cards3, cards2, true
}
return nil, nil, false
}
//500-Flush
func isFlush(h Hand) ([]Card, bool) {
return nOfSameSuit(h, 5)
}
//400-Straight
func isStraight(h Hand) ([]Card, bool) {
//first form slice of non-repeating cards
h = nonRepeatingCards(h)
//check size >= 5; if yes take the last 5 elements
if len(h) < 5 {
return nil, false
}
//check for Ace-case
if h[len(h)-1].Rank == Ace && h[0].Rank == Two && areConsecutive((h[:4])) {
return append([]Card{h[len(h)-1]}, h[0:4]...), true
}
//return the max possible hand
h = h[len(h)-5:]
//check if areConsecutive
if areConsecutive(h) {
return h, true
}
return nil, false
}
//300-ThreeOfAKind
func isThreeOfAKind(h Hand) ([]Card, bool) {
return nOfSameRank(h, 3)
}
//200-TwoPair
func isTwoPair(h Hand) ([]Card, []Card, bool) {
cards1, ok1 := nPair(h, 1)
h = intersection(cards1, h)
cards2, ok2 := nPair(h, 1)
if ok1 && ok2 {
return cards1, cards2, true
}
return nil, nil, false
}
//100-OnePair
func isOnePair(h Hand) ([]Card, bool) {
if cards, ok := nPair(h, 1); ok {
return cards, true
}
return nil, false
}
func Score(p Player) []Result {
sort.Slice(p.PocketCards, Less(p.PocketCards))
sort.Slice(p.CommunityCards, Less(p.CommunityCards))
ans := float64(0)
result := []Result(nil)
if cards, ok := isRoyalFlush(p.CommunityCards); ok {
ans = valuehandType[RoyalFlush]
result = append(result, Result{
HandType: symbolhandType[RoyalFlush],
Score: ans,
Cards: cards,
})
}
if cards, ok := isStraightFlush(p.CommunityCards); ok {
ans = valuehandType[StraightFlush] + normalizedScore(cards, 90)
result = append(result, Result{
HandType: symbolhandType[StraightFlush],
Score: ans,
Cards: cards,
})
}
if cards, ok := isFourOfAKind(p.CommunityCards); ok {
ans = valuehandType[FourOfAKind] + normalizedScore(cards, 90)
result = append(result, Result{
HandType: symbolhandType[FourOfAKind],
Score: ans,
Cards: cards,
})
}
if cards3, cards2, ok := isFullHouse(p.CommunityCards); ok {
ans = valuehandType[FullHouse] + normalizedScore(cards3, 60) + normalizedScore(cards2, 30)
handCards := []Card(nil)
handCards = append(handCards, cards3...)
handCards = append(handCards, cards2...)
result = append(result, Result{
HandType: symbolhandType[FullHouse],
Score: ans,
Cards: handCards,
})
}
if cards, ok := isFlush(p.CommunityCards); ok {
ans = valuehandType[Flush] + normalizedScore(cards, 90)
result = append(result, Result{
HandType: symbolhandType[Flush],
Score: ans,
| make(map[Suit][]Card)
for i := len(h) - 1; i >= 0; i-- {
m[h[i].Suit] = append(m[h[i].Suit], h[i])
if len(m[h[i].Suit]) == n {
sort.Slice(m[h[i].Suit], Less(m[h[i].Suit]))
return m[h[i].Suit], true
}
}
return nil, false
}
func n | identifier_body |
loading.rs | const CARD_TITLE_FONT: &'static str = "Teko-Regular.ttf";
pub const CARD_BACKGROUND_IMG: &'static str = "card_bg.png";
#[derive(Derivative, Default)]
#[derivative(Debug)]
pub struct Assets {
#[derivative(Debug = "ignore")]
pub fonts: HashMap<String, Box<Font>>, // we borrow fonts to create new data: there's no reason to hold it
pub images: HashMap<String, Rc<Image>>, // UI cards do hold reference to images
}
type CardFactory = HashMap<String, Card>;
fn cards_by_counts(factory: &CardFactory, counts: HashMap<String, usize>) -> Vec<Card> {
counts.iter()
.flat_map(|(key, num)| iter::repeat(key).take(*num))
.filter_map(|key| factory.get(key))
.cloned()
.collect()
}
fn | (json: &serde_json::value::Value, node_name: &str, card_factory: &CardFactory) -> Deck {
let deck_node = {
json.get(node_name)
.expect(format!("Deck node \"{}\" not found", node_name).as_str())
.clone()
};
let data: HashMap<String, usize> = serde_json::from_value(deck_node)
.expect("Malformed deck list");
let mut deck = Deck::from(cards_by_counts(card_factory, data));
deck.shuffle();
deck
}
fn parse_store(zone: BoardZone, json: &serde_json::value::Value, node: &str, factory: &CardFactory) -> Store {
let source_node = json.get(node).expect(format!("store node {} not found", node).as_str()).clone();
let store_type: StoreType = serde_json::from_value(source_node).expect("Malformed store description");
match store_type.clone() {
StoreType::Fixed { items } => {
let cards = items.iter()
.filter_map(|name| factory.get(name))
.map(|card| card.clone())
.collect();
Store {
store_type: store_type,
menu: CardContainer { zone: zone, cards: cards, size: None },
deck: None,
}
}
StoreType::Drafted { size, from_deck } => {
let mut deck = parse_deck(json, &from_deck, factory);
deck.shuffle();
let cards = (0..size).filter_map(|_| deck.draw()).collect();
Store {
store_type: store_type,
menu: CardContainer { zone: zone, cards: cards, size: Some(size) },
deck: Some(Box::new(deck)),
}
}
}
}
fn container_counts(zone: BoardZone, json: &serde_json::value::Value, node: &str, factory: &CardFactory) -> CardContainer {
let source_node = json.get(node).expect(format!("count node {} not found", node).as_str()).clone();
let data: HashMap<String, usize> = serde_json::from_value(source_node)
.expect("Malformed node");
CardContainer {
zone: zone,
cards: cards_by_counts(factory, data),
size: None,
}
}
pub fn load_players(json: &serde_json::Value) -> Vec<Player> {
let player_node = json.get("players")
.expect("file should have \"players\" node.")
.clone();
let mut players: Vec<Player> = serde_json::from_value(player_node)
.expect("Malformed player node");
let game_type = json.get("game_type")
.expect("game type not specified")
.as_str()
.expect("game type not string");
match game_type.to_lowercase().as_str() {
"vs" => {
assert_eq!(players.len(), 2, "For VS game, only 2 players are possible");
players[0].opponent_idx = 1;
players[1].opponent_idx = 0;
},
_ => panic!("Unknown game type")
}
players
}
pub fn load_board(json: &serde_json::Value, card_factory: &CardFactory, player: Player) -> BoardState {
let store_node = "build_store";
let trade_row = "kaiju_store";
let hand_size = 5;
let draw_deck = parse_deck(&json, &player.starting_deck, card_factory);
//let bs_node = { json.get("build_store").expect("build_store node not found").clone() };
let build_store = parse_store(BoardZone::BuildStore, &json, store_node, card_factory);
//let ks_node = { json.get("kaiju_store").expect("kaiju_store node not found").clone() };
let kaiju_store = parse_store(BoardZone::KaijuStore, &json, trade_row, card_factory);
let hand = CardContainer::new_sized(BoardZone::Hand, hand_size);
let buildings = container_counts(BoardZone::Buildings, &json, &player.starting_buildings, card_factory);
let kaiju = CardContainer::new(BoardZone::Kaiju);
let ai = match player.control {
PlayerControl::Human => None,
PlayerControl::AI => Some(AI::new())
};
println!("Loading done");
BoardState {
player: player,
turn: 1,
hand: Box::new(hand),
deck: Box::new(draw_deck),
globals: NumberMap::new(),
stores: Box::new(vec!(build_store, kaiju_store)),
buildings: Box::new(buildings),
kaiju_zone: Box::new(kaiju),
ai: ai
}
}
/// Loading state: loads all assets to memory and passes them to GameplayState.
///
/// The asset loading in Quicksilver (as described in tutorial) is awkward: it requires conditional
/// execution whenever any asset is used. As we don't have large amount of data, it is more ergonomic
/// to just load them all to RAM and use them directly.
///
/// Loading in Quicksilver is internally done using Futures (that can, but don't have to
/// be wrapped in Assets). Futures can be nested using combinators (that themselves are Futures).
/// Every Future has a poll() method that returns Async::NotReady when it is not yet done, and
/// Async::Ready when its data are ready (i. e. loading is done).
/// It must not be called afterwards: it would panic.
///
/// It turns out this is perfect fit for our application: we combine all assets into single Future,
/// hook it into our event loop, polling it every update, while drawing a loading screen. When it
/// becomes ready, we construct a new State, pass it all the assets extracted from the Future and continue.
///
/// Sadly, it is complicated by the fact that Quicksilver re-exports Future trait and combinators, but
/// not the Async enum. As this enum comes from "futures" crate, we just install it in the exact same
/// version that Quicksilver uses and use that.
#[derive(Derivative, Default)]
#[derivative(Debug)]
pub struct LoadingState {
board_states: Vec<BoardState>,
image_names: Vec<String>,
font_names: Vec<String>,
#[derivative(Debug = "ignore")]
// Option just to get Default
loading: Option<
Join<
JoinAll<
Vec<Box<dyn Future<Item=Font, Error=QuicksilverError>>>
>,
JoinAll<
Vec<Box<dyn Future<Item=Image, Error=QuicksilverError>>>
>
>
>,
}
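// A minimal sketch (added for illustration, not part of the original file) of the
// polling pattern the doc comment above describes: poll the joined future once per
// update tick and, when it resolves, move the loaded fonts and images into an
// `Assets` map. `Async` comes from the `futures` crate pinned to Quicksilver's
// version, as explained above; the helper name `poll_assets` is an assumption.
impl LoadingState {
    fn poll_assets(&mut self) -> Option<Assets> {
        let polled = match self.loading.as_mut()?.poll() {
            Ok(Async::Ready(done)) => Some(done),
            Ok(Async::NotReady) => None, // still loading: keep drawing the loading screen
            Err(e) => panic!("asset loading failed: {:?}", e),
        };
        let (fonts, images) = polled?;
        self.loading = None; // a resolved future must not be polled again
        let mut assets = Assets::default();
        for (name, font) in self.font_names.iter().zip(fonts) {
            assets.fonts.insert(name.clone(), Box::new(font));
        }
        for (name, image) in self.image_names.iter().zip(images) {
            assets.images.insert(name.clone(), Rc::new(image));
        }
        Some(assets) // caller can now construct the gameplay state with these assets
    }
}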
impl LoadingState {
pub fn new() -> Box<Self> {
let font_names = vec![CARD_TITLE_FONT.to_string()];
let file = load_file("cards_expanded.json")
.wait()
.expect("file should open read only"); // TODO: do this asynchronously, too
let json: serde_json::Value = serde_json::from_slice(file.as_slice())
.expect("file should be proper JSON");
let cards: CardFactory = serde_json::from_value(
json.get("cards").expect("file should have \"cards\" node").clone()
).expect("malformed card list");
let mut image_names = cards.values()
.map(|v| v.image.clone())
.unique()
.collect::<Vec<String>>();
image_names.push(CARD_BACKGROUND_IMG.to_string());
println!("Loading fonts {:?} and images: {:?}", font_names, image_names);
let loading_images = join_all(
font_names.iter()
.map(|i| Box::new(Font::load(i.clone())) as Box<dyn Future<Item=Font, Error=QuicksilverError>>)
.collect::<Vec<Box<_>>>()
).join(
join_all(
image_names.iter()
.map(|i| Box::new(Image::load(i.clone())) as Box<dyn Future<Item=Image, Error=QuicksilverError>>)
.collect::<Vec<Box<_>>>()
)
);
let players = load_players(&json);
let board_states = players.iter()
.map(|p| load_board(&json, &cards, p.clone()))
.collect();
//let board_state = load_board(json);
Box::new(Self {
board_states,
image_names,
font_names,
loading: Some(loading_images),
})
}
}
impl AutomatonState for LoadingState {
fn event(&mut self, event: GameEvent) -> Box<dyn AutomatonState> {
Box::new(take(self))
}
fn update(&mut | parse_deck | identifier_name |
loading.rs | const CARD_TITLE_FONT: &'static str = "Teko-Regular.ttf";
pub const CARD_BACKGROUND_IMG: &'static str = "card_bg.png";
#[derive(Derivative, Default)]
#[derivative(Debug)]
pub struct Assets {
#[derivative(Debug = "ignore")]
pub fonts: HashMap<String, Box<Font>>, // we borrow fonts to create new data: there's no reason to hold it
pub images: HashMap<String, Rc<Image>>, // UI cards do hold reference to images
}
type CardFactory = HashMap<String, Card>;
fn cards_by_counts(factory: &CardFactory, counts: HashMap<String, usize>) -> Vec<Card> {
counts.iter()
.flat_map(|(key, num)| iter::repeat(key).take(*num))
.filter_map(|key| factory.get(key))
.cloned()
.collect()
}
fn parse_deck(json: &serde_json::value::Value, node_name: &str, card_factory: &CardFactory) -> Deck {
let deck_node = {
json.get(node_name)
.expect(format!("Deck node \"{}\" not found", node_name).as_str())
.clone()
};
let data: HashMap<String, usize> = serde_json::from_value(deck_node)
.expect("Malformed deck list");
let mut deck = Deck::from(cards_by_counts(card_factory, data));
deck.shuffle();
deck
}
fn parse_store(zone: BoardZone, json: &serde_json::value::Value, node: &str, factory: &CardFactory) -> Store {
let source_node = json.get(node).expect(format!("store node {} not found", node).as_str()).clone();
let store_type: StoreType = serde_json::from_value(source_node).expect("Malformed store description");
match store_type.clone() {
StoreType::Fixed { items } => {
let cards = items.iter()
.filter_map(|name| factory.get(name))
.map(|card| card.clone())
.collect();
Store {
store_type: store_type,
menu: CardContainer { zone: zone, cards: cards, size: None },
deck: None,
}
}
StoreType::Drafted { size, from_deck } => {
let mut deck = parse_deck(json, &from_deck, factory);
deck.shuffle();
let cards = (0..size).filter_map(|_| deck.draw()).collect();
Store {
store_type: store_type,
menu: CardContainer { zone: zone, cards: cards, size: Some(size) },
deck: Some(Box::new(deck)),
}
}
}
}
fn container_counts(zone: BoardZone, json: &serde_json::value::Value, node: &str, factory: &CardFactory) -> CardContainer {
let source_node = json.get(node).expect(format!("count node {} not found", node).as_str()).clone();
let data: HashMap<String, usize> = serde_json::from_value(source_node)
.expect("Malformed node");
CardContainer {
zone: zone,
cards: cards_by_counts(factory, data),
size: None,
}
}
pub fn load_players(json: &serde_json::Value) -> Vec<Player> {
let player_node = json.get("players")
.expect("file should have \"players\" node.")
.clone();
let mut players: Vec<Player> = serde_json::from_value(player_node)
.expect("Malformed player node");
let game_type = json.get("game_type") |
match game_type.to_lowercase().as_str() {
"vs" => {
assert_eq!(players.len(), 2, "For VS game, only 2 players are possible");
players[0].opponent_idx = 1;
players[1].opponent_idx = 0;
},
_ => panic!("Unknown game type")
}
players
}
pub fn load_board(json: &serde_json::Value, card_factory: &CardFactory, player: Player) -> BoardState {
let store_node = "build_store";
let trade_row = "kaiju_store";
let hand_size = 5;
let draw_deck = parse_deck(&json, &player.starting_deck, card_factory);
//let bs_node = { json.get("build_store").expect("build_store node not found").clone() };
let build_store = parse_store(BoardZone::BuildStore, &json, store_node, card_factory);
//let ks_node = { json.get("kaiju_store").expect("kaiju_store node not found").clone() };
let kaiju_store = parse_store(BoardZone::KaijuStore, &json, trade_row, card_factory);
let hand = CardContainer::new_sized(BoardZone::Hand, hand_size);
let buildings = container_counts(BoardZone::Buildings, &json, &player.starting_buildings, card_factory);
let kaiju = CardContainer::new(BoardZone::Kaiju);
let ai = match player.control {
PlayerControl::Human => None,
PlayerControl::AI => Some(AI::new())
};
println!("Loading done");
BoardState {
player: player,
turn: 1,
hand: Box::new(hand),
deck: Box::new(draw_deck),
globals: NumberMap::new(),
stores: Box::new(vec!(build_store, kaiju_store)),
buildings: Box::new(buildings),
kaiju_zone: Box::new(kaiju),
ai: ai
}
}
/// Loading state: loads all assets to memory and passes them to GameplayState.
///
/// The asset loading in Quicksilver (as described in tutorial) is awkward: it requires conditional
/// execution whenever any asset is used. As we don't have large amount of data, it is more ergonomic
/// to just load them all to RAM and use them directly.
///
/// Loading in Quicksilver is internally done using Futures (that can, but don't have to
/// be wrapped in Assets). Futures can be nested using combinators (that themselves are Futures).
/// Every Future has a poll() method that returns Async::NotReady when it is not yet done, and
/// Async::Ready when its data are ready (i. e. loading is done).
/// It must not be called afterwards: it would panic.
///
/// It turns out this is perfect fit for our application: we combine all assets into single Future,
/// hook it into our event loop, polling it every update, while drawing a loading screen. When it
/// becomes ready, we construct a new State, pass it all the assets extracted from the Future and continue.
///
/// Sadly, it is complicated by the fact that Quicksilver re-exports Future trait and combinators, but
/// not the Async enum. As this enum comes from "futures" crate, we just install it in the exact same
/// version that Quicksilver uses and use that.
#[derive(Derivative, Default)]
#[derivative(Debug)]
pub struct LoadingState {
board_states: Vec<BoardState>,
image_names: Vec<String>,
font_names: Vec<String>,
#[derivative(Debug = "ignore")]
// Option just to get Default
loading: Option<
Join<
JoinAll<
Vec<Box<dyn Future<Item=Font, Error=QuicksilverError>>>
>,
JoinAll<
Vec<Box<dyn Future<Item=Image, Error=QuicksilverError>>>
>
>
>,
}
impl LoadingState {
pub fn new() -> Box<Self> {
let font_names = vec![CARD_TITLE_FONT.to_string()];
let file = load_file("cards_expanded.json")
.wait()
.expect("file should open read only"); // TODO: do this asynchronously, too
let json: serde_json::Value = serde_json::from_slice(file.as_slice())
.expect("file should be proper JSON");
let cards: CardFactory = serde_json::from_value(
json.get("cards").expect("file should have \"cards\" node").clone()
).expect("malformed card list");
let mut image_names = cards.values()
.map(|v| v.image.clone())
.unique()
.collect::<Vec<String>>();
image_names.push(CARD_BACKGROUND_IMG.to_string());
println!("Loading fonts {:?} and images: {:?}", font_names, image_names);
let loading_images = join_all(
font_names.iter()
.map(|i| Box::new(Font::load(i.clone())) as Box<dyn Future<Item=Font, Error=QuicksilverError>>)
.collect::<Vec<Box<_>>>()
).join(
join_all(
image_names.iter()
.map(|i| Box::new(Image::load(i.clone())) as Box<dyn Future<Item=Image, Error=QuicksilverError>>)
.collect::<Vec<Box<_>>>()
)
);
let players = load_players(&json);
let board_states = players.iter()
.map(|p| load_board(&json, &cards, p.clone()))
.collect();
//let board_state = load_board(json);
Box::new(Self {
board_states,
image_names,
font_names,
loading: Some(loading_images),
})
}
}
impl AutomatonState for LoadingState {
fn event(&mut self, event: GameEvent) -> Box<dyn AutomatonState> {
Box::new(take(self))
}
fn update(&mut self | .expect("game type not specified")
.as_str()
.expect("game type not string"); | random_line_split |
loading.rs | const CARD_TITLE_FONT: &'static str = "Teko-Regular.ttf";
pub const CARD_BACKGROUND_IMG: &'static str = "card_bg.png";
#[derive(Derivative, Default)]
#[derivative(Debug)]
pub struct Assets {
#[derivative(Debug = "ignore")]
pub fonts: HashMap<String, Box<Font>>, // we borrow fonts to create new data: there's no reason to hold it
pub images: HashMap<String, Rc<Image>>, // UI cards do hold reference to images
}
type CardFactory = HashMap<String, Card>;
fn cards_by_counts(factory: &CardFactory, counts: HashMap<String, usize>) -> Vec<Card> {
counts.iter()
.flat_map(|(key, num)| iter::repeat(key).take(*num))
.filter_map(|key| factory.get(key))
.cloned()
.collect()
}
fn parse_deck(json: &serde_json::value::Value, node_name: &str, card_factory: &CardFactory) -> Deck {
let deck_node = {
json.get(node_name)
.expect(format!("Deck node \"{}\" not found", node_name).as_str())
.clone()
};
let data: HashMap<String, usize> = serde_json::from_value(deck_node)
.expect("Malformed deck list");
let mut deck = Deck::from(cards_by_counts(card_factory, data));
deck.shuffle();
deck
}
fn parse_store(zone: BoardZone, json: &serde_json::value::Value, node: &str, factory: &CardFactory) -> Store {
let source_node = json.get(node).expect(format!("store node {} not found", node).as_str()).clone();
let store_type: StoreType = serde_json::from_value(source_node).expect("Malformed store description");
match store_type.clone() {
StoreType::Fixed { items } => {
let cards = items.iter()
.filter_map(|name| factory.get(name))
.map(|card| card.clone())
.collect();
Store {
store_type: store_type,
menu: CardContainer { zone: zone, cards: cards, size: None },
deck: None,
}
}
StoreType::Drafted { size, from_deck } => {
let mut deck = parse_deck(json, &from_deck, factory);
deck.shuffle();
let cards = (0..size).filter_map(|_| deck.draw()).collect();
Store {
store_type: store_type,
menu: CardContainer { zone: zone, cards: cards, size: Some(size) },
deck: Some(Box::new(deck)),
}
}
}
}
fn container_counts(zone: BoardZone, json: &serde_json::value::Value, node: &str, factory: &CardFactory) -> CardContainer {
let source_node = json.get(node).expect(format!("count node {} not found", node).as_str()).clone();
let data: HashMap<String, usize> = serde_json::from_value(source_node)
.expect("Malformed node");
CardContainer {
zone: zone,
cards: cards_by_counts(factory, data),
size: None,
}
}
pub fn load_players(json: &serde_json::Value) -> Vec<Player> {
let player_node = json.get("players")
.expect("file should have \"players\" node.")
.clone();
let mut players: Vec<Player> = serde_json::from_value(player_node)
.expect("Malformed player node");
let game_type = json.get("game_type")
.expect("game type not specified")
.as_str()
.expect("game type not string");
match game_type.to_lowercase().as_str() {
"vs" => {
assert_eq!(players.len(), 2, "For VS game, only 2 players are possible");
players[0].opponent_idx = 1;
players[1].opponent_idx = 0;
},
_ => panic!("Unknown game type")
}
players
}
pub fn load_board(json: &serde_json::Value, card_factory: &CardFactory, player: Player) -> BoardState | PlayerControl::Human => None,
PlayerControl::AI => Some(AI::new())
};
println!("Loading done");
BoardState {
player: player,
turn: 1,
hand: Box::new(hand),
deck: Box::new(draw_deck),
globals: NumberMap::new(),
stores: Box::new(vec!(build_store, kaiju_store)),
buildings: Box::new(buildings),
kaiju_zone: Box::new(kaiju),
ai: ai
}
}
/// Loading state: loads all assets to memory and passes them to GameplayState.
///
/// The asset loading in Quicksilver (as described in tutorial) is awkward: it requires conditional
/// execution whenever any asset is used. As we don't have large amount of data, it is more ergonomic
/// to just load them all to RAM and use them directly.
///
/// Loading in Quicksilver is internally done using Futures (that can, but don't have to
/// be wrapped in Assets). Futures can be nested using combinators (that themselves are Futures).
/// Every Future has a poll() method that returns Async::NotReady when it is not yet done, and
/// Async::Ready when its data are ready (i. e. loading is done).
/// It must not be called afterwards: it would panic.
///
/// It turns out this is perfect fit for our application: we combine all assets into single Future,
/// hook it into our event loop, polling it every update, while drawing a loading screen. When it
/// becomes ready, we construct a new State, pass it all the assets extracted from the Future and continue.
///
/// Sadly, it is complicated by the fact that Quicksilver re-exports Future trait and combinators, but
/// not the Async enum. As this enum comes from "futures" crate, we just install it in the exact same
/// version that Quicksilver uses and use that.
#[derive(Derivative, Default)]
#[derivative(Debug)]
pub struct LoadingState {
board_states: Vec<BoardState>,
image_names: Vec<String>,
font_names: Vec<String>,
#[derivative(Debug = "ignore")]
// Option just to get Default
loading: Option<
Join<
JoinAll<
Vec<Box<dyn Future<Item=Font, Error=QuicksilverError>>>
>,
JoinAll<
Vec<Box<dyn Future<Item=Image, Error=QuicksilverError>>>
>
>
>,
}
impl LoadingState {
pub fn new() -> Box<Self> {
let font_names = vec![CARD_TITLE_FONT.to_string()];
let file = load_file("cards_expanded.json")
.wait()
.expect("file should open read only"); // TODO: do this asynchronously, too
let json: serde_json::Value = serde_json::from_slice(file.as_slice())
.expect("file should be proper JSON");
let cards: CardFactory = serde_json::from_value(
json.get("cards").expect("file should have \"cards\" node").clone()
).expect("malformed card list");
let mut image_names = cards.values()
.map(|v| v.image.clone())
.unique()
.collect::<Vec<String>>();
image_names.push(CARD_BACKGROUND_IMG.to_string());
println!("Loading fonts {:?} and images: {:?}", font_names, image_names);
let loading_images = join_all(
font_names.iter()
.map(|i| Box::new(Font::load(i.clone())) as Box<dyn Future<Item=Font, Error=QuicksilverError>>)
.collect::<Vec<Box<_>>>()
).join(
join_all(
image_names.iter()
.map(|i| Box::new(Image::load(i.clone())) as Box<dyn Future<Item=Image, Error=QuicksilverError>>)
.collect::<Vec<Box<_>>>()
)
);
let players = load_players(&json);
let board_states = players.iter()
.map(|p| load_board(&json, &cards, p.clone()))
.collect();
//let board_state = load_board(json);
Box::new(Self {
board_states,
image_names,
font_names,
loading: Some(loading_images),
})
}
}
impl AutomatonState for LoadingState {
fn event(&mut self, event: GameEvent) -> Box<dyn AutomatonState> {
Box::new(take(self))
}
fn update(&mut | {
let store_node = "build_store";
let trade_row = "kaiju_store";
let hand_size = 5;
let draw_deck = parse_deck(&json, &player.starting_deck, card_factory);
//let bs_node = { json.get("build_store").expect("build_store node not found").clone() };
let build_store = parse_store(BoardZone::BuildStore, &json, store_node, card_factory);
//let ks_node = { json.get("kaiju_store").expect("kaiju_store node not found").clone() };
let kaiju_store = parse_store(BoardZone::KaijuStore, &json, trade_row, card_factory);
let hand = CardContainer::new_sized(BoardZone::Hand, hand_size);
let buildings = container_counts(BoardZone::Buildings, &json, &player.starting_buildings, card_factory);
let kaiju = CardContainer::new(BoardZone::Kaiju);
let ai = match player.control { | identifier_body |
train_3d_occupancy.py | , mesh.faces.shape[0], [bsize]))
batch_barys = np.array(uniform_bary(np.random.uniform(size=[bsize, 2])))
batch_faces = mesh.faces[batch_face_inds]
batch_normals = mesh.face_normals[batch_face_inds]
batch_pts = np.sum(mesh.vertices[batch_faces] * batch_barys[...,None], 1)
return batch_pts, batch_normals
gt_fn = lambda queries, mesh : mesh.ray.contains_points(queries.reshape([-1,3])).reshape(queries.shape[:-1])
R = 2.
c2w = pose_spherical(90. + 10 + 45, -30., R)
N_samples = 64
N_samples_2 = 64
H = 256
W = H
focal = H * .9
rays = get_rays(H, W, focal, c2w[:3,:4])
render_args_lr = [get_rays(H, W, focal, c2w[:3,:4]), None, R-1, R+1, N_samples, N_samples_2, True]
N_samples = 256
N_samples_2 = 256
H = 512
W = H
focal = H * .9
rays = get_rays(H, W, focal, c2w[:3,:4])
render_args_hr = [get_rays(H, W, focal, c2w[:3,:4]), None, R-1, R+1, N_samples, N_samples_2, True]
def train(args, mesh_obj, model, opt=None, iters=10000, device='cuda', liveplot=False, run_label=None):
"""Standard training/evaluation epoch over the dataset"""
criterion = lambda x, z: torch.mean(torch.relu(x) - x * z + torch.log(1 + torch.exp(-torch.abs(x))))
data_iter = tqdm(range(1, iters + 1))
if run_label is not None:
data_iter.set_description(run_label)
mesh, corners = mesh_obj
c0, c1 = [torch.tensor(t, dtype=torch.float32) for t in corners]
render_args_hr[0] = [torch.tensor(t, dtype=torch.float32) for t in render_args_hr[0]]
render_args_lr[0] = [torch.tensor(t, dtype=torch.float32) for t in render_args_lr[0]]
render_args_hr[1] = [c0, c1]
render_args_lr[1] = [c0, c1]
c1, c0 = c1.to(device), c0.to(device)
step_list = []
loss_list = []
step_time_list = []
test_psnr_list = []
model = model.to(device)
# Main training Loop
postfix = {'loss': np.inf, 'psnr': 0., 'forward_steps': 0}
for i in data_iter:
start_time = time.time()
inputs = torch.rand(args.train_batch_size, 3).to(device) * (c1 - c0) + c0
z_init = torch.zeros(1, model.interm_channels).to(device)
target = torch.tensor(gt_fn(inputs.cpu().numpy(), mesh), dtype=torch.bool).to(device)
model_outputs = model(inputs, z_init, skip_solver=False, verbose=False, include_grad=False)
pred = torch.sigmoid(model_outputs['output'].squeeze())
loss = criterion(model_outputs['output'].squeeze(), target.float())
loss_list.append(loss.item())
if opt:
opt.zero_grad()
loss.backward()
opt.step()
postfix['forward_steps'] = model_outputs['forward_steps']
postfix['loss'] = loss.item()
step_time = time.time() - start_time
step_time_list.append(step_time)
do_log = i % args.log_freq == 0
do_vis = i % args.vis_freq == 0
if i % args.save_freq == 0:
torch.save(model.state_dict(), f'{args.save_dir:s}/model_step{i:d}.pth')
if do_log:
summary_dict = {
'train_loss': loss_list,
'step_time': step_time_list
}
with open('{:s}/summary.pkl'.format(args.log_dir), 'wb') as summary_f:
pickle.dump(summary_dict, summary_f)
if do_vis:
with torch.no_grad():
depth_map, acc_map = render_rays_native_hier(model, *render_args_lr, device=device)
normal_map = make_normals(render_args_lr[0], depth_map.cpu()) * 0.5 + 0.5
fig, axes = plt.subplots(1, 3)
fig.set_size_inches(6 * 3, 6)
for ax in axes:
ax.clear()
ax.set_axis_off()
axes[0].imshow(depth_map.squeeze().cpu().numpy())
axes[1].imshow(acc_map.squeeze().cpu().numpy())
axes[2].imshow(normal_map.squeeze().cpu().numpy())
fig.set_tight_layout(True)
fig.savefig(args.vis_dir + '/vis_step{:d}.png'.format(i))
data_iter.set_postfix(postfix)
with torch.no_grad():
depth_map, acc_map = render_rays_native_hier(model, *render_args_hr, device=device)
normal_map = make_normals(render_args_hr[0], depth_map.cpu()) * 0.5 + 0.5
skimage.io.imsave(args.vis_dir + '/final_rendered.png', normal_map.squeeze().cpu().numpy())
return {
'loss': loss_list,
'step_time': step_time_list,
'step': step_list,
'test_psnr': test_psnr_list,
}
def eval(args, model):
assert args.restore_path is not None, 'Restore path cannot be empty'
state_dict = torch.load(args.restore_path)
model.load_state_dict(state_dict)
model.to(args.device)
mesh_obj = load_mesh(args.dataset)
all_tests = load_test_pts(args.dataset, mesh_obj, regen=False, verbose=True)
log_file = os.path.join(args.log_dir, "test_output.txt")
with open(log_file, 'w') as f:
for test_name in all_tests:
test_pts, test_labels = [torch.tensor(arr, dtype=torch.float32) for arr in all_tests[test_name]]
test_ds = torch.utils.data.TensorDataset(test_pts, test_labels)
test_loader = torch.utils.data.DataLoader(test_ds, shuffle=False, batch_size=10000, drop_last=False, pin_memory=True)
cm = ConfusionMatrix()
for (pts, labels) in iter(test_loader):
pts, labels = pts.to(args.device), labels.to(args.device)
model_outputs = model(pts, skip_solver=False, verbose=False, include_grad=False)
pred = model_outputs['output'].squeeze() > 0
# print(pred, labels)
cm.update(pred.detach().cpu().numpy(), labels.cpu().numpy())
f.write(f"Test: {test_name}\n")
f.write(f"\tAccuracy {cm.get_acc():.5f}\n")
f.write(f"\tPrecision {cm.get_precision():.5f}\n")
f.write(f"\tRecall {cm.get_recall():.5f}\n")
f.write(f"\tIoU {cm.get_iou():.5f}\n")
f.write("\n")
mesh, corners = mesh_obj
c0, c1 = [torch.tensor(t, dtype=torch.float32) for t in corners]
render_args_hr[0] = [torch.tensor(t, dtype=torch.float32) for t in render_args_hr[0]]
render_args_lr[0] = [torch.tensor(t, dtype=torch.float32) for t in render_args_lr[0]]
render_args_hr[1] = [c0, c1]
render_args_lr[1] = [c0, c1]
with torch.no_grad():
depth_map, acc_map = render_rays_native_hier(model, *render_args_hr, device=args.device)
normal_map = make_normals(render_args_hr[0], depth_map.cpu()) * 0.5 + 0.5
skimage.io.imsave(args.vis_dir + '/final_rendered.png', normal_map.squeeze().cpu().numpy())
def main(args):
model_args = construct_model_args(
model_type=args.model_type,
n_layers=args.n_layers,
in_channels=3,
interm_channels=args.interm_channels,
out_channels=1,
input_scale=args.input_scale,
use_implicit=args.use_implicit,
filter_type=args.filter_type,
filter_options={'alpha': args.gabor_alpha},
norm_type=args.norm_type,
forward_solver=args.forward_solver,
backward_solver=args.backward_solver,
tol=args.test_tol if args.eval else args.train_tol
)
model = get_model(model_args)
print(model)
if args.restore_path is not None:
model.load_state_dict(torch.load(args.restore_path))
opt = torch.optim.Adam(model.parameters(), lr=args.lr)
if not args.eval:
mesh_obj = load_mesh(args.dataset)
train(
args,
mesh_obj,
model,
opt,
iters=args.max_train_iters,
device=args.device
)
else:
| eval(
args,
model
) | conditional_block | |
train_3d_occupancy.py | } - Outside obj: {np.sum(1 - test_labels_easy):d}")
print(f"Test points [hard] - Inside obj: {np.sum(test_labels_hard):d} - Outside obj: {np.sum(1 - test_labels_hard):d}")
return {
'easy': (test_pts_easy, test_labels_easy),
'hard': (test_pts_hard, test_labels_hard)
}
###################
trans_t = lambda t : np.array([
[1,0,0,0],
[0,1,0,0],
[0,0,1,t],
[0,0,0,1],
], dtype=np.float32)
rot_phi = lambda phi : np.array([
[1,0,0,0],
[0,np.cos(phi),-np.sin(phi),0],
[0,np.sin(phi), np.cos(phi),0],
[0,0,0,1],
], dtype=np.float32)
rot_theta = lambda th : np.array([
[np.cos(th),0,-np.sin(th),0],
[0,1,0,0],
[np.sin(th),0, np.cos(th),0],
[0,0,0,1],
], dtype=np.float32)
def | (theta, phi, radius):
c2w = trans_t(radius)
c2w = rot_phi(phi/180.*np.pi) @ c2w
c2w = rot_theta(theta/180.*np.pi) @ c2w
# c2w = np.array([[-1,0,0,0],[0,0,1,0],[0,1,0,0],[0,0,0,1]]) @ c2w
return c2w
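# Illustrative note: the camera-to-world matrix composes right to left,
# c2w = rot_theta(theta) @ rot_phi(phi) @ trans_t(radius), so the call further down,
# pose_spherical(90. + 10 + 45, -30., 2.), places the camera 2 units from the origin,
# tilts it by phi = -30 degrees and rotates it by theta = 145 degrees about the y axis.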
def get_rays(H, W, focal, c2w):
i, j = np.meshgrid(np.arange(W), np.arange(H), indexing='xy')
dirs = np.stack([(i-W*.5)/focal, -(j-H*.5)/focal, -np.ones_like(i)], -1)
rays_d = np.sum(dirs[..., np.newaxis, :] * c2w[:3,:3], -1)
rays_o = np.broadcast_to(c2w[:3,-1], rays_d.shape)
return np.stack([rays_o, rays_d], 0)
#########
def render_rays_native_hier(model, rays, corners, near, far, N_samples, N_samples_2, clip, device): #, rand=False):
rays_o, rays_d = rays
c0, c1 = corners
th = .5
# Compute 3D query points
z_vals = torch.linspace(near, far, N_samples)
pts = rays_o[...,None,:] + rays_d[...,None,:] * z_vals[...,:,None]
pts = 0.5 * (pts + 1)
h, w, d = pts.shape[:-1]
# Run network
model_output = batched_apply(model, pts.view(-1, 3), batch_size=50000, device=device)
alpha = torch.sigmoid(model_output).view(h, w, d)
if clip:
mask = torch.logical_or(torch.any(pts < c0, -1), torch.any(pts > c1, -1)).to(device)
alpha = torch.where(mask, torch.zeros_like(alpha).to(device), alpha)
alpha = torch.where(alpha > th, torch.ones_like(alpha).to(device), torch.zeros_like(alpha).to(device))
trans = 1.-alpha + 1e-10
trans = torch.cat([torch.ones_like(trans[...,:1]).to(trans.device), trans[...,:-1]], -1)
weights = alpha * torch.cumprod(trans, -1)
depth_map = torch.sum(weights * z_vals.to(device), -1)
acc_map = torch.sum(weights, -1)
# Second pass to refine isosurface
z_vals = torch.linspace(-1., 1., N_samples_2) * .01 + depth_map[...,None].cpu()
pts = rays_o[...,None,:] + rays_d[...,None,:] * z_vals[...,:,None]
pts = 0.5 * (pts + 1)
# Run network
model_output = batched_apply(model, pts.view(-1, 3), batch_size=50000, device=device)
alpha = torch.sigmoid(model_output).view(h, w, d)
if clip:
mask = torch.logical_or(torch.any(pts < c0, -1), torch.any(pts > c1, -1)).to(device)
alpha = torch.where(mask, torch.zeros_like(alpha).to(device), alpha)
alpha = torch.where(alpha > th, torch.ones_like(alpha).to(device), torch.zeros_like(alpha).to(device))
trans = 1.-alpha + 1e-10
trans = torch.cat([torch.ones_like(trans[...,:1]).to(trans.device), trans[...,:-1]], -1)
weights = alpha * torch.cumprod(trans, -1)
depth_map = torch.sum(weights * z_vals.to(device), -1)
acc_map = torch.sum(weights, -1)
return depth_map, acc_map
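# Illustrative note: rendering is done in two passes - a coarse pass over [near, far]
# locates the surface, then the second pass re-samples a +/-0.01 window around the
# recovered depth to sharpen the isosurface; thresholded occupancy is treated as opacity
# and composited with the usual transmittance (cumulative product) weighting.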
def make_normals(rays, depth_map):
rays_o, rays_d = rays
pts = rays_o + rays_d * depth_map[...,None]
dx = pts - torch.roll(pts, -1, dims=0)
dy = pts - torch.roll(pts, -1, dims=1)
normal_map = torch.cross(dx, dy)
normal_map = normal_map / torch.clamp_min(torch.norm(normal_map, dim=-1, keepdim=True), 1e-5)
return normal_map
def render_mesh_normals(mesh, rays):
origins, dirs = rays.reshape([2,-1,3])
origins = origins * .5 + .5
dirs = dirs * .5
z = mesh.ray.intersects_first(origins, dirs)
pic = np.zeros([origins.shape[0],3])
pic[z!=-1] = mesh.face_normals[z[z!=-1]]
pic = np.reshape(pic, rays.shape[1:])
return pic
def uniform_bary(u):
su0 = np.sqrt(u[..., 0])
b0 = 1. - su0
b1 = u[..., 1] * su0
return np.stack([b0, b1, 1. - b0 - b1], -1)
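# Illustrative sketch: this is the standard square-root parametrisation for uniform
# sampling on a triangle; u in [0, 1]^2 maps to barycentric weights summing to 1, e.g.
#   uniform_bary(np.array([[0.25, 0.5]]))   # -> [[0.5, 0.25, 0.25]]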
def get_normal_batch(mesh, bsize):
batch_face_inds = np.array(np.random.randint(0, mesh.faces.shape[0], [bsize]))
batch_barys = np.array(uniform_bary(np.random.uniform(size=[bsize, 2])))
batch_faces = mesh.faces[batch_face_inds]
batch_normals = mesh.face_normals[batch_face_inds]
batch_pts = np.sum(mesh.vertices[batch_faces] * batch_barys[...,None], 1)
return batch_pts, batch_normals
gt_fn = lambda queries, mesh : mesh.ray.contains_points(queries.reshape([-1,3])).reshape(queries.shape[:-1])
R = 2.
c2w = pose_spherical(90. + 10 + 45, -30., R)
N_samples = 64
N_samples_2 = 64
H = 256
W = H
focal = H * .9
rays = get_rays(H, W, focal, c2w[:3,:4])
render_args_lr = [get_rays(H, W, focal, c2w[:3,:4]), None, R-1, R+1, N_samples, N_samples_2, True]
N_samples = 256
N_samples_2 = 256
H = 512
W = H
focal = H * .9
rays = get_rays(H, W, focal, c2w[:3,:4])
render_args_hr = [get_rays(H, W, focal, c2w[:3,:4]), None, R-1, R+1, N_samples, N_samples_2, True]
def train(args, mesh_obj, model, opt=None, iters=10000, device='cuda', liveplot=False, run_label=None):
"""Standard training/evaluation epoch over the dataset"""
criterion = lambda x, z: torch.mean(torch.relu(x) - x * z + torch.log(1 + torch.exp(-torch.abs(x))))
data_iter = tqdm(range(1, iters + 1))
if run_label is not None:
data_iter.set_description(run_label)
mesh, corners = mesh_obj
c0, c1 = [torch.tensor(t, dtype=torch.float32) for t in corners]
render_args_hr[0] = [torch.tensor(t, dtype=torch.float32) for t in render_args_hr[0]]
render_args_lr[0] = [torch.tensor(t, dtype=torch.float32) for t in render_args_lr[0]]
render_args_hr[1] = [c0, c1]
render_args_lr[1] = [c0, c1]
c1, c0 = c1.to(device), c0.to(device)
step_list = []
loss_list = []
step_time_list = []
test_psnr_list = | pose_spherical | identifier_name |
train_3d_occupancy.py |
def as_mesh(scene_or_mesh):
"""
Convert a possible scene to a mesh.
If conversion occurs, the returned mesh has only vertex and face data.
"""
if isinstance(scene_or_mesh, trimesh.Scene):
if len(scene_or_mesh.geometry) == 0:
mesh = None # empty scene
else:
# we lose texture information here
mesh = trimesh.util.concatenate(
tuple(trimesh.Trimesh(vertices=g.vertices, faces=g.faces)
for g in scene_or_mesh.geometry.values()))
else:
assert(isinstance(scene_or_mesh, trimesh.Trimesh))
mesh = scene_or_mesh
return mesh
def recenter_mesh(mesh):
mesh.vertices -= mesh.vertices.mean(0)
mesh.vertices /= np.max(np.abs(mesh.vertices))
mesh.vertices = .5 * (mesh.vertices + 1.)
def load_mesh(mesh_name, verbose=True):
mesh = trimesh.load(_mesh_paths[mesh_name])
mesh = as_mesh(mesh)
if verbose:
print(mesh.vertices.shape)
recenter_mesh(mesh)
c0, c1 = mesh.vertices.min(0) - 1e-3, mesh.vertices.max(0) + 1e-3
corners = [c0, c1]
if verbose:
print(c0, c1)
print(c1-c0)
print(np.prod(c1-c0))
print(.5 * (c0+c1) * 2 - 1)
return mesh, corners
def load_test_pts(mesh_name, mesh_obj=None, regen=True, verbose=True):
test_pt_file = os.path.join(os.path.split(_mesh_paths[mesh_name])[0], mesh_name + '_test_pts.npz')
if mesh_obj is None:
mesh, corners = load_mesh(mesh_name)
else:
mesh, corners = mesh_obj
if regen or not os.path.exists(test_pt_file):
test_pts_easy, test_pts_hard = make_test_pts(mesh, corners)
np.savez(test_pt_file, easy=test_pts_easy, hard=test_pts_hard)
else:
if verbose: print('load pts')
test_pts_dict = np.load(test_pt_file)
test_pts_easy, test_pts_hard = test_pts_dict['easy'], test_pts_dict['hard']
if verbose: print(test_pts_easy.shape)
test_labels_easy = gt_fn(test_pts_easy, mesh)
test_labels_hard = gt_fn(test_pts_hard, mesh)
if verbose:
print(f"Test points [easy] - Inside obj: {np.sum(test_labels_easy):d} - Outside obj: {np.sum(1 - test_labels_easy):d}")
print(f"Test points [hard] - Inside obj: {np.sum(test_labels_hard):d} - Outside obj: {np.sum(1 - test_labels_hard):d}")
return {
'easy': (test_pts_easy, test_labels_easy),
'hard': (test_pts_hard, test_labels_hard)
}
###################
trans_t = lambda t : np.array([
[1,0,0,0],
[0,1,0,0],
[0,0,1,t],
[0,0,0,1],
], dtype=np.float32)
rot_phi = lambda phi : np.array([
[1,0,0,0],
[0,np.cos(phi),-np.sin(phi),0],
[0,np.sin(phi), np.cos(phi),0],
[0,0,0,1],
], dtype=np.float32)
rot_theta = lambda th : np.array([
[np.cos(th),0,-np.sin(th),0],
[0,1,0,0],
[np.sin(th),0, np.cos(th),0],
[0,0,0,1],
], dtype=np.float32)
def pose_spherical(theta, phi, radius):
c2w = trans_t(radius)
c2w = rot_phi(phi/180.*np.pi) @ c2w
c2w = rot_theta(theta/180.*np.pi) @ c2w
# c2w = np.array([[-1,0,0,0],[0,0,1,0],[0,1,0,0],[0,0,0,1]]) @ c2w
return c2w
def get_rays(H, W, focal, c2w):
i, j = np.meshgrid(np.arange(W), np.arange(H), indexing='xy')
dirs = np.stack([(i-W*.5)/focal, -(j-H*.5)/focal, -np.ones_like(i)], -1)
rays_d = np.sum(dirs[..., np.newaxis, :] * c2w[:3,:3], -1)
rays_o = np.broadcast_to(c2w[:3,-1], rays_d.shape)
return np.stack([rays_o, rays_d], 0)
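# Illustrative note: get_rays builds one pinhole ray per pixel - origins are the camera
# centre broadcast to H x W, directions are pixel offsets over the focal length rotated
# into world space - so the stacked result has shape [2, H, W, 3], e.g.
#   rays = get_rays(256, 256, 256 * .9, c2w[:3, :4])   # rays.shape == (2, 256, 256, 3)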
#########
def render_rays_native_hier(model, rays, corners, near, far, N_samples, N_samples_2, clip, device): #, rand=False):
rays_o, rays_d = rays
c0, c1 = corners
th = .5
# Compute 3D query points
z_vals = torch.linspace(near, far, N_samples)
pts = rays_o[...,None,:] + rays_d[...,None,:] * z_vals[...,:,None]
pts = 0.5 * (pts + 1)
h, w, d = pts.shape[:-1]
# Run network
model_output = batched_apply(model, pts.view(-1, 3), batch_size=50000, device=device)
alpha = torch.sigmoid(model_output).view(h, w, d)
if clip:
mask = torch.logical_or(torch.any(pts < c0, -1), torch.any(pts > c1, -1)).to(device)
alpha = torch.where(mask, torch.zeros_like(alpha).to(device), alpha)
alpha = torch.where(alpha > th, torch.ones_like(alpha).to(device), torch.zeros_like(alpha).to(device))
trans = 1.-alpha + 1e-10
trans = torch.cat([torch.ones_like(trans[...,:1]).to(trans.device), trans[...,:-1]], -1)
weights = alpha * torch.cumprod(trans, -1)
depth_map = torch.sum(weights * z_vals.to(device), -1)
acc_map = torch.sum(weights, -1)
# Second pass to refine isosurface
z_vals = torch.linspace(-1., 1., N_samples_2) * .01 + depth_map[...,None].cpu()
pts = rays_o[...,None,:] + rays_d[...,None,:] * z_vals[...,:,None]
pts = 0.5 * (pts + 1)
# Run network
model_output = batched_apply(model, pts.view(-1, 3), batch_size=50000, device=device)
alpha = torch.sigmoid(model_output).view(h, w, d)
if clip:
mask = torch.logical_or(torch.any(pts < c0, -1), torch.any(pts > c1, -1)).to(device)
alpha = torch.where(mask, torch.zeros_like(alpha).to(device), alpha)
alpha = torch.where(alpha > th, torch.ones_like(alpha).to(device), torch.zeros_like(alpha).to(device))
trans = 1.-alpha + 1e-10
trans = torch.cat([torch.ones_like(trans[...,:1]).to(trans.device), trans[...,:-1]], -1)
weights = alpha * torch.cumprod(trans, -1)
depth_map = torch.sum(weights * z_vals.to(device), -1)
acc_map = torch.sum(weights, -1)
return depth_map, acc_map
def make_normals(rays, depth_map):
rays_o, rays_d = rays
pts = rays_o + rays_d * depth_map[...,None]
dx = pts - torch.roll(pts, -1, dims=0)
dy = pts - torch.roll(pts, -1, dims=1)
normal_map = torch.cross(dx, dy)
normal_map = normal_map / torch.clamp_min(torch.norm(normal_map, dim=-1, keepdim=True), 1e-5)
return normal_map
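# Illustrative note: normals come from finite differences - surface points are
# reconstructed from the depth map, rolled copies along x and y give two tangent
# vectors, and their normalised cross product is the per-pixel normal.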
def render_mesh_normals(mesh, rays):
origins, dirs = rays.reshape([2,-1,3])
origins = origins * .5 + .5
dirs = dirs * .5
z = mesh.ray.intersects_first(origins, dirs)
pic = np.zeros([origins.shape[0],3])
pic[z!=-1] = mesh.face_normals[z[z!=-1]]
pic = np.reshape(pic, rays.shape[1:])
return pic
def uniform_bary(u):
su0 = np.sqrt(u[..., 0])
b0 = 1. - su0
b1 = u[..., 1] * su0
return np.stack([b0, b1, 1. - b0 | c0, c1 = corners
test_easy = np.random.uniform(size=[test_size, 3]) * (c1-c0) + c0
batch_pts, batch_normals = get_normal_batch(mesh, test_size)
test_hard = batch_pts + np.random.normal(size=[test_size,3]) * .001
return test_easy, test_hard | identifier_body | |
train_3d_occupancy.py | mesh.vertices -= mesh.vertices.mean(0)
mesh.vertices /= np.max(np.abs(mesh.vertices))
mesh.vertices = .5 * (mesh.vertices + 1.)
def load_mesh(mesh_name, verbose=True):
mesh = trimesh.load(_mesh_paths[mesh_name])
mesh = as_mesh(mesh)
if verbose:
print(mesh.vertices.shape)
recenter_mesh(mesh)
c0, c1 = mesh.vertices.min(0) - 1e-3, mesh.vertices.max(0) + 1e-3
corners = [c0, c1]
if verbose:
print(c0, c1)
print(c1-c0)
print(np.prod(c1-c0))
print(.5 * (c0+c1) * 2 - 1)
return mesh, corners
def load_test_pts(mesh_name, mesh_obj=None, regen=True, verbose=True):
test_pt_file = os.path.join(os.path.split(_mesh_paths[mesh_name])[0], mesh_name + '_test_pts.npz')
if mesh_obj is None:
mesh, corners = load_mesh(mesh_name)
else:
mesh, corners = mesh_obj
if regen or not os.path.exists(test_pt_file):
test_pts_easy, test_pts_hard = make_test_pts(mesh, corners)
np.savez(test_pt_file, easy=test_pts_easy, hard=test_pts_hard)
else:
if verbose: print('load pts')
test_pts_dict = np.load(test_pt_file)
test_pts_easy, test_pts_hard = test_pts_dict['easy'], test_pts_dict['hard']
if verbose: print(test_pts_easy.shape)
test_labels_easy = gt_fn(test_pts_easy, mesh)
test_labels_hard = gt_fn(test_pts_hard, mesh)
if verbose:
print(f"Test points [easy] - Inside obj: {np.sum(test_labels_easy):d} - Outside obj: {np.sum(1 - test_labels_easy):d}")
print(f"Test points [hard] - Inside obj: {np.sum(test_labels_hard):d} - Outside obj: {np.sum(1 - test_labels_hard):d}")
return {
'easy': (test_pts_easy, test_labels_easy),
'hard': (test_pts_hard, test_labels_hard)
}
###################
trans_t = lambda t : np.array([
[1,0,0,0],
[0,1,0,0],
[0,0,1,t],
[0,0,0,1],
], dtype=np.float32)
rot_phi = lambda phi : np.array([
[1,0,0,0],
[0,np.cos(phi),-np.sin(phi),0],
[0,np.sin(phi), np.cos(phi),0],
[0,0,0,1],
], dtype=np.float32)
rot_theta = lambda th : np.array([
[np.cos(th),0,-np.sin(th),0],
[0,1,0,0],
[np.sin(th),0, np.cos(th),0],
[0,0,0,1],
], dtype=np.float32)
def pose_spherical(theta, phi, radius):
c2w = trans_t(radius)
c2w = rot_phi(phi/180.*np.pi) @ c2w
c2w = rot_theta(theta/180.*np.pi) @ c2w
# c2w = np.array([[-1,0,0,0],[0,0,1,0],[0,1,0,0],[0,0,0,1]]) @ c2w
return c2w
def get_rays(H, W, focal, c2w):
i, j = np.meshgrid(np.arange(W), np.arange(H), indexing='xy')
dirs = np.stack([(i-W*.5)/focal, -(j-H*.5)/focal, -np.ones_like(i)], -1)
rays_d = np.sum(dirs[..., np.newaxis, :] * c2w[:3,:3], -1)
rays_o = np.broadcast_to(c2w[:3,-1], rays_d.shape)
return np.stack([rays_o, rays_d], 0)
#########
def render_rays_native_hier(model, rays, corners, near, far, N_samples, N_samples_2, clip, device): #, rand=False):
rays_o, rays_d = rays
c0, c1 = corners
th = .5
# Compute 3D query points
z_vals = torch.linspace(near, far, N_samples)
pts = rays_o[...,None,:] + rays_d[...,None,:] * z_vals[...,:,None]
pts = 0.5 * (pts + 1)
h, w, d = pts.shape[:-1]
# Run network
model_output = batched_apply(model, pts.view(-1, 3), batch_size=50000, device=device)
alpha = torch.sigmoid(model_output).view(h, w, d)
if clip:
mask = torch.logical_or(torch.any(pts < c0, -1), torch.any(pts > c1, -1)).to(device)
alpha = torch.where(mask, torch.zeros_like(alpha).to(device), alpha)
alpha = torch.where(alpha > th, torch.ones_like(alpha).to(device), torch.zeros_like(alpha).to(device))
trans = 1.-alpha + 1e-10
trans = torch.cat([torch.ones_like(trans[...,:1]).to(trans.device), trans[...,:-1]], -1)
weights = alpha * torch.cumprod(trans, -1)
depth_map = torch.sum(weights * z_vals.to(device), -1)
acc_map = torch.sum(weights, -1)
# Second pass to refine isosurface
z_vals = torch.linspace(-1., 1., N_samples_2) * .01 + depth_map[...,None].cpu()
pts = rays_o[...,None,:] + rays_d[...,None,:] * z_vals[...,:,None]
pts = 0.5 * (pts + 1)
# Run network
model_output = batched_apply(model, pts.view(-1, 3), batch_size=50000, device=device)
alpha = torch.sigmoid(model_output).view(h, w, d)
if clip:
mask = torch.logical_or(torch.any(pts < c0, -1), torch.any(pts > c1, -1)).to(device)
alpha = torch.where(mask, torch.zeros_like(alpha).to(device), alpha)
alpha = torch.where(alpha > th, torch.ones_like(alpha).to(device), torch.zeros_like(alpha).to(device))
trans = 1.-alpha + 1e-10
trans = torch.cat([torch.ones_like(trans[...,:1]).to(trans.device), trans[...,:-1]], -1)
weights = alpha * torch.cumprod(trans, -1)
depth_map = torch.sum(weights * z_vals.to(device), -1)
acc_map = torch.sum(weights, -1)
return depth_map, acc_map
def make_normals(rays, depth_map):
rays_o, rays_d = rays
pts = rays_o + rays_d * depth_map[...,None]
dx = pts - torch.roll(pts, -1, dims=0)
dy = pts - torch.roll(pts, -1, dims=1)
normal_map = torch.cross(dx, dy)
normal_map = normal_map / torch.clamp_min(torch.norm(normal_map, dim=-1, keepdim=True), 1e-5)
return normal_map
def render_mesh_normals(mesh, rays):
origins, dirs = rays.reshape([2,-1,3])
origins = origins * .5 + .5
dirs = dirs * .5
z = mesh.ray.intersects_first(origins, dirs)
pic = np.zeros([origins.shape[0],3])
pic[z!=-1] = mesh.face_normals[z[z!=-1]]
pic = np.reshape(pic, rays.shape[1:])
return pic
def uniform_bary(u):
su0 = np.sqrt(u[..., 0])
b0 = 1. - su0
b1 = u[..., 1] * su0
return np.stack([b0, b1, 1. - b0 - b1], -1)
def get_normal_batch(mesh, bsize):
batch_face_inds = np.array(np.random.randint(0, mesh.faces.shape[0], [bsize]))
batch_barys = np.array(uniform_bary(np.random.uniform(size=[bsize, 2])))
batch_faces = mesh.faces[batch_face_inds]
batch_normals = mesh.face_normals[batch_face_inds]
batch_pts = np.sum(mesh.vertices[batch_faces] * batch_barys[...,None], 1)
return batch_pts, batch_normals
gt_fn = lambda queries, mesh : mesh.ray.contains_points(queries.reshape([-1,3])).reshape(queries.shape[:-1])
R = 2.
c2w = pose_spherical(90. + 10 + 45, -30., R)
N_samples = 64
N_samples_2 = 64
H = 256
W = H
focal = H * .9
rays = get_rays(H, W, focal, c | def recenter_mesh(mesh): | random_line_split | |
gen.go | () {
var err error
flag.Parse()
// special characters
keys := map[rune]kb.Key{
'\b': {"Backspace", "Backspace", "", "", int64('\b'), int64('\b'), false, false},
'\t': {"Tab", "Tab", "", "", int64('\t'), int64('\t'), false, false},
'\r': {"Enter", "Enter", "\r", "\r", int64('\r'), int64('\r'), false, true},
}
// load keys
err = loadKeys(keys)
if err != nil {
log.Fatal(err)
}
// process keys
constBuf, mapBuf, err := processKeys(keys)
if err != nil {
log.Fatal(err)
}
// output
err = ioutil.WriteFile(
*flagOut,
[]byte(fmt.Sprintf(hdr, *flagPkg, string(constBuf), string(mapBuf))),
0644,
)
if err != nil {
log.Fatal(err)
}
// format
err = exec.Command("goimports", "-w", *flagOut).Run()
if err != nil {
log.Fatal(err)
}
// format
err = exec.Command("gofmt", "-s", "-w", *flagOut).Run()
if err != nil {
log.Fatal(err)
}
}
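// Illustrative usage (assumption, not shown in the original file): since the output is
// post-processed with goimports and gofmt, a typical invocation is roughly
//   go run gen.go -out keys.go -pkg kb
// with both tools available on PATH.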
// loadKeys loads the dom key definitions from the chromium source tree.
func loadKeys(keys map[rune]kb.Key) error {
var err error
// load key converter data
keycodeConverterMap, err := loadKeycodeConverterData()
if err != nil {
return err
}
// load dom code map
domKeyMap, err := loadDomKeyData()
if err != nil {
return err
}
// load US layout data
layoutBuf, err := grab(domUsLayoutDataH)
if err != nil {
return err
}
// load scan code map
scanCodeMap, err := loadScanCodes(keycodeConverterMap, domKeyMap, layoutBuf)
if err != nil {
return err
}
// process printable
err = loadPrintable(keys, keycodeConverterMap, domKeyMap, layoutBuf, scanCodeMap)
if err != nil {
return err
}
// process non-printable
err = loadNonPrintable(keys, keycodeConverterMap, domKeyMap, layoutBuf, scanCodeMap)
if err != nil {
return err
}
return nil
}
var fixRE = regexp.MustCompile(`,\n\s{10,}`)
var usbKeyRE = regexp.MustCompile(`(?m)^\s*USB_KEYMAP\((.*?), (.*?), (.*?), (.*?), (.*?), (.*?), (.*?)\)`)
// loadKeycodeConverterData loads the key codes from the keycode_converter_data.inc.
func loadKeycodeConverterData() (map[string][]string, error) {
buf, err := grab(keycodeConverterDataInc)
if err != nil {
return nil, err
}
buf = fixRE.ReplaceAllLiteral(buf, []byte(", "))
domMap := make(map[string][]string)
matches := usbKeyRE.FindAllStringSubmatch(string(buf), -1)
for _, m := range matches {
vkey := m[7]
if _, ok := domMap[vkey]; ok {
panic(fmt.Sprintf("vkey %s already defined", vkey))
}
domMap[vkey] = m[1:]
}
return domMap, nil
}
// decodeRune is a wrapper around parsing a printable c++ int/char definition to a unicode
// rune value.
func decodeRune(s string) rune {
if strings.HasPrefix(s, "0x") {
i, err := strconv.ParseInt(s, 0, 16)
if err != nil {
panic(err)
}
return rune(i)
}
if !strings.HasPrefix(s, "'") || !strings.HasSuffix(s, "'") {
panic(fmt.Sprintf("expected character, got: %s", s))
}
if len(s) == 4 {
if s[1] != '\\' {
panic(fmt.Sprintf("expected escaped character, got: %s", s))
}
return rune(s[2])
}
if len(s) != 3 {
panic(fmt.Sprintf("expected character, got: %s", s))
}
return rune(s[1])
}
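// Illustrative examples of decodeRune's two accepted forms:
//   decodeRune("0x0041")  // 'A' (hex literal)
//   decodeRune("'a'")     // 'a' (single-quoted char)
//   decodeRune(`'\''`)    // '\'' (escaped, 4-byte form)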
// getCode is a simple wrapper around parsing the code definition.
func getCode(s string) string {
if !strings.HasPrefix(s, `"`) || !strings.HasSuffix(s, `"`) {
panic(fmt.Sprintf("expected string, got: %s", s))
}
return s[1 : len(s)-1]
}
// addKey is a simple map add wrapper to panic if the key is already defined,
// and to lookup the correct scan code.
func addKey(keys map[rune]kb.Key, r rune, key kb.Key, scanCodeMap map[string][]int64, shouldPanic bool) {
if _, ok := keys[r]; ok {
if shouldPanic {
panic(fmt.Sprintf("rune %U (%s/%s) already defined in keys", r, key.Code, key.Key))
}
return
}
sc, ok := scanCodeMap[key.Code]
if ok {
key.Native = sc[0]
key.Windows = sc[1]
}
keys[r] = key
}
var printableKeyRE = regexp.MustCompile(`\{DomCode::(.+?), \{(.+?), (.+?)\}\}`)
// loadPrintable loads the printable key definitions.
func loadPrintable(keys map[rune]kb.Key, keycodeConverterMap, domKeyMap map[string][]string, layoutBuf []byte, scanCodeMap map[string][]int64) error {
buf := extract(layoutBuf, "kPrintableCodeMap")
matches := printableKeyRE.FindAllStringSubmatch(string(buf), -1)
for _, m := range matches {
domCode := m[1]
// ignore domCodes that are duplicates of other unicode characters
if domCode == "INTL_BACKSLASH" || domCode == "INTL_HASH" || strings.HasPrefix(domCode, "NUMPAD") {
continue
}
kc, ok := keycodeConverterMap[domCode]
if !ok {
panic(fmt.Sprintf("could not find key %s in keycode map", domCode))
}
code := getCode(kc[5])
r1, r2 := decodeRune(m[2]), decodeRune(m[3])
addKey(keys, r1, kb.Key{
Code: code,
Key: string(r1),
Text: string(r1),
Unmodified: string(r1),
Print: true,
}, scanCodeMap, true)
// shifted value is same as non-shifted, so skip
if r2 == r1 {
continue
}
// skip for duplicate keys
if r2 == '|' && domCode != "BACKSLASH" {
continue
}
addKey(keys, r2, kb.Key{
Code: code,
Key: string(r2),
Text: string(r2),
Unmodified: string(r1),
Shift: true,
Print: true,
}, scanCodeMap, true)
}
return nil
}
var domKeyRE = regexp.MustCompile(`(?m)^\s+DOM_KEY_(?:UNI|MAP)\("(.+?)",\s*(.+?),\s*(0x[0-9A-F]{4})\)`)
// loadDomKeyData loads the dom key data definitions.
func loadDomKeyData() (map[string][]string, error) {
buf, err := grab(domKeyDataInc)
if err != nil {
return nil, err
}
buf = fixRE.ReplaceAllLiteral(buf, []byte(", "))
keyMap := make(map[string][]string)
matches := domKeyRE.FindAllStringSubmatch(string(buf), -1)
for _, m := range matches {
keyMap[m[2]] = m[1:]
}
return keyMap, nil
}
var nonPrintableKeyRE = regexp.MustCompile(`\n\s{4}\{DomCode::(.+?), DomKey::(.+?)\}`)
// loadNonPrintable loads the non-printable key definitions.
func loadNonPrintable(keys map[rune]kb.Key, keycodeConverterMap, domKeyMap map[string][]string, layoutBuf []byte, scanCodeMap map[string][]int64) error {
buf := extract(layoutBuf, "kNonPrintableCodeMap")
matches := nonPrintableKeyRE.FindAllStringSubmatch(string(buf), -1)
for _, m := range matches {
code, key := m[1], m[2]
// get code, key definitions
dc, ok := keycodeConverterMap[code]
if !ok {
panic(fmt.Sprintf("no dom code definition for %s", code))
}
dk, ok := domKeyMap[key]
if !ok {
panic(fmt.Sprintf("no dom key definition for %s", key))
}
// some scan codes do not have names defined, so use key name
c := dk[0]
if dc[5] != "NULL" {
c = getCode(dc[5])
}
// convert rune
r, err := strconv.ParseInt(d | main | identifier_name | |
gen.go | , *flagPkg, string(constBuf), string(mapBuf))),
0644,
)
if err != nil {
log.Fatal(err)
}
// format
err = exec.Command("goimports", "-w", *flagOut).Run()
if err != nil {
log.Fatal(err)
}
// format
err = exec.Command("gofmt", "-s", "-w", *flagOut).Run()
if err != nil {
log.Fatal(err)
}
}
// loadKeys loads the dom key definitions from the chromium source tree.
func loadKeys(keys map[rune]kb.Key) error {
var err error
// load key converter data
keycodeConverterMap, err := loadKeycodeConverterData()
if err != nil {
return err
}
// load dom code map
domKeyMap, err := loadDomKeyData()
if err != nil {
return err
}
// load US layout data
layoutBuf, err := grab(domUsLayoutDataH)
if err != nil {
return err
}
// load scan code map
scanCodeMap, err := loadScanCodes(keycodeConverterMap, domKeyMap, layoutBuf)
if err != nil {
return err
}
// process printable
err = loadPrintable(keys, keycodeConverterMap, domKeyMap, layoutBuf, scanCodeMap)
if err != nil {
return err
}
// process non-printable
err = loadNonPrintable(keys, keycodeConverterMap, domKeyMap, layoutBuf, scanCodeMap)
if err != nil {
return err
}
return nil
}
var fixRE = regexp.MustCompile(`,\n\s{10,}`)
var usbKeyRE = regexp.MustCompile(`(?m)^\s*USB_KEYMAP\((.*?), (.*?), (.*?), (.*?), (.*?), (.*?), (.*?)\)`)
// loadKeycodeConverterData loads the key codes from the keycode_converter_data.inc.
func loadKeycodeConverterData() (map[string][]string, error) {
buf, err := grab(keycodeConverterDataInc)
if err != nil {
return nil, err
}
buf = fixRE.ReplaceAllLiteral(buf, []byte(", "))
domMap := make(map[string][]string)
matches := usbKeyRE.FindAllStringSubmatch(string(buf), -1)
for _, m := range matches {
vkey := m[7]
if _, ok := domMap[vkey]; ok {
panic(fmt.Sprintf("vkey %s already defined", vkey))
}
domMap[vkey] = m[1:]
}
return domMap, nil
}
// decodeRune is a wrapper around parsing a printable c++ int/char definition to a unicode
// rune value.
func decodeRune(s string) rune | if len(s) != 3 {
panic(fmt.Sprintf("expected character, got: %s", s))
}
return rune(s[1])
}
// getCode is a simple wrapper around parsing the code definition.
func getCode(s string) string {
if !strings.HasPrefix(s, `"`) || !strings.HasSuffix(s, `"`) {
panic(fmt.Sprintf("expected string, got: %s", s))
}
return s[1 : len(s)-1]
}
// addKey is a simple map add wrapper to panic if the key is already defined,
// and to lookup the correct scan code.
func addKey(keys map[rune]kb.Key, r rune, key kb.Key, scanCodeMap map[string][]int64, shouldPanic bool) {
if _, ok := keys[r]; ok {
if shouldPanic {
panic(fmt.Sprintf("rune %U (%s/%s) already defined in keys", r, key.Code, key.Key))
}
return
}
sc, ok := scanCodeMap[key.Code]
if ok {
key.Native = sc[0]
key.Windows = sc[1]
}
keys[r] = key
}
var printableKeyRE = regexp.MustCompile(`\{DomCode::(.+?), \{(.+?), (.+?)\}\}`)
// loadPrintable loads the printable key definitions.
func loadPrintable(keys map[rune]kb.Key, keycodeConverterMap, domKeyMap map[string][]string, layoutBuf []byte, scanCodeMap map[string][]int64) error {
buf := extract(layoutBuf, "kPrintableCodeMap")
matches := printableKeyRE.FindAllStringSubmatch(string(buf), -1)
for _, m := range matches {
domCode := m[1]
// ignore domCodes that are duplicates of other unicode characters
if domCode == "INTL_BACKSLASH" || domCode == "INTL_HASH" || strings.HasPrefix(domCode, "NUMPAD") {
continue
}
kc, ok := keycodeConverterMap[domCode]
if !ok {
panic(fmt.Sprintf("could not find key %s in keycode map", domCode))
}
code := getCode(kc[5])
r1, r2 := decodeRune(m[2]), decodeRune(m[3])
addKey(keys, r1, kb.Key{
Code: code,
Key: string(r1),
Text: string(r1),
Unmodified: string(r1),
Print: true,
}, scanCodeMap, true)
// shifted value is same as non-shifted, so skip
if r2 == r1 {
continue
}
// skip for duplicate keys
if r2 == '|' && domCode != "BACKSLASH" {
continue
}
addKey(keys, r2, kb.Key{
Code: code,
Key: string(r2),
Text: string(r2),
Unmodified: string(r1),
Shift: true,
Print: true,
}, scanCodeMap, true)
}
return nil
}
var domKeyRE = regexp.MustCompile(`(?m)^\s+DOM_KEY_(?:UNI|MAP)\("(.+?)",\s*(.+?),\s*(0x[0-9A-F]{4})\)`)
// loadDomKeyData loads the dom key data definitions.
func loadDomKeyData() (map[string][]string, error) {
buf, err := grab(domKeyDataInc)
if err != nil {
return nil, err
}
buf = fixRE.ReplaceAllLiteral(buf, []byte(", "))
keyMap := make(map[string][]string)
matches := domKeyRE.FindAllStringSubmatch(string(buf), -1)
for _, m := range matches {
keyMap[m[2]] = m[1:]
}
return keyMap, nil
}
var nonPrintableKeyRE = regexp.MustCompile(`\n\s{4}\{DomCode::(.+?), DomKey::(.+?)\}`)
// loadNonPrintable loads the non-printable key definitions.
func loadNonPrintable(keys map[rune]kb.Key, keycodeConverterMap, domKeyMap map[string][]string, layoutBuf []byte, scanCodeMap map[string][]int64) error {
buf := extract(layoutBuf, "kNonPrintableCodeMap")
matches := nonPrintableKeyRE.FindAllStringSubmatch(string(buf), -1)
for _, m := range matches {
code, key := m[1], m[2]
// get code, key definitions
dc, ok := keycodeConverterMap[code]
if !ok {
panic(fmt.Sprintf("no dom code definition for %s", code))
}
dk, ok := domKeyMap[key]
if !ok {
panic(fmt.Sprintf("no dom key definition for %s", key))
}
// some scan codes do not have names defined, so use key name
c := dk[0]
if dc[5] != "NULL" {
c = getCode(dc[5])
}
// convert rune
r, err := strconv.ParseInt(dk[2], 0, 32)
if err != nil {
return err
}
addKey(keys, rune(r), kb.Key{
Code: c,
Key: dk[0],
}, scanCodeMap, false)
}
return nil
}
var nameRE = regexp.MustCompile(`[A-Z][a-z]+:`)
// processKeys processes the generated keys.
func processKeys(keys map[rune]kb.Key) ([]byte, []byte, error) {
// order rune keys
idx := make([]rune, len(keys))
var i int
for c := range keys {
idx[i] = c
i++
}
sort.Slice(idx, func(a, b int) bool {
return idx[a] < idx[b]
})
// process
var constBuf, mapBuf bytes.Buffer
for _, c := range idx {
key := keys[c | {
if strings.HasPrefix(s, "0x") {
i, err := strconv.ParseInt(s, 0, 16)
if err != nil {
panic(err)
}
return rune(i)
}
if !strings.HasPrefix(s, "'") || !strings.HasSuffix(s, "'") {
panic(fmt.Sprintf("expected character, got: %s", s))
}
if len(s) == 4 {
if s[1] != '\\' {
panic(fmt.Sprintf("expected escaped character, got: %s", s))
}
return rune(s[2])
}
| identifier_body |
gen.go |
"github.com/rjeczalik/chromedp/kb"
)
var (
flagOut = flag.String("out", "keys.go", "out source")
flagPkg = flag.String("pkg", "kb", "out package name")
)
const (
// chromiumSrc is the base chromium source repo location
chromiumSrc = "https://chromium.googlesource.com/chromium/src"
// domUsLayoutDataH contains the {printable,non-printable} DomCode -> DomKey
// also contains DomKey -> VKEY (not used)
domUsLayoutDataH = chromiumSrc + "/+/master/ui/events/keycodes/dom_us_layout_data.h?format=TEXT"
// keycodeConverterDataInc contains DomKey -> Key Name
keycodeConverterDataInc = chromiumSrc + "/+/master/ui/events/keycodes/dom/keycode_converter_data.inc?format=TEXT"
// domKeyDataInc contains DomKey -> Key Name + unicode (non-printable)
domKeyDataInc = chromiumSrc + "/+/master/ui/events/keycodes/dom/dom_key_data.inc?format=TEXT"
// keyboardCodesPosixH contains the scan code definitions for posix (ie native) keys.
keyboardCodesPosixH = chromiumSrc + "/+/master/ui/events/keycodes/keyboard_codes_posix.h?format=TEXT"
// keyboardCodesWinH contains the scan code definitions for windows keys.
keyboardCodesWinH = chromiumSrc + "/+/master/ui/events/keycodes/keyboard_codes_win.h?format=TEXT"
// windowsKeyboardCodesH contains the actual #defs for windows.
windowsKeyboardCodesH = chromiumSrc + "/third_party/+/master/WebKit/Source/platform/WindowsKeyboardCodes.h?format=TEXT"
)
const (
hdr = `package %s
` + `// Code generated by gen.go. DO NOT EDIT.` + `
// DOM keys.
const (
%s)
// Keys is the map of unicode characters to their DOM key data.
var Keys = map[rune]*Key{
%s}
`
)
func main() {
var err error
flag.Parse()
// special characters
keys := map[rune]kb.Key{
'\b': {"Backspace", "Backspace", "", "", int64('\b'), int64('\b'), false, false},
'\t': {"Tab", "Tab", "", "", int64('\t'), int64('\t'), false, false},
'\r': {"Enter", "Enter", "\r", "\r", int64('\r'), int64('\r'), false, true},
}
// load keys
err = loadKeys(keys)
if err != nil {
log.Fatal(err)
}
// process keys
constBuf, mapBuf, err := processKeys(keys)
if err != nil {
log.Fatal(err)
}
// output
err = ioutil.WriteFile(
*flagOut,
[]byte(fmt.Sprintf(hdr, *flagPkg, string(constBuf), string(mapBuf))),
0644,
)
if err != nil {
log.Fatal(err)
}
// format
err = exec.Command("goimports", "-w", *flagOut).Run()
if err != nil {
log.Fatal(err)
}
// format
err = exec.Command("gofmt", "-s", "-w", *flagOut).Run()
if err != nil {
log.Fatal(err)
}
}
// loadKeys loads the dom key definitions from the chromium source tree.
func loadKeys(keys map[rune]kb.Key) error {
var err error
// load key converter data
keycodeConverterMap, err := loadKeycodeConverterData()
if err != nil {
return err
}
// load dom code map
domKeyMap, err := loadDomKeyData()
if err != nil {
return err
}
// load US layout data
layoutBuf, err := grab(domUsLayoutDataH)
if err != nil {
return err
}
// load scan code map
scanCodeMap, err := loadScanCodes(keycodeConverterMap, domKeyMap, layoutBuf)
if err != nil {
return err
}
// process printable
err = loadPrintable(keys, keycodeConverterMap, domKeyMap, layoutBuf, scanCodeMap)
if err != nil {
return err
}
// process non-printable
err = loadNonPrintable(keys, keycodeConverterMap, domKeyMap, layoutBuf, scanCodeMap)
if err != nil {
return err
}
return nil
}
var fixRE = regexp.MustCompile(`,\n\s{10,}`)
var usbKeyRE = regexp.MustCompile(`(?m)^\s*USB_KEYMAP\((.*?), (.*?), (.*?), (.*?), (.*?), (.*?), (.*?)\)`)
// loadKeycodeConverterData loads the key codes from the keycode_converter_data.inc.
func loadKeycodeConverterData() (map[string][]string, error) {
buf, err := grab(keycodeConverterDataInc)
if err != nil {
return nil, err
}
buf = fixRE.ReplaceAllLiteral(buf, []byte(", "))
domMap := make(map[string][]string)
matches := usbKeyRE.FindAllStringSubmatch(string(buf), -1)
for _, m := range matches {
vkey := m[7]
if _, ok := domMap[vkey]; ok {
panic(fmt.Sprintf("vkey %s already defined", vkey))
}
domMap[vkey] = m[1:]
}
return domMap, nil
}
// decodeRune is a wrapper around parsing a printable c++ int/char definition to a unicode
// rune value.
func decodeRune(s string) rune {
if strings.HasPrefix(s, "0x") {
i, err := strconv.ParseInt(s, 0, 16)
if err != nil {
panic(err)
}
return rune(i)
}
if !strings.HasPrefix(s, "'") || !strings.HasSuffix(s, "'") {
panic(fmt.Sprintf("expected character, got: %s", s))
}
if len(s) == 4 {
if s[1] != '\\' {
panic(fmt.Sprintf("expected escaped character, got: %s", s))
}
return rune(s[2])
}
if len(s) != 3 {
panic(fmt.Sprintf("expected character, got: %s", s))
}
return rune(s[1])
}
// getCode is a simple wrapper around parsing the code definition.
func getCode(s string) string {
if !strings.HasPrefix(s, `"`) || !strings.HasSuffix(s, `"`) {
panic(fmt.Sprintf("expected string, got: %s", s))
}
return s[1 : len(s)-1]
}
// addKey is a simple map add wrapper to panic if the key is already defined,
// and to lookup the correct scan code.
func addKey(keys map[rune]kb.Key, r rune, key kb.Key, scanCodeMap map[string][]int64, shouldPanic bool) {
if _, ok := keys[r]; ok {
if shouldPanic {
panic(fmt.Sprintf("rune %U (%s/%s) already defined in keys", r, key.Code, key.Key))
}
return
}
sc, ok := scanCodeMap[key.Code]
if ok {
key.Native = sc[0]
key.Windows = sc[1]
}
keys[r] = key
}
var printableKeyRE = regexp.MustCompile(`\{DomCode::(.+?), \{(.+?), (.+?)\}\}`)
// loadPrintable loads the printable key definitions.
func loadPrintable(keys map[rune]kb.Key, keycodeConverterMap, domKeyMap map[string][]string, layoutBuf []byte, scanCodeMap map[string][]int64) error {
buf := extract(layoutBuf, "kPrintableCodeMap")
matches := printableKeyRE.FindAllStringSubmatch(string(buf), -1)
for _, m := range matches {
domCode := m[1]
// ignore domCodes that are duplicates of other unicode characters
if domCode == "INTL_BACKSLASH" || domCode == "INTL_HASH" || strings.HasPrefix(domCode, "NUMPAD") {
continue
}
kc, ok := keycodeConverterMap[domCode]
if !ok {
panic(fmt.Sprintf("could not find key %s in keycode map", domCode))
}
code := getCode(kc[5])
r1, r2 := decodeRune(m[2]), decodeRune(m[3])
addKey(keys, r1, kb.Key{
Code: code,
Key: string(r1),
Text: string(r1),
Unmodified: string(r1),
Print: true,
}, scanCodeMap, true)
// shifted value is same as non-shifted, so skip
if r2 == r1 {
continue
}
// skip for duplicate keys
if r2 == '|' && domCode != "BACKSLASH" {
continue
}
addKey(keys, r2, kb.Key{
Code: code,
Key: string(r2),
Text: string(r2),
Unmodified: string(r1),
Shift: true,
Print: true,
}, scanCodeMap, true)
}
return nil
}
var domKeyRE = regexp.MustCompile(`(?m) | random_line_split | ||
gen.go | [0]
key.Windows = sc[1]
}
keys[r] = key
}
var printableKeyRE = regexp.MustCompile(`\{DomCode::(.+?), \{(.+?), (.+?)\}\}`)
// loadPrintable loads the printable key definitions.
func loadPrintable(keys map[rune]kb.Key, keycodeConverterMap, domKeyMap map[string][]string, layoutBuf []byte, scanCodeMap map[string][]int64) error {
buf := extract(layoutBuf, "kPrintableCodeMap")
matches := printableKeyRE.FindAllStringSubmatch(string(buf), -1)
for _, m := range matches {
domCode := m[1]
// ignore domCodes that are duplicates of other unicode characters
if domCode == "INTL_BACKSLASH" || domCode == "INTL_HASH" || strings.HasPrefix(domCode, "NUMPAD") {
continue
}
kc, ok := keycodeConverterMap[domCode]
if !ok {
panic(fmt.Sprintf("could not find key %s in keycode map", domCode))
}
code := getCode(kc[5])
r1, r2 := decodeRune(m[2]), decodeRune(m[3])
addKey(keys, r1, kb.Key{
Code: code,
Key: string(r1),
Text: string(r1),
Unmodified: string(r1),
Print: true,
}, scanCodeMap, true)
// shifted value is same as non-shifted, so skip
if r2 == r1 {
continue
}
// skip for duplicate keys
if r2 == '|' && domCode != "BACKSLASH" {
continue
}
addKey(keys, r2, kb.Key{
Code: code,
Key: string(r2),
Text: string(r2),
Unmodified: string(r1),
Shift: true,
Print: true,
}, scanCodeMap, true)
}
return nil
}
var domKeyRE = regexp.MustCompile(`(?m)^\s+DOM_KEY_(?:UNI|MAP)\("(.+?)",\s*(.+?),\s*(0x[0-9A-F]{4})\)`)
// loadDomKeyData loads the dom key data definitions.
func loadDomKeyData() (map[string][]string, error) {
buf, err := grab(domKeyDataInc)
if err != nil {
return nil, err
}
buf = fixRE.ReplaceAllLiteral(buf, []byte(", "))
keyMap := make(map[string][]string)
matches := domKeyRE.FindAllStringSubmatch(string(buf), -1)
for _, m := range matches {
keyMap[m[2]] = m[1:]
}
return keyMap, nil
}
var nonPrintableKeyRE = regexp.MustCompile(`\n\s{4}\{DomCode::(.+?), DomKey::(.+?)\}`)
// loadNonPrintable loads the non-printable key definitions.
func loadNonPrintable(keys map[rune]kb.Key, keycodeConverterMap, domKeyMap map[string][]string, layoutBuf []byte, scanCodeMap map[string][]int64) error {
buf := extract(layoutBuf, "kNonPrintableCodeMap")
matches := nonPrintableKeyRE.FindAllStringSubmatch(string(buf), -1)
for _, m := range matches {
code, key := m[1], m[2]
// get code, key definitions
dc, ok := keycodeConverterMap[code]
if !ok {
panic(fmt.Sprintf("no dom code definition for %s", code))
}
dk, ok := domKeyMap[key]
if !ok {
panic(fmt.Sprintf("no dom key definition for %s", key))
}
// some scan codes do not have names defined, so use key name
c := dk[0]
if dc[5] != "NULL" {
c = getCode(dc[5])
}
// convert rune
r, err := strconv.ParseInt(dk[2], 0, 32)
if err != nil {
return err
}
addKey(keys, rune(r), kb.Key{
Code: c,
Key: dk[0],
}, scanCodeMap, false)
}
return nil
}
var nameRE = regexp.MustCompile(`[A-Z][a-z]+:`)
// processKeys processes the generated keys.
func processKeys(keys map[rune]kb.Key) ([]byte, []byte, error) {
// order rune keys
idx := make([]rune, len(keys))
var i int
for c := range keys {
idx[i] = c
i++
}
sort.Slice(idx, func(a, b int) bool {
return idx[a] < idx[b]
})
// process
var constBuf, mapBuf bytes.Buffer
for _, c := range idx {
key := keys[c]
g, isGoCode := goCodes[c]
s := fmt.Sprintf("\\u%04x", c)
if isGoCode {
s = g
} else if key.Print {
s = fmt.Sprintf("%c", c)
}
// add key definition
v := strings.TrimPrefix(fmt.Sprintf("%#v", key), "kb.")
v = nameRE.ReplaceAllString(v, "")
mapBuf.WriteString(fmt.Sprintf("'%s': &%s,\n", s, v))
// fix 'Quote' const
if s == `\'` {
s = `'`
}
// add const definition
if (isGoCode && c != '\n') || !key.Print {
n := strings.TrimPrefix(key.Key, ".")
if n == `'` || n == `\` {
n = key.Code
}
constBuf.WriteString(fmt.Sprintf("%s = \"%s\"\n", n, s))
}
}
return constBuf.Bytes(), mapBuf.Bytes(), nil
}
var domCodeVkeyFixRE = regexp.MustCompile(`,\n\s{5,}`)
var domCodeVkeyRE = regexp.MustCompile(`(?m)^\s*\{DomCode::(.+?), (.+?)\}`)
// loadScanCodes loads the scan codes for the dom key definitions.
func loadScanCodes(keycodeConverterMap, domKeyMap map[string][]string, layoutBuf []byte) (map[string][]int64, error) {
vkeyCodeMap, err := loadPosixWinKeyboardCodes()
if err != nil {
return nil, err
}
buf := extract(layoutBuf, "kDomCodeToKeyboardCodeMap")
buf = domCodeVkeyFixRE.ReplaceAllLiteral(buf, []byte(", "))
scanCodeMap := make(map[string][]int64)
matches := domCodeVkeyRE.FindAllStringSubmatch(string(buf), -1)
for _, m := range matches {
domCode, vkey := m[1], m[2]
kc, ok := keycodeConverterMap[domCode]
if !ok {
panic(fmt.Sprintf("dom code %s not defined in keycode map", domCode))
}
sc, ok := vkeyCodeMap[vkey]
if !ok {
panic(fmt.Sprintf("vkey %s is not defined in keyboardCodeMap", vkey))
}
scanCodeMap[getCode(kc[5])] = sc
}
return scanCodeMap, nil
}
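// Illustrative note (values are an assumption, they depend on the chromium headers
// fetched at generation time): each entry maps a DOM code name to its {posix, windows}
// scan codes, e.g. a key such as "Enter" would map to the VKEY_RETURN / VK_RETURN pair.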
var defineRE = regexp.MustCompile(`(?m)^#define\s+(.+?)\s+([0-9A-Fx]+)`)
// loadPosixWinKeyboardCodes loads the native and windows keyboard scan codes
// mapped to the DOM key.
func loadPosixWinKeyboardCodes() (map[string][]int64, error) {
var err error
lookup := map[string]string{
// mac alias
"VKEY_LWIN": "0x5B",
// no idea where these are defined in chromium code base (assuming in
// windows headers)
//
// manually added here as pulled from various online docs
"VK_CANCEL": "0x03",
"VK_OEM_ATTN": "0xF0",
"VK_OEM_FINISH": "0xF1",
"VK_OEM_COPY": "0xF2",
"VK_DBE_SBCSCHAR": "0xF3",
"VK_DBE_DBCSCHAR": "0xF4",
"VK_OEM_BACKTAB": "0xF5",
"VK_OEM_AX": "0xE1",
}
// load windows key lookups
buf, err := grab(windowsKeyboardCodesH)
if err != nil {
return nil, err
}
matches := defineRE.FindAllStringSubmatch(string(buf), -1)
for _, m := range matches {
lookup[m[1]] = m[2]
}
// load posix and win keyboard codes
keyboardCodeMap := make(map[string][]int64)
err = loadKeyboardCodes(keyboardCodeMap, lookup, keyboardCodesPosixH, 0)
if err != nil {
return nil, err
}
err = loadKeyboardCodes(keyboardCodeMap, lookup, keyboardCodesWinH, 1)
if err != nil | {
return nil, err
} | conditional_block | |
XGBoost_BayesOpt.py | _y'])
merge['ground_ratio'] = merge['gro_wat_3']/merge['total_withdrawal_3']
merge['fresh_ratio'] = merge['total_withdrawal_1']/merge['total_withdrawal_3']
merge['industry_ratio'] = merge['ind_9']/merge['total_withdrawal_3']
merge['irrigation_ratio'] = merge['irrigation_3']/merge['total_withdrawal_3']
merge['livestock_ratio'] = merge['livestock_3']/merge['total_withdrawal_3']
merge['aqua_ratio'] = merge['aqua_9']/merge['total_withdrawal_3']
merge['mining_ratio'] = merge['mining_9']/merge['total_withdrawal_3']
merge['thermoelectric_ratio'] = merge['thermoelectric_9']/merge['total_withdrawal_3']
merge['dom_per_cap'] = merge['dom_sup_5']+merge['dom_sup_7']
#tempDT$dom_per_cap <- as.numeric(tempDT$dom_sup_5)+as.numeric(tempDT$dom_sup_7)
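# Illustrative note: each *_ratio feature normalises one withdrawal category by total
# withdrawals (total_withdrawal_3), e.g. irrigation_ratio = irrigation_3 / total_withdrawal_3,
# while dom_per_cap sums the two per-capita domestic supply columns.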
merge=merge[(merge['year']>2010)&(merge['year']<=2015)]
merge=merge[merge['state']=='CA']
#state_le = preprocessing.LabelEncoder()
#county_le = preprocessing.LabelEncoder()
#merge['state_enc']=state_le.fit_transform(merge['state'])
#merge['county_enc']=county_le.fit_transform(merge['county'])
col_list=list(merge.columns.values)
lags=[]
resp_list=['none','d0','d1','d2','d3','d4']
for each in resp_list:
for i in range(4,11):
merge[each+'_'+str(i+1)+'_Week_lag']=merge.groupby("fips")[each].shift(i)
lags.append(each+'_'+str(i+1)+'_Week_lag')
features=col_list[:]
merge['d0_pred'] = np.where(merge['d0']>0, 1, 0)
features.remove('year')
features.remove('valid_start')
features.remove('valid_end')
features.remove('date')
features.remove('state')
features.remove('county')
features.remove('d0')
features.remove('d1')
features.remove('d2')
features.remove('d3')
features.remove('d4')
features.remove('none')
features+=lags
#features.append('state_enc')
#features.append('county_enc')
########################
### Global Variables ###
########################
train = merge[merge['year']<2015]
test = merge[merge['year']==2015]
#features=list(train.columns.values)
#train['none']=merge['none']
target='d0_pred'
num_models=2
RANDOM_SEED=184
########################
def mean_absolute_percentage_error(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / (y_true+.0000001))) * 100
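# Illustrative example: with y_true = [100, 200] and y_pred = [90, 220] the absolute
# percentage errors are 10% and 10%, so the function returns ~10.0; the small constant
# in the denominator only guards against division by zero.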
def create_feature_map(features):
outfile = open('xgb.fmap', 'w')
for i, feat in enumerate(features):
outfile.write('{0}\t{1}\tq\n'.format(i, feat))
outfile.close()
def get_importance(gbm, features):
create_feature_map(features)
importance = gbm.get_fscore(fmap='xgb.fmap')
importance = sorted(importance.items(), key=itemgetter(1), reverse=True)
return importance
def run_single(train, test, params, features, target, random_state=0):
num_boost_round = 1000
early_stopping_rounds = 20
test_size = 0.125
verbosity=False
#verbosity=True
X_train, X_valid = train_test_split(train, test_size=test_size, random_state=random_state)
#print('Length train:', len(X_train.index))
#print('Length valid:', len(X_valid.index))
y_train = X_train[target]
y_valid = X_valid[target]
dtrain = xgb.DMatrix(X_train[features], y_train)
dvalid = xgb.DMatrix(X_valid[features], y_valid)
watchlist = [(dtrain, 'train'), (dvalid, 'eval')]
gbm = xgb.train(params, dtrain, num_boost_round, evals=watchlist, early_stopping_rounds=early_stopping_rounds, verbose_eval=verbosity)
test_prediction = gbm.predict(xgb.DMatrix(test[features]), ntree_limit=gbm.best_iteration+1)
return test_prediction
def run_single_plot(train, test, params, features, target, random_state=0):
num_boost_round = 1000
early_stopping_rounds = 20
test_size = 0.125
verbosity=True
#verbosity=True
X_train, X_valid = train_test_split(train, test_size=test_size, random_state=random_state)
#print('Length train:', len(X_train.index))
#print('Length valid:', len(X_valid.index))
y_train = X_train[target]
y_valid = X_valid[target]
dtrain = xgb.DMatrix(X_train[features], y_train)
dvalid = xgb.DMatrix(X_valid[features], y_valid)
watchlist = [(dtrain, 'train'), (dvalid, 'eval')]
gbm = xgb.train(params, dtrain, num_boost_round, evals=watchlist, early_stopping_rounds=early_stopping_rounds, verbose_eval=verbosity)
xgb.plot_importance(gbm)
plt.show()
imp = get_importance(gbm, features)
print('Importance array: ', imp)
test_prediction = gbm.predict(xgb.DMatrix(test[features]), ntree_limit=gbm.best_iteration+1)
############################################ ROC Curve
check=test_prediction
#area under the precision-recall curve
score = average_precision_score(test[target].values, check)
print('area under the precision-recall curve: {:.6f}'.format(score))
# Compute micro-average ROC curve and ROC area
fpr, tpr, _ = roc_curve(test[target].values, check)
roc_auc = auc(fpr, tpr)
#xgb.plot_importance(gbm)
#plt.show()
plt.figure()
lw = 2
plt.plot(fpr, tpr, color='darkorange',
lw=lw, label='ROC curve (area = %0.2f)' % roc_auc)
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([-0.02, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC curve')
plt.legend(loc="lower right")
plt.show()
##################################################
return test_prediction
def optim_run_single(train, features, target, params, random_state=0):
num_boost_round = 1000
early_stopping_rounds = 20
test_size = 0.125
verbosity=True
X_train, X_valid = train_test_split(train, test_size=test_size, random_state=random_state)
#print('Length train:', len(X_train.index))
#print('Length valid:', len(X_valid.index))
y2_train = X_train[target]
y2_valid = X_valid[target]
dtrain = xgb.DMatrix(X_train[features], y2_train)
dvalid = xgb.DMatrix(X_valid[features], y2_valid)
watchlist = [(dtrain, 'train'), (dvalid, 'eval')]
gbm = xgb.train(params, dtrain, num_boost_round, evals=watchlist, early_stopping_rounds=early_stopping_rounds, verbose_eval=verbosity)
return gbm.best_score
#return -1.0 * gbm['test-rmse-mean'].iloc[-1]
def multi_model(train, test, params, features, target, num_models, random_state=0):
all_preds=[]
for i in range(num_models):
preds =run_single(train, test,params, features, target, random_state)
all_preds.append(preds)
random_state=random_state+1
avg_pred=np.mean(np.array(all_preds),axis=0)
return avg_pred
def xgb_eval_single(min_child_weight, colsample_bytree, max_depth, subsample, gamma, alpha):
| params['max_depth'] = int(max_depth)
params['subsample'] = max(min(subsample, | random_state=42
eta=.05
xtrain=train
xfeatures=features
params = {
"objective": "reg:logistic",
"booster" : "gbtree",
"eval_metric": "auc",
"eta": eta,
"tree_method": 'exact',
"max_depth": max_depth,
"silent": 1,
"seed": random_state,
#"num_class" : 22,
}
params['min_child_weight'] = int(min_child_weight)
params['cosample_bytree'] = max(min(colsample_bytree, 1), 0) | identifier_body |
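# Illustrative sketch (assumption - the optimiser call itself is not part of this excerpt):
# xgb_eval_single is shaped as an objective for bayes_opt.BayesianOptimization, roughly
#   from bayes_opt import BayesianOptimization
#   bo = BayesianOptimization(xgb_eval_single, {
#       'min_child_weight': (1, 20), 'colsample_bytree': (0.1, 1), 'max_depth': (5, 15),
#       'subsample': (0.5, 1), 'gamma': (0, 10), 'alpha': (0, 10)})
#   bo.maximize(init_points=5, n_iter=25)
# Note that 'cosample_bytree' above misspells 'colsample_bytree', so that parameter is
# effectively left at XGBoost's default.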
XGBoost_BayesOpt.py | _y'])
merge['ground_ratio'] = merge['gro_wat_3']/merge['total_withdrawal_3']
merge['fresh_ratio'] = merge['total_withdrawal_1']/merge['total_withdrawal_3']
merge['industry_ratio'] = merge['ind_9']/merge['total_withdrawal_3']
merge['irrigation_ratio'] = merge['irrigation_3']/merge['total_withdrawal_3']
merge['livestock_ratio'] = merge['livestock_3']/merge['total_withdrawal_3']
merge['aqua_ratio'] = merge['aqua_9']/merge['total_withdrawal_3']
merge['mining_ratio'] = merge['mining_9']/merge['total_withdrawal_3']
merge['thermoelectric_ratio'] = merge['thermoelectric_9']/merge['total_withdrawal_3']
merge['dom_per_cap'] = merge['dom_sup_5']+merge['dom_sup_7']
#tempDT$dom_per_cap <- as.numeric(tempDT$dom_sup_5)+as.numeric(tempDT$dom_sup_7)
merge=merge[(merge['year']>2010)&(merge['year']<=2015)]
merge=merge[merge['state']=='CA']
#state_le = preprocessing.LabelEncoder()
#county_le = preprocessing.LabelEncoder()
#merge['state_enc']=state_le.fit_transform(merge['state'])
#merge['county_enc']=county_le.fit_transform(merge['county'])
col_list=list(merge.columns.values)
lags=[]
resp_list=['none','d0','d1','d2','d3','d4']
for each in resp_list:
for i in range(4,11):
merge[each+'_'+str(i+1)+'_Week_lag']=merge.groupby("fips")[each].shift(i)
lags.append(each+'_'+str(i+1)+'_Week_lag')
features=col_list[:]
merge['d0_pred'] = np.where(merge['d0']>0, 1, 0)
features.remove('year')
features.remove('valid_start')
features.remove('valid_end')
features.remove('date')
features.remove('state')
features.remove('county')
features.remove('d0')
features.remove('d1')
features.remove('d2')
features.remove('d3')
features.remove('d4')
features.remove('none')
features+=lags
#features.append('state_enc')
#features.append('county_enc')
########################
### Global Variables ###
########################
train = merge[merge['year']<2015]
test = merge[merge['year']==2015]
#features=list(train.columns.values)
#train['none']=merge['none']
target='d0_pred'
num_models=2
RANDOM_SEED=184
########################
def mean_absolute_percentage_error(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / (y_true+.0000001))) * 100
def create_feature_map(features):
outfile = open('xgb.fmap', 'w')
for i, feat in enumerate(features):
outfile.write('{0}\t{1}\tq\n'.format(i, feat))
outfile.close()
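# The file written above is the plain-text feature map consumed by Booster.get_fscore in
# get_importance below: one "<index>\t<name>\tq" line per feature ('q' marks a quantitative
# feature). Illustrative contents (feature order is an assumption):
# 0    total_withdrawal_3    q
# 1    ground_ratio          q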
def get_importance(gbm, features):
create_feature_map(features)
importance = gbm.get_fscore(fmap='xgb.fmap')
importance = sorted(importance.items(), key=itemgetter(1), reverse=True)
return importance
def run_single(train, test, params, features, target, random_state=0):
num_boost_round = 1000
early_stopping_rounds = 20
test_size = 0.125
verbosity=False
#verbosity=True
X_train, X_valid = train_test_split(train, test_size=test_size, random_state=random_state)
#print('Length train:', len(X_train.index))
#print('Length valid:', len(X_valid.index))
y_train = X_train[target]
y_valid = X_valid[target]
dtrain = xgb.DMatrix(X_train[features], y_train)
dvalid = xgb.DMatrix(X_valid[features], y_valid)
watchlist = [(dtrain, 'train'), (dvalid, 'eval')]
gbm = xgb.train(params, dtrain, num_boost_round, evals=watchlist, early_stopping_rounds=early_stopping_rounds, verbose_eval=verbosity)
test_prediction = gbm.predict(xgb.DMatrix(test[features]), ntree_limit=gbm.best_iteration+1)
return test_prediction
def run_single_plot(train, test, params, features, target, random_state=0):
num_boost_round = 1000
early_stopping_rounds = 20
test_size = 0.125
verbosity=True
#verbosity=True
X_train, X_valid = train_test_split(train, test_size=test_size, random_state=random_state)
#print('Length train:', len(X_train.index))
#print('Length valid:', len(X_valid.index))
y_train = X_train[target]
y_valid = X_valid[target]
dtrain = xgb.DMatrix(X_train[features], y_train)
dvalid = xgb.DMatrix(X_valid[features], y_valid)
watchlist = [(dtrain, 'train'), (dvalid, 'eval')]
gbm = xgb.train(params, dtrain, num_boost_round, evals=watchlist, early_stopping_rounds=early_stopping_rounds, verbose_eval=verbosity)
xgb.plot_importance(gbm)
plt.show()
imp = get_importance(gbm, features)
print('Importance array: ', imp)
test_prediction = gbm.predict(xgb.DMatrix(test[features]), ntree_limit=gbm.best_iteration+1)
############################################ ROC Curve
check=test_prediction
#area under the precision-recall curve
score = average_precision_score(test[target].values, check)
print('area under the precision-recall curve: {:.6f}'.format(score))
# Compute micro-average ROC curve and ROC area
fpr, tpr, _ = roc_curve(test[target].values, check)
roc_auc = auc(fpr, tpr)
#xgb.plot_importance(gbm)
#plt.show()
plt.figure()
lw = 2
plt.plot(fpr, tpr, color='darkorange',
lw=lw, label='ROC curve (area = %0.2f)' % roc_auc)
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([-0.02, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC curve')
plt.legend(loc="lower right")
plt.show()
##################################################
return test_prediction
def optim_run_single(train, features, target, params, random_state=0):
num_boost_round = 1000
early_stopping_rounds = 20
test_size = 0.125
verbosity=True
X_train, X_valid = train_test_split(train, test_size=test_size, random_state=random_state)
#print('Length train:', len(X_train.index))
#print('Length valid:', len(X_valid.index))
y2_train = X_train[target]
y2_valid = X_valid[target]
dtrain = xgb.DMatrix(X_train[features], y2_train)
dvalid = xgb.DMatrix(X_valid[features], y2_valid)
watchlist = [(dtrain, 'train'), (dvalid, 'eval')]
gbm = xgb.train(params, dtrain, num_boost_round, evals=watchlist, early_stopping_rounds=early_stopping_rounds, verbose_eval=verbosity)
return gbm.best_score
#return -1.0 * gbm['test-rmse-mean'].iloc[-1]
def multi_model(train, test, params, features, target,num_models, random_state=0):
all_preds=[]
for i in range(num_models):
preds =run_single(train, test,params, features, target, random_state)
all_preds.append(preds)
random_state=random_state+1
avg_pred=np.mean(np.array(all_preds),axis=0)
return avg_pred
def | (min_child_weight,colsample_bytree,max_depth,subsample,gamma,alpha):
random_state=42
eta=.05
xtrain=train
xfeatures=features
params = {
"objective": "reg:logistic",
"booster" : "gbtree",
"eval_metric": "auc",
"eta": eta,
"tree_method": 'exact',
"max_depth": max_depth,
"silent": 1,
"seed": random_state,
#"num_class" : 22,
}
params['min_child_weight'] = int(min_child_weight)
params['colsample_bytree'] = max(min(colsample_bytree, 1), 0)
params['max_depth'] = int(max_depth)
params['subsample'] = max(min(subsample, | xgb_eval_single | identifier_name |
XGBoost_BayesOpt.py | total_withdrawal_3']
merge['fresh_ratio'] = merge['total_withdrawal_1']/merge['total_withdrawal_3']
merge['industry_ratio'] = merge['ind_9']/merge['total_withdrawal_3']
merge['irrigation_ratio'] = merge['irrigation_3']/merge['total_withdrawal_3']
merge['livestock_ratio'] = merge['livestock_3']/merge['total_withdrawal_3']
merge['aqua_ratio'] = merge['aqua_9']/merge['total_withdrawal_3']
merge['mining_ratio'] = merge['mining_9']/merge['total_withdrawal_3']
merge['thermoelectric_ratio'] = merge['thermoelectric_9']/merge['total_withdrawal_3']
merge['dom_per_cap'] = merge['dom_sup_5']+merge['dom_sup_7']
#tempDT$dom_per_cap <- as.numeric(tempDT$dom_sup_5)+as.numeric(tempDT$dom_sup_7)
merge=merge[(merge['year']>2010)&(merge['year']<=2015)]
merge=merge[merge['state']=='CA']
#state_le = preprocessing.LabelEncoder()
#county_le = preprocessing.LabelEncoder()
#merge['state_enc']=state_le.fit_transform(merge['state'])
#merge['county_enc']=county_le.fit_transform(merge['county'])
col_list=list(merge.columns.values)
lags=[]
resp_list=['none','d0','d1','d2','d3','d4']
for each in resp_list:
for i in range(4,11):
merge[each+'_'+str(i+1)+'_Week_lag']=merge.groupby("fips")[each].shift(i)
lags.append(each+'_'+str(i+1)+'_Week_lag')
features=col_list[:]
merge['d0_pred'] = np.where(merge['d0']>0, 1, 0)
features.remove('year')
features.remove('valid_start')
features.remove('valid_end')
features.remove('date')
features.remove('state')
features.remove('county')
features.remove('d0')
features.remove('d1')
features.remove('d2')
features.remove('d3')
features.remove('d4')
features.remove('none')
features+=lags
#features.append('state_enc')
#features.append('county_enc')
########################
### Global Variables ###
########################
train = merge[merge['year']<2015]
test = merge[merge['year']==2015]
#features=list(train.columns.values)
#train['none']=merge['none']
target='d0_pred'
num_models=2
RANDOM_SEED=184
########################
def mean_absolute_percentage_error(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / (y_true+.0000001))) * 100
def create_feature_map(features):
outfile = open('xgb.fmap', 'w')
for i, feat in enumerate(features):
outfile.write('{0}\t{1}\tq\n'.format(i, feat))
outfile.close()
def get_importance(gbm, features):
create_feature_map(features)
importance = gbm.get_fscore(fmap='xgb.fmap')
importance = sorted(importance.items(), key=itemgetter(1), reverse=True)
return importance
def run_single(train, test, params, features, target, random_state=0):
num_boost_round = 1000
early_stopping_rounds = 20
test_size = 0.125
verbosity=False
#verbosity=True
X_train, X_valid = train_test_split(train, test_size=test_size, random_state=random_state)
#print('Length train:', len(X_train.index))
#print('Length valid:', len(X_valid.index))
y_train = X_train[target]
y_valid = X_valid[target]
dtrain = xgb.DMatrix(X_train[features], y_train)
dvalid = xgb.DMatrix(X_valid[features], y_valid)
watchlist = [(dtrain, 'train'), (dvalid, 'eval')]
gbm = xgb.train(params, dtrain, num_boost_round, evals=watchlist, early_stopping_rounds=early_stopping_rounds, verbose_eval=verbosity)
test_prediction = gbm.predict(xgb.DMatrix(test[features]), ntree_limit=gbm.best_iteration+1)
return test_prediction
def run_single_plot(train, test, params, features, target, random_state=0):
num_boost_round = 1000
early_stopping_rounds = 20
test_size = 0.125
verbosity=True
#verbosity=True
X_train, X_valid = train_test_split(train, test_size=test_size, random_state=random_state)
#print('Length train:', len(X_train.index))
#print('Length valid:', len(X_valid.index))
y_train = X_train[target]
y_valid = X_valid[target]
dtrain = xgb.DMatrix(X_train[features], y_train)
dvalid = xgb.DMatrix(X_valid[features], y_valid)
watchlist = [(dtrain, 'train'), (dvalid, 'eval')]
gbm = xgb.train(params, dtrain, num_boost_round, evals=watchlist, early_stopping_rounds=early_stopping_rounds, verbose_eval=verbosity)
xgb.plot_importance(gbm)
plt.show()
imp = get_importance(gbm, features)
print('Importance array: ', imp)
test_prediction = gbm.predict(xgb.DMatrix(test[features]), ntree_limit=gbm.best_iteration+1)
############################################ ROC Curve
check=test_prediction
#area under the precision-recall curve
score = average_precision_score(test[target].values, check)
print('area under the precision-recall curve: {:.6f}'.format(score))
# Compute micro-average ROC curve and ROC area
fpr, tpr, _ = roc_curve(test[target].values, check)
roc_auc = auc(fpr, tpr)
#xgb.plot_importance(gbm)
#plt.show()
plt.figure()
lw = 2
plt.plot(fpr, tpr, color='darkorange',
lw=lw, label='ROC curve (area = %0.2f)' % roc_auc)
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([-0.02, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC curve')
plt.legend(loc="lower right")
plt.show()
##################################################
return test_prediction
def optim_run_single(train, features, target, params, random_state=0):
num_boost_round = 1000
early_stopping_rounds = 20
test_size = 0.125
verbosity=True
X_train, X_valid = train_test_split(train, test_size=test_size, random_state=random_state)
#print('Length train:', len(X_train.index))
#print('Length valid:', len(X_valid.index))
y2_train = X_train[target]
y2_valid = X_valid[target]
dtrain = xgb.DMatrix(X_train[features], y2_train)
dvalid = xgb.DMatrix(X_valid[features], y2_valid)
watchlist = [(dtrain, 'train'), (dvalid, 'eval')]
gbm = xgb.train(params, dtrain, num_boost_round, evals=watchlist, early_stopping_rounds=early_stopping_rounds, verbose_eval=verbosity)
return gbm.best_score
#return -1.0 * gbm['test-rmse-mean'].iloc[-1]
def multi_model(train, test, params, features, target,num_models, random_state=0):
all_preds=[]
for i in range(num_models):
preds =run_single(train, test,params, features, target, random_state)
all_preds.append(preds)
random_state=random_state+1
avg_pred=np.mean(np.array(all_preds),axis=0)
return avg_pred
def xgb_eval_single(min_child_weight,colsample_bytree,max_depth,subsample,gamma,alpha):
random_state=42
eta=.05
xtrain=train
xfeatures=features
params = {
"objective": "reg:logistic",
"booster" : "gbtree",
"eval_metric": "auc",
"eta": eta,
"tree_method": 'exact',
"max_depth": max_depth,
"silent": 1,
"seed": random_state,
#"num_class" : 22,
}
params['min_child_weight'] = int(min_child_weight)
params['colsample_bytree'] = max(min(colsample_bytree, 1), 0)
params['max_depth'] = int(max_depth) | params['subsample'] = max(min(subsample, 1), 0)
params['gamma'] = max(gamma, 0) | random_line_split | |
XGBoost_BayesOpt.py | _y'])
merge['ground_ratio'] = merge['gro_wat_3']/merge['total_withdrawal_3']
merge['fresh_ratio'] = merge['total_withdrawal_1']/merge['total_withdrawal_3']
merge['industry_ratio'] = merge['ind_9']/merge['total_withdrawal_3']
merge['irrigation_ratio'] = merge['irrigation_3']/merge['total_withdrawal_3']
merge['livestock_ratio'] = merge['livestock_3']/merge['total_withdrawal_3']
merge['aqua_ratio'] = merge['aqua_9']/merge['total_withdrawal_3']
merge['mining_ratio'] = merge['mining_9']/merge['total_withdrawal_3']
merge['thermoelectric_ratio'] = merge['thermoelectric_9']/merge['total_withdrawal_3']
merge['dom_per_cap'] = merge['dom_sup_5']+merge['dom_sup_7']
#tempDT$dom_per_cap <- as.numeric(tempDT$dom_sup_5)+as.numeric(tempDT$dom_sup_7)
merge=merge[(merge['year']>2010)&(merge['year']<=2015)]
merge=merge[merge['state']=='CA']
#state_le = preprocessing.LabelEncoder()
#county_le = preprocessing.LabelEncoder()
#merge['state_enc']=state_le.fit_transform(merge['state'])
#merge['county_enc']=county_le.fit_transform(merge['county'])
col_list=list(merge.columns.values)
lags=[]
resp_list=['none','d0','d1','d2','d3','d4']
for each in resp_list:
|
features=col_list[:]
merge['d0_pred'] = np.where(merge['d0']>0, 1, 0)
features.remove('year')
features.remove('valid_start')
features.remove('valid_end')
features.remove('date')
features.remove('state')
features.remove('county')
features.remove('d0')
features.remove('d1')
features.remove('d2')
features.remove('d3')
features.remove('d4')
features.remove('none')
features+=lags
#features.append('state_enc')
#features.append('county_enc')
########################
### Global Variables ###
########################
train = merge[merge['year']<2015]
test = merge[merge['year']==2015]
#features=list(train.columns.values)
#train['none']=merge['none']
target='d0_pred'
num_models=2
RANDOM_SEED=184
########################
def mean_absolute_percentage_error(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / (y_true+.0000001))) * 100
def create_feature_map(features):
outfile = open('xgb.fmap', 'w')
for i, feat in enumerate(features):
outfile.write('{0}\t{1}\tq\n'.format(i, feat))
outfile.close()
def get_importance(gbm, features):
create_feature_map(features)
importance = gbm.get_fscore(fmap='xgb.fmap')
importance = sorted(importance.items(), key=itemgetter(1), reverse=True)
return importance
def run_single(train, test, params, features, target, random_state=0):
num_boost_round = 1000
early_stopping_rounds = 20
test_size = 0.125
verbosity=False
#verbosity=True
X_train, X_valid = train_test_split(train, test_size=test_size, random_state=random_state)
#print('Length train:', len(X_train.index))
#print('Length valid:', len(X_valid.index))
y_train = X_train[target]
y_valid = X_valid[target]
dtrain = xgb.DMatrix(X_train[features], y_train)
dvalid = xgb.DMatrix(X_valid[features], y_valid)
watchlist = [(dtrain, 'train'), (dvalid, 'eval')]
gbm = xgb.train(params, dtrain, num_boost_round, evals=watchlist, early_stopping_rounds=early_stopping_rounds, verbose_eval=verbosity)
test_prediction = gbm.predict(xgb.DMatrix(test[features]), ntree_limit=gbm.best_iteration+1)
return test_prediction
def run_single_plot(train, test, params, features, target, random_state=0):
num_boost_round = 1000
early_stopping_rounds = 20
test_size = 0.125
verbosity=True
#verbosity=True
X_train, X_valid = train_test_split(train, test_size=test_size, random_state=random_state)
#print('Length train:', len(X_train.index))
#print('Length valid:', len(X_valid.index))
y_train = X_train[target]
y_valid = X_valid[target]
dtrain = xgb.DMatrix(X_train[features], y_train)
dvalid = xgb.DMatrix(X_valid[features], y_valid)
watchlist = [(dtrain, 'train'), (dvalid, 'eval')]
gbm = xgb.train(params, dtrain, num_boost_round, evals=watchlist, early_stopping_rounds=early_stopping_rounds, verbose_eval=verbosity)
xgb.plot_importance(gbm)
plt.show()
imp = get_importance(gbm, features)
print('Importance array: ', imp)
test_prediction = gbm.predict(xgb.DMatrix(test[features]), ntree_limit=gbm.best_iteration+1)
############################################ ROC Curve
check=test_prediction
#area under the precision-recall curve
score = average_precision_score(test[target].values, check)
print('area under the precision-recall curve: {:.6f}'.format(score))
# Compute micro-average ROC curve and ROC area
fpr, tpr, _ = roc_curve(test[target].values, check)
roc_auc = auc(fpr, tpr)
#xgb.plot_importance(gbm)
#plt.show()
plt.figure()
lw = 2
plt.plot(fpr, tpr, color='darkorange',
lw=lw, label='ROC curve (area = %0.2f)' % roc_auc)
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([-0.02, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC curve')
plt.legend(loc="lower right")
plt.show()
##################################################
return test_prediction
def optim_run_single(train, features, target, params, random_state=0):
num_boost_round = 1000
early_stopping_rounds = 20
test_size = 0.125
verbosity=True
X_train, X_valid = train_test_split(train, test_size=test_size, random_state=random_state)
#print('Length train:', len(X_train.index))
#print('Length valid:', len(X_valid.index))
y2_train = X_train[target]
y2_valid = X_valid[target]
dtrain = xgb.DMatrix(X_train[features], y2_train)
dvalid = xgb.DMatrix(X_valid[features], y2_valid)
watchlist = [(dtrain, 'train'), (dvalid, 'eval')]
gbm = xgb.train(params, dtrain, num_boost_round, evals=watchlist, early_stopping_rounds=early_stopping_rounds, verbose_eval=verbosity)
return gbm.best_score
#return -1.0 * gbm['test-rmse-mean'].iloc[-1]
def multi_model(train, test, params, features, target,num_models, random_state=0):
all_preds=[]
for i in range(num_models):
preds =run_single(train, test,params, features, target, random_state)
all_preds.append(preds)
random_state=random_state+1
avg_pred=np.mean(np.array(all_preds),axis=0)
return avg_pred
def xgb_eval_single(min_child_weight,colsample_bytree,max_depth,subsample,gamma,alpha):
random_state=42
eta=.05
xtrain=train
xfeatures=features
params = {
"objective": "reg:logistic",
"booster" : "gbtree",
"eval_metric": "auc",
"eta": eta,
"tree_method": 'exact',
"max_depth": max_depth,
"silent": 1,
"seed": random_state,
#"num_class" : 22,
}
params['min_child_weight'] = int(min_child_weight)
params['colsample_bytree'] = max(min(colsample_bytree, 1), 0)
params['max_depth'] = int(max_depth)
params['subsample'] = max(min(subsample, | for i in range(4,11):
merge[each+'_'+str(i+1)+'_Week_lag']=merge.groupby("fips")[each].shift(i)
lags.append(each+'_'+str(i+1)+'_Week_lag') | conditional_block |
action.go | .Sprintf(`<input type="hidden" name="%v" value="%v"/>`,
XSRF_TAG, c.XsrfValue()))
}
// WriteBytes writes byte data into the response object.
func (c *Action) WriteBytes(bytes []byte) error {
_, err := c.ResponseWriter.Write(bytes)
if err != nil {
c.App.Server.Logger.Println("Error during write: ", err)
}
return err
}
func (c *Action) Write(content string, values ...interface{}) error {
if len(values) > 0 {
content = fmt.Sprintf(content, values...)
}
//c.SetHeader("Content-Length", strconv.Itoa(len(content)))
_, err := c.ResponseWriter.Write([]byte(content))
if err != nil {
c.App.Server.Logger.Println("Error during write: ", err)
}
return err
}
// Abort is a helper method that sends an HTTP header and an optional
// body. It is useful for returning 4xx or 5xx errors.
// Once it has been called, any return value from the handler will
// not be written to the response.
func (c *Action) Abort(status int, body string) error {
c.ResponseWriter.WriteHeader(status)
_, err := c.ResponseWriter.Write([]byte(body))
return err
}
// Redirect is a helper method for 3xx redirects.
func (c *Action) Redirect(url string, status ...int) error {
s := 302
if len(status) > 0 {
s = status[0]
}
c.ResponseWriter.Header().Set("Location", url)
c.ResponseWriter.WriteHeader(s)
_, err := c.ResponseWriter.Write([]byte("Redirecting to: " + url))
return err
}
// NotModified writes a 304 HTTP response
func (c *Action) NotModified() {
c.ResponseWriter.WriteHeader(304)
}
// NotFound writes a 404 HTTP response
func (c *Action) NotFound(message string) error {
return c.Abort(404, message)
}
// SetContentType sets the Content-Type header for an HTTP response.
// For example, c.SetContentType("json") sets the content-type to "application/json"
// If the supplied value contains a slash (/) it is set as the Content-Type
// verbatim. The return value is the content type as it was
// set, or an empty string if none was found.
func (c *Action) SetContentType(val string) string {
var ctype string
if strings.ContainsRune(val, '/') {
ctype = val
} else {
if !strings.HasPrefix(val, ".") {
val = "." + val
}
ctype = mime.TypeByExtension(val)
}
if ctype != "" {
c.SetHeader("Content-Type", ctype)
}
return ctype
}
// SetCookie adds a cookie header to the response.
func (c *Action) SetCookie(cookie *http.Cookie) {
c.AddHeader("Set-Cookie", cookie.String())
}
func (c *Action) GetCookie(cookieName string) (*http.Cookie, error) {
return c.Request.Cookie(cookieName)
}
func getCookieSig(key string, val []byte, timestamp string) string {
hm := hmac.New(sha1.New, []byte(key))
hm.Write(val)
hm.Write([]byte(timestamp))
hex := fmt.Sprintf("%02x", hm.Sum(nil))
return hex
}
func (c *Action) SetSecureCookie(name string, val string, age int64) {
//base64 encode the val
if len(c.App.AppConfig.CookieSecret) == 0 {
c.App.Server.Logger.Println("Secret Key for secure cookies has not been set. Please assign a cookie secret to web.Config.CookieSecret.")
return
}
var buf bytes.Buffer
encoder := base64.NewEncoder(base64.StdEncoding, &buf)
encoder.Write([]byte(val))
encoder.Close()
vs := buf.String()
vb := buf.Bytes()
timestamp := strconv.FormatInt(time.Now().Unix(), 10)
sig := getCookieSig(c.App.AppConfig.CookieSecret, vb, timestamp)
cookie := strings.Join([]string{vs, timestamp, sig}, "|")
c.SetCookie(NewCookie(name, cookie, age))
}
func (c *Action) GetSecureCookie(name string) (string, bool) {
for _, cookie := range c.Request.Cookies() {
if cookie.Name != name {
continue
}
parts := strings.SplitN(cookie.Value, "|", 3)
val := parts[0]
timestamp := parts[1]
sig := parts[2]
if getCookieSig(c.App.AppConfig.CookieSecret, []byte(val), timestamp) != sig {
return "", false
}
ts, _ := strconv.ParseInt(timestamp, 0, 64)
if time.Now().Unix()-31*86400 > ts {
return "", false
}
buf := bytes.NewBufferString(val)
encoder := base64.NewDecoder(base64.StdEncoding, buf)
res, _ := ioutil.ReadAll(encoder)
return string(res), true
}
return "", false
}
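// Illustrative sketch, not part of the original file: the two helpers above round-trip a
// signed value from inside an action handler, assuming AppConfig.CookieSecret is set.
// The action type, cookie name and values below are placeholders.
//
//	func (a *ProfileAction) Get() error {
//		a.SetSecureCookie("uid", "42", 3600)          // base64-encodes and HMAC-signs the value
//		if uid, ok := a.GetSecureCookie("uid"); ok {  // false on a bad signature or expired timestamp
//			return a.Write("uid=%v", uid)
//		}
//		return a.NotFound("no valid cookie")
//	}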
func (c *Action) Method() string {
return c.Request.Method
}
func (c *Action) Go(m string, anotherc ...interface{}) error {
var t reflect.Type
if len(anotherc) > 0 {
t = reflect.TypeOf(anotherc[0]).Elem()
} else {
t = reflect.TypeOf(c.C.Interface()).Elem()
}
root, ok := c.App.Actions[t]
if !ok {
return NotFound()
}
uris := strings.Split(m, "?")
tag, ok := t.FieldByName(uris[0])
if !ok {
return NotFound()
}
tagStr := tag.Tag.Get("xweb")
if tagStr != "" {
p := tagStr
ts := strings.Split(tagStr, " ")
if len(ts) >= 2 {
p = ts[1]
}
rPath := root + p + m[len(uris[0]):]
rPath = strings.Replace(rPath, "//", "/", -1)
return c.Redirect(rPath)
} else {
return c.Redirect(root + m)
}
}
func (c *Action) Flush() {
flusher, _ := c.ResponseWriter.(http.Flusher)
flusher.Flush()
}
func (c *Action) BasePath() string {
return c.App.BasePath
}
func (c *Action) Namespace() string {
return c.App.Actions[c.C.Type()]
}
func (c *Action) Include(tmplName string) interface{} {
t := c.RootTemplate.New(tmplName)
t.Funcs(c.getFuncs())
content, err := c.getTemplate(tmplName)
if err != nil {
fmt.Printf("RenderTemplate %v read err\n", tmplName)
return ""
}
tmpl, err := t.Parse(string(content))
if err != nil {
fmt.Printf("Parse %v err: %v\n", tmplName, err)
return ""
}
newbytes := bytes.NewBufferString("")
err = tmpl.Execute(newbytes, c.C.Elem().Interface())
if err == nil {
tplcontent, err := ioutil.ReadAll(newbytes)
if err != nil {
fmt.Printf("Parse %v err: %v\n", tmplName, err)
return ""
} else {
return template.HTML(string(tplcontent))
}
} else {
fmt.Printf("Parse %v err: %v\n", tmplName, err)
return ""
}
}
func (c *Action) NamedRender(name, content string, params ...*T) error {
c.f["include"] = c.Include
c.f["XsrfFormHtml"] = c.XsrfFormHtml
c.f["XsrfValue"] = c.XsrfValue
c.RootTemplate = template.New(name)
if len(params) >= 1 {
for k, v := range *params[0] {
c.T[k] = v
}
if len(params) >= 2 {
for k, v := range *params[1] {
c.f[k] = v
}
}
}
c.RootTemplate.Funcs(c.getFuncs())
tmpl, err := c.RootTemplate.Parse(string(content))
if err == nil {
newbytes := bytes.NewBufferString("")
err = tmpl.Execute(newbytes, c.C.Elem().Interface())
if err == nil {
tplcontent, err := ioutil.ReadAll(newbytes)
if err == nil |
}
}
return err
}
func (c *Action) getTemplate(tmpl string) ([]byte, error) {
if c.App.AppConfig.CacheTemplates {
return c.App.TemplateMgr.GetTemplate(tmpl)
}
path := c.App.getTemplatePath(tmpl)
if path == "" {
return nil, errors.New(fmt.Sprintf("No template file %v found", path))
}
return ioutil.ReadFile(path)
}
func (c *Action) Render(tmpl string, params ...*T) error {
content, err := c.getTemplate(tmpl)
if err == nil {
err = c.NamedRender(tmpl, string(content), params...)
}
return err
}
func (c *Action) getFuncs() template.FuncMap {
funcs := c.App.FuncMaps
if c.f != nil {
for k, v := range c.f {
funcs[k] = v
}
}
return funcs
}
func (c *Action) GetConfig(name string) interface{} {
return c.App.Config[name | {
_, err = c.ResponseWriter.Write(tplcontent)
} | conditional_block |
action.go | .Sprintf(`<input type="hidden" name="%v" value="%v"/>`,
XSRF_TAG, c.XsrfValue()))
}
// WriteBytes writes byte data into the response object.
func (c *Action) WriteBytes(bytes []byte) error {
_, err := c.ResponseWriter.Write(bytes)
if err != nil {
c.App.Server.Logger.Println("Error during write: ", err)
}
return err
}
func (c *Action) Write(content string, values ...interface{}) error {
if len(values) > 0 {
content = fmt.Sprintf(content, values...)
}
//c.SetHeader("Content-Length", strconv.Itoa(len(content)))
_, err := c.ResponseWriter.Write([]byte(content))
if err != nil {
c.App.Server.Logger.Println("Error during write: ", err)
}
return err
}
// Abort is a helper method that sends an HTTP header and an optional
// body. It is useful for returning 4xx or 5xx errors.
// Once it has been called, any return value from the handler will
// not be written to the response.
func (c *Action) Abort(status int, body string) error {
c.ResponseWriter.WriteHeader(status)
_, err := c.ResponseWriter.Write([]byte(body))
return err
}
// Redirect is a helper method for 3xx redirects.
func (c *Action) Redirect(url string, status ...int) error {
s := 302
if len(status) > 0 {
s = status[0]
}
c.ResponseWriter.Header().Set("Location", url)
c.ResponseWriter.WriteHeader(s)
_, err := c.ResponseWriter.Write([]byte("Redirecting to: " + url))
return err
}
// NotModified writes a 304 HTTP response
func (c *Action) NotModified() {
c.ResponseWriter.WriteHeader(304)
}
// NotFound writes a 404 HTTP response
func (c *Action) NotFound(message string) error {
return c.Abort(404, message)
}
// SetContentType sets the Content-Type header for an HTTP response.
// For example, c.SetContentType("json") sets the content-type to "application/json"
// If the supplied value contains a slash (/) it is set as the Content-Type
// verbatim. The return value is the content type as it was
// set, or an empty string if none was found.
func (c *Action) SetContentType(val string) string {
var ctype string
if strings.ContainsRune(val, '/') {
ctype = val
} else {
if !strings.HasPrefix(val, ".") {
val = "." + val
}
ctype = mime.TypeByExtension(val)
}
if ctype != "" {
c.SetHeader("Content-Type", ctype)
}
return ctype
}
// SetCookie adds a cookie header to the response.
func (c *Action) SetCookie(cookie *http.Cookie) {
c.AddHeader("Set-Cookie", cookie.String())
}
func (c *Action) GetCookie(cookieName string) (*http.Cookie, error) {
return c.Request.Cookie(cookieName)
}
func getCookieSig(key string, val []byte, timestamp string) string {
hm := hmac.New(sha1.New, []byte(key))
hm.Write(val)
hm.Write([]byte(timestamp))
hex := fmt.Sprintf("%02x", hm.Sum(nil))
return hex
}
func (c *Action) SetSecureCookie(name string, val string, age int64) {
//base64 encode the val
if len(c.App.AppConfig.CookieSecret) == 0 {
c.App.Server.Logger.Println("Secret Key for secure cookies has not been set. Please assign a cookie secret to web.Config.CookieSecret.")
return
}
var buf bytes.Buffer
encoder := base64.NewEncoder(base64.StdEncoding, &buf)
encoder.Write([]byte(val))
encoder.Close()
vs := buf.String()
vb := buf.Bytes()
timestamp := strconv.FormatInt(time.Now().Unix(), 10)
sig := getCookieSig(c.App.AppConfig.CookieSecret, vb, timestamp)
cookie := strings.Join([]string{vs, timestamp, sig}, "|")
c.SetCookie(NewCookie(name, cookie, age))
}
func (c *Action) GetSecureCookie(name string) (string, bool) {
for _, cookie := range c.Request.Cookies() {
if cookie.Name != name {
continue
}
parts := strings.SplitN(cookie.Value, "|", 3)
val := parts[0]
timestamp := parts[1]
sig := parts[2]
if getCookieSig(c.App.AppConfig.CookieSecret, []byte(val), timestamp) != sig {
return "", false
}
ts, _ := strconv.ParseInt(timestamp, 0, 64)
if time.Now().Unix()-31*86400 > ts {
return "", false
}
buf := bytes.NewBufferString(val)
encoder := base64.NewDecoder(base64.StdEncoding, buf)
res, _ := ioutil.ReadAll(encoder)
return string(res), true
}
return "", false
}
func (c *Action) Method() string |
func (c *Action) Go(m string, anotherc ...interface{}) error {
var t reflect.Type
if len(anotherc) > 0 {
t = reflect.TypeOf(anotherc[0]).Elem()
} else {
t = reflect.TypeOf(c.C.Interface()).Elem()
}
root, ok := c.App.Actions[t]
if !ok {
return NotFound()
}
uris := strings.Split(m, "?")
tag, ok := t.FieldByName(uris[0])
if !ok {
return NotFound()
}
tagStr := tag.Tag.Get("xweb")
if tagStr != "" {
p := tagStr
ts := strings.Split(tagStr, " ")
if len(ts) >= 2 {
p = ts[1]
}
rPath := root + p + m[len(uris[0]):]
rPath = strings.Replace(rPath, "//", "/", -1)
return c.Redirect(rPath)
} else {
return c.Redirect(root + m)
}
}
func (c *Action) Flush() {
flusher, _ := c.ResponseWriter.(http.Flusher)
flusher.Flush()
}
func (c *Action) BasePath() string {
return c.App.BasePath
}
func (c *Action) Namespace() string {
return c.App.Actions[c.C.Type()]
}
func (c *Action) Include(tmplName string) interface{} {
t := c.RootTemplate.New(tmplName)
t.Funcs(c.getFuncs())
content, err := c.getTemplate(tmplName)
if err != nil {
fmt.Printf("RenderTemplate %v read err\n", tmplName)
return ""
}
tmpl, err := t.Parse(string(content))
if err != nil {
fmt.Printf("Parse %v err: %v\n", tmplName, err)
return ""
}
newbytes := bytes.NewBufferString("")
err = tmpl.Execute(newbytes, c.C.Elem().Interface())
if err == nil {
tplcontent, err := ioutil.ReadAll(newbytes)
if err != nil {
fmt.Printf("Parse %v err: %v\n", tmplName, err)
return ""
} else {
return template.HTML(string(tplcontent))
}
} else {
fmt.Printf("Parse %v err: %v\n", tmplName, err)
return ""
}
}
func (c *Action) NamedRender(name, content string, params ...*T) error {
c.f["include"] = c.Include
c.f["XsrfFormHtml"] = c.XsrfFormHtml
c.f["XsrfValue"] = c.XsrfValue
c.RootTemplate = template.New(name)
if len(params) >= 1 {
for k, v := range *params[0] {
c.T[k] = v
}
if len(params) >= 2 {
for k, v := range *params[1] {
c.f[k] = v
}
}
}
c.RootTemplate.Funcs(c.getFuncs())
tmpl, err := c.RootTemplate.Parse(string(content))
if err == nil {
newbytes := bytes.NewBufferString("")
err = tmpl.Execute(newbytes, c.C.Elem().Interface())
if err == nil {
tplcontent, err := ioutil.ReadAll(newbytes)
if err == nil {
_, err = c.ResponseWriter.Write(tplcontent)
}
}
}
return err
}
func (c *Action) getTemplate(tmpl string) ([]byte, error) {
if c.App.AppConfig.CacheTemplates {
return c.App.TemplateMgr.GetTemplate(tmpl)
}
path := c.App.getTemplatePath(tmpl)
if path == "" {
return nil, errors.New(fmt.Sprintf("No template file %v found", path))
}
return ioutil.ReadFile(path)
}
func (c *Action) Render(tmpl string, params ...*T) error {
content, err := c.getTemplate(tmpl)
if err == nil {
err = c.NamedRender(tmpl, string(content), params...)
}
return err
}
func (c *Action) getFuncs() template.FuncMap {
funcs := c.App.FuncMaps
if c.f != nil {
for k, v := range c.f {
funcs[k] = v
}
}
return funcs
}
func (c *Action) GetConfig(name string) interface{} {
return c.App.Config[name | {
return c.Request.Method
} | identifier_body |
action.go | .Sprintf(`<input type="hidden" name="%v" value="%v"/>`,
XSRF_TAG, c.XsrfValue()))
}
// WriteBytes writes byte data into the response object.
func (c *Action) WriteBytes(bytes []byte) error {
_, err := c.ResponseWriter.Write(bytes)
if err != nil {
c.App.Server.Logger.Println("Error during write: ", err)
}
return err
}
func (c *Action) Write(content string, values ...interface{}) error {
if len(values) > 0 {
content = fmt.Sprintf(content, values...)
}
//c.SetHeader("Content-Length", strconv.Itoa(len(content)))
_, err := c.ResponseWriter.Write([]byte(content))
if err != nil {
c.App.Server.Logger.Println("Error during write: ", err)
}
return err
}
// Abort is a helper method that sends an HTTP header and an optional
// body. It is useful for returning 4xx or 5xx errors.
// Once it has been called, any return value from the handler will
// not be written to the response.
func (c *Action) Abort(status int, body string) error {
c.ResponseWriter.WriteHeader(status)
_, err := c.ResponseWriter.Write([]byte(body))
return err
}
// Redirect is a helper method for 3xx redirects.
func (c *Action) Redirect(url string, status ...int) error {
s := 302
if len(status) > 0 {
s = status[0]
}
c.ResponseWriter.Header().Set("Location", url)
c.ResponseWriter.WriteHeader(s)
_, err := c.ResponseWriter.Write([]byte("Redirecting to: " + url))
return err
}
// NotModified writes a 304 HTTP response
func (c *Action) NotModified() {
c.ResponseWriter.WriteHeader(304)
}
// NotFound writes a 404 HTTP response
func (c *Action) NotFound(message string) error {
return c.Abort(404, message)
}
// SetContentType sets the Content-Type header for an HTTP response.
// For example, c.SetContentType("json") sets the content-type to "application/json"
// If the supplied value contains a slash (/) it is set as the Content-Type
// verbatim. The return value is the content type as it was
// set, or an empty string if none was found.
func (c *Action) SetContentType(val string) string {
var ctype string
if strings.ContainsRune(val, '/') {
ctype = val
} else {
if !strings.HasPrefix(val, ".") {
val = "." + val
}
ctype = mime.TypeByExtension(val)
}
if ctype != "" {
c.SetHeader("Content-Type", ctype)
}
return ctype
}
// SetCookie adds a cookie header to the response.
func (c *Action) SetCookie(cookie *http.Cookie) {
c.AddHeader("Set-Cookie", cookie.String())
}
func (c *Action) GetCookie(cookieName string) (*http.Cookie, error) {
return c.Request.Cookie(cookieName)
}
func getCookieSig(key string, val []byte, timestamp string) string {
hm := hmac.New(sha1.New, []byte(key))
hm.Write(val)
hm.Write([]byte(timestamp))
hex := fmt.Sprintf("%02x", hm.Sum(nil))
return hex
}
func (c *Action) SetSecureCookie(name string, val string, age int64) {
//base64 encode the val
if len(c.App.AppConfig.CookieSecret) == 0 {
c.App.Server.Logger.Println("Secret Key for secure cookies has not been set. Please assign a cookie secret to web.Config.CookieSecret.")
return
}
var buf bytes.Buffer
encoder := base64.NewEncoder(base64.StdEncoding, &buf)
encoder.Write([]byte(val))
encoder.Close()
vs := buf.String()
vb := buf.Bytes()
timestamp := strconv.FormatInt(time.Now().Unix(), 10)
sig := getCookieSig(c.App.AppConfig.CookieSecret, vb, timestamp)
cookie := strings.Join([]string{vs, timestamp, sig}, "|")
c.SetCookie(NewCookie(name, cookie, age))
}
func (c *Action) GetSecureCookie(name string) (string, bool) {
for _, cookie := range c.Request.Cookies() {
if cookie.Name != name {
continue
}
parts := strings.SplitN(cookie.Value, "|", 3)
val := parts[0]
timestamp := parts[1]
sig := parts[2]
if getCookieSig(c.App.AppConfig.CookieSecret, []byte(val), timestamp) != sig {
return "", false
}
ts, _ := strconv.ParseInt(timestamp, 0, 64)
if time.Now().Unix()-31*86400 > ts {
return "", false
}
buf := bytes.NewBufferString(val)
encoder := base64.NewDecoder(base64.StdEncoding, buf)
res, _ := ioutil.ReadAll(encoder)
return string(res), true
}
return "", false
}
func (c *Action) | () string {
return c.Request.Method
}
func (c *Action) Go(m string, anotherc ...interface{}) error {
var t reflect.Type
if len(anotherc) > 0 {
t = reflect.TypeOf(anotherc[0]).Elem()
} else {
t = reflect.TypeOf(c.C.Interface()).Elem()
}
root, ok := c.App.Actions[t]
if !ok {
return NotFound()
}
uris := strings.Split(m, "?")
tag, ok := t.FieldByName(uris[0])
if !ok {
return NotFound()
}
tagStr := tag.Tag.Get("xweb")
if tagStr != "" {
p := tagStr
ts := strings.Split(tagStr, " ")
if len(ts) >= 2 {
p = ts[1]
}
rPath := root + p + m[len(uris[0]):]
rPath = strings.Replace(rPath, "//", "/", -1)
return c.Redirect(rPath)
} else {
return c.Redirect(root + m)
}
}
func (c *Action) Flush() {
flusher, _ := c.ResponseWriter.(http.Flusher)
flusher.Flush()
}
func (c *Action) BasePath() string {
return c.App.BasePath
}
func (c *Action) Namespace() string {
return c.App.Actions[c.C.Type()]
}
func (c *Action) Include(tmplName string) interface{} {
t := c.RootTemplate.New(tmplName)
t.Funcs(c.getFuncs())
content, err := c.getTemplate(tmplName)
if err != nil {
fmt.Printf("RenderTemplate %v read err\n", tmplName)
return ""
}
tmpl, err := t.Parse(string(content))
if err != nil {
fmt.Printf("Parse %v err: %v\n", tmplName, err)
return ""
}
newbytes := bytes.NewBufferString("")
err = tmpl.Execute(newbytes, c.C.Elem().Interface())
if err == nil {
tplcontent, err := ioutil.ReadAll(newbytes)
if err != nil {
fmt.Printf("Parse %v err: %v\n", tmplName, err)
return ""
} else {
return template.HTML(string(tplcontent))
}
} else {
fmt.Printf("Parse %v err: %v\n", tmplName, err)
return ""
}
}
func (c *Action) NamedRender(name, content string, params ...*T) error {
c.f["include"] = c.Include
c.f["XsrfFormHtml"] = c.XsrfFormHtml
c.f["XsrfValue"] = c.XsrfValue
c.RootTemplate = template.New(name)
if len(params) >= 1 {
for k, v := range *params[0] {
c.T[k] = v
}
if len(params) >= 2 {
for k, v := range *params[1] {
c.f[k] = v
}
}
}
c.RootTemplate.Funcs(c.getFuncs())
tmpl, err := c.RootTemplate.Parse(string(content))
if err == nil {
newbytes := bytes.NewBufferString("")
err = tmpl.Execute(newbytes, c.C.Elem().Interface())
if err == nil {
tplcontent, err := ioutil.ReadAll(newbytes)
if err == nil {
_, err = c.ResponseWriter.Write(tplcontent)
}
}
}
return err
}
func (c *Action) getTemplate(tmpl string) ([]byte, error) {
if c.App.AppConfig.CacheTemplates {
return c.App.TemplateMgr.GetTemplate(tmpl)
}
path := c.App.getTemplatePath(tmpl)
if path == "" {
return nil, errors.New(fmt.Sprintf("No template file %v found", path))
}
return ioutil.ReadFile(path)
}
func (c *Action) Render(tmpl string, params ...*T) error {
content, err := c.getTemplate(tmpl)
if err == nil {
err = c.NamedRender(tmpl, string(content), params...)
}
return err
}
func (c *Action) getFuncs() template.FuncMap {
funcs := c.App.FuncMaps
if c.f != nil {
for k, v := range c.f {
funcs[k] = v
}
}
return funcs
}
func (c *Action) GetConfig(name string) interface{} {
return c.App.Config[name]
| Method | identifier_name |
action.go |
type Mapper struct {
}
type T map[string]interface{}
func XsrfName() string {
return XSRF_TAG
}
func (c *Action) XsrfValue() string {
var val string = ""
cookie, err := c.GetCookie(XSRF_TAG)
if err != nil {
val = uuid.NewRandom().String()
c.SetCookie(NewCookie(XSRF_TAG, val, c.App.AppConfig.SessionTimeout))
} else {
val = cookie.Value
}
return val
}
func (c *Action) XsrfFormHtml() template.HTML {
return template.HTML(fmt.Sprintf(`<input type="hidden" name="%v" value="%v"/>`,
XSRF_TAG, c.XsrfValue()))
}
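// Illustrative sketch (assumption, not from the original file): NamedRender registers this
// method in the template FuncMap under the name "XsrfFormHtml", so a template rendered by
// Render/NamedRender can emit the hidden anti-CSRF field inside a form:
//
//	<form method="POST" action="/save">
//	    {{XsrfFormHtml}}
//	    <input type="submit" value="Save"/>
//	</form>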
// WriteBytes writes byte data into the response object.
func (c *Action) WriteBytes(bytes []byte) error {
_, err := c.ResponseWriter.Write(bytes)
if err != nil {
c.App.Server.Logger.Println("Error during write: ", err)
}
return err
}
func (c *Action) Write(content string, values ...interface{}) error {
if len(values) > 0 {
content = fmt.Sprintf(content, values...)
}
//c.SetHeader("Content-Length", strconv.Itoa(len(content)))
_, err := c.ResponseWriter.Write([]byte(content))
if err != nil {
c.App.Server.Logger.Println("Error during write: ", err)
}
return err
}
// Abort is a helper method that sends an HTTP header and an optional
// body. It is useful for returning 4xx or 5xx errors.
// Once it has been called, any return value from the handler will
// not be written to the response.
func (c *Action) Abort(status int, body string) error {
c.ResponseWriter.WriteHeader(status)
_, err := c.ResponseWriter.Write([]byte(body))
return err
}
// Redirect is a helper method for 3xx redirects.
func (c *Action) Redirect(url string, status ...int) error {
s := 302
if len(status) > 0 {
s = status[0]
}
c.ResponseWriter.Header().Set("Location", url)
c.ResponseWriter.WriteHeader(s)
_, err := c.ResponseWriter.Write([]byte("Redirecting to: " + url))
return err
}
// NotModified writes a 304 HTTP response
func (c *Action) NotModified() {
c.ResponseWriter.WriteHeader(304)
}
// NotFound writes a 404 HTTP response
func (c *Action) NotFound(message string) error {
return c.Abort(404, message)
}
// SetContentType sets the Content-Type header for an HTTP response.
// For example, c.SetContentType("json") sets the content-type to "application/json"
// If the supplied value contains a slash (/) it is set as the Content-Type
// verbatim. The return value is the content type as it was
// set, or an empty string if none was found.
func (c *Action) SetContentType(val string) string {
var ctype string
if strings.ContainsRune(val, '/') {
ctype = val
} else {
if !strings.HasPrefix(val, ".") {
val = "." + val
}
ctype = mime.TypeByExtension(val)
}
if ctype != "" {
c.SetHeader("Content-Type", ctype)
}
return ctype
}
// SetCookie adds a cookie header to the response.
func (c *Action) SetCookie(cookie *http.Cookie) {
c.AddHeader("Set-Cookie", cookie.String())
}
func (c *Action) GetCookie(cookieName string) (*http.Cookie, error) {
return c.Request.Cookie(cookieName)
}
func getCookieSig(key string, val []byte, timestamp string) string {
hm := hmac.New(sha1.New, []byte(key))
hm.Write(val)
hm.Write([]byte(timestamp))
hex := fmt.Sprintf("%02x", hm.Sum(nil))
return hex
}
func (c *Action) SetSecureCookie(name string, val string, age int64) {
//base64 encode the val
if len(c.App.AppConfig.CookieSecret) == 0 {
c.App.Server.Logger.Println("Secret Key for secure cookies has not been set. Please assign a cookie secret to web.Config.CookieSecret.")
return
}
var buf bytes.Buffer
encoder := base64.NewEncoder(base64.StdEncoding, &buf)
encoder.Write([]byte(val))
encoder.Close()
vs := buf.String()
vb := buf.Bytes()
timestamp := strconv.FormatInt(time.Now().Unix(), 10)
sig := getCookieSig(c.App.AppConfig.CookieSecret, vb, timestamp)
cookie := strings.Join([]string{vs, timestamp, sig}, "|")
c.SetCookie(NewCookie(name, cookie, age))
}
func (c *Action) GetSecureCookie(name string) (string, bool) {
for _, cookie := range c.Request.Cookies() {
if cookie.Name != name {
continue
}
parts := strings.SplitN(cookie.Value, "|", 3)
val := parts[0]
timestamp := parts[1]
sig := parts[2]
if getCookieSig(c.App.AppConfig.CookieSecret, []byte(val), timestamp) != sig {
return "", false
}
ts, _ := strconv.ParseInt(timestamp, 0, 64)
if time.Now().Unix()-31*86400 > ts {
return "", false
}
buf := bytes.NewBufferString(val)
encoder := base64.NewDecoder(base64.StdEncoding, buf)
res, _ := ioutil.ReadAll(encoder)
return string(res), true
}
return "", false
}
func (c *Action) Method() string {
return c.Request.Method
}
func (c *Action) Go(m string, anotherc ...interface{}) error {
var t reflect.Type
if len(anotherc) > 0 {
t = reflect.TypeOf(anotherc[0]).Elem()
} else {
t = reflect.TypeOf(c.C.Interface()).Elem()
}
root, ok := c.App.Actions[t]
if !ok {
return NotFound()
}
uris := strings.Split(m, "?")
tag, ok := t.FieldByName(uris[0])
if !ok {
return NotFound()
}
tagStr := tag.Tag.Get("xweb")
if tagStr != "" {
p := tagStr
ts := strings.Split(tagStr, " ")
if len(ts) >= 2 {
p = ts[1]
}
rPath := root + p + m[len(uris[0]):]
rPath = strings.Replace(rPath, "//", "/", -1)
return c.Redirect(rPath)
} else {
return c.Redirect(root + m)
}
}
func (c *Action) Flush() {
flusher, _ := c.ResponseWriter.(http.Flusher)
flusher.Flush()
}
func (c *Action) BasePath() string {
return c.App.BasePath
}
func (c *Action) Namespace() string {
return c.App.Actions[c.C.Type()]
}
func (c *Action) Include(tmplName string) interface{} {
t := c.RootTemplate.New(tmplName)
t.Funcs(c.getFuncs())
content, err := c.getTemplate(tmplName)
if err != nil {
fmt.Printf("RenderTemplate %v read err\n", tmplName)
return ""
}
tmpl, err := t.Parse(string(content))
if err != nil {
fmt.Printf("Parse %v err: %v\n", tmplName, err)
return ""
}
newbytes := bytes.NewBufferString("")
err = tmpl.Execute(newbytes, c.C.Elem().Interface())
if err == nil {
tplcontent, err := ioutil.ReadAll(newbytes)
if err != nil {
fmt.Printf("Parse %v err: %v\n", tmplName, err)
return ""
} else {
return template.HTML(string(tplcontent))
}
} else {
fmt.Printf("Parse %v err: %v\n", tmplName, err)
return ""
}
}
func (c *Action) NamedRender(name, content string, params ...*T) error {
c.f["include"] = c.Include
c.f["XsrfFormHtml"] = c.XsrfFormHtml
c.f["XsrfValue"] = c.XsrfValue
c.RootTemplate = template.New(name)
if len(params) >= 1 {
for k, v := range *params[0] {
c.T[k] = v
}
if len(params) >= 2 {
for k, v := range *params[1] {
c.f[k] = v
}
}
}
c.RootTemplate.Funcs(c.getFuncs())
tmpl, err := c.RootTemplate.Parse(string(content))
if err == nil {
newbytes := bytes.NewBufferString("")
err = tmpl.Execute(newbytes, c.C.Elem().Interface())
if err == nil {
tplcontent, err := ioutil.ReadAll(newbytes)
if err == nil {
_, err = c.ResponseWriter.Write(tplcontent)
}
}
}
return err
}
func (c *Action) getTemplate(tmpl string) ([]byte, error) {
if c.App.AppConfig.CacheTemplates {
return c.App.TemplateMgr.GetTemplate(tmpl)
}
path := c.App.getTemplatePath(tmpl)
if path == "" {
return nil, errors.New | Session session.SessionStore
T T
f T
RootTemplate *template.Template
} | random_line_split | |
cnos_network_driver_rest.py | KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Implements CNOS config over REST API Client
"""
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from networking_lenovo.ml2 import config as conf
from networking_lenovo.ml2 import constants as const
from networking_lenovo.ml2 import exceptions as cexc
from networking_lenovo.ml2 import nos_db_v2
from requests.utils import quote
import requests
from rest_client import LenovoRestClient
LOG = logging.getLogger(__name__)
class LenovoCNOSDriverREST(object):
"""CNOS Driver Main Class."""
VLAN_REST_OBJ = "nos/api/cfg/vlan/"
VLAN_IFACE_REST_OBJ = "nos/api/cfg/vlan_interface/"
REST_TCP_PORT_STR = "rest_tcp_port"
REST_DEFAULT_PORT = 8090
REST_DEFAULT_PORT_HTTPS = 443
REST_USE_HTTPS_STR = "use_ssl"
def __init__(self):
self.switches = conf.ML2MechLenovoConfig.nos_dict
| ############ Private Methods ############################
def _dbg_str(self, host, op, vlan_id,
vlan_name=None, interface=None, intf_type=None):
"""
Construct a string displayed in debug messages or exceptions
for the main operations
"""
dbg_fmt = "host %s %s vlan %d"
args_lst = [host, op, vlan_id]
if vlan_name:
dbg_fmt += " (%s)"
args_lst.append(vlan_name)
if interface and intf_type:
dbg_fmt += " on interface %s(type %s)"
args_lst.extend([interface, intf_type])
dbg_str = dbg_fmt % tuple(args_lst)
return dbg_str
def _connect(self, host):
""" Connect to the switch """
user = self.switches[host, const.USERNAME]
password = self.switches[host, const.PASSWORD]
https_str = self.switches.get((host, self.REST_USE_HTTPS_STR),
"true").lower()
use_https = True
if https_str != "true":
use_https = False
default_port = self.REST_DEFAULT_PORT
if use_https:
default_port = self.REST_DEFAULT_PORT_HTTPS
tcp_port = self.switches.get((host, self.REST_TCP_PORT_STR),
default_port)
conn = LenovoRestClient(host, user, password, tcp_port, use_https)
try:
conn.login()
except Exception as e:
raise cexc.NOSConnectFailed(nos_host=host, exc=e)
return conn
def _check_process_resp(self, resp, expected_fields=None):
"""
Check that an HTTP response was OK and in valid JSON format
If it was, check that the expected fields are present in the JSON response
Otherwise it raises a NOSRestHTTPError exception
Returns:
the JSON response
"""
if resp.status_code != LenovoRestClient.RESP_CODE_OK:
raise cexc.NOSRestHTTPError(http_code=resp.status_code,
http_reason=resp.reason, http_op=resp.request.method,
url=resp.url, http_response=resp.text)
rj = resp.json()
if not expected_fields:
return rj
for field in expected_fields:
try:
val = rj[field]
except KeyError:
raise cexc.NOSJsonFieldNotFound(field=field, url=resp.url, json=rj)
return rj
def _create_vlan(self, conn, vlan_id, vlan_name):
"""
Internal method to create the vlan
Parameters:
conn - connection handler
vlan_id - vlan identifier
vlan_name - vlan name
"""
req_js = {}
req_js['vlan_id'] = vlan_id
req_js['vlan_name'] = vlan_name
req_js['admin_state'] = 'up'
resp = conn.post(self.VLAN_REST_OBJ, req_js)
self._check_process_resp(resp)
def _conf_intf(self, conn, interface, mode, pvid, vlan_list):
"""
Internal method to configure bridgeport for an interface
Parameters:
conn - connection handler
interface - interface identifier (name)
mode - 'access'(untagged) or 'trunk'(tagged)
pvid - interface default vlan id
vlan_list - list of vlans the interface belongs to
"""
if not vlan_list:
raise Exception('The interface should be in at least one vlan')
if (mode == 'access') and (len(vlan_list) > 1):
raise Exception('An access port cannot be in multiple vlans')
if pvid not in vlan_list:
raise Exception('The pvid should be in the list of vlans')
req_js = {}
req_js['if_name'] = interface
req_js['bridgeport_mode'] = mode
req_js['pvid'] = pvid
req_js['vlans'] = vlan_list
obj = self.VLAN_IFACE_REST_OBJ + quote(interface, safe='')
resp = conn.put(obj, req_js)
return resp
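# Illustrative request only (interface and vlan values are assumptions): a call such as
# _conf_intf(conn, 'Ethernet1/10', 'trunk', 1, [1, 200]) issues
# PUT nos/api/cfg/vlan_interface/Ethernet1%2F10
# with the JSON body {"if_name": "Ethernet1/10", "bridgeport_mode": "trunk", "pvid": 1, "vlans": [1, 200]}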
def _get_vlist(self, vlist):
"""
Internal method to obtain a vlan list from the JSON answer
from the switch
Parameters:
vlist - the list of vlans the port belongs to,
or the special strings "all" or "none"
"""
if vlist == "all":
return list(range(1, 4095))
elif vlist == "none":
return []
elif type(vlist) is not list:
raise Exception("Unexpected vlan list: " + str(vlist))
else:
return vlist
def _add_intf_to_vlan(self, conn, vlan_id, interface):
"""
Internal method to add an interface to a vlan
Parameters:
conn - connection handler
vlan_id - vlan identifier
interface - interface identifier (name)
"""
obj = self.VLAN_IFACE_REST_OBJ + quote(interface, safe='')
resp = conn.get(obj)
intf_info = self._check_process_resp(resp, expected_fields=['vlans', 'pvid'])
crt_vlist = self._get_vlist(intf_info['vlans'])
if vlan_id in crt_vlist:
return
new_vlist = crt_vlist[ : ]
new_vlist.append(vlan_id)
pvid = intf_info['pvid']
mode = 'trunk'
resp = self._conf_intf(conn, interface, mode, pvid, new_vlist)
self._check_process_resp(resp)
def _rem_intf_from_vlan(self, conn, vlan_id, interface):
"""
Internal method to remove an interface from a vlan
Parameters:
conn - connection handler
vlan_id - vlan identifier
interface - interface identifier (name)
"""
obj = self.VLAN_IFACE_REST_OBJ + quote(interface, safe='')
resp = conn.get(obj)
intf_info = self._check_process_resp(resp, expected_fields=['vlans', 'pvid'])
crt_vlist = self._get_vlist(intf_info['vlans'])
if vlan_id not in crt_vlist:
return
new_vlist = crt_vlist[ : ]
new_vlist.remove(vlan_id)
pvid = intf_info['pvid']
if not new_vlist:
raise Exception('Port ' + str(interface) + ' was only in vlan ' + str(vlan_id))
if pvid == vlan_id:
pvid = new_vlist[0]
if len(new_vlist) > 1:
mode = 'trunk'
else:
mode = 'access'
resp = self._conf_intf(conn, interface, mode, pvid, new_vlist)
self._check_process_resp(resp)
def _get_ifname(self, intf_type, interface):
"""
Internal method to obtain the interface name based on its type and number
Parameters:
intf_type - interface type (port or portchannel)
interface - interface number
"""
if intf_type == 'port':
ifname = 'Ethernet' + str(interface)
elif intf_type == 'portchannel':
ifname = 'po' + str(interface)
else:
raise Exception("Unknown interface type: " + intf_type)
return ifname
############# Public Methods ############################
def delete_vlan(self, host, vlan_id):
"""Delete a VLAN on CNOS Switch given the VLAN ID."""
dbg_str = self._dbg_str(host, "delete", vlan_id)
LOG.debug(dbg_str)
conn = self._connect(host)
obj = self.VLAN_REST_OBJ + str(vlan_id)
resp = conn.delete(obj)
conn.close()
def enable_vlan_on_trunk_int(self, host, vlan_id, intf_type, interface):
"""Enable a VLAN on a trunk interface."""
dbg_str = self._dbg_str(host, "enable", vlan_id,
interface=interface, intf_type=intf_type)
LOG.debug(dbg_str)
conn = self._connect(host)
try:
if_name = self._get_ifname(intf_type, interface)
self._add_intf_to_vlan(conn, vlan_id, if_name)
except Exception as e:
raise cexc.NOSConfigFailed(config=dbg_str, exc=e)
conn.close()
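# Illustrative usage (switch address, vlan and port number are assumptions): the ML2
# mechanism driver would call this roughly as
# LenovoCNOSDriverREST().enable_vlan_on_trunk_int('10.240.0.10', 100, 'port', '1/10')
# which maps the interface to 'Ethernet1/10' via _get_ifname before adding it to vlan 100.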
def disable_vlan_on_trunk | random_line_split | |
cnos_network_driver_rest.py | KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Implements CNOS config over REST API Client
"""
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from networking_lenovo.ml2 import config as conf
from networking_lenovo.ml2 import constants as const
from networking_lenovo.ml2 import exceptions as cexc
from networking_lenovo.ml2 import nos_db_v2
from requests.utils import quote
import requests
from rest_client import LenovoRestClient
LOG = logging.getLogger(__name__)
class LenovoCNOSDriverREST(object):
"""CNOS Driver Main Class."""
VLAN_REST_OBJ = "nos/api/cfg/vlan/"
VLAN_IFACE_REST_OBJ = "nos/api/cfg/vlan_interface/"
REST_TCP_PORT_STR = "rest_tcp_port"
REST_DEFAULT_PORT = 8090
REST_DEFAULT_PORT_HTTPS = 443
REST_USE_HTTPS_STR = "use_ssl"
def __init__(self):
self.switches = conf.ML2MechLenovoConfig.nos_dict
############ Private Methods ############################
def _dbg_str(self, host, op, vlan_id,
vlan_name=None, interface=None, intf_type=None):
"""
Construct a string displayed in debug messages or exceptions
for the main operations
"""
dbg_fmt = "host %s %s vlan %d"
args_lst = [host, op, vlan_id]
if vlan_name:
dbg_fmt += " (%s)"
args_lst.append(vlan_name)
if interface and intf_type:
dbg_fmt += " on interface %s(type %s)"
args_lst.extend([interface, intf_type])
dbg_str = dbg_fmt % tuple(args_lst)
return dbg_str
def _connect(self, host):
""" Connect to the switch """
user = self.switches[host, const.USERNAME]
password = self.switches[host, const.PASSWORD]
https_str = self.switches.get((host, self.REST_USE_HTTPS_STR),
"true").lower()
use_https = True
if https_str != "true":
use_https = False
default_port = self.REST_DEFAULT_PORT
if use_https:
default_port = self.REST_DEFAULT_PORT_HTTPS
tcp_port = self.switches.get((host, self.REST_TCP_PORT_STR),
default_port)
conn = LenovoRestClient(host, user, password, tcp_port, use_https)
try:
conn.login()
except Exception as e:
raise cexc.NOSConnectFailed(nos_host=host, exc=e)
return conn
def _check_process_resp(self, resp, expected_fields=None):
"""
Check that an HTTP response was OK and in valid JSON format
If it was, check that the expected fields are present in the JSON response
Otherwise it raises a NOSRestHTTPError exception
Returns:
the JSON response
"""
if resp.status_code != LenovoRestClient.RESP_CODE_OK:
raise cexc.NOSRestHTTPError(http_code=resp.status_code,
http_reason=resp.reason, http_op=resp.request.method,
url=resp.url, http_response=resp.text)
rj = resp.json()
if not expected_fields:
return rj
for field in expected_fields:
try:
val = rj[field]
except KeyError:
raise cexc.NOSJsonFieldNotFound(field=field, url=resp.url, json=rj)
return rj
def _create_vlan(self, conn, vlan_id, vlan_name):
"""
Internal method to create the vlan
Parameters:
conn - connection handler
vlan_id - vlan identifier
vlan_name - vlan name
"""
req_js = {}
req_js['vlan_id'] = vlan_id
req_js['vlan_name'] = vlan_name
req_js['admin_state'] = 'up'
resp = conn.post(self.VLAN_REST_OBJ, req_js)
self._check_process_resp(resp)
def _conf_intf(self, conn, interface, mode, pvid, vlan_list):
"""
Internal method to configure bridgeport for an interface
Parameters:
conn - connection handler
interface - interface identifier (name)
mode - 'access'(untagged) or 'trunk'(tagged)
pvid - interface default vlan id
vlan_list - list of vlans the interface belongs to
"""
if not vlan_list:
raise Exception('The interface should be in at least one vlan')
if (mode == 'access') and (len(vlan_list) > 1):
raise Exception('An access port cannot be in multiple vlans')
if pvid not in vlan_list:
raise Exception('The pvid should be in the list of vlans')
req_js = {}
req_js['if_name'] = interface
req_js['bridgeport_mode'] = mode
req_js['pvid'] = pvid
req_js['vlans'] = vlan_list
obj = self.VLAN_IFACE_REST_OBJ + quote(interface, safe='')
resp = conn.put(obj, req_js)
return resp
def _get_vlist(self, vlist):
"""
Internal method to obtain a vlan list from the JSON answer
from the switch
Parameters:
vlist - the list of vlans the port belongs to,
or the special strings "all" or "none"
"""
if vlist == "all":
|
elif vlist == "none":
return []
elif type(vlist) is not list:
raise Exception("Unexpected vlan list: " + str(vlist))
else:
return vlist
def _add_intf_to_vlan(self, conn, vlan_id, interface):
"""
Internal method to add an interface to a vlan
Parameters:
conn - connection handler
vlan_id - vlan identifier
interface - interface identifier (name)
"""
obj = self.VLAN_IFACE_REST_OBJ + quote(interface, safe='')
resp = conn.get(obj)
intf_info = self._check_process_resp(resp, expected_fields=['vlans', 'pvid'])
crt_vlist = self._get_vlist(intf_info['vlans'])
if vlan_id in crt_vlist:
return
new_vlist = crt_vlist[ : ]
new_vlist.append(vlan_id)
pvid = intf_info['pvid']
mode = 'trunk'
resp = self._conf_intf(conn, interface, mode, pvid, new_vlist)
self._check_process_resp(resp)
def _rem_intf_from_vlan(self, conn, vlan_id, interface):
"""
Internal method to remove an interface from a vlan
Parameters:
conn - connection handler
vlan_id - vlan identifier
interface - interface identifier (name)
"""
obj = self.VLAN_IFACE_REST_OBJ + quote(interface, safe='')
resp = conn.get(obj)
intf_info = self._check_process_resp(resp, expected_fields=['vlans', 'pvid'])
crt_vlist = self._get_vlist(intf_info['vlans'])
if vlan_id not in crt_vlist:
return
new_vlist = crt_vlist[ : ]
new_vlist.remove(vlan_id)
pvid = intf_info['pvid']
if not new_vlist:
raise Exception('Port ' + str(interface) + ' was only in vlan ' + str(vlan_id))
if pvid == vlan_id:
pvid = new_vlist[0]
if len(new_vlist) > 1:
mode = 'trunk'
else:
mode = 'access'
resp = self._conf_intf(conn, interface, mode, pvid, new_vlist)
self._check_process_resp(resp)
def _get_ifname(self, intf_type, interface):
"""
Internal method to obtain the interface name based on its type and number
Parameters:
intf_type - interface type (port or portchannel)
interface - interface number
"""
if intf_type == 'port':
ifname = 'Ethernet' + str(interface)
elif intf_type == 'portchannel':
ifname = 'po' + str(interface)
else:
raise Exception("Unknown interface type: " + intf_type)
return ifname
############# Public Methods ############################
def delete_vlan(self, host, vlan_id):
"""Delete a VLAN on CNOS Switch given the VLAN ID."""
dbg_str = self._dbg_str(host, "delete", vlan_id)
LOG.debug(dbg_str)
conn = self._connect(host)
obj = self.VLAN_REST_OBJ + str(vlan_id)
resp = conn.delete(obj)
conn.close()
def enable_vlan_on_trunk_int(self, host, vlan_id, intf_type, interface):
"""Enable a VLAN on a trunk interface."""
dbg_str = self._dbg_str(host, "enable", vlan_id,
interface=interface, intf_type=intf_type)
LOG.debug(dbg_str)
conn = self._connect(host)
try:
if_name = self._get_ifname(intf_type, interface)
self._add_intf_to_vlan(conn, vlan_id, if_name)
except Exception as e:
raise cexc.NOSConfigFailed(config=dbg_str, exc=e)
conn.close()
def disable_vlan_on_tr | return list(range(1, 4095)) | conditional_block |
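The `conditional_block` row above elides the branch of `_get_vlist` that expands the special `"all"` value into a concrete list of VLAN ids. A minimal, self-contained sketch of that normalisation logic (the standalone function name is invented for illustration, not taken from the driver) might look like:

```python
# Sketch of the VLAN-list normalisation performed by _get_vlist.
# The function name is an assumption used only for this example.

def normalize_vlan_list(vlist):
    """Turn the switch's JSON 'vlans' field into a concrete list of ids."""
    if vlist == "all":
        # VLAN ids 1..4094 are the full usable range on most switches.
        return list(range(1, 4095))
    if vlist == "none":
        return []
    if not isinstance(vlist, list):
        raise ValueError("Unexpected vlan list: %r" % (vlist,))
    return vlist

if __name__ == "__main__":
    print(normalize_vlan_list("none"))        # []
    print(normalize_vlan_list([10, 20, 30]))  # [10, 20, 30]
    print(len(normalize_vlan_list("all")))    # 4094
```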
cnos_network_driver_rest.py | KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Implements CNOS config over REST API Client
"""
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from networking_lenovo.ml2 import config as conf
from networking_lenovo.ml2 import constants as const
from networking_lenovo.ml2 import exceptions as cexc
from networking_lenovo.ml2 import nos_db_v2
from requests.utils import quote
import requests
from rest_client import LenovoRestClient
LOG = logging.getLogger(__name__)
class LenovoCNOSDriverREST(object):
"""CNOS Driver Main Class."""
VLAN_REST_OBJ = "nos/api/cfg/vlan/"
VLAN_IFACE_REST_OBJ = "nos/api/cfg/vlan_interface/"
REST_TCP_PORT_STR = "rest_tcp_port"
REST_DEFAULT_PORT = 8090
REST_DEFAULT_PORT_HTTPS = 443
REST_USE_HTTPS_STR = "use_ssl"
def __init__(self):
self.switches = conf.ML2MechLenovoConfig.nos_dict
############ Private Methods ############################
def _dbg_str(self, host, op, vlan_id,
vlan_name=None, interface=None, intf_type=None):
"""
Construct a string displayed in debug messages or exceptions
for the main operations
"""
dbg_fmt = "host %s %s vlan %d"
args_lst = [host, op, vlan_id]
if vlan_name:
dbg_fmt += " (%s)"
args_lst.append(vlan_name)
if interface and intf_type:
dbg_fmt += " on interface %s(type %s)"
args_lst.extend([interface, intf_type])
dbg_str = dbg_fmt % tuple(args_lst)
return dbg_str
def _connect(self, host):
""" Connect to the switch """
user = self.switches[host, const.USERNAME]
password = self.switches[host, const.PASSWORD]
https_str = self.switches.get((host, self.REST_USE_HTTPS_STR),
"true").lower()
use_https = True
if https_str != "true":
use_https = False
default_port = self.REST_DEFAULT_PORT
if use_https:
default_port = self.REST_DEFAULT_PORT_HTTPS
tcp_port = self.switches.get((host, self.REST_TCP_PORT_STR),
default_port)
conn = LenovoRestClient(host, user, password, tcp_port, use_https)
try:
conn.login()
except Exception as e:
raise cexc.NOSConnectFailed(nos_host=host, exc=e)
return conn
def _check_process_resp(self, resp, expected_fields=None):
"""
Check that an HTTP response was OK and in valid JSON format
If it was, check that the expected fields are present in JSON response
Otherwise it raises a NOSRestHTTPError exception
Returns:
the JSON response
"""
if resp.status_code != LenovoRestClient.RESP_CODE_OK:
raise cexc.NOSRestHTTPError(http_code=resp.status_code,
http_reason=resp.reason, http_op=resp.request.method,
url=resp.url, http_response=resp.text)
rj = resp.json()
if not expected_fields:
return rj
for field in expected_fields:
try:
val = rj[field]
except KeyError:
raise cexc.NOSJsonFieldNotFound(field=field, url=resp.url, json=rj)
return rj
def _create_vlan(self, conn, vlan_id, vlan_name):
"""
Internal method to create the vlan
Parameters:
conn - connection handler
vlan_id - vlan identifier
vlan_name - vlan name
"""
req_js = {}
req_js['vlan_id'] = vlan_id
req_js['vlan_name'] = vlan_name
req_js['admin_state'] = 'up'
resp = conn.post(self.VLAN_REST_OBJ, req_js)
self._check_process_resp(resp)
def _conf_intf(self, conn, interface, mode, pvid, vlan_list):
"""
Internal method to configure bridgeport for an interface
Parameters:
conn - connection handler
interface - interface identifier (name)
mode - 'access'(untagged) or 'trunk'(tagged)
pvid - interface default vlan id
vlan_list - list of vlans the interface belongs to
"""
if not vlan_list:
raise Exception('The interface should be in at least one vlan')
if (mode == 'access') and (len(vlan_list) > 1):
raise Exception('An access port cannot be in multiple vlans')
if pvid not in vlan_list:
raise Exception('The pvid should be in the list of vlans')
req_js = {}
req_js['if_name'] = interface
req_js['bridgeport_mode'] = mode
req_js['pvid'] = pvid
req_js['vlans'] = vlan_list
obj = self.VLAN_IFACE_REST_OBJ + quote(interface, safe='')
resp = conn.put(obj, req_js)
return resp
def _get_vlist(self, vlist):
"""
Internal method to obtain a vlan list from the JSON answer
from the switch
Parameters:
vlist - the list of vlans the port belongs to,
or the special strings all or none
"""
if vlist == "all":
return list(range(1, 4095))
elif vlist == "none":
return []
elif type(vlist) is not list:
raise Exception("Unexpected vlan list: " + str(vlist))
else:
return vlist
def _add_intf_to_vlan(self, conn, vlan_id, interface):
| pvid = intf_info['pvid']
mode = 'trunk'
resp = self._conf_intf(conn, interface, mode, pvid, new_vlist)
self._check_process_resp(resp)
def _rem_intf_from_vlan(self, conn, vlan_id, interface):
"""
Internal method to remove an interface from a vlan
Parameters:
conn - connection handler
vlan_id - vlan identifier
interface - interface identifier (name)
"""
obj = self.VLAN_IFACE_REST_OBJ + quote(interface, safe='')
resp = conn.get(obj)
intf_info = self._check_process_resp(resp, expected_fields=['vlans', 'pvid'])
crt_vlist = self._get_vlist(intf_info['vlans'])
if vlan_id not in crt_vlist:
return
new_vlist = crt_vlist[ : ]
new_vlist.remove(vlan_id)
pvid = intf_info['pvid']
if not new_vlist:
raise Exception('Port ' + str(interface) + ' was only in vlan ' + str(vlan_id))
if pvid == vlan_id:
pvid = new_vlist[0]
if len(new_vlist) > 1:
mode = 'trunk'
else:
mode = 'access'
resp = self._conf_intf(conn, interface, mode, pvid, new_vlist)
self._check_process_resp(resp)
def _get_ifname(self, intf_type, interface):
"""
Internal method to obtain the interface name based on its type and number
Parameters:
intf_type - interface type (port or portchannel)
interface - interface number
"""
if intf_type == 'port':
ifname = 'Ethernet' + str(interface)
elif intf_type == 'portchannel':
ifname = 'po' + str(interface)
else:
raise Exception("Unknown interface type: " + intf_type)
return ifname
############# Public Methods ############################
def delete_vlan(self, host, vlan_id):
"""Delete a VLAN on CNOS Switch given the VLAN ID."""
dbg_str = self._dbg_str(host, "delete", vlan_id)
LOG.debug(dbg_str)
conn = self._connect(host)
obj = self.VLAN_REST_OBJ + str(vlan_id)
resp = conn.delete(obj)
conn.close()
def enable_vlan_on_trunk_int(self, host, vlan_id, intf_type, interface):
"""Enable a VLAN on a trunk interface."""
dbg_str = self._dbg_str(host, "enable", vlan_id,
interface=interface, intf_type=intf_type)
LOG.debug(dbg_str)
conn = self._connect(host)
try:
if_name = self._get_ifname(intf_type, interface)
self._add_intf_to_vlan(conn, vlan_id, if_name)
except Exception as e:
raise cexc.NOSConfigFailed(config=dbg_str, exc=e)
conn.close()
def disable_vlan_on_trunk | """
Internal method to add an interface to a vlan
Parameters:
conn - connection handler
vlan_id - vlan identifier
interface - interface identifier (name)
"""
obj = self.VLAN_IFACE_REST_OBJ + quote(interface, safe='')
resp = conn.get(obj)
intf_info = self._check_process_resp(resp, expected_fields=['vlans', 'pvid'])
crt_vlist = self._get_vlist(intf_info['vlans'])
if vlan_id in crt_vlist:
return
new_vlist = crt_vlist[ : ]
new_vlist.append(vlan_id)
| identifier_body |
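The `identifier_body` row above elides the body of `_add_intf_to_vlan`, which follows a read-modify-write pattern: fetch the interface's current VLAN membership, return early if the VLAN is already present, otherwise append it and push the interface back as a trunk port. Stripped of the REST plumbing, the data manipulation can be sketched as follows (the helper name and sample values are assumptions):

```python
# Data-only sketch of the add-interface-to-vlan update; the intf_info
# dict mimics the JSON the switch would return, with invented values.

def updated_membership(intf_info, vlan_id):
    current = intf_info["vlans"]
    if vlan_id in current:
        return None                      # nothing to do, already a member
    new_vlans = current + [vlan_id]
    return {"pvid": intf_info["pvid"], "mode": "trunk", "vlans": new_vlans}

intf_info = {"vlans": [1, 100], "pvid": 1}
print(updated_membership(intf_info, 200))
# {'pvid': 1, 'mode': 'trunk', 'vlans': [1, 100, 200]}
print(updated_membership(intf_info, 100))  # None
```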
cnos_network_driver_rest.py | KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Implements CNOS config over REST API Client
"""
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from networking_lenovo.ml2 import config as conf
from networking_lenovo.ml2 import constants as const
from networking_lenovo.ml2 import exceptions as cexc
from networking_lenovo.ml2 import nos_db_v2
from requests.utils import quote
import requests
from rest_client import LenovoRestClient
LOG = logging.getLogger(__name__)
class LenovoCNOSDriverREST(object):
"""CNOS Driver Main Class."""
VLAN_REST_OBJ = "nos/api/cfg/vlan/"
VLAN_IFACE_REST_OBJ = "nos/api/cfg/vlan_interface/"
REST_TCP_PORT_STR = "rest_tcp_port"
REST_DEFAULT_PORT = 8090
REST_DEFAULT_PORT_HTTPS = 443
REST_USE_HTTPS_STR = "use_ssl"
def __init__(self):
self.switches = conf.ML2MechLenovoConfig.nos_dict
############ Private Methods ############################
def _dbg_str(self, host, op, vlan_id,
vlan_name=None, interface=None, intf_type=None):
"""
Construct a string displayed in debug messages or exceptions
for the main operations
"""
dbg_fmt = "host %s %s vlan %d"
args_lst = [host, op, vlan_id]
if vlan_name:
dbg_fmt += " (%s)"
args_lst.append(vlan_name)
if interface and intf_type:
dbg_fmt += " on interface %s(type %s)"
args_lst.extend([interface, intf_type])
dbg_str = dbg_fmt % tuple(args_lst)
return dbg_str
def _connect(self, host):
""" Connect to the switch """
user = self.switches[host, const.USERNAME]
password = self.switches[host, const.PASSWORD]
https_str = self.switches.get((host, self.REST_USE_HTTPS_STR),
"true").lower()
use_https = True
if https_str != "true":
use_https = False
default_port = self.REST_DEFAULT_PORT
if use_https:
default_port = self.REST_DEFAULT_PORT_HTTPS
tcp_port = self.switches.get((host, self.REST_TCP_PORT_STR),
default_port)
conn = LenovoRestClient(host, user, password, tcp_port, use_https)
try:
conn.login()
except Exception as e:
raise cexc.NOSConnectFailed(nos_host=host, exc=e)
return conn
def _check_process_resp(self, resp, expected_fields=None):
"""
Check that an HTTP response was OK and in valid JSON format
If it was, check that the expected fields are present in JSON response
Otherwise it raises a NOSRestHTTPError exception
Returns:
the JSON response
"""
if resp.status_code != LenovoRestClient.RESP_CODE_OK:
raise cexc.NOSRestHTTPError(http_code=resp.status_code,
http_reason=resp.reason, http_op=resp.request.method,
url=resp.url, http_response=resp.text)
rj = resp.json()
if not expected_fields:
return rj
for field in expected_fields:
try:
val = rj[field]
except KeyError:
raise cexc.NOSJsonFieldNotFound(field=field, url=resp.url, json=rj)
return rj
def | (self, conn, vlan_id, vlan_name):
"""
Internal method to create the vlan
Parameters:
conn - connection handler
vlan_id - vlan identifier
vlan_name - vlan name
"""
req_js = {}
req_js['vlan_id'] = vlan_id
req_js['vlan_name'] = vlan_name
req_js['admin_state'] = 'up'
resp = conn.post(self.VLAN_REST_OBJ, req_js)
self._check_process_resp(resp)
def _conf_intf(self, conn, interface, mode, pvid, vlan_list):
"""
Internal method to configure bridgeport for an interface
Parameters:
conn - connection handler
interface - interface identifier (name)
mode - 'access'(untagged) or 'trunk'(tagged)
pvid - interface default vlan id
vlan_list - list of vlans the interface belongs to
"""
if not vlan_list:
raise Exception('The interface should be in at least one vlan')
if (mode == 'access') and (len(vlan_list) > 1):
raise Exception('An access port cannot be in multiple vlans')
if pvid not in vlan_list:
raise Exception('The pvid should be in the list of vlans')
req_js = {}
req_js['if_name'] = interface
req_js['bridgeport_mode'] = mode
req_js['pvid'] = pvid
req_js['vlans'] = vlan_list
obj = self.VLAN_IFACE_REST_OBJ + quote(interface, safe='')
resp = conn.put(obj, req_js)
return resp
def _get_vlist(self, vlist):
"""
Internal method to obtain a vlan list from the JSON answer
from the switch
Parameters:
vlist - the list of vlans the port belongs to,
or the special strings all or none
"""
if vlist == "all":
return list(range(1, 4095))
elif vlist == "none":
return []
elif type(vlist) is not list:
raise Exception("Unexpected vlan list: " + str(vlist))
else:
return vlist
def _add_intf_to_vlan(self, conn, vlan_id, interface):
"""
Internal method to add an interface to a vlan
Parameters:
conn - connection handler
vlan_id - vlan identifier
interface - interface identifier (name)
"""
obj = self.VLAN_IFACE_REST_OBJ + quote(interface, safe='')
resp = conn.get(obj)
intf_info = self._check_process_resp(resp, expected_fields=['vlans', 'pvid'])
crt_vlist = self._get_vlist(intf_info['vlans'])
if vlan_id in crt_vlist:
return
new_vlist = crt_vlist[ : ]
new_vlist.append(vlan_id)
pvid = intf_info['pvid']
mode = 'trunk'
resp = self._conf_intf(conn, interface, mode, pvid, new_vlist)
self._check_process_resp(resp)
def _rem_intf_from_vlan(self, conn, vlan_id, interface):
"""
Internal method to remove an interface from a vlan
Parameters:
conn - connection handler
vlan_id - vlan identifier
interface - interface identifier (name)
"""
obj = self.VLAN_IFACE_REST_OBJ + quote(interface, safe='')
resp = conn.get(obj)
intf_info = self._check_process_resp(resp, expected_fields=['vlans', 'pvid'])
crt_vlist = self._get_vlist(intf_info['vlans'])
if vlan_id not in crt_vlist:
return
new_vlist = crt_vlist[ : ]
new_vlist.remove(vlan_id)
pvid = intf_info['pvid']
if not new_vlist:
raise Exception('Port ' + str(interface) + ' was only in vlan ' + str(vlan_id))
if pvid == vlan_id:
pvid = new_vlist[0]
if len(new_vlist) > 1:
mode = 'trunk'
else:
mode = 'access'
resp = self._conf_intf(conn, interface, mode, pvid, new_vlist)
self._check_process_resp(resp)
def _get_ifname(self, intf_type, interface):
"""
Internal method to obtain the interface name based on its type and number
Parameters:
intf_type - interface type (port or portchannel)
interface - interface number
"""
if intf_type == 'port':
ifname = 'Ethernet' + str(interface)
elif intf_type == 'portchannel':
ifname = 'po' + str(interface)
else:
raise Exception("Unknown interface type: " + intf_type)
return ifname
############# Public Methods ############################
def delete_vlan(self, host, vlan_id):
"""Delete a VLAN on CNOS Switch given the VLAN ID."""
dbg_str = self._dbg_str(host, "delete", vlan_id)
LOG.debug(dbg_str)
conn = self._connect(host)
obj = self.VLAN_REST_OBJ + str(vlan_id)
resp = conn.delete(obj)
conn.close()
def enable_vlan_on_trunk_int(self, host, vlan_id, intf_type, interface):
"""Enable a VLAN on a trunk interface."""
dbg_str = self._dbg_str(host, "enable", vlan_id,
interface=interface, intf_type=intf_type)
LOG.debug(dbg_str)
conn = self._connect(host)
try:
if_name = self._get_ifname(intf_type, interface)
self._add_intf_to_vlan(conn, vlan_id, if_name)
except Exception as e:
raise cexc.NOSConfigFailed(config=dbg_str, exc=e)
conn.close()
def disable_vlan_on_trunk | _create_vlan | identifier_name |
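Across these rows, `_connect` resolves the TCP port from per-switch options, defaulting to 443 when HTTPS is enabled and 8090 otherwise. A standalone sketch of that resolution, with a plain dict standing in for the real `nos_dict` configuration and invented host names, could look like:

```python
# Sketch of the port/TLS resolution done in _connect; the (host, option)
# tuple keys mirror the driver's switches dict, but the sample values
# below are made up for illustration.

REST_DEFAULT_PORT = 8090
REST_DEFAULT_PORT_HTTPS = 443

def resolve_endpoint(switches, host):
    use_https = switches.get((host, "use_ssl"), "true").lower() == "true"
    default_port = REST_DEFAULT_PORT_HTTPS if use_https else REST_DEFAULT_PORT
    tcp_port = switches.get((host, "rest_tcp_port"), default_port)
    return use_https, int(tcp_port)

switches = {
    ("sw1.example.net", "use_ssl"): "false",
    ("sw1.example.net", "rest_tcp_port"): "8090",
    ("sw2.example.net", "use_ssl"): "true",
}
print(resolve_endpoint(switches, "sw1.example.net"))  # (False, 8090)
print(resolve_endpoint(switches, "sw2.example.net"))  # (True, 443)
```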
DiffUtils.py | USA.
#
##############################################################################
"""
Provide a feature not present in difflib: generating a colored diff
from a diff file/string.
This code originally comes from ERP5VCS and was moved here to be used in
general ERP5.
XXX The organisation of DiffUtils should be reviewed and reorganised into a tool
if a general tool is to be provided.
"""
import os, re
from xml.sax.saxutils import escape
NBSP = ' '
NBSP_TAB = NBSP*8
NO_DIFF_COLOR = 'white'
MODIFIED_DIFF_COLOR = 'rgb(253, 228, 6);'#light orange
DELETED_DIFF_COLOR = 'rgb(253, 117, 74);'#light red
ADDITION_DIFF_COLOR = 'rgb(83, 253, 74);'#light green
class DiffFile(object):
"""
# Members :
- path : path of the modified file
- children : sub codes modified
- old_revision
- new_revision
"""
def __init__(self, raw_diff):
self.children = []
self.binary = raw_diff and '@@' not in raw_diff
if self.binary or not raw_diff:
return
self.header = raw_diff.split('@@')[0][:-1]
# Getting file path in header
self.path = self.header.split('====')[0][:-1].strip()
# Getting revisions in header
for line in self.header.splitlines():
if line.startswith('--- '):
tmp = re.search('\\([^)]+\\)$', line)
if tmp is not None:
self.old_revision = tmp.string[tmp.start():tmp.end()][1:-1].strip()
else:
self.old_revision = line.replace("--- ", "")
if line.startswith('+++ '):
tmp = re.search('\\([^)]+\\)$', line)
if tmp is not None:
self.new_revision = tmp.string[tmp.start():tmp.end()][1:-1].strip()
else:
self.new_revision = line.replace("+++ ", "")
# Splitting the body from the header
self.body = os.linesep.join(raw_diff.strip().splitlines()[3:])
if not self.body.startswith('@@'):
self.body = os.linesep.join(raw_diff.strip().splitlines()[4:])
# Now splitting modifications
first = True
tmp = []
for line in self.body.splitlines():
if line:
if line.startswith('@@') and not first:
self.children.append(CodeBlock(os.linesep.join(tmp)))
tmp = [line, ]
else:
first = False
tmp.append(line)
self.children.append(CodeBlock(os.linesep.join(tmp)))
def __nonzero__(self):
return self.binary or bool(self.children)
def __len__(self):
return len(self.children)
toHTML__roles__ = None # public
def toHTML(self):
""" return HTML diff
"""
# Adding header of the table
if self.binary:
return '<b>Folder or binary file or just no changes!</b><br/><br/><br/>'
if not self:
return ''
html_list = []
html_list.append('''
<table style="text-align: left; width: 100%%; border: 0;" cellpadding="0" cellspacing="0">
<tbody>
<tr>
<td style="background-color: grey; text-align: center; font-weight: bold;">%s</td>
<td style="background-color: black; width: 2px;"></td>
<td style="background-color: grey; text-align: center; font-weight: bold;">%s</td>
</tr>''' % (self.old_revision, self.new_revision))
header_color = 'grey'
child_html_text = '''<tr><td style="background-color: %(headcolor)s">
</td><td style="background-color: black; width: 2px;"></td>
<td style="background-color: %(headcolor)s"> </td></tr><tr>
<td style="background-color: rgb(68, 132, 255);font-weight: bold;">Line %(oldline)s</td>
<td style="background-color: black; width: 2px;"></td>
<td style="background-color: rgb(68, 132, 255);font-weight: bold;">Line %(newline)s</td>
</tr>'''
for child in self.children:
# Adding line number of the modification
html_list.append( child_html_text % {'headcolor':header_color, 'oldline':child.old_line, 'newline':child.new_line} )
header_color = 'white'
# Adding diff of the modification
old_code_list = child.getOldCodeList()
new_code_list = child.getNewCodeList()
i = 0
for old_line_tuple in old_code_list:
new_line_tuple = new_code_list[i]
new_line = new_line_tuple[0] or ' '
old_line = old_line_tuple[0] or ' '
i += 1
html_list.append( '''<tr style="font-family: monospace">
<td style="background-color: %s">%s</td>
<td style="background-color: black; width: 2px;"></td>
<td style="background-color: %s">%s</td>
</tr>'''%(old_line_tuple[1],
escape(old_line).replace(' ', NBSP).replace('\t', NBSP_TAB),
new_line_tuple[1],
escape(new_line).replace(' ', NBSP).replace('\t', NBSP_TAB))
)
html_list.append('''</tbody></table><br/>''')
return '\n'.join(html_list)
def getModifiedBlockList(self):
"""
Return a list of modified blocks
List contains tuples (block object : (old_modified_code, new_modified_code))
"""
if self.binary:
return []
block_list = []
for child in self.children:
old_line_list = [line.strip() for line, color in child.getOldCodeList()
if line is not None and color in (MODIFIED_DIFF_COLOR,
DELETED_DIFF_COLOR)]
new_line_list = [line.strip() for line, color in child.getNewCodeList()
if line is not None and color in (MODIFIED_DIFF_COLOR,
ADDITION_DIFF_COLOR)]
if old_line_list or new_line_list:
block_list.append((child,(old_line_list, new_line_list)))
return block_list
class CodeBlock:
"""
A code block contains several SubCodeBlocks
Members :
- old_line : line in old code (before modif)
- new line : line in new code (after modif)
Methods :
- getOldCodeList() : return code before modif
- getNewCodeList() : return code after modif
Note: the code returned is a list of tuples (code line, background color)
"""
def __init__(self, raw_diff):
# Splitting body and header
self.body = os.linesep.join(raw_diff.splitlines()[1:])
self.header = raw_diff.splitlines()[0]
# Getting modifications lines
tmp = re.search('^@@ -\d+', self.header)
self.old_line = tmp.string[tmp.start():tmp.end()][4:]
tmp = re.search('\+\d+', self.header)
self.new_line = tmp.string[tmp.start():tmp.end()][1:]
# Splitting modifications in SubCodeBlocks
in_modif = False
self.children = []
tmp = []
for line in self.body.splitlines():
if line:
if (line.startswith('+') or line.startswith('-')):
if in_modif:
tmp.append(line)
else:
self.children.append(SubCodeBlock(os.linesep.join(tmp)))
tmp = [line, ]
in_modif = True
else:
if in_modif:
self.children.append(SubCodeBlock(os.linesep.join(tmp)))
tmp = [line, ]
in_modif = False
else:
tmp.append(line)
self.children.append(SubCodeBlock(os.linesep.join(tmp)))
def getOldCodeList(self):
""" Return code before modification
"""
tmp = []
for child in self.children:
tmp.extend(child.getOldCodeList())
return tmp
def getNewCodeList(self):
|
class SubCodeBlock:
""" a SubCodeBlock contain 0 or 1 modification (not more)
"""
def __init__(self, code):
self.body = code
self.modification = self._getModif()
self.old_code_length = self._getOldCodeLength()
self.new_code_length = self._getNewCodeLength()
# Choosing background color
if self.modification == 'none':
self.color = NO_DIFF_COLOR
elif self.modification == 'change':
self.color = MODIFIED_DIFF_COLOR
elif self.modification == 'deletion':
self.color = DELETED_DIFF_COLOR
else: # addition
self.color = ADDITION_DIFF_COLOR
def _getModif(self):
""" Return type of modification :
addition, deletion, none
| """ Return code after modification
"""
tmp = []
for child in self.children:
tmp.extend(child.getNewCodeList())
return tmp | identifier_body |
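`CodeBlock.__init__` pulls the old and new starting line numbers out of the hunk header with two small regular expressions. A self-contained sketch of that extraction on a made-up header (the header value is chosen only for illustration):

```python
import re

# Sketch of the hunk-header parsing used when building a CodeBlock;
# the sample header below is invented.
header = "@@ -42,7 +45,9 @@ def toHTML(self):"

old_match = re.search(r'^@@ -\d+', header)
old_line = old_match.group()[4:]          # text after "@@ -"

new_match = re.search(r'\+\d+', header)
new_line = new_match.group()[1:]          # text after "+"

print(old_line, new_line)  # 42 45
```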
DiffUtils.py | # along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
"""
Provide a feature not present in difflib: generating a colored diff
from a diff file/string.
This code originally comes from ERP5VCS and was moved here to be used in
general ERP5.
XXX The organisation of DiffUtils should be reviewed and reorganised into a tool
if a general tool is to be provided.
"""
import os, re
from xml.sax.saxutils import escape
NBSP = ' '
NBSP_TAB = NBSP*8
NO_DIFF_COLOR = 'white'
MODIFIED_DIFF_COLOR = 'rgb(253, 228, 6);'#light orange
DELETED_DIFF_COLOR = 'rgb(253, 117, 74);'#light red
ADDITION_DIFF_COLOR = 'rgb(83, 253, 74);'#light green
class DiffFile(object):
"""
# Members :
- path : path of the modified file
- children : sub codes modified
- old_revision
- new_revision
"""
def __init__(self, raw_diff):
self.children = []
self.binary = raw_diff and '@@' not in raw_diff
if self.binary or not raw_diff:
return
self.header = raw_diff.split('@@')[0][:-1]
# Getting file path in header
self.path = self.header.split('====')[0][:-1].strip()
# Getting revisions in header
for line in self.header.splitlines():
if line.startswith('--- '):
tmp = re.search('\\([^)]+\\)$', line)
if tmp is not None:
self.old_revision = tmp.string[tmp.start():tmp.end()][1:-1].strip()
else:
self.old_revision = line.replace("--- ", "")
if line.startswith('+++ '):
tmp = re.search('\\([^)]+\\)$', line)
if tmp is not None:
self.new_revision = tmp.string[tmp.start():tmp.end()][1:-1].strip()
else:
self.new_revision = line.replace("+++ ", "")
# Splitting the body from the header
self.body = os.linesep.join(raw_diff.strip().splitlines()[3:])
if not self.body.startswith('@@'):
self.body = os.linesep.join(raw_diff.strip().splitlines()[4:])
# Now splitting modifications
first = True
tmp = []
for line in self.body.splitlines():
if line:
if line.startswith('@@') and not first:
self.children.append(CodeBlock(os.linesep.join(tmp)))
tmp = [line, ]
else:
first = False
tmp.append(line)
self.children.append(CodeBlock(os.linesep.join(tmp)))
def __nonzero__(self):
return self.binary or bool(self.children)
def __len__(self):
return len(self.children)
toHTML__roles__ = None # public
def toHTML(self):
""" return HTML diff
"""
# Adding header of the table
if self.binary:
return '<b>Folder or binary file or just no changes!</b><br/><br/><br/>'
if not self:
return ''
html_list = []
html_list.append('''
<table style="text-align: left; width: 100%%; border: 0;" cellpadding="0" cellspacing="0">
<tbody>
<tr>
<td style="background-color: grey; text-align: center; font-weight: bold;">%s</td>
<td style="background-color: black; width: 2px;"></td>
<td style="background-color: grey; text-align: center; font-weight: bold;">%s</td>
</tr>''' % (self.old_revision, self.new_revision))
header_color = 'grey'
child_html_text = '''<tr><td style="background-color: %(headcolor)s">
</td><td style="background-color: black; width: 2px;"></td>
<td style="background-color: %(headcolor)s"> </td></tr><tr>
<td style="background-color: rgb(68, 132, 255);font-weight: bold;">Line %(oldline)s</td>
<td style="background-color: black; width: 2px;"></td>
<td style="background-color: rgb(68, 132, 255);font-weight: bold;">Line %(newline)s</td>
</tr>'''
for child in self.children:
# Adding line number of the modification
html_list.append( child_html_text % {'headcolor':header_color, 'oldline':child.old_line, 'newline':child.new_line} )
header_color = 'white'
# Adding diff of the modification
old_code_list = child.getOldCodeList()
new_code_list = child.getNewCodeList()
i = 0
for old_line_tuple in old_code_list:
new_line_tuple = new_code_list[i]
new_line = new_line_tuple[0] or ' '
old_line = old_line_tuple[0] or ' '
i += 1
html_list.append( '''<tr style="font-family: monospace">
<td style="background-color: %s">%s</td>
<td style="background-color: black; width: 2px;"></td>
<td style="background-color: %s">%s</td>
</tr>'''%(old_line_tuple[1],
escape(old_line).replace(' ', NBSP).replace('\t', NBSP_TAB),
new_line_tuple[1],
escape(new_line).replace(' ', NBSP).replace('\t', NBSP_TAB))
)
html_list.append('''</tbody></table><br/>''')
return '\n'.join(html_list)
def getModifiedBlockList(self):
"""
Return a list of modified blocks
List contains tuples (block object : (old_modified_code, new_modified_code))
"""
if self.binary:
return []
block_list = []
for child in self.children:
old_line_list = [line.strip() for line, color in child.getOldCodeList()
if line is not None and color in (MODIFIED_DIFF_COLOR,
DELETED_DIFF_COLOR)]
new_line_list = [line.strip() for line, color in child.getNewCodeList()
if line is not None and color in (MODIFIED_DIFF_COLOR,
ADDITION_DIFF_COLOR)]
if old_line_list or new_line_list:
block_list.append((child,(old_line_list, new_line_list)))
return block_list
class CodeBlock:
"""
A code block contains several SubCodeBlocks
Members :
- old_line : line in old code (before modif)
- new line : line in new code (after modif)
Methods :
- getOldCodeList() : return code before modif
- getNewCodeList() : return code after modif
Note: the code returned is a list of tuples (code line, background color)
"""
def __init__(self, raw_diff):
# Splitting body and header
self.body = os.linesep.join(raw_diff.splitlines()[1:])
self.header = raw_diff.splitlines()[0]
# Getting modifications lines
tmp = re.search('^@@ -\d+', self.header)
self.old_line = tmp.string[tmp.start():tmp.end()][4:]
tmp = re.search('\+\d+', self.header)
self.new_line = tmp.string[tmp.start():tmp.end()][1:]
# Splitting modifications in SubCodeBlocks
in_modif = False
self.children = []
tmp = []
for line in self.body.splitlines():
if line:
if (line.startswith('+') or line.startswith('-')):
if in_modif:
tmp.append(line)
else:
self.children.append(SubCodeBlock(os.linesep.join(tmp)))
tmp = [line, ]
in_modif = True
else:
if in_modif:
self.children.append(SubCodeBlock(os.linesep.join(tmp)))
tmp = [line, ]
in_modif = False
else:
tmp.append(line)
self.children.append(SubCodeBlock(os.linesep.join(tmp)))
def getOldCodeList(self):
""" Return code before modification
"""
tmp = []
for child in self.children:
tmp.extend(child.getOldCodeList())
return tmp
def getNewCodeList(self):
""" Return code after modification
"""
tmp = []
for child in self.children:
tmp.extend(child.getNewCodeList())
return tmp
class SubCodeBlock:
""" a SubCodeBlock contain 0 or 1 modification (not more)
"""
def __init__(self, code):
self.body = code
self.modification = self._getModif()
self.old_code_length = self._getOldCodeLength()
self.new_code_length = self._getNewCodeLength()
# Choosing background color
if self.modification == 'none':
self.color = NO_DIFF_COLOR
elif self.modification == 'change':
self.color = | #
# You should have received a copy of the GNU General Public License | random_line_split | |
DiffUtils.py | USA.
#
##############################################################################
"""
Provide a feature not present in difflib: generating a colored diff
from a diff file/string.
This code originally comes from ERP5VCS and was moved here to be used in
general ERP5.
XXX The organisation of DiffUtils should be reviewed and reorganised into a tool
if a general tool is to be provided.
"""
import os, re
from xml.sax.saxutils import escape
NBSP = ' '
NBSP_TAB = NBSP*8
NO_DIFF_COLOR = 'white'
MODIFIED_DIFF_COLOR = 'rgb(253, 228, 6);'#light orange
DELETED_DIFF_COLOR = 'rgb(253, 117, 74);'#light red
ADDITION_DIFF_COLOR = 'rgb(83, 253, 74);'#light green
class DiffFile(object):
"""
# Members :
- path : path of the modified file
- children : sub codes modified
- old_revision
- new_revision
"""
def __init__(self, raw_diff):
self.children = []
self.binary = raw_diff and '@@' not in raw_diff
if self.binary or not raw_diff:
return
self.header = raw_diff.split('@@')[0][:-1]
# Getting file path in header
self.path = self.header.split('====')[0][:-1].strip()
# Getting revisions in header
for line in self.header.splitlines():
if line.startswith('--- '):
tmp = re.search('\\([^)]+\\)$', line)
if tmp is not None:
self.old_revision = tmp.string[tmp.start():tmp.end()][1:-1].strip()
else:
self.old_revision = line.replace("--- ", "")
if line.startswith('+++ '):
tmp = re.search('\\([^)]+\\)$', line)
if tmp is not None:
self.new_revision = tmp.string[tmp.start():tmp.end()][1:-1].strip()
else:
self.new_revision = line.replace("+++ ", "")
# Splitting the body from the header
self.body = os.linesep.join(raw_diff.strip().splitlines()[3:])
if not self.body.startswith('@@'):
self.body = os.linesep.join(raw_diff.strip().splitlines()[4:])
# Now splitting modifications
first = True
tmp = []
for line in self.body.splitlines():
if line:
if line.startswith('@@') and not first:
self.children.append(CodeBlock(os.linesep.join(tmp)))
tmp = [line, ]
else:
first = False
tmp.append(line)
self.children.append(CodeBlock(os.linesep.join(tmp)))
def __nonzero__(self):
return self.binary or bool(self.children)
def __len__(self):
return len(self.children)
toHTML__roles__ = None # public
def toHTML(self):
""" return HTML diff
"""
# Adding header of the table
if self.binary:
return '<b>Folder or binary file or just no changes!</b><br/><br/><br/>'
if not self:
return ''
html_list = []
html_list.append('''
<table style="text-align: left; width: 100%%; border: 0;" cellpadding="0" cellspacing="0">
<tbody>
<tr>
<td style="background-color: grey; text-align: center; font-weight: bold;">%s</td>
<td style="background-color: black; width: 2px;"></td>
<td style="background-color: grey; text-align: center; font-weight: bold;">%s</td>
</tr>''' % (self.old_revision, self.new_revision))
header_color = 'grey'
child_html_text = '''<tr><td style="background-color: %(headcolor)s">
</td><td style="background-color: black; width: 2px;"></td>
<td style="background-color: %(headcolor)s"> </td></tr><tr>
<td style="background-color: rgb(68, 132, 255);font-weight: bold;">Line %(oldline)s</td>
<td style="background-color: black; width: 2px;"></td>
<td style="background-color: rgb(68, 132, 255);font-weight: bold;">Line %(newline)s</td>
</tr>'''
for child in self.children:
# Adding line number of the modification
html_list.append( child_html_text % {'headcolor':header_color, 'oldline':child.old_line, 'newline':child.new_line} )
header_color = 'white'
# Adding diff of the modification
old_code_list = child.getOldCodeList()
new_code_list = child.getNewCodeList()
i = 0
for old_line_tuple in old_code_list:
new_line_tuple = new_code_list[i]
new_line = new_line_tuple[0] or ' '
old_line = old_line_tuple[0] or ' '
i += 1
html_list.append( '''<tr style="font-family: monospace">
<td style="background-color: %s">%s</td>
<td style="background-color: black; width: 2px;"></td>
<td style="background-color: %s">%s</td>
</tr>'''%(old_line_tuple[1],
escape(old_line).replace(' ', NBSP).replace('\t', NBSP_TAB),
new_line_tuple[1],
escape(new_line).replace(' ', NBSP).replace('\t', NBSP_TAB))
)
html_list.append('''</tbody></table><br/>''')
return '\n'.join(html_list)
def getModifiedBlockList(self):
"""
Return a list of modified blocks
List contains tuples (block object : (old_modified_code, new_modified_code))
"""
if self.binary:
return []
block_list = []
for child in self.children:
old_line_list = [line.strip() for line, color in child.getOldCodeList()
if line is not None and color in (MODIFIED_DIFF_COLOR,
DELETED_DIFF_COLOR)]
new_line_list = [line.strip() for line, color in child.getNewCodeList()
if line is not None and color in (MODIFIED_DIFF_COLOR,
ADDITION_DIFF_COLOR)]
if old_line_list or new_line_list:
block_list.append((child,(old_line_list, new_line_list)))
return block_list
class CodeBlock:
"""
A code block contains several SubCodeBlocks
Members :
- old_line : line in old code (before modif)
- new line : line in new code (after modif)
Methods :
- getOldCodeList() : return code before modif
- getNewCodeList() : return code after modif
Note: the code returned is a list of tuples (code line, background color)
"""
def __init__(self, raw_diff):
# Splitting body and header
self.body = os.linesep.join(raw_diff.splitlines()[1:])
self.header = raw_diff.splitlines()[0]
# Getting modifications lines
tmp = re.search('^@@ -\d+', self.header)
self.old_line = tmp.string[tmp.start():tmp.end()][4:]
tmp = re.search('\+\d+', self.header)
self.new_line = tmp.string[tmp.start():tmp.end()][1:]
# Splitting modifications in SubCodeBlocks
in_modif = False
self.children = []
tmp = []
for line in self.body.splitlines():
if line:
if (line.startswith('+') or line.startswith('-')):
if in_modif:
tmp.append(line)
else:
self.children.append(SubCodeBlock(os.linesep.join(tmp)))
tmp = [line, ]
in_modif = True
else:
if in_modif:
self.children.append(SubCodeBlock(os.linesep.join(tmp)))
tmp = [line, ]
in_modif = False
else:
tmp.append(line)
self.children.append(SubCodeBlock(os.linesep.join(tmp)))
def getOldCodeList(self):
""" Return code before modification
"""
tmp = []
for child in self.children:
tmp.extend(child.getOldCodeList())
return tmp
def getNewCodeList(self):
""" Return code after modification
"""
tmp = []
for child in self.children:
tmp.extend(child.getNewCodeList())
return tmp
class SubCodeBlock:
""" a SubCodeBlock contain 0 or 1 modification (not more)
"""
def __init__(self, code):
self.body = code
self.modification = self._getModif()
self.old_code_length = self._getOldCodeLength()
self.new_code_length = self._getNewCodeLength()
# Choosing background color
if self.modification == 'none':
self.color = NO_DIFF_COLOR
elif self.modification == 'change':
self.color = MODIFIED_DIFF_COLOR
elif self.modification == 'deletion':
self.color = DELETED_DIFF_COLOR
else: # addition
self.color = ADDITION_DIFF_COLOR
def | (self):
""" Return type of modification :
addition, deletion, none
| _getModif | identifier_name |
DiffUtils.py | _TAB = NBSP*8
NO_DIFF_COLOR = 'white'
MODIFIED_DIFF_COLOR = 'rgb(253, 228, 6);'#light orange
DELETED_DIFF_COLOR = 'rgb(253, 117, 74);'#light red
ADDITION_DIFF_COLOR = 'rgb(83, 253, 74);'#light green
class DiffFile(object):
"""
# Members :
- path : path of the modified file
- children : sub codes modified
- old_revision
- new_revision
"""
def __init__(self, raw_diff):
self.children = []
self.binary = raw_diff and '@@' not in raw_diff
if self.binary or not raw_diff:
return
self.header = raw_diff.split('@@')[0][:-1]
# Getting file path in header
self.path = self.header.split('====')[0][:-1].strip()
# Getting revisions in header
for line in self.header.splitlines():
if line.startswith('--- '):
tmp = re.search('\\([^)]+\\)$', line)
if tmp is not None:
self.old_revision = tmp.string[tmp.start():tmp.end()][1:-1].strip()
else:
self.old_revision = line.replace("--- ", "")
if line.startswith('+++ '):
tmp = re.search('\\([^)]+\\)$', line)
if tmp is not None:
self.new_revision = tmp.string[tmp.start():tmp.end()][1:-1].strip()
else:
self.new_revision = line.replace("+++ ", "")
# Splitting the body from the header
self.body = os.linesep.join(raw_diff.strip().splitlines()[3:])
if not self.body.startswith('@@'):
self.body = os.linesep.join(raw_diff.strip().splitlines()[4:])
# Now splitting modifications
first = True
tmp = []
for line in self.body.splitlines():
if line:
if line.startswith('@@') and not first:
self.children.append(CodeBlock(os.linesep.join(tmp)))
tmp = [line, ]
else:
first = False
tmp.append(line)
self.children.append(CodeBlock(os.linesep.join(tmp)))
def __nonzero__(self):
return self.binary or bool(self.children)
def __len__(self):
return len(self.children)
toHTML__roles__ = None # public
def toHTML(self):
""" return HTML diff
"""
# Adding header of the table
if self.binary:
return '<b>Folder or binary file or just no changes!</b><br/><br/><br/>'
if not self:
return ''
html_list = []
html_list.append('''
<table style="text-align: left; width: 100%%; border: 0;" cellpadding="0" cellspacing="0">
<tbody>
<tr>
<td style="background-color: grey; text-align: center; font-weight: bold;">%s</td>
<td style="background-color: black; width: 2px;"></td>
<td style="background-color: grey; text-align: center; font-weight: bold;">%s</td>
</tr>''' % (self.old_revision, self.new_revision))
header_color = 'grey'
child_html_text = '''<tr><td style="background-color: %(headcolor)s">
</td><td style="background-color: black; width: 2px;"></td>
<td style="background-color: %(headcolor)s"> </td></tr><tr>
<td style="background-color: rgb(68, 132, 255);font-weight: bold;">Line %(oldline)s</td>
<td style="background-color: black; width: 2px;"></td>
<td style="background-color: rgb(68, 132, 255);font-weight: bold;">Line %(newline)s</td>
</tr>'''
for child in self.children:
# Adding line number of the modification
html_list.append( child_html_text % {'headcolor':header_color, 'oldline':child.old_line, 'newline':child.new_line} )
header_color = 'white'
# Adding diff of the modification
old_code_list = child.getOldCodeList()
new_code_list = child.getNewCodeList()
i = 0
for old_line_tuple in old_code_list:
new_line_tuple = new_code_list[i]
new_line = new_line_tuple[0] or ' '
old_line = old_line_tuple[0] or ' '
i += 1
html_list.append( '''<tr style="font-family: monospace">
<td style="background-color: %s">%s</td>
<td style="background-color: black; width: 2px;"></td>
<td style="background-color: %s">%s</td>
</tr>'''%(old_line_tuple[1],
escape(old_line).replace(' ', NBSP).replace('\t', NBSP_TAB),
new_line_tuple[1],
escape(new_line).replace(' ', NBSP).replace('\t', NBSP_TAB))
)
html_list.append('''</tbody></table><br/>''')
return '\n'.join(html_list)
def getModifiedBlockList(self):
"""
Return a list of modified blocks
List contains tuples (block object : (old_modified_code, new_modified_code))
"""
if self.binary:
return []
block_list = []
for child in self.children:
old_line_list = [line.strip() for line, color in child.getOldCodeList()
if line is not None and color in (MODIFIED_DIFF_COLOR,
DELETED_DIFF_COLOR)]
new_line_list = [line.strip() for line, color in child.getNewCodeList()
if line is not None and color in (MODIFIED_DIFF_COLOR,
ADDITION_DIFF_COLOR)]
if old_line_list or new_line_list:
block_list.append((child,(old_line_list, new_line_list)))
return block_list
class CodeBlock:
"""
A code block contains several SubCodeBlocks
Members :
- old_line : line in old code (before modif)
- new line : line in new code (after modif)
Methods :
- getOldCodeList() : return code before modif
- getNewCodeList() : return code after modif
Note: the code returned is a list of tuples (code line, background color)
"""
def __init__(self, raw_diff):
# Splitting body and header
self.body = os.linesep.join(raw_diff.splitlines()[1:])
self.header = raw_diff.splitlines()[0]
# Getting modifications lines
tmp = re.search('^@@ -\d+', self.header)
self.old_line = tmp.string[tmp.start():tmp.end()][4:]
tmp = re.search('\+\d+', self.header)
self.new_line = tmp.string[tmp.start():tmp.end()][1:]
# Splitting modifications in SubCodeBlocks
in_modif = False
self.children = []
tmp = []
for line in self.body.splitlines():
if line:
if (line.startswith('+') or line.startswith('-')):
if in_modif:
tmp.append(line)
else:
self.children.append(SubCodeBlock(os.linesep.join(tmp)))
tmp = [line, ]
in_modif = True
else:
if in_modif:
self.children.append(SubCodeBlock(os.linesep.join(tmp)))
tmp = [line, ]
in_modif = False
else:
tmp.append(line)
self.children.append(SubCodeBlock(os.linesep.join(tmp)))
def getOldCodeList(self):
""" Return code before modification
"""
tmp = []
for child in self.children:
tmp.extend(child.getOldCodeList())
return tmp
def getNewCodeList(self):
""" Return code after modification
"""
tmp = []
for child in self.children:
tmp.extend(child.getNewCodeList())
return tmp
class SubCodeBlock:
""" a SubCodeBlock contain 0 or 1 modification (not more)
"""
def __init__(self, code):
self.body = code
self.modification = self._getModif()
self.old_code_length = self._getOldCodeLength()
self.new_code_length = self._getNewCodeLength()
# Choosing background color
if self.modification == 'none':
self.color = NO_DIFF_COLOR
elif self.modification == 'change':
self.color = MODIFIED_DIFF_COLOR
elif self.modification == 'deletion':
self.color = DELETED_DIFF_COLOR
else: # addition
self.color = ADDITION_DIFF_COLOR
def _getModif(self):
""" Return type of modification :
addition, deletion, none
"""
nb_plus = 0
nb_minus = 0
for line in self.body.splitlines():
if line.startswith("-"):
nb_minus -= 1
elif line.startswith("+"):
nb_plus += 1
if (nb_plus == 0 and nb_minus == 0):
return 'none'
if (nb_minus == 0):
return 'addition'
if (nb_plus == 0):
| return 'deletion' | conditional_block | |
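The elided `conditional_block` above is the deletion branch of `_getModif`, which classifies a sub-block by counting leading `+` and `-` lines. A rough standalone reimplementation of the same classification (the function name is invented, and the sample fragments are made up) behaves like this:

```python
# Standalone sketch of SubCodeBlock._getModif's counting logic.

def classify(body):
    nb_plus = sum(1 for line in body.splitlines() if line.startswith("+"))
    nb_minus = sum(1 for line in body.splitlines() if line.startswith("-"))
    if nb_plus == 0 and nb_minus == 0:
        return "none"
    if nb_minus == 0:
        return "addition"
    if nb_plus == 0:
        return "deletion"
    return "change"

print(classify(" unchanged line"))            # none
print(classify("+new line"))                  # addition
print(classify("-old line"))                  # deletion
print(classify("-old line\n+new line"))       # change
```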
memorycache.js | // you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
var utility = require('./utility.js');
//
// Simple in memory caching
//
// A simple, synchronous in memory cache is used to implement "working memory"
// of a server. This is intended to be a small, per node.js instance for
// caching frequently accessed data.
//
// It implements a strict LRU based on the ordering of Javascript object map
// property inserts, and Object.keys() indexes.
//
//
// Usage:
//
// var memoryCache = require('./memorycache.js');
//
// var maxEntries = 100;
// var trimCount = 10;
//
// var cache = new memoryCache.MemoryCache(maxEntries, trimCount);
//
// cache.set(key, val);
//
// var val = cache.get(key);
//
//
// maxEntries - Maximum entries for cache.
//
// A value of 0 means no limit.
//
// Remember, a cache without reasonable bounds is a memory leak.
//
// Since this is a strict LRU cache, a maxEntries value too small in
// relation to total unique requests may constantly push valid entries out
// of the cache.
//
// trimCount - Number of entries to trim when maxEntries is reached.
//
// A reasonable fraction of maxEntries ensures CPU time
// is not spent trimming the cache for each entry added.
//
// maxEntries/10, maxEntries/4, etc. is recommended.
//
// If trimCount == 0, no trimming occurs and set will fail when
// at capacity. trim(count) may be called manually in this case.
//
// This is useful for support storage models in which deletion
// of older entries is not desired, such as an in memory database
// with a fixed capacity.
//
function MemoryCache(maxEntries, trimCount) |
//
// Set a value into the cache.
//
// If the cache is at capacity, trimCount entries are
// removed.
//
// A reference to the object is stored by the key, the object
// is not copied.
//
// val - object reference
//
// Returns:
// true - entry was entered
// false - cache is full, and trimCount was specified as 0 at construction
//
MemoryCache.prototype.set = function(key, val) {
if ((this.maxEntries != 0) && (this.entryCount >= this.maxEntries)) {
if (this.trimCount == 0) return false;
this.trim(this.trimCount);
}
// map is indexed by string value
this.cacheMap[key] = val;
this.entryCount++;
return true;
}
//
// key - string key value to use
//
// Returns "undefined" if no entry.
//
MemoryCache.prototype.get = function(key) {
var val = this.cacheMap[key];
return val;
}
//
// key - string key value to use
//
// No return value
//
MemoryCache.prototype.remove = function(key) {
delete this.cacheMap[key];
}
//
// Return entryCount
//
MemoryCache.prototype.getEntryCount = function() {
return this.entryCount;
}
//
// A trimCount keeps from having to process the trim operation
// for each individual overflow.
//
MemoryCache.prototype.trim = function(trimCount) {
var thisTrim = 0;
var key;
var keys = Object.keys(this.cacheMap);
var thisTrim = keys.length;
if (thisTrim > trimCount) {
thisTrim = trimCount;
}
// Delete the first keys since they are the oldest
for (var index = 0; index < thisTrim; index++) {
key = keys[index];
delete this.cacheMap[key];
}
this.entryCount = this.entryCount - thisTrim;
}
//
// Return the array of keys
//
MemoryCache.prototype.getKeys = function() {
return Object.keys(this.cacheMap);
}
//
// enumerateChildNames
//
// Enumerate and return child names. This just returns the names, not
// the objects themselves. The objects themselves can be retrieved by
// calling getItem() on the returned names.
//
// If includeDescendants == false, just the names of the direct children
// are returned.
//
// If includeDescendants == true, the names of the children and any
// descendants are included. This can result in a large number of items
// returned if the data set is large.
//
// This is used to walk, or discover objects in the sparse namespace
// maintained by the data store.
//
// itemNamePrefix - Path name to enumerate children for.
//
// startingIndex - Index to start from to allow enumeration through a large set
//
// itemCount - Maximum count of items to return.
//
// includeDescendants - If false, only immediate children are returned.
// If true, all descendants of parent path are returned.
//
// callback(error, result)
//
// result is an array of childNames.
//
// Example:
//
// If the object store has the following entries:
//
// /accounts/1/sensors/1
// /accounts/1/sensors/1/settings
// /accounts/1/sensors/1/readings/2015-11-11T15:41:26.969Z
// /accounts/1/sensors/1/readings/2015-11-11T15:41:27.992Z
//
// Then a query of parentName /accounts/1/sensors/1 with
// includeDescendants == false would return:
//
// /accounts/1/sensors/1/settings
// /accounts/1/sensors/1/readings
//
// If includeDescendants == true, then the additional items
// below would also be returned:
//
// /accounts/1/sensors/1/readings/2015-11-11T15:41:26.969Z
// /accounts/1/sensors/1/readings/2015-11-11T15:41:27.992Z
//
MemoryCache.prototype.enumerateChildNames =
function(parentName, startIndex, itemCount, includeDescendants) {
//
// In order to return just one instance of a name an
// object is used as a string indexed array/hashtable
//
var o = {};
var keys = Object.keys(this.cacheMap);
if (startIndex > keys.length) {
return null;
}
var itemsLeft = keys.length - startIndex;
if (itemCount > itemsLeft) {
itemCount = itemsLeft;
}
for (var index = startIndex; (index < keys.length) && (itemCount > 0); index++) {
var key = keys[index];
if (includeDescendants) {
if (key.search(parentName) == 0) {
o[key] = true; // a boolean is lowest possible storage
}
}
else {
// Validate if name is an immediate descendant
child = utility.getImmediateChildObject(parentName, key);
if (child != null) {
o[child] = true; // a boolean is lowest possible storage
}
}
}
// Now convert to the property names
var childNames = Object.keys(o);
return childNames;
}
//
// Enumerate up to itemCount items that are immediate descendants of parentName.
//
// parentName - Path name to match item entries on.
//
// startingIndex - Index to start from to allow enumeration through a large set
//
// itemCount - Maximum count of items to return.
//
// includeDescendants - If false, only immediate children are returned.
// If true, all descendants of parent path are returned.
//
// callback(error, result)
//
// result is an array of {itemName: "name", item: {...}}
//
MemoryCache.prototype.enumerateItems =
function(parentName, startIndex, itemCount, includeDescendants) {
var items = [];
var keys = Object.keys(this.cacheMap);
if (startIndex > keys.length) {
return items;
}
var itemsLeft = keys.length - startIndex;
if (itemCount > itemsLeft) {
itemCount = itemsLeft;
}
for (var index = startIndex; (index < keys.length) && (itemCount > 0); index++) {
var key = keys[index];
if (includeDescendants) {
if (key.search(parentName) == 0) {
var o = {itemName: key, item: this.cacheMap[key]}
items.push(o);
itemCount--;
}
}
else {
// Validate if name is an immediate descendant and an object
var child = utility.getImmediateChildObject(parentName, key);
if (child != null) {
var o = {itemName: key, item: this.cacheMap[key]}
items | {
this.moduleName = "MemoryCache";
this.maxEntries = maxEntries;
this.trimCount = trimCount;
this.entryCount = 0;
// A Javascript object is a map
this.cacheMap = new Object();
} | identifier_body |
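The `identifier_body` row above is the `MemoryCache` constructor; the cache relies on JavaScript object key ordering so that trimming removes the oldest entries first. The same idea ports to Python, where dicts also preserve insertion order; a rough equivalent sketch (class and method names are assumptions, not part of the module) is:

```python
# Python sketch of the same insertion-ordered cache with batched trimming.

class SimpleMemoryCache:
    def __init__(self, max_entries, trim_count):
        self.max_entries = max_entries
        self.trim_count = trim_count
        self.cache = {}          # dicts preserve insertion order

    def set(self, key, val):
        if self.max_entries and len(self.cache) >= self.max_entries:
            if self.trim_count == 0:
                return False
            self.trim(self.trim_count)
        self.cache[key] = val
        return True

    def get(self, key):
        return self.cache.get(key)

    def trim(self, count):
        # Drop the oldest entries (earliest insertions) first.
        for key in list(self.cache)[:count]:
            del self.cache[key]

cache = SimpleMemoryCache(max_entries=3, trim_count=2)
for k in ("a", "b", "c", "d"):
    cache.set(k, k.upper())
print(list(cache.cache))  # ['c', 'd'] - 'a' and 'b' were trimmed
```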
memorycache.js | ;
//
// var cache = new memoryCache.MemoryCache(maxEntries, trimCount);
//
// cache.set(key, val);
//
// var val = cache.get(key);
//
//
// maxEntries - Maximum entries for cache.
//
// A value of 0 means no limit.
//
// Remember, a cache without reasonable bounds is a memory leak.
//
// Since this is a strict LRU cache, a maxEntries value too small in
// relation to total unique requests may constantly push valid entries out
// of the cache.
//
// trimCount - Number of entries to trim when maxEntries is reached.
//
// A reasonable fraction of maxEntries ensures CPU time
// is not spent trimming the cache for each entry added.
//
// maxEntries/10, maxEntries/4, etc. is recommended.
//
// If trimCount == 0, no trimming occurs and set will fail when
// at capacity. trim(count) may be called manually in this case.
//
// This is useful for support storage models in which deletion
// of older entries is not desired, such as an in memory database
// with a fixed capacity.
//
function MemoryCache(maxEntries, trimCount) {
this.moduleName = "MemoryCache";
this.maxEntries = maxEntries;
this.trimCount = trimCount;
this.entryCount = 0;
// A Javascript object is a map
this.cacheMap = new Object();
}
//
// Set a value into the cache.
//
// If the cache is at capacity, trimCount entries are
// removed.
//
// A reference to the object is stored by the key, the object
// is not copied.
//
// val - object reference
//
// Returns:
// true - entry was entered
// false - cache is full, and trimCount was specified as 0 at construction
//
MemoryCache.prototype.set = function(key, val) {
if ((this.maxEntries != 0) && (this.entryCount >= this.maxEntries)) {
if (this.trimCount == 0) return false;
this.trim(this.trimCount);
}
// map is indexed by string value
this.cacheMap[key] = val;
this.entryCount++;
return true;
}
//
// key - string key value to use
//
// Returns "undefined" if no entry.
//
MemoryCache.prototype.get = function(key) {
var val = this.cacheMap[key];
return val;
}
//
// key - string key value to use
//
// No return value
//
MemoryCache.prototype.remove = function(key) {
delete this.cacheMap[key];
}
//
// Return entryCount
//
MemoryCache.prototype.getEntryCount = function() {
return this.entryCount;
}
//
// A trimCount keeps from having to process the trim operation
// for each individual overflow.
//
MemoryCache.prototype.trim = function(trimCount) {
var thisTrim = 0;
var key;
var keys = Object.keys(this.cacheMap);
var thisTrim = keys.length;
if (thisTrim > trimCount) {
thisTrim = trimCount;
}
// Delete the first keys since they are the oldest
for (var index = 0; index < thisTrim; index++) {
key = keys[index];
delete this.cacheMap[key];
}
this.entryCount = this.entryCount - thisTrim;
}
//
// Return the array of keys
//
MemoryCache.prototype.getKeys = function() {
return Object.keys(this.cacheMap);
}
//
// enumerateChildNames
//
// Enumerate and return child names. This just returns the names, not
// the objects themselves. The objects themselves can be retrieved by
// calling getItem() on the returned names.
//
// If includeDescendants == false, just the names of the direct children
// are returned.
//
// If includeDescendants == true, the names of the children and any
// descendants are included. This can result in a large number of items
// returned if the data set is large.
//
// This is used to walk, or discover objects in the sparse namespace
// maintained by the data store.
//
// itemNamePrefix - Path name to enumerate children for.
//
// startingIndex - Index to start from to allow enumeration through a large set
//
// itemCount - Maximum count of items to return.
//
// includeDescendants - If false, only immediate children are returned.
// If true, all descendants of parent path are returned.
//
// callback(error, result)
//
// result is an array of childNames.
//
// Example:
//
// If the object store has the following entries:
//
// /accounts/1/sensors/1
// /accounts/1/sensors/1/settings
// /accounts/1/sensors/1/readings/2015-11-11T15:41:26.969Z
// /accounts/1/sensors/1/readings/2015-11-11T15:41:27.992Z
//
// Then a query of parentName /accounts/1/sensors/1 with
// includeDescendants == false would return:
//
// /accounts/1/sensors/1/settings
// /accounts/1/sensors/1/readings
//
// If includeDescendants == true, then the additional items
// below would also be returned:
//
// /accounts/1/sensors/1/readings/2015-11-11T15:41:26.969Z
// /accounts/1/sensors/1/readings/2015-11-11T15:41:27.992Z
//
MemoryCache.prototype.enumerateChildNames =
function(parentName, startIndex, itemCount, includeDescendants) {
//
// In order to return just one instance of a name an
// object is used as a string indexed array/hashtable
//
var o = {};
var keys = Object.keys(this.cacheMap);
if (startIndex > keys.length) {
return null;
}
var itemsLeft = keys.length - startIndex;
if (itemCount > itemsLeft) {
itemCount = itemsLeft;
}
for (var index = startIndex; (index < keys.length) && (itemCount > 0); index++) {
var key = keys[index];
if (includeDescendants) {
if (key.search(parentName) == 0) {
o[key] = true; // a boolean is lowest possible storage
}
}
else {
// Validate if name is an immediate descendant
var child = utility.getImmediateChildObject(parentName, key);
if (child != null) {
o[child] = true; // a boolean is lowest possible storage
}
}
}
// Now convert to the property names
var childNames = Object.keys(o);
return childNames;
}
//
// Enumerate up to itemCount items that are immediate descendants of parentName.
//
// parentName - Path name to match item entries on.
//
// startIndex - Index to start from to allow enumeration through a large set
//
// itemCount - Maximum count of items to return.
//
// includeDescendants - If false, only immediate children are returned.
// If true, all descendants of parent path are returned.
//
// callback(error, result)
//
// result is an array of {itemName: "name", item: {...}}
//
MemoryCache.prototype.enumerateItems =
function(parentName, startIndex, itemCount, includeDescendants) {
var items = [];
var keys = Object.keys(this.cacheMap);
if (startIndex > keys.length) {
return items;
}
var itemsLeft = keys.length - startIndex;
if (itemCount > itemsLeft) {
itemCount = itemsLeft;
}
for (var index = startIndex; (index < keys.length) && (itemCount > 0); index++) {
var key = keys[index];
if (includeDescendants) {
if (key.search(parentName) == 0) {
var o = {itemName: key, item: this.cacheMap[key]}
items.push(o);
itemCount--;
}
}
else {
// Validate if name is an immediate descendant and an object
var child = utility.getImmediateChildObject(parentName, key);
if (child != null) {
var o = {itemName: key, item: this.cacheMap[key]}
items.push(o);
itemCount--;
}
}
}
return items;
}
//
// Return the last n items that are immediate children of parentName.
//
// parentName - Path name to match item entries on
//
// itemCount - Count of items to return.
//
// callback(error, result)
//
// result is an array of {itemName: "name", item: {...}}
//
MemoryCache.prototype.enumerateLastItems = function(parentName, itemCount) {
var items = [];
var keys = Object.keys(this.cacheMap);
if (itemCount > keys.length) {
itemCount = keys.length;
}
for (var index = keys.length - 1; (index >= 0) && (itemCount > 0); index--) | {
var key = keys[index];
// Validate if name is an immediate descendant and an object
var child = utility.getImmediateChildObject(parentName, key);
if (child != null) {
var o = {itemName: key, item: this.cacheMap[key]}
items.push(o);
itemCount--;
}
} | conditional_block | |
memorycache.js | // you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
var utility = require('./utility.js');
//
// Simple in memory caching
//
// A simple, synchronous in memory cache is used to implement "working memory"
// of a server. This is intended to be a small, per node.js instance for
// caching frequently accessed data.
//
// It implements a strict LRU based on the ordering of Javascript object map
// property inserts, and Object.keys() indexes.
//
//
// Usage:
//
// var memoryCache = require('./memorycache.js');
//
// var maxEntries = 100;
// var trimCount = 10;
//
// var cache = new memoryCache.MemoryCache(maxEntries, trimCount);
//
// cache.set(key, val);
//
// var val = cache.get(key);
//
//
// maxEntries - Maximum entries for cache.
//
// A value of 0 means no limit.
//
// Remember, a cache without reasonable bounds is a memory leak.
//
// Since this is a strict LRU cache, a maxEntries value too small in
// relation to total unique requests may constantly push valid entries out
// of the cache.
//
// trimCount - Number of entries to trim when maxEntries is reached.
//
// A reasonable fraction of maxEntries ensures CPU time
// is not spent trimming the cache for each entry added.
//
// maxEntries/10, maxEntries/4, etc. is recommended.
//
// If trimCount == 0, no trimming occurs and set will fail when
// at capacity. trim(count) may be called manually in this case.
//
// This is useful for supporting storage models in which deletion
// of older entries is not desired, such as an in memory database
// with a fixed capacity.
//
function | (maxEntries, trimCount) {
this.moduleName = "MemoryCache";
this.maxEntries = maxEntries;
this.trimCount = trimCount;
this.entryCount = 0;
// A Javascript object is a map
this.cacheMap = new Object();
}
//
// Set a value into the cache.
//
// If the cache is at capacity, trimCount entries are
// removed.
//
// A reference to the object is stored by the key, the object
// is not copied.
//
// val - object reference
//
// Returns:
// true - entry was entered
// false - cache is full, and trimCount was specified as 0 at construction
//
MemoryCache.prototype.set = function(key, val) {
if ((this.maxEntries != 0) && (this.entryCount >= this.maxEntries)) {
if (this.trimCount == 0) return false;
this.trim(this.trimCount);
}
// map is indexed by string value
this.cacheMap[key] = val;
this.entryCount++;
return true;
}
//
// key - string key value to use
//
// Returns "undefined" if no entry.
//
MemoryCache.prototype.get = function(key) {
var val = this.cacheMap[key];
return val;
}
//
// key - string key value to use
//
// No return value
//
MemoryCache.prototype.remove = function(key) {
delete this.cacheMap[key];
}
//
// Return entryCount
//
MemoryCache.prototype.getEntryCount = function() {
return this.entryCount;
}
//
// A trimCount keeps from having to process the trim operation
// for each individual overflow.
//
MemoryCache.prototype.trim = function(trimCount) {
var key;
var keys = Object.keys(this.cacheMap);
var thisTrim = keys.length;
if (thisTrim > trimCount) {
thisTrim = trimCount;
}
// Delete the first keys since they are the oldest
for (var index = 0; index < thisTrim; index++) {
key = keys[index];
delete this.cacheMap[key];
}
this.entryCount = this.entryCount - thisTrim;
}
//
// Return the array of keys
//
MemoryCache.prototype.getKeys = function() {
return Object.keys(this.cacheMap);
}
//
// enumerateChildNames
//
// Enumerate and return child names. This just returns the names, not
// the objects themselves. The objects themselves can be retrieved by
// calling getItem() on the returned names.
//
// If includeDescendants == false, just the names of the direct children
// are returned.
//
// If includeDescendants == true, the names of the children and any
// descendants are included. This can result in a large number of items
// returned if the data set is large.
//
// This is used to walk, or discover objects in the sparse namespace
// maintained by the data store.
//
// parentName - Path name to enumerate children for.
//
// startIndex - Index to start from to allow enumeration through a large set
//
// itemCount - Maximum count of items to return.
//
// includeDescendants - If false, only immediate children are returned.
// If true, all descendants of parent path are returned.
//
// callback(error, result)
//
// result is an array of childNames.
//
// Example:
//
// If the object store has the following entries:
//
// /accounts/1/sensors/1
// /accounts/1/sensors/1/settings
// /accounts/1/sensors/1/readings/2015-11-11T15:41:26.969Z
// /accounts/1/sensors/1/readings/2015-11-11T15:41:27.992Z
//
// Then a query of parentName /accounts/1/sensors/1 with
// includeDescendants == false would return:
//
// /accounts/1/sensors/1/settings
// /accounts/1/sensors/1/readings
//
// If includeDescendants == true, then the additional items
// below would also be returned:
//
// /accounts/1/sensors/1/readings/2015-11-11T15:41:26.969Z
// /accounts/1/sensors/1/readings/2015-11-11T15:41:27.992Z
//
MemoryCache.prototype.enumerateChildNames =
function(parentName, startIndex, itemCount, includeDescendants) {
//
// In order to return just one instance of a name an
// object is used as a string indexed array/hashtable
//
var o = {};
var keys = Object.keys(this.cacheMap);
if (startIndex > keys.length) {
return null;
}
var itemsLeft = keys.length - startIndex;
if (itemCount > itemsLeft) {
itemCount = itemsLeft;
}
for (var index = startIndex; (index < keys.length) && (itemCount > 0); index++) {
var key = keys[index];
if (includeDescendants) {
if (key.search(parentName) == 0) {
o[key] = true; // a boolean is lowest possible storage
}
}
else {
// Validate if name is an immediate descendant
var child = utility.getImmediateChildObject(parentName, key);
if (child != null) {
o[child] = true; // a boolean is lowest possible storage
}
}
}
// Now convert to the property names
var childNames = Object.keys(o);
return childNames;
}
//
// Enumerate up to itemCount items that are immediate descendants of parentName.
//
// parentName - Path name to match item entries on.
//
// startIndex - Index to start from to allow enumeration through a large set
//
// itemCount - Maximum count of items to return.
//
// includeDescendants - If false, only immediate children are returned.
// If true, all descendants of parent path are returned.
//
// callback(error, result)
//
// result is an array of {itemName: "name", item: {...}}
//
MemoryCache.prototype.enumerateItems =
function(parentName, startIndex, itemCount, includeDescendants) {
var items = [];
var keys = Object.keys(this.cacheMap);
if (startIndex > keys.length) {
return items;
}
var itemsLeft = keys.length - startIndex;
if (itemCount > itemsLeft) {
itemCount = itemsLeft;
}
for (var index = startIndex; (index < keys.length) && (itemCount > 0); index++) {
var key = keys[index];
if (includeDescendants) {
if (key.search(parentName) == 0) {
var o = {itemName: key, item: this.cacheMap[key]}
items.push(o);
itemCount--;
}
}
else {
// Validate if name is an immediate descendant and an object
var child = utility.getImmediateChildObject(parentName, key);
if (child != null) {
var o = {itemName: key, item: this.cacheMap[key]}
items.push | MemoryCache | identifier_name |
memorycache.js | you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
var utility = require('./utility.js');
//
// Simple in memory caching
//
// A simple, synchronous in memory cache is used to implement "working memory"
// of a server. This is intended to be a small, per node.js instance for
// caching frequently accessed data.
//
// It implements a strict LRU based on the ordering of Javascript object map
// property inserts, and Object.keys() indexes.
//
//
// Usage:
//
// var memoryCache = require('./memorycache.js');
//
// var maxEntries = 100;
// var trimCount = 10;
//
// var cache = new memoryCache.MemoryCache(maxEntries, trimCount);
//
// cache.set(key, val);
//
// var val = cache.get(key);
//
//
// maxEntries - Maximum entries for cache.
//
// A value of 0 means no limit.
//
// Remember, a cache without reasonable bounds is a memory leak.
//
// Since this is a strict LRU cache, a maxEntries value too small in
// relation to total unique requests may constantly push valid entries out
// of the cache.
//
// trimCount - Number of entries to trim when maxEntries is reached.
//
// A reasonable fraction of maxEntries ensures CPU time
// is not spent trimming the cache for each entry added.
//
// maxEntries/10, maxEntries/4, etc. is recommended.
//
// If trimCount == 0, no trimming occurs and set will fail when
// at capacity. trim(count) may be called manually in this case.
//
// This is useful for supporting storage models in which deletion
// of older entries is not desired, such as an in memory database
// with a fixed capacity.
//
function MemoryCache(maxEntries, trimCount) {
this.moduleName = "MemoryCache";
this.maxEntries = maxEntries;
this.trimCount = trimCount;
this.entryCount = 0;
// A Javascript object is a map
this.cacheMap = new Object();
}
//
// Set a value into the cache.
//
// If the cache is at capacity, trimCount entries are
// removed.
//
// A reference to the object is stored by the key, the object
// is not copied.
//
// val - object reference
//
// Returns:
// true - entry was entered
// false - cache is full, and trimCount was specified as 0 at construction
//
MemoryCache.prototype.set = function(key, val) {
if ((this.maxEntries != 0) && (this.entryCount >= this.maxEntries)) {
if (this.trimCount == 0) return false;
this.trim(this.trimCount);
}
// map is indexed by string value
this.cacheMap[key] = val;
this.entryCount++;
return true;
}
//
// key - string key value to use
//
// Returns "undefined" if no entry.
//
MemoryCache.prototype.get = function(key) {
var val = this.cacheMap[key];
return val;
}
//
// key - string key value to use
//
// No return value
//
MemoryCache.prototype.remove = function(key) {
delete this.cacheMap[key];
}
//
// Return entryCount
//
MemoryCache.prototype.getEntryCount = function() {
return this.entryCount;
}
//
// A trimCount keeps from having to process the trim operation
// for each individual overflow.
//
MemoryCache.prototype.trim = function(trimCount) {
var key;
var keys = Object.keys(this.cacheMap);
var thisTrim = keys.length;
if (thisTrim > trimCount) {
thisTrim = trimCount;
}
// Delete the first keys since they are the oldest
for (var index = 0; index < thisTrim; index++) {
key = keys[index];
delete this.cacheMap[key];
}
this.entryCount = this.entryCount - thisTrim;
}
//
// Return the array of keys
//
MemoryCache.prototype.getKeys = function() {
return Object.keys(this.cacheMap);
}
//
// enumerateChildNames
//
// Enumerate and return child names. This just returns the names, not
// the objects themselves. The objects themselves can be retrieved by
// calling getItem() on the returned names.
//
// If includeDescendants == false, just the names of the direct children
// are returned.
//
// If includeDescendants == true, the names of the children and any
// descendants are included. This can result in a large number of items
// returned if the data set is large.
//
// This is used to walk, or discover objects in the sparse namespace
// maintained by the data store.
//
// parentName - Path name to enumerate children for.
//
// startIndex - Index to start from to allow enumeration through a large set
//
// itemCount - Maximum count of items to return.
//
// includeDescendants - If false, only immediate children are returned.
// If true, all descendants of parent path are returned.
//
// callback(error, result)
| //
// Example:
//
// If the object store has the following entries:
//
// /accounts/1/sensors/1
// /accounts/1/sensors/1/settings
// /accounts/1/sensors/1/readings/2015-11-11T15:41:26.969Z
// /accounts/1/sensors/1/readings/2015-11-11T15:41:27.992Z
//
// Then a query of parentName /accounts/1/sensors/1 with
// includeDescendants == false would return:
//
// /accounts/1/sensors/1/settings
// /accounts/1/sensors/1/readings
//
// If includeDescendants == true, then the additional items
// below would also be returned:
//
// /accounts/1/sensors/1/readings/2015-11-11T15:41:26.969Z
// /accounts/1/sensors/1/readings/2015-11-11T15:41:27.992Z
//
MemoryCache.prototype.enumerateChildNames =
function(parentName, startIndex, itemCount, includeDescendants) {
//
// In order to return just one instance of a name an
// object is used as a string indexed array/hashtable
//
var o = {};
var keys = Object.keys(this.cacheMap);
if (startIndex > keys.length) {
return null;
}
var itemsLeft = keys.length - startIndex;
if (itemCount > itemsLeft) {
itemCount = itemsLeft;
}
for (var index = startIndex; (index < keys.length) && (itemCount > 0); index++) {
var key = keys[index];
if (includeDescendants) {
if (key.search(parentName) == 0) {
o[key] = true; // a boolean is lowest possible storage
}
}
else {
// Validate if name is an immediate descendant
var child = utility.getImmediateChildObject(parentName, key);
if (child != null) {
o[child] = true; // a boolean is lowest possible storage
}
}
}
// Now convert to the property names
var childNames = Object.keys(o);
return childNames;
}
//
// Enumerate up to itemCount items that are immediate descendants of parentName.
//
// parentName - Path name to match item entries on.
//
// startIndex - Index to start from to allow enumeration through a large set
//
// itemCount - Maximum count of items to return.
//
// includeDescendants - If false, only immediate children are returned.
// If true, all descendants of parent path are returned.
//
// callback(error, result)
//
// result is an array of {itemName: "name", item: {...}}
//
MemoryCache.prototype.enumerateItems =
function(parentName, startIndex, itemCount, includeDescendants) {
var items = [];
var keys = Object.keys(this.cacheMap);
if (startIndex > keys.length) {
return items;
}
var itemsLeft = keys.length - startIndex;
if (itemCount > itemsLeft) {
itemCount = itemsLeft;
}
for (var index = startIndex; (index < keys.length) && (itemCount > 0); index++) {
var key = keys[index];
if (includeDescendants) {
if (key.search(parentName) == 0) {
var o = {itemName: key, item: this.cacheMap[key]}
items.push(o);
itemCount--;
}
}
else {
// Validate if name is an immediate descendant and an object
var child = utility.getImmediateChildObject(parentName, key);
if (child != null) {
var o = {itemName: key, item: this.cacheMap[key]}
items.push | //
// result is an array of childNames.
| random_line_split |
__init__.py | GRESS_STATUS = dict(COMPLETED='completed', STARTED='started', TODO='todo')
class IEMRunInfoReader:
"""
Illumina Experimental Manager RunInfo xml reader.
"""
def __init__(self, f):
self.xml_file = f
self.tree = ET.parse(self.xml_file)
self.root = self.tree.getroot()
def get_reads(self):
reads = [r.attrib for r in self.root.iter('Read')] |
def get_indexed_reads(self):
reads = self.get_reads()
return filter(lambda item: item["IsIndexedRead"] == "Y", reads)
def get_index_cycles(self):
indexed_reads = self.get_indexed_reads()
return dict(
index=next((item['NumCycles'] for item in indexed_reads
if item["IsIndexedRead"] == "Y" and item['Number'] == "2"), None),
index1=next((item['NumCycles'] for item in indexed_reads
if item["IsIndexedRead"] == "Y" and item['Number'] != "2"), None))
@staticmethod
def get_default_index_cycles():
return DEFAULT_INDEX_CYCLES
def set_index_cycles(self, index_cycles, write=True):
for read in self.root.iter('Read'):
if read.attrib["IsIndexedRead"] == "Y":
if read.attrib['Number'] == '2':
read.attrib.update(NumCycles=index_cycles.get('index', DEFAULT_INDEX_CYCLES['index']))
else:
read.attrib.update(NumCycles=index_cycles.get('index1', DEFAULT_INDEX_CYCLES['index1']))
if write:
self.tree.write(self.xml_file)
def is_paired_end_sequencing(self):
reads = self.get_reads()
reads = filter(lambda item: item["IsIndexedRead"] == "N", reads)
if len(reads) == 1:
return False
return True
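# Hedged usage sketch for IEMRunInfoReader: read the index cycle lengths from a
# RunInfo.xml and check for paired-end sequencing. The path used here is a
# hypothetical placeholder, not a file shipped with this package.
def _example_runinfo_usage(runinfo_path='/path/to/RunInfo.xml'):
    reader = IEMRunInfoReader(runinfo_path)
    cycles = reader.get_index_cycles()           # e.g. {'index': '8', 'index1': '8'}
    paired = reader.is_paired_end_sequencing()   # True when more than one non-index read exists
    # Reset the index cycles to the defaults in memory only (write=False)
    reader.set_index_cycles(IEMRunInfoReader.get_default_index_cycles(), write=False)
    return cycles, paired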
class LogBook:
"""
Logbook manager
"""
def __init__(self, filename):
self.filename = filename
self.logfile = None
self.logbook = dict()
def dump(self):
a = []
if not os.path.isfile(self.filename):
a.append(self.logbook)
with open(self.filename, mode='w') as f:
f.write(json.dumps(a, indent=4, sort_keys=True, default=str))
else:
with open(self.filename) as feedsjson:
feeds = json.load(feedsjson)
feeds.append(self.logbook)
with open(self.filename, mode='w') as f:
f.write(json.dumps(feeds, indent=4, sort_keys=True, default=str))
def start(self, task_name, args=None):
self.logbook.update(task_name=task_name)
self.logbook.update(args=args)
self.logbook.update(start_time=datetime.datetime.now())
def end(self):
self.logbook.update(end_time=datetime.datetime.now())
execution_time = self.logbook.get('end_time') - self.logbook.get('start_time')
self.logbook.update(execution_time=execution_time)
self.dump()
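# Hedged usage sketch for LogBook: open an entry for a task, run the task, then
# close the entry so it is appended to the JSON logbook file. The file name and
# task arguments below are illustrative placeholders.
def _example_logbook_usage(logbook_path='/tmp/presta_logbook.json'):
    logbook = LogBook(logbook_path)
    logbook.start('demultiplexing', args={'rundir': '/path/to/rundir'})
    # ... the actual work would happen here ...
    logbook.end()  # records end_time and execution_time, then dumps the entry to disk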
class IEMSampleSheetReader(csv.DictReader):
"""
Illumina Experimental Manager SampleSheet reader.
"""
def __init__(self, f):
csv.DictReader.__init__(self, f, delimiter=',')
self.header = ''
self.data = ''
first_line = f.readline()
if not first_line.startswith('[Header]'):
raise ValueError('{} is not an IEM samplesheet'.format(f.name))
header = [first_line.strip()]
l = f.readline()
while not l.startswith('[Data]'):
header.append(l.strip()) # ms-dos
l = f.readline()
else:
header.append(l.strip())
self.header = header
self.data = csv.DictReader(f.readlines(), delimiter=',')
def barcodes_have_the_same_size(self):
return False if self.get_barcode_mask() is None else True
def get_body(self, label='Sample_Name', new_value='', replace=True):
def sanitize(mystr):
"""
Sanitize string in accordance with Illumina's documentation
bcl2fastq2 Conversion Software v2.17 Guide
"""
retainlist = "_-"
return re.sub(r'[^\w' + retainlist + ']', '_', mystr)
body = []
for i in self.header:
body.append(i)
body.append('\n')
body.append(string.join(self.data.fieldnames, ','))
body.append('\n')
to_be_sanitized = ['Sample_Project', 'Sample_Name']
for row in self.data:
for f in self.data.fieldnames:
if replace and f == label:
body.append(new_value)
else:
if f in to_be_sanitized and row[f]:
body.append(sanitize(row[f]))
else:
body.append(row[f])
body.append(',')
body.append('\n')
return body
def get_barcode_mask(self):
barcodes_mask = dict()
for row in self.data:
index = len(row['index']) if 'index' in row else None
index1 = None
if 'index1' in row or 'index2' in row:
index1 = len(row['index2']) if 'index2' in row else len(row['index1'])
if row['Lane'] not in barcodes_mask:
barcodes_mask[row['Lane']] = dict(
index=index,
index1=index1,
)
else:
if index != barcodes_mask[row['Lane']]['index'] or index1 != barcodes_mask[row['Lane']]['index1']:
return None
return barcodes_mask
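# Hedged usage sketch for IEMSampleSheetReader: parse a samplesheet and inspect
# the per-lane barcode mask. The samplesheet path is a hypothetical placeholder.
# get_barcode_mask() consumes the underlying DictReader, so it is called once.
def _example_samplesheet_usage(samplesheet_path='/path/to/SampleSheet.csv'):
    with open(samplesheet_path) as f:
        reader = IEMSampleSheetReader(f)
    mask = reader.get_barcode_mask()  # e.g. {'1': {'index': 8, 'index1': None}}
    barcodes_consistent = mask is not None
    return barcodes_consistent, mask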
class WeightedPath(object):
def __init__(self, path, weight):
self.path = path
self.weight = weight
def __repr__(self):
return '{}: {} {}'.format(self.__class__.__name__,
self.path,
self.weight)
def __cmp__(self, other):
if hasattr(other, 'weight'):
return self.weight.__cmp__(other.weight)
def get_conf(logger, config_file_from_cli=None, profile=None):
profiles = {'presta': 'presta_config.yml',
'celery': 'celery_config.yml'}
default_config_file_label = profiles.get(profile, profiles['presta'])
config_file_path = config_file_setup(logger, default_config_file_label,
cf_from_cli=config_file_from_cli)
# Load YAML configuration file
return ConfigurationFromYamlFile(config_file_path)
def path_exists(path, logger, force=True):
def file_missing(path, logger, force):
msg = "path - {} - doesn't exist".format(path)
if force:
logger.error(msg)
sys.exit()
logger.warning(msg)
return False
return True if os.path.exists(os.path.expanduser(path)) else file_missing(path,
logger,
force)
def sanitize_filename(filename):
valid_chars = "-_.%s%s" % (string.ascii_letters, string.digits)
return ''.join(c for c in filename if c in valid_chars)
def format_dataset_filename(sample_label, lane=None, read=None, ext=None, uid=False):
filename = sanitize_filename(sample_label)
if read:
filename = '_'.join(
[filename, lane, read]) if lane else '_'.join(
[filename, read])
if uid:
filename = '.'.join([filename, str(uuid.uuid4())])
if ext:
filename = '.'.join([filename, ext])
return sanitize_filename(filename)
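# Small worked example of the two helpers above: characters outside letters,
# digits, '-', '_' and '.' are stripped, then lane, read and extension are
# appended in that order.
def _example_filename_formatting():
    # 'Sample 1' -> 'Sample1' -> 'Sample1_L001_R1.fastq.gz'
    return format_dataset_filename('Sample 1', lane='L001', read='R1', ext='fastq.gz')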
def config_file_setup(logger, cf_label, cf_from_cli=None):
"""
Create a config file if it does not exist, copying it from the package
default into the user_config_dir.
Return a configuration file path from cli args if present, otherwise return
a path from the user_config_dir
:param logger: logger
:param cf_label: label of the configuration file (required)
:param cf_from_cli: path to configuration file from cli arg
:return: Path
"""
presta_config_dir = os.path.join(user_config_dir(__appname__))
config_file_from_home = os.path.join(presta_config_dir, cf_label)
if not path_exists(config_file_from_home, logger, force=False):
logger.info('Creating config path {}'.format(presta_config_dir))
ensure_dir(presta_config_dir)
config_file_path = '/'.join(['config', cf_label])
config_file_from_package = resource_filename(__appname__,
config_file_path)
copyfile(config_file_from_package, config_file_from_home)
config_file_paths = []
if cf_from_cli and path_exists(cf_from_cli, logger, force=False):
config_file_paths.append(WeightedPath(cf_from_cli, 0))
if path_exists(config_file_from_home, logger, force=False):
config_file_paths.append(WeightedPath(config_file_from_home, 1))
logger.debug("config file paths: {}".format(config_file_paths))
config_file_path = sorted(config_file_paths)[0].path
logger.info('Reading configuration from {}'.format(config_file_path))
return config_file_path
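# Hedged sketch of the precedence rule implemented above: candidate paths are
# wrapped in WeightedPath and the lowest weight wins, so a CLI-supplied file
# takes priority over the one in the user config dir. Paths are placeholders.
def _example_config_precedence():
    candidates = [WeightedPath('~/.config/presta/presta_config.yml', 1),
                  WeightedPath('/path/from/cli.yml', 0)]
    return sorted(candidates)[0].path  # -> '/path/from/cli.yml'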
def touch(path, logger):
try:
with open(path, 'a'):
os.utime(path, None)
except IOError as e:
logger.error("While touching {} file: {}".format(path, e.strerror))
def read_chunks(file_handle, chunk_size=8192):
while True:
data = file_handle.read(chunk_size)
if not data:
break
| return reads | random_line_split |
__init__.py | _STATUS = dict(COMPLETED='completed', STARTED='started', TODO='todo')
class IEMRunInfoReader:
"""
Illumina Experimental Manager RunInfo xml reader.
"""
def __init__(self, f):
self.xml_file = f
self.tree = ET.parse(self.xml_file)
self.root = self.tree.getroot()
def get_reads(self):
reads = [r.attrib for r in self.root.iter('Read')]
return reads
def get_indexed_reads(self):
reads = self.get_reads()
return filter(lambda item: item["IsIndexedRead"] == "Y", reads)
def get_index_cycles(self):
indexed_reads = self.get_indexed_reads()
return dict(
index=next((item['NumCycles'] for item in indexed_reads
if item["IsIndexedRead"] == "Y" and item['Number'] == "2"), None),
index1=next((item['NumCycles'] for item in indexed_reads
if item["IsIndexedRead"] == "Y" and item['Number'] != "2"), None))
@staticmethod
def get_default_index_cycles():
return DEFAULT_INDEX_CYCLES
def set_index_cycles(self, index_cycles, write=True):
for read in self.root.iter('Read'):
|
if write:
self.tree.write(self.xml_file)
def is_paired_end_sequencing(self):
reads = self.get_reads()
reads = filter(lambda item: item["IsIndexedRead"] == "N", reads)
if len(reads) == 1:
return False
return True
class LogBook:
"""
Logbook manager
"""
def __init__(self, filename):
self.filename = filename
self.logfile = None
self.logbook = dict()
def dump(self):
a = []
if not os.path.isfile(self.filename):
a.append(self.logbook)
with open(self.filename, mode='w') as f:
f.write(json.dumps(a, indent=4, sort_keys=True, default=str))
else:
with open(self.filename) as feedsjson:
feeds = json.load(feedsjson)
feeds.append(self.logbook)
with open(self.filename, mode='w') as f:
f.write(json.dumps(feeds, indent=4, sort_keys=True, default=str))
def start(self, task_name, args=None):
self.logbook.update(task_name=task_name)
self.logbook.update(args=args)
self.logbook.update(start_time=datetime.datetime.now())
def end(self):
self.logbook.update(end_time=datetime.datetime.now())
execution_time = self.logbook.get('end_time') - self.logbook.get('start_time')
self.logbook.update(execution_time=execution_time)
self.dump()
class IEMSampleSheetReader(csv.DictReader):
"""
Illumina Experimental Manager SampleSheet reader.
"""
def __init__(self, f):
csv.DictReader.__init__(self, f, delimiter=',')
self.header = ''
self.data = ''
first_line = f.readline()
if not first_line.startswith('[Header]'):
raise ValueError('{} is not an IEM samplesheet'.format(f.name))
header = [first_line.strip()]
l = f.readline()
while not l.startswith('[Data]'):
header.append(l.strip()) # ms-dos
l = f.readline()
else:
header.append(l.strip())
self.header = header
self.data = csv.DictReader(f.readlines(), delimiter=',')
def barcodes_have_the_same_size(self):
return False if self.get_barcode_mask() is None else True
def get_body(self, label='Sample_Name', new_value='', replace=True):
def sanitize(mystr):
"""
Sanitize string in accordance with Illumina's documentation
bcl2fastq2 Conversion Software v2.17 Guide
"""
retainlist = "_-"
return re.sub(r'[^\w' + retainlist + ']', '_', mystr)
body = []
for i in self.header:
body.append(i)
body.append('\n')
body.append(string.join(self.data.fieldnames, ','))
body.append('\n')
to_be_sanitized = ['Sample_Project', 'Sample_Name']
for row in self.data:
for f in self.data.fieldnames:
if replace and f == label:
body.append(new_value)
else:
if f in to_be_sanitized and row[f]:
body.append(sanitize(row[f]))
else:
body.append(row[f])
body.append(',')
body.append('\n')
return body
def get_barcode_mask(self):
barcodes_mask = dict()
for row in self.data:
index = len(row['index']) if 'index' in row else None
index1 = None
if 'index1' in row or 'index2' in row:
index1 = len(row['index2']) if 'index2' in row else len(row['index1'])
if row['Lane'] not in barcodes_mask:
barcodes_mask[row['Lane']] = dict(
index=index,
index1=index1,
)
else:
if index != barcodes_mask[row['Lane']]['index'] or index1 != barcodes_mask[row['Lane']]['index1']:
return None
return barcodes_mask
class WeightedPath(object):
def __init__(self, path, weight):
self.path = path
self.weight = weight
def __repr__(self):
return '{}: {} {}'.format(self.__class__.__name__,
self.path,
self.weight)
def __cmp__(self, other):
if hasattr(other, 'weight'):
return self.weight.__cmp__(other.weight)
def get_conf(logger, config_file_from_cli=None, profile=None):
profiles = {'presta': 'presta_config.yml',
'celery': 'celery_config.yml'}
default_config_file_label = profiles.get(profile, profiles['presta'])
config_file_path = config_file_setup(logger, default_config_file_label,
cf_from_cli=config_file_from_cli)
# Load YAML configuration file
return ConfigurationFromYamlFile(config_file_path)
def path_exists(path, logger, force=True):
def file_missing(path, logger, force):
msg = "path - {} - doesn't exist".format(path)
if force:
logger.error(msg)
sys.exit()
logger.warning(msg)
return False
return True if os.path.exists(os.path.expanduser(path)) else file_missing(path,
logger,
force)
def sanitize_filename(filename):
valid_chars = "-_.%s%s" % (string.ascii_letters, string.digits)
return ''.join(c for c in filename if c in valid_chars)
def format_dataset_filename(sample_label, lane=None, read=None, ext=None, uid=False):
filename = sanitize_filename(sample_label)
if read:
filename = '_'.join(
[filename, lane, read]) if lane else '_'.join(
[filename, read])
if uid:
filename = '.'.join([filename, str(uuid.uuid4())])
if ext:
filename = '.'.join([filename, ext])
return sanitize_filename(filename)
def config_file_setup(logger, cf_label, cf_from_cli=None):
"""
Create a config file if it does not exist, copying it from the package
default into the user_config_dir.
Return a configuration file path from cli args if present, otherwise return
a path from the user_config_dir
:param logger: logger
:param cf_label: label of the configuration file (required)
:param cf_from_cli: path to configuration file from cli arg
:return: Path
"""
presta_config_dir = os.path.join(user_config_dir(__appname__))
config_file_from_home = os.path.join(presta_config_dir, cf_label)
if not path_exists(config_file_from_home, logger, force=False):
logger.info('Creating config path {}'.format(presta_config_dir))
ensure_dir(presta_config_dir)
config_file_path = '/'.join(['config', cf_label])
config_file_from_package = resource_filename(__appname__,
config_file_path)
copyfile(config_file_from_package, config_file_from_home)
config_file_paths = []
if cf_from_cli and path_exists(cf_from_cli, logger, force=False):
config_file_paths.append(WeightedPath(cf_from_cli, 0))
if path_exists(config_file_from_home, logger, force=False):
config_file_paths.append(WeightedPath(config_file_from_home, 1))
logger.debug("config file paths: {}".format(config_file_paths))
config_file_path = sorted(config_file_paths)[0].path
logger.info('Reading configuration from {}'.format(config_file_path))
return config_file_path
def touch(path, logger):
try:
with open(path, 'a'):
os.utime(path, None)
except IOError as e:
logger.error("While touching {} file: {}".format(path, e.strerror))
def read_chunks(file_handle, chunk_size=8192):
while True:
data = file_handle.read(chunk_size)
if not data:
break
| if read.attrib["IsIndexedRead"] == "Y":
if read.attrib['Number'] == '2':
read.attrib.update(NumCycles=index_cycles.get('index', DEFAULT_INDEX_CYCLES['index']))
else:
read.attrib.update(NumCycles=index_cycles.get('index', DEFAULT_INDEX_CYCLES['index'])) | conditional_block |
__init__.py | _STATUS = dict(COMPLETED='completed', STARTED='started', TODO='todo')
class IEMRunInfoReader:
"""
Illumina Experimental Manager RunInfo xml reader.
"""
def | (self, f):
self.xml_file = f
self.tree = ET.parse(self.xml_file)
self.root = self.tree.getroot()
def get_reads(self):
reads = [r.attrib for r in self.root.iter('Read')]
return reads
def get_indexed_reads(self):
reads = self.get_reads()
return filter(lambda item: item["IsIndexedRead"] == "Y", reads)
def get_index_cycles(self):
indexed_reads = self.get_indexed_reads()
return dict(
index=next((item['NumCycles'] for item in indexed_reads
if item["IsIndexedRead"] == "Y" and item['Number'] == "2"), None),
index1=next((item['NumCycles'] for item in indexed_reads
if item["IsIndexedRead"] == "Y" and item['Number'] != "2"), None))
@staticmethod
def get_default_index_cycles():
return DEFAULT_INDEX_CYCLES
def set_index_cycles(self, index_cycles, write=True):
for read in self.root.iter('Read'):
if read.attrib["IsIndexedRead"] == "Y":
if read.attrib['Number'] == '2':
read.attrib.update(NumCycles=index_cycles.get('index', DEFAULT_INDEX_CYCLES['index']))
else:
read.attrib.update(NumCycles=index_cycles.get('index1', DEFAULT_INDEX_CYCLES['index1']))
if write:
self.tree.write(self.xml_file)
def is_paired_end_sequencing(self):
reads = self.get_reads()
reads = filter(lambda item: item["IsIndexedRead"] == "N", reads)
if len(reads) == 1:
return False
return True
class LogBook:
"""
Logbook manager
"""
def __init__(self, filename):
self.filename = filename
self.logfile = None
self.logbook = dict()
def dump(self):
a = []
if not os.path.isfile(self.filename):
a.append(self.logbook)
with open(self.filename, mode='w') as f:
f.write(json.dumps(a, indent=4, sort_keys=True, default=str))
else:
with open(self.filename) as feedsjson:
feeds = json.load(feedsjson)
feeds.append(self.logbook)
with open(self.filename, mode='w') as f:
f.write(json.dumps(feeds, indent=4, sort_keys=True, default=str))
def start(self, task_name, args=None):
self.logbook.update(task_name=task_name)
self.logbook.update(args=args)
self.logbook.update(start_time=datetime.datetime.now())
def end(self):
self.logbook.update(end_time=datetime.datetime.now())
execution_time = self.logbook.get('end_time') - self.logbook.get('start_time')
self.logbook.update(execution_time=execution_time)
self.dump()
class IEMSampleSheetReader(csv.DictReader):
"""
Illumina Experimental Manager SampleSheet reader.
"""
def __init__(self, f):
csv.DictReader.__init__(self, f, delimiter=',')
self.header = ''
self.data = ''
first_line = f.readline()
if not first_line.startswith('[Header]'):
raise ValueError('{} is not an IEM samplesheet'.format(f.name))
header = [first_line.strip()]
l = f.readline()
while not l.startswith('[Data]'):
header.append(l.strip()) # ms-dos
l = f.readline()
else:
header.append(l.strip())
self.header = header
self.data = csv.DictReader(f.readlines(), delimiter=',')
def barcodes_have_the_same_size(self):
return False if self.get_barcode_mask() is None else True
def get_body(self, label='Sample_Name', new_value='', replace=True):
def sanitize(mystr):
"""
Sanitize string in accordance with Illumina's documentation
bcl2fastq2 Conversion Software v2.17 Guide
"""
retainlist = "_-"
return re.sub(r'[^\w' + retainlist + ']', '_', mystr)
body = []
for i in self.header:
body.append(i)
body.append('\n')
body.append(string.join(self.data.fieldnames, ','))
body.append('\n')
to_be_sanitized = ['Sample_Project', 'Sample_Name']
for row in self.data:
for f in self.data.fieldnames:
if replace and f == label:
body.append(new_value)
else:
if f in to_be_sanitized and row[f]:
body.append(sanitize(row[f]))
else:
body.append(row[f])
body.append(',')
body.append('\n')
return body
def get_barcode_mask(self):
barcodes_mask = dict()
for row in self.data:
index = len(row['index']) if 'index' in row else None
index1 = None
if 'index1' in row or 'index2' in row:
index1 = len(row['index2']) if 'index2' in row else len(row['index1'])
if row['Lane'] not in barcodes_mask:
barcodes_mask[row['Lane']] = dict(
index=index,
index1=index1,
)
else:
if index != barcodes_mask[row['Lane']]['index'] or index1 != barcodes_mask[row['Lane']]['index1']:
return None
return barcodes_mask
class WeightedPath(object):
def __init__(self, path, weight):
self.path = path
self.weight = weight
def __repr__(self):
return '{}: {} {}'.format(self.__class__.__name__,
self.path,
self.weight)
def __cmp__(self, other):
if hasattr(other, 'weight'):
return self.weight.__cmp__(other.weight)
def get_conf(logger, config_file_from_cli=None, profile=None):
profiles = {'presta': 'presta_config.yml',
'celery': 'celery_config.yml'}
default_config_file_label = profiles.get(profile, profiles['presta'])
config_file_path = config_file_setup(logger, default_config_file_label,
cf_from_cli=config_file_from_cli)
# Load YAML configuration file
return ConfigurationFromYamlFile(config_file_path)
def path_exists(path, logger, force=True):
def file_missing(path, logger, force):
msg = "path - {} - doesn't exist".format(path)
if force:
logger.error(msg)
sys.exit()
logger.warning(msg)
return False
return True if os.path.exists(os.path.expanduser(path)) else file_missing(path,
logger,
force)
def sanitize_filename(filename):
valid_chars = "-_.%s%s" % (string.ascii_letters, string.digits)
return ''.join(c for c in filename if c in valid_chars)
def format_dataset_filename(sample_label, lane=None, read=None, ext=None, uid=False):
filename = sanitize_filename(sample_label)
if read:
filename = '_'.join(
[filename, lane, read]) if lane else '_'.join(
[filename, read])
if uid:
filename = '.'.join([filename, str(uuid.uuid4())])
if ext:
filename = '.'.join([filename, ext])
return sanitize_filename(filename)
def config_file_setup(logger, cf_label, cf_from_cli=None):
"""
Create a config file if it does not exist, copying it from the package
default into the user_config_dir.
Return a configuration file path from cli args if present, otherwise return
a path from the user_config_dir
:param logger: logger
:param cf_label: label of the configuration file (required)
:param cf_from_cli: path to configuration file from cli arg
:return: Path
"""
presta_config_dir = os.path.join(user_config_dir(__appname__))
config_file_from_home = os.path.join(presta_config_dir, cf_label)
if not path_exists(config_file_from_home, logger, force=False):
logger.info('Creating config path {}'.format(presta_config_dir))
ensure_dir(presta_config_dir)
config_file_path = '/'.join(['config', cf_label])
config_file_from_package = resource_filename(__appname__,
config_file_path)
copyfile(config_file_from_package, config_file_from_home)
config_file_paths = []
if cf_from_cli and path_exists(cf_from_cli, logger, force=False):
config_file_paths.append(WeightedPath(cf_from_cli, 0))
if path_exists(config_file_from_home, logger, force=False):
config_file_paths.append(WeightedPath(config_file_from_home, 1))
logger.debug("config file paths: {}".format(config_file_paths))
config_file_path = sorted(config_file_paths)[0].path
logger.info('Reading configuration from {}'.format(config_file_path))
return config_file_path
def touch(path, logger):
try:
with open(path, 'a'):
os.utime(path, None)
except IOError as e:
logger.error("While touching {} file: {}".format(path, e.strerror))
def read_chunks(file_handle, chunk_size=8192):
while True:
data = file_handle.read(chunk_size)
if not data:
break
| __init__ | identifier_name |
__init__.py | GRESS_STATUS = dict(COMPLETED='completed', STARTED='started', TODO='todo')
class IEMRunInfoReader:
"""
Illumina Experimental Manager RunInfo xml reader.
"""
def __init__(self, f):
self.xml_file = f
self.tree = ET.parse(self.xml_file)
self.root = self.tree.getroot()
def get_reads(self):
reads = [r.attrib for r in self.root.iter('Read')]
return reads
def get_indexed_reads(self):
reads = self.get_reads()
return filter(lambda item: item["IsIndexedRead"] == "Y", reads)
def get_index_cycles(self):
indexed_reads = self.get_indexed_reads()
return dict(
index=next((item['NumCycles'] for item in indexed_reads
if item["IsIndexedRead"] == "Y" and item['Number'] == "2"), None),
index1=next((item['NumCycles'] for item in indexed_reads
if item["IsIndexedRead"] == "Y" and item['Number'] != "2"), None))
@staticmethod
def get_default_index_cycles():
return DEFAULT_INDEX_CYCLES
def set_index_cycles(self, index_cycles, write=True):
for read in self.root.iter('Read'):
if read.attrib["IsIndexedRead"] == "Y":
if read.attrib['Number'] == '2':
read.attrib.update(NumCycles=index_cycles.get('index', DEFAULT_INDEX_CYCLES['index']))
else:
read.attrib.update(NumCycles=index_cycles.get('index1', DEFAULT_INDEX_CYCLES['index1']))
if write:
self.tree.write(self.xml_file)
def is_paired_end_sequencing(self):
reads = self.get_reads()
reads = filter(lambda item: item["IsIndexedRead"] == "N", reads)
if len(reads) == 1:
return False
return True
class LogBook:
"""
Logbook manager
"""
def __init__(self, filename):
self.filename = filename
self.logfile = None
self.logbook = dict()
def dump(self):
a = []
if not os.path.isfile(self.filename):
a.append(self.logbook)
with open(self.filename, mode='w') as f:
f.write(json.dumps(a, indent=4, sort_keys=True, default=str))
else:
with open(self.filename) as feedsjson:
feeds = json.load(feedsjson)
feeds.append(self.logbook)
with open(self.filename, mode='w') as f:
f.write(json.dumps(feeds, indent=4, sort_keys=True, default=str))
def start(self, task_name, args=None):
self.logbook.update(task_name=task_name)
self.logbook.update(args=args)
self.logbook.update(start_time=datetime.datetime.now())
def end(self):
self.logbook.update(end_time=datetime.datetime.now())
execution_time = self.logbook.get('end_time') - self.logbook.get('start_time')
self.logbook.update(execution_time=execution_time)
self.dump()
class IEMSampleSheetReader(csv.DictReader):
"""
Illumina Experimental Manager SampleSheet reader.
"""
def __init__(self, f):
csv.DictReader.__init__(self, f, delimiter=',')
self.header = ''
self.data = ''
first_line = f.readline()
if not first_line.startswith('[Header]'):
raise ValueError('{} is not an IEM samplesheet'.format(f.name))
header = [first_line.strip()]
l = f.readline()
while not l.startswith('[Data]'):
header.append(l.strip()) # ms-dos
l = f.readline()
else:
header.append(l.strip())
self.header = header
self.data = csv.DictReader(f.readlines(), delimiter=',')
def barcodes_have_the_same_size(self):
|
def get_body(self, label='Sample_Name', new_value='', replace=True):
def sanitize(mystr):
"""
Sanitize string in accordance with Illumina's documentation
bcl2fastq2 Conversion Software v2.17 Guide
"""
retainlist = "_-"
return re.sub(r'[^\w' + retainlist + ']', '_', mystr)
body = []
for i in self.header:
body.append(i)
body.append('\n')
body.append(string.join(self.data.fieldnames, ','))
body.append('\n')
to_be_sanitized = ['Sample_Project', 'Sample_Name']
for row in self.data:
for f in self.data.fieldnames:
if replace and f == label:
body.append(new_value)
else:
if f in to_be_sanitized and row[f]:
body.append(sanitize(row[f]))
else:
body.append(row[f])
body.append(',')
body.append('\n')
return body
def get_barcode_mask(self):
barcodes_mask = dict()
for row in self.data:
index = len(row['index']) if 'index' in row else None
index1 = None
if 'index1' in row or 'index2' in row:
index1 = len(row['index2']) if 'index2' in row else len(row['index1'])
if row['Lane'] not in barcodes_mask:
barcodes_mask[row['Lane']] = dict(
index=index,
index1=index1,
)
else:
if index != barcodes_mask[row['Lane']]['index'] or index1 != barcodes_mask[row['Lane']]['index1']:
return None
return barcodes_mask
class WeightedPath(object):
def __init__(self, path, weight):
self.path = path
self.weight = weight
def __repr__(self):
return '{}: {} {}'.format(self.__class__.__name__,
self.path,
self.weight)
def __cmp__(self, other):
if hasattr(other, 'weight'):
return self.weight.__cmp__(other.weight)
def get_conf(logger, config_file_from_cli=None, profile=None):
profiles = {'presta': 'presta_config.yml',
'celery': 'celery_config.yml'}
default_config_file_label = profiles.get(profile, profiles['presta'])
config_file_path = config_file_setup(logger, default_config_file_label,
cf_from_cli=config_file_from_cli)
# Load YAML configuration file
return ConfigurationFromYamlFile(config_file_path)
def path_exists(path, logger, force=True):
def file_missing(path, logger, force):
msg = "path - {} - doesn't exist".format(path)
if force:
logger.error(msg)
sys.exit()
logger.warning(msg)
return False
return True if os.path.exists(os.path.expanduser(path)) else file_missing(path,
logger,
force)
def sanitize_filename(filename):
valid_chars = "-_.%s%s" % (string.ascii_letters, string.digits)
return ''.join(c for c in filename if c in valid_chars)
def format_dataset_filename(sample_label, lane=None, read=None, ext=None, uid=False):
filename = sanitize_filename(sample_label)
if read:
filename = '_'.join(
[filename, lane, read]) if lane else '_'.join(
[filename, read])
if uid:
filename = '.'.join([filename, str(uuid.uuid4())])
if ext:
filename = '.'.join([filename, ext])
return sanitize_filename(filename)
def config_file_setup(logger, cf_label, cf_from_cli=None):
"""
Create a config file if it does not exist, copying it from the package
default into the user_config_dir.
Return a configuration file path from cli args if present, otherwise return
a path from the user_config_dir
:param logger: logger
:param cf_label: label of the configuration file (required)
:param cf_from_cli: path to configuration file from cli arg
:return: Path
"""
presta_config_dir = os.path.join(user_config_dir(__appname__))
config_file_from_home = os.path.join(presta_config_dir, cf_label)
if not path_exists(config_file_from_home, logger, force=False):
logger.info('Creating config path {}'.format(presta_config_dir))
ensure_dir(presta_config_dir)
config_file_path = '/'.join(['config', cf_label])
config_file_from_package = resource_filename(__appname__,
config_file_path)
copyfile(config_file_from_package, config_file_from_home)
config_file_paths = []
if cf_from_cli and path_exists(cf_from_cli, logger, force=False):
config_file_paths.append(WeightedPath(cf_from_cli, 0))
if path_exists(config_file_from_home, logger, force=False):
config_file_paths.append(WeightedPath(config_file_from_home, 1))
logger.debug("config file paths: {}".format(config_file_paths))
config_file_path = sorted(config_file_paths)[0].path
logger.info('Reading configuration from {}'.format(config_file_path))
return config_file_path
def touch(path, logger):
try:
with open(path, 'a'):
os.utime(path, None)
except IOError as e:
logger.error("While touching {} file: {}".format(path, e.strerror))
def read_chunks(file_handle, chunk_size=8192):
while True:
data = file_handle.read(chunk_size)
if not data:
break
| return False if self.get_barcode_mask() is None else True | identifier_body |
kidney_utils.py | format(chain.ndd_index))
ndd_used[chain.ndd_index] = True
for vtx_index in chain.vtx_indices:
if vtx_used[vtx_index]:
raise KidneyOptimException("Vertex {} used more than once".format(vtx_index))
vtx_used[vtx_index] = True
for cycle in opt_result.cycles:
for vtx in cycle:
if vtx_used[vtx.id]:
raise KidneyOptimException("Vertex {} used more than once".format(vtx.id))
vtx_used[vtx.id] = True
# cycle and chain caps are respected
for chain in opt_result.chains:
if len(chain.vtx_indices) > max_chain:
raise KidneyOptimException("The chain cap is violated")
for cycle in opt_result.cycles:
if len(cycle) > max_cycle:
raise KidneyOptimException("The cycle cap is violated")
if not min_chain is None:
for chain in opt_result.chains:
if len(chain.vtx_indices) < min_chain:
raise KidneyOptimException("The min-chain cap is violated")
# # min chain length is respected
# if cfg.min_chain_len is not None:
# for chain in opt_result.chains:
# if len(set(chain.vtx_indices)) < cfg.min_chain_len:
# raise KidneyOptimException("The chain is below the min length (%d):\n %s" %
# (cfg.min_chain_len,chain.display()))
# chains do not contain loops
for chain in opt_result.chains:
if len(set(chain.vtx_indices)) < len(chain.vtx_indices):
raise KidneyOptimException("The chain contains loops:\n %s" % chain.display())
def get_dist_from_nearest_ndd(digraph, ndds):
""" For each donor-patient pair V, this returns the length of the
shortest path from an NDD to V, or 999999999 if no path from an NDD
to V exists.
"""
# Get a set of donor-patient pairs who are the target of an edge from an NDD
ndd_targets = set()
for ndd in ndds:
for edge in ndd.edges:
ndd_targets.add(edge.tgt)
# Breadth-first search
q = deque(ndd_targets)
distances = [999999999] * len(digraph.vs)
for v in ndd_targets:
distances[v.id] = 1
while q:
v = q.popleft()
for e in v.edges:
w = e.tgt
if distances[w.id] == 999999999:
distances[w.id] = distances[v.id] + 1
q.append(w)
return distances
def find_vertex_chain_participation(digraph, ndds,max_chain):
""" For each donor-patient pair V, add a property "can_be_in_chain_list", | initiated by ndd i (True if v is within the chain cap of ndd i, False otherwise)
"""
for v in digraph.vs:
v.can_be_in_chain_list = [False for _ in ndds]
for i_ndd,ndd in enumerate(ndds):
# Get a set of donor-patient pairs who are the target of an edge from an NDD
ndd_targets = set()
for edge in ndd.edges:
ndd_targets.add(edge.tgt)
# Breadth-first search
q = deque(ndd_targets)
distances = [999999999] * len(digraph.vs)
for v in ndd_targets:
distances[v.id] = 1
while q:
v = q.popleft()
for e in v.edges:
w = e.tgt
if distances[w.id] == 999999999:
distances[w.id] = distances[v.id] + 1
q.append(w)
for v,dist in zip(digraph.vs,distances):
if dist <= max_chain:
v.can_be_in_chain_list[i_ndd] = True
def find_selected_path(v_id, next_vv):
path = [v_id]
while v_id in next_vv:
v_id = next_vv[v_id]
path.append(v_id)
return path
def find_selected_cycle(v_id, next_vv):
cycle = [v_id]
while v_id in next_vv:
v_id = next_vv[v_id]
if v_id in cycle:
return cycle
else:
cycle.append(v_id)
return None
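# The two helpers above only need a plain mapping from each vertex id to the
# next selected vertex id, so they can be illustrated without a solver model;
# the ids below are arbitrary examples.
def _example_path_and_cycle_reconstruction():
    chain_next = {2: 5, 5: 7}
    assert find_selected_path(2, chain_next) == [2, 5, 7]
    cycle_next = {1: 2, 2: 3, 3: 1}
    assert find_selected_cycle(1, cycle_next) == [1, 2, 3]
    assert find_selected_cycle(2, chain_next) is None  # a path, not a cycle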
def get_optimal_chains(digraph, ndds, edge_success_prob=1):
# Chain edges
chain_next_vv = {e.src.id: e.tgt.id
for e in digraph.es
for var in e.grb_vars
if var.x > 0.1} # changed to Xn from x by Duncan
optimal_chains = []
for i, ndd in enumerate(ndds):
for e in ndd.edges:
if e.edge_var.x > 0.1:
vtx_indices = find_selected_path(e.tgt.id, chain_next_vv)
# Get score of edge from NDD
score = e.score * edge_success_prob
# Add scores of edges between vertices
for j in range(len(vtx_indices) - 1):
score += digraph.adj_mat[vtx_indices[j]][vtx_indices[j + 1]].score * edge_success_prob ** (j + 2)
optimal_chains.append(kidney_ndds.Chain(i, vtx_indices, score))
return optimal_chains
# added by duncan
def get_optimal_chains_pctsp(digraph, ndds):
# Chain edges
edge_success_prob = 1.0
chain_next_vv = {e.src.id: e.tgt.id
for e in digraph.es
if e.edge_var.x > 0.5}
optimal_chains = []
for i, ndd in enumerate(ndds):
for e in ndd.edges:
if e.edge_var.x > 0.5:
vtx_indices = find_selected_path(e.tgt.id, chain_next_vv)
# Get score of edge from NDD
score = e.score * edge_success_prob
# Add scores of edges between vertices
for j in range(len(vtx_indices) - 1):
score += digraph.adj_mat[vtx_indices[j]][vtx_indices[j + 1]].score * edge_success_prob ** (j + 2)
optimal_chains.append(kidney_ndds.Chain(i, vtx_indices, score))
return optimal_chains
def selected_edges_to_cycles(digraph, cycle_start_vv, cycle_next_vv):
cycles = [find_selected_cycle(start_v, cycle_next_vv) for start_v in cycle_start_vv]
# Remove "cycles" that are really part of a chain
cycles = [c for c in cycles if c is not None]
# Remove duplicated cycles
cycles = [c for c in cycles if c[0] == min(c)]
# Use vertices instead of indices
return [[digraph.vs[v_id] for v_id in c] for c in cycles]
# return True if cycle c contains edge e
# c is a list of kidney_digraph.Vertex objects (with the first vertex not repeated)
# edge is a kidney_digraph.Edge object
def cycle_contains_edge(c,e):
if e.src in c:
i = c.index(e.src)
if e.tgt == c[(i+1) % len(c)]:
return True
else:
return False
return False
# -------------------------------------------------------------------------------------------------
#
# Functions for Variable Uncertainty Budget
#
# -------------------------------------------------------------------------------------------------
from scipy.special import binom
from scipy.optimize import minimize
import math
def B_bound(num_E,gamma):
'''
The upper-bound on probability that realized edge weights fall outside of the U-set:
Assuming symmetric interval uncertainty, and realized edge weights symmetrically distributed about
their nominal value.
From Bertsimas, Price of Robustness
'''
eta = (gamma + num_E)/2.0
fl_eta = int(math.floor(eta))
mu = float(eta - fl_eta)
return math.pow(2,-num_E)*((1.0-mu)*binom(num_E,fl_eta)
+ sum( binom(num_E,l) for l in range(fl_eta+1,int(num_E)+1) ))
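# Small numeric check of B_bound (illustrative only): with num_E = 4 selected
# edges, the violation bound is 11/16 = 0.6875 for gamma = 0 and it drops to
# 2**-4 = 0.0625 once the budget covers every edge (gamma = num_E).
def _example_b_bound_values():
    assert abs(B_bound(4, 0) - 0.6875) < 1e-9
    assert abs(B_bound(4, 4) - 0.0625) < 1e-9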
def gamma_symmetric_edge_weights(x_norm,epsilon):
'''
Variable budget function for symmetric cost uncertainty (from Poss & Bergamo)
input:
- x_norm : number of edges in the solution
- epsilon : protection level (realized edge weights will be outside of U-set with prob. epsilon)
'''
# the first constraint is that B_bound <= epsilon,
# the second is that gamma >= 0
# the third is that gamma <= x_norm
constr = ({'type':'ineq',
'fun':lambda g: epsilon - B_bound(x_norm,g)
},
{'type':'ineq',
'fun':lambda g: g},
{'type': 'ineq',
| which is a list of booleans: can_be_in_chain_list[i] = True if v can be in a chain | random_line_split |
kidney_utils.py | # no vertex or NDD is used twice
ndd_used = [False] * len(ndds)
vtx_used = [False] * len(digraph.vs)
for chain in opt_result.chains:
if ndd_used[chain.ndd_index]:
raise KidneyOptimException("NDD {} used more than once".format(chain.ndd_index))
ndd_used[chain.ndd_index] = True
for vtx_index in chain.vtx_indices:
if vtx_used[vtx_index]:
raise KidneyOptimException("Vertex {} used more than once".format(vtx_index))
vtx_used[vtx_index] = True
for cycle in opt_result.cycles:
for vtx in cycle:
if vtx_used[vtx.id]:
raise KidneyOptimException("Vertex {} used more than once".format(vtx.id))
vtx_used[vtx.id] = True
# cycle and chain caps are respected
for chain in opt_result.chains:
if len(chain.vtx_indices) > max_chain:
raise KidneyOptimException("The chain cap is violated")
for cycle in opt_result.cycles:
if len(cycle) > max_cycle:
raise KidneyOptimException("The cycle cap is violated")
if not min_chain is None:
for chain in opt_result.chains:
if len(chain.vtx_indices) < min_chain:
raise KidneyOptimException("The min-chain cap is violated")
# # min chain length is respected
# if cfg.min_chain_len is not None:
# for chain in opt_result.chains:
# if len(set(chain.vtx_indices)) < cfg.min_chain_len:
# raise KidneyOptimException("The chain is below the min length (%d):\n %s" %
# (cfg.min_chain_len,chain.display()))
# chains do not contain loops
for chain in opt_result.chains:
if len(set(chain.vtx_indices)) < len(chain.vtx_indices):
raise KidneyOptimException("The chain contains loops:\n %s" % chain.display())
def get_dist_from_nearest_ndd(digraph, ndds):
""" For each donor-patient pair V, this returns the length of the
shortest path from an NDD to V, or 999999999 if no path from an NDD
to V exists.
"""
# Get a set of donor-patient pairs who are the target of an edge from an NDD
ndd_targets = set()
for ndd in ndds:
for edge in ndd.edges:
ndd_targets.add(edge.tgt)
# Breadth-first search
q = deque(ndd_targets)
distances = [999999999] * len(digraph.vs)
for v in ndd_targets:
distances[v.id] = 1
while q:
v = q.popleft()
for e in v.edges:
w = e.tgt
if distances[w.id] == 999999999:
distances[w.id] = distances[v.id] + 1
q.append(w)
return distances
def find_vertex_chain_participation(digraph, ndds,max_chain):
""" For each donor-patient pair V, add a property "can_be_in_chain_list",
which is a list of booleans: can_be_in_chain_list[i] = True if v can be in a chain
initiated by ndd i (True if v is within the chain cap of ndd i, False otherwise)
"""
for v in digraph.vs:
v.can_be_in_chain_list = [False for _ in ndds]
for i_ndd,ndd in enumerate(ndds):
# Get a set of donor-patient pairs who are the target of an edge from an NDD
ndd_targets = set()
for edge in ndd.edges:
ndd_targets.add(edge.tgt)
# Breadth-first search
q = deque(ndd_targets)
distances = [999999999] * len(digraph.vs)
for v in ndd_targets:
distances[v.id] = 1
while q:
v = q.popleft()
for e in v.edges:
w = e.tgt
if distances[w.id] == 999999999:
distances[w.id] = distances[v.id] + 1
q.append(w)
for v,dist in zip(digraph.vs,distances):
if dist <= max_chain:
v.can_be_in_chain_list[i_ndd] = True
def find_selected_path(v_id, next_vv):
path = [v_id]
while v_id in next_vv:
v_id = next_vv[v_id]
path.append(v_id)
return path
def find_selected_cycle(v_id, next_vv):
cycle = [v_id]
while v_id in next_vv:
v_id = next_vv[v_id]
if v_id in cycle:
return cycle
else:
cycle.append(v_id)
return None
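# Illustration only (not part of the original module): the two helpers above just walk a
# successor map keyed by vertex id, so a plain dict is enough to see their behaviour.
def _selected_walk_examples():
    assert find_selected_path(1, {1: 2, 2: 3}) == [1, 2, 3]          # chain of selected edges
    assert find_selected_cycle(1, {1: 2, 2: 3, 3: 1}) == [1, 2, 3]   # closed cycle
    assert find_selected_cycle(1, {1: 2, 2: 3}) is None              # open path never closes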
def get_optimal_chains(digraph, ndds, edge_success_prob=1):
# Chain edges
chain_next_vv = {e.src.id: e.tgt.id
for e in digraph.es
for var in e.grb_vars
if var.x > 0.1} # changed to Xn from x by Duncan
optimal_chains = []
for i, ndd in enumerate(ndds):
for e in ndd.edges:
if e.edge_var.x > 0.1:
vtx_indices = find_selected_path(e.tgt.id, chain_next_vv)
# Get score of edge from NDD
score = e.score * edge_success_prob
# Add scores of edges between vertices
for j in range(len(vtx_indices) - 1):
score += digraph.adj_mat[vtx_indices[j]][vtx_indices[j + 1]].score * edge_success_prob ** (j + 2)
optimal_chains.append(kidney_ndds.Chain(i, vtx_indices, score))
return optimal_chains
# added by duncan
def get_optimal_chains_pctsp(digraph, ndds):
# Chain edges
edge_success_prob = 1.0
chain_next_vv = {e.src.id: e.tgt.id
for e in digraph.es
if e.edge_var.x > 0.5}
optimal_chains = []
for i, ndd in enumerate(ndds):
for e in ndd.edges:
if e.edge_var.x > 0.5:
vtx_indices = find_selected_path(e.tgt.id, chain_next_vv)
# Get score of edge from NDD
score = e.score * edge_success_prob
# Add scores of edges between vertices
for j in range(len(vtx_indices) - 1):
score += digraph.adj_mat[vtx_indices[j]][vtx_indices[j + 1]].score * edge_success_prob ** (j + 2)
optimal_chains.append(kidney_ndds.Chain(i, vtx_indices, score))
return optimal_chains
def selected_edges_to_cycles(digraph, cycle_start_vv, cycle_next_vv):
cycles = [find_selected_cycle(start_v, cycle_next_vv) for start_v in cycle_start_vv]
# Remove "cycles" that are really part of a chain
cycles = [c for c in cycles if c is not None]
# Remove duplicated cycles
cycles = [c for c in cycles if c[0] == min(c)]
# Use vertices instead of indices
return [[digraph.vs[v_id] for v_id in c] for c in cycles]
# return True if cycle c contains edge e
# c is a list of kidney_digraph.Vertex objects (with the first vertex not repeated)
# e is a kidney_digraph.Edge object
def cycle_contains_edge(c,e):
if e.src in c:
i = c.index(e.src)
if e.tgt == c[(i+1) % len(c)]:
return True
else:
return False
return False
# -------------------------------------------------------------------------------------------------
#
# Functions for Variable Uncertainty Budget
#
# -------------------------------------------------------------------------------------------------
from scipy.special import binom
from scipy.optimize import minimize
import | """Check that the solution is valid.
This method checks that:
- all used edges exist
- no vertex or NDD is used twice (which also ensures that no edge is used twice)
- cycle and chain caps are respected
- chain does not contain cycle (check for repeated tgt vertices)
"""
# all used edges exist
for chain in opt_result.chains:
if chain.vtx_indices[0] not in [e.tgt.id for e in ndds[chain.ndd_index].edges]:
raise KidneyOptimException("Edge from NDD {} to vertex {} is used but does not exist".format(
chain.ndd_index, chain.vtx_indices[0]))
for cycle in opt_result.cycles:
for i in range(len(cycle)):
if digraph.adj_mat[cycle[i-1].id][cycle[i].id] is None:
raise KidneyOptimException("Edge from vertex {} to vertex {} is used but does not exist".format(
cycle[i-1].id, cycle[i].id))
| identifier_body | |
kidney_utils.py |
# no vertex or NDD is used twice
ndd_used = [False] * len(ndds)
vtx_used = [False] * len(digraph.vs)
for chain in opt_result.chains:
if ndd_used[chain.ndd_index]:
raise KidneyOptimException("NDD {} used more than once".format(chain.ndd_index))
ndd_used[chain.ndd_index] = True
for vtx_index in chain.vtx_indices:
if vtx_used[vtx_index]:
raise KidneyOptimException("Vertex {} used more than once".format(vtx_index))
vtx_used[vtx_index] = True
for cycle in opt_result.cycles:
for vtx in cycle:
if vtx_used[vtx.id]:
raise KidneyOptimException("Vertex {} used more than once".format(vtx.id))
vtx_used[vtx.id] = True
# cycle and chain caps are respected
for chain in opt_result.chains:
if len(chain.vtx_indices) > max_chain:
raise KidneyOptimException("The chain cap is violated")
for cycle in opt_result.cycles:
if len(cycle) > max_cycle:
raise KidneyOptimException("The cycle cap is violated")
if min_chain is not None:
for chain in opt_result.chains:
if len(chain.vtx_indices) < min_chain:
raise KidneyOptimException("The min-chain cap is violated")
# # min chain length is respected
# if cfg.min_chain_len is not None:
# for chain in opt_result.chains:
# if len(set(chain.vtx_indices)) < cfg.min_chain_len:
# raise KidneyOptimException("The chain is below the min length (%d):\n %s" %
# (cfg.min_chain_len,chain.display()))
# chains do not contain loops
for chain in opt_result.chains:
if len(set(chain.vtx_indices)) < len(chain.vtx_indices):
raise KidneyOptimException("The chain contains loops:\n %s" % chain.display())
def get_dist_from_nearest_ndd(digraph, ndds):
""" For each donor-patient pair V, this returns the length of the
shortest path from an NDD to V, or 999999999 if no path from an NDD
to V exists.
"""
# Get a set of donor-patient pairs who are the target of an edge from an NDD
ndd_targets = set()
for ndd in ndds:
for edge in ndd.edges:
ndd_targets.add(edge.tgt)
# Breadth-first search
q = deque(ndd_targets)
distances = [999999999] * len(digraph.vs)
for v in ndd_targets:
distances[v.id] = 1
while q:
v = q.popleft()
for e in v.edges:
w = e.tgt
if distances[w.id] == 999999999:
distances[w.id] = distances[v.id] + 1
q.append(w)
return distances
def find_vertex_chain_participation(digraph, ndds,max_chain):
""" For each donor-patient pair V, add a property "can_be_in_chain_list",
which is a list of booleans: can_be_in_chain_list[i] = True if v can be in a chain
initiated by ndd i (True if v is within the chain cap of ndd i, False otherwise)
"""
for v in digraph.vs:
v.can_be_in_chain_list = [False for _ in ndds]
for i_ndd,ndd in enumerate(ndds):
# Get a set of donor-patient pairs who are the target of an edge from an NDD
ndd_targets = set()
for edge in ndd.edges:
ndd_targets.add(edge.tgt)
# Breadth-first search
q = deque(ndd_targets)
distances = [999999999] * len(digraph.vs)
for v in ndd_targets:
distances[v.id] = 1
while q:
v = q.popleft()
for e in v.edges:
w = e.tgt
if distances[w.id] == 999999999:
distances[w.id] = distances[v.id] + 1
q.append(w)
for v,dist in zip(digraph.vs,distances):
if dist <= max_chain:
v.can_be_in_chain_list[i_ndd] = True
def find_selected_path(v_id, next_vv):
path = [v_id]
while v_id in next_vv:
v_id = next_vv[v_id]
path.append(v_id)
return path
def find_selected_cycle(v_id, next_vv):
cycle = [v_id]
while v_id in next_vv:
v_id = next_vv[v_id]
if v_id in cycle:
return cycle
else:
cycle.append(v_id)
return None
def get_optimal_chains(digraph, ndds, edge_success_prob=1):
# Chain edges
chain_next_vv = {e.src.id: e.tgt.id
for e in digraph.es
for var in e.grb_vars
if var.x > 0.1} # changed to Xn from x by Duncan
optimal_chains = []
for i, ndd in enumerate(ndds):
for e in ndd.edges:
if e.edge_var.x > 0.1:
vtx_indices = find_selected_path(e.tgt.id, chain_next_vv)
# Get score of edge from NDD
score = e.score * edge_success_prob
# Add scores of edges between vertices
for j in range(len(vtx_indices) - 1):
score += digraph.adj_mat[vtx_indices[j]][vtx_indices[j + 1]].score * edge_success_prob ** (j + 2)
optimal_chains.append(kidney_ndds.Chain(i, vtx_indices, score))
return optimal_chains
# added by duncan
def get_optimal_chains_pctsp(digraph, ndds):
# Chain edges
edge_success_prob = 1.0
chain_next_vv = {e.src.id: e.tgt.id
for e in digraph.es
if e.edge_var.x > 0.5}
optimal_chains = []
for i, ndd in enumerate(ndds):
for e in ndd.edges:
if e.edge_var.x > 0.5:
vtx_indices = find_selected_path(e.tgt.id, chain_next_vv)
# Get score of edge from NDD
score = e.score * edge_success_prob
# Add scores of edges between vertices
for j in range(len(vtx_indices) - 1):
score += digraph.adj_mat[vtx_indices[j]][vtx_indices[j + 1]].score * edge_success_prob ** (j + 2)
optimal_chains.append(kidney_ndds.Chain(i, vtx_indices, score))
return optimal_chains
def selected_edges_to_cycles(digraph, cycle_start_vv, cycle_next_vv):
cycles = [find_selected_cycle(start_v, cycle_next_vv) for start_v in cycle_start_vv]
# Remove "cycles" that are really part of a chain
cycles = [c for c in cycles if c is not None]
# Remove duplicated cycles
cycles = [c for c in cycles if c[0] == min(c)]
# Use vertices instead of indices
return [[digraph.vs[v_id] for v_id in c] for c in cycles]
# return True if cycle c contains edge e
# c is a list of kidney_digraph.Vertex objects (with the first vertex not repeated)
# e is a kidney_digraph.Edge object
def cycle_contains_edge(c,e):
if e.src in c:
i = c.index(e.src)
if e.tgt == c[(i+1) % len(c)]:
return True
else:
return False
return False
# -------------------------------------------------------------------------------------------------
#
# Functions for Variable Uncertainty Budget
#
# -------------------------------------------------------------------------------------------------
from scipy.special import binom
from scipy.optimize import minimize
import math
def B_bound(num_E,gamma):
'''
The upper-bound on probability that realized edge weights fall outside of the U-set:
Assuming symmetric interval uncertainty, and realized edge weights symmetrically distributed about
their nominal value.
From Bertsimas, Price of Robustness
'''
eta = (gamma + num_E)/2.0
fl_eta = int(math.floor(eta))
mu = float(eta - fl_eta)
return math.pow(2,-num_E)*((1.0-mu)*binom(num_E,fl_eta)
+ sum( binom(num_E,l) for l in range(fl_eta+1,int(num_E)+1) ))
def gamma_symmetric_edge_weights(x_norm,epsilon):
'''
Variable budget function for symmetric cost uncertainty (from Poss & Bergamo)
input:
- x_norm : number of edges in the solution
| raise KidneyOptimException("Edge from vertex {} to vertex {} is used but does not exist".format(
cycle[i-1].id, cycle[i].id)) | conditional_block | |
kidney_utils.py | (chain.ndd_index))
ndd_used[chain.ndd_index] = True
for vtx_index in chain.vtx_indices:
if vtx_used[vtx_index]:
raise KidneyOptimException("Vertex {} used more than once".format(vtx_index))
vtx_used[vtx_index] = True
for cycle in opt_result.cycles:
for vtx in cycle:
if vtx_used[vtx.id]:
raise KidneyOptimException("Vertex {} used more than once".format(vtx.id))
vtx_used[vtx.id] = True
# cycle and chain caps are respected
for chain in opt_result.chains:
if len(chain.vtx_indices) > max_chain:
raise KidneyOptimException("The chain cap is violated")
for cycle in opt_result.cycles:
if len(cycle) > max_cycle:
raise KidneyOptimException("The cycle cap is violated")
if min_chain is not None:
for chain in opt_result.chains:
if len(chain.vtx_indices) < min_chain:
raise KidneyOptimException("The min-chain cap is violated")
# # min chain length is respected
# if cfg.min_chain_len is not None:
# for chain in opt_result.chains:
# if len(set(chain.vtx_indices)) < cfg.min_chain_len:
# raise KidneyOptimException("The chain is below the min length (%d):\n %s" %
# (cfg.min_chain_len,chain.display()))
# chains do not contain loops
for chain in opt_result.chains:
if len(set(chain.vtx_indices)) < len(chain.vtx_indices):
raise KidneyOptimException("The chain contains loops:\n %s" % chain.display())
def get_dist_from_nearest_ndd(digraph, ndds):
""" For each donor-patient pair V, this returns the length of the
shortest path from an NDD to V, or 999999999 if no path from an NDD
to V exists.
"""
# Get a set of donor-patient pairs who are the target of an edge from an NDD
ndd_targets = set()
for ndd in ndds:
for edge in ndd.edges:
ndd_targets.add(edge.tgt)
# Breadth-first search
q = deque(ndd_targets)
distances = [999999999] * len(digraph.vs)
for v in ndd_targets:
distances[v.id] = 1
while q:
v = q.popleft()
for e in v.edges:
w = e.tgt
if distances[w.id] == 999999999:
distances[w.id] = distances[v.id] + 1
q.append(w)
return distances
def find_vertex_chain_participation(digraph, ndds,max_chain):
""" For each donor-patient pair V, add a property "can_be_in_chain_list",
which is a list of booleans: can_be_in_chain_list[i] = True if v can be in a chain
initiated by ndd i (True if v is within the chain cap of ndd i, False otherwise)
"""
for v in digraph.vs:
v.can_be_in_chain_list = [False for _ in ndds]
for i_ndd,ndd in enumerate(ndds):
# Get a set of donor-patient pairs who are the target of an edge from an NDD
ndd_targets = set()
for edge in ndd.edges:
ndd_targets.add(edge.tgt)
# Breadth-first search
q = deque(ndd_targets)
distances = [999999999] * len(digraph.vs)
for v in ndd_targets:
distances[v.id] = 1
while q:
v = q.popleft()
for e in v.edges:
w = e.tgt
if distances[w.id] == 999999999:
distances[w.id] = distances[v.id] + 1
q.append(w)
for v,dist in zip(digraph.vs,distances):
if dist <= max_chain:
v.can_be_in_chain_list[i_ndd] = True
def | (v_id, next_vv):
path = [v_id]
while v_id in next_vv:
v_id = next_vv[v_id]
path.append(v_id)
return path
def find_selected_cycle(v_id, next_vv):
cycle = [v_id]
while v_id in next_vv:
v_id = next_vv[v_id]
if v_id in cycle:
return cycle
else:
cycle.append(v_id)
return None
def get_optimal_chains(digraph, ndds, edge_success_prob=1):
# Chain edges
chain_next_vv = {e.src.id: e.tgt.id
for e in digraph.es
for var in e.grb_vars
if var.x > 0.1} # changed to Xn from x by Duncan
optimal_chains = []
for i, ndd in enumerate(ndds):
for e in ndd.edges:
if e.edge_var.x > 0.1:
vtx_indices = find_selected_path(e.tgt.id, chain_next_vv)
# Get score of edge from NDD
score = e.score * edge_success_prob
# Add scores of edges between vertices
for j in range(len(vtx_indices) - 1):
score += digraph.adj_mat[vtx_indices[j]][vtx_indices[j + 1]].score * edge_success_prob ** (j + 2)
optimal_chains.append(kidney_ndds.Chain(i, vtx_indices, score))
return optimal_chains
# added by duncan
def get_optimal_chains_pctsp(digraph, ndds):
# Chain edges
edge_success_prob = 1.0
chain_next_vv = {e.src.id: e.tgt.id
for e in digraph.es
if e.edge_var.x > 0.5}
optimal_chains = []
for i, ndd in enumerate(ndds):
for e in ndd.edges:
if e.edge_var.x > 0.5:
vtx_indices = find_selected_path(e.tgt.id, chain_next_vv)
# Get score of edge from NDD
score = e.score * edge_success_prob
# Add scores of edges between vertices
for j in range(len(vtx_indices) - 1):
score += digraph.adj_mat[vtx_indices[j]][vtx_indices[j + 1]].score * edge_success_prob ** (j + 2)
optimal_chains.append(kidney_ndds.Chain(i, vtx_indices, score))
return optimal_chains
def selected_edges_to_cycles(digraph, cycle_start_vv, cycle_next_vv):
cycles = [find_selected_cycle(start_v, cycle_next_vv) for start_v in cycle_start_vv]
# Remove "cycles" that are really part of a chain
cycles = [c for c in cycles if c is not None]
# Remove duplicated cycles
cycles = [c for c in cycles if c[0] == min(c)]
# Use vertices instead of indices
return [[digraph.vs[v_id] for v_id in c] for c in cycles]
# return True if cycle c contains edge e
# c is a list of kidney_digraph.Vertex objects (with the first vertex not repeated)
# e is a kidney_digraph.Edge object
def cycle_contains_edge(c,e):
if e.src in c:
i = c.index(e.src)
if e.tgt == c[(i+1) % len(c)]:
return True
else:
return False
return False
# -------------------------------------------------------------------------------------------------
#
# Functions for Variable Uncertainty Budget
#
# -------------------------------------------------------------------------------------------------
from scipy.special import binom
from scipy.optimize import minimize
import math
def B_bound(num_E,gamma):
'''
The upper-bound on probability that realized edge weights fall outside of the U-set:
Assuming symmetric interval uncertainty, and realized edge weights symmetrically distributed about
their nominal value.
From Bertsimas, Price of Robustness
'''
eta = (gamma + num_E)/2.0
fl_eta = int(math.floor(eta))
mu = float(eta - fl_eta)
return math.pow(2,-num_E)*((1.0-mu)*binom(num_E,fl_eta)
+ sum( binom(num_E,l) for l in range(fl_eta+1,int(num_E)+1) ))
def gamma_symmetric_edge_weights(x_norm,epsilon):
'''
Variable budget function for symmetric cost uncertainty (from Poss & Bergamo)
input:
- x_norm : number of edges in the solution
- epsilon : protection level (realized edge weights fall outside the U-set with probability at most epsilon)
'''
# the first constraint is that B_bound <= epsilon,
# the second is that gamma >= 0
# the third is that gamma <= x_norm
constr = ({'type':'ineq',
'fun':lambda g: epsilon - B_bound(x_norm,g)
},
{'type':'ineq',
'fun':lambda g: g},
{'type': 'ineq',
| find_selected_path | identifier_name |
mapi.js | (var i = 0; i < document.links.length; i++) document.links[i].onfocus = function() {
this.blur()
}
var script = document.createElement("script");
script.setAttribute("src", "static/guide.js?v=1.0");
script.setAttribute("type", "text/javascript");
script.setAttribute("charset", "utf-8");
setTimeout(function() {
document.getElementsByTagName('head')[0].appendChild(script);
}, 1);
}
//button display and hide
function Contextdisplay(whichID) {
document.getElementById(whichID).style.display = (document.getElementById(whichID).style.display != 'block' ? 'block' : 'none');
}
//change icon on class reference page----by xieyangxin
function changeIcon(whichID, target) {
if (document.getElementById(whichID).style.display != 'block') {
target.className += ' ' + whichID[0] + 'ccsubmenu';
} else {
target.className = whichID[0] + 'csubmenu';
}
}
//button display hidden
function displayHidden(whichID) {
document.getElementById(whichID).style.display = 'none';
}
//button display block
function displayBlock(whichID) {
document.getElementById(whichID).style.display = 'block';
}
// Modified by wjp
if (typeof BMap != 'undefined') {
var map = new BMap.Map("mmap");
var point = new BMap.Point(116.307852, 40.057031);
map.centerAndZoom(point, 15);
var opts = {
width: 250, // info window width
height: 80, // info window height
title: "<a href='http://j.map.baidu.com/K3IXc' style='color:#CC5522;font-weight:800'>百度大厦</a>" // info window title
}
var infoWindow = new BMap.InfoWindow("地址:北京市海淀区上地十街10号<br/>电话:(010)59928888", opts); // create the info window object
map.openInfoWindow(infoWindow, map.getCenter()); // open the info window at the map center
}
//banner slide
//$(function(){
var time = 2000,
slideEl = $("#slideContent"),
indexAt = 1,
slideHode = false;
var indexBannerSrc = ["sdk-v.jpg", "house.jpg", "developer.jpg", "iOS-SDK-v2.1.0.jpg", "match.jpg"];
//var indexBannerSrc=["sdk-v.jpg","addURI.png","ad-geosdk2.jpg","ad-cloud.jpg","ad-card2.jpg"];
var changeLength = indexBannerSrc.length;
var changeBanner = function(index) {
if (!slideHode) {
$('img:not(.current)', slideEl).css({
'opacity': '0.1',
'filter': 'alpha(opacity=30)'
});
var $curImg = $('img.current', slideEl);
$curImg.fadeTo("normal", "0.3", function() {
$curImg.removeClass('current'); //current class make img visible
nowImg = slideEl.find("img").eq(index);
nowImg.addClass('current').fadeTo("normal", "1");
bg = 'static/img/' + nowImg.attr('id') + '.png';
$("#content .hdp").css({
'background-image': 'url(' + bg + ')',
'background-repeat': 'repeat-x'
});
slideEl.children(".focus-content").find("a").removeClass("current").eq(changeLength - index - 1).addClass("current");
indexAt = index + 1;
indexAt %= changeLength;
});
}
};
var slide = function() {
setTimeout(function() {
changeBanner(indexAt);
setTimeout(slide, time);
}, time);
};
//pre load other slide image
/*
for(var i=1;i<changeLength;i++){
var img = new Image();
img.src = "http://developer.baidu.com/map/static/img/"+indexBannerSrc[i];
}
*/
slide();
$(".focus-content").bind({
"click": function(e) {
var $target = $(e.target);
if ($target.context.tagName.toLowerCase() == "a") {
e.stopPropagation();
var idx = parseInt($target.attr("idx"));
changeBanner(idx);
}
return false;
},
"mouseenter": function(e) {
slideHode = false;
},
"mouseleave": function(e) {
slideHode = true;
}
});
$(".img-content").bind({
"mouseenter": function() {
slideHode = true;
},
"mouseleave": function() {
slideHode = false;
}
});
//})();
/*********** News homepage image carousel ******************/
var NewsPlayer = function() {
/****
* container: image container,
* numbers: button container,
* currentTitle: info bar
*/
/**
* Loop count (number of fade animation steps)
*/
var LOOP_NUMBER = 20;
/**
* Iteration helper
*/
function each(arr, callback, context) {
if (arr.forEach) {
arr.forEach(callback, context);
} else {
for (var i = 0, len = arr.length; i < len; i++) {
callback.call(context, arr[i], i, arr);
}
}
}
/**
* Fade-in effect
*/
function fadeIn(elem) {
setOpacity(elem, 0);
for (var i = 0; i <= LOOP_NUMBER; i++) {
(function(i) {
var pos = i * 5;
setTimeout(function() {
setOpacity(elem, pos);
}, i * 25);
})(i);
}
}
/**
* Fade-out effect
*/
function fadeOut(elem) {
for (var i = 0; i <= LOOP_NUMBER; i++) {
(function(i) {
var pos = 100 - i * 5;
setTimeout(function() {
setOpacity(elem, pos);
}, i * 25);
})(i);
}
}
/**
* Set opacity
*/
function setOpacity(elem, level) {
if (elem.filters) { //IE
elem.style.filter = "alpha(opacity=" + level + ")";
} else {
elem.style.opacity = level / 100;
}
}
/**
* Set the element's zIndex property
*/
function setZIndex(elem, zIndex) {
elem.style.zIndex = zIndex;
}
return {
play: function(container, numbers, titles) {
var me = this,
targetIdx = 0, // target image index
curIdx = 0, // current image index
picCount = $(numbers).children.length;
// initialize state
me.titles = $(titles);
| // give the first image zIndex 10 and make the others transparent (0 = transparent, 1 = opaque)
each(me.arrImgs, function(elem, idx, arr) {
if (idx == 0) {
elem.style.zIndex = 10;
} else {
setOpacity(elem, 0);
}
}, me);
// attach a click handler to every li
each(me.arrNums, function(elem, idx, arr) {
elem.onclick = function() {
me.fade(idx, curIdx);
curIdx = idx;
targetIdx = idx;
}
}, me);
// auto-rotate the carousel
var handler = setInterval(function() {
if (targetIdx < me.arrNums.length - 1) {
targetIdx++;
} else {
targetIdx = 0;
}
me.fade(targetIdx, curIdx);
curIdx = targetIdx;
}, 2000);
$(numbers).bind({
mouseover: function() {
clearInterval(handler)
},
mouseout: function() {
handler = setInterval(function() {
if (targetIdx < me.arrNums.length - 1) {
targetIdx++;
} else {
targetIdx = 0;
}
me.fade(targetIdx, curIdx);
curIdx = targetIdx;
}, 2000);
}
});
},
/**
* Perform the fade-in/fade-out transition
*/
fade: function(idx, lastIdx) {
var me = this;
if (idx == lastIdx) {
return;
}
var lastImg = me.arrImgs[lastIdx], // previous img
img = me.arrImgs[idx]; // current img
setZIndex(lastImg, 1);
setZIndex(img, 10);
fadeOut(lastImg);
fadeIn(img);
each(me.arrNums, function(elem, elemIdx, arr) {
if (elemIdx != idx) {
me.arrNums[elemIdx].className = '';
} else {
me.arrNums[elemIdx].className = 'on';
}
}, me);
each(me.titles, function(elem | me.arrImgs = $(container).children('a');
me.arrNums = $(numbers).children();
me.arrNums[0].className = "on";
| random_line_split |
mapi.js | (var i = 0; i < document.links.length; i++) document.links[i].onfocus = function() {
this.blur()
}
var script = document.createElement("script");
script.setAttribute("src", "static/guide.js?v=1.0");
script.setAttribute("type", "text/javascript");
script.setAttribute("charset", "utf-8");
setTimeout(function() {
document.getElementsByTagName('head')[0].appendChild(script);
}, 1);
}
//button display and hide
function Contextdisplay(whichID) {
document.getElementById(whichID).style.display = (document.getElementById(whichID).style.display != 'block' ? 'block' : 'none');
}
//change icon on class reference page----by xieyangxin
function changeIcon(whichID, target) {
if (document.getElementById(whichID).style.display != 'block') {
target.className += ' ' + whichID[0] + 'ccsubmenu';
} else {
target.className = whichID[0] + 'csubmenu';
}
}
//button display hidden
function displayHidden(whichID) {
document.getElementById(whichID).style.display = 'none';
}
//button display block
function displayBlock(whichID) {
document.getElementById(whichID).style.display = 'block';
}
// Modified by wjp
if (typeof BMap != 'undefined') {
var map = new BMap.Map("mmap");
var point = new BMap.Point(116.307852, 40.057031);
map.centerAndZoom(point, 15);
var opts = {
width: 250, // info window width
height: 80, // info window height
title: "<a href='http://j.map.baidu.com/K3IXc' style='color:#CC5522;font-weight:800'>百度大厦</a>" // info window title
}
var infoWindow = new BMap.InfoWindow("地址:北京市海淀区上地十街10号<br/>电话:(010)59928888", opts); // create the info window object
map.openInfoWindow(infoWindow, map.getCenter()); // open the info window at the map center
}
//banner slide
//$(function(){
var time = 2000,
slideEl = $("#slideContent"),
indexAt = 1,
slideHode = false;
var indexBannerSrc = ["sdk-v.jpg", "house.jpg", "developer.jpg", "iOS-SDK-v2.1.0.jpg", "match.jpg"];
//var indexBannerSrc=["sdk-v.jpg","addURI.png","ad-geosdk2.jpg","ad-cloud.jpg","ad-card2.jpg"];
var changeLength = indexBannerSrc.length;
var changeBanner = function(index) {
if (!slideHode) {
$('img:not(.current)', slideEl).css({
'opacity': '0.1',
'filter': 'alpha(opacity=30)'
});
var $curImg = $('img.current', slideEl);
$curImg.fadeTo("normal", "0.3", function() {
$curImg.removeClass('current'); //current class make img visible
nowImg = slideEl.find("img").eq(index);
nowImg.addClass('current').fadeTo("normal", "1");
bg = 'static/img/' + nowImg.attr('id') + '.png';
$("#content .hdp").css({
'background-image': 'url(' + bg + ')',
'background-repeat': 'repeat-x'
});
slideEl.children(".focus-content").find("a").removeClass("current").eq(changeLength - index - 1).addClass("current");
indexAt = index + 1;
indexAt %= changeLength;
});
}
};
var slide = function() {
setTimeout(function() {
changeBanner(indexAt);
setTimeout(slide, time);
}, time);
};
//pre load other slide image
/*
for(var i=1;i<changeLength;i++){
var img = new Image();
img.src = "http://developer.baidu.com/map/static/img/"+indexBannerSrc[i];
}
*/
slide();
$(".focus-content").bind({
"click": function(e) {
var $target = $(e.target);
if ($target.context.tagName.toLowerCase() == "a") {
e.stopPropagation();
var idx = parseInt($target.attr("idx"));
changeBanner(idx);
}
return false;
},
"mouseenter": function(e) {
slideHode = false;
},
"mouseleave": function(e) {
slideHode = true;
}
});
$(".img-content").bind({
"mouseenter": function() {
slideHode = true;
},
"mouseleave": function() {
slideHode = false;
}
});
//})();
/*********** News homepage image carousel ******************/
var NewsPlayer = function() {
/****
* container: image container,
* numbers: button container,
* currentTitle: info bar
*/
/**
* Loop count (number of fade animation steps)
*/
var LOOP_NUMBER = 20;
/**
* Iteration helper
*/
function each(arr, callback, context) {
if (arr.forEach) {
arr.forEach(callback, context);
} else {
for (var i = 0, len = arr.length; i < len; i++) {
callback.call(context, arr[i], i, arr);
}
}
}
/**
* Fade-in effect
*/
function fadeIn(elem) {
setOpacity(elem, 0);
for (var i = 0; i <= LOOP_NUMBER; i++) {
(function(i) {
var pos = i * 5;
setTimeout(function() {
| setTimeout(function() {
setOpacity(elem, pos);
}, i * 25);
})(i);
}
}
/**
* Set opacity
*/
function setOpacity(elem, level) {
if (elem.filters) { //IE
elem.style.filter = "alpha(opacity=" + level + ")";
} else {
elem.style.opacity = level / 100;
}
}
/**
* Set the element's zIndex property
*/
function setZIndex(elem, zIndex) {
elem.style.zIndex = zIndex;
}
return {
play: function(container, numbers, titles) {
var me = this,
targetIdx = 0, // target image index
curIdx = 0, // current image index
picCount = $(numbers).children.length;
// initialize state
me.titles = $(titles);
me.arrImgs = $(container).children('a');
me.arrNums = $(numbers).children();
me.arrNums[0].className = "on";
// give the first image zIndex 10 and make the others transparent (0 = transparent, 1 = opaque)
each(me.arrImgs, function(elem, idx, arr) {
if (idx == 0) {
elem.style.zIndex = 10;
} else {
setOpacity(elem, 0);
}
}, me);
// attach a click handler to every li
each(me.arrNums, function(elem, idx, arr) {
elem.onclick = function() {
me.fade(idx, curIdx);
curIdx = idx;
targetIdx = idx;
}
}, me);
// auto-rotate the carousel
var handler = setInterval(function() {
if (targetIdx < me.arrNums.length - 1) {
targetIdx++;
} else {
targetIdx = 0;
}
me.fade(targetIdx, curIdx);
curIdx = targetIdx;
}, 2000);
$(numbers).bind({
mouseover: function() {
clearInterval(handler)
},
mouseout: function() {
handler = setInterval(function() {
if (targetIdx < me.arrNums.length - 1) {
targetIdx++;
} else {
targetIdx = 0;
}
me.fade(targetIdx, curIdx);
curIdx = targetIdx;
}, 2000);
}
});
},
/**
* Perform the fade-in/fade-out transition
*/
fade: function(idx, lastIdx) {
var me = this;
if (idx == lastIdx) {
return;
}
var lastImg = me.arrImgs[lastIdx], // previous img
img = me.arrImgs[idx]; // current img
setZIndex(lastImg, 1);
setZIndex(img, 10);
fadeOut(lastImg);
fadeIn(img);
each(me.arrNums, function(elem, elemIdx, arr) {
if (elemIdx != idx) {
me.arrNums[elemIdx].className = '';
} else {
me.arrNums[elemIdx].className = 'on';
}
}, me);
each(me.titles, function(elem | setOpacity(elem, pos);
}, i * 25);
})(i);
}
}
/**
* Fade-out effect
*/
function fadeOut(elem) {
for (var i = 0; i <= LOOP_NUMBER; i++) {
(function(i) {
var pos = 100 - i * 5;
| identifier_body |
mapi.js | (var i = 0; i < document.links.length; i++) document.links[i].onfocus = function() {
this.blur()
}
var script = document.createElement("script");
script.setAttribute("src", "static/guide.js?v=1.0");
script.setAttribute("type", "text/javascript");
script.setAttribute("charset", "utf-8");
setTimeout(function() {
document.getElementsByTagName('head')[0].appendChild(script);
}, 1);
}
//button display and hide
function Contextdisplay(whichID) {
document.getElementById(whichID).style.display = (document.getElementById(whichID).style.display != 'block' ? 'block' : 'none');
}
//change icon on class reference page----by xieyangxin
function changeIcon(whichID, target) {
if (document.getElementById(whichID).style.display != 'block') {
target.className += ' ' + whichID[0] + 'ccsubmenu';
} else {
target.className = whichID[0] + 'csubmenu';
}
}
//button display hidden
function displayHidden(whichID) {
document.getElementById(whichID).style.display = 'none';
}
//button display block
function displayBlock(whichID) {
document.getElementById(whichID).style.display = 'block';
}
// Modified by wjp
if (typeof BMap != 'undefined') {
var map = new BMap.Map("mmap");
var point = new BMap.Point(116.307852, 40.057031);
map.centerAndZoom(point, 15);
var opts = {
width: 250, // info window width
height: 80, // info window height
title: "<a href='http://j.map.baidu.com/K3IXc' style='color:#CC5522;font-weight:800'>百度大厦</a>" // info window title
}
var infoWindow = new BMap.InfoWindow("地址:北京市海淀区上地十街10号<br/>电话:(010)59928888", opts); // create the info window object
map.openInfoWindow(infoWindow, map.getCenter()); // open the info window at the map center
}
//banner slide
//$(function(){
var time = 2000,
slideEl = $("#slideContent"),
indexAt = 1,
slideHode = false;
var indexBannerSrc = ["sdk-v.jpg", "house.jpg", "developer.jpg", "iOS-SDK-v2.1.0.jpg", "match.jpg"];
//var indexBannerSrc=["sdk-v.jpg","addURI.png","ad-geosdk2.jpg","ad-cloud.jpg","ad-card2.jpg"];
var changeLength = indexBannerSrc.length;
var changeBanner = function(index) {
if (!slideHode) {
$('img:not(.current)', slideEl).css({
'opacity': '0.1',
'filter': 'alpha(opacity=30)'
});
var $curImg = $('img.current', slideEl);
$curImg.fadeTo("normal", "0.3", function() {
$curImg.removeClass('current'); //current class make img visible
nowImg = slideEl.find("img").eq(index);
nowImg.addClass('current').fadeTo("normal", "1");
bg = 'static/img/' + nowImg.attr('id') + '.png';
$("#content .hdp").css({
'background-image': 'url(' + bg + ')',
'background-repeat': 'repeat-x'
});
slideEl.children(".focus-content").find("a").removeClass("current").eq(changeLength - index - 1).addClass("current");
indexAt = index + 1;
indexAt %= changeLength;
});
}
};
var slide = function() {
setTimeout(function() {
changeBanner(indexAt);
setTimeout(slide, time);
}, time);
};
//pre load other slide image
/*
for(var i=1;i<changeLength;i++){
var img = new Image();
img.src = "http://developer.baidu.com/map/static/img/"+indexBannerSrc[i];
}
*/
slide();
$(".focus-content").bind({
"click": function(e) {
var $target = $(e.target);
if ($target.context.tagName.toLowerCase() == "a") {
e.stopPropagation();
var idx = parseInt($target.attr("idx"));
changeBanner(idx);
}
return false;
},
"mouseenter": function(e) {
slideHode = false;
},
"mouseleave": function(e) {
slideHode = true;
}
});
$(".img-content").bind({
"mouseenter": function() {
slideHode = true;
},
"mouseleave": function() {
slideHode = false;
}
});
//})();
/*********** News homepage image carousel ******************/
var NewsPlayer = function() {
/****
* container: image container,
* numbers: button container,
* currentTitle: info bar
*/
/**
* Loop count (number of fade animation steps)
*/
var LOOP_NUMBER = 20;
/**
* Iteration helper
*/
function each(arr, callback, context) {
if (arr.forEach) {
arr.forEach(callback, context);
} else {
for (var i = 0, len = arr.length; i < len; i++) {
callback.call(context, arr[i], i, arr);
}
}
}
/**
* Fade-in effect
*/
function fadeIn(elem) {
setOpacity(elem, 0);
| i) {
var pos = i * 5;
setTimeout(function() {
setOpacity(elem, pos);
}, i * 25);
})(i);
}
}
/**
* 淡出效果
*/
function fadeOut(elem) {
for (var i = 0; i <= LOOP_NUMBER; i++) {
(function(i) {
var pos = 100 - i * 5;
setTimeout(function() {
setOpacity(elem, pos);
}, i * 25);
})(i);
}
}
/**
* Set opacity
*/
function setOpacity(elem, level) {
if (elem.filters) { //IE
elem.style.filter = "alpha(opacity=" + level + ")";
} else {
elem.style.opacity = level / 100;
}
}
/**
* Set the element's zIndex property
*/
function setZIndex(elem, zIndex) {
elem.style.zIndex = zIndex;
}
return {
play: function(container, numbers, titles) {
var me = this,
targetIdx = 0, // target image index
curIdx = 0, // current image index
picCount = $(numbers).children.length;
// initialize state
me.titles = $(titles);
me.arrImgs = $(container).children('a');
me.arrNums = $(numbers).children();
me.arrNums[0].className = "on";
// give the first image zIndex 10 and make the others transparent (0 = transparent, 1 = opaque)
each(me.arrImgs, function(elem, idx, arr) {
if (idx == 0) {
elem.style.zIndex = 10;
} else {
setOpacity(elem, 0);
}
}, me);
// attach a click handler to every li
each(me.arrNums, function(elem, idx, arr) {
elem.onclick = function() {
me.fade(idx, curIdx);
curIdx = idx;
targetIdx = idx;
}
}, me);
// auto-rotate the carousel
var handler = setInterval(function() {
if (targetIdx < me.arrNums.length - 1) {
targetIdx++;
} else {
targetIdx = 0;
}
me.fade(targetIdx, curIdx);
curIdx = targetIdx;
}, 2000);
$(numbers).bind({
mouseover: function() {
clearInterval(handler)
},
mouseout: function() {
handler = setInterval(function() {
if (targetIdx < me.arrNums.length - 1) {
targetIdx++;
} else {
targetIdx = 0;
}
me.fade(targetIdx, curIdx);
curIdx = targetIdx;
}, 2000);
}
});
},
/**
* Perform the fade-in/fade-out transition
*/
fade: function(idx, lastIdx) {
var me = this;
if (idx == lastIdx) {
return;
}
var lastImg = me.arrImgs[lastIdx], // previous img
img = me.arrImgs[idx]; // current img
setZIndex(lastImg, 1);
setZIndex(img, 10);
fadeOut(lastImg);
fadeIn(img);
each(me.arrNums, function(elem, elemIdx, arr) {
if (elemIdx != idx) {
me.arrNums[elemIdx].className = '';
} else {
me.arrNums[elemIdx].className = 'on';
}
}, me);
each(me.titles, function |
for (var i = 0; i <= LOOP_NUMBER; i++) {
(function( | conditional_block |
mapi.js | (var i = 0; i < document.links.length; i++) document.links[i].onfocus = function() {
this.blur()
}
var script = document.createElement("script");
script.setAttribute("src", "static/guide.js?v=1.0");
script.setAttribute("type", "text/javascript");
script.setAttribute("charset", "utf-8");
setTimeout(function() {
document.getElementsByTagName('head')[0].appendChild(script);
}, 1);
}
//button display and hide
function Contextdisplay(whichID) {
document.getElementById(whichID).style.display = (document.getElementById(whichID).style.display != 'block' ? 'block' : 'none');
}
//change icon on class reference page----by xieyangxin
function changeIcon(whichID, target) {
if (document.getElementById(whichID).style.display != 'block') {
target.className += ' ' + whichID[0] + 'ccsubmenu';
} else {
target.className = whichID[0] + 'csubmenu';
}
}
//button display hidden
function displayHidden(whichID) {
document.getElementById(whichID).style.display = 'none';
}
//button display block
function displayBlock(whichID | ument.getElementById(whichID).style.display = 'block';
}
// Modified by wjp
if (typeof BMap != 'undefined') {
var map = new BMap.Map("mmap");
var point = new BMap.Point(116.307852, 40.057031);
map.centerAndZoom(point, 15);
var opts = {
width: 250, // info window width
height: 80, // info window height
title: "<a href='http://j.map.baidu.com/K3IXc' style='color:#CC5522;font-weight:800'>百度大厦</a>" // info window title
}
var infoWindow = new BMap.InfoWindow("地址:北京市海淀区上地十街10号<br/>电话:(010)59928888", opts); // create the info window object
map.openInfoWindow(infoWindow, map.getCenter()); // open the info window at the map center
}
//banner slide
//$(function(){
var time = 2000,
slideEl = $("#slideContent"),
indexAt = 1,
slideHode = false;
var indexBannerSrc = ["sdk-v.jpg", "house.jpg", "developer.jpg", "iOS-SDK-v2.1.0.jpg", "match.jpg"];
//var indexBannerSrc=["sdk-v.jpg","addURI.png","ad-geosdk2.jpg","ad-cloud.jpg","ad-card2.jpg"];
var changeLength = indexBannerSrc.length;
var changeBanner = function(index) {
if (!slideHode) {
$('img:not(.current)', slideEl).css({
'opacity': '0.1',
'filter': 'alpha(opacity=30)'
});
var $curImg = $('img.current', slideEl);
$curImg.fadeTo("normal", "0.3", function() {
$curImg.removeClass('current'); //current class make img visible
nowImg = slideEl.find("img").eq(index);
nowImg.addClass('current').fadeTo("normal", "1");
bg = 'static/img/' + nowImg.attr('id') + '.png';
$("#content .hdp").css({
'background-image': 'url(' + bg + ')',
'background-repeat': 'repeat-x'
});
slideEl.children(".focus-content").find("a").removeClass("current").eq(changeLength - index - 1).addClass("current");
indexAt = index + 1;
indexAt %= changeLength;
});
}
};
var slide = function() {
setTimeout(function() {
changeBanner(indexAt);
setTimeout(slide, time);
}, time);
};
//pre load other slide image
/*
for(var i=1;i<changeLength;i++){
var img = new Image();
img.src = "http://developer.baidu.com/map/static/img/"+indexBannerSrc[i];
}
*/
slide();
$(".focus-content").bind({
"click": function(e) {
var $target = $(e.target);
if ($target.context.tagName.toLowerCase() == "a") {
e.stopPropagation();
var idx = parseInt($target.attr("idx"));
changeBanner(idx);
}
return false;
},
"mouseenter": function(e) {
slideHode = false;
},
"mouseleave": function(e) {
slideHode = true;
}
});
$(".img-content").bind({
"mouseenter": function() {
slideHode = true;
},
"mouseleave": function() {
slideHode = false;
}
});
//})();
/*********** News homepage image carousel ******************/
var NewsPlayer = function() {
/****
* container: image container,
* numbers: button container,
* currentTitle: info bar
*/
/**
* Loop count (number of fade animation steps)
*/
var LOOP_NUMBER = 20;
/**
* Iteration helper
*/
function each(arr, callback, context) {
if (arr.forEach) {
arr.forEach(callback, context);
} else {
for (var i = 0, len = arr.length; i < len; i++) {
callback.call(context, arr[i], i, arr);
}
}
}
/**
* Fade-in effect
*/
function fadeIn(elem) {
setOpacity(elem, 0);
for (var i = 0; i <= LOOP_NUMBER; i++) {
(function(i) {
var pos = i * 5;
setTimeout(function() {
setOpacity(elem, pos);
}, i * 25);
})(i);
}
}
/**
* Fade-out effect
*/
function fadeOut(elem) {
for (var i = 0; i <= LOOP_NUMBER; i++) {
(function(i) {
var pos = 100 - i * 5;
setTimeout(function() {
setOpacity(elem, pos);
}, i * 25);
})(i);
}
}
/**
* Set opacity
*/
function setOpacity(elem, level) {
if (elem.filters) { //IE
elem.style.filter = "alpha(opacity=" + level + ")";
} else {
elem.style.opacity = level / 100;
}
}
/**
* Set the element's zIndex property
*/
function setZIndex(elem, zIndex) {
elem.style.zIndex = zIndex;
}
return {
play: function(container, numbers, titles) {
var me = this,
targetIdx = 0, // target image index
curIdx = 0, // current image index
picCount = $(numbers).children.length;
// initialize state
me.titles = $(titles);
me.arrImgs = $(container).children('a');
me.arrNums = $(numbers).children();
me.arrNums[0].className = "on";
// give the first image zIndex 10 and make the others transparent (0 = transparent, 1 = opaque)
each(me.arrImgs, function(elem, idx, arr) {
if (idx == 0) {
elem.style.zIndex = 10;
} else {
setOpacity(elem, 0);
}
}, me);
// attach a click handler to every li
each(me.arrNums, function(elem, idx, arr) {
elem.onclick = function() {
me.fade(idx, curIdx);
curIdx = idx;
targetIdx = idx;
}
}, me);
// auto-rotate the carousel
var handler = setInterval(function() {
if (targetIdx < me.arrNums.length - 1) {
targetIdx++;
} else {
targetIdx = 0;
}
me.fade(targetIdx, curIdx);
curIdx = targetIdx;
}, 2000);
$(numbers).bind({
mouseover: function() {
clearInterval(handler)
},
mouseout: function() {
handler = setInterval(function() {
if (targetIdx < me.arrNums.length - 1) {
targetIdx++;
} else {
targetIdx = 0;
}
me.fade(targetIdx, curIdx);
curIdx = targetIdx;
}, 2000);
}
});
},
/**
* Perform the fade-in/fade-out transition
*/
fade: function(idx, lastIdx) {
var me = this;
if (idx == lastIdx) {
return;
}
var lastImg = me.arrImgs[lastIdx], // previous img
img = me.arrImgs[idx]; // current img
setZIndex(lastImg, 1);
setZIndex(img, 10);
fadeOut(lastImg);
fadeIn(img);
each(me.arrNums, function(elem, elemIdx, arr) {
if (elemIdx != idx) {
me.arrNums[elemIdx].className = '';
} else {
me.arrNums[elemIdx].className = 'on';
}
}, me);
each(me.titles, function(elem, | ) {
doc | identifier_name |
episode.rs | UL` byte. This lets us do
/// a range query on the set when given the episode ID to find the TV show ID.
const TVSHOWS: &str = "episode.tvshows.fst";
/// An episode index that supports retrieving season and episode information
/// quickly.
#[derive(Debug)]
pub struct Index {
seasons: fst::Set,
tvshows: fst::Set,
}
impl Index {
/// Open an episode index from the given index directory.
pub fn open<P: AsRef<Path>>(index_dir: P) -> Result<Index> {
let index_dir = index_dir.as_ref();
// We claim it is safe to open the following memory map because we
// don't mutate them and no other process (should) either.
let seasons = unsafe { fst_set_file(index_dir.join(SEASONS))? };
let tvshows = unsafe { fst_set_file(index_dir.join(TVSHOWS))? };
Ok(Index {
seasons: seasons,
tvshows: tvshows,
})
}
/// Create an episode index from the given IMDb data directory and write
/// it to the given index directory. If an episode index already exists,
/// then it is overwritten.
pub fn create<P1: AsRef<Path>, P2: AsRef<Path>>(
data_dir: P1,
index_dir: P2,
) -> Result<Index> {
let data_dir = data_dir.as_ref();
let index_dir = index_dir.as_ref();
let mut buf = vec![];
let mut seasons = fst_set_builder_file(index_dir.join(SEASONS))?;
let mut tvshows = fst_set_builder_file(index_dir.join(TVSHOWS))?;
let mut episodes = read_sorted_episodes(data_dir)?;
for episode in &episodes {
buf.clear();
write_episode(episode, &mut buf)?;
seasons.insert(&buf).map_err(Error::fst)?;
}
episodes.sort_by(|e1, e2| {
(&e1.id, &e1.tvshow_id).cmp(&(&e2.id, &e2.tvshow_id))
});
for episode in &episodes {
buf.clear();
write_tvshow(&episode, &mut buf)?;
tvshows.insert(&buf).map_err(Error::fst)?;
}
seasons.finish().map_err(Error::fst)?;
tvshows.finish().map_err(Error::fst)?;
log::info!("{} episodes indexed", episodes.len());
Index::open(index_dir)
}
/// Return a sequence of episodes for the given TV show IMDb identifier.
///
/// The episodes are sorted in order of season number and episode number.
/// Episodes without season/episode numbers are sorted after episodes with
/// numbers.
pub fn seasons(&self, tvshow_id: &[u8]) -> Result<Vec<Episode>> {
let mut upper = tvshow_id.to_vec();
upper.push(0xFF);
let mut episodes = vec![];
let mut stream = self.seasons.range()
.ge(tvshow_id)
.le(upper)
.into_stream();
while let Some(episode_bytes) = stream.next() {
episodes.push(read_episode(episode_bytes)?);
}
Ok(episodes)
}
/// Return a sequence of episodes for the given TV show IMDb identifier and
/// season number.
///
/// The episodes are sorted in order of episode number. Episodes without
/// episode numbers are sorted after episodes with numbers.
pub fn episodes(
&self,
tvshow_id: &[u8],
season: u32,
) -> Result<Vec<Episode>> {
let mut lower = tvshow_id.to_vec();
lower.push(0x00);
lower.extend_from_slice(&u32_to_bytes(season));
lower.extend_from_slice(&u32_to_bytes(0));
let mut upper = tvshow_id.to_vec();
upper.push(0x00);
upper.extend_from_slice(&u32_to_bytes(season));
upper.extend_from_slice(&u32_to_bytes(u32::MAX));
let mut episodes = vec![];
let mut stream = self.seasons.range()
.ge(lower)
.le(upper)
.into_stream();
while let Some(episode_bytes) = stream.next() {
episodes.push(read_episode(episode_bytes)?);
}
Ok(episodes)
}
/// Return the episode information for the given episode IMDb identifier.
///
/// If no episode information for the given ID exists, then `None` is
/// returned.
pub fn episode(&self, episode_id: &[u8]) -> Result<Option<Episode>> {
let mut upper = episode_id.to_vec();
upper.push(0xFF);
let mut stream = self.tvshows.range()
.ge(episode_id)
.le(upper)
.into_stream();
while let Some(tvshow_bytes) = stream.next() {
return Ok(Some(read_tvshow(tvshow_bytes)?));
}
Ok(None)
}
}
fn read_sorted_episodes(data_dir: &Path) -> Result<Vec<Episode>> {
// We claim it is safe to open the following memory map because we don't
// mutate them and no other process (should) either.
let mut rdr = csv_file(data_dir.join(IMDB_EPISODE))?;
let mut records = vec![];
for result in rdr.deserialize() {
let record: Episode = result.map_err(Error::csv)?;
records.push(record);
}
records.sort_by(cmp_episode);
Ok(records)
}
fn cmp_episode(ep1: &Episode, ep2: &Episode) -> cmp::Ordering {
let k1 = (
&ep1.tvshow_id,
ep1.season.unwrap_or(u32::MAX),
ep1.episode.unwrap_or(u32::MAX),
&ep1.id,
);
let k2 = (
&ep2.tvshow_id,
ep2.season.unwrap_or(u32::MAX),
ep2.episode.unwrap_or(u32::MAX),
&ep2.id,
);
k1.cmp(&k2)
}
fn read_episode(bytes: &[u8]) -> Result<Episode> {
let nul = match bytes.iter().position(|&b| b == 0) {
Some(nul) => nul,
None => bug!("could not find nul byte"),
};
let tvshow_id = match String::from_utf8(bytes[..nul].to_vec()) {
Err(err) => bug!("tvshow_id invalid UTF-8: {}", err),
Ok(tvshow_id) => tvshow_id,
};
let mut i = nul + 1;
let season = from_optional_u32(&bytes[i..]);
i += 4;
let epnum = from_optional_u32(&bytes[i..]);
i += 4;
let id = match String::from_utf8(bytes[i..].to_vec()) {
Err(err) => bug!("episode id invalid UTF-8: {}", err),
Ok(id) => id,
};
Ok(Episode {
id: id,
tvshow_id: tvshow_id,
season: season,
episode: epnum,
})
}
fn write_episode(ep: &Episode, buf: &mut Vec<u8>) -> Result<()> {
if ep.tvshow_id.as_bytes().iter().any(|&b| b == 0) {
bug!("unsupported tvshow id (with NUL byte) for {:?}", ep);
}
buf.extend_from_slice(ep.tvshow_id.as_bytes());
buf.push(0x00);
buf.extend_from_slice(&u32_to_bytes(to_optional_season(ep)?));
buf.extend_from_slice(&u32_to_bytes(to_optional_epnum(ep)?));
buf.extend_from_slice(ep.id.as_bytes());
Ok(())
}
fn read_tvshow(bytes: &[u8]) -> Result<Episode> {
let nul = match bytes.iter().position(|&b| b == 0) {
Some(nul) => nul, | Err(err) => bug!("episode id invalid UTF-8: {}", err),
Ok(tvshow_id) => tvshow_id,
};
let mut i = nul + 1;
let season = from_optional_u32(&bytes[i..]);
i += 4;
let epnum = from_optional_u32(&bytes[i..]);
i += 4;
let tvshow_id = match String::from_utf8(bytes[i..].to_vec()) {
Err(err) => bug!("tvshow_id invalid UTF-8: {}", err),
Ok(tvshow_id) => tvshow_id,
};
Ok(Episode {
id: id,
tvshow_id: tvshow_id,
season: season,
episode: epnum,
})
}
fn write_tvshow(ep: &Episode, buf: &mut Vec<u8>) -> Result<()> {
if ep.id.as_bytes().iter().any(|&b| b == 0) {
bug!("unsupported episode id (with NUL byte) for {:?}", ep);
}
buf.extend_from_slice(ep.id.as_bytes());
buf.push(0x00);
buf.extend_from_slice(&u32_to_bytes(to_optional_season(ep)?));
buf.extend_from_slice(&u32_to_bytes(to_optional_epnum(ep)?));
buf.extend_from_slice(ep.tvshow_id.as_bytes());
| None => bug!("could not find nul byte"),
};
let id = match String::from_utf8(bytes[..nul].to_vec()) { | random_line_split |
episode.rs | let Some(episode_bytes) = stream.next() {
episodes.push(read_episode(episode_bytes)?);
}
Ok(episodes)
}
/// Return a sequence of episodes for the given TV show IMDb identifier and
/// season number.
///
/// The episodes are sorted in order of episode number. Episodes without
/// episode numbers are sorted after episodes with numbers.
pub fn episodes(
&self,
tvshow_id: &[u8],
season: u32,
) -> Result<Vec<Episode>> {
let mut lower = tvshow_id.to_vec();
lower.push(0x00);
lower.extend_from_slice(&u32_to_bytes(season));
lower.extend_from_slice(&u32_to_bytes(0));
let mut upper = tvshow_id.to_vec();
upper.push(0x00);
upper.extend_from_slice(&u32_to_bytes(season));
upper.extend_from_slice(&u32_to_bytes(u32::MAX));
let mut episodes = vec![];
let mut stream = self.seasons.range()
.ge(lower)
.le(upper)
.into_stream();
while let Some(episode_bytes) = stream.next() {
episodes.push(read_episode(episode_bytes)?);
}
Ok(episodes)
}
/// Return the episode information for the given episode IMDb identifier.
///
/// If no episode information for the given ID exists, then `None` is
/// returned.
pub fn episode(&self, episode_id: &[u8]) -> Result<Option<Episode>> {
let mut upper = episode_id.to_vec();
upper.push(0xFF);
let mut stream = self.tvshows.range()
.ge(episode_id)
.le(upper)
.into_stream();
while let Some(tvshow_bytes) = stream.next() {
return Ok(Some(read_tvshow(tvshow_bytes)?));
}
Ok(None)
}
}
fn read_sorted_episodes(data_dir: &Path) -> Result<Vec<Episode>> {
// We claim it is safe to open the following memory map because we don't
// mutate them and no other process (should) either.
let mut rdr = csv_file(data_dir.join(IMDB_EPISODE))?;
let mut records = vec![];
for result in rdr.deserialize() {
let record: Episode = result.map_err(Error::csv)?;
records.push(record);
}
records.sort_by(cmp_episode);
Ok(records)
}
fn cmp_episode(ep1: &Episode, ep2: &Episode) -> cmp::Ordering {
let k1 = (
&ep1.tvshow_id,
ep1.season.unwrap_or(u32::MAX),
ep1.episode.unwrap_or(u32::MAX),
&ep1.id,
);
let k2 = (
&ep2.tvshow_id,
ep2.season.unwrap_or(u32::MAX),
ep2.episode.unwrap_or(u32::MAX),
&ep2.id,
);
k1.cmp(&k2)
}
fn read_episode(bytes: &[u8]) -> Result<Episode> {
let nul = match bytes.iter().position(|&b| b == 0) {
Some(nul) => nul,
None => bug!("could not find nul byte"),
};
let tvshow_id = match String::from_utf8(bytes[..nul].to_vec()) {
Err(err) => bug!("tvshow_id invalid UTF-8: {}", err),
Ok(tvshow_id) => tvshow_id,
};
let mut i = nul + 1;
let season = from_optional_u32(&bytes[i..]);
i += 4;
let epnum = from_optional_u32(&bytes[i..]);
i += 4;
let id = match String::from_utf8(bytes[i..].to_vec()) {
Err(err) => bug!("episode id invalid UTF-8: {}", err),
Ok(id) => id,
};
Ok(Episode {
id: id,
tvshow_id: tvshow_id,
season: season,
episode: epnum,
})
}
fn write_episode(ep: &Episode, buf: &mut Vec<u8>) -> Result<()> {
if ep.tvshow_id.as_bytes().iter().any(|&b| b == 0) {
bug!("unsupported tvshow id (with NUL byte) for {:?}", ep);
}
buf.extend_from_slice(ep.tvshow_id.as_bytes());
buf.push(0x00);
buf.extend_from_slice(&u32_to_bytes(to_optional_season(ep)?));
buf.extend_from_slice(&u32_to_bytes(to_optional_epnum(ep)?));
buf.extend_from_slice(ep.id.as_bytes());
Ok(())
}
fn read_tvshow(bytes: &[u8]) -> Result<Episode> {
let nul = match bytes.iter().position(|&b| b == 0) {
Some(nul) => nul,
None => bug!("could not find nul byte"),
};
let id = match String::from_utf8(bytes[..nul].to_vec()) {
Err(err) => bug!("episode id invalid UTF-8: {}", err),
Ok(id) => id,
};
let mut i = nul + 1;
let season = from_optional_u32(&bytes[i..]);
i += 4;
let epnum = from_optional_u32(&bytes[i..]);
i += 4;
let tvshow_id = match String::from_utf8(bytes[i..].to_vec()) {
Err(err) => bug!("tvshow_id invalid UTF-8: {}", err),
Ok(tvshow_id) => tvshow_id,
};
Ok(Episode {
id: id,
tvshow_id: tvshow_id,
season: season,
episode: epnum,
})
}
fn write_tvshow(ep: &Episode, buf: &mut Vec<u8>) -> Result<()> {
if ep.id.as_bytes().iter().any(|&b| b == 0) {
bug!("unsupported episode id (with NUL byte) for {:?}", ep);
}
buf.extend_from_slice(ep.id.as_bytes());
buf.push(0x00);
buf.extend_from_slice(&u32_to_bytes(to_optional_season(ep)?));
buf.extend_from_slice(&u32_to_bytes(to_optional_epnum(ep)?));
buf.extend_from_slice(ep.tvshow_id.as_bytes());
Ok(())
}
fn from_optional_u32(bytes: &[u8]) -> Option<u32> {
match BE::read_u32(bytes) {
u32::MAX => None,
x => Some(x),
}
}
fn to_optional_season(ep: &Episode) -> Result<u32> {
match ep.season {
None => Ok(u32::MAX),
Some(x) => {
if x == u32::MAX {
bug!("unsupported season number {} for {:?}", x, ep);
}
Ok(x)
}
}
}
fn to_optional_epnum(ep: &Episode) -> Result<u32> {
match ep.episode {
None => Ok(u32::MAX),
Some(x) => {
if x == u32::MAX {
bug!("unsupported episode number {} for {:?}", x, ep);
}
Ok(x)
}
}
}
fn u32_to_bytes(n: u32) -> [u8; 4] {
let mut buf = [0u8; 4];
BE::write_u32(&mut buf, n);
buf
}
#[cfg(test)]
mod tests {
use std::collections::HashMap;
use crate::index::tests::TestContext;
use super::Index;
#[test]
fn basics() {
let ctx = TestContext::new("small");
let idx = Index::create(ctx.data_dir(), ctx.index_dir()).unwrap();
let eps = idx.seasons(b"tt0096697").unwrap();
let mut counts: HashMap<u32, u32> = HashMap::new();
for ep in eps {
*counts.entry(ep.season.unwrap()).or_insert(0) += 1;
}
assert_eq!(counts.len(), 3);
assert_eq!(counts[&1], 13);
assert_eq!(counts[&2], 22);
assert_eq!(counts[&3], 24);
}
#[test]
fn by_season() {
let ctx = TestContext::new("small");
let idx = Index::create(ctx.data_dir(), ctx.index_dir()).unwrap();
let eps = idx.episodes(b"tt0096697", 2).unwrap();
let mut counts: HashMap<u32, u32> = HashMap::new();
for ep in eps {
*counts.entry(ep.season.unwrap()).or_insert(0) += 1;
}
println!("{:?}", counts);
assert_eq!(counts.len(), 1);
assert_eq!(counts[&2], 22);
}
#[test]
fn tvshow() | {
let ctx = TestContext::new("small");
let idx = Index::create(ctx.data_dir(), ctx.index_dir()).unwrap();
let ep = idx.episode(b"tt0701063").unwrap().unwrap();
assert_eq!(ep.tvshow_id, "tt0096697");
} | identifier_body | |
episode.rs | We claim it is safe to open the following memory map because we
// don't mutate them and no other process (should) either.
let seasons = unsafe { fst_set_file(index_dir.join(SEASONS))? };
let tvshows = unsafe { fst_set_file(index_dir.join(TVSHOWS))? };
Ok(Index {
seasons: seasons,
tvshows: tvshows,
})
}
/// Create an episode index from the given IMDb data directory and write
/// it to the given index directory. If an episode index already exists,
/// then it is overwritten.
pub fn create<P1: AsRef<Path>, P2: AsRef<Path>>(
data_dir: P1,
index_dir: P2,
) -> Result<Index> {
let data_dir = data_dir.as_ref();
let index_dir = index_dir.as_ref();
let mut buf = vec![];
let mut seasons = fst_set_builder_file(index_dir.join(SEASONS))?;
let mut tvshows = fst_set_builder_file(index_dir.join(TVSHOWS))?;
let mut episodes = read_sorted_episodes(data_dir)?;
for episode in &episodes {
buf.clear();
write_episode(episode, &mut buf)?;
seasons.insert(&buf).map_err(Error::fst)?;
}
episodes.sort_by(|e1, e2| {
(&e1.id, &e1.tvshow_id).cmp(&(&e2.id, &e2.tvshow_id))
});
for episode in &episodes {
buf.clear();
write_tvshow(&episode, &mut buf)?;
tvshows.insert(&buf).map_err(Error::fst)?;
}
seasons.finish().map_err(Error::fst)?;
tvshows.finish().map_err(Error::fst)?;
log::info!("{} episodes indexed", episodes.len());
Index::open(index_dir)
}
/// Return a sequence of episodes for the given TV show IMDb identifier.
///
/// The episodes are sorted in order of season number and episode number.
/// Episodes without season/episode numbers are sorted after episodes with
/// numbers.
pub fn seasons(&self, tvshow_id: &[u8]) -> Result<Vec<Episode>> {
let mut upper = tvshow_id.to_vec();
upper.push(0xFF);
let mut episodes = vec![];
let mut stream = self.seasons.range()
.ge(tvshow_id)
.le(upper)
.into_stream();
while let Some(episode_bytes) = stream.next() {
episodes.push(read_episode(episode_bytes)?);
}
Ok(episodes)
}
/// Return a sequence of episodes for the given TV show IMDb identifier and
/// season number.
///
/// The episodes are sorted in order of episode number. Episodes without
/// episode numbers are sorted after episodes with numbers.
pub fn episodes(
&self,
tvshow_id: &[u8],
season: u32,
) -> Result<Vec<Episode>> {
let mut lower = tvshow_id.to_vec();
lower.push(0x00);
lower.extend_from_slice(&u32_to_bytes(season));
lower.extend_from_slice(&u32_to_bytes(0));
let mut upper = tvshow_id.to_vec();
upper.push(0x00);
upper.extend_from_slice(&u32_to_bytes(season));
upper.extend_from_slice(&u32_to_bytes(u32::MAX));
let mut episodes = vec![];
let mut stream = self.seasons.range()
.ge(lower)
.le(upper)
.into_stream();
while let Some(episode_bytes) = stream.next() {
episodes.push(read_episode(episode_bytes)?);
}
Ok(episodes)
}
/// Return the episode information for the given episode IMDb identifier.
///
/// If no episode information for the given ID exists, then `None` is
/// returned.
pub fn episode(&self, episode_id: &[u8]) -> Result<Option<Episode>> {
let mut upper = episode_id.to_vec();
upper.push(0xFF);
let mut stream = self.tvshows.range()
.ge(episode_id)
.le(upper)
.into_stream();
while let Some(tvshow_bytes) = stream.next() {
return Ok(Some(read_tvshow(tvshow_bytes)?));
}
Ok(None)
}
}
fn read_sorted_episodes(data_dir: &Path) -> Result<Vec<Episode>> {
// We claim it is safe to open the following memory map because we don't
// mutate them and no other process (should) either.
let mut rdr = csv_file(data_dir.join(IMDB_EPISODE))?;
let mut records = vec![];
for result in rdr.deserialize() {
let record: Episode = result.map_err(Error::csv)?;
records.push(record);
}
records.sort_by(cmp_episode);
Ok(records)
}
fn cmp_episode(ep1: &Episode, ep2: &Episode) -> cmp::Ordering {
let k1 = (
&ep1.tvshow_id,
ep1.season.unwrap_or(u32::MAX),
ep1.episode.unwrap_or(u32::MAX),
&ep1.id,
);
let k2 = (
&ep2.tvshow_id,
ep2.season.unwrap_or(u32::MAX),
ep2.episode.unwrap_or(u32::MAX),
&ep2.id,
);
k1.cmp(&k2)
}
fn read_episode(bytes: &[u8]) -> Result<Episode> {
let nul = match bytes.iter().position(|&b| b == 0) {
Some(nul) => nul,
None => bug!("could not find nul byte"),
};
let tvshow_id = match String::from_utf8(bytes[..nul].to_vec()) {
Err(err) => bug!("tvshow_id invalid UTF-8: {}", err),
Ok(tvshow_id) => tvshow_id,
};
let mut i = nul + 1;
let season = from_optional_u32(&bytes[i..]);
i += 4;
let epnum = from_optional_u32(&bytes[i..]);
i += 4;
let id = match String::from_utf8(bytes[i..].to_vec()) {
Err(err) => bug!("episode id invalid UTF-8: {}", err),
Ok(id) => id,
};
Ok(Episode {
id: id,
tvshow_id: tvshow_id,
season: season,
episode: epnum,
})
}
fn write_episode(ep: &Episode, buf: &mut Vec<u8>) -> Result<()> {
if ep.tvshow_id.as_bytes().iter().any(|&b| b == 0) {
bug!("unsupported tvshow id (with NUL byte) for {:?}", ep);
}
buf.extend_from_slice(ep.tvshow_id.as_bytes());
buf.push(0x00);
buf.extend_from_slice(&u32_to_bytes(to_optional_season(ep)?));
buf.extend_from_slice(&u32_to_bytes(to_optional_epnum(ep)?));
buf.extend_from_slice(ep.id.as_bytes());
Ok(())
}
fn read_tvshow(bytes: &[u8]) -> Result<Episode> {
let nul = match bytes.iter().position(|&b| b == 0) {
Some(nul) => nul,
None => bug!("could not find nul byte"),
};
let id = match String::from_utf8(bytes[..nul].to_vec()) {
Err(err) => bug!("episode id invalid UTF-8: {}", err),
Ok(id) => id,
};
let mut i = nul + 1;
let season = from_optional_u32(&bytes[i..]);
i += 4;
let epnum = from_optional_u32(&bytes[i..]);
i += 4;
let tvshow_id = match String::from_utf8(bytes[i..].to_vec()) {
Err(err) => bug!("tvshow_id invalid UTF-8: {}", err),
Ok(tvshow_id) => tvshow_id,
};
Ok(Episode {
id: id,
tvshow_id: tvshow_id,
season: season,
episode: epnum,
})
}
fn write_tvshow(ep: &Episode, buf: &mut Vec<u8>) -> Result<()> {
if ep.id.as_bytes().iter().any(|&b| b == 0) {
bug!("unsupported episode id (with NUL byte) for {:?}", ep);
}
buf.extend_from_slice(ep.id.as_bytes());
buf.push(0x00);
buf.extend_from_slice(&u32_to_bytes(to_optional_season(ep)?));
buf.extend_from_slice(&u32_to_bytes(to_optional_epnum(ep)?));
buf.extend_from_slice(ep.tvshow_id.as_bytes());
Ok(())
}
fn from_optional_u32(bytes: &[u8]) -> Option<u32> {
match BE::read_u32(bytes) {
u32::MAX => None,
x => Some(x),
}
}
fn to_optional_season(ep: &Episode) -> Result<u32> {
match ep.season {
None => Ok(u32::MAX),
Some(x) => {
if x == u32::MAX {
bug!("unsupported season number {} for {:?}", x, ep);
}
Ok(x)
}
}
}
fn | to_optional_epnum | identifier_name | |
import_export_classes.py | _on_fail=False, verbose=True):
self.processed = 0
self.failed = 0
self.failed_ids = []
self.missing_keys = Counter()
self.raise_on_fail = raise_on_fail
self.verbose = verbose
from .basic_utils import dotkeys
def _detect_zip(self, path):
filename = os.path.basename(path)
for zip_ext in ["gz", "bz2"]:
if filename[-len(zip_ext) :] == zip_ext:
return zip_ext
return False
def open_file(self, filename, mode="r", force=False, compression="autodetect"):
if mode not in ["w", "wb", "wt", "a", "ab", "at"] and not os.path.exists(
filename
):
logger.warning("File not found at {filename}".format(filename=filename))
if compression == "autodetect":
compression = self._detect_zip(filename)
if not compression:
return open(filename, mode=mode)
else:
filename += "." + compression
if compression == "gz":
return gzip.open(filename, mode=mode)
if compression == "bz2":
return bz2.open(filename, mode=mode)
raise ValueError("unsupported compression type: {}".format(compression))
def open_dir(
self, path, mode="r", match=".*", force=False, compression="autodetect"
):
"""Generator that yields all files in given directory
Parameters
----
path : string
A path in which to look for files
mode : string (default='r')
The mode to open files, such as `r` for reading UTF-8, `w` to write
match : string (default='.*')
a regular expression to match to filenames
force : bool (default=False)
Whether to return files for writing if they already exist
compression : string (default="autodetect")
Type of compression to use
"""
matcher = re.compile(match)
for filename in os.listdir(path):
# ignore non-matching filenames
if not matcher.search(filename):
continue
fileobj = self.open_file(
os.path.join(path, filename),
mode=mode,
force=force,
compression=compression,
)
yield fileobj
def _process_by_batch(self, iterable, batchsize=100):
batchnum = 0
batch = []
for i in iterable:
batch.append(i)
if len(batch) == batchsize:
yield batch
batch = []
if batch:
yield batch
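# Illustrative note (added here, not part of the original module): `_process_by_batch`
# groups any iterable into lists of at most `batchsize` items, e.g.
#
#   list(self._process_by_batch(range(5), batchsize=2))
#   # -> [[0, 1], [2, 3], [4]]
#
# which is what `Importer.run` relies on to ingest documents in batches.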
class Importer(BaseImportExport):
"""Base class for data importers"""
functiontype = "importer"
def _ingest(self, iterable, doctype):
"""Ingest document (batch)
Parameters
----
iterable : iterable
A list, generator or other iterable that yields documents or
batches of documents to be stored in elasticsearch. This method
should be called from the `load` method implemented in a specific
importer
doctype : string
A string to set the doctype of the added documents
"""
self.doctype = doctype
# handle batches
if type(iterable) == list:
i = [self._add_metadata(ii.get("_source", ii)) for ii in iterable]
# handle individual docs
else:
i = self._add_metadata(iterable.get("_source", iterable))
# Save document(s) using document base-class method
self._save_document(i)
def _apply_mapping(self, document, mapping):
"""Apply a given mapping to a document
Parameters
---
document : dict
A document as loaded by the load function
mapping : dict
A dictionary that specifies the from_key :=> to_key relation
between loaded documents and documents as they should be indexed
by elasticsearch.
If mapping is empty, the file contents are assumed to be ingested
as is.
Returns
---
dict
a new document ready for elasticsearch, containing all keys from
the mapping found in the document
"""
if not mapping:
return document
new_document = {v: document[k] for k, v in mapping.items() if k in document}
# Keep track of missing keys
self.missing_keys.update([k for k in mapping if k not in document])
# Document errors for missing documents
if not new_document:
self.failed += 1
self.failed_ids.append(
document.get("id", document.get("ID", document.get("_id", None)))
)
return new_document
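# Illustrative note (added here, not part of the original module): with a mapping of
# from_key -> to_key, only mapped keys survive and they are renamed, e.g.
#
#   raw = {"headline": "Some title", "body": "text", "internal_id": 42}
#   self._apply_mapping(raw, {"headline": "title", "body": "text"})
#   # -> {"title": "Some title", "text": "text"}
#
# Keys listed in the mapping but absent from the document are tallied in
# `self.missing_keys`; a document that maps to an empty result is counted as failed.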
def load(self):
""" To be implemented in subclasses
normally called through the 'run' method. Please add to your documentation:
Parameters
---
mapping : dict
A dictionary that specifies the from_key :=> to_key relation
between loaded documents and documents as they should be indexed
by elasticsearch.
<fields needed for the load function>
Yields
----
dict
raw document to be processed and indexed
"""
raise NotImplementedError
yield document
def run(self, mapping={}, *args, **kwargs):
"""uses the documents from the load method in batches """
self.processed = 0
for batch in self._process_by_batch(self.load(*args, **kwargs)):
batch = list(map(lambda doc: self._apply_mapping(doc, mapping), batch))
for doc in batch:
self._ingest(iterable=doc, doctype=doc["doctype"])
self.processed += 1
logger.info("Added {} documents to the database.".format(self.processed))
class Exporter(BaseImportExport):
"""Base class for exporting"""
# set to_file to `False` for subclasses that do not export to files
# for instance when writing to external databases
to_file = True
batchsize = 100
def __init__(self, *args, **kwargs):
BaseImportExport.__init__(self, *args, **kwargs)
self.fileobj = None
self.extension = ""
def save(self, batch_of_documents, destination="exports", *args, **kwargs):
"""To be implemented in subclass
This method should process batches of documents by exporting them in
whatever format implemented in the exporter.
Parameters
----
batch_of_documents : list of dicts
a list containing the dict of each document as represented in
elasticsearch
...<arguments specific to exporter>
"""
raise NotImplementedError
def _flatten_doc(self, document, include_meta=False, include_html=False):
"""Utility to convert elasticsearch documents to a flat representation
Parameters
---
document : dict
A dictionary which may include nested fields
Returns
----
dict
A dictionary where values are all strings, Nested keys are
merged by '.'
"""
flat_dict = {}
for k, v in document.items():
if k == "META" and not include_meta:
continue
if k == "htmlsource" and not include_html:
continue
if type(v) == str:
flat_dict[k] = v
elif type(v) == list:
flat_dict[k] = str(v)
elif type(v) == dict:
for kk, vv in self._flatten_doc(v).items():
flat_dict["{k}.{kk}".format(k=k, kk=kk)] = vv
else:
try:
flat_dict[k] = str(v)
except:
logger.warning("Unable to ready field {k} for writing".format(k=k))
return flat_dict
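# Illustrative note (added here, not part of the original module): nested documents are
# flattened with dotted keys and all values become strings, e.g.
#
#   self._flatten_doc({"title": "t", "source": {"name": "rss", "rank": 3}})
#   # -> {"title": "t", "source.name": "rss", "source.rank": "3"}
#
# "META" and "htmlsource" fields are dropped unless explicitly included.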
def _retrieve(self, query):
for doc in document_generator(query):
self.processed += 1
yield doc
def _makefile(self, filename, mode="wt", force=False, compression=False):
filepath = os.path.dirname(filename)
os.makedirs(filepath, exist_ok=True)
# handle cases when a path instead of a filename is provided
if os.path.isdir(filename):
now = time.localtime()
newname = "INCA_export_{now.tm_year}_{now.tm_mon}_{now.tm_mday}_{now.tm_hour}_{now.tm_min}_{now.tm_sec}.{extension}".format(
now=now, extension=self.extension
)
filename = os.path.join(filename, newname)
if self.extension not in filename:
filename = "{filename}.{extension}".format(
filename=filename, extension=self.extension
)
if filename in os.listdir(filepath) and not force:
logger.warning(
"file called {filename} already exists, either provide new filename"
"or set `overwrite=True`".format(filename=filename)
)
return False
else:
self.fileobj = self.open_file(
filename, mode=mode, force=force, compression=compression
)
return self.fileobj
def | (
self,
query="*",
destination="exports/",
overwrite=False,
batchsize=None,
*args,
**kwargs
):
"""Exports documents from the INCA elasticsearch index
DO NOT OVERWRITE
This method is the common entry point for the exporter functionality. It
handles shared work such as passing the query to ES, retrieving documents,
and making sure a file exists. The `save` method should implement
batch-wise handling of elasticsearch documents, for instance by
writing them to a file.
Parameters
---
query : string or dict | run | identifier_name |
import_export_classes.py | _on_fail=False, verbose=True):
self.processed = 0
self.failed = 0
self.failed_ids = []
self.missing_keys = Counter()
self.raise_on_fail = raise_on_fail
self.verbose = verbose
from .basic_utils import dotkeys
def _detect_zip(self, path):
filename = os.path.basename(path)
for zip_ext in ["gz", "bz2"]:
if filename[-len(zip_ext) :] == zip_ext:
return zip_ext
return False
def open_file(self, filename, mode="r", force=False, compression="autodetect"):
if mode not in ["w", "wb", "wt", "a", "ab", "at"] and not os.path.exists(
filename
):
logger.warning("File not found at {filename}".format(filename=filename))
if compression == "autodetect":
compression = self._detect_zip(filename)
if not compression:
return open(filename, mode=mode)
else:
filename += "." + compression
if compression == "gz":
return gzip.open(filename, mode=mode)
if compression == "bz2":
return bz2.open(filename, mode=mode)
raise ValueError("unsupported compression type: {}".format(compression))
def open_dir(
self, path, mode="r", match=".*", force=False, compression="autodetect"
):
"""Generator that yields all files in given directory
Parameters
----
path : string
A path in which to look for files
mode : string (default='r')
The mode to open files, such as `r` for reading UTF-8, `w` to write
match : string (default='.*')
a regular expression to match to filenames
force : bool (default=False)
Whether to return files for writing if they already exist
compression : string (default="autodetect")
Type of compression to use
"""
matcher = re.compile(match)
for filename in os.listdir(path):
# ignore non-matching filenames
if not matcher.search(filename):
continue
fileobj = self.open_file(
os.path.join(path, filename),
mode=mode,
force=force,
compression=compression,
)
yield fileobj
def _process_by_batch(self, iterable, batchsize=100):
batchnum = 0
batch = []
for i in iterable:
batch.append(i)
if len(batch) == batchsize:
yield batch
batch = []
if batch:
yield batch
class Importer(BaseImportExport):
"""Base class for data importers"""
functiontype = "importer"
def _ingest(self, iterable, doctype):
"""Ingest document (batch)
Parameters
----
iterable : iterable
A list, generator or other iterable that yields documents or
batches of documents to be stored in elasticsearch. This method
should be called from the `load` method implemented in a specific
importer
doctype : string
A string to set the doctype of the added documents
"""
self.doctype = doctype
# handle batches
if type(iterable) == list:
i = [self._add_metadata(ii.get("_source", ii)) for ii in iterable]
# handle individual docs
else:
i = self._add_metadata(iterable.get("_source", iterable))
# Save document(s) using document base-class method
self._save_document(i)
def _apply_mapping(self, document, mapping):
| """
if not mapping:
return document
new_document = {v: document[k] for k, v in mapping.items() if k in document}
# Keep track of missing keys
self.missing_keys.update([k for k in mapping if k not in document])
# Document errors for missing documents
if not new_document:
self.failed += 1
self.failed_ids.append(
document.get("id", document.get("ID", document.get("_id", None)))
)
return new_document
def load(self):
""" To be implemented in subclasses
normally called through the 'run' method. Please add to your documentation:
Parameters
---
mapping : dict
A dictionary that specifies the from_key :=> to_key relation
between loaded documents and documents as they should be indexed
by elasticsearch.
<fields needed for the load function>
Yields
----
dict
raw document to be processed and indexed
"""
raise NotImplementedError
yield document
def run(self, mapping={}, *args, **kwargs):
"""uses the documents from the load method in batches """
self.processed = 0
for batch in self._process_by_batch(self.load(*args, **kwargs)):
batch = list(map(lambda doc: self._apply_mapping(doc, mapping), batch))
for doc in batch:
self._ingest(iterable=doc, doctype=doc["doctype"])
self.processed += 1
logger.info("Added {} documents to the database.".format(self.processed))
class Exporter(BaseImportExport):
"""Base class for exporting"""
# set to_file to `False` for subclasses that do not export to files
# for instance when writing to external databases
to_file = True
batchsize = 100
def __init__(self, *args, **kwargs):
BaseImportExport.__init__(self, *args, **kwargs)
self.fileobj = None
self.extension = ""
def save(self, batch_of_documents, destination="exports", *args, **kwargs):
"""To be implemented in subclass
This method should process batches of documents by exporting them in
whatever format implemented in the exporter.
Parameters
----
batch_of_documents : list of dicts
a list containing the dict of each document as represented in
elasticsearch
...<arguments specific to exporter>
"""
raise NotImplementedError
def _flatten_doc(self, document, include_meta=False, include_html=False):
"""Utility to convert elasticsearch documents to a flat representation
Parameters
---
document : dict
A dictionary which may include nested fields
Returns
----
dict
A dictionary where values are all strings, Nested keys are
merged by '.'
"""
flat_dict = {}
for k, v in document.items():
if k == "META" and not include_meta:
continue
if k == "htmlsource" and not include_html:
continue
if type(v) == str:
flat_dict[k] = v
elif type(v) == list:
flat_dict[k] = str(v)
elif type(v) == dict:
for kk, vv in self._flatten_doc(v).items():
flat_dict["{k}.{kk}".format(k=k, kk=kk)] = vv
else:
try:
flat_dict[k] = str(v)
except:
logger.warning("Unable to ready field {k} for writing".format(k=k))
return flat_dict
def _retrieve(self, query):
for doc in document_generator(query):
self.processed += 1
yield doc
def _makefile(self, filename, mode="wt", force=False, compression=False):
filepath = os.path.dirname(filename)
os.makedirs(filepath, exist_ok=True)
# handle cases when a path instead of a filename is provided
if os.path.isdir(filename):
now = time.localtime()
newname = "INCA_export_{now.tm_year}_{now.tm_mon}_{now.tm_mday}_{now.tm_hour}_{now.tm_min}_{now.tm_sec}.{extension}".format(
now=now, extension=self.extension
)
filename = os.path.join(filename, newname)
if self.extension not in filename:
filename = "{filename}.{extension}".format(
filename=filename, extension=self.extension
)
if filename in os.listdir(filepath) and not force:
logger.warning(
"file called {filename} already exists, either provide new filename"
"or set `overwrite=True`".format(filename=filename)
)
return False
else:
self.fileobj = self.open_file(
filename, mode=mode, force=force, compression=compression
)
return self.fileobj
def run(
self,
query="*",
destination="exports/",
overwrite=False,
batchsize=None,
*args,
**kwargs
):
"""Exports documents from the INCA elasticsearch index
DO NOT OVERWRITE
This method is the common entry point for the exporter functionality. It
handles shared work such as passing the query to ES, retrieving documents,
and making sure a file exists. The `save` method should implement
batch-wise handling of elasticsearch documents, for instance by
writing them to a file.
Parameters
---
query : string or dict
| """Apply a given mapping to a document
Parameters
---
document : dict
A document as loaded by the load function
mapping : dict
A dictionary that specifies the from_key :=> to_key relation
between loaded documents and documents as they should be indexed
by elasticsearch.
If mapping is empty, the file contents are assumed to be ingested
as is.
Returns
---
dict
a new document ready for elasticsearch, containing all keys from
the mapping found in the document | identifier_body |
import_export_classes.py | path):
filename = os.path.basename(path)
for zip_ext in ["gz", "bz2"]:
if filename[-len(zip_ext) :] == zip_ext:
return zip_ext
return False
def open_file(self, filename, mode="r", force=False, compression="autodetect"):
if mode not in ["w", "wb", "wt", "a", "ab", "at"] and not os.path.exists(
filename
):
logger.warning("File not found at {filename}".format(filename=filename))
if compression == "autodetect":
compression = self._detect_zip(filename)
if not compression:
return open(filename, mode=mode)
else:
filename += "." + compression
if compression == "gz":
return gzip.open(filename, mode=mode)
if compression == "bz2":
return bz2.open(filename, mode=mode)
raise ValueError("unsupported compression type: {}".format(compression))
def open_dir(
self, path, mode="r", match=".*", force=False, compression="autodetect"
):
"""Generator that yields all files in given directory
Parameters
----
path : string
A path in which to look for files
mode : string (default='r')
The mode to open files, such as `r` for reading UTF-8, `w` to write
match : string (default='.*')
a regular expression to match to filenames
force : bool (default=False)
Whether to return files for writing if they already exist
compression : string (default="autodetect")
Type of compression to use
"""
matcher = re.compile(match)
for filename in os.listdir(path):
# ignore non-matching filenames
if not matcher.search(filename):
continue
fileobj = self.open_file(
os.path.join(path, filename),
mode=mode,
force=force,
compression=compression,
)
yield fileobj
def _process_by_batch(self, iterable, batchsize=100):
batchnum = 0
batch = []
for i in iterable:
batch.append(i)
if len(batch) == batchsize:
yield batch
batch = []
if batch:
yield batch
class Importer(BaseImportExport):
"""Base class for data importers"""
functiontype = "importer"
def _ingest(self, iterable, doctype):
"""Ingest document (batch)
Parameters
----
iterable : iterable
A list, generator or other iterable that yields documents or
batches of documents to be stored in elasticsearch. This method
should be called from the `load` method implemented in a specific
importer
doctype : string
A string to set the doctype of the added documents
"""
self.doctype = doctype
# handle batches
if type(iterable) == list:
i = [self._add_metadata(ii.get("_source", ii)) for ii in iterable]
# handle individual docs
else:
i = self._add_metadata(iterable.get("_source", iterable))
# Save document(s) using document base-class method
self._save_document(i)
def _apply_mapping(self, document, mapping):
"""Apply a given mapping to a document
Parameters
---
document : dict
A document as loaded by the load function
mapping : dict
A dictionary that specifies the from_key :=> to_key relation
between loaded documents and documents as they should be indexed
by elasticsearch.
If mapping is empty, the file contents are assumed to be ingested
as is.
Returns
---
dict
a new document ready for elasticsearch, containing all keys from
the mapping found in the document
"""
if not mapping:
return document
new_document = {v: document[k] for k, v in mapping.items() if k in document}
# Keep track of missing keys
self.missing_keys.update([k for k in mapping if k not in document])
# Document errors for missing documents
if not new_document:
self.failed += 1
self.failed_ids.append(
document.get("id", document.get("ID", document.get("_id", None)))
)
return new_document
def load(self):
""" To be implemented in subclasses
normally called through the 'run' method. Please add to your documentation:
Parameters
---
mapping : dict
A dictionary that specifies the from_key :=> to_key relation
between loaded documents and documents as they should be indexed
by elasticsearch.
<fields needed for the load function>
Yields
----
dict
raw document to be processed and indexed
"""
raise NotImplementedError
yield document
def run(self, mapping={}, *args, **kwargs):
"""uses the documents from the load method in batches """
self.processed = 0
for batch in self._process_by_batch(self.load(*args, **kwargs)):
batch = list(map(lambda doc: self._apply_mapping(doc, mapping), batch))
for doc in batch:
self._ingest(iterable=doc, doctype=doc["doctype"])
self.processed += 1
logger.info("Added {} documents to the database.".format(self.processed))
class Exporter(BaseImportExport):
"""Base class for exporting"""
# set to_file to `False` for subclasses that do not export to files
# for instance when writing to external databases
to_file = True
batchsize = 100
def __init__(self, *args, **kwargs):
BaseImportExport.__init__(self, *args, **kwargs)
self.fileobj = None
self.extension = ""
def save(self, batch_of_documents, destination="exports", *args, **kwargs):
"""To be implemented in subclass
This method should process batches of documents by exporting them in
whatever format implemented in the exporter.
Parameters
----
batch_of_documents : list of dicts
a list containing the dict of each document as represented in
elasticsearch
...<arguments specific to exporter>
"""
raise NotImplementedError
def _flatten_doc(self, document, include_meta=False, include_html=False):
"""Utility to convert elasticsearch documents to a flat representation
Parameters
---
document : dict
A dictionary which may include nested fields
Returns
----
dict
A dictionary where values are all strings, Nested keys are
merged by '.'
"""
flat_dict = {}
for k, v in document.items():
if k == "META" and not include_meta:
continue
if k == "htmlsource" and not include_html:
continue
if type(v) == str:
flat_dict[k] = v
elif type(v) == list:
flat_dict[k] = str(v)
elif type(v) == dict:
for kk, vv in self._flatten_doc(v).items():
flat_dict["{k}.{kk}".format(k=k, kk=kk)] = vv
else:
try:
flat_dict[k] = str(v)
except:
logger.warning("Unable to ready field {k} for writing".format(k=k))
return flat_dict
def _retrieve(self, query):
for doc in document_generator(query):
self.processed += 1
yield doc
def _makefile(self, filename, mode="wt", force=False, compression=False):
filepath = os.path.dirname(filename)
os.makedirs(filepath, exist_ok=True)
# handle cases when a path instead of a filename is provided
if os.path.isdir(filename):
now = time.localtime()
newname = "INCA_export_{now.tm_year}_{now.tm_mon}_{now.tm_mday}_{now.tm_hour}_{now.tm_min}_{now.tm_sec}.{extension}".format(
now=now, extension=self.extension
)
filename = os.path.join(filename, newname)
if self.extension not in filename:
filename = "{filename}.{extension}".format(
filename=filename, extension=self.extension
)
if filename in os.listdir(filepath) and not force:
logger.warning(
"file called {filename} already exists, either provide new filename"
"or set `overwrite=True`".format(filename=filename)
)
return False
else:
self.fileobj = self.open_file(
filename, mode=mode, force=force, compression=compression
)
return self.fileobj
def run(
self,
query="*",
destination="exports/",
overwrite=False,
batchsize=None,
*args,
**kwargs
):
"""Exports documents from the INCA elasticsearch index
DO NOT OVERWRITE
This method is the common entry point for the exporter functionality. It
handles shared work such as passing the query to ES, retrieving documents,
and making sure a file exists. The `save` method should implement
batch-wise handling of elasticsearch documents, for instance by
writing them to a file.
Parameters
---
query : string or dict
The query to select elasticsearch records to export
destination : string
The destination to which to export records. If the subclass
`to_file` property is set to `True`, a fileobject will be opened
to that location. |
If the destination is a folder, a filename will be generated. | random_line_split | |
import_export_classes.py | _on_fail=False, verbose=True):
self.processed = 0
self.failed = 0
self.failed_ids = []
self.missing_keys = Counter()
self.raise_on_fail = raise_on_fail
self.verbose = verbose
from .basic_utils import dotkeys
def _detect_zip(self, path):
filename = os.path.basename(path)
for zip_ext in ["gz", "bz2"]:
if filename[-len(zip_ext) :] == zip_ext:
return zip_ext
return False
def open_file(self, filename, mode="r", force=False, compression="autodetect"):
if mode not in ["w", "wb", "wt", "a", "ab", "at"] and not os.path.exists(
filename
):
logger.warning("File not found at {filename}".format(filename=filename))
if compression == "autodetect":
compression = self._detect_zip(filename)
if not compression:
return open(filename, mode=mode)
else:
|
if compression == "gz":
return gzip.open(filename, mode=mode)
if compression == "bz2":
return bz2.open(filename, mode=mode)
raise ValueError("unsupported compression type: {}".format(compression))
def open_dir(
self, path, mode="r", match=".*", force=False, compression="autodetect"
):
"""Generator that yields all files in given directory
Parameters
----
path : string
A path in which to look for files
mode : string (default='r')
The mode to open files, such as `r` for reading UTF-8, `w` to write
match : string (default='.*')
a regular expression to match to filenames
force : bool (default=False)
Whether to return files for writing if they already exist
compression : string (default="autodetect")
Type of compression to use
"""
matcher = re.compile(match)
for filename in os.listdir(path):
# ignore non-matching filenames
if not matcher.search(filename):
continue
fileobj = self.open_file(
os.path.join(path, filename),
mode=mode,
force=force,
compression=compression,
)
yield fileobj
def _process_by_batch(self, iterable, batchsize=100):
batchnum = 0
batch = []
for i in iterable:
batch.append(i)
if len(batch) == batchsize:
yield batch
batch = []
if batch:
yield batch
class Importer(BaseImportExport):
"""Base class for data importers"""
functiontype = "importer"
def _ingest(self, iterable, doctype):
"""Ingest document (batch)
Parameters
----
iterable : iterable
A list, generator or other iterable that yields documents or
batches of documents to be stored in elasticsearch. This method
should be called from the `load` method implemented in a specific
importer
doctype : string
A string to set the doctype of the added documents
"""
self.doctype = doctype
# handle batches
if type(iterable) == list:
i = [self._add_metadata(ii.get("_source", ii)) for ii in iterable]
# handle individual docs
else:
i = self._add_metadata(iterable.get("_source", iterable))
# Save document(s) using document base-class method
self._save_document(i)
def _apply_mapping(self, document, mapping):
"""Apply a given mapping to a document
Parameters
---
document : dict
A document as loaded by the load function
mapping : dict
A dictionary that specifies the from_key :=> to_key relation
between loaded documents and documents as they should be indexed
by elasticsearch.
If mapping is empty, the file contents are assumed to be ingested
as is.
Returns
---
dict
a new document ready for elasticsearch, containing all keys from
the mapping found in the document
"""
if not mapping:
return document
new_document = {v: document[k] for k, v in mapping.items() if k in document}
# Keep track of missing keys
self.missing_keys.update([k for k in mapping if k not in document])
# Document errors for missing documents
if not new_document:
self.failed += 1
self.failed_ids.append(
document.get("id", document.get("ID", document.get("_id", None)))
)
return new_document
def load(self):
""" To be implemented in subclasses
normally called through the 'run' method. Please add to your documentation:
Parameters
---
mapping : dict
A dictionary that specifies the from_key :=> to_key relation
between loaded documents and documents as they should be indexed
by elasticsearch.
<fields needed for the load function>
Yields
----
dict
raw document to be processed and indexed
"""
raise NotImplementedError
yield document
def run(self, mapping={}, *args, **kwargs):
"""uses the documents from the load method in batches """
self.processed = 0
for batch in self._process_by_batch(self.load(*args, **kwargs)):
batch = list(map(lambda doc: self._apply_mapping(doc, mapping), batch))
for doc in batch:
self._ingest(iterable=doc, doctype=doc["doctype"])
self.processed += 1
logger.info("Added {} documents to the database.".format(self.processed))
class Exporter(BaseImportExport):
"""Base class for exporting"""
# set to_file to `False` for subclasses that do not export to files
# for instance when writing to external databases
to_file = True
batchsize = 100
def __init__(self, *args, **kwargs):
BaseImportExport.__init__(self, *args, **kwargs)
self.fileobj = None
self.extension = ""
def save(self, batch_of_documents, destination="exports", *args, **kwargs):
"""To be implemented in subclass
This method should process batches of documents by exporting them in
whatever format implemented in the exporter.
Parameters
----
batch_of_documents : list of dicts
a list containing the dict of each document as represented in
elasticsearch
...<arguments specific to exporter>
"""
raise NotImplementedError
def _flatten_doc(self, document, include_meta=False, include_html=False):
"""Utility to convert elasticsearch documents to a flat representation
Parameters
---
document : dict
A dictionary which may include nested fields
Returns
----
dict
A dictionary where values are all strings, Nested keys are
merged by '.'
"""
flat_dict = {}
for k, v in document.items():
if k == "META" and not include_meta:
continue
if k == "htmlsource" and not include_html:
continue
if type(v) == str:
flat_dict[k] = v
elif type(v) == list:
flat_dict[k] = str(v)
elif type(v) == dict:
for kk, vv in self._flatten_doc(v).items():
flat_dict["{k}.{kk}".format(k=k, kk=kk)] = vv
else:
try:
flat_dict[k] = str(v)
except:
logger.warning("Unable to ready field {k} for writing".format(k=k))
return flat_dict
def _retrieve(self, query):
for doc in document_generator(query):
self.processed += 1
yield doc
def _makefile(self, filename, mode="wt", force=False, compression=False):
filepath = os.path.dirname(filename)
os.makedirs(filepath, exist_ok=True)
# handle cases when a path instead of a filename is provided
if os.path.isdir(filename):
now = time.localtime()
newname = "INCA_export_{now.tm_year}_{now.tm_mon}_{now.tm_mday}_{now.tm_hour}_{now.tm_min}_{now.tm_sec}.{extension}".format(
now=now, extension=self.extension
)
filename = os.path.join(filename, newname)
if self.extension not in filename:
filename = "{filename}.{extension}".format(
filename=filename, extension=self.extension
)
if filename in os.listdir(filepath) and not force:
logger.warning(
"file called {filename} already exists, either provide new filename"
"or set `overwrite=True`".format(filename=filename)
)
return False
else:
self.fileobj = self.open_file(
filename, mode=mode, force=force, compression=compression
)
return self.fileobj
def run(
self,
query="*",
destination="exports/",
overwrite=False,
batchsize=None,
*args,
**kwargs
):
"""Exports documents from the INCA elasticsearch index
DO NOT OVERWRITE
This method is the common entry point for the exporter functionality. It
handles shared work such as passing the query to ES, retrieving documents,
and making sure a file exists. The `save` method should implement
batch-wise handling of elasticsearch documents, for instance by
writing them to a file.
Parameters
---
query : string or dict
| filename += "." + compression | conditional_block |
main.rs | > std::fmt::Display for CsvDesc<'a> {
fn | (&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(
f,
"{} {} {:?}",
self.file_path.display(),
self.delimiter,
self.quote
)
}
}
fn parse_args<'a>(
path_arg: &'a String,
delimiter_arg: &'a String,
quote_arg: &'a String,
) -> Result<CsvDesc<'a>, &'static str> {
let csv_file_path = Path::new(path_arg);
let csv_delimiter = match delimiter_arg.chars().next() {
Some(result) => result,
None => return Err("incorrect delimiter"),
};
let csv_quote = quote_arg.chars().next();
Ok(CsvDesc {
file_path: &csv_file_path,
delimiter: csv_delimiter,
quote: csv_quote,
})
}
fn get_csv_cols(csv_desc: &CsvDesc) -> Result<Vec<String>, String> {
let csv_file = match File::open(csv_desc.file_path) {
Err(why) => panic!(
"couldn't open csv @ {}: {}",
csv_desc.file_path.display(),
why
),
Ok(file) => file,
};
let csv_reader = BufReader::new(csv_file);
let mut csv_line_iter = csv_reader.lines();
let csv_header: String = match csv_line_iter.next() {
Some(result) => match result {
Err(why) => return Err(format!("error getting csv header: {}", why)),
Ok(header) => header,
},
None => return Err("csv header reading failed".to_string()),
};
let csv_cols: Vec<String> = {
let cols_iter = csv_header.split(csv_desc.delimiter);
match csv_desc.quote {
Some(q) => cols_iter.map(|s| s.trim_matches(q).to_string()).collect(),
None => cols_iter.map(|s| s.to_string()).collect(),
}
};
Ok(csv_cols)
}
fn build_index(csv_desc: &CsvDesc) -> Result<HashMap<String, u64>, String> {
let mut csv_index = HashMap::new();
let csv_file = match File::open(csv_desc.file_path) {
Err(why) => panic!(
"couldn't open csv @ {}: {}",
csv_desc.file_path.display(),
why
),
Ok(file) => file,
};
let csv_reader = BufReader::new(csv_file);
let mut csv_line_iter = csv_reader.lines();
let mut offset_in_file: u64 = 0;
let mut expected_col_count = 0;
let mut row_idx = 0;
loop {
let csv_row: String = match csv_line_iter.next() {
Some(result) => match result {
Err(why) => return Err(format!("error getting csv row: {}", why)),
Ok(header) => header,
},
None => break,
};
let csv_cols: Vec<String> = {
let cols_iter = csv_row.split(csv_desc.delimiter);
match csv_desc.quote {
Some(q) => cols_iter.map(|s| s.trim_matches(q).to_string()).collect(),
None => cols_iter.map(|s| s.to_string()).collect(),
}
};
let curr_col_count = csv_cols.len();
if curr_col_count < 2 {
return Err(format!("fewer than 2 columns in row {}", row_idx));
}
if expected_col_count != 0 && expected_col_count != curr_col_count {
return Err(format!(
"{} columns in row #{}, {} expected",
curr_col_count, row_idx, expected_col_count
));
}
expected_col_count = curr_col_count;
row_idx += 1;
let key = format!("{}{}", csv_cols[0], csv_cols[1]);
csv_index.insert(key, offset_in_file);
offset_in_file += (csv_row.len() + 1) as u64;
}
Ok(csv_index)
}
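// Illustrative note (added for this write-up, not in the original source): for a CSV
// such as
//   name,age,city
//   Alex,38,Berlin
//   Dana,41,Oslo
// build_index keys each row by the concatenation of its first two columns
// ("Alex38", "Dana41") and stores the byte offset of that row in the file, so that
// get_csv_row can later seek straight to it instead of re-reading the whole file.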
fn get_csv_row(csv_desc: &CsvDesc, line_offset: u64) -> Result<Vec<String>, String> {
let mut csv_file = match File::open(csv_desc.file_path) {
Err(why) => panic!(
"couldn't open csv @ {}: {}",
csv_desc.file_path.display(),
why
),
Ok(file) => file,
};
if let Err(e) = csv_file.seek(SeekFrom::Start(line_offset)) {
return Err(format!("error seeking offset: {}", e));
}
let mut csv_reader = BufReader::new(csv_file);
let mut row_buff = String::new();
match csv_reader.read_line(&mut row_buff) {
Ok(_n) => {
if row_buff.ends_with("\n") {
row_buff.pop();
}
}
Err(e) => return Err(format!("error gettig csv row: {}", e)),
};
let result: Vec<String> = {
let cols_iter = row_buff.split(csv_desc.delimiter);
match csv_desc.quote {
Some(q) => cols_iter.map(|s| s.trim_matches(q).to_string()).collect(),
None => cols_iter.map(|s| s.to_string()).collect(),
}
};
Ok(result)
}
static USAGE: &'static str = "
Usage: rusty-csv-diff <csv1> <delim1> <quote1> <csv2> <delim2> <quote2>
rusty-csv-diff --help
Options:
-h, --help Show this message.
";
#[derive(Debug, Deserialize)]
struct Args {
arg_csv1: String,
arg_delim1: String,
arg_quote1: String,
arg_csv2: String,
arg_delim2: String,
arg_quote2: String,
}
fn main() {
/*
1. Parse arguments
2. Open CSV files
3. Get columns (cols_N)
4. Get intersection of those two sets of columns(cols_to_compare)
5. Create {column name : column index in cols_N} dicts
6. Create {CSV_col_value : CSV row index in file} dicts,
where CSV_col_value is a unique key made of the value of several CSV columns.
For example, {Alex38 : 76}. Here the name and age form a unique key for the 76th CSV row.
7. Get intersection of key sets of dicts from step 6 (row_keys_to_compare)
8. Loop through row_keys_to_compare, use dicts from step 6 to get line numbers for CSV files
8.1 Loop through cols_to_compare, use dicts from step 5 to extract column values from CSV rows
8.2 Compare values
Input parameters: CSV paths, delimiters, quotes
For example, ./main file_1.csv "," "'" file_2.csv " " ""
*/
/*** 0 ***/
log::set_logger(&MY_LOGGER).unwrap();
log::set_max_level(LevelFilter::Error);
/*** 1 ***/
let args: Args = Docopt::new(USAGE)
.and_then(|d| d.deserialize())
.unwrap_or_else(|e| e.exit());
let csv_desc_1: CsvDesc = match parse_args(&args.arg_csv1, &args.arg_delim1, &args.arg_quote1) {
Err(why) => panic!("error parsing arguments for CSV #1: {}", why),
Ok(result) => result,
};
let csv_desc_2: CsvDesc = match parse_args(&args.arg_csv2, &args.arg_delim2, &args.arg_quote2) {
Err(why) => panic!("error parsing arguments for CSV #2: {}", why),
Ok(result) => result,
};
/*** 2&3 ***/
let csv_cols_1: Vec<String> = match get_csv_cols(&csv_desc_1) {
Err(why) => panic!("couldn't get columns: {}", why),
Ok(cols) => cols,
};
let csv_cols_2: Vec<String> = match get_csv_cols(&csv_desc_2) {
Err(why) => panic!("couldn't get columns: {}", why),
Ok(cols) => cols,
};
/*** 5 ***/
let mut csv_col_index_1 = HashMap::new();
for i in 0..csv_cols_1.len() {
let key = csv_cols_1[i].clone();
if csv_col_index_1.contains_key(&key) {
panic!("duplicate column found in CSV #1: {}", key);
};
csv_col_index_1.insert(key, i);
}
info!("{:?}", csv_col_index_1);
let mut csv_col_index_2 = HashMap::new();
for i in 0..csv_cols_2.len() {
let key = csv_cols_2[i].clone();
if csv_col_index_2.contains_key(&key) {
panic!("duplicate column found in CSV #1: {}", key);
};
csv_col_index_2.insert(key, i);
}
info!("{:?}", csv_col_index_2);
/*** 4 ***/
let mut cols_to_compare = HashSet::new();
for col_1 in csv_col_index_1.keys() {
if csv_col_index_2.contains_key(col_1) {
cols_to_compare.insert(col_1);
};
}
info!("{:?}", cols_to_compare);
/*** 6 ***/
// let's assume that the unique key is (col_0 + col_1)
let csv | fmt | identifier_name |
main.rs |
}
struct CsvDesc<'a> {
file_path: &'a Path,
delimiter: char,
quote: Option<char>,
}
impl<'a> std::fmt::Display for CsvDesc<'a> {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(
f,
"{} {} {:?}",
self.file_path.display(),
self.delimiter,
self.quote
)
}
}
fn parse_args<'a>(
path_arg: &'a String,
delimiter_arg: &'a String,
quote_arg: &'a String,
) -> Result<CsvDesc<'a>, &'static str> {
let csv_file_path = Path::new(path_arg);
let csv_delimiter = match delimiter_arg.chars().next() {
Some(result) => result,
None => return Err("incorrect delimiter"),
};
let csv_quote = quote_arg.chars().next();
Ok(CsvDesc {
file_path: &csv_file_path,
delimiter: csv_delimiter,
quote: csv_quote,
})
}
fn get_csv_cols(csv_desc: &CsvDesc) -> Result<Vec<String>, String> {
let csv_file = match File::open(csv_desc.file_path) {
Err(why) => panic!(
"couldn't open csv @ {}: {}",
csv_desc.file_path.display(),
why
),
Ok(file) => file,
};
let csv_reader = BufReader::new(csv_file);
let mut csv_line_iter = csv_reader.lines();
let csv_header: String = match csv_line_iter.next() {
Some(result) => match result {
Err(why) => return Err(format!("error getting csv header: {}", why)),
Ok(header) => header,
},
None => return Err("csv header reading failed".to_string()),
};
let csv_cols: Vec<String> = {
let cols_iter = csv_header.split(csv_desc.delimiter);
match csv_desc.quote {
Some(q) => cols_iter.map(|s| s.trim_matches(q).to_string()).collect(),
None => cols_iter.map(|s| s.to_string()).collect(),
}
};
Ok(csv_cols)
}
fn build_index(csv_desc: &CsvDesc) -> Result<HashMap<String, u64>, String> {
let mut csv_index = HashMap::new();
let csv_file = match File::open(csv_desc.file_path) {
Err(why) => panic!(
"couldn't open csv @ {}: {}",
csv_desc.file_path.display(),
why
),
Ok(file) => file,
};
let csv_reader = BufReader::new(csv_file);
let mut csv_line_iter = csv_reader.lines();
let mut offset_in_file: u64 = 0;
let mut expected_col_count = 0;
let mut row_idx = 0;
loop {
let csv_row: String = match csv_line_iter.next() {
Some(result) => match result {
Err(why) => return Err(format!("error getting csv row: {}", why)),
Ok(header) => header,
},
None => break,
};
let csv_cols: Vec<String> = {
let cols_iter = csv_row.split(csv_desc.delimiter);
match csv_desc.quote {
Some(q) => cols_iter.map(|s| s.trim_matches(q).to_string()).collect(),
None => cols_iter.map(|s| s.to_string()).collect(),
}
};
let curr_col_count = csv_cols.len();
if curr_col_count < 2 {
return Err(format!("fewer than 2 columns in row {}", row_idx));
}
if expected_col_count != 0 && expected_col_count != curr_col_count {
return Err(format!(
"{} columns in row #{}, {} expected",
curr_col_count, row_idx, expected_col_count
));
}
expected_col_count = curr_col_count;
row_idx += 1;
let key = format!("{}{}", csv_cols[0], csv_cols[1]);
csv_index.insert(key, offset_in_file);
offset_in_file += (csv_row.len() + 1) as u64;
}
Ok(csv_index)
}
fn get_csv_row(csv_desc: &CsvDesc, line_offset: u64) -> Result<Vec<String>, String> {
let mut csv_file = match File::open(csv_desc.file_path) {
Err(why) => panic!(
"couldn't open csv @ {}: {}",
csv_desc.file_path.display(),
why
),
Ok(file) => file,
};
if let Err(e) = csv_file.seek(SeekFrom::Start(line_offset)) {
return Err(format!("error seeking offset: {}", e));
}
let mut csv_reader = BufReader::new(csv_file);
let mut row_buff = String::new();
match csv_reader.read_line(&mut row_buff) {
Ok(_n) => {
if row_buff.ends_with("\n") {
row_buff.pop();
}
}
Err(e) => return Err(format!("error gettig csv row: {}", e)),
};
let result: Vec<String> = {
let cols_iter = row_buff.split(csv_desc.delimiter);
match csv_desc.quote {
Some(q) => cols_iter.map(|s| s.trim_matches(q).to_string()).collect(),
None => cols_iter.map(|s| s.to_string()).collect(),
}
};
Ok(result)
}
static USAGE: &'static str = "
Usage: rusty-csv-diff <csv1> <delim1> <quote1> <csv2> <delim2> <quote2>
rusty-csv-diff --help
Options:
-h, --help Show this message.
";
#[derive(Debug, Deserialize)]
struct Args {
arg_csv1: String,
arg_delim1: String,
arg_quote1: String,
arg_csv2: String,
arg_delim2: String,
arg_quote2: String,
}
fn main() {
/*
1. Parse arguments
2. Open CSV files
3. Get columns (cols_N)
4. Get intersection of those two sets of columns(cols_to_compare)
5. Create {column name : column index in cols_N} dicts
6. Create {CSV_col_value : CSV row index in file} dicts,
where CSV_col_value is a unique key made of the value of several CSV columns.
For example, {Alex38 : 76}. Here the name and age form a unique key for the 76th CSV row.
7. Get intersection of key sets of dicts from step 6 (row_keys_to_compare)
8. Loop through row_keys_to_compare, use dicts from step 6 to get line numbers for CSV files
8.1 Loop through cols_to_compare, use dicts from step 5 to extract column values from CSV rows
8.2 Compare values
Input parameters: CSV paths, delimiters, quotes
For example, ./main file_1.csv "," "'" file_2.csv " " ""
*/
/*** 0 ***/
log::set_logger(&MY_LOGGER).unwrap();
log::set_max_level(LevelFilter::Error);
/*** 1 ***/
let args: Args = Docopt::new(USAGE)
.and_then(|d| d.deserialize())
.unwrap_or_else(|e| e.exit());
let csv_desc_1: CsvDesc = match parse_args(&args.arg_csv1, &args.arg_delim1, &args.arg_quote1) {
Err(why) => panic!("error parsing arguments for CSV #1: {}", why),
Ok(result) => result,
};
let csv_desc_2: CsvDesc = match parse_args(&args.arg_csv2, &args.arg_delim2, &args.arg_quote2) {
Err(why) => panic!("error parsing arguments for CSV #2: {}", why),
Ok(result) => result,
};
/*** 2&3 ***/
let csv_cols_1: Vec<String> = match get_csv_cols(&csv_desc_1) {
Err(why) => panic!("couldn't get columns: {}", why),
Ok(cols) => cols,
};
let csv_cols_2: Vec<String> = match get_csv_cols(&csv_desc_2) {
Err(why) => panic!("couldn't get columns: {}", why),
Ok(cols) => cols,
};
/*** 5 ***/
let mut csv_col_index_1 = HashMap::new();
for i in 0..csv_cols_1.len() {
let key = csv_cols_1[i].clone();
if csv_col_index_1.contains_key(&key) {
panic!("duplicate column found in CSV #1: {}", key);
};
csv_col_index_1.insert(key, i);
}
info!("{:?}", csv_col_index_1);
let mut csv_col_index_2 = HashMap::new();
for i in 0..csv_cols_2.len() {
let key = csv_cols_2[i].clone();
if csv_col_index_2.contains_key(&key) {
panic!("duplicate column found in CSV #1: {}", key);
};
csv_col_index_2.insert(key, i);
}
info!("{:?}", csv_col_index_2);
/*** 4 ***/
let mut cols_to_compare = HashSet::new();
for col_1 in csv_col_index_1.keys() {
if csv_col_index_2.contains_key(col_1) {
cols_to_compare.insert(col_1);
};
}
info!("{:?}", | {} | identifier_body | |
main.rs | = BufReader::new(csv_file);
let mut csv_line_iter = csv_reader.lines();
let csv_header: String = match csv_line_iter.next() {
Some(result) => match result {
Err(why) => return Err(format!("error getting csv header: {}", why)),
Ok(header) => header,
},
None => return Err("csv header reading failed".to_string()),
};
let csv_cols: Vec<String> = {
let cols_iter = csv_header.split(csv_desc.delimiter);
match csv_desc.quote {
Some(q) => cols_iter.map(|s| s.trim_matches(q).to_string()).collect(),
None => cols_iter.map(|s| s.to_string()).collect(),
}
};
Ok(csv_cols)
}
fn build_index(csv_desc: &CsvDesc) -> Result<HashMap<String, u64>, String> {
let mut csv_index = HashMap::new();
let csv_file = match File::open(csv_desc.file_path) {
Err(why) => panic!(
"couldn't open csv @ {}: {}",
csv_desc.file_path.display(),
why
),
Ok(file) => file,
};
let csv_reader = BufReader::new(csv_file);
let mut csv_line_iter = csv_reader.lines();
let mut offset_in_file: u64 = 0;
let mut expected_col_count = 0;
let mut row_idx = 0;
loop {
let csv_row: String = match csv_line_iter.next() {
Some(result) => match result {
Err(why) => return Err(format!("error getting csv row: {}", why)),
Ok(header) => header,
},
None => break,
};
let csv_cols: Vec<String> = {
let cols_iter = csv_row.split(csv_desc.delimiter);
match csv_desc.quote {
Some(q) => cols_iter.map(|s| s.trim_matches(q).to_string()).collect(),
None => cols_iter.map(|s| s.to_string()).collect(),
}
};
let curr_col_count = csv_cols.len();
if curr_col_count < 2 {
return Err(format!("fewer than 2 columns in row {}", row_idx));
}
if expected_col_count != 0 && expected_col_count != curr_col_count {
return Err(format!(
"{} columns in row #{}, {} expected",
curr_col_count, row_idx, expected_col_count
));
}
expected_col_count = curr_col_count;
row_idx += 1;
let key = format!("{}{}", csv_cols[0], csv_cols[1]);
csv_index.insert(key, offset_in_file);
offset_in_file += (csv_row.len() + 1) as u64;
}
Ok(csv_index)
}
fn get_csv_row(csv_desc: &CsvDesc, line_offset: u64) -> Result<Vec<String>, String> {
let mut csv_file = match File::open(csv_desc.file_path) {
Err(why) => panic!(
"couldn't open csv @ {}: {}",
csv_desc.file_path.display(),
why
),
Ok(file) => file,
};
if let Err(e) = csv_file.seek(SeekFrom::Start(line_offset)) {
return Err(format!("error seeking offset: {}", e));
}
let mut csv_reader = BufReader::new(csv_file);
let mut row_buff = String::new();
match csv_reader.read_line(&mut row_buff) {
Ok(_n) => {
if row_buff.ends_with("\n") {
row_buff.pop();
}
}
Err(e) => return Err(format!("error getting csv row: {}", e)),
};
let result: Vec<String> = {
let cols_iter = row_buff.split(csv_desc.delimiter);
match csv_desc.quote {
Some(q) => cols_iter.map(|s| s.trim_matches(q).to_string()).collect(),
None => cols_iter.map(|s| s.to_string()).collect(),
}
};
Ok(result)
}
static USAGE: &'static str = "
Usage: rusty-csv-diff <csv1> <delim1> <quote1> <csv2> <delim2> <quote2>
rusty-csv-diff --help
Options:
-h, --help Show this message.
";
#[derive(Debug, Deserialize)]
struct Args {
arg_csv1: String,
arg_delim1: String,
arg_quote1: String,
arg_csv2: String,
arg_delim2: String,
arg_quote2: String,
}
fn main() {
/*
1. Parse arguments
2. Open CSV files
3. Get columns (cols_N)
4. Get intersection of those two sets of columns(cols_to_compare)
5. Create {column name : column index in cols_N} dicts
6. Create {CSV_col_value : CSV row index in file} dicts,
where CSV_col_value is a unique key made of the value of several CSV columns.
For example, {Alex38 : 76}. Here the name and age form a unique key for the 76th CSV row.
7. Get intersection of key sets of dicts from step 6 (row_keys_to_compare)
8. Loop through row_keys_to_compare, use dicts from step 6 to get line numbers for CSV files
8.1 Loop through cols_to_compare, use dicts from step 5 to extract column values from CSV rows
8.2 Compare values
Input parameters: CSV paths, delimiters, quotes
For example, ./main file_1.csv "," "'" file_2.csv " " ""
*/
/*** 0 ***/
log::set_logger(&MY_LOGGER).unwrap();
log::set_max_level(LevelFilter::Error);
/*** 1 ***/
let args: Args = Docopt::new(USAGE)
.and_then(|d| d.deserialize())
.unwrap_or_else(|e| e.exit());
let csv_desc_1: CsvDesc = match parse_args(&args.arg_csv1, &args.arg_delim1, &args.arg_quote1) {
Err(why) => panic!("error parsing arguments for CSV #1: {}", why),
Ok(result) => result,
};
let csv_desc_2: CsvDesc = match parse_args(&args.arg_csv2, &args.arg_delim2, &args.arg_quote2) {
Err(why) => panic!("error parsing arguments for CSV #2: {}", why),
Ok(result) => result,
};
/*** 2&3 ***/
let csv_cols_1: Vec<String> = match get_csv_cols(&csv_desc_1) {
Err(why) => panic!("couldn't get columns: {}", why),
Ok(cols) => cols,
};
let csv_cols_2: Vec<String> = match get_csv_cols(&csv_desc_2) {
Err(why) => panic!("couldn't get columns: {}", why),
Ok(cols) => cols,
};
/*** 5 ***/
let mut csv_col_index_1 = HashMap::new();
for i in 0..csv_cols_1.len() {
let key = csv_cols_1[i].clone();
if csv_col_index_1.contains_key(&key) {
panic!("duplicate column found in CSV #1: {}", key);
};
csv_col_index_1.insert(key, i);
}
info!("{:?}", csv_col_index_1);
let mut csv_col_index_2 = HashMap::new();
for i in 0..csv_cols_2.len() {
let key = csv_cols_2[i].clone();
if csv_col_index_2.contains_key(&key) {
panic!("duplicate column found in CSV #1: {}", key);
};
csv_col_index_2.insert(key, i);
}
info!("{:?}", csv_col_index_2);
/*** 4 ***/
let mut cols_to_compare = HashSet::new();
for col_1 in csv_col_index_1.keys() {
if csv_col_index_2.contains_key(col_1) {
cols_to_compare.insert(col_1);
};
}
info!("{:?}", cols_to_compare);
/*** 6 ***/
// let's assume that the unique key is (col_0 + col_1)
let csv_index_1 = match build_index(&csv_desc_1) {
Err(why) => panic!("failed building index #1: {}", why),
Ok(index) => index,
};
let csv_index_2 = match build_index(&csv_desc_2) {
Err(why) => panic!("failed building index #2: {}", why),
Ok(index) => index,
};
/*** 7 ***/
let mut row_keys_to_compare = HashSet::new();
for key_1 in csv_index_1.keys() {
if csv_index_2.contains_key(key_1) {
row_keys_to_compare.insert(key_1);
};
}
info!("{:?}", row_keys_to_compare);
/*** 8 ***/
for row_key in row_keys_to_compare {
let index_1 = *csv_index_1.get(row_key).unwrap();
let index_2 = *csv_index_2.get(row_key).unwrap();
let row_1 = match get_csv_row(&csv_desc_1, index_1) {
Ok(row) => row,
Err(e) => panic!("failed getting csv row #1: {}", e),
};
| let row_2 = match get_csv_row(&csv_desc_2, index_2) {
Ok(row) => row, | random_line_split | |
verify.rs | is_nfkc(path) => path.chars().nfc().collect::<String>().into(),
_ => path,
}
}
impl FileWithSize {
fn from_disk(path_orig: &Path, krate_root: &OsStr) -> Self {
// we need to cut off .cargo/registry/src/github.com-1ecc6299db9ec823/
let index = path_orig
.iter()
.enumerate()
.position(|e| e.1 == krate_root)
.expect("must find cargo root in path contained within it");
let path = path_orig.iter().skip(index).collect::<PathBuf>();
FileWithSize {
path: normalized(path),
size: std::fs::metadata(path_orig).unwrap().len(),
}
}
// TODO: understand this R: Read stuff
fn from_archive<R: std::io::Read>(entry: &tar::Entry<'_, R>) -> Self {
FileWithSize {
path: normalized(entry.path().unwrap().into_owned()),
size: entry.size(),
}
}
}
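// Illustrative note (added here, not part of the original source): `from_disk` trims
// the path down to the part below the registry source root, so
//   .cargo/registry/src/github.com-1ecc6299db9ec823/bytes-0.4.12/src/lib.rs
// becomes
//   bytes-0.4.12/src/lib.rs
// which matches the relative paths stored inside the .crate tar archive and lets the
// two file lists be compared entry by entry.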
/// Size difference of a file in the .gz archive and extracted source
#[derive(Debug, Clone)]
pub(crate) struct FileSizeDifference {
path: PathBuf,
size_archive: u64,
size_source: u64,
}
/// The Difference between extracted crate sources and an .crate tar.gz archive
#[derive(Debug, Clone)]
pub(crate) struct Diff {
// the crate we are diffing
krate_name: String,
files_missing_in_checkout: Vec<PathBuf>,
additional_files_in_checkout: Vec<PathBuf>,
files_size_difference: Vec<FileSizeDifference>,
source_path: Option<PathBuf>,
}
impl Diff {
fn new() -> Self {
Self {
krate_name: String::new(),
files_missing_in_checkout: Vec::new(),
additional_files_in_checkout: Vec::new(),
files_size_difference: Vec::new(),
source_path: None,
}
}
/// returns true if there is no diff
fn is_ok(&self) -> bool {
self.files_missing_in_checkout.is_empty()
&& self.additional_files_in_checkout.is_empty()
&& self.files_size_difference.is_empty()
}
pub(crate) fn details(&self) -> String {
let mut s = format!("Crate: {}\n", self.krate_name);
if !self.files_missing_in_checkout.is_empty() {
write!(
s,
"Missing from source:\n{}",
self.files_missing_in_checkout
.iter()
.map(|path| path.display().to_string())
.collect::<Vec<String>>()
.join(", ")
)
.unwrap();
s.push('\n');
}
if !self.additional_files_in_checkout.is_empty() {
write!(
s,
"Not found in archive/additional:\n{}",
self.additional_files_in_checkout
.iter()
.map(|path| path.display().to_string())
.collect::<Vec<String>>()
.join(", ")
)
.unwrap();
s.push('\n');
}
if !self.files_size_difference.is_empty() {
self.files_size_difference
.iter()
.map(|fsd| {
format!(
"File: {}, size in archive: {}b, size in checkout: {}b\n",
fsd.path.display(),
fsd.size_archive,
fsd.size_source
)
})
.for_each(|strg| s.push_str(&strg));
}
s
}
}
/// take a path to an extracted .crate source and map it to the corresponding .carte archive path
fn map_src_path_to_cache_path(src_path: &Path) -> PathBuf |
dir.push(&comp1_with_crate_ext); // bytes-0.4.12.crate
dir.into_iter().collect::<PathBuf>()
}
/// look into the .gz archive and get all the contained files+sizes
fn sizes_of_archive_files(path: &Path) -> Vec<FileWithSize> {
let tar_gz = File::open(path).unwrap();
// extract the tar
let tar = GzDecoder::new(tar_gz);
let mut archive = Archive::new(tar);
let archive_files = archive.entries().unwrap();
// println!("files inside the archive");
// archive_files.for_each(|x| println!("{:?}", x.unwrap().path()));
archive_files
.into_iter()
.map(|entry| FileWithSize::from_archive(&entry.unwrap()))
.collect::<Vec<FileWithSize>>()
}
/// get the files and their sizes of the extracted .crate sources
fn sizes_of_src_dir(source: &Path) -> Vec<FileWithSize> {
let krate_root = source.iter().last().unwrap();
WalkDir::new(source)
.into_iter()
.map(Result::unwrap)
// need to skip directories since the are only implicitly inside the tar (via file paths)
.filter(|de| de.file_type().is_file())
.map(|direntry| {
let p = direntry.path();
p.to_owned()
})
.map(|p| FileWithSize::from_disk(&p, krate_root))
.collect()
}
/// compare files of a .crate gz archive and extracted sources and return a Diff object which describes those changes
fn diff_crate_and_source(krate: &Path, source: &Path) -> Diff {
let files_of_archive: Vec<FileWithSize> = sizes_of_archive_files(krate);
let files_of_source: Vec<FileWithSize> = sizes_of_src_dir(source);
let mut diff = Diff::new();
diff.source_path = Some(source.to_path_buf());
diff.krate_name = source.iter().last().unwrap().to_str().unwrap().to_string();
let files_of_source_paths: Vec<&PathBuf> =
files_of_source.iter().map(|fws| &fws.path).collect();
for archive_file in &files_of_archive {
let archive_f_path = &archive_file.path;
if !files_of_source_paths.contains(&archive_f_path) {
// the file is contained in the archive but not in the extracted source
diff.files_missing_in_checkout.push(archive_f_path.clone());
} else if files_of_source_paths.contains(&archive_f_path) {
// file is contained in both, but sizes differ
match files_of_source
.iter()
.find(|fws| fws.path == archive_file.path)
{
Some(fws) => {
if fws.size != archive_file.size {
diff.files_size_difference.push(FileSizeDifference {
path: fws.path.clone(),
size_archive: archive_file.size,
size_source: fws.size,
});
}
}
None => unreachable!(), // we already checked this
};
}
}
let files_of_archive: Vec<&PathBuf> = files_of_archive.iter().map(|fws| &fws.path).collect();
for source_file in files_of_source_paths
.iter()
.filter(|path| path.file_name().unwrap() != ".cargo-ok")
.filter(|path| !path.is_dir() /* skip dirs */)
{
// dbg!(source_file);
#[allow(clippy::implicit_clone)]
if !files_of_archive.iter().any(|path| path == source_file) {
diff.additional_files_in_checkout
.push(source_file.to_path_buf());
}
}
diff
}
pub(crate) fn verify_crates(
registry_sources_caches: &mut registry_sources::RegistrySourceCaches,
) -> Result<(), Vec<Diff>> {
// iterate over all the extracted sources that we have
let bad_sources: Vec<_> = registry_sources_caches
.items()
.par_iter()
// get the paths to the source and the .crate for all extracted crates
.map(|source| (source, map_src_path_to_cache_path(source)))
// we need both the .crate and the directory to exist for verification
.filter(|(source, krate)| source.exists() && krate.exists())
// look into the .gz archive and get all the contained files+sizes
.map(|(source, krate)| diff_crate_and_source(&krate, source))
| {
// for each directory, find the path to the corresponding .crate archive
// .cargo/registry/src/github.com-1ecc6299db9ec823/bytes-0.4.12
// corresponds to
// .cargo/registry/cache/github.com-1ecc6299db9ec823/bytes-0.4.12.crate
// reverse, and "pop" the front components
let mut dir = src_path.iter().collect::<Vec<&OsStr>>();
let comp1 = dir.pop().unwrap(); // /bytes-0.4.12
let comp2 = dir.pop().unwrap(); // github.com-1ecc6299db9ec823
let _src = dir.pop().unwrap(); // throw this away and add "cache" instead
// reconstruct the fixed path in reverse order
dir.push(OsStr::new("cache"));
dir.push(comp2); // github.com...
// we need to add the .crate extension (path to the gzip archive)
let mut comp1_with_crate_ext = comp1.to_os_string();
comp1_with_crate_ext.push(".crate"); | identifier_body |
verify.rs | !is_nfkc(path) => path.chars().nfc().collect::<String>().into(),
_ => path,
}
}
impl FileWithSize {
fn from_disk(path_orig: &Path, krate_root: &OsStr) -> Self {
// we need to cut off .cargo/registry/src/github.com-1ecc6299db9ec823/
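// illustrative sketch of the trim (hypothetical home directory and crate, not from a real run):
// path_orig: /home/user/.cargo/registry/src/github.com-1ecc6299db9ec823/bytes-0.4.12/src/lib.rs
// krate_root: bytes-0.4.12  =>  path: bytes-0.4.12/src/lib.rs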
let index = path_orig
.iter()
.enumerate()
.position(|e| e.1 == krate_root)
.expect("must find cargo root in path contained within it");
let path = path_orig.iter().skip(index).collect::<PathBuf>();
FileWithSize {
path: normalized(path),
size: std::fs::metadata(path_orig).unwrap().len(),
}
}
// TODO: understand this R: Read stuff
fn from_archive<R: std::io::Read>(entry: &tar::Entry<'_, R>) -> Self {
FileWithSize {
path: normalized(entry.path().unwrap().into_owned()),
size: entry.size(),
}
}
}
/// Size difference of a file in the .gz archive and extracted source
#[derive(Debug, Clone)]
pub(crate) struct FileSizeDifference {
path: PathBuf,
size_archive: u64,
size_source: u64,
}
/// The difference between extracted crate sources and a .crate tar.gz archive
#[derive(Debug, Clone)]
pub(crate) struct Diff {
// the crate we are diffing
krate_name: String,
files_missing_in_checkout: Vec<PathBuf>,
additional_files_in_checkout: Vec<PathBuf>,
files_size_difference: Vec<FileSizeDifference>,
source_path: Option<PathBuf>,
}
impl Diff {
fn new() -> Self {
Self {
krate_name: String::new(),
files_missing_in_checkout: Vec::new(),
additional_files_in_checkout: Vec::new(),
files_size_difference: Vec::new(),
source_path: None,
}
}
/// returns true if there is no diff
fn is_ok(&self) -> bool {
self.files_missing_in_checkout.is_empty()
&& self.additional_files_in_checkout.is_empty()
&& self.files_size_difference.is_empty()
}
pub(crate) fn details(&self) -> String {
let mut s = format!("Crate: {}\n", self.krate_name);
if !self.files_missing_in_checkout.is_empty() {
write!(
s,
"Missing from source:\n{}",
self.files_missing_in_checkout
.iter()
.map(|path| path.display().to_string())
.collect::<Vec<String>>()
.join(", ")
)
.unwrap();
s.push('\n');
}
if !self.additional_files_in_checkout.is_empty() {
write!(
s,
"Not found in archive/additional:\n{}",
self.additional_files_in_checkout
.iter()
.map(|path| path.display().to_string())
.collect::<Vec<String>>()
.join(", ")
)
.unwrap();
s.push('\n');
}
if !self.files_size_difference.is_empty() {
self.files_size_difference
.iter()
.map(|fsd| {
format!(
"File: {}, size in archive: {}b, size in checkout: {}b\n",
fsd.path.display(),
fsd.size_archive,
fsd.size_source
)
})
.for_each(|strg| s.push_str(&strg));
}
s
}
}
/// take a path to an extracted .crate source and map it to the corresponding .crate archive path
fn map_src_path_to_cache_path(src_path: &Path) -> PathBuf {
// for each directory, find the path to the corresponding .crate archive
// .cargo/registry/src/github.com-1ecc6299db9ec823/bytes-0.4.12
// corresponds to
// .cargo/registry/cache/github.com-1ecc6299db9ec823/bytes-0.4.12.crate
// reverse, and "pop" the front components
let mut dir = src_path.iter().collect::<Vec<&OsStr>>();
let comp1 = dir.pop().unwrap(); // /bytes-0.4.12
let comp2 = dir.pop().unwrap(); // github.com-1ecc6299db9ec823
let _src = dir.pop().unwrap(); // throw this away and add "cache" instead
// reconstruct the fixed path in reverse order
dir.push(OsStr::new("cache"));
dir.push(comp2); // github.com...
// we need to add the .crate extension (path to the gzip archive)
let mut comp1_with_crate_ext = comp1.to_os_string();
comp1_with_crate_ext.push(".crate");
dir.push(&comp1_with_crate_ext); // bytes-0.4.12.crate
dir.into_iter().collect::<PathBuf>()
}
/// look into the .gz archive and get all the contained files+sizes
fn sizes_of_archive_files(path: &Path) -> Vec<FileWithSize> {
let tar_gz = File::open(path).unwrap();
// extract the tar
let tar = GzDecoder::new(tar_gz);
let mut archive = Archive::new(tar);
let archive_files = archive.entries().unwrap();
// println!("files inside the archive");
// archive_files.for_each(|x| println!("{:?}", x.unwrap().path()));
archive_files
.into_iter()
.map(|entry| FileWithSize::from_archive(&entry.unwrap()))
.collect::<Vec<FileWithSize>>()
}
/// get the files and their sizes of the extracted .crate sources
fn sizes_of_src_dir(source: &Path) -> Vec<FileWithSize> {
let krate_root = source.iter().last().unwrap();
WalkDir::new(source)
.into_iter()
.map(Result::unwrap)
// need to skip directories since they are only implicitly inside the tar (via file paths)
.filter(|de| de.file_type().is_file())
.map(|direntry| {
let p = direntry.path();
p.to_owned()
})
.map(|p| FileWithSize::from_disk(&p, krate_root))
.collect()
}
/// compare files of a .crate gz archive and extracted sources and return a Diff object which describes those changes
fn diff_crate_and_source(krate: &Path, source: &Path) -> Diff {
let files_of_archive: Vec<FileWithSize> = sizes_of_archive_files(krate);
let files_of_source: Vec<FileWithSize> = sizes_of_src_dir(source);
let mut diff = Diff::new();
diff.source_path = Some(source.to_path_buf());
diff.krate_name = source.iter().last().unwrap().to_str().unwrap().to_string();
let files_of_source_paths: Vec<&PathBuf> =
files_of_source.iter().map(|fws| &fws.path).collect();
for archive_file in &files_of_archive {
let archive_f_path = &archive_file.path;
if !files_of_source_paths.contains(&archive_f_path) {
// the file is contained in the archive but not in the extracted source
diff.files_missing_in_checkout.push(archive_f_path.clone());
} else if files_of_source_paths.contains(&archive_f_path) {
// file is contained in both, but sizes differ
match files_of_source
.iter()
.find(|fws| fws.path == archive_file.path) | diff.files_size_difference.push(FileSizeDifference {
path: fws.path.clone(),
size_archive: archive_file.size,
size_source: fws.size,
});
}
}
None => unreachable!(), // we already checked this
};
}
}
let files_of_archive: Vec<&PathBuf> = files_of_archive.iter().map(|fws| &fws.path).collect();
for source_file in files_of_source_paths
.iter()
.filter(|path| path.file_name().unwrap() != ".cargo-ok")
.filter(|path| !path.is_dir() /* skip dirs */)
{
// dbg!(source_file);
#[allow(clippy::implicit_clone)]
if !files_of_archive.iter().any(|path| path == source_file) {
diff.additional_files_in_checkout
.push(source_file.to_path_buf());
}
}
diff
}
pub(crate) fn verify_crates(
registry_sources_caches: &mut registry_sources::RegistrySourceCaches,
) -> Result<(), Vec<Diff>> {
// iterate over all the extracted sources that we have
let bad_sources: Vec<_> = registry_sources_caches
.items()
.par_iter()
// get the paths to the source and the .crate for all extracted crates
.map(|source| (source, map_src_path_to_cache_path(source)))
// we need both the .crate and the directory to exist for verification
.filter(|(source, krate)| source.exists() && krate.exists())
// look into the .gz archive and get all the contained files+sizes
.map(|(source, krate)| diff_crate_and_source(&krate, source))
// | {
Some(fws) => {
if fws.size != archive_file.size { | random_line_split |
verify.rs | is_nfkc(path) => path.chars().nfc().collect::<String>().into(),
_ => path,
}
}
impl FileWithSize {
fn from_disk(path_orig: &Path, krate_root: &OsStr) -> Self {
// we need to cut off .cargo/registry/src/github.com-1ecc6299db9ec823/
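// illustrative sketch of the trim (hypothetical home directory and crate, not from a real run):
// path_orig: /home/user/.cargo/registry/src/github.com-1ecc6299db9ec823/bytes-0.4.12/src/lib.rs
// krate_root: bytes-0.4.12  =>  path: bytes-0.4.12/src/lib.rs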
let index = path_orig
.iter()
.enumerate()
.position(|e| e.1 == krate_root)
.expect("must find cargo root in path contained within it");
let path = path_orig.iter().skip(index).collect::<PathBuf>();
FileWithSize {
path: normalized(path),
size: std::fs::metadata(path_orig).unwrap().len(),
}
}
// TODO: understand this R: Read stuff
fn from_archive<R: std::io::Read>(entry: &tar::Entry<'_, R>) -> Self {
FileWithSize {
path: normalized(entry.path().unwrap().into_owned()),
size: entry.size(),
}
}
}
/// Size difference of a file in the .gz archive and extracted source
#[derive(Debug, Clone)]
pub(crate) struct FileSizeDifference {
path: PathBuf,
size_archive: u64,
size_source: u64,
}
/// The difference between extracted crate sources and a .crate tar.gz archive
#[derive(Debug, Clone)]
pub(crate) struct Diff {
// the crate we are diffing
krate_name: String,
files_missing_in_checkout: Vec<PathBuf>,
additional_files_in_checkout: Vec<PathBuf>,
files_size_difference: Vec<FileSizeDifference>,
source_path: Option<PathBuf>,
}
impl Diff {
fn new() -> Self {
Self {
krate_name: String::new(),
files_missing_in_checkout: Vec::new(),
additional_files_in_checkout: Vec::new(),
files_size_difference: Vec::new(),
source_path: None,
}
}
/// returns true if there is no diff
fn is_ok(&self) -> bool {
self.files_missing_in_checkout.is_empty()
&& self.additional_files_in_checkout.is_empty()
&& self.files_size_difference.is_empty()
}
pub(crate) fn details(&self) -> String {
let mut s = format!("Crate: {}\n", self.krate_name);
if !self.files_missing_in_checkout.is_empty() {
write!(
s,
"Missing from source:\n{}",
self.files_missing_in_checkout
.iter()
.map(|path| path.display().to_string())
.collect::<Vec<String>>()
.join(", ")
)
.unwrap();
s.push('\n');
}
if !self.additional_files_in_checkout.is_empty() {
write!(
s,
"Not found in archive/additional:\n{}",
self.additional_files_in_checkout
.iter()
.map(|path| path.display().to_string())
.collect::<Vec<String>>()
.join(", ")
)
.unwrap();
s.push('\n');
}
if !self.files_size_difference.is_empty() {
self.files_size_difference
.iter()
.map(|fsd| {
format!(
"File: {}, size in archive: {}b, size in checkout: {}b\n",
fsd.path.display(),
fsd.size_archive,
fsd.size_source
)
})
.for_each(|strg| s.push_str(&strg));
}
s
}
}
/// take a path to an extracted .crate source and map it to the corresponding .crate archive path
fn map_src_path_to_cache_path(src_path: &Path) -> PathBuf {
// for each directory, find the path to the corresponding .crate archive
// .cargo/registry/src/github.com-1ecc6299db9ec823/bytes-0.4.12
// corresponds to
// .cargo/registry/cache/github.com-1ecc6299db9ec823/bytes-0.4.12.crate
// reverse, and "pop" the front components
let mut dir = src_path.iter().collect::<Vec<&OsStr>>();
let comp1 = dir.pop().unwrap(); // /bytes-0.4.12
let comp2 = dir.pop().unwrap(); // github.com-1ecc6299db9ec823
let _src = dir.pop().unwrap(); // throw this away and add "cache" instead
// reconstruct the fixed path in reverse order
dir.push(OsStr::new("cache"));
dir.push(comp2); // github.com...
// we need to add the .crate extension (path to the gzip archive)
let mut comp1_with_crate_ext = comp1.to_os_string();
comp1_with_crate_ext.push(".crate");
dir.push(&comp1_with_crate_ext); // bytes-0.4.12.crate
dir.into_iter().collect::<PathBuf>()
}
/// look into the .gz archive and get all the contained files+sizes
fn sizes_of_archive_files(path: &Path) -> Vec<FileWithSize> {
let tar_gz = File::open(path).unwrap();
// extract the tar
let tar = GzDecoder::new(tar_gz);
let mut archive = Archive::new(tar);
let archive_files = archive.entries().unwrap();
// println!("files inside the archive");
// archive_files.for_each(|x| println!("{:?}", x.unwrap().path()));
archive_files
.into_iter()
.map(|entry| FileWithSize::from_archive(&entry.unwrap()))
.collect::<Vec<FileWithSize>>()
}
/// get the files and their sizes of the extracted .crate sources
fn sizes_of_src_dir(source: &Path) -> Vec<FileWithSize> {
let krate_root = source.iter().last().unwrap();
WalkDir::new(source)
.into_iter()
.map(Result::unwrap)
// need to skip directories since they are only implicitly inside the tar (via file paths)
.filter(|de| de.file_type().is_file())
.map(|direntry| {
let p = direntry.path();
p.to_owned()
})
.map(|p| FileWithSize::from_disk(&p, krate_root))
.collect()
}
/// compare files of a .crate gz archive and extracted sources and return a Diff object which describes those changes
fn diff_crate_and_source(krate: &Path, source: &Path) -> Diff {
let files_of_archive: Vec<FileWithSize> = sizes_of_archive_files(krate);
let files_of_source: Vec<FileWithSize> = sizes_of_src_dir(source);
let mut diff = Diff::new();
diff.source_path = Some(source.to_path_buf());
diff.krate_name = source.iter().last().unwrap().to_str().unwrap().to_string();
let files_of_source_paths: Vec<&PathBuf> =
files_of_source.iter().map(|fws| &fws.path).collect();
for archive_file in &files_of_archive {
let archive_f_path = &archive_file.path;
if !files_of_source_paths.contains(&archive_f_path) {
// the file is contained in the archive but not in the extracted source
diff.files_missing_in_checkout.push(archive_f_path.clone());
} else if files_of_source_paths.contains(&archive_f_path) {
// file is contained in both, but sizes differ
match files_of_source
.iter()
.find(|fws| fws.path == archive_file.path)
{
Some(fws) => |
None => unreachable!(), // we already checked this
};
}
}
let files_of_archive: Vec<&PathBuf> = files_of_archive.iter().map(|fws| &fws.path).collect();
for source_file in files_of_source_paths
.iter()
.filter(|path| path.file_name().unwrap() != ".cargo-ok")
.filter(|path| !path.is_dir() /* skip dirs */)
{
// dbg!(source_file);
#[allow(clippy::implicit_clone)]
if !files_of_archive.iter().any(|path| path == source_file) {
diff.additional_files_in_checkout
.push(source_file.to_path_buf());
}
}
diff
}
pub(crate) fn verify_crates(
registry_sources_caches: &mut registry_sources::RegistrySourceCaches,
) -> Result<(), Vec<Diff>> {
// iterate over all the extracted sources that we have
let bad_sources: Vec<_> = registry_sources_caches
.items()
.par_iter()
// get the paths to the source and the .crate for all extracted crates
.map(|source| (source, map_src_path_to_cache_path(source)))
// we need both the .crate and the directory to exist for verification
.filter(|(source, krate)| source.exists() && krate.exists())
// look into the .gz archive and get all the contained files+sizes
.map(|(source, krate)| diff_crate_and_source(&krate, source))
| {
if fws.size != archive_file.size {
diff.files_size_difference.push(FileSizeDifference {
path: fws.path.clone(),
size_archive: archive_file.size,
size_source: fws.size,
});
}
} | conditional_block |
verify.rs | is_nfkc(path) => path.chars().nfc().collect::<String>().into(),
_ => path,
}
}
impl FileWithSize {
fn from_disk(path_orig: &Path, krate_root: &OsStr) -> Self {
// we need to cut off .cargo/registry/src/github.com-1ecc6299db9ec823/
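// illustrative sketch of the trim (hypothetical home directory and crate, not from a real run):
// path_orig: /home/user/.cargo/registry/src/github.com-1ecc6299db9ec823/bytes-0.4.12/src/lib.rs
// krate_root: bytes-0.4.12  =>  path: bytes-0.4.12/src/lib.rs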
let index = path_orig
.iter()
.enumerate()
.position(|e| e.1 == krate_root)
.expect("must find cargo root in path contained within it");
let path = path_orig.iter().skip(index).collect::<PathBuf>();
FileWithSize {
path: normalized(path),
size: std::fs::metadata(path_orig).unwrap().len(),
}
}
// TODO: understand this R: Read stuff
fn from_archive<R: std::io::Read>(entry: &tar::Entry<'_, R>) -> Self {
FileWithSize {
path: normalized(entry.path().unwrap().into_owned()),
size: entry.size(),
}
}
}
/// Size difference of a file in the .gz archive and extracted source
#[derive(Debug, Clone)]
pub(crate) struct FileSizeDifference {
path: PathBuf,
size_archive: u64,
size_source: u64,
}
/// The difference between extracted crate sources and a .crate tar.gz archive
#[derive(Debug, Clone)]
pub(crate) struct Diff {
// the crate we are diffing
krate_name: String,
files_missing_in_checkout: Vec<PathBuf>,
additional_files_in_checkout: Vec<PathBuf>,
files_size_difference: Vec<FileSizeDifference>,
source_path: Option<PathBuf>,
}
impl Diff {
fn new() -> Self {
Self {
krate_name: String::new(),
files_missing_in_checkout: Vec::new(),
additional_files_in_checkout: Vec::new(),
files_size_difference: Vec::new(),
source_path: None,
}
}
/// returns true if there is no diff
fn is_ok(&self) -> bool {
self.files_missing_in_checkout.is_empty()
&& self.additional_files_in_checkout.is_empty()
&& self.files_size_difference.is_empty()
}
pub(crate) fn details(&self) -> String {
let mut s = format!("Crate: {}\n", self.krate_name);
if !self.files_missing_in_checkout.is_empty() {
write!(
s,
"Missing from source:\n{}",
self.files_missing_in_checkout
.iter()
.map(|path| path.display().to_string())
.collect::<Vec<String>>()
.join(", ")
)
.unwrap();
s.push('\n');
}
if !self.additional_files_in_checkout.is_empty() {
write!(
s,
"Not found in archive/additional:\n{}",
self.additional_files_in_checkout
.iter()
.map(|path| path.display().to_string())
.collect::<Vec<String>>()
.join(", ")
)
.unwrap();
s.push('\n');
}
if !self.files_size_difference.is_empty() {
self.files_size_difference
.iter()
.map(|fsd| {
format!(
"File: {}, size in archive: {}b, size in checkout: {}b\n",
fsd.path.display(),
fsd.size_archive,
fsd.size_source
)
})
.for_each(|strg| s.push_str(&strg));
}
s
}
}
/// take a path to an extracted .crate source and map it to the corresponding .crate archive path
fn | (src_path: &Path) -> PathBuf {
// for each directory, find the path to the corresponding .crate archive
// .cargo/registry/src/github.com-1ecc6299db9ec823/bytes-0.4.12
// corresponds to
// .cargo/registry/cache/github.com-1ecc6299db9ec823/bytes-0.4.12.crate
// reverse, and "pop" the front components
let mut dir = src_path.iter().collect::<Vec<&OsStr>>();
let comp1 = dir.pop().unwrap(); // /bytes-0.4.12
let comp2 = dir.pop().unwrap(); // github.com-1ecc6299db9ec823
let _src = dir.pop().unwrap(); // throw this away and add "cache" instead
// reconstruct the fixed path in reverse order
dir.push(OsStr::new("cache"));
dir.push(comp2); // github.com...
// we need to add the .crate extension (path to the gzip archive)
let mut comp1_with_crate_ext = comp1.to_os_string();
comp1_with_crate_ext.push(".crate");
dir.push(&comp1_with_crate_ext); // bytes-0.4.12.crate
dir.into_iter().collect::<PathBuf>()
}
/// look into the .gz archive and get all the contained files+sizes
fn sizes_of_archive_files(path: &Path) -> Vec<FileWithSize> {
let tar_gz = File::open(path).unwrap();
// extract the tar
let tar = GzDecoder::new(tar_gz);
let mut archive = Archive::new(tar);
let archive_files = archive.entries().unwrap();
// println!("files inside the archive");
// archive_files.for_each(|x| println!("{:?}", x.unwrap().path()));
archive_files
.into_iter()
.map(|entry| FileWithSize::from_archive(&entry.unwrap()))
.collect::<Vec<FileWithSize>>()
}
/// get the files and their sizes of the extracted .crate sources
fn sizes_of_src_dir(source: &Path) -> Vec<FileWithSize> {
let krate_root = source.iter().last().unwrap();
WalkDir::new(source)
.into_iter()
.map(Result::unwrap)
// need to skip directories since they are only implicitly inside the tar (via file paths)
.filter(|de| de.file_type().is_file())
.map(|direntry| {
let p = direntry.path();
p.to_owned()
})
.map(|p| FileWithSize::from_disk(&p, krate_root))
.collect()
}
/// compare files of a .crate gz archive and extracted sources and return a Diff object which describes those changes
fn diff_crate_and_source(krate: &Path, source: &Path) -> Diff {
let files_of_archive: Vec<FileWithSize> = sizes_of_archive_files(krate);
let files_of_source: Vec<FileWithSize> = sizes_of_src_dir(source);
let mut diff = Diff::new();
diff.source_path = Some(source.to_path_buf());
diff.krate_name = source.iter().last().unwrap().to_str().unwrap().to_string();
let files_of_source_paths: Vec<&PathBuf> =
files_of_source.iter().map(|fws| &fws.path).collect();
for archive_file in &files_of_archive {
let archive_f_path = &archive_file.path;
if !files_of_source_paths.contains(&archive_f_path) {
// the file is contained in the archive but not in the extracted source
diff.files_missing_in_checkout.push(archive_f_path.clone());
} else if files_of_source_paths.contains(&archive_f_path) {
// file is contained in both, but sizes differ
match files_of_source
.iter()
.find(|fws| fws.path == archive_file.path)
{
Some(fws) => {
if fws.size != archive_file.size {
diff.files_size_difference.push(FileSizeDifference {
path: fws.path.clone(),
size_archive: archive_file.size,
size_source: fws.size,
});
}
}
None => unreachable!(), // we already checked this
};
}
}
let files_of_archive: Vec<&PathBuf> = files_of_archive.iter().map(|fws| &fws.path).collect();
for source_file in files_of_source_paths
.iter()
.filter(|path| path.file_name().unwrap() != ".cargo-ok")
.filter(|path| !path.is_dir() /* skip dirs */)
{
// dbg!(source_file);
#[allow(clippy::implicit_clone)]
if !files_of_archive.iter().any(|path| path == source_file) {
diff.additional_files_in_checkout
.push(source_file.to_path_buf());
}
}
diff
}
pub(crate) fn verify_crates(
registry_sources_caches: &mut registry_sources::RegistrySourceCaches,
) -> Result<(), Vec<Diff>> {
// iterate over all the extracted sources that we have
let bad_sources: Vec<_> = registry_sources_caches
.items()
.par_iter()
// get the paths to the source and the .crate for all extracted crates
.map(|source| (source, map_src_path_to_cache_path(source)))
// we need both the .crate and the directory to exist for verification
.filter(|(source, krate)| source.exists() && krate.exists())
// look into the .gz archive and get all the contained files+sizes
.map(|(source, krate)| diff_crate_and_source(&krate, source))
// | map_src_path_to_cache_path | identifier_name |
load.rs | = fs::read_to_string("site-config.toml") {
toml::from_str(&s)?
} else {
Config {
keys: Keys {
github_api_token: std::env::var("GITHUB_API_TOKEN").ok(),
github_webhook_secret: std::env::var("GITHUB_WEBHOOK_SECRET").ok(),
},
}
};
let master_commits = MasterCommitCache::download().await?;
Ok(Self {
config,
index: ArcSwap::new(Arc::new(index)),
master_commits: Arc::new(ArcSwap::new(Arc::new(master_commits))),
pool,
landing_page: ArcSwap::new(Arc::new(None)),
})
}
pub async fn conn(&self) -> Box<dyn database::pool::Connection> {
self.pool.connection().await
}
/// Returns the not yet tested commits
pub async fn missing_commits(&self) -> Vec<(Commit, MissingReason)> {
let conn = self.conn().await;
let (queued_pr_commits, in_progress_artifacts) =
futures::join!(conn.queued_commits(), conn.in_progress_artifacts());
let master_commits = &self.get_master_commits().commits;
let index = self.index.load();
let all_commits = index
.commits()
.iter()
.map(|commit| commit.sha.clone())
.collect::<HashSet<_>>();
calculate_missing(
master_commits.clone(),
queued_pr_commits,
in_progress_artifacts,
all_commits,
)
}
/// Returns the not yet tested published artifacts, sorted from newest to oldest.
pub async fn missing_published_artifacts(&self) -> anyhow::Result<Vec<String>> {
let artifact_list: String = reqwest::get("https://static.rust-lang.org/manifests.txt")
.await?
.text()
.await?;
lazy_static! {
static ref VERSION_REGEX: Regex = Regex::new(r"(\d+\.\d+.\d+)").unwrap();
}
let conn = self.conn().await;
let index = self.index.load();
let tested_artifacts: HashSet<_> = index.artifacts().collect();
let in_progress_tagged_artifacts: HashSet<_> = conn
.in_progress_artifacts()
.await
.into_iter()
.filter_map(|artifact| match artifact {
ArtifactId::Commit(_) => None,
ArtifactId::Tag(tag) => Some(tag),
})
.collect();
// Gather at most the last 20 published artifacts that are not yet tested and
// are not in progress.
let artifacts: Vec<_> = artifact_list
.lines()
.rev()
.filter_map(parse_published_artifact_tag)
.take(20)
.filter(|artifact| {
!tested_artifacts.contains(artifact.as_str())
&& !in_progress_tagged_artifacts.contains(artifact.as_str())
})
.collect();
Ok(artifacts)
}
pub async fn get_benchmark_category_map(&self) -> HashMap<Benchmark, Category> {
let benchmarks = self.pool.connection().await.get_compile_benchmarks().await;
benchmarks
.into_iter()
.map(|bench| {
(
bench.name.as_str().into(),
Category::from_db_representation(&bench.category).unwrap(),
)
})
.collect()
}
/// Get cached master-branch Rust commits.
/// Returns cached results immediately, but if the cached value is older than one minute,
/// the cache is refreshed in a background task for next time.
pub fn get_master_commits(&self) -> Guard<Arc<MasterCommitCache>> {
let commits = self.master_commits.load();
if commits.updated.elapsed() > std::time::Duration::from_secs(60) {
let master_commits = self.master_commits.clone();
tokio::task::spawn(async move {
// if another update happens before this one is done, we will download the data twice, but that's it
match MasterCommitCache::download().await {
Ok(commits) => master_commits.store(Arc::new(commits)),
Err(e) => {
// couldn't get the data, keep serving cached results for now
error!("error retrieving master commit list: {}", e)
}
}
});
}
commits
}
}
/// Parses an artifact tag like `1.63.0` or `beta-2022-08-19` from a line taken from
/// `https://static.rust-lang.org/manifests.txt`.
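/// Illustrative examples (the exact manifest line layout is assumed here, not quoted from
/// the live list): `static.rust-lang.org/dist/2022-08-15/channel-rust-beta.toml` parses to
/// `beta-2022-08-15`, while a line ending in `2022-08-11/channel-rust-1.63.0.toml` parses
/// to `1.63.0`.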
fn parse_published_artifact_tag(line: &str) -> Option<String> {
lazy_static! {
static ref VERSION_REGEX: Regex = Regex::new(r"(\d+\.\d+.\d+)").unwrap();
}
let mut parts = line.rsplit('/');
let name = parts.next();
let date = parts.next();
if let Some(date) = date {
if let Some(name) = name {
// Create beta artifact in the form of beta-YYYY-MM-DD
if name == "channel-rust-beta.toml" {
return Some(format!("beta-{date}"));
} else if let Some(capture) = VERSION_REGEX.captures(name) {
if let Some(version) = capture.get(1).map(|c| c.as_str()) {
return Some(version.to_string());
}
}
}
}
None
}
/// Calculate the missing commits.
fn calculate_missing(
master_commits: Vec<collector::MasterCommit>,
queued_pr_commits: Vec<database::QueuedCommit>,
in_progress_artifacts: Vec<ArtifactId>,
all_commits: HashSet<String>,
) -> Vec<(Commit, MissingReason)> {
calculate_missing_from(
master_commits,
queued_pr_commits,
in_progress_artifacts,
all_commits,
Utc::now(),
)
}
/// Calculate the missing commits filtering out any that are 29 days or older than the supplied time.
///
/// This is used by `calculate_missing` and exists as a separate function for testing purposes.
fn calculate_missing_from(
master_commits: Vec<collector::MasterCommit>,
queued_pr_commits: Vec<database::QueuedCommit>,
in_progress_artifacts: Vec<ArtifactId>,
mut all_commits: HashSet<String>,
time: chrono::DateTime<chrono::Utc>,
) -> Vec<(Commit, MissingReason)> {
let mut queue = master_commits
.into_iter()
.filter(|c| time.signed_duration_since(c.time) < Duration::days(29))
.map(|c| {
(
Commit {
sha: c.sha,
date: Date(c.time),
r#type: CommitType::Master,
},
// All recent master commits should have an associated PR
MissingReason::Master {
pr: c.pr.unwrap_or(0),
parent_sha: c.parent_sha,
is_try_parent: false,
},
)
})
.collect::<Vec<_>>();
let master_commits = queue
.iter()
.map(|(mc, _)| mc.sha.clone())
.collect::<HashSet<_>>();
for database::QueuedCommit {
sha,
parent_sha,
pr,
include,
exclude,
runs,
commit_date,
} in queued_pr_commits
.into_iter()
// filter out any queued PR master commits (leaving only try commits)
.filter(|c| !master_commits.contains(&c.sha))
{
// Mark the parent commit as a try_parent.
if let Some((_, metadata)) = queue.iter_mut().find(|(m, _)| m.sha == parent_sha.as_str()) {
if let MissingReason::Master { is_try_parent, .. } = metadata {
*is_try_parent = true;
} else {
unreachable!("try commit has non-master parent {:?}", metadata);
};
}
queue.push((
Commit {
sha: sha.to_string(),
date: commit_date.unwrap_or(Date::empty()),
r#type: CommitType::Try,
},
MissingReason::Try {
pr,
parent_sha,
include,
exclude,
runs,
},
));
}
for aid in in_progress_artifacts {
match aid {
ArtifactId::Commit(c) => {
let previous = queue
.iter()
.find(|(i, _)| i.sha == c.sha)
.map(|v| Box::new(v.1.clone()));
all_commits.remove(&c.sha);
queue.insert(0, (c, MissingReason::InProgress(previous)));
}
ArtifactId::Tag(_) => {
// do nothing, for now, though eventually we'll want an artifact queue
}
}
}
let mut already_tested = all_commits.clone();
let mut i = 0;
while i != queue.len() {
if !already_tested.insert(queue[i].0.sha.clone()) {
queue.remove(i);
} else {
i += 1;
}
}
sort_queue(all_commits.clone(), queue)
}
fn sort_queue(
mut done: HashSet<String>,
mut unordered_queue: Vec<(Commit, MissingReason)>,
) -> Vec<(Commit, MissingReason)> { | // A topological sort, where each "level" is additionally altered such that
// try commits come first, and then sorted by PR # (as a rough heuristic for
// earlier requests).
| random_line_split | |
load.rs | SiteCtxt` from database url
pub async fn from_db_url(db_url: &str) -> anyhow::Result<Self> {
let pool = Pool::open(db_url);
let mut conn = pool.connection().await;
let index = db::Index::load(&mut *conn).await;
let config = if let Ok(s) = fs::read_to_string("site-config.toml") {
toml::from_str(&s)?
} else {
Config {
keys: Keys {
github_api_token: std::env::var("GITHUB_API_TOKEN").ok(),
github_webhook_secret: std::env::var("GITHUB_WEBHOOK_SECRET").ok(),
},
}
};
let master_commits = MasterCommitCache::download().await?;
Ok(Self {
config,
index: ArcSwap::new(Arc::new(index)),
master_commits: Arc::new(ArcSwap::new(Arc::new(master_commits))),
pool,
landing_page: ArcSwap::new(Arc::new(None)),
})
}
pub async fn conn(&self) -> Box<dyn database::pool::Connection> {
self.pool.connection().await
}
/// Returns the not yet tested commits
pub async fn missing_commits(&self) -> Vec<(Commit, MissingReason)> {
let conn = self.conn().await;
let (queued_pr_commits, in_progress_artifacts) =
futures::join!(conn.queued_commits(), conn.in_progress_artifacts());
let master_commits = &self.get_master_commits().commits;
let index = self.index.load();
let all_commits = index
.commits()
.iter()
.map(|commit| commit.sha.clone())
.collect::<HashSet<_>>();
calculate_missing(
master_commits.clone(),
queued_pr_commits,
in_progress_artifacts,
all_commits,
)
}
/// Returns the not yet tested published artifacts, sorted from newest to oldest.
pub async fn missing_published_artifacts(&self) -> anyhow::Result<Vec<String>> {
let artifact_list: String = reqwest::get("https://static.rust-lang.org/manifests.txt")
.await?
.text()
.await?;
lazy_static! {
static ref VERSION_REGEX: Regex = Regex::new(r"(\d+\.\d+.\d+)").unwrap();
}
let conn = self.conn().await;
let index = self.index.load();
let tested_artifacts: HashSet<_> = index.artifacts().collect();
let in_progress_tagged_artifacts: HashSet<_> = conn
.in_progress_artifacts()
.await
.into_iter()
.filter_map(|artifact| match artifact {
ArtifactId::Commit(_) => None,
ArtifactId::Tag(tag) => Some(tag),
})
.collect();
// Gather at most the last 20 published artifacts that are not yet tested and
// are not in progress.
let artifacts: Vec<_> = artifact_list
.lines()
.rev()
.filter_map(parse_published_artifact_tag)
.take(20)
.filter(|artifact| {
!tested_artifacts.contains(artifact.as_str())
&& !in_progress_tagged_artifacts.contains(artifact.as_str())
})
.collect();
Ok(artifacts)
}
pub async fn get_benchmark_category_map(&self) -> HashMap<Benchmark, Category> {
let benchmarks = self.pool.connection().await.get_compile_benchmarks().await;
benchmarks
.into_iter()
.map(|bench| {
(
bench.name.as_str().into(),
Category::from_db_representation(&bench.category).unwrap(),
)
})
.collect()
}
/// Get cached master-branch Rust commits.
/// Returns cached results immediately, but if the cached value is older than one minute,
/// the cache is refreshed in a background task for next time.
pub fn get_master_commits(&self) -> Guard<Arc<MasterCommitCache>> {
let commits = self.master_commits.load();
if commits.updated.elapsed() > std::time::Duration::from_secs(60) {
let master_commits = self.master_commits.clone();
tokio::task::spawn(async move {
// if another update happens before this one is done, we will download the data twice, but that's it
match MasterCommitCache::download().await {
Ok(commits) => master_commits.store(Arc::new(commits)),
Err(e) => {
// couldn't get the data, keep serving cached results for now
error!("error retrieving master commit list: {}", e)
}
}
});
}
commits
}
}
/// Parses an artifact tag like `1.63.0` or `beta-2022-08-19` from a line taken from
/// `https://static.rust-lang.org/manifests.txt`.
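/// Illustrative examples (the exact manifest line layout is assumed here, not quoted from
/// the live list): `static.rust-lang.org/dist/2022-08-15/channel-rust-beta.toml` parses to
/// `beta-2022-08-15`, while a line ending in `2022-08-11/channel-rust-1.63.0.toml` parses
/// to `1.63.0`.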
fn parse_published_artifact_tag(line: &str) -> Option<String> {
lazy_static! {
static ref VERSION_REGEX: Regex = Regex::new(r"(\d+\.\d+.\d+)").unwrap();
}
let mut parts = line.rsplit('/');
let name = parts.next();
let date = parts.next();
if let Some(date) = date {
if let Some(name) = name {
// Create beta artifact in the form of beta-YYYY-MM-DD
if name == "channel-rust-beta.toml" {
return Some(format!("beta-{date}"));
} else if let Some(capture) = VERSION_REGEX.captures(name) {
if let Some(version) = capture.get(1).map(|c| c.as_str()) {
return Some(version.to_string());
}
}
}
}
None
}
/// Calculate the missing commits.
fn calculate_missing(
master_commits: Vec<collector::MasterCommit>,
queued_pr_commits: Vec<database::QueuedCommit>,
in_progress_artifacts: Vec<ArtifactId>,
all_commits: HashSet<String>,
) -> Vec<(Commit, MissingReason)> {
calculate_missing_from(
master_commits,
queued_pr_commits,
in_progress_artifacts,
all_commits,
Utc::now(),
)
}
/// Calculate the missing commits filtering out any that are 29 days or older than the supplied time.
///
/// This is used by `calculate_missing` and exists as a separate function for testing purposes.
fn calculate_missing_from(
master_commits: Vec<collector::MasterCommit>,
queued_pr_commits: Vec<database::QueuedCommit>,
in_progress_artifacts: Vec<ArtifactId>,
mut all_commits: HashSet<String>,
time: chrono::DateTime<chrono::Utc>,
) -> Vec<(Commit, MissingReason)> {
let mut queue = master_commits
.into_iter()
.filter(|c| time.signed_duration_since(c.time) < Duration::days(29))
.map(|c| {
(
Commit {
sha: c.sha,
date: Date(c.time),
r#type: CommitType::Master,
},
// All recent master commits should have an associated PR
MissingReason::Master {
pr: c.pr.unwrap_or(0),
parent_sha: c.parent_sha,
is_try_parent: false,
},
)
})
.collect::<Vec<_>>();
let master_commits = queue
.iter()
.map(|(mc, _)| mc.sha.clone())
.collect::<HashSet<_>>();
for database::QueuedCommit {
sha,
parent_sha,
pr,
include,
exclude,
runs,
commit_date,
} in queued_pr_commits
.into_iter()
// filter out any queued PR master commits (leaving only try commits)
.filter(|c| !master_commits.contains(&c.sha))
{
// Mark the parent commit as a try_parent.
if let Some((_, metadata)) = queue.iter_mut().find(|(m, _)| m.sha == parent_sha.as_str()) {
if let MissingReason::Master { is_try_parent, .. } = metadata {
*is_try_parent = true;
} else {
unreachable!("try commit has non-master parent {:?}", metadata);
};
}
queue.push((
Commit {
sha: sha.to_string(),
date: commit_date.unwrap_or(Date::empty()),
r#type: CommitType::Try,
},
MissingReason::Try {
pr,
parent_sha,
include,
exclude,
runs,
},
));
}
for aid in in_progress_artifacts {
match aid {
ArtifactId::Commit(c) => {
let previous = queue
.iter()
.find(|(i, _)| i.sha == c.sha)
.map(|v| Box::new(v.1.clone()));
all_commits.remove(&c.sha);
queue.insert(0, (c, MissingReason::InProgress(previous)));
}
ArtifactId::Tag(_) => |
}
}
let mut already_tested = all_commits.clone();
let mut i = 0;
while i != queue.len() {
if !already_tested.insert(queue[i].0.sha.clone()) {
queue.remove(i);
} else {
i += 1;
}
}
sort_queue(all_commits.clone(), queue)
}
fn sort | {
// do nothing, for now, though eventually we'll want an artifact queue
} | conditional_block |
load.rs | (
// InProgress MR go first (false < true)
mr.parent_sha().is_some(),
mr.pr().unwrap_or(0),
c.sha.clone(),
)
});
for (c, _) in level {
done.insert(c.sha.clone());
}
finished += level_len;
}
unordered_queue
}
// Copy of Iterator::partition_in_place, which is currently unstable.
fn partition_in_place<'a, I, T: 'a, P>(mut iter: I, mut predicate: P) -> usize
where
I: Sized + DoubleEndedIterator<Item = &'a mut T>,
P: FnMut(&T) -> bool,
{
// FIXME: should we worry about the count overflowing? The only way to have more than
// `usize::MAX` mutable references is with ZSTs, which aren't useful to partition...
// These closure "factory" functions exist to avoid genericity in `Self`.
#[inline]
fn is_false<'a, T>(
predicate: &'a mut impl FnMut(&T) -> bool,
true_count: &'a mut usize,
) -> impl FnMut(&&mut T) -> bool + 'a {
move |x| {
let p = predicate(&**x);
*true_count += p as usize;
!p
}
}
#[inline]
fn is_true<T>(predicate: &mut impl FnMut(&T) -> bool) -> impl FnMut(&&mut T) -> bool + '_ {
move |x| predicate(&**x)
}
// Repeatedly find the first `false` and swap it with the last `true`.
let mut true_count = 0;
while let Some(head) = iter.find(is_false(&mut predicate, &mut true_count)) {
if let Some(tail) = iter.rfind(is_true(&mut predicate)) {
std::mem::swap(head, tail);
true_count += 1;
} else {
break;
}
}
true_count
}
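// Illustrative usage sketch of `partition_in_place` (hypothetical values, not part of the
// original file):
// let mut data = [1, 4, 2, 8, 5, 7];
// let evens = partition_in_place(data.iter_mut(), |x| x % 2 == 0);
// // `evens` == 3 and the first three elements of `data` are now the even ones
// // (their relative order is not preserved).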
/// One decimal place rounded percent
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
pub struct Percent(#[serde(with = "collector::round_float")] pub f64);
#[cfg(test)]
mod tests {
use std::str::FromStr;
use collector::MasterCommit;
use database::QueuedCommit;
use super::*;
// Checks that when we have a setup like the following, where a -> b means b
// is the parent of a (i.e., must be tested before we can report comparison
// results for a):
//
// a -> b
// -> try-on-a
//
// the resulting ordering is:
//
// b
// a
// try-on-a
//
// which ensures that as each commit finishes, we have the results for it.
//
// Note that try-on-a does *not* have a direct dependency on b's results
// being available; we could order b after ([a, try-on-a, b]) but this means
// that we have to be more careful about posting comparison results, and most
// observers expect those to be posted as soon as the PR's build in the
// queue finishes: not doing so will look odd to onlookers.
#[test]
fn try_commit_ancestors() {
let time = chrono::DateTime::from_str("2021-09-01T00:00:00.000Z").unwrap();
let master_commits = vec![
MasterCommit {
sha: "a".into(),
parent_sha: "b".into(),
pr: Some(2),
time,
},
MasterCommit {
sha: "b".into(),
parent_sha: "c".into(),
pr: Some(1),
time,
},
];
let queued_pr_commits = vec![
QueuedCommit {
sha: "try-on-a".into(),
parent_sha: "a".into(),
pr: 3,
include: None,
exclude: None,
runs: None,
commit_date: None,
},
QueuedCommit {
sha: "b".into(),
parent_sha: "c".into(),
pr: 1,
include: None,
exclude: None,
runs: None,
commit_date: None,
},
QueuedCommit {
sha: "a".into(),
parent_sha: "b".into(),
pr: 2,
include: None,
exclude: None,
runs: None,
commit_date: None,
},
];
let in_progress_artifacts = vec![];
let mut all_commits = HashSet::new();
all_commits.insert("c".into());
let expected = vec![
(
Commit {
sha: "b".into(),
date: database::Date(time),
r#type: CommitType::Master,
},
MissingReason::Master {
pr: 1,
parent_sha: "c".into(),
is_try_parent: false,
},
),
(
Commit {
sha: "a".into(),
date: database::Date(time),
r#type: CommitType::Master,
},
MissingReason::Master {
pr: 2,
parent_sha: "b".into(),
is_try_parent: true,
},
),
(
Commit {
sha: "try-on-a".into(),
date: database::Date(time),
r#type: CommitType::Try,
},
MissingReason::Try {
pr: 3,
parent_sha: "a".into(),
include: None,
exclude: None,
runs: None,
},
),
];
let found = calculate_missing_from(
master_commits,
queued_pr_commits,
in_progress_artifacts,
all_commits,
time,
);
assert_eq!(expected, found, "{:#?} != {:#?}", expected, found);
}
#[test]
fn calculates_missing_correct() {
let time = chrono::DateTime::from_str("2021-09-01T00:00:00.000Z").unwrap();
let master_commits = vec![
// A not yet tested commit
MasterCommit {
sha: "123".into(),
parent_sha: "345".into(),
pr: Some(11),
time,
},
// An already tested commit
MasterCommit {
sha: "abc".into(),
parent_sha: "def".into(),
pr: Some(90),
time,
},
// A queued PR commit
MasterCommit {
sha: "foo".into(),
parent_sha: "bar".into(),
pr: Some(77),
time,
},
];
let queued_pr_commits = vec![
// A master commit
QueuedCommit {
sha: "foo".into(),
parent_sha: "bar".into(),
pr: 77,
include: None,
exclude: None,
runs: None,
commit_date: None,
},
// A try run
QueuedCommit {
sha: "baz".into(),
parent_sha: "foo".into(),
pr: 101,
include: None,
exclude: None,
runs: None,
commit_date: None,
},
];
let in_progress_artifacts = vec![];
let mut all_commits = HashSet::new();
all_commits.insert(master_commits[1].sha.clone());
// Parent trailers
all_commits.insert(master_commits[0].parent_sha.clone());
all_commits.insert(master_commits[1].parent_sha.clone());
all_commits.insert(master_commits[2].parent_sha.clone());
let expected = vec![
(
Commit {
sha: "123".into(),
date: database::Date(time),
r#type: CommitType::Master,
},
MissingReason::Master {
pr: 11,
parent_sha: "345".into(),
is_try_parent: false,
},
),
(
Commit {
sha: "foo".into(),
date: database::Date(time),
r#type: CommitType::Master,
},
MissingReason::Master {
pr: 77,
parent_sha: "bar".into(),
is_try_parent: true,
},
),
(
Commit {
sha: "baz".into(),
date: database::Date(time),
r#type: CommitType::Try,
},
MissingReason::Try {
pr: 101,
parent_sha: "foo".into(),
include: None,
exclude: None,
runs: None,
},
),
];
assert_eq!(
expected,
calculate_missing_from(
master_commits,
queued_pr_commits,
in_progress_artifacts,
all_commits,
time
)
);
}
#[test]
fn parse_published_beta_artifact() | {
assert_eq!(
parse_published_artifact_tag(
"static.rust-lang.org/dist/2022-08-15/channel-rust-beta.toml"
),
Some("beta-2022-08-15".to_string())
);
} | identifier_body | |
load.rs | {
pub sha: String,
pub parent_sha: String,
}
impl TryCommit {
pub fn sha(&self) -> &str {
self.sha.as_str()
}
pub fn comparison_url(&self) -> String {
format!(
"https://perf.rust-lang.org/compare.html?start={}&end={}",
self.parent_sha, self.sha
)
}
}
/// Keys for accessing various services
///
/// At the moment only used for accessing GitHub
#[derive(Debug, Default, Deserialize)]
pub struct Keys {
/// GitHub API token from the `GITHUB_API_TOKEN` env variable
#[serde(rename = "github")]
pub github_api_token: Option<String>,
/// GitHub webhook secret from the `GITHUB_WEBHOOK_SECRET` env variable
#[serde(rename = "secret")]
pub github_webhook_secret: Option<String>,
}
/// Site configuration
#[derive(Debug, Deserialize)]
pub struct Config {
pub keys: Keys,
}
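// A minimal `site-config.toml` sketch matching the structs above; the field names follow
// the serde renames, and the values are placeholders:
//
// [keys]
// github = "<github api token>"
// secret = "<github webhook secret>"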
#[derive(Debug)]
pub struct MasterCommitCache {
pub commits: Vec<MasterCommit>,
pub updated: Instant,
}
impl MasterCommitCache {
/// Download the master-branch Rust commit list
pub async fn download() -> anyhow::Result<Self> {
let commits = collector::master_commits().await?;
Ok(Self {
commits,
updated: Instant::now(),
})
}
}
/// Site context object that contains global data
pub struct SiteCtxt {
/// Site configuration
pub config: Config,
/// Cached site landing page
pub landing_page: ArcSwap<Option<Arc<crate::api::graphs::Response>>>,
/// Index of various common queries
pub index: ArcSwap<crate::db::Index>,
/// Cached master-branch Rust commits
pub master_commits: Arc<ArcSwap<MasterCommitCache>>, // outer Arc enables mutation in background task
/// Database connection pool
pub pool: Pool,
}
impl SiteCtxt {
pub fn summary_scenarios(&self) -> Vec<crate::db::Scenario> {
vec![
crate::db::Scenario::Empty,
crate::db::Scenario::IncrementalEmpty,
crate::db::Scenario::IncrementalFresh,
crate::db::Scenario::IncrementalPatch("println".into()),
]
}
pub fn artifact_id_for_bound(&self, query: Bound, is_left: bool) -> Option<ArtifactId> {
crate::selector::artifact_id_for_bound(&self.index.load(), query, is_left)
}
pub fn data_range(&self, range: RangeInclusive<Bound>) -> Vec<Commit> {
crate::selector::range_subset(self.index.load().commits(), range)
}
/// Initialize `SiteCtxt` from database url
pub async fn from_db_url(db_url: &str) -> anyhow::Result<Self> {
let pool = Pool::open(db_url);
let mut conn = pool.connection().await;
let index = db::Index::load(&mut *conn).await;
let config = if let Ok(s) = fs::read_to_string("site-config.toml") {
toml::from_str(&s)?
} else {
Config {
keys: Keys {
github_api_token: std::env::var("GITHUB_API_TOKEN").ok(),
github_webhook_secret: std::env::var("GITHUB_WEBHOOK_SECRET").ok(),
},
}
};
let master_commits = MasterCommitCache::download().await?;
Ok(Self {
config,
index: ArcSwap::new(Arc::new(index)),
master_commits: Arc::new(ArcSwap::new(Arc::new(master_commits))),
pool,
landing_page: ArcSwap::new(Arc::new(None)),
})
}
pub async fn conn(&self) -> Box<dyn database::pool::Connection> {
self.pool.connection().await
}
/// Returns the not yet tested commits
pub async fn missing_commits(&self) -> Vec<(Commit, MissingReason)> {
let conn = self.conn().await;
let (queued_pr_commits, in_progress_artifacts) =
futures::join!(conn.queued_commits(), conn.in_progress_artifacts());
let master_commits = &self.get_master_commits().commits;
let index = self.index.load();
let all_commits = index
.commits()
.iter()
.map(|commit| commit.sha.clone())
.collect::<HashSet<_>>();
calculate_missing(
master_commits.clone(),
queued_pr_commits,
in_progress_artifacts,
all_commits,
)
}
/// Returns the not yet tested published artifacts, sorted from newest to oldest.
pub async fn missing_published_artifacts(&self) -> anyhow::Result<Vec<String>> {
let artifact_list: String = reqwest::get("https://static.rust-lang.org/manifests.txt")
.await?
.text()
.await?;
lazy_static! {
static ref VERSION_REGEX: Regex = Regex::new(r"(\d+\.\d+.\d+)").unwrap();
}
let conn = self.conn().await;
let index = self.index.load();
let tested_artifacts: HashSet<_> = index.artifacts().collect();
let in_progress_tagged_artifacts: HashSet<_> = conn
.in_progress_artifacts()
.await
.into_iter()
.filter_map(|artifact| match artifact {
ArtifactId::Commit(_) => None,
ArtifactId::Tag(tag) => Some(tag),
})
.collect();
// Gather at most the last 20 published artifacts that are not yet tested and
// are not in progress.
let artifacts: Vec<_> = artifact_list
.lines()
.rev()
.filter_map(parse_published_artifact_tag)
.take(20)
.filter(|artifact| {
!tested_artifacts.contains(artifact.as_str())
&& !in_progress_tagged_artifacts.contains(artifact.as_str())
})
.collect();
Ok(artifacts)
}
pub async fn get_benchmark_category_map(&self) -> HashMap<Benchmark, Category> {
let benchmarks = self.pool.connection().await.get_compile_benchmarks().await;
benchmarks
.into_iter()
.map(|bench| {
(
bench.name.as_str().into(),
Category::from_db_representation(&bench.category).unwrap(),
)
})
.collect()
}
/// Get cached master-branch Rust commits.
/// Returns cached results immediately, but if the cached value is older than one minute,
/// the cache is refreshed in a background task for next time.
pub fn get_master_commits(&self) -> Guard<Arc<MasterCommitCache>> {
let commits = self.master_commits.load();
if commits.updated.elapsed() > std::time::Duration::from_secs(60) {
let master_commits = self.master_commits.clone();
tokio::task::spawn(async move {
// if another update happens before this one is done, we will download the data twice, but that's it
match MasterCommitCache::download().await {
Ok(commits) => master_commits.store(Arc::new(commits)),
Err(e) => {
// couldn't get the data, keep serving cached results for now
error!("error retrieving master commit list: {}", e)
}
}
});
}
commits
}
}
/// Parses an artifact tag like `1.63.0` or `beta-2022-08-19` from a line taken from
/// `https://static.rust-lang.org/manifests.txt`.
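/// Illustrative examples (the exact manifest line layout is assumed here, not quoted from
/// the live list): `static.rust-lang.org/dist/2022-08-15/channel-rust-beta.toml` parses to
/// `beta-2022-08-15`, while a line ending in `2022-08-11/channel-rust-1.63.0.toml` parses
/// to `1.63.0`.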
fn parse_published_artifact_tag(line: &str) -> Option<String> {
lazy_static! {
static ref VERSION_REGEX: Regex = Regex::new(r"(\d+\.\d+.\d+)").unwrap();
}
let mut parts = line.rsplit('/');
let name = parts.next();
let date = parts.next();
if let Some(date) = date {
if let Some(name) = name {
// Create beta artifact in the form of beta-YYYY-MM-DD
if name == "channel-rust-beta.toml" {
return Some(format!("beta-{date}"));
} else if let Some(capture) = VERSION_REGEX.captures(name) {
if let Some(version) = capture.get(1).map(|c| c.as_str()) {
return Some(version.to_string());
}
}
}
}
None
}
/// Calculate the missing commits.
fn calculate_missing(
master_commits: Vec<collector::MasterCommit>,
queued_pr_commits: Vec<database::QueuedCommit>,
in_progress_artifacts: Vec<ArtifactId>,
all_commits: HashSet<String>,
) -> Vec<(Commit, MissingReason)> {
calculate_missing_from(
master_commits,
queued_pr_commits,
in_progress_artifacts,
all_commits,
Utc::now(),
)
}
/// Calculate the missing commits filtering out any that are 29 days or older than the supplied time.
///
/// This is used by `calculate_missing` and exists as a separate function for testing purposes.
fn calculate_missing_from(
master_commits: Vec<collector::MasterCommit>,
queued_pr_commits: Vec<database::QueuedCommit>,
in_progress_artifacts: Vec<ArtifactId>,
mut all_commits: HashSet<String>,
time: chrono::DateTime<chrono::Utc>,
) -> Vec<(Commit, MissingReason)> {
let mut | TryCommit | identifier_name | |
create_cluster.go | region, kubernetesVersion, pkeVersion string, pkeImageNameGetter PKEImageNameGetter) (string, error) {
kubeVersion, err := semver.NewVersion(kubernetesVersion)
if err != nil {
return "", errors.WithDetails(err, "could not create semver from Kubernetes version", "kubernetesVersion", kubernetesVersion)
}
_ = kubeVersion
if pkeImageNameGetter != nil {
ami, err := pkeImageNameGetter.PKEImageName("amazon", "pke", "ubuntu", kubeVersion.String(), pkeVersion, region)
if err != nil {
// fail silently
}
if ami != "" {
return ami, nil
}
}
// PKE 0.4.19; K8s 1.13.10; OS Ubuntu
return map[string]string{
"ap-east-1": "ami-0ca8206236662e9ea", // Asia Pacific (Hong Kong).
"ap-northeast-1": "ami-029f1fff7d250aa95", // Asia Pacific (Tokyo).
"ap-northeast-2": "ami-0b2ea3e1fb7e0a0dc", // Asia Pacific (Seoul).
"ap-southeast-1": "ami-00d5d224c11f12854", // Asia Pacific (Singapore).
"ap-southeast-2": "ami-03ad7f293fb551d91", // Asia Pacific (Sydney).
"ap-south-1": "ami-03f5be5363911cfd7", // Asia Pacific (Mumbai).
"ca-central-1": "ami-0f45e7a3348941cd0", // Canada (Central).
"eu-central-1": "ami-01a9d881b5eef8c78", // EU (Frankfurt).
"eu-north-1": "ami-0152ce8be8bcc0c50", // EU (Stockholm).
"eu-west-1": "ami-0284019fcb7ca3121", // EU (Ireland).
"eu-west-2": "ami-0b4c70c59b14d97ba", // EU (London).
"eu-west-3": "ami-084d0cc1bce975f4b", // EU (Paris).
"me-south-1": "ami-0bab0d9b6142d8674", // Middle East (Bahrain).
"sa-east-1": "ami-025c97e09ba50cb05", // South America (Sao Paulo).
"us-east-1": "ami-0980e83ac34bbc3bb", // US East (N. Virginia).
"us-east-2": "ami-07d087f4be161fa72", // US East (Ohio).
"us-west-1": "ami-0c8ea5996aa18c15d", // US West (N. California).
"us-west-2": "ami-0d441df4104cb772b", // US West (Oregon).
}[region], nil
}
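// Illustrative behaviour sketch (not part of the original file): when the image name getter
// is nil or returns an empty name, the lookup falls back to the hard-coded table above,
// e.g. region "eu-west-1" resolves to "ami-0284019fcb7ca3121".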
type TokenGenerator interface {
GenerateClusterToken(orgID, clusterID uint) (string, string, error)
}
type CreateClusterWorkflowInput struct {
OrganizationID uint
ClusterID uint
ClusterUID string
ClusterName string
SecretID string
Region string
PipelineExternalURL string
PipelineExternalURLInsecure bool
OIDCEnabled bool
VPCID string
SubnetID string
}
type CreateClusterWorkflow struct {
GlobalRegion string
}
func (w CreateClusterWorkflow) Execute(ctx workflow.Context, input CreateClusterWorkflowInput) error {
ao := workflow.ActivityOptions{
ScheduleToStartTimeout: 10 * time.Minute,
StartToCloseTimeout: 20 * time.Minute,
WaitForCancellation: true,
}
ctx = workflow.WithActivityOptions(ctx, ao)
// Generate CA certificates
{
activityInput := GenerateCertificatesActivityInput{ClusterID: input.ClusterID}
err := workflow.ExecuteActivity(ctx, GenerateCertificatesActivityName, activityInput).Get(ctx, nil)
if err != nil {
return err
}
}
// Generic AWS activity input
awsActivityInput := AWSActivityInput{
OrganizationID: input.OrganizationID,
SecretID: input.SecretID,
Region: input.Region,
}
var rolesStackID string
// Create AWS roles
{
activityInput := CreateAWSRolesActivityInput{AWSActivityInput: awsActivityInput, ClusterID: input.ClusterID}
activityInput.AWSActivityInput.Region = w.GlobalRegion
err := workflow.ExecuteActivity(ctx, CreateAWSRolesActivityName, activityInput).Get(ctx, &rolesStackID)
if err != nil {
return err
}
}
var rolesOutput map[string]string
// Wait for roles
{
if rolesStackID == "" {
return errors.New("missing AWS role stack ID")
}
activityInput := WaitCFCompletionActivityInput{AWSActivityInput: awsActivityInput, StackID: rolesStackID}
activityInput.AWSActivityInput.Region = w.GlobalRegion
err := workflow.ExecuteActivity(ctx, WaitCFCompletionActivityName, activityInput).Get(ctx, &rolesOutput)
if err != nil {
return err
}
}
var vpcStackID string
// Create VPC
{
activityInput := CreateVPCActivityInput{
AWSActivityInput: awsActivityInput,
ClusterID: input.ClusterID,
ClusterName: input.ClusterName,
VPCID: input.VPCID,
SubnetID: input.SubnetID,
}
err := workflow.ExecuteActivity(ctx, CreateVPCActivityName, activityInput).Get(ctx, &vpcStackID)
if err != nil {
return err
}
}
var vpcOutput map[string]string
// Wait for VPC
{
if vpcStackID == "" {
return errors.New("missing VPC stack ID")
}
activityInput := WaitCFCompletionActivityInput{AWSActivityInput: awsActivityInput, StackID: vpcStackID}
err := workflow.ExecuteActivity(ctx, WaitCFCompletionActivityName, activityInput).Get(ctx, &vpcOutput)
if err != nil {
return err
}
}
// Get default security group of the VPC
var vpcDefaultSecurityGroupID string
activityInput := GetVpcDefaultSecurityGroupActivityInput{
AWSActivityInput: awsActivityInput,
ClusterID: input.ClusterID,
VpcID: vpcOutput["VpcId"],
}
err := workflow.ExecuteActivity(ctx, GetVpcDefaultSecurityGroupActivityName, activityInput).Get(ctx, &vpcDefaultSecurityGroupID)
if err != nil {
return err
}
if vpcDefaultSecurityGroupID == "" {
return errors.Errorf("couldn't get the default security group of the VPC %q", vpcOutput["VpcId"])
}
var nodePools []NodePool
// List node pools
{
activityInput := ListNodePoolsActivityInput{ClusterID: input.ClusterID}
err := workflow.ExecuteActivity(ctx, ListNodePoolsActivityName, activityInput).Get(ctx, &nodePools)
if err != nil {
return err
}
}
var master NodePool
for _, np := range nodePools {
if np.Master {
master = np
if len(np.AvailabilityZones) <= 0 || np.AvailabilityZones[0] == "" {
return errors.Errorf("missing availability zone for nodepool %q", np.Name)
}
break
}
}
var keyOut UploadSSHKeyPairActivityOutput
// Upload SSH key pair
{
activityInput := UploadSSHKeyPairActivityInput{
ClusterID: input.ClusterID,
}
err := workflow.ExecuteActivity(ctx, UploadSSHKeyPairActivityName, activityInput).Get(ctx, &keyOut)
if err != nil {
return err
}
}
// Create dex client for the cluster
if input.OIDCEnabled {
activityInput := CreateDexClientActivityInput{
ClusterID: input.ClusterID,
}
err := workflow.ExecuteActivity(ctx, CreateDexClientActivityName, activityInput).Get(ctx, nil)
if err != nil {
return err
}
}
var externalAddress string
multiMaster := master.MaxCount > 1
masterNodeSubnetID := strings.Split | etDefaultImageID( | identifier_name | |
create_cluster.go | 1.13.10; OS Ubuntu
return map[string]string{
"ap-east-1": "ami-0ca8206236662e9ea", // Asia Pacific (Hong Kong).
"ap-northeast-1": "ami-029f1fff7d250aa95", // Asia Pacific (Tokyo).
"ap-northeast-2": "ami-0b2ea3e1fb7e0a0dc", // Asia Pacific (Seoul).
"ap-southeast-1": "ami-00d5d224c11f12854", // Asia Pacific (Singapore).
"ap-southeast-2": "ami-03ad7f293fb551d91", // Asia Pacific (Sydney).
"ap-south-1": "ami-03f5be5363911cfd7", // Asia Pacific (Mumbai).
"ca-central-1": "ami-0f45e7a3348941cd0", // Canada (Central).
"eu-central-1": "ami-01a9d881b5eef8c78", // EU (Frankfurt).
"eu-north-1": "ami-0152ce8be8bcc0c50", // EU (Stockholm).
"eu-west-1": "ami-0284019fcb7ca3121", // EU (Ireland).
"eu-west-2": "ami-0b4c70c59b14d97ba", // EU (London).
"eu-west-3": "ami-084d0cc1bce975f4b", // EU (Paris).
"me-south-1": "ami-0bab0d9b6142d8674", // Middle East (Bahrain).
"sa-east-1": "ami-025c97e09ba50cb05", // South America (Sao Paulo).
"us-east-1": "ami-0980e83ac34bbc3bb", // US East (N. Virginia).
"us-east-2": "ami-07d087f4be161fa72", // US East (Ohio).
"us-west-1": "ami-0c8ea5996aa18c15d", // US West (N. California).
"us-west-2": "ami-0d441df4104cb772b", // US West (Oregon).
}[region], nil
}
type TokenGenerator interface {
GenerateClusterToken(orgID, clusterID uint) (string, string, error)
}
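// Illustrative sketch (not part of the original file): a minimal TokenGenerator
// implementation showing the interface contract. The struct name is hypothetical and
// the two returned strings are assumed to be a token identifier and the token itself;
// a real implementation would issue a signed token scoped to the org/cluster pair.
type staticTokenGenerator struct {
	tokenID string
	token   string
}

func (g staticTokenGenerator) GenerateClusterToken(orgID, clusterID uint) (string, string, error) {
	// orgID and clusterID would normally be encoded into the issued token.
	return g.tokenID, g.token, nil
}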
type CreateClusterWorkflowInput struct {
OrganizationID uint
ClusterID uint
ClusterUID string
ClusterName string
SecretID string
Region string
PipelineExternalURL string
PipelineExternalURLInsecure bool
OIDCEnabled bool
VPCID string
SubnetID string
}
type CreateClusterWorkflow struct { | GlobalRegion string
}
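// Sketch (assumption, not from the original file): if this workflow runs on the Cadence
// Go client, it would typically be registered under a stable name and started with a
// CreateClusterWorkflowInput payload, roughly like:
//
//	workflow.RegisterWithOptions(
//		CreateClusterWorkflow{GlobalRegion: "us-east-1"}.Execute,
//		workflow.RegisterOptions{Name: "pke-create-cluster"},
//	)
//
// The workflow name and the GlobalRegion value above are illustrative only.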
func (w CreateClusterWorkflow) Execute(ctx workflow.Context, input CreateClusterWorkflowInput) error {
ao := workflow.ActivityOptions{
ScheduleToStartTimeout: 10 * time.Minute,
StartToCloseTimeout: 20 * time.Minute,
WaitForCancellation: true,
}
ctx = workflow.WithActivityOptions(ctx, ao)
// Generate CA certificates
{
activityInput := GenerateCertificatesActivityInput{ClusterID: input.ClusterID}
err := workflow.ExecuteActivity(ctx, GenerateCertificatesActivityName, activityInput).Get(ctx, nil)
if err != nil {
return err
}
}
// Generic AWS activity input
awsActivityInput := AWSActivityInput{
OrganizationID: input.OrganizationID,
SecretID: input.SecretID,
Region: input.Region,
}
var rolesStackID string
// Create AWS roles
{
activityInput := CreateAWSRolesActivityInput{AWSActivityInput: awsActivityInput, ClusterID: input.ClusterID}
activityInput.AWSActivityInput.Region = w.GlobalRegion
err := workflow.ExecuteActivity(ctx, CreateAWSRolesActivityName, activityInput).Get(ctx, &rolesStackID)
if err != nil {
return err
}
}
var rolesOutput map[string]string
// Wait for roles
{
if rolesStackID == "" {
return errors.New("missing AWS role stack ID")
}
activityInput := WaitCFCompletionActivityInput{AWSActivityInput: awsActivityInput, StackID: rolesStackID}
activityInput.AWSActivityInput.Region = w.GlobalRegion
err := workflow.ExecuteActivity(ctx, WaitCFCompletionActivityName, activityInput).Get(ctx, &rolesOutput)
if err != nil {
return err
}
}
var vpcStackID string
// Create VPC
{
activityInput := CreateVPCActivityInput{
AWSActivityInput: awsActivityInput,
ClusterID: input.ClusterID,
ClusterName: input.ClusterName,
VPCID: input.VPCID,
SubnetID: input.SubnetID,
}
err := workflow.ExecuteActivity(ctx, CreateVPCActivityName, activityInput).Get(ctx, &vpcStackID)
if err != nil {
return err
}
}
var vpcOutput map[string]string
// Wait for VPC
{
if vpcStackID == "" {
return errors.New("missing VPC stack ID")
}
activityInput := WaitCFCompletionActivityInput{AWSActivityInput: awsActivityInput, StackID: vpcStackID}
err := workflow.ExecuteActivity(ctx, WaitCFCompletionActivityName, activityInput).Get(ctx, &vpcOutput)
if err != nil {
return err
}
}
// Get default security group of the VPC
var vpcDefaultSecurityGroupID string
activityInput := GetVpcDefaultSecurityGroupActivityInput{
AWSActivityInput: awsActivityInput,
ClusterID: input.ClusterID,
VpcID: vpcOutput["VpcId"],
}
err := workflow.ExecuteActivity(ctx, GetVpcDefaultSecurityGroupActivityName, activityInput).Get(ctx, &vpcDefaultSecurityGroupID)
if err != nil {
return err
}
if vpcDefaultSecurityGroupID == "" {
return errors.Errorf("couldn't get the default security group of the VPC %q", vpcOutput["VpcId"])
}
var nodePools []NodePool
// List node pools
{
activityInput := ListNodePoolsActivityInput{ClusterID: input.ClusterID}
err := workflow.ExecuteActivity(ctx, ListNodePoolsActivityName, activityInput).Get(ctx, &nodePools)
if err != nil {
return err
}
}
var master NodePool
for _, np := range nodePools {
if np.Master {
master = np
if len(np.AvailabilityZones) <= 0 || np.AvailabilityZones[0] == "" {
return errors.Errorf("missing availability zone for nodepool %q", np.Name)
}
break
}
}
var keyOut UploadSSHKeyPairActivityOutput
// Upload SSH key pair
{
activityInput := UploadSSHKeyPairActivityInput{
ClusterID: input.ClusterID,
}
err := workflow.ExecuteActivity(ctx, UploadSSHKeyPairActivityName, activityInput).Get(ctx, &keyOut)
if err != nil {
return err
}
}
// Create dex client for the cluster
if input.OIDCEnabled {
activityInput := CreateDexClientActivityInput{
ClusterID: input.ClusterID,
}
err := workflow.ExecuteActivity(ctx, CreateDexClientActivityName, activityInput).Get(ctx, nil)
if err != nil {
return err
}
}
var externalAddress string
multiMaster := master.MaxCount > 1
masterNodeSubnetID := strings.Split(vpcOutput["SubnetIds"], ",")[0]
if len(master.Subnets) > 0 {
masterNodeSubnetID = master.Subnets[0]
}
masterInput := CreateMasterActivityInput{
ClusterID: input.ClusterID,
VPCID: vpcOutput["VpcId"],
VPCDefaultSecurityGroupID: vpcDefaultSecurityGroupID,
SubnetID: masterNodeSubnetID,
MultiMaster: multiMaster,
MasterInstanceProfile: rolesOutput["MasterInstanceProfile"],
ExternalBaseUrl: input.PipelineExternalURL,
ExternalBaseUrlInsecure: input.PipelineExternalURLInsecure,
Pool: master,
SSHKeyName: keyOut.KeyName,
AvailabilityZone: master.AvailabilityZones | random_line_split | |
create_cluster.go | "ap-northeast-1": "ami-029f1fff7d250aa95", // Asia Pacific (Tokyo).
"ap-northeast-2": "ami-0b2ea3e1fb7e0a0dc", // Asia Pacific (Seoul).
"ap-southeast-1": "ami-00d5d224c11f12854", // Asia Pacific (Singapore).
"ap-southeast-2": "ami-03ad7f293fb551d91", // Asia Pacific (Sydney).
"ap-south-1": "ami-03f5be5363911cfd7", // Asia Pacific (Mumbai).
"ca-central-1": "ami-0f45e7a3348941cd0", // Canada (Central).
"eu-central-1": "ami-01a9d881b5eef8c78", // EU (Frankfurt).
"eu-north-1": "ami-0152ce8be8bcc0c50", // EU (Stockholm).
"eu-west-1": "ami-0284019fcb7ca3121", // EU (Ireland).
"eu-west-2": "ami-0b4c70c59b14d97ba", // EU (London).
"eu-west-3": "ami-084d0cc1bce975f4b", // EU (Paris).
"me-south-1": "ami-0bab0d9b6142d8674", // Middle East (Bahrain).
"sa-east-1": "ami-025c97e09ba50cb05", // South America (Sao Paulo).
"us-east-1": "ami-0980e83ac34bbc3bb", // US East (N. Virginia).
"us-east-2": "ami-07d087f4be161fa72", // US East (Ohio).
"us-west-1": "ami-0c8ea5996aa18c15d", // US West (N. California).
"us-west-2": "ami-0d441df4104cb772b", // US West (Oregon).
}[region], nil
}
type TokenGenerator interface {
GenerateClusterToken(orgID, clusterID uint) (string, string, error)
}
type CreateClusterWorkflowInput struct {
OrganizationID uint
ClusterID uint
ClusterUID string
ClusterName string
SecretID string
Region string
PipelineExternalURL string
PipelineExternalURLInsecure bool
OIDCEnabled bool
VPCID string
SubnetID string
}
type CreateClusterWorkflow struct {
GlobalRegion string
}
func (w CreateClusterWorkflow) Execute(ctx workflow.Context, input CreateClusterWorkflowInput) error {
ao := workflow.ActivityOptions{
ScheduleToStartTimeout: 10 * time.Minute,
StartToCloseTimeout: 20 * time.Minute,
WaitForCancellation: true,
}
ctx = workflow.WithActivityOptions(ctx, ao)
// Generate CA certificates
{
activityInput := GenerateCertificatesActivityInput{ClusterID: input.ClusterID}
err := workflow.ExecuteActivity(ctx, GenerateCertificatesActivityName, activityInput).Get(ctx, nil)
if err != nil {
return err
}
}
// Generic AWS activity input
awsActivityInput := AWSActivityInput{
OrganizationID: input.OrganizationID,
SecretID: input.SecretID,
Region: input.Region,
}
var rolesStackID string
// Create AWS roles
{
activityInput := CreateAWSRolesActivityInput{AWSActivityInput: awsActivityInput, ClusterID: input.ClusterID}
activityInput.AWSActivityInput.Region = w.GlobalRegion
err := workflow.ExecuteActivity(ctx, CreateAWSRolesActivityName, activityInput).Get(ctx, &rolesStackID)
if err != nil {
return err
}
}
var rolesOutput map[string]string
// Wait for roles
{
if rolesStackID == "" {
return errors.New("missing AWS role stack ID")
}
activityInput := WaitCFCompletionActivityInput{AWSActivityInput: awsActivityInput, StackID: rolesStackID}
activityInput.AWSActivityInput.Region = w.GlobalRegion
err := workflow.ExecuteActivity(ctx, WaitCFCompletionActivityName, activityInput).Get(ctx, &rolesOutput)
if err != nil {
return err
}
}
var vpcStackID string
// Create VPC
{
activityInput := CreateVPCActivityInput{
AWSActivityInput: awsActivityInput,
ClusterID: input.ClusterID,
ClusterName: input.ClusterName,
VPCID: input.VPCID,
SubnetID: input.SubnetID,
}
err := workflow.ExecuteActivity(ctx, CreateVPCActivityName, activityInput).Get(ctx, &vpcStackID)
if err != nil {
return err
}
}
var vpcOutput map[string]string
// Wait for VPC
{
if vpcStackID == "" {
return errors.New("missing VPC stack ID")
}
activityInput := WaitCFCompletionActivityInput{AWSActivityInput: awsActivityInput, StackID: vpcStackID}
err := workflow.ExecuteActivity(ctx, WaitCFCompletionActivityName, activityInput).Get(ctx, &vpcOutput)
if err != nil {
return err
}
}
// Get default security group of the VPC
var vpcDefaultSecurityGroupID string
activityInput := GetVpcDefaultSecurityGroupActivityInput{
AWSActivityInput: awsActivityInput,
ClusterID: input.ClusterID,
VpcID: vpcOutput["VpcId"],
}
err := workflow.ExecuteActivity(ctx, GetVpcDefaultSecurityGroupActivityName, activityInput).Get(ctx, &vpcDefaultSecurityGroupID)
if err != nil {
return err
}
if vpcDefaultSecurityGroupID == "" {
return errors.Errorf("couldn't get the default security group of the VPC %q", vpcOutput["VpcId"])
}
var nodePools []NodePool
// List node pools
{
activityInput := ListNodePoolsActivityInput{ClusterID: input.ClusterID}
err := workflow.ExecuteActivity(ctx, ListNodePoolsActivityName, activityInput).Get(ctx, &nodePools)
if err != nil {
return err
}
}
var master NodePool
for _, np := range nodePools {
if np.Master {
master = np
if len(np.AvailabilityZones) <= 0 || np.AvailabilityZones[0] == "" {
return errors.Errorf("missing availability zone for nodepool %q", np.Name)
}
break
}
}
var keyOut UploadSSHKeyPairActivityOutput
// Upload SSH key pair
{
activityInput := UploadSSHKeyPairActivityInput{
ClusterID: input.ClusterID,
}
err := workflow.ExecuteActivity(ctx, UploadSSHKeyPairActivityName, activityInput).Get(ctx, &keyOut)
if err != nil {
return err
}
}
// Create dex client for the cluster
if input.OIDCEnabled {
activityInput := CreateDexClientActivityInput{
ClusterID: input.ClusterID,
}
err := workflow.ExecuteActivity(ctx, CreateDexClientActivityName, activityInput).Get(ctx, nil)
if err != nil {
return err
}
}
var externalAddress string
multiMaster := master.MaxCount > 1
masterNodeSubnetID := strings.Split(vpcOutput["SubnetIds"], ",")[0]
if len(master.Subnets) > 0 {
masterNodeSubnetID = master.Sub |
kubeVersion, err := semver.NewVersion(kubernetesVersion)
if err != nil {
return "", errors.WithDetails(err, "could not create semver from Kubernetes version", "kubernetesVersion", kubernetesVersion)
}
_ = kubeVersion
if pkeImageNameGetter != nil {
ami, err := pkeImageNameGetter.PKEImageName("amazon", "pke", "ubuntu", kubeVersion.String(), pkeVersion, region)
if err != nil {
// fail silently
}
if ami != "" {
return ami, nil
}
}
// PKE 0.4.19; K8s 1.13.10; OS Ubuntu
return map[string]string{
"ap-east-1": "ami-0ca8206236662e9ea", // Asia Pacific (Hong Kong). | identifier_body | |
create_cluster.go | }
// PKE 0.4.19; K8s 1.13.10; OS Ubuntu
return map[string]string{
"ap-east-1": "ami-0ca8206236662e9ea", // Asia Pacific (Hong Kong).
"ap-northeast-1": "ami-029f1fff7d250aa95", // Asia Pacific (Tokyo).
"ap-northeast-2": "ami-0b2ea3e1fb7e0a0dc", // Asia Pacific (Seoul).
"ap-southeast-1": "ami-00d5d224c11f12854", // Asia Pacific (Singapore).
"ap-southeast-2": "ami-03ad7f293fb551d91", // Asia Pacific (Sydney).
"ap-south-1": "ami-03f5be5363911cfd7", // Asia Pacific (Mumbai).
"ca-central-1": "ami-0f45e7a3348941cd0", // Canada (Central).
"eu-central-1": "ami-01a9d881b5eef8c78", // EU (Frankfurt).
"eu-north-1": "ami-0152ce8be8bcc0c50", // EU (Stockholm).
"eu-west-1": "ami-0284019fcb7ca3121", // EU (Ireland).
"eu-west-2": "ami-0b4c70c59b14d97ba", // EU (London).
"eu-west-3": "ami-084d0cc1bce975f4b", // EU (Paris).
"me-south-1": "ami-0bab0d9b6142d8674", // Middle East (Bahrain).
"sa-east-1": "ami-025c97e09ba50cb05", // South America (Sao Paulo).
"us-east-1": "ami-0980e83ac34bbc3bb", // US East (N. Virginia).
"us-east-2": "ami-07d087f4be161fa72", // US East (Ohio).
"us-west-1": "ami-0c8ea5996aa18c15d", // US West (N. California).
"us-west-2": "ami-0d441df4104cb772b", // US West (Oregon).
}[region], nil
}
type TokenGenerator interface {
GenerateClusterToken(orgID, clusterID uint) (string, string, error)
}
type CreateClusterWorkflowInput struct {
OrganizationID uint
ClusterID uint
ClusterUID string
ClusterName string
SecretID string
Region string
PipelineExternalURL string
PipelineExternalURLInsecure bool
OIDCEnabled bool
VPCID string
SubnetID string
}
type CreateClusterWorkflow struct {
GlobalRegion string
}
func (w CreateClusterWorkflow) Execute(ctx workflow.Context, input CreateClusterWorkflowInput) error {
ao := workflow.ActivityOptions{
ScheduleToStartTimeout: 10 * time.Minute,
StartToCloseTimeout: 20 * time.Minute,
WaitForCancellation: true,
}
ctx = workflow.WithActivityOptions(ctx, ao)
// Generate CA certificates
{
activityInput := GenerateCertificatesActivityInput{ClusterID: input.ClusterID}
err := workflow.ExecuteActivity(ctx, GenerateCertificatesActivityName, activityInput).Get(ctx, nil)
if err != nil {
return err
}
}
// Generic AWS activity input
awsActivityInput := AWSActivityInput{
OrganizationID: input.OrganizationID,
SecretID: input.SecretID,
Region: input.Region,
}
var rolesStackID string
// Create AWS roles
{
activityInput := CreateAWSRolesActivityInput{AWSActivityInput: awsActivityInput, ClusterID: input.ClusterID}
activityInput.AWSActivityInput.Region = w.GlobalRegion
err := workflow.ExecuteActivity(ctx, CreateAWSRolesActivityName, activityInput).Get(ctx, &rolesStackID)
if err != nil {
return err
}
}
var rolesOutput map[string]string
// Wait for roles
{
if rolesStackID == "" {
return errors.New("missing AWS role stack ID")
}
activityInput := WaitCFCompletionActivityInput{AWSActivityInput: awsActivityInput, StackID: rolesStackID}
activityInput.AWSActivityInput.Region = w.GlobalRegion
err := workflow.ExecuteActivity(ctx, WaitCFCompletionActivityName, activityInput).Get(ctx, &rolesOutput)
if err != nil {
return err
}
}
var vpcStackID string
// Create VPC
{
activityInput := CreateVPCActivityInput{
AWSActivityInput: awsActivityInput,
ClusterID: input.ClusterID,
ClusterName: input.ClusterName,
VPCID: input.VPCID,
SubnetID: input.SubnetID,
}
err := workflow.ExecuteActivity(ctx, CreateVPCActivityName, activityInput).Get(ctx, &vpcStackID)
if err != nil {
return err
}
}
var vpcOutput map[string]string
// Wait for VPC
{
if vpcStackID == "" {
return errors.New("missing VPC stack ID")
}
activityInput := WaitCFCompletionActivityInput{AWSActivityInput: awsActivityInput, StackID: vpcStackID}
err := workflow.ExecuteActivity(ctx, WaitCFCompletionActivityName, activityInput).Get(ctx, &vpcOutput)
if err != nil {
return err
}
}
// Get default security group of the VPC
var vpcDefaultSecurityGroupID string
activityInput := GetVpcDefaultSecurityGroupActivityInput{
AWSActivityInput: awsActivityInput,
ClusterID: input.ClusterID,
VpcID: vpcOutput["VpcId"],
}
err := workflow.ExecuteActivity(ctx, GetVpcDefaultSecurityGroupActivityName, activityInput).Get(ctx, &vpcDefaultSecurityGroupID)
if err != nil {
return err
}
if vpcDefaultSecurityGroupID == "" {
return errors.Errorf("couldn't get the default security group of the VPC %q", vpcOutput["VpcId"])
}
var nodePools []NodePool
// List node pools
{
activityInput := ListNodePoolsActivityInput{ClusterID: input.ClusterID}
err := workflow.ExecuteActivity(ctx, ListNodePoolsActivityName, activityInput).Get(ctx, &nodePools)
if err != nil {
return err
}
}
var master NodePool
for _, np := range nodePools {
if np.Master {
master = np
if len(np.AvailabilityZones) <= 0 || np.AvailabilityZones[0] == "" {
return errors.Errorf("missing availability zone for nodepool %q", np.Name)
}
break
}
}
var keyOut UploadSSHKeyPairActivityOutput
// Upload SSH key pair
{
activityInput := UploadSSHKeyPairActivityInput{
ClusterID: input.ClusterID,
}
err := workflow.ExecuteActivity(ctx, UploadSSHKeyPairActivityName, activityInput).Get(ctx, &keyOut)
if err != nil {
return err
}
}
// Create dex client for the cluster
if input.OIDCEnabled {
activityInput := CreateDexClientActivityInput{
ClusterID: input.ClusterID,
}
err := workflow.ExecuteActivity(ctx, CreateDexClientActivityName, activityInput).Get(ctx, nil)
if err != nil {
return err
}
}
var externalAddress string
multiMaster := master.MaxCount > 1
masterNodeSubnetID := strings.Split(vpcOutput["SubnetIds"], ",")[0]
if len(master.Subnets) > 0 {
masterNodeSubnetID = master.Subnets[0]
}
masterInput := CreateMasterActivityInput{
ClusterID: input.ClusterID,
VPCID: vpcOutput["VpcId"],
VPCDefaultSecurityGroupID: vpcDefaultSecurityGroupID,
SubnetID: masterNodeSubnetID,
MultiMaster: multiMaster,
MasterInstanceProfile: rolesOutput["MasterInstanceProfile"],
ExternalBaseUrl: input.PipelineExternalURL,
ExternalBaseUrlInsecure: input.PipelineExternalURLInsecure,
|
return ami, nil
}
| conditional_block | |
pyod_tests.py | 212827119302409
"""
input_shape = (input_dim, input_dim, 1)
inputs = Input(shape=input_shape)
x = Conv2D(h, (7, 7), input_shape=input_shape, padding='valid', activation="elu")(inputs)
x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(x)
x = residual(h, x)
x = residual(h, x)
x = residual(h, x)
x = Conv2D(h, (1, 1), activation='linear')(x)
x = Flatten()(x)
embeddings = Dense(n_embeddings, name='embeddings')(x)
model = Model(inputs=inputs, outputs=embeddings)
print(model.summary())
return model
def get_train_test_lists(dataset_path, classes=('glare_small', 'normal'), test_size=0.25):
"""
Function for gathering lists of image paths on disk and splitting them into train and test sets
classes - classes to collect under dataset_path; each class is a folder name, and that folder name is used as the label
Labels are encoded for the pyod module as follows: anomaly class == 1, normal class == 0
"""
image_set = []
label_set = []
for cls in classes:
dir = os.path.join(dataset_path, cls)
img_list = glob.glob(dir + '/*.png')
img_list.extend(glob.glob(dir + '/*.jpg'))
label = None
if cls == 'glare_small' or cls == 'glare':
label = 1
if cls == 'normal':
label = 0
labels = list(itertools.repeat(label, len(img_list)))
image_set.extend(img_list)
label_set.extend(labels)
X_train, X_test, y_train, y_test = train_test_split(image_set, label_set, test_size=test_size, random_state=42)
return X_train, X_test, y_train, y_test
def generate_embeddings_gen(dataset_path, classes):
"""
Function for creating train and test batch generators that yield image embeddings
"""
model = embeddings(INPUT_DIM)
X_train, X_test, y_train, y_test = get_train_test_lists(dataset_path, classes)
# create data generators
batch_size = 16
train_batch_generator = image_batch_generator(X_train, model, batch_size=batch_size)
test_batch_generator = image_batch_generator(X_test, model, batch_size=batch_size)
return train_batch_generator, test_batch_generator
def generate_embeddings_memory(dataset_path, classes):
"""
function for creating embeddings in-memory
"""
# get embeddings not in generators
model = embeddings(INPUT_DIM)
X_train, X_test, y_train, y_test = get_train_test_lists(dataset_path, classes)
X_train_em = []
X_test_em = []
for im in X_train:
img = preprocess_image(im)
embeds = model.predict(img)
embeds = np.squeeze(embeds)
X_train_em.append(embeds)
for im in X_test:
img = preprocess_image(im)
embeds = model.predict(img)
embeds = np.squeeze(embeds)
X_test_em.append(embeds)
return np.array(X_train_em), np.array(X_test_em), np.array(y_train), np.array(y_test)
def test_gens():
"""
Test of the generate_embeddings_gen function
"""
dataset_path = "/home/kateryna/Documents"
train_gen, test_gen = generate_embeddings_gen(dataset_path, classes=['normal', 'glare_small'])
img, feature, labels = next(train_gen)
print(len(img), len(feature), labels)
def read_images(img_path, labels_list, test_size=0.25):
'''
Function for gathering image paths and labels from class folders and splitting them into train and test sets
:param img_path: path to the folders with images (one folder per class from labels_list)
:return: train/test splits of image paths and their labels
'''
image_set = []
label_set = []
images = []
for cls in labels_list:
dir = os.path.join(img_path, cls)
img_list = glob.glob(dir + '/*.png')
img_list.extend(glob.glob(dir + '/*.jpg'))
label = None
if cls == 'glare_small' or cls == 'glare':
label = 1
if cls == 'normal':
label = 0
labels = list(itertools.repeat(label, len(img_list)))
image_set.extend(img_list)
label_set.extend(labels)
X_train, X_test, y_train, y_test = train_test_split(image_set, label_set, test_size=test_size, random_state=42)
return X_train, X_test, y_train, y_test
from sklearn.metrics import roc_auc_score, confusion_matrix, precision_score, recall_score
from pyod.models.iforest import IForest
from pyod.models.copod import COPOD
from pyod.models.cblof import CBLOF
from pyod.models.vae import VAE
def test_autoencoder():
"""
function for testing VAE autoencoder module
"""
dataset_path = "/home/kateryna/Documents"
X_train, X_test, y_train, y_test = read_images(dataset_path, labels_list=['normal', 'glare_small'], test_size=0.25)
X_train_im = []
for im in X_train:
img = preprocess_image(im)
img = np.array(img)
img = img.flatten()
X_train_im.append(img)
X_train_im = np.array(X_train_im)
X_test_im = []
for im in X_test:
img = preprocess_image(im)
img = np.array(img)
img = img.flatten()
X_test_im.append(img)
X_test_im = np.array(X_test_im)
autoenc = VAE(encoder_neurons=[16, 32], decoder_neurons=[32, 16], latent_dim=32, epochs=50)
autoenc.fit(X_train_im, y_train)
y_pred = autoenc.predict(X_test_im)
y_test_scores = autoenc.decision_function(X_test_im)
conf_mtx_test = confusion_matrix(y_test, y_pred, labels=[0, 1])
evaluate_print('vae', y_test, y_test_scores)
print(conf_mtx_test)
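# Added note (not in the original module): preprocess_image resizes to INPUT_DIM x INPUT_DIM
# (64 x 64) grayscale, so the flattened vectors fed to the VAE above have 64 * 64 = 4096
# features per sample.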
def classic_model_testing():
"""
Function for testing classic outlier detection models that take embeddings as inputs
"""
dataset_path = "/home/kateryna/Documents"
X_train, X_test, y_train, y_test = generate_embeddings_memory(dataset_path, classes=['normal', 'glare_small'])
contam = 0.08
models = [XGBOD(), OCSVM(contamination=contam), IForest(contamination=contam, n_estimators=150), XGBOD(learning_rate=0.01, n_estimators=150),
COPOD(contamination=contam)]
for model in models:
model_name = model.__str__().split('(')[0]
clf = model
clf.fit(X_train, y_train)
y_train_pred = clf.labels_
y_train_scores = clf.decision_scores_
# get the prediction on the test data
# 0 stands for inliers and 1 for outliers.
y_test_pred = clf.predict(X_test)
y_test_scores = clf.decision_function(X_test)
# y_probabilities = clf.predict_proba(X_test)
print("\nOn Training Data:")
evaluate_print(model_name, y_train, y_train_scores)
print("\nOn Test Data:")
evaluate_print(model_name, y_test, y_test_scores)
print('roc auc', roc_auc_score(y_test, y_test_scores))
conf_mtx_test = confusion_matrix(y_test, y_test_pred, labels=[0, 1])
print(conf_mtx_test)
conf_mtx_train = confusion_matrix(y_train, y_train_pred, labels=[0, 1])
print(conf_mtx_train)
print('~~~')
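# Added note (not in the original module): of the detectors above only XGBOD uses y_train
# (it is semi-supervised); OCSVM, IForest and COPOD are unsupervised and ignore the labels,
# relying on contamination=0.08 as the expected share of anomalous samples.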
from sklearn.model_selection import ParameterGrid
def param_grid_search():
# for model = XGBOD()
param_dict = {'learning_rate': [0.01, 0.1, 0.001], 'n_estimators': [50, 100, 150], 'max_depth': [3, 5, 10]}
grid = ParameterGrid(param_dict)
dataset_path = "/home/kateryna/Documents"
X_train, X_test, y_train, y_test = generate_embeddings_memory(dataset_path, classes=['normal', 'glare_small'])
max_precision = 0
max_recall = 0
max_precision_id = None
max_recall_id = None
for i, params in enumerate(grid):
clf = XGBOD(**params)
clf.fit(X_train, y_train)
# get the prediction on the test data
# 0 stands for inliers and 1 for outliers.
y_test_pred = clf.predict(X_test)
# y_test_scores = clf.decision_function(X_test)
# precision and recall regrading positive class
precision = precision_score(y_test, y_test_pred, pos_label=0)
recall = recall_score(y_test, y_test_pred, pos_label=0)
if max_precision < precision:
max_precision = precision
max_precision_id = i
if max_recall < recall:
| max_recall = recall
max_recall_id = i | conditional_block | |
pyod_tests.py | import random
import os
from sklearn.model_selection import train_test_split
from keras.layers import Conv2D, MaxPooling2D, Input, Cropping2D, Add
from keras.layers import Flatten, Dense
from keras.models import Model
import glob
import cv2
import itertools
from utils import silent
from pyod.models.xgbod import XGBOD
from pyod.models.ocsvm import OCSVM
from pyod.models.cof import COF
from pyod.utils.data import evaluate_print
INPUT_DIM = 64
def preprocess_image(img):
"""
Helper function for image preprocessing with such steps:
- reading from disk
- resizing
- normalization
- adding dimension for tensor-like shape
"""
img = cv2.imread(img, 0)
img = cv2.resize(img, (INPUT_DIM, INPUT_DIM))
img = np.array(img, np.float32)
img = img / 255.
img = np.expand_dims(img, 0)
img = np.expand_dims(img, 3)
return img
def image_generator(img_list):
"""
Image generator function from image list. Yields image and label. Label is taken from containing folder name
"""
while True:
img = random.choice(img_list)
label = os.path.basename(os.path.dirname(img)) # add label function according to the dataset tree
img = preprocess_image(img)
yield img, label
def image_batch_generator(img_paths, model, batch_size, features=True):
"""
Generates batches of images from image_generator. Yields batches of images, image embeddings and class labels
"""
while True:
ig = image_generator(img_paths)
batch_img, batch_features, batch_labels = [], [], []
for img, lab in ig:
# Add the image and mask to the batch
if features:
img = np.expand_dims(img, 0)
img_embedding = model.predict(img)
batch_features.append(img_embedding)
batch_img.append(img)
batch_labels.append(lab)
# If we've reached our batchsize, yield the batch and reset
if len(batch_img) == batch_size:
yield batch_img, batch_features, batch_labels
batch_img, batch_features, batch_labels = [], [], []
# If we have a nonempty batch left, yield it out and reset
if len(batch_img) != 0:
yield np.stack(batch_img, axis=1), np.array(batch_features), batch_labels
batch_img, batch_features, batch_labels = [], [], []
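# Illustrative usage sketch (not part of the original module; variable names are assumptions):
# the generator yields (images, embeddings, labels) triples, so one batch can be inspected with
#
#   model = embeddings(INPUT_DIM)
#   gen = image_batch_generator(img_paths, model, batch_size=16)
#   batch_img, batch_features, batch_labels = next(gen)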
def residual(n_filters, input):
"""
residual block for convolutional encoder
"""
shape = input.shape
_, h, w, d = shape
l1 = Conv2D(n_filters, (5, 5), padding='valid', activation='elu')(input)
l2 = Conv2D(n_filters, (1, 1), padding='valid', activation='linear')(l1)
l3 = Cropping2D(cropping=2)(input)
added = Add()([l2, l3])
return added
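# Added note (not in the original module): the 5x5 'valid' convolution in l1 shrinks the feature
# map by 2 pixels per border (4 in total), so Cropping2D(cropping=2) trims the skip branch to the
# same spatial size before the Add() merge.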
def embeddings(input_dim, h=16, n_embeddings=64):
"""
Convolutional residual model for embedding creation. The idea of this model is taken from this paper:
https://www.sciencedirect.com/science/article/pii/S2212827119302409
"""
input_shape = (input_dim, input_dim, 1)
inputs = Input(shape=input_shape)
x = Conv2D(h, (7, 7), input_shape=input_shape, padding='valid', activation="elu")(inputs)
x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(x)
x = residual(h, x)
x = residual(h, x)
x = residual(h, x)
x = Conv2D(h, (1, 1), activation='linear')(x)
x = Flatten()(x)
embeddings = Dense(n_embeddings, name='embeddings')(x)
model = Model(inputs=inputs, outputs=embeddings)
print(model.summary())
return model
def get_train_test_lists(dataset_path, classes=('glare_small', 'normal'), test_size=0.25):
"""
Function for gathering lists of image paths on disk and splitting them into train and test sets
classes - classes to collect under dataset_path; each class is a folder name, and that folder name is used as the label
Labels are encoded for the pyod module as follows: anomaly class == 1, normal class == 0
"""
image_set = []
label_set = []
for cls in classes:
dir = os.path.join(dataset_path, cls)
img_list = glob.glob(dir + '/*.png')
img_list.extend(glob.glob(dir + '/*.jpg'))
label = None
if cls == 'glare_small' or cls == 'glare':
label = 1
if cls == 'normal':
label = 0
labels = list(itertools.repeat(label, len(img_list)))
image_set.extend(img_list)
label_set.extend(labels)
X_train, X_test, y_train, y_test = train_test_split(image_set, label_set, test_size=test_size, random_state=42)
return X_train, X_test, y_train, y_test
def generate_embeddings_gen(dataset_path, classes):
"""
Function for creating train and test batch generators that yield image embeddings
"""
model = embeddings(INPUT_DIM)
X_train, X_test, y_train, y_test = get_train_test_lists(dataset_path, classes)
# create data generators
batch_size = 16
train_batch_generator = image_batch_generator(X_train, model, batch_size=batch_size)
test_batch_generator = image_batch_generator(X_test, model, batch_size=batch_size)
return train_batch_generator, test_batch_generator
def generate_embeddings_memory(dataset_path, classes):
"""
function for creating embeddings in-memory
"""
# get embeddings not in generators
model = embeddings(INPUT_DIM)
X_train, X_test, y_train, y_test = get_train_test_lists(dataset_path, classes)
X_train_em = []
X_test_em = []
for im in X_train:
img = preprocess_image(im)
embeds = model.predict(img)
embeds = np.squeeze(embeds)
X_train_em.append(embeds)
for im in X_test:
img = preprocess_image(im)
embeds = model.predict(img)
embeds = np.squeeze(embeds)
X_test_em.append(embeds)
return np.array(X_train_em), np.array(X_test_em), np.array(y_train), np.array(y_test)
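# Added note (not in the original module): the embedding model ends in Dense(n_embeddings) with
# n_embeddings=64, so X_train_em and X_test_em have shape (n_samples, 64) and can be fed directly
# to the pyod detectors used below.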
def test_gens():
"""
Test of the generate_embeddings_gen function
"""
dataset_path = "/home/kateryna/Documents"
train_gen, test_gen = generate_embeddings_gen(dataset_path, classes=['normal', 'glare_small'])
img, feature, labels = next(train_gen)
print(len(img), len(feature), labels)
def read_images(img_path, labels_list, test_size=0.25):
'''
Function for gathering image paths and labels from class folders and splitting them into train and test sets
:param img_path: path to the folders with images (one folder per class from labels_list)
:return: train/test splits of image paths and their labels
'''
image_set = []
label_set = []
images = []
for cls in labels_list:
dir = os.path.join(img_path, cls)
img_list = glob.glob(dir + '/*.png')
img_list.extend(glob.glob(dir + '/*.jpg'))
label = None
if cls == 'glare_small' or cls == 'glare':
label = 1
if cls == 'normal':
label = 0
labels = list(itertools.repeat(label, len(img_list)))
image_set.extend(img_list)
label_set.extend(labels)
X_train, X_test, y_train, y_test = train_test_split(image_set, label_set, test_size=test_size, random_state=42)
return X_train, X_test, y_train, y_test
from sklearn.metrics import roc_auc_score, confusion_matrix, precision_score, recall_score
from pyod.models.iforest import IForest
from pyod.models.copod import COPOD
from pyod.models.cblof import CBLOF
from pyod.models.vae import VAE
def test_autoencoder():
"""
function for testing VAE autoencoder module
"""
dataset_path = "/home/kateryna/Documents"
X_train, X_test, y_train, y_test = read_images(dataset_path, labels_list=['normal', 'glare_small'], test_size=0.25)
X_train_im = []
for im in X_train:
img = preprocess_image(im)
img = np.array(img)
img = img.flatten()
X_train_im.append(img)
X_train_im = np.array(X_train_im)
X_test_im = []
for im in X_test:
img = preprocess_image(im)
img = np.array(img)
img = img.flatten()
X_test_im.append(img)
X_test_im = np.array(X_test_im)
autoenc = VAE(encoder_neurons=[16, 32], decoder_neurons=[32, 16], latent_dim=32, epochs=50)
autoenc.fit(X_train_im, y_train)
y_pred = autoenc.predict(X_test_im)
y_test_scores = auto |
import numpy as np | random_line_split | |
pyod_tests.py | return img
def image_generator(img_list):
"""
Image generator function from image list. Yields image and label. Label is taken from containing folder name
"""
while True:
img = random.choice(img_list)
label = os.path.basename(os.path.dirname(img)) # add label function according to the dataset tree
img = preprocess_image(img)
yield img, label
def image_batch_generator(img_paths, model, batch_size, features=True):
"""
Generates batches of images from image_generator. Yields batches of images, image embeddings and class labels
"""
while True:
ig = image_generator(img_paths)
batch_img, batch_features, batch_labels = [], [], []
for img, lab in ig:
# Add the image and mask to the batch
if features:
img = np.expand_dims(img, 0)
img_embedding = model.predict(img)
batch_features.append(img_embedding)
batch_img.append(img)
batch_labels.append(lab)
# If we've reached our batchsize, yield the batch and reset
if len(batch_img) == batch_size:
yield batch_img, batch_features, batch_labels
batch_img, batch_features, batch_labels = [], [], []
# If we have a nonempty batch left, yield it out and reset
if len(batch_img) != 0:
yield np.stack(batch_img, axis=1), np.array(batch_features), batch_labels
batch_img, batch_features, batch_labels = [], [], []
def residual(n_filters, input):
"""
residual block for convolutional encoder
"""
shape = input.shape
_, h, w, d = shape
l1 = Conv2D(n_filters, (5, 5), padding='valid', activation='elu')(input)
l2 = Conv2D(n_filters, (1, 1), padding='valid', activation='linear')(l1)
l3 = Cropping2D(cropping=2)(input)
added = Add()([l2, l3])
return added
def embeddings(input_dim, h=16, n_embeddings=64):
"""
Convolutional residual model for embedding creation. The idea of this model is taken from this paper:
https://www.sciencedirect.com/science/article/pii/S2212827119302409
"""
input_shape = (input_dim, input_dim, 1)
inputs = Input(shape=input_shape)
x = Conv2D(h, (7, 7), input_shape=input_shape, padding='valid', activation="elu")(inputs)
x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(x)
x = residual(h, x)
x = residual(h, x)
x = residual(h, x)
x = Conv2D(h, (1, 1), activation='linear')(x)
x = Flatten()(x)
embeddings = Dense(n_embeddings, name='embeddings')(x)
model = Model(inputs=inputs, outputs=embeddings)
print(model.summary())
return model
def get_train_test_lists(dataset_path, classes=('glare_small', 'normal'), test_size=0.25):
"""
Function for gathering lists of image paths on disk and splitting them into train and test sets
classes - classes to collect under dataset_path; each class is a folder name, and that folder name is used as the label
Labels are encoded for the pyod module as follows: anomaly class == 1, normal class == 0
"""
image_set = []
label_set = []
for cls in classes:
dir = os.path.join(dataset_path, cls)
img_list = glob.glob(dir + '/*.png')
img_list.extend(glob.glob(dir + '/*.jpg'))
label = None
if cls == 'glare_small' or cls == 'glare':
label = 1
if cls == 'normal':
label = 0
labels = list(itertools.repeat(label, len(img_list)))
image_set.extend(img_list)
label_set.extend(labels)
X_train, X_test, y_train, y_test = train_test_split(image_set, label_set, test_size=test_size, random_state=42)
return X_train, X_test, y_train, y_test
def | (dataset_path, classes):
"""
Function for creating train and test batch generators that yield image embeddings
"""
model = embeddings(INPUT_DIM)
X_train, X_test, y_train, y_test = get_train_test_lists(dataset_path, classes)
# create data generators
batch_size = 16
train_batch_generator = image_batch_generator(X_train, model, batch_size=batch_size)
test_batch_generator = image_batch_generator(X_test, model, batch_size=batch_size)
return train_batch_generator, test_batch_generator
def generate_embeddings_memory(dataset_path, classes):
"""
function for creating embeddings in-memory
"""
# get embeddings not in generators
model = embeddings(INPUT_DIM)
X_train, X_test, y_train, y_test = get_train_test_lists(dataset_path, classes)
X_train_em = []
X_test_em = []
for im in X_train:
img = preprocess_image(im)
embeds = model.predict(img)
embeds = np.squeeze(embeds)
X_train_em.append(embeds)
for im in X_test:
img = preprocess_image(im)
embeds = model.predict(img)
embeds = np.squeeze(embeds)
X_test_em.append(embeds)
return np.array(X_train_em), np.array(X_test_em), np.array(y_train), np.array(y_test)
def test_gens():
"""
Test of the generate_embeddings_gen function
"""
dataset_path = "/home/kateryna/Documents"
train_gen, test_gen = generate_embeddings_gen(dataset_path, classes=['normal', 'glare_small'])
img, feature, labels = next(train_gen)
print(len(img), len(feature), labels)
def read_images(img_path, labels_list, test_size=0.25):
'''
Function for gathering image paths and labels from class folders and splitting them into train and test sets
:param img_path: path to the folders with images (one folder per class from labels_list)
:return: train/test splits of image paths and their labels
'''
image_set = []
label_set = []
images = []
for cls in labels_list:
dir = os.path.join(img_path, cls)
img_list = glob.glob(dir + '/*.png')
img_list.extend(glob.glob(dir + '/*.jpg'))
label = None
if cls == 'glare_small' or cls == 'glare':
label = 1
if cls == 'normal':
label = 0
labels = list(itertools.repeat(label, len(img_list)))
image_set.extend(img_list)
label_set.extend(labels)
X_train, X_test, y_train, y_test = train_test_split(image_set, label_set, test_size=test_size, random_state=42)
return X_train, X_test, y_train, y_test
from sklearn.metrics import roc_auc_score, confusion_matrix, precision_score, recall_score
from pyod.models.iforest import IForest
from pyod.models.copod import COPOD
from pyod.models.cblof import CBLOF
from pyod.models.vae import VAE
def test_autoencoder():
"""
function for testing VAE autoencoder module
"""
dataset_path = "/home/kateryna/Documents"
X_train, X_test, y_train, y_test = read_images(dataset_path, labels_list=['normal', 'glare_small'], test_size=0.25)
X_train_im = []
for im in X_train:
img = preprocess_image(im)
img = np.array(img)
img = img.flatten()
X_train_im.append(img)
X_train_im = np.array(X_train_im)
X_test_im = []
for im in X_test:
img = preprocess_image(im)
img = np.array(img)
img = img.flatten()
X_test_im.append(img)
X_test_im = np.array(X_test_im)
autoenc = VAE(encoder_neurons=[16, 32], decoder_neurons=[32, 16], latent_dim=32, epochs=50)
autoenc.fit(X_train_im, y_train)
y_pred = autoenc.predict(X_test_im)
y_test_scores = autoenc.decision_function(X_test_im)
conf_mtx_test = confusion_matrix(y_test, y_pred, labels=[0, 1])
evaluate_print('vae', y_test, y_test_scores)
print(conf_mtx_test)
def classic_model_testing():
"""
Function for testing classic outlier detection models that take embeddings as inputs
"""
dataset_path = "/home/kateryna/Documents"
X_train, X_test, y_train, y_test = generate_embeddings_memory(dataset_path, classes=['normal', 'glare_small'])
contam = 0.08
models = [XGBOD(), OCSVM(contamination=contam), IForest(contamination=contam, n_estimators=150), XGBOD(learning_rate=0.01, n_estimators=150),
COPOD(contamination=contam)]
for model in models:
model_name = model.__str__().split('(')[0]
clf = model
clf.fit(X_train, y_train)
y_train_pred = clf.labels_
y_train_scores = clf | generate_embeddings_gen | identifier_name |
pyod_tests.py | )
return img
def image_generator(img_list):
"""
Image generator function from image list. Yields image and label. Label is taken from containing folder name
"""
while True:
img = random.choice(img_list)
label = os.path.basename(os.path.dirname(img)) # add label function according to the dataset tree
img = preprocess_image(img)
yield img, label
def image_batch_generator(img_paths, model, batch_size, features=True):
"""
Generates batches of images from image_generator. Yields batches of images, image embeddings and class labels
"""
while True:
ig = image_generator(img_paths)
batch_img, batch_features, batch_labels = [], [], []
for img, lab in ig:
# Add the image and mask to the batch
if features:
img = np.expand_dims(img, 0)
img_embedding = model.predict(img)
batch_features.append(img_embedding)
batch_img.append(img)
batch_labels.append(lab)
# If we've reached our batchsize, yield the batch and reset
if len(batch_img) == batch_size:
yield batch_img, batch_features, batch_labels
batch_img, batch_features, batch_labels = [], [], []
# If we have a nonempty batch left, yield it out and reset
if len(batch_img) != 0:
yield np.stack(batch_img, axis=1), np.array(batch_features), batch_labels
batch_img, batch_features, batch_labels = [], [], []
def residual(n_filters, input):
|
def embeddings(input_dim, h=16, n_embeddings=64):
"""
Convolutional residual model for embedding creation. The idea of this model is taken from this paper:
https://www.sciencedirect.com/science/article/pii/S2212827119302409
"""
input_shape = (input_dim, input_dim, 1)
inputs = Input(shape=input_shape)
x = Conv2D(h, (7, 7), input_shape=input_shape, padding='valid', activation="elu")(inputs)
x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(x)
x = residual(h, x)
x = residual(h, x)
x = residual(h, x)
x = Conv2D(h, (1, 1), activation='linear')(x)
x = Flatten()(x)
embeddings = Dense(n_embeddings, name='embeddings')(x)
model = Model(inputs=inputs, outputs=embeddings)
print(model.summary())
return model
def get_train_test_lists(dataset_path, classes=('glare_small', 'normal'), test_size=0.25):
"""
Function for gathering lists of image paths on disk and splitting them into train and test sets
classes - classes to collect under dataset_path; each class is a folder name, and that folder name is used as the label
Labels are encoded for the pyod module as follows: anomaly class == 1, normal class == 0
"""
image_set = []
label_set = []
for cls in classes:
dir = os.path.join(dataset_path, cls)
img_list = glob.glob(dir + '/*.png')
img_list.extend(glob.glob(dir + '/*.jpg'))
label = None
if cls == 'glare_small' or cls == 'glare':
label = 1
if cls == 'normal':
label = 0
labels = list(itertools.repeat(label, len(img_list)))
image_set.extend(img_list)
label_set.extend(labels)
X_train, X_test, y_train, y_test = train_test_split(image_set, label_set, test_size=test_size, random_state=42)
return X_train, X_test, y_train, y_test
def generate_embeddings_gen(dataset_path, classes):
"""
Function for creating train and test batch generators that yield image embeddings
"""
model = embeddings(INPUT_DIM)
X_train, X_test, y_train, y_test = get_train_test_lists(dataset_path, classes)
# create data generators
batch_size = 16
train_batch_generator = image_batch_generator(X_train, model, batch_size=batch_size)
test_batch_generator = image_batch_generator(X_test, model, batch_size=batch_size)
return train_batch_generator, test_batch_generator
def generate_embeddings_memory(dataset_path, classes):
"""
function for creating embeddings in-memory
"""
# get embeddings not in generators
model = embeddings(INPUT_DIM)
X_train, X_test, y_train, y_test = get_train_test_lists(dataset_path, classes)
X_train_em = []
X_test_em = []
for im in X_train:
img = preprocess_image(im)
embeds = model.predict(img)
embeds = np.squeeze(embeds)
X_train_em.append(embeds)
for im in X_test:
img = preprocess_image(im)
embeds = model.predict(img)
embeds = np.squeeze(embeds)
X_test_em.append(embeds)
return np.array(X_train_em), np.array(X_test_em), np.array(y_train), np.array(y_test)
def test_gens():
"""
Test of the generate_embeddings_gen function
"""
dataset_path = "/home/kateryna/Documents"
train_gen, test_gen = generate_embeddings_gen(dataset_path, classes=['normal', 'glare_small'])
img, feature, labels = next(train_gen)
print(len(img), len(feature), labels)
def read_images(img_path, labels_list, test_size=0.25):
'''
Function for gathering image paths and labels from class folders and splitting them into train and test sets
:param img_path: path to the folders with images (one folder per class from labels_list)
:return: train/test splits of image paths and their labels
'''
image_set = []
label_set = []
images = []
for cls in labels_list:
dir = os.path.join(img_path, cls)
img_list = glob.glob(dir + '/*.png')
img_list.extend(glob.glob(dir + '/*.jpg'))
label = None
if cls == 'glare_small' or cls == 'glare':
label = 1
if cls == 'normal':
label = 0
labels = list(itertools.repeat(label, len(img_list)))
image_set.extend(img_list)
label_set.extend(labels)
X_train, X_test, y_train, y_test = train_test_split(image_set, label_set, test_size=test_size, random_state=42)
return X_train, X_test, y_train, y_test
from sklearn.metrics import roc_auc_score, confusion_matrix, precision_score, recall_score
from pyod.models.iforest import IForest
from pyod.models.copod import COPOD
from pyod.models.cblof import CBLOF
from pyod.models.vae import VAE
def test_autoencoder():
"""
function for testing VAE autoencoder module
"""
dataset_path = "/home/kateryna/Documents"
X_train, X_test, y_train, y_test = read_images(dataset_path, labels_list=['normal', 'glare_small'], test_size=0.25)
X_train_im = []
for im in X_train:
img = preprocess_image(im)
img = np.array(img)
img = img.flatten()
X_train_im.append(img)
X_train_im = np.array(X_train_im)
X_test_im = []
for im in X_test:
img = preprocess_image(im)
img = np.array(img)
img = img.flatten()
X_test_im.append(img)
X_test_im = np.array(X_test_im)
autoenc = VAE(encoder_neurons=[16, 32], decoder_neurons=[32, 16], latent_dim=32, epochs=50)
autoenc.fit(X_train_im, y_train)
y_pred = autoenc.predict(X_test_im)
y_test_scores = autoenc.decision_function(X_test_im)
conf_mtx_test = confusion_matrix(y_test, y_pred, labels=[0, 1])
evaluate_print('vae', y_test, y_test_scores)
print(conf_mtx_test)
def classic_model_testing():
"""
Function for testing classic outlier detection models that take embeddings as inputs
"""
dataset_path = "/home/kateryna/Documents"
X_train, X_test, y_train, y_test = generate_embeddings_memory(dataset_path, classes=['normal', 'glare_small'])
contam = 0.08
models = [XGBOD(), OCSVM(contamination=contam), IForest(contamination=contam, n_estimators=150), XGBOD(learning_rate=0.01, n_estimators=150),
COPOD(contamination=contam)]
for model in models:
model_name = model.__str__().split('(')[0]
clf = model
clf.fit(X_train, y_train)
y_train_pred = clf.labels_
y_train_scores = clf | """
residual block for convolutional encoder
"""
shape = input.shape
_, h, w, d = shape
l1 = Conv2D(n_filters, (5, 5), padding='valid', activation='elu')(input)
l2 = Conv2D(n_filters, (1, 1), padding='valid', activation='linear')(l1)
l3 = Cropping2D(cropping=2)(input)
added = Add()([l2, l3])
return added | identifier_body |
network.js | IP addresses
are acceptable - hostnames will cause an error.
**/
// If the transport hasn't been initialized yet, wait a second
console.log('[DHT Server] Bootstrapping with', addrs.length, 'peers, finding neighbors...');
let ds = [];
let that = this;
for (let addrIndex in addrs) {
// Skip our own address; comparing arrays with != would always be true in JS
if (addrs[addrIndex][0] !== this.node.ip || addrs[addrIndex][1] !== this.node.port) {
let addr = addrs[addrIndex];
let u = new Utils();
let nullDigest = u.digest('null');
ds.push(
this.protocol.rpcProxy(
'ping',
new Node.DHTNode(nullDigest, addr[0], addr[1], NATTYPE['FULL_CONE'])
)
)
}
}
function initTable(results) {
console.log('initTable');
let response = false;
let potentialRelayNodes = [];
for (var index in results) {
let addr = results[index][0];
let result = results[index][1];
if (result[0]) {
response = true;
let h, validPow;
let n = new objects.Node();
let sNode = objects.Node.deserializeBinary(result[1][0]);
let sNodeAddress = sNode.getNodeaddress();
// Create protobuf from uint8array
// create new DHTNode object
// add to router
let sNodeRelayAddress = sNode.getRelayaddress();
let dhtNodeRelayAddress = null;
if (sNodeRelayAddress !== undefined) {
dhtNodeRelayAddress = [sNodeRelayAddress.getIp(), sNodeRelayAddress.getPort()];
}
let dhtNode = new Node.DHTNode(
sNode.getGuid_asU8(),
sNodeAddress.getIp(),
sNodeAddress.getPort(),
sNode.getPublickey(),
dhtNodeRelayAddress,
parseInt(sNode.getNattype()),
sNode.getVendor()
);
try {
h = sodium.crypto_hash_sha512(sNode.getPublickey());
let hStr = h.toString('hex');
let powHash = hStr.substring(40, hStr.length - 1);
validPow = GUID._testpow(powHash.substring(0, 6));
if (!validPow) {
throw('Invalid GUID');
}
} catch (err) {
console.log('Initializing DHT table error:', err);
}
that.protocol.router.addContact(dhtNode);
if (dhtNode.natType == NATTYPE['FULL_CONE']) {
potentialRelayNodes.push([addr.ip, addr.port]);
}
}
}
}
return Promise.all(ds).then((results) => {
initTable(results);
console.log('All done...');
return 'done';
}).catch((err) => {
console.log(err);
});
}
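// Illustrative sketch (assumption, not part of the original class): bootstrap() expects the
// [ip, port] pairs that the seed query collects, so a typical startup looks roughly like this
// (seed host, pubkey and variable names are hypothetical):
//
//   server.querySeed([['seed.example.com:8080', seedPubkeyHex]], (addrs) => {
//     server.bootstrap(addrs).then(() => console.log('[DHT Server] routing table seeded'));
//   });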
| (guid) {
/**
Given a guid return a `Node` object containing its ip and port or none if it's
not found.
Args:
guid: the 20 raw bytes representing the guid.
**/
console.log('[Kademlia Server] Crawling DHT to find IP for', guid);
let p = new Promise((resolve, reject) => {
let nodeToFind = new Node.DHTNode(guid);
let dhtConnections = this.protocol.multiplexer.getAllConnections()
// Check in existing connections
for (var connIndex in dhtConnections) {
let connection = dhtConnections[connIndex];
if (connection.handler.node !== undefined && connection.handler.node.id == nodeToFind.id) {
console.log(guid, 'successfully resolved as', connection.handler.node);
resolve(connection.handler.node);
return;
}
}
// Check local DHT
let index = this.protocol.router.getBucketFor(nodeToFind);
let nodes = this.protocol.router.buckets[index].getNodes();
for (var nodeIndex in nodes) {
let node = nodes[nodeIndex];
if (node.id == nodeToFind.id) {
console.log('[Kademlia Server] Successfully resolved', guid, 'as', node);
resolve(node);
return;
}
}
// Get neighbors to ask
let nearest = this.protocol.router.findNeighbors(nodeToFind);
if (nearest.length == 0) {
console.log('[Kademlia Server] There are no known neighbors to find node', nodeToFind);
resolve([]);
return;
}
function checkForNode(node) {
if (node === undefined) {
resolve('Error');
return;
}
if (node && node.id !== undefined) { // the original `node !== []` check was always true in JS
if (Buffer.from(node.id).toString('hex') == nodeToFind.id) {
console.log('Successfully found', guid);
resolve(node);
return;
}
}
console.log('Failed to find', guid, 'in the DHT');
resolve('Fail');
return;
}
let that = this;
let spider = new Crawling.NodeSpiderCrawl(this.protocol, nodeToFind, nearest, this.ksize, this.alpha, true);
let results;
spider.find().then((results) => {
checkForNode(results);
}, (err) => {
console.log('Spider error', err);
});
});
return p;
}
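// Illustrative usage sketch (assumption, not part of the original class): resolveGUID() returns a
// Promise that resolves with a DHTNode on success, or with 'Fail' / 'Error' / [] when the peer
// cannot be found, so callers typically branch on the resolved value:
//
//   server.resolveGUID(peerGuid).then((node) => {
//     if (node && node.ip) { /* contact node.ip:node.port directly */ }
//     else { /* fall back to a relay or give up */ }
//   });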
// def check_for_node(nodes):
// for node in nodes:
// if node.id == node_to_find.id:
// this.log.debug('%s successfully resolved as %s' % (guid.encode('hex'), node))
// return node
// this.log.debug('%s was not found in the dht' % guid.encode('hex'))
// return None
//
// index = this.protocol.router.getBucketFor(node_to_find)
// nodes = this.protocol.router.buckets[index].getNodes()
// for node in nodes:
// if node.id == node_to_find.id:
// this.log.debug('%s successfully resolved as %s' % (guid.encode('hex'), node))
// return defer.succeed(node)
//
// nearest = this.protocol.router.findNeighbors(node_to_find)
// if len(nearest) == 0:
// this.log.warning('there are no known neighbors to find node %s' % node_to_find.id.encode('hex'))
// return defer.succeed(None)
//
// spider = NodeSpiderCrawl(this.protocol, node_to_find, nearest, this.ksize, this.alpha, True)
// return spider.find().addCallback(check_for_node)
};
// def _anyRespondSuccess(responses):
// """
// Given the result of a DeferredList of calls to peers, ensure that at least
// one of them was contacted and responded with a Truthy result.
// """
// for deferSuccess, result in responses:
// peerReached, peerResponse = result
// if deferSuccess and peerReached and peerResponse:
// return True
// return False
//
//
// class Server(object):
//
//
// def listen(self, port):
// """
// Start listening on the given port.
//
// This is the same as calling::
//
// reactor.listenUDP(port, server.protocol)
// """
// return reactor.listenUDP(port, this.protocol)
//
// def refreshTable(self):
// """
// Refresh buckets that haven't had any lookups in the last hour
// (per section 2.3 of the paper).
// """
// ds = []
// refresh_ids = this.protocol.getRefreshIDs()
// refresh_ids.append(digest(random.getrandbits(255))) # random node so we get more diversity
// for rid in refresh_ids:
// node = Node(rid)
// nearest = this.protocol.router.findNeighbors(node, this.alpha)
// spider = NodeSpiderCrawl(this.protocol, node, nearest, this.ksize, this.alpha)
// ds.append(spider.find())
//
// def republishKeys(_):
// this.log.debug("Republishing key/values...")
// neighbors = this.protocol.router.findNeighbors(this.node, exclude=this.node)
// for node in neighbors:
// this.protocol.transferKeyValues(node)
//
// return defer.gatherResults(ds).addCallback(republishKeys)
//
// def querySeed(self, list_seed_pubkey):
// """
// Query an HTTP seed and return a `list` if (ip, port) `tuple` pairs.
//
// Args:
// Receives a list of one or more tuples Example [(seed, pubkey)]
// seed: A `string` consisting of "ip:port" or "hostname:port"
// pubkey: The hex encoded public key to verify the signature on the response
// """
//
// nodes = []
// if not list_seed_pubkey:
// this.log.error('failed to query seed {0} from ob.cfg'.format(list_seed_pubkey))
// return nodes
// else:
// for sp in list_seed_pubkey:
// seed, pubkey = sp
// try:
// this.log.info("querying %s for peers" % seed)
// c = httplib.HTTPConnection(seed)
// c.request("GET", "/")
// response = c.getresponse()
// this.log.debug("Http response from %s: %s, %s" % (seed, response.status, response.reason))
// data = response.read()
// reread_data = data.decode("zlib | resolveGUID | identifier_name |
network.js | let address = seed.split(':');
http.get({
host: address[0],
port: address[1],
path: '?format=json'
}, (res) => {
const statusCode = res.statusCode;
const contentType = res.headers['content-type'];
const encoding = res.headers['content-encoding'];
let error;
if (statusCode !== 200) {
error = new Error(`Request Failed.\n` +
`Status Code: ${statusCode}`);
}
if (error) {
console.log(error.message);
// consume response data to free up memory
res.resume();
return;
}
let rawData = '';
res.on('data', (chunk) => rawData += chunk);
res.on('end', () => {
var rawJSON = JSON.parse(rawData);
var peers = rawJSON.peers;
var signature = rawJSON.signature;
//peers = [{ip:'71.178.207.137', port:33333}];
//peers = peers.slice(0,2);
for (var p in peers) {
nodes.push([peers[p].ip, peers[p].port]);
}
// let verifyKey = sodium.crypto_sign_open(Buffer.from(peers.toString()), Buffer.from(pubkey, 'hex'));
// console.log(verifyKey);
console.log(seed, 'returned', nodes.length, 'addresses');
callback(nodes);
});
});
} catch (err) {
console.log(err);
}
// except Exception, e:
// this.log.error("failed to query seed: %s" % str(e))
return nodes;
}
}
}
bootstrap(addrs, promise) {
/**
Bootstrap the server by connecting to other known nodes in the network.
Args:
addrs: A `list` of (ip, port) `tuple` pairs. Note that only IP addresses
are acceptable - hostnames will cause an error.
**/
// If the transport hasn't been initialized yet, wait a second
console.log('[DHT Server] Bootstrapping with', addrs.length, 'peers, finding neighbors...');
let ds = [];
let that = this;
for (let addrIndex in addrs) {
// Skip our own address; comparing arrays with != would always be true in JS
if (addrs[addrIndex][0] !== this.node.ip || addrs[addrIndex][1] !== this.node.port) {
let addr = addrs[addrIndex];
let u = new Utils();
let nullDigest = u.digest('null');
ds.push(
this.protocol.rpcProxy(
'ping',
new Node.DHTNode(nullDigest, addr[0], addr[1], NATTYPE['FULL_CONE'])
)
)
}
}
function initTable(results) {
console.log('initTable');
let response = false;
let potentialRelayNodes = [];
for (var index in results) {
let addr = results[index][0];
let result = results[index][1];
if (result[0]) {
response = true;
let h, validPow;
let n = new objects.Node();
let sNode = objects.Node.deserializeBinary(result[1][0]);
let sNodeAddress = sNode.getNodeaddress();
// Create protobuf from uint8array
// create new DHTNode object
// add to router
let sNodeRelayAddress = sNode.getRelayaddress();
let dhtNodeRelayAddress = null;
if (sNodeRelayAddress !== undefined) {
dhtNodeRelayAddress = [sNodeRelayAddress.getIp(), sNodeRelayAddress.getPort()];
}
let dhtNode = new Node.DHTNode(
sNode.getGuid_asU8(),
sNodeAddress.getIp(),
sNodeAddress.getPort(),
sNode.getPublickey(),
dhtNodeRelayAddress,
parseInt(sNode.getNattype()),
sNode.getVendor()
);
try {
h = sodium.crypto_hash_sha512(sNode.getPublickey());
let hStr = h.toString('hex');
let powHash = hStr.substring(40, hStr.length - 1);
validPow = GUID._testpow(powHash.substring(0, 6));
if (!validPow) {
throw('Invalid GUID');
}
} catch (err) {
console.log('Initializing DHT table error:', err);
}
that.protocol.router.addContact(dhtNode);
if (dhtNode.natType == NATTYPE['FULL_CONE']) {
potentialRelayNodes.push([addr.ip, addr.port]);
}
}
}
}
return Promise.all(ds).then((results) => {
initTable(results);
console.log('All done...');
return 'done';
}).catch((err) => {
console.log(err);
});
}
resolveGUID(guid) {
/**
Given a guid return a `Node` object containing its ip and port or none if it's
not found.
Args:
guid: the 20 raw bytes representing the guid.
**/
console.log('[Kademlia Server] Crawling DHT to find IP for', guid);
let p = new Promise((resolve, reject) => {
let nodeToFind = new Node.DHTNode(guid);
let dhtConnections = this.protocol.multiplexer.getAllConnections()
// Check in existing connections
for (var connIndex in dhtConnections) {
let connection = dhtConnections[connIndex];
if (connection.handler.node !== undefined && connection.handler.node.id == nodeToFind.id) {
console.log(guid, 'successfully resolved as', connection.handler.node);
resolve(connection.handler.node);
return;
}
}
// Check local DHT
let index = this.protocol.router.getBucketFor(nodeToFind);
let nodes = this.protocol.router.buckets[index].getNodes();
for (var nodeIndex in nodes) {
let node = nodes[nodeIndex];
if (node.id == nodeToFind.id) {
console.log('[Kademlia Server] Successfully resolved', guid, 'as', node);
resolve(node);
return;
}
}
// Get neighbors to ask
let nearest = this.protocol.router.findNeighbors(nodeToFind);
if (nearest.length == 0) {
console.log('[Kademlia Server] There are no known neighbors to find node', nodeToFind);
resolve([]);
return;
}
function checkForNode(node) {
if (node === undefined) {
resolve('Error');
return;
}
if (node && node.length !== 0) {
if (Buffer.from(node.id).toString('hex') == nodeToFind.id) {
console.log('Successfully found', guid);
resolve(node);
return;
}
}
console.log('Failed to find', guid, 'in the DHT');
resolve('Fail');
return;
}
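// Nothing cached locally, so crawl the DHT starting from the nearest known neighbors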
let that = this;
let spider = new Crawling.NodeSpiderCrawl(this.protocol, nodeToFind, nearest, this.ksize, this.alpha, true);
let results;
spider.find().then((results) => {
checkForNode(results);
}, (err) => {
console.log('Spider error', err);
});
});
return p;
}
// def check_for_node(nodes):
// for node in nodes:
// if node.id == node_to_find.id:
// this.log.debug('%s successfully resolved as %s' % (guid.encode('hex'), node))
// return node
// this.log.debug('%s was not found in the dht' % guid.encode('hex'))
// return None
//
// index = this.protocol.router.getBucketFor(node_to_find)
// nodes = this.protocol.router.buckets[index].getNodes()
// for node in nodes:
// if node.id == node_to_find.id:
// this.log.debug('%s successfully resolved as %s' % (guid.encode('hex'), node))
// return defer.succeed(node)
//
// nearest = this.protocol.router.findNeighbors(node_to_find)
// if len(nearest) == 0:
// this.log.warning('there are no known neighbors to find node %s' % node_to_find.id.encode('hex'))
// return defer.succeed(None)
//
// spider = NodeSpiderCrawl(this.protocol, node_to_find, nearest, this.ksize, this.alpha, True)
// return spider.find().addCallback(check_for_node)
};
// def _anyRespondSuccess(responses):
// """
// Given the result of a DeferredList of calls to peers, ensure that at least
| {
/**
Query an HTTP seed and return a `list` of (ip, port) `tuple` pairs.
Args:
Receives a list of one or more tuples Example [(seed, pubkey)]
seed: A `string` consisting of "ip:port" or "hostname:port"
pubkey: The hex encoded public key to verify the signature on the response
**/
let nodes = [];
if (listSeedPubkey === undefined) {
console.log('Failed to query seed ', listSeedPubkey, ' from configuration file (ob.cfg).');
return nodes;
} else {
for (let sp in listSeedPubkey) {
let seed = listSeedPubkey[sp][0];
let pubkey = listSeedPubkey[sp][1];
try {
console.log('Querying http://' + seed, 'for peers');
| identifier_body | |
network.js | .sha512(n.publicKey)
// hash_pow = h[40:]
// if int(hash_pow[:6], 16) >= 50 or hexlify(n.guid) != h[:40]:
// raise Exception('Invalid GUID')
// node = Node(n.guid, addr[0], addr[1], n.publicKey,
// None if not n.HasField("relayAddress") else
// (n.relayAddress.ip, n.relayAddress.port),
// n.natType,
// n.vendor)
// this.protocol.router.addContact(node)
// if n.natType == objects.FULL_CONE:
// potential_relay_nodes.append((addr[0], addr[1]))
// except Exception:
// this.log.warning("bootstrap node returned invalid GUID")
// if not response:
// if this.protocol.multiplexer.testnet:
// this.bootstrap(this.querySeed(SEEDS_TESTNET), d)
// else:
// this.bootstrap(this.querySeed(SEEDS), d)
// return
// if len(potential_relay_nodes) > 0 and this.node.nat_type != objects.FULL_CONE:
// shuffle(potential_relay_nodes)
// this.node.relay_node = potential_relay_nodes[0]
//
// d.callback(True)
// ds = {}
// for addr in addrs:
// if addr != (this.node.ip, this.node.port):
// ds[addr] = this.protocol.ping(Node(digest("null"), addr[0], addr[1], nat_type=objects.FULL_CONE))
// deferredDict(ds).addCallback(initTable)
// return d
//
// def inetVisibleIP(self):
// """
// Get the internet visible IP's of this node as other nodes see it.
//
// Returns:
// A `list` of IP's. If no one can be contacted, then the `list` will be empty.
// """
//
// def handle(results):
// ips = []
// for result in results:
// if result[0]:
// ips.append((result[1][0], int(result[1][1])))
// this.log.debug("other nodes think our ip is %s" % str(ips))
// return ips
//
// ds = []
// for neighbor in this.bootstrappableNeighbors():
// ds.append(this.protocol.stun(neighbor))
// return defer.gatherResults(ds).addCallback(handle)
//
// def get(self, keyword, save_at_nearest=True):
// """
// Get a key if the network has it.
//
// Args:
// keyword = the keyword to save to
// save_at_nearest = save value at the nearest without value
//
// Returns:
// :class:`None` if not found, the value otherwise.
// """
// dkey = digest(keyword)
// node = Node(dkey)
// nearest = this.protocol.router.findNeighbors(node)
// if len(nearest) == 0:
// this.log.warning("there are no known neighbors to get key %s" % dkey.encode('hex'))
// return defer.succeed(None)
// spider = ValueSpiderCrawl(this.protocol, node, nearest, this.ksize, this.alpha, save_at_nearest)
// return spider.find()
//
// def set(self, keyword, key, value, ttl=604800):
// """
// Set the given key/value tuple at the hash of the given keyword.
// All values stored in the DHT are stored as dictionaries of key/value
// pairs. If a value already exists for a given keyword, the new key/value
// pair will be appended to the dictionary.
//
// Args:
// keyword: The keyword to use. Should be hashed with hash160 before
// passing it in here.
// key: the 20 byte hash of the data.
// value: a serialized `protos.objects.Node` object which serves as a
// pointer to the node storing the data.
//
// Return: True if at least one peer responded. False if the store rpc
// completely failed.
// """
// if len(keyword) != 20:
// return defer.succeed(False)
//
// this.log.debug("setting '%s' on network" % keyword.encode("hex"))
//
// def store(nodes):
// this.log.debug("setting '%s' on %s" % (keyword.encode("hex"), [str(i) for i in nodes]))
// ds = [this.protocol.callStore(node, keyword, key, value, ttl) for node in nodes]
//
// keynode = Node(keyword)
// if this.node.distanceTo(keynode) < max([n.distanceTo(keynode) for n in nodes]):
// this.storage[keyword] = (key, value, ttl)
// this.log.debug("got a store request from %s, storing value" % str(this.node))
//
// return defer.DeferredList(ds).addCallback(_anyRespondSuccess)
//
// node = Node(keyword)
// nearest = this.protocol.router.findNeighbors(node)
// if len(nearest) == 0:
// this.log.warning("there are no known neighbors to set keyword %s" % keyword.encode("hex"))
// return defer.succeed(False)
// spider = NodeSpiderCrawl(this.protocol, node, nearest, this.ksize, this.alpha)
// return spider.find().addCallback(store)
//
// def delete(self, keyword, key, signature):
// """
// Delete the given key/value pair from the keyword dictionary on the network.
// To delete you must provide a signature covering the key that you wish to
// delete. It will be verified against the public key stored in the value. We
// use our ksize as alpha to make sure we reach as many nodes storing our value
// as possible.
//
// Args:
// keyword: the `string` keyword where the data being deleted is stored.
// key: the 20 byte hash of the data.
// signature: a signature covering the key.
//
// """
// this.log.debug("deleting '%s':'%s' from the network" % (keyword.encode("hex"), key.encode("hex")))
// dkey = digest(keyword)
//
// def delete(nodes):
// this.log.debug("deleting '%s' on %s" % (key.encode("hex"), [str(i) for i in nodes]))
// ds = [this.protocol.callDelete(node, dkey, key, signature) for node in nodes]
//
// if this.storage.getSpecific(dkey, key) is not None:
// this.storage.delete(dkey, key)
//
// return defer.DeferredList(ds).addCallback(_anyRespondSuccess)
//
// node = Node(dkey)
// nearest = this.protocol.router.findNeighbors(node)
// if len(nearest) == 0:
// this.log.warning("there are no known neighbors to delete key %s" % key.encode("hex"))
// return defer.succeed(False)
// spider = NodeSpiderCrawl(this.protocol, node, nearest, this.ksize, this.ksize)
// return spider.find().addCallback(delete)
//
// def resolve(self, guid):
// """
// Given a guid return a `Node` object containing its ip and port or none if it's
// not found.
//
// Args:
// guid: the 20 raw bytes representing the guid.
// """
// this.log.debug("crawling dht to find IP for %s" % guid.encode("hex"))
//
// node_to_find = Node(guid)
// for connection in this.protocol.multiplexer.values():
// if connection.handler.node is not None and connection.handler.node.id == node_to_find.id:
// this.log.debug("%s successfully resolved as %s" % (guid.encode("hex"), connection.handler.node))
// return defer.succeed(connection.handler.node)
//
// def check_for_node(nodes):
// for node in nodes:
// if node.id == node_to_find.id:
// this.log.debug("%s successfully resolved as %s" % (guid.encode("hex"), node))
// return node
// this.log.debug("%s was not found in the dht" % guid.encode("hex"))
// return None
//
// index = this.protocol.router.getBucketFor(node_to_find)
// nodes = this.protocol.router.buckets[index].getNodes()
// for node in nodes:
// if node.id == node_to_find.id:
// this.log.debug("%s successfully resolved as %s" % (guid.encode("hex"), node))
// return defer.succeed(node)
//
// nearest = this.protocol.router.findNeighbors(node_to_find)
// if len(nearest) == 0:
// this.log.warning("there are no known neighbors to find node %s" % node_to_find.id.encode("hex"))
// return defer.succeed(None)
//
// spider = NodeSpiderCrawl(this.protocol, node_to_find, nearest, this.ksize, this.alpha, True)
// return spider.find().addCallback(check_for_node)
//
// def saveState(self, fname):
// """
// Save the state of this node (the alpha/ksize/id/immediate neighbors)
// to a cache file with the given fname.
// """
// data = {'ksize': this.ksize, | // 'alpha': this.alpha, | random_line_split | |
network.js | else {
for (let sp in listSeedPubkey) {
let seed = listSeedPubkey[sp][0];
let pubkey = listSeedPubkey[sp][1];
try {
console.log('Querying http://' + seed, 'for peers');
let address = seed.split(':');
http.get({
host: address[0],
port: address[1],
path: '?format=json'
}, (res) => {
const statusCode = res.statusCode;
const contentType = res.headers['content-type'];
const encoding = res.headers['content-encoding'];
let error;
if (statusCode !== 200) {
error = new Error(`Request Failed.\n` +
`Status Code: ${statusCode}`);
}
if (error) {
console.log(error.message);
// consume response data to free up memory
res.resume();
return;
}
let rawData = '';
res.on('data', (chunk) => rawData += chunk);
res.on('end', () => {
var rawJSON = JSON.parse(rawData);
var peers = rawJSON.peers;
var signature = rawJSON.signature;
//peers = [{ip:'71.178.207.137', port:33333}];
//peers = peers.slice(0,2);
for (var p in peers) {
nodes.push([peers[p].ip, peers[p].port]);
}
// let verifyKey = sodium.crypto_sign_open(Buffer.from(peers.toString()), Buffer.from(pubkey, 'hex'));
// console.log(verifyKey);
console.log(seed, 'returned', nodes.length, 'addresses');
callback(nodes);
});
});
} catch (err) {
console.log(err);
}
// except Exception, e:
// this.log.error("failed to query seed: %s" % str(e))
return nodes;
}
}
}
bootstrap(addrs, promise) {
/**
Bootstrap the server by connecting to other known nodes in the network.
Args:
addrs: A `list` of (ip, port) `tuple` pairs. Note that only IP addresses
are acceptable - hostnames will cause an error.
**/
// If the transport hasn't been initialized yet, wait a second
console.log('[DHT Server] Bootstrapping with', addrs.length, 'peers, finding neighbors...');
let ds = [];
let that = this;
for (let addrIndex in addrs) {
if (addrs[addrIndex][0] != this.node.ip || addrs[addrIndex][1] != this.node.port) {
let addr = addrs[addrIndex];
let u = new Utils();
let nullDigest = u.digest('null');
ds.push(
this.protocol.rpcProxy(
'ping',
new Node.DHTNode(nullDigest, addr[0], addr[1], NATTYPE['FULL_CONE'])
)
)
}
}
function initTable(results) {
console.log('initTable');
let response = false;
let potentialRelayNodes = [];
for (var index in results) {
let addr = results[index][0];
let result = results[index][1];
if (result[0]) {
response = true;
let h, validPow;
let n = new objects.Node();
let sNode = objects.Node.deserializeBinary(result[1][0]);
let sNodeAddress = sNode.getNodeaddress();
// Create protobuf from uint8array
// create new DHTNode object
// add to router
let sNodeRelayAddress = sNode.getRelayaddress();
let dhtNodeRelayAddress = null;
if (sNodeRelayAddress !== undefined) {
dhtNodeRelayAddress = [sNodeRelayAddress.getIp(), sNodeRelayAddress.getPort()];
}
let dhtNode = new Node.DHTNode(
sNode.getGuid_asU8(),
sNodeAddress.getIp(),
sNodeAddress.getPort(),
sNode.getPublickey(),
dhtNodeRelayAddress,
parseInt(sNode.getNattype()),
sNode.getVendor()
);
try {
h = sodium.crypto_hash_sha512(sNode.getPublickey());
let hStr = h.toString('hex');
let powHash = hStr.substring(40, hStr.length - 1);
validPow = GUID._testpow(powHash.substring(0, 6));
if (!validPow) {
throw('Invalid GUID');
}
} catch (err) {
console.log('Initializing DHT table error:', err);
}
that.protocol.router.addContact(dhtNode);
if (dhtNode.natType == NATTYPE['FULL_CONE']) {
potentialRelayNodes.push([addr.ip, addr.port]);
}
}
}
}
return Promise.all(ds).then((results) => {
initTable(results);
console.log('All done...');
return 'done';
}).catch((err) => {
console.log(err);
});
}
resolveGUID(guid) {
/**
Given a guid return a `Node` object containing its ip and port or none if it's
not found.
Args:
guid: the 20 raw bytes representing the guid.
**/
console.log('[Kademlia Server] Crawling DHT to find IP for', guid);
let p = new Promise((resolve, reject) => {
let nodeToFind = new Node.DHTNode(guid);
let dhtConnections = this.protocol.multiplexer.getAllConnections()
// Check in existing connections
for (var connIndex in dhtConnections) {
let connection = dhtConnections[connIndex];
if (connection.handler.node !== undefined && connection.handler.node.id == nodeToFind.id) {
console.log(guid, 'successfully resolved as', connection.handler.node);
resolve(connection.handler.node);
return;
}
}
// Check local DHT
let index = this.protocol.router.getBucketFor(nodeToFind);
let nodes = this.protocol.router.buckets[index].getNodes();
for (var nodeIndex in nodes) {
let node = nodes[nodeIndex];
if (node.id == nodeToFind.id) {
console.log('[Kademlia Server] Successfully resolved', guid, 'as', node);
resolve(node);
return;
}
}
// Get neighbors to ask
let nearest = this.protocol.router.findNeighbors(nodeToFind);
if (nearest.length == 0) {
console.log('[Kademlia Server] There are no known neighbors to find node', nodeToFind);
resolve([]);
return;
}
function checkForNode(node) {
if (node === undefined) {
resolve('Error');
return;
}
if (node && node.length !== 0) {
if (Buffer.from(node.id).toString('hex') == nodeToFind.id) {
console.log('Successfully found', guid);
resolve(node);
return;
}
}
console.log('Failed to find', guid, 'in the DHT');
resolve('Fail');
return;
}
let that = this;
let spider = new Crawling.NodeSpiderCrawl(this.protocol, nodeToFind, nearest, this.ksize, this.alpha, true);
let results;
spider.find().then((results) => {
checkForNode(results);
}, (err) => {
console.log('Spider error', err);
});
});
return p;
}
// def check_for_node(nodes):
// for node in nodes:
// if node.id == node_to_find.id:
// this.log.debug('%s successfully resolved as %s' % (guid.encode('hex'), node))
// return node
// this.log.debug('%s was not found in the dht' % guid.encode('hex'))
// return None
//
// index = this.protocol.router.getBucketFor(node_to_find)
// nodes = this.protocol.router.buckets[index].getNodes()
// for node in nodes:
// if node.id == node_to_find.id:
// this.log.debug('%s successfully resolved as %s' % (guid.encode('hex'), node))
// return defer.succeed(node)
//
// nearest = this.protocol.router.findNeighbors(node_to_find)
// if len(nearest) == 0:
// this.log.warning('there are no known neighbors to find node %s' % node_to_find.id.encode('hex'))
// return defer.succeed(None)
//
// spider = NodeSpiderCrawl(this.protocol, node_to_find, nearest, this.ksize, this.alpha, True)
// return spider.find().addCallback(check_for_node)
};
// def _anyRespondSuccess(responses):
// """
// Given the result of a DeferredList of calls to peers, ensure that at least
// one of them was contacted and responded with a Truthy result.
// """
// for deferSuccess, result in responses:
// peerReached, peerResponse = result
// if deferSuccess and peerReached and peerResponse:
// return True
// return False
//
//
// class Server(object):
//
//
// def listen(self, port):
// """
// Start listening on the given port.
//
// This is the same as | {
console.log('Failed to query seed ', listSeedPubkey, ' from configuration file (ob.cfg).');
return nodes;
} | conditional_block | |
attack.py | = int(2**np.ceil(np.log2(frame_length)))
win = np.sqrt(8.0 / 3.) * librosa.core.stft(
y=audio,
n_fft=n_fft,
hop_length=frame_step,
win_length=frame_length,
center=False,
pad_mode='constant')
z = abs(win / window_size)
psd = 10 * np.log10(z * z + 0.0000000000000000001)
psd_max = np.max(psd)
PSD = 96 - psd_max + psd
return PSD, psd_max
def log10(x):
numerator = tf.log(x)
denominator = tf.log(tf.constant(10, dtype=numerator.dtype))
return numerator / denominator
def tfPSD(x, window_size=256, step_per_window=2, psdMax=None):
# this is PSD_3 from the thesis implemented with tensorflow
# x is batched if you only have one example expand the dimension accordingly
batch_size = x.get_shape()[0]
scale = tf.sqrt(8. / 3.)
frame_length = int(window_size)
frame_step = int(window_size//step_per_window)
fft_length = int(2**np.ceil(np.log2(frame_length)))
win = tf.scalar_mul(scale, tf.abs(tf.contrib.signal.stft(
signals=x,
frame_length=frame_length,
frame_step=frame_step,
fft_length=fft_length
)))
z = win / frame_length
#psd_max = tf.reduce_max(z * z, axis=[1,2])
psd = tf.reshape(tf.scalar_mul(10, log10((z * z) + 0.0000000000000000001)), [batch_size, -1, window_size//2+1])
PSD = 96 - tf.reshape(psdMax, [batch_size, 1, 1]) + psd
return PSD
def lev(i,j, a, b, mem):
if((i,j) in mem):
return mem[(i,j)]
if(min(i,j)==0):
res = max(i,j)
else:
res = min(
lev(i-1,j,a,b,mem) + 1,
lev(i,j-1,a,b,mem) + 1,
lev(i-1,j-1,a,b,mem) + (a[i]!=b[j])
)
mem[(i,j)] = res
return res
def wer(a,b):
mem = {}
return lev(len(a)-1, len(b)-1, a, b, mem)/len(a)
def main(args, thisId):
print(thisId)
print(args)
with tf.Session() as sess:
audios = []
lengths = []
psyThs = []
psdMaxes = []
f = open(args.input, 'r')
temp = f.readlines()
temp = [row[:-1] for row in temp]
temp = [row.split(",") for row in temp]
inputFiles = temp[0]
dstText = temp[1]
# store output in same folder as input
outputFiles = [fileName[0] + "_" + thisId + "_out." + fileName[1] for fileName in [fileName.split('.') for fileName in inputFiles]]
f.close()
assert len(inputFiles) == len(dstText)
assert len(dstText) == len(outputFiles)
window_size = int(args.windowsize)
step_per_window = 2
print("window_size, step_per_window", window_size, step_per_window)
# Load the inputs that we're given
for i in range(len(inputFiles)):
fs, audio = wav.read(inputFiles[i])
if args.mp3:
audio = convert_mp3(audio, len(audio))[0]
assert fs == 16000
assert audio.dtype == np.int16
print('source dB', 20*np.log10(np.max(np.abs(audio))))
audios.append(list(audio))
lengths.append(len(audio))
maxlen = max(map(len,audios))
audios = np.array([x+[0]*(maxlen-len(x)) for x in audios])
for audio in audios:
# compute psdMaxes for the PSD_3 algorithm and psychoacoustic threshold
PSD, psdMax = numpyPSD(audio.astype(float), window_size, step_per_window)
psdMaxes.append(psdMax)
frequency = librosa.core.fft_frequencies(fs, int(2**np.ceil(np.log2(window_size))))
resFreq = frequency[-1] / (frequency.shape[0]-1)
resTime = window_size/step_per_window/(fs/1000)
psyTh = convolution.totalMask(PSD, resFreq, resTime, frequency[0], frequency[-1])
psyTh = psyTh.transpose()
psyThs.append(psyTh)
phrase = [[toks.index(c) for c in ph] for ph in dstText]
maxPhraseLen = np.array([len(p) for p in phrase]).max()
deltas = args.delta
if(deltas != None):
deltas = deltas.split(',')
deltas = [list(wav.read(delta)[1]) for delta in deltas]
deltas = np.array([delta+[0]*(maxlen-len(delta)) for delta in deltas])
# Set up the attack class and run it
batch_size=len(audios)
attack = Attack(sess, maxPhraseLen, maxlen,
batch_size=batch_size,
mp3=args.mp3,
learning_rate=args.lr,
window_size=window_size,
step_per_window=step_per_window,
delta=deltas,
audio=audios,
psdMaxes=np.array(psdMaxes),
psdShape=psyThs[0].shape,
num_iterations=args.iterations
)
#you can set the regularizer argument individually for each file
if(',' not in args.regularizer):
regularizer = np.array([args.regularizer]*batch_size).reshape((batch_size))
else:
regularizer = np.array(list(map(lambda x: int(x), args.regularizer.split(','))))
deltas = attack.attack(audios,
psyThs,
lengths,
np.array(phrase),
regularizer=regularizer)
# And now save it to the desired output
if args.mp3:
for i in range(len(outputFiles)):
path = outputFiles[i]
path = path[:path.rfind('.')]+'.mp3'
print(path)
filename = path[path.rfind('/')+1:]
convert_mp3(deltas[i], lengths[i], filename)
copyfile("/tmp/" + filename, path)
print("Final distortion", np.max(np.abs(deltas[0][:lengths[0]]-audios[0][:lengths[0]])))
else:
for i in range(len(outputFiles)):
path = outputFiles[i]
print(path)
wav.write(path, 16000,
np.array(np.clip(np.round(deltas[i][:lengths[i]]),
-2**15, 2**15-1),dtype=np.int16))
print("Final distortion", np.max(np.abs(deltas[i][:lengths[i]]-audios[i][:lengths[i]])))
if __name__ == '__main__':
"""
Do the attack here.
This is all just boilerplate; nothing interesting
happens in this method.
We only support using the CTC-Loss.
"""
parser = argparse.ArgumentParser(description=None)
parser.add_argument('--in', type=str, dest="input",
required=True,
help="Input file that defines, input file, output file, sourceText, destText")
parser.add_argument('--regularizer', type=str,
required=False, default=0,
help="Initial regularizer")
parser.add_argument('--lr', type=int,
required=False, default=100,
help="Learning rate for optimization")
parser.add_argument('--iterations', type=int,
required=False, default=2000,
help="Maximum number of iterations of gradient descent")
parser.add_argument('--mp3', action="store_const", const=True,
required=False,
help="Generate MP3 compression resistant adversarial examples")
parser.add_argument('--toLog', action="store_const", const=True,
required=False, default=False,
help="When setting this argument, everything that is printed will be stored in a logfile identified by a timestamp")
parser.add_argument('--delta', type=str,
required=False, default=None,
help="Add an adversarial example here and it will start optimizing from there")
parser.add_argument('--windowsize', type=str,
required=False, default=None,
help="Just add something and then use it afterwards.")
args = parser.parse_args()
print(args)
toLog = args.toLog
thisId = str(int(round(time.time())))
if(toLog):
orig_stdout = sys.stdout
f = open('log_' + thisId + '.txt', 'w')
sys.stdout = f
main(args, thisId)
sys.stdout = orig_stdout
f.close()
else: | main(args, thisId) | conditional_block | |
attack.py | _beam_search_decoder(logits, lengths, merge_repeated=False, beam_width=100)
def clipBatch(self, delta, psyTh, regularizer, psdMaxes, max_audio_len, window_size, step_per_window):
# This is the PsyClip. It takes a batch of deltas and clips up to five times to get below psyTh.
deltaShape = delta.shape
psdShape = psyTh.shape
batch_size = deltaShape[0]
frame_length = int(window_size)
frame_step = int(window_size//step_per_window)
fft_length = int(2**np.ceil(np.log2(frame_length)))
# pad the signal to remove stft artifacts at the start and end of the signal.
paddingRounding = max_audio_len%frame_step
paddingAudio = tf.constant(np.array([[0,0],[frame_length,frame_length]]), dtype=tf.int32)
paddingStft = tf.constant(np.array([[0,0], [step_per_window, step_per_window],[0,0]]), dtype=tf.int32)
inverse_window_fn = tf.contrib.signal.inverse_stft_window_fn(frame_step)
gauss2dFilter = tf.constant(gauss2d(101,[2,6]).reshape(101,101,1,1), dtype=tf.float32)
deltaPad = tf.reshape(tf.pad(delta, paddingAudio, "CONSTANT", constant_values=tf.constant(0., dtype=tf.float32)),[batch_size, -1])
psyThPad = tf.pad(psyTh, paddingStft, "CONSTANT", constant_values=tf.constant(0., dtype=tf.float32))
# compute PSD of delta
deltaPSD = tfPSD(deltaPad, window_size, step_per_window, psdMaxes)
# compute the PsyLoss-regularizer
diffMax = tf.reshape(tf.reduce_max(deltaPSD - (psyThPad + tf.reshape(regularizer, [batch_size, 1, 1])), axis=[1,2]), [batch_size, 1, 1])
# we use tf.while_loop
# and repeat the loop 5 times
cond = lambda argDiffMax, argDeltaPad, count: count < 5
def body(argDiffMax,argDeltaPad,count):
# compute PSD of delta
deltaPSD = tfPSD(argDeltaPad, window_size, step_per_window, psdMaxes)
# relu of deltaPSD - (psyTh + regularizer) would give exactly the places where deltaPSD is too high
# softplus is smoother and an upper bound to relu => when reducing by softplus(PsyLoss-regularizer) we reduce at least by relu(PsyLoss-regularizer)
diff1 = tf.maximum(tf.nn.softplus(deltaPSD - (psyThPad + tf.reshape(self.regularizer-0.5, [-1,1,1]))),0.1)
# store the max of this value for renormalization after the convolution
argDiffMax = tf.reshape(tf.reduce_max(diff1, axis=[1,2]), [batch_size, 1, 1])
# convolve this loss with a gaussian kernel
diff4 = tf.reshape(
tf.nn.conv2d(tf.reshape(
diff1,
[batch_size, psdShape[1]+2*step_per_window,psdShape[2],1]
),
gauss2dFilter,
[1,1,1,1],
'SAME'),
[batch_size, psdShape[1]+2*step_per_window,psdShape[2]])
# renormalize such that max is as before the convolution
diff5 = diff4/tf.reshape(tf.reduce_max(diff4, axis=[1,2]), [batch_size, 1, 1])*argDiffMax
# compute the short-time Fourier transform of delta.
deltaStft = tf.contrib.signal.stft(
argDeltaPad,
frame_length,
frame_step,
fft_length)
# reduce deltaStft such that hopefully PSD(stft^-1(deltaStftReduced)) < psyTh
argDeltaPad = tf.reshape(
tf.pad(
tf.contrib.signal.inverse_stft(
deltaStft/tf.cast(tf.pow(10.0,diff5/20.0), tf.complex64),
frame_length, frame_step, fft_length, window_fn=inverse_window_fn
),
tf.constant([[0,0],[0,paddingRounding]])
),
deltaPad.shape)
count += 1
return (argDiffMax, argDeltaPad, count)
(diffMax,deltaPad, count) = tf.while_loop(cond,body,(diffMax,deltaPad, 0))
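# Strip the frame_length padding added before the STFT and restore delta to its original shape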
reshaped_inv_stft = tf.reshape(deltaPad,[batch_size, (max_audio_len+2*frame_length)])
# slice off the padding from the beginning
deltaRet = tf.reshape(tf.slice(reshaped_inv_stft,[0, frame_length],[batch_size, max_audio_len]), delta.shape)
return deltaRet
def clip(self, delta, psyTh, regularizer, psdMax, max_audio_len, window_size, step_per_window):
# This is the PsyClip. It can take a batch of deltas and clips five times to get below psyTh.
# please look at clipBatch for comments. This does the same but only for one sample and repeats the loop until PSD(delta)<psyTh+regularizer+0.5
deltaShape = delta.shape
psdShape = psyTh.shape
delta = tf.expand_dims(delta, 0)
psyTh = tf.expand_dims(psyTh, 0)
batch_size = 1
frame_length = int(window_size)
frame_step = int(window_size//step_per_window)
fft_length = int(2**np.ceil(np.log2(frame_length)))
sample_rate = 16000
freq_res = sample_rate/window_size
time_res = frame_step/(sample_rate/1000)
paddingRounding = max_audio_len%frame_step
paddingAudio = tf.constant(np.array([[0,0],[frame_length,frame_length]]), dtype=tf.int32)
paddingStft = tf.constant(np.array([[0,0], [step_per_window, step_per_window],[0,0]]), dtype=tf.int32)
inverse_window_fn = tf.contrib.signal.inverse_stft_window_fn(frame_step)
gauss2dFilter = tf.constant(gauss2d(101,[2,6]).reshape(101,101,1,1), dtype=tf.float32)
deltaPad = tf.reshape(tf.pad(delta, paddingAudio, "CONSTANT", constant_values=tf.constant(0., dtype=tf.float32)),[1, -1])
psyThPad = tf.pad(psyTh, paddingStft, "CONSTANT", constant_values=tf.constant(0., dtype=tf.float32))
deltaPSD = tfPSD(deltaPad, window_size, step_per_window, psdMax)
diffMax = tf.reduce_max(deltaPSD - (psyThPad + tf.reshape(regularizer, [1, 1, 1])))
cond = lambda argDiffMax,argDeltaPad, argDeltaPSD: argDiffMax>regularizer-0.5
def body(argDiffMax,argDeltaPad, argDeltaPSD):
argDeltaPSD = tf.expand_dims(argDeltaPSD, 0)
diff = tf.maximum(tf.nn.relu(argDeltaPSD - (psyThPad + regularizer-0.5)),0.1)
argDiffMax = tf.reduce_max(diff)
diff = tf.reshape(
tf.nn.conv2d(tf.reshape(diff, [1,psdShape[0]+2*step_per_window,psdShape[1],1]),gauss2dFilter,[1,1,1,1], 'SAME'),
[1, psdShape[0]+2*step_per_window,psdShape[1]])
diff = diff/tf.reduce_max(diff)*argDiffMax
deltaStft = tf.contrib.signal.stft(argDeltaPad, frame_length, frame_step, fft_length)
argDeltaPad = tf.reshape(tf.pad(
tf.contrib.signal.inverse_stft(deltaStft/tf.cast(tf.pow(10.0,diff/20.0), tf.complex64), frame_length, frame_step, fft_length, window_fn=inverse_window_fn),
tf.constant([[0,0],[0,paddingRounding]])), deltaPad.shape)
argDeltaPSD = tfPSD(argDeltaPad, window_size, step_per_window, psdMax)
argDiffMax = tf.reduce_max(argDeltaPSD - (psyThPad + regularizer))
argDeltaPad = tf.squeeze(argDeltaPad)
argDeltaPSD = tf.squeeze(argDeltaPSD)
return (argDiffMax, argDeltaPad, argDeltaPSD)
(diffMax,deltaPad,deltaPSD) = tf.while_loop(cond,body,(diffMax,tf.squeeze(deltaPad),tf.squeeze(deltaPSD)))
reshaped_inv_stft = tf.reshape(deltaPad,[batch_size, (max_audio_len+2*frame_length)])
delta = tf.reshape(tf.slice(reshaped_inv_stft,[0, frame_length],[batch_size, max_audio_len]), delta.shape)
return tf.squeeze(delta)
def | attack | identifier_name | |
attack.py |
# if we have (or if it's the final epoch) then we
# should record our progress and decrease the
# regularizer constant.
for ii in range(self.batch_size):
if(pl[ii] > regularizer[ii]):
# PsyLoss (pl) is higher than the regularizer => clip delta until it falls below the regularizer
print("%d : had a too high PsyLoss: %d we target %d at the moment"%(ii, pl[ii], regularizer[ii]))
print(pl[ii], regularizer[ii])
d[ii] = sess.run(self.clip(d[ii], self.psyTh[ii], regularizer[ii], self.psdMaxes[ii], self.max_audio_len, self.window_size, self.step_per_window))
sess.run(self.delta.assign(d))
else:
if(res[ii] == "".join([toks[x] for x in target[ii]])):
# Successful adversarial example
print("%d : correct"%(ii))
count = 0
if(pl[ii] < bestPSY[ii]):
# if new psyLoss is better than the one before, update best delta in final_deltas
bestCTC[ii] = loss[ii]
bestPSY[ii] = pl[ii]
final_deltas[ii] = new_input[ii]
name = "adv" + str(ii) + "reg" + str(regularizer.reshape(-1)[ii])
if self.mp3:
convert_mp3(new_input[ii], lengths[ii], name + '.mp3')
wav.write(
"/tmp/" + name + ".wav",
16000,
np.array(np.clip(np.round(new_input[ii][:lengths[ii]]),
-2**15, 2**15-1),dtype=np.int16))
regularizer[ii] = regularizer[ii]-1
sess.run(self.regularizer.assign(regularizer))
else:
print("%d : not correct with character error rate %f"%(ii, wer("".join([toks[x] for x in target[ii]]), res[ii])))
if(i == MAX-1 and final_deltas[ii] is None):
# if final_deltas[ii] is None means the algorithm did not find an adversarial example.
# consider increasing the regularizer with which you start
final_deltas[ii] = new_input[ii]
print("Did not work i=%d psylLoss=%f regularizer=%d, ctcLoss=%f"%(ii,pl[ii],regularizer[ii],loss[ii]))
wav.write("/tmp/adv.wav", 16000,
np.array(np.clip(np.round(new_input[ii]),
-2**15, 2**15-1),dtype=np.int16))
print('bestCTC', bestCTC)
print('bestPSY', bestPSY)
print('regularizer', regularizer.reshape(-1))
return final_deltas
def numpyPSD(audio, window_size=256, step_per_window=2):
# this is PSD_2 from the thesis implemented with numpy
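# Normalize so the loudest STFT bin maps to 96 dB: PSD = 96 - max(psd) + psd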
frame_length = int(window_size)
frame_step = int(window_size//step_per_window)
n_fft = int(2**np.ceil(np.log2(frame_length)))
win = np.sqrt(8.0 / 3.) * librosa.core.stft(
y=audio,
n_fft=n_fft,
hop_length=frame_step,
win_length=frame_length,
center=False,
pad_mode='constant')
z = abs(win / window_size)
psd = 10 * np.log10(z * z + 0.0000000000000000001)
psd_max = np.max(psd)
PSD = 96 - psd_max + psd
return PSD, psd_max
def log10(x):
numerator = tf.log(x)
denominator = tf.log(tf.constant(10, dtype=numerator.dtype))
return numerator / denominator
def tfPSD(x, window_size=256, step_per_window=2, psdMax=None):
# this is PSD_3 from the thesis implemented with tensorflow
# x is batched if you only have one example expand the dimension accordingly
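# e.g. a single example can be lifted to a batch of one with tf.expand_dims(x, 0)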
batch_size = x.get_shape()[0]
scale = tf.sqrt(8. / 3.)
frame_length = int(window_size)
frame_step = int(window_size//step_per_window)
fft_length = int(2**np.ceil(np.log2(frame_length)))
win = tf.scalar_mul(scale, tf.abs(tf.contrib.signal.stft(
signals=x,
frame_length=frame_length,
frame_step=frame_step,
fft_length=fft_length
)))
z = win / frame_length
#psd_max = tf.reduce_max(z * z, axis=[1,2])
psd = tf.reshape(tf.scalar_mul(10, log10((z * z) + 0.0000000000000000001)), [batch_size, -1, window_size//2+1])
PSD = 96 - tf.reshape(psdMax, [batch_size, 1, 1]) + psd
return PSD
def lev(i,j, a, b, mem):
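# Memoized Levenshtein edit distance between sequences a and b; mem caches (i, j) subproblems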
if((i,j) in mem):
return mem[(i,j)]
if(min(i,j)==0):
res = max(i,j)
else:
res = min(
lev(i-1,j,a,b,mem) + 1,
lev(i,j-1,a,b,mem) + 1,
lev(i-1,j-1,a,b,mem) + (a[i]!=b[j])
)
mem[(i,j)] = res
return res
def wer(a,b):
mem = {}
return lev(len(a)-1, len(b)-1, a, b, mem)/len(a)
def main(args, thisId):
print(thisId)
print(args)
with tf.Session() as sess:
audios = []
lengths = []
psyThs = []
psdMaxes = []
f = open(args.input, 'r')
temp = f.readlines()
temp = [row[:-1] for row in temp]
temp = [row.split(",") for row in temp]
inputFiles = temp[0]
dstText = temp[1]
# store output in same folder as input
outputFiles = [fileName[0] + "_" + thisId + "_out." + fileName[1] for fileName in [fileName.split('.') for fileName in inputFiles]]
f.close()
assert len(inputFiles) == len(dstText)
assert len(dstText) == len(outputFiles)
window_size = int(args.windowsize)
step_per_window = 2
print("window_size, step_per_window", window_size, step_per_window)
# Load the inputs that we're given
for i in range(len(inputFiles)):
fs, audio = wav.read(inputFiles[i])
if args.mp3:
audio = convert_mp3(audio, len(audio))[0]
assert fs == 16000
assert audio.dtype == np.int16
print('source dB', 20*np.log10(np.max(np.abs(audio))))
audios.append(list(audio))
lengths.append(len(audio))
maxlen = max(map(len,audios))
audios = np.array([x+[0]*(maxlen-len(x)) for x in audios])
for audio in audios:
# compute psdMaxes for the PSD_3 algorithm and psychoacoustic threshold
PSD, psdMax = numpyPSD(audio.astype(float), window_size, step_per_window)
psdMaxes.append(psdMax)
frequency = librosa.core.fft_frequencies(fs, int(2**np.ceil(np.log2(window_size))))
resFreq = frequency[-1] / (frequency.shape[0]-1)
resTime = window_size/step_per_window/(fs/1000)
psyTh = convolution.totalMask(PSD, resFreq, resTime, frequency[0], frequency[-1])
psyTh = psyTh.transpose()
psyThs.append(psyTh)
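# Encode each target transcription as a sequence of token indices for the CTC loss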
phrase = [[toks.index(c) for c in ph] for ph in dstText]
maxPhraseLen = np.array([len(p) for p in phrase]).max()
deltas = args.delta
if(deltas != None):
deltas = deltas.split(',')
deltas = [list(wav.read(delta)[1]) for delta in deltas]
deltas = np.array([delta+[0]*(maxlen-len(delta)) for delta in deltas])
# Set up the attack class and run it
batch_size=len(audios)
attack = Attack(sess, maxPhraseLen, maxlen,
batch_size=batch_size,
mp3=args.mp3,
learning_rate=args.lr,
window_size=window_size,
step_per_window=step_per_window,
delta=deltas,
audio=audios,
psdMaxes=np.array(psdMaxes),
psdShape=psyThs[0].shape,
num_iterations=args.iterations
)
#you can set the regularizer argument individually for each file
if(',' not in args.regularizer):
regularizer = np.array([args.regularizer]*batch_size).reshape((batch_size)) | random_line_split | ||
attack.py | == 0:
(d, d2,
plWAV, loss, r_logits,
new_input, r_out, regularizer) = sess.run((
self.delta, self.apply_delta,
self.psyLoss, self.loss, self.logits,
self.new_input, self.decoded, self.regularizer))
lst = [(r_out, r_logits, plWAV, loss, regularizer)]
if self.mp3:
mp3ed = []
for ii in range(len(new_input)):
mp3ed.append(convert_mp3(new_input[ii], max(lengths)))
mp3ed = np.concatenate(mp3ed, axis = 0)
mp3_out, mp3_logits, plMP3, loss = sess.run((
self.decoded, self.logits, self.psyLoss, self.loss),
{self.new_input: mp3ed})
lst = [(mp3_out, mp3_logits, plMP3, loss, regularizer)]
for out, logits, pl, loss, regularizer in lst:
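# Densify the sparse CTC decoder output (blank-filled) and map token indices back to characters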
chars = out[0].values
res = np.zeros(out[0].dense_shape)+len(toks)-1
for ii in range(len(out[0].values)):
x,y = out[0].indices[ii]
res[x,y] = out[0].values[ii]
# Here we print the strings that are recognized.
res = ["".join(toks[int(x)] for x in y).replace("-","") for y in res]
print('psyLoss ', pl)
print('bestPsy ', bestPSY)
print('loss ', loss)
print('regularizer', regularizer.reshape(-1))
# And here we print the argmax of the alignment.
res2 = np.argmax(logits,axis=2).T
res2 = ["".join(toks[int(x)] for x in y[:(l-1)//320]) for y,l in zip(res2,lengths)]
if self.mp3:
new = sess.run(self.new_input)
mp3ed = []
for ii in range(len(new_input)):
mp3ed.append(convert_mp3(new[ii], max(lengths))[0])
mp3ed = np.array(mp3ed)
feed_dict = {self.new_input: mp3ed}
else:
feed_dict = {}
# Actually do the optimization step
(train) = sess.run((self.train),feed_dict)
# Report progress
print('i: ', i, time.time()-start)
logits = np.argmax(r_logits, axis=2).T
if(i%10==0):
# Every 10 iterations, check if we've succeeded
# if we have (or if it's the final epoch) then we
# should record our progress and decrease the
# regularizer constant.
for ii in range(self.batch_size):
if(pl[ii] > regularizer[ii]):
# PsyLoss (pl) is higher than the regularizer => clip delta until it falls below the regularizer
print("%d : had a too high PsyLoss: %d we target %d at the moment"%(ii, pl[ii], regularizer[ii]))
print(pl[ii], regularizer[ii])
d[ii] = sess.run(self.clip(d[ii], self.psyTh[ii], regularizer[ii], self.psdMaxes[ii], self.max_audio_len, self.window_size, self.step_per_window))
sess.run(self.delta.assign(d))
else:
if(res[ii] == "".join([toks[x] for x in target[ii]])):
# Successful adversarial example
print("%d : correct"%(ii))
count = 0
if(pl[ii] < bestPSY[ii]):
# if new psyLoss is better than the one before, update best delta in final_deltas
bestCTC[ii] = loss[ii]
bestPSY[ii] = pl[ii]
final_deltas[ii] = new_input[ii]
name = "adv" + str(ii) + "reg" + str(regularizer.reshape(-1)[ii])
if self.mp3:
convert_mp3(new_input[ii], lengths[ii], name + '.mp3')
wav.write(
"/tmp/" + name + ".wav",
16000,
np.array(np.clip(np.round(new_input[ii][:lengths[ii]]),
-2**15, 2**15-1),dtype=np.int16))
regularizer[ii] = regularizer[ii]-1
sess.run(self.regularizer.assign(regularizer))
else:
print("%d : not correct with character error rate %f"%(ii, wer("".join([toks[x] for x in target[ii]]), res[ii])))
if(i == MAX-1 and final_deltas[ii] is None):
# if final_deltas[ii] is None means the algorithm did not find an adversarial example.
# consider increasing the regularizer with which you start
final_deltas[ii] = new_input[ii]
print("Did not work i=%d psylLoss=%f regularizer=%d, ctcLoss=%f"%(ii,pl[ii],regularizer[ii],loss[ii]))
wav.write("/tmp/adv.wav", 16000,
np.array(np.clip(np.round(new_input[ii]),
-2**15, 2**15-1),dtype=np.int16))
print('bestCTC', bestCTC)
print('bestPSY', bestPSY)
print('regularizer', regularizer.reshape(-1))
return final_deltas
def numpyPSD(audio, window_size=256, step_per_window=2):
# this is PSD_2 from the thesis implemented with numpy
frame_length = int(window_size)
frame_step = int(window_size//step_per_window)
n_fft = int(2**np.ceil(np.log2(frame_length)))
win = np.sqrt(8.0 / 3.) * librosa.core.stft(
y=audio,
n_fft=n_fft,
hop_length=frame_step,
win_length=frame_length,
center=False,
pad_mode='constant')
z = abs(win / window_size)
psd = 10 * np.log10(z * z + 0.0000000000000000001)
psd_max = np.max(psd)
PSD = 96 - psd_max + psd
return PSD, psd_max
def log10(x):
numerator = tf.log(x)
denominator = tf.log(tf.constant(10, dtype=numerator.dtype))
return numerator / denominator
def tfPSD(x, window_size=256, step_per_window=2, psdMax=None):
# this is PSD_3 from the thesis implemented with tensorflow
# x is batched if you only have one example expand the dimension accordingly
batch_size = x.get_shape()[0]
scale = tf.sqrt(8. / 3.)
frame_length = int(window_size)
frame_step = int(window_size//step_per_window)
fft_length = int(2**np.ceil(np.log2(frame_length)))
win = tf.scalar_mul(scale, tf.abs(tf.contrib.signal.stft(
signals=x,
frame_length=frame_length,
frame_step=frame_step,
fft_length=fft_length
)))
z = win / frame_length
#psd_max = tf.reduce_max(z * z, axis=[1,2])
psd = tf.reshape(tf.scalar_mul(10, log10((z * z) + 0.0000000000000000001)), [batch_size, -1, window_size//2+1])
PSD = 96 - tf.reshape(psdMax, [batch_size, 1, 1]) + psd
return PSD
def lev(i,j, a, b, mem):
if((i,j) in mem):
return mem[(i,j)]
if(min(i,j)==0):
res = max(i,j)
else:
res = min(
lev(i-1,j,a,b,mem) + 1,
lev(i,j-1,a,b,mem) + 1,
lev(i-1,j-1,a,b,mem) + (a[i]!=b[j])
)
mem[(i,j)] = res
return res
def wer(a,b):
mem = {}
return lev(len(a)-1, len(b)-1, a, b, mem)/len(a)
def main(args, thisId):
| print(thisId)
print(args)
with tf.Session() as sess:
audios = []
lengths = []
psyThs = []
psdMaxes = []
f = open(args.input, 'r')
temp = f.readlines()
temp = [row[:-1] for row in temp]
temp = [row.split(",") for row in temp]
inputFiles = temp[0]
dstText = temp[1]
# store output in same folder as input
outputFiles = [fileName[0] + "_" + thisId + "_out." + fileName[1] for fileName in [fileName.split('.') for fileName in inputFiles]]
f.close()
assert len(inputFiles) == len(dstText) | identifier_body | |
application.py | PODCASTNAME']+ ' ~ '+ date_locale
feed['streamUrl'] = url
else:
print "off-air" # no content found
feed['titleText'] = os.environ['PODCASTNAME']+' is off-air right now, check back again soon!'
feed['streamUrl'] = os.environ['FPATH']+os.environ['AUDIO']+"offair_"+str(randint(0, 4))+".mp3"
feed_json = json.dumps(feed)
print feed_json
return feed_json
# return list of episodes & offairs w/ html5 audio players (kind of like an admin dashboard, but unprotected right now)
@app.route('/episodes', methods=['GET'])
@basic_auth.required
def episodes():
data = geteps()
if data:
return render_template(
'episodes.html',
phone=os.environ['PHONE'],
email=os.environ['EMAIL'],
data=data,
name=os.environ['PODCASTNAME'],
path=os.environ['FPATH']+os.environ['AUDIO'])
else:
return render_template('error.html')
# return latest episode filename (with prefix)
@app.route('/latest', methods=['GET'])
@cross_origin()
def latest():
fn = getlatest()
date = fn[:-4]
m, d, y, dt = getdatefromfilename(fn)
nice_date = dt.strftime("%B %d, %Y")
latest = {"date": date, "nice_date": nice_date, "filename": os.environ['FPATH']+os.environ['AUDIO']+ fn}
feed_json = json.dumps(latest)
print feed_json
return feed_json
# return iTunes podcast feed xml (does not include -future- episodes)
@app.route('/podcast', methods=['GET'])
def podcast():
data = getepsiTunes()
date, date_locale, today, today_utc = getTime()
dt = today.timetuple()
dts = mktime(dt)
daterfc = utils.formatdate(dts)
if data:
for ep in data['episodes']:
xml = render_template(
'feed.xml',
date=daterfc,
data=data) # FEED NEEDS A LOT OF MANUAL WORK / CONTAINS NO ENV VARS, SO NEED TO EDIT ON YOUR OWN !!!
feed = make_response(xml)
feed.headers["Content-Type"] = "application/xml"
return feed
else:
return render_template('error.html')
# Pickup call & get date
@app.route('/begin_call', methods=['GET', 'POST'])
def begin_call():
print "start /begin_call"
from_number = request.values.get('From', None)
if from_number in callers:
session['caller'] = callers[from_number]
else:
session['caller'] = "unknown"
resp = VoiceResponse()
if session['caller'] != "unknown":
resp.say("Hey " + session['caller'] + "!")
gather = Gather(input='dtmf speech', timeout=5, num_digits=4, action='/set_date', method='GET')
gather.say("Let's record a new "+os.environ['PODCASTNAME']+"!\n First, when will this episode air?\n Say the air date or punch it in using a Month Month Day Day format.\n For example, you could say October 31st or punch in 10 31.")
resp.append(gather)
resp.say("You didn't give me a date. Bye!")
else:
resp.say("Hey, this isn't for you. \nBoy Bye!")
resp.hangup()
session.clear()
return str(resp)
# validate date & record audio
@app.route("/set_date", methods=["GET", "POST"])
def set_date():
print "start /set_date"
resp = VoiceResponse()
digits = request.values.get('Digits', None)
speech = request.values.get('SpeechResult', None)
print "dtmf digits: "+ str(digits)
#print "speech recognition: " + speech
#month=0
#digits=0
year=datetime.now().year
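# Prefer the speech transcription when present; otherwise fall back to the 4-digit MMDD keypad entry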
if speech:
cal = pdt.Calendar()
time, status = cal.parse(speech)
spoken_date = datetime(*time[:6])
print "spoken date: "+ spoken_date.strftime("%A, %B %-d, %Y")
month = spoken_date.month
day = spoken_date.day
year = spoken_date.year
else:
month = int(str(digits[:2]).lstrip("0").replace(" 0", " "))
day = int(str(digits[-2:]).lstrip("0").replace(" 0", " "))
if isvaliddate(month, day, year) is True:
session['airdate'] = datetime(year,month,day)
print session['airdate'].strftime("%A, %B %-d, %Y")
resp.say("Ok " + session['caller'] + ", this episode will air "+ session['airdate'].strftime("%A, %B %-d, %Y"))
resp.say("Next, record up to 3 minutes of audio following the beep.\n Press any key when you're done.")
resp.record(max_length="180", action="/play_schedule", RecordingChannels="dual", recording_channels="dual") # 3 min max
else:
resp.say("That's not a valid date, hang up and try again.")
resp.hangup()
session.clear()
return str(resp)
# replay audio & confirm scheduling
@app.route("/play_schedule", methods=['GET', 'POST'])
def play_schedule():
print "start /play_schedule"
session['mp3url'] = request.values.get("RecordingUrl", None)
resp = VoiceResponse()
resp.say("Here's what you recorded")
resp.play(session['mp3url'])
# SCHEDULE
print "Gather digits for scheduling"
resp.say("Ok, we're almost done.")
gather = Gather(input='dtmf', timeout=15, num_digits=1, action='/save_finish', method='GET')
gather.say('To schedule this episode, press 1. Otherwise, hang up.')
resp.append(gather)
resp.say("Uhm, ok, hanging up now.")
return str(resp)
# publish audio to s3 & end call
@app.route("/save_finish", methods=["GET", "POST"])
def save_finish():
print "start /save_finish"
resp = VoiceResponse()
digits = int(request.values.get('Digits', None))
if digits == 1:
resp.say("Alright, give me a hot second...")
# save file to s3 with correct date as filename and end call
if save_to_s3_url(session['mp3url']+".mp3", session['airdate'].strftime("%Y-%m-%d")+".mp3") is True:
resp.say("And boom, you're good to go! See you next time " + session['caller'] +" !")
else:
resp.say("Yikes "+ session['caller'] + " we ran into an error saving to s3. Can you try calling in again? Sorry!!")
else:
resp.say("No problem, just hangup and call back again.")
resp.hangup()
session.clear()
return str(resp)
# process incoming email via mailgun routes (SUPER HACKY!!!)
@app.route("/email", methods=["GET", "POST"])
def email():
sender = request.form['sender']
date = request.form['subject']
#print date
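# The subject line is expected to be 'YYYY-MM-DD'; slice out month/day/year and strip leading zeros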
month = int(date[5:-3].lstrip("0").replace(" 0", " "))
day = int(date[-2:].lstrip("0").replace(" 0", " "))
year = int(date[:4])
print "From: "+ sender
print "subject: "+ date
if sender in emailers:
print "It's an email from "+ emailers[sender]
if isvaliddate(month, day, year) is True:
fndate = datetime(year,month,day)
print "airdate: "+ fndate.strftime("%A, %B %-d, %Y")
print "audio file: "+ request.files.values()[0].filename
data = request.files.values()[0].stream.read()
if save_to_s3_email(fndate, data) is True:
print request.files.values()[0].filename+" saved!"
emailback(sender, "Your episode airs "+ fndate.strftime("%A, %B %-d, %Y"), emailers[sender]+ ", we successfully scheduled your episode.\n\nDon't reply to this email.")
return json.dumps({'success':True}), 200, {'ContentType':'application/json'}
else:
print "error saving "+attachment.filename
emailback(sender, "Error saving your episode to S3", "Try again? \n\nDon't reply to this email.")
return json.dumps({'file_saved':False}), 200, {'ContentType':'application/json'}
else:
print "incorrectly formatted date "+date
emailback(sender, "Error in your airdate", "Try again - and remember - your subject line should be 'YYYY-MM-DD', and that's it.\n\nDon't reply to this email.")
return json.dumps({'date_correct':False}), 200, {'ContentType':'application/json'}
else:
return json.dumps({'good_email':False}), 200, {'ContentType':'application/json'}
# record new video/clip using Ziggeo
@app.route("/record", methods=["GET", "POST"])
@basic_auth.required
def record():
| return render_template(
'record.html',
name=os.environ['PODCASTNAME'],
key=os.environ['ZIGKEY']) | identifier_body | |
application.py | =os.environ['S3KI'],
aws_secret_access_key=os.environ['S3SK'])
print "Connected to s3!!"
resp = s3.list_objects_v2(
Bucket=os.environ['BUCKET'],
Prefix=os.environ['AUDIO'])
tmp = []
for o in resp['Contents']:
|
print "latest episode is: "+ tmp[-1]
return tmp[-1]
except Exception as e:
print "Error talking to s3"
raise
return False
# save file to s3
def s3save(filename, fileobj, folder):
try:
s3 = boto3.client( 's3', aws_access_key_id=os.environ['S3KI'], aws_secret_access_key=os.environ['S3SK'])
print "Connected to s3!!"
print s3.put_object(Bucket=os.environ['BUCKET'], Key=folder+filename, Body=fileobj, ACL="public-read")
print "uploaded " + filename+ " to s3!"
return True
except Exception as e:
print "Error saving "+filename+ "to s3"
raise
return False
# backup audio shortcut method
def backupaudio(data):
tfn = str(uuid.uuid4())+".mp3"
print "backup filename: "+ tfn
if s3save(tfn, data, os.environ['ORIGINAL']):
print "Backed up original audio file as: "+ tfn
else:
print "FAILED to backup original audio file"
# download audio file from twilio and return file object
def getaudio(audiourl):
data = None
try:
# get file stream
if ".mp3" in audiourl:
r = requests.get(audiourl, stream=True)
file_r = r.raw
data = file_r.read()
print "Retreived audio stream!!"
return data
elif ".mp4" in audiourl:
r = requests.get(audiourl, stream=True)
fn = str(uuid.uuid4())
with open(fn, 'wb') as f:
for chunk in r.iter_content(chunk_size = 1024*1024):
if chunk:
f.write(chunk)
f.close()
with open(fn, 'r+b') as f:
data = f.read()
# clean up local file and return the data
os.remove(fn)
return data
else:
print "not an mp3 or mp4 file!!"
return False
except Exception as e:
print "Error retreiving audio stream"
raise
#return False
# amplify audio file using streams & ffmpeg
def amplify(audio):
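# Pipe the raw audio through ffmpeg: band-pass it (200 Hz high-pass, 3 kHz low-pass), apply loudnorm loudness normalization, and re-encode as 256 kbps MP3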
try:
ff = FFmpeg(
inputs={"pipe:0":None},
#outputs={"pipe:1": "-y -vn -af \"highpass=f=200, lowpass=f=3000, loudnorm=I=-14:TP=-2.0:LRA=11\" -b:a 256k -f mp3"} )
outputs={"pipe:1": "-y -af \"highpass=f=200, lowpass=f=3000, loudnorm=I=-14:LRA=1\" -b:a 256k -f mp3"} )
print ff.cmd
stdout, stderr = ff.run(
input_data=audio,
stdout=subprocess.PIPE)
#print stdout
#print stderr
print "Amplified audio!!"
return stdout
except Exception as e:
print "Error amplifying audio stream"
raise
#return False
# validate date (assumes current year, unless specified)
def isvaliddate(month, day, year=(datetime.now().year)):
correctDate = None
try:
newDate = datetime(year, month, day)
correctDate = True
except ValueError:
correctDate = False
return correctDate
# make sure date is not in the future & also a valid date
def isnotfuturedate(month, day, year):
qdate = datetime(year, month, day, tzinfo=pytz.timezone(os.environ['TZ']))
now = datetime.now(pytz.UTC)
if qdate <= now:
return True
else:
return False
"""
def save_to_s3_CLASSIC():
print "recording url: " + session['mp3url']
filename = session['airdate'].strftime("%Y-%m-%d")+".mp3"
print "filename: " + filename
# download/save url to s3
try:
# connect to s3
s3 = boto3.client(
's3',
aws_access_key_id=os.environ['S3KI'],
aws_secret_access_key=os.environ['S3SK']
)
print "connected to s3"
# get file stream
req_for_image = requests.get(session['mp3url'], stream=True)
file_object_from_req = req_for_image.raw
req_data = file_object_from_req.read()
print "got audio stream"
#AMPLIFY!!!!
ff = FFmpeg(
inputs={"pipe:0":None},
outputs={"pipe:1": "-y -af \"highpass=f=200, lowpass=f=3000, loudnorm=I=-14:TP=-2.0:LRA=11\" -b:a 256k -f mp3"} )
print ff.cmd
stdout, stderr = ff.run(
input_data=req_data,
stdout=subprocess.PIPE)
#print stdout
#print stderr
print "normalized audio"
# Upload to s3
s3.put_object(Bucket="wwaudio", Key="audio/"+filename, Body=stdout)
print "uploaded " + filename+ " to s3"
return True
except Exception as e:
print "Error uploading " + filename+ " to s3"
raise
return False
"""
def save_to_s3_url(url, filename):
print "recording url: " + url
print "filename: " + filename
# download, process, and save url to s3
try:
# get audio file stream
audio = getaudio(url)
# backup original audio
backupaudio(audio)
# amplify audio
amped_audio = amplify(audio)
# upload to s3
return s3save(filename, amped_audio, os.environ['AUDIO'])
except Exception as e:
print "Error getting, processing, or saving " + filename
raise
return False
def save_to_s3_email(date, audio):
filename = date.strftime("%Y-%m-%d")+".mp3"
print "filename: " + filename
# download, process, and save url to s3
try:
# backup original audio
backupaudio(audio)
# amplify audio
amped_audio = amplify(audio)
# upload to s3
return s3save(filename, amped_audio, os.environ['AUDIO'])
except Exception as e:
print "Error getting, processing, or saving " + filename
raise
return False
def url_check(url):
ping = requests.get(url)
print(ping.status_code)
if ping.status_code == 200:
print "OK, we found that file"
return True
else:
print "NOPE, we did not find that file"
return False
def emailback(email, subject, body):
try:
resp = requests.post(
os.environ['MAILGUNDOMAIN']+"/messages",
auth=("api", os.environ['MAILGUNKEY']),
data={"from": os.environ['PODCASTNAME']+" <"+os.environ['EMAIL']+">",
"to": [email],
"subject": subject,
"text": body})
except Exception as e:
print "Error sending email"
raise
return False
# establish current date in PT timezone
def getTime():
tz = pytz.timezone(os.environ['TZ'])
today = datetime.now(tz)
today_utc = today.astimezone(pytz.UTC)
date = today.strftime("%Y-%m-%d")
date_locale = today.strftime("%a, %B %d")
# debug lines for date info #
#print date
#print date_locale
#print today
#print today_utc
return date, date_locale, today, today_utc
### ROUTES
# Generate feed based on day of week
@app.route('/', methods=['GET'])
def index():
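# Build the daily feed entry (uid / updateDate / titleText / streamUrl), formatted like an Alexa Flash Briefing JSON feed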
# get current date in PT timezone
date, date_locale, today, today_utc = getTime()
feed = {}
feed['uid'] = str(uuid.uuid4())
feed['updateDate'] = today_utc.strftime('%Y-%m-%dT%H:%M:%S.0Z')
feed['mainText'] = ''
url = os.environ['FPATH']+os.environ['AUDIO']+date+ ".mp3"
print "checking for: " + url
if url_check(url):
print "on-air"
feed['titleText'] | fn = o['Key'].replace(os.environ['AUDIO'],'')
if "offair" in fn or fn is "":
pass
else:
#print fn
month, day, year, date = getdatefromfilename(fn)
if isvaliddate(month, day, year) is True and isnotfuturedate(month, day, year) is True:
tmp.append(fn)
#print tmp | conditional_block |
application.py | #AMPLIFY!!!!
ff = FFmpeg(
inputs={"pipe:0":None},
outputs={"pipe:1": "-y -af \"highpass=f=200, lowpass=f=3000, loudnorm=I=-14:TP=-2.0:LRA=11\" -b:a 256k -f mp3"} )
print ff.cmd
stdout, stderr = ff.run(
input_data=req_data,
stdout=subprocess.PIPE)
#print stdout
#print stderr
print "normalized audio"
# Upload to s3
s3.put_object(Bucket="wwaudio", Key="audio/"+filename, Body=stdout)
print "uploaded " + filename+ " to s3"
return True
except Exception as e:
print "Error uploading " + filename+ " to s3"
raise
return False
"""
def save_to_s3_url(url, filename):
print "recording url: " + url
print "filename: " + filename
# download, process, and save url to s3
try:
# get audio file stream
audio = getaudio(url)
# backup original audio
backupaudio(audio)
# amplify audio
amped_audio = amplify(audio)
# upload to s3
return s3save(filename, amped_audio, os.environ['AUDIO'])
except Exception as e:
print "Error getting, processing, or saving " + filename
raise
return False
def save_to_s3_email(date, audio):
filename = date.strftime("%Y-%m-%d")+".mp3"
print "filename: " + filename
# download, process, and save url to s3
try:
# backup original audio
backupaudio(audio)
# amplify audio
amped_audio = amplify(audio)
# upload to s3
return s3save(filename, amped_audio, os.environ['AUDIO'])
except Exception as e:
print "Error getting, processing, or saving " + filename
raise
return False
def url_check(url):
ping = requests.get(url)
print(ping.status_code)
if ping.status_code == 200:
print "OK, we found that file"
return True
else:
print "NOPE, we did not find that file"
return False
def emailback(email, subject, body):
try:
resp = requests.post(
os.environ['MAILGUNDOMAIN']+"/messages",
auth=("api", os.environ['MAILGUNKEY']),
data={"from": os.environ['PODCASTNAME']+" <"+os.environ['EMAIL']+">",
"to": [email],
"subject": subject,
"text": body})
except Exception as e:
print "Error sending email"
raise
return False
# establish current date in PT timezone
def getTime():
tz = pytz.timezone(os.environ['TZ'])
today = datetime.now(tz)
today_utc = today.astimezone(pytz.UTC)
date = today.strftime("%Y-%m-%d")
date_locale = today.strftime("%a, %B %d")
# debug lines for date info #
#print date
#print date_locale
#print today
#print today_utc
return date, date_locale, today, today_utc
### ROUTES
# Generate feed based on day of week
@app.route('/', methods=['GET'])
def index():
# get current date in PT timezone
date, date_locale, today, today_utc = getTime()
feed = {}
feed['uid'] = str(uuid.uuid4())
feed['updateDate'] = today_utc.strftime('%Y-%m-%dT%H:%M:%S.0Z')
feed['mainText'] = ''
url = os.environ['FPATH']+os.environ['AUDIO']+date+ ".mp3"
print "checking for: " + url
if url_check(url):
print "on-air"
feed['titleText'] = os.environ['PODCASTNAME']+ ' ~ '+ date_locale
feed['streamUrl'] = url
else:
print "off-air" # no content found
feed['titleText'] = os.environ['PODCASTNAME']+' is off-air right now, check back again soon!'
feed['streamUrl'] = os.environ['FPATH']+os.environ['AUDIO']+"offair_"+str(randint(0, 4))+".mp3"
feed_json = json.dumps(feed)
print feed_json
return feed_json
# return list of episodes & offairs w/ html5 audio players (a simple admin dashboard behind basic auth)
@app.route('/episodes', methods=['GET'])
@basic_auth.required
def episodes():
data = geteps()
if data:
return render_template(
'episodes.html',
phone=os.environ['PHONE'],
email=os.environ['EMAIL'],
data=data,
name=os.environ['PODCASTNAME'],
path=os.environ['FPATH']+os.environ['AUDIO'])
else:
return render_template('error.html')
# return latest episode filename (with prefix)
@app.route('/latest', methods=['GET'])
@cross_origin()
def latest():
fn = getlatest()
date = fn[:-4]
m, d, y, dt = getdatefromfilename(fn)
nice_date = dt.strftime("%B %d, %Y")
latest = {"date": date, "nice_date": nice_date, "filename": os.environ['FPATH']+os.environ['AUDIO']+ fn}
feed_json = json.dumps(latest)
print feed_json
return feed_json
# return iTunes podcast feed xml (does not include -future- episodes)
@app.route('/podcast', methods=['GET'])
def podcast():
data = getepsiTunes()
date, date_locale, today, today_utc = getTime()
dt = today.timetuple()
dts = mktime(dt)
daterfc = utils.formatdate(dts)
if data:
for ep in data['episodes']:
xml = render_template(
'feed.xml',
date=daterfc,
data=data) # FEED NEEDS A LOT OF MANUAL WORK / CONTAINS NO ENV VARS, SO NEED TO EDIT ON YOUR OWN !!!
feed = make_response(xml)
feed.headers["Content-Type"] = "application/xml"
return feed
else:
return render_template('error.html')
# Pickup call & get date
@app.route('/begin_call', methods=['GET', 'POST'])
def begin_call():
print "start /begin_call"
from_number = request.values.get('From', None)
if from_number in callers:
session['caller'] = callers[from_number]
else:
session['caller'] = "unknown"
resp = VoiceResponse()
if session['caller'] != "unknown":
resp.say("Hey " + session['caller'] + "!")
gather = Gather(input='dtmf speech', timeout=5, num_digits=4, action='/set_date', method='GET')
gather.say("Let's record a new "+os.environ['PODCASTNAME']+"!\n First, when will this episode air?\n Say the air date or punch it in using a Month Month Day Day format.\n For example, you could say October 31st or punch in 10 31.")
resp.append(gather)
resp.say("You didn't give me a date. Bye!")
else:
resp.say("Hey, this isn't for you. \nBoy Bye!")
resp.hangup()
session.clear()
return str(resp)
# validate date & record audio
@app.route("/set_date", methods=["GET", "POST"])
def set_date():
print "start /set_date"
resp = VoiceResponse()
digits = request.values.get('Digits', None)
speech = request.values.get('SpeechResult', None)
print "dtmf digits: "+ str(digits)
#print "speech recognition: " + speech
#month=0
#digits=0
year=datetime.now().year
if speech:
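# parsedatetime turns free-form speech such as "October 31st" into a time struct we can build a datetime from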
cal = pdt.Calendar()
time, status = cal.parse(speech)
spoken_date = datetime(*time[:6])
print "spoken date: "+ spoken_date.strftime("%A, %B %-d, %Y")
month = spoken_date.month
day = spoken_date.day
year = spoken_date.year
else:
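# DTMF input arrives as MMDD: the first two digits are the month, the last two the day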
month = int(str(digits[:2]).lstrip("0").replace(" 0", " "))
day = int(str(digits[-2:]).lstrip("0").replace(" 0", " "))
if isvaliddate(month, day, year) is True:
session['airdate'] = datetime(year,month,day)
print session['airdate'].strftime("%A, %B %-d, %Y")
resp.say("Ok " + session['caller'] + ", this episode will air "+ session['airdate'].strftime("%A, %B %-d, %Y"))
resp.say("Next, record up to 3 minutes of audio following the beep.\n Press any key when you're done.")
resp.record(max_length="180", action="/play_schedule", RecordingChannels="dual", recording_channels="dual") # 3 min max
else:
resp.say("That's not a valid date, hang up and try again.")
resp.hangup()
session.clear()
return str(resp)
# replay audio & confirm scheduling
@app.route("/play_schedule", methods=['GET', 'POST'])
def | play_schedule | identifier_name | |
application.py | =os.environ['S3KI'],
aws_secret_access_key=os.environ['S3SK'])
print "Connected to s3!!"
resp = s3.list_objects_v2(
Bucket=os.environ['BUCKET'],
Prefix=os.environ['AUDIO'])
tmp = []
for o in resp['Contents']:
fn = o['Key'].replace(os.environ['AUDIO'],'')
if "offair" in fn or fn is "":
pass
else:
#print fn
month, day, year, date = getdatefromfilename(fn)
if isvaliddate(month, day, year) is True and isnotfuturedate(month, day, year) is True:
tmp.append(fn)
#print tmp
print "latest episode is: "+ tmp[-1]
return tmp[-1]
except Exception as e:
print "Error talking to s3"
raise
return False
# save file to s3
def s3save(filename, fileobj, folder):
try:
s3 = boto3.client( 's3', aws_access_key_id=os.environ['S3KI'], aws_secret_access_key=os.environ['S3SK'])
print "Connected to s3!!"
print s3.put_object(Bucket=os.environ['BUCKET'], Key=folder+filename, Body=fileobj, ACL="public-read")
print "uploaded " + filename+ " to s3!"
return True
except Exception as e:
print "Error saving "+filename+ "to s3"
raise
return False
# backup audio shortcut method
def backupaudio(data):
tfn = str(uuid.uuid4())+".mp3"
print "backup filename: "+ tfn
if s3save(tfn, data, os.environ['ORIGINAL']):
print "Backed up original audio file as: "+ tfn
else:
print "FAILED to backup original audio file"
# download audio file from twilio and return file object
def getaudio(audiourl):
data = None
try:
# get file stream
if ".mp3" in audiourl:
r = requests.get(audiourl, stream=True)
file_r = r.raw
data = file_r.read()
print "Retreived audio stream!!"
return data
elif ".mp4" in audiourl:
r = requests.get(audiourl, stream=True)
fn = str(uuid.uuid4())
with open(fn, 'wb') as f:
for chunk in r.iter_content(chunk_size = 1024*1024):
if chunk:
f.write(chunk)
f.close()
with open(fn, 'r+b') as f:
data = f.read()
# clean up local file and return the data
os.remove(fn)
return data
else:
print "not an mp3 or mp4 file!!"
return False
except Exception as e:
print "Error retreiving audio stream"
raise
#return False
# amplify audio file using streams & ffmpeg
def amplify(audio):
try:
ff = FFmpeg(
inputs={"pipe:0":None},
#outputs={"pipe:1": "-y -vn -af \"highpass=f=200, lowpass=f=3000, loudnorm=I=-14:TP=-2.0:LRA=11\" -b:a 256k -f mp3"} )
outputs={"pipe:1": "-y -af \"highpass=f=200, lowpass=f=3000, loudnorm=I=-14:LRA=1\" -b:a 256k -f mp3"} )
print ff.cmd
stdout, stderr = ff.run(
input_data=audio,
stdout=subprocess.PIPE)
#print stdout
#print stderr
print "Amplified audio!!"
return stdout
except Exception as e:
print "Error amplifying audio stream"
raise
#return False
# validate date (assumes current year, unless specified)
def isvaliddate(month, day, year=(datetime.now().year)):
correctDate = None
try:
newDate = datetime(year, month, day)
correctDate = True
except ValueError:
correctDate = False
return correctDate
# make sure date is not in the future & also a valid date
def isnotfuturedate(month, day, year):
qdate = datetime(year, month, day, tzinfo=pytz.timezone(os.environ['TZ']))
now = datetime.now(pytz.UTC)
if qdate <= now:
return True
else:
return False
"""
def save_to_s3_CLASSIC():
print "recording url: " + session['mp3url']
filename = session['airdate'].strftime("%Y-%m-%d")+".mp3"
print "filename: " + filename
# download/save url to s3
try:
# connect to s3
s3 = boto3.client(
's3',
aws_access_key_id=os.environ['S3KI'],
aws_secret_access_key=os.environ['S3SK']
)
print "connected to s3"
# get file stream
req_for_image = requests.get(session['mp3url'], stream=True)
file_object_from_req = req_for_image.raw
req_data = file_object_from_req.read()
print "got audio stream"
#AMPLIFY!!!!
ff = FFmpeg(
inputs={"pipe:0":None},
outputs={"pipe:1": "-y -af \"highpass=f=200, lowpass=f=3000, loudnorm=I=-14:TP=-2.0:LRA=11\" -b:a 256k -f mp3"} )
print ff.cmd
stdout, stderr = ff.run(
input_data=req_data,
stdout=subprocess.PIPE)
#print stdout
#print stderr
print "normalized audio"
# Upload to s3
s3.put_object(Bucket="wwaudio", Key="audio/"+filename, Body=stdout)
print "uploaded " + filename+ " to s3"
return True
except Exception as e:
print "Error uploading " + filename+ " to s3"
raise
return False
"""
def save_to_s3_url(url, filename):
print "recording url: " + url
print "filename: " + filename
# download, process, and save url to s3
try:
# get audio file stream
audio = getaudio(url)
# backup original audio
backupaudio(audio)
# amplify audio
amped_audio = amplify(audio)
# upload to s3
return s3save(filename, amped_audio, os.environ['AUDIO'])
except Exception as e:
print "Error getting, processing, or saving " + filename
raise
return False
def save_to_s3_email(date, audio):
filename = date.strftime("%Y-%m-%d")+".mp3"
print "filename: " + filename
# process and save the supplied audio to s3
try:
# backup original audio
backupaudio(audio) | # amplify audio
amped_audio = amplify(audio)
# upload to s3
return s3save(filename, amped_audio, os.environ['AUDIO'])
except Exception as e:
print "Error getting, processing, or saving " + filename
raise
return False
def url_check(url):
ping = requests.get(url)
print(ping.status_code)
if ping.status_code == 200:
print "OK, we found that file"
return True
else:
print "NOPE, we did not find that file"
return False
def emailback(email, subject, body):
try:
resp = requests.post(
os.environ['MAILGUNDOMAIN']+"/messages",
auth=("api", os.environ['MAILGUNKEY']),
data={"from": os.environ['PODCASTNAME']+" <"+os.environ['EMAIL']+">",
"to": [email],
"subject": subject,
"text": body})
except Exception as e:
print "Error sending email"
raise
return False
# establish current date in PT timezone
def getTime():
tz = pytz.timezone(os.environ['TZ'])
today = datetime.now(tz)
today_utc = today.astimezone(pytz.UTC)
date = today.strftime("%Y-%m-%d")
date_locale = today.strftime("%a, %B %d")
# debug lines for date info #
#print date
#print date_locale
#print today
#print today_utc
return date, date_locale, today, today_utc
### ROUTES
# Generate feed based on day of week
@app.route('/', methods=['GET'])
def index():
# get current date in PT timezone
date, date_locale, today, today_utc = getTime()
feed = {}
feed['uid'] = str(uuid.uuid4())
feed['updateDate'] = today_utc.strftime('%Y-%m-%dT%H:%M:%S.0Z')
feed['mainText'] = ''
url = os.environ['FPATH']+os.environ['AUDIO']+date+ ".mp3"
print "checking for: " + url
if url_check(url):
print "on-air"
feed['titleText'] | random_line_split | |
top500.py | 'http://yandex.ru',
'http://digg.com',
'http://mozilla.org',
'http://huffingtonpost.com',
'http://stumbleupon.com',
'http://123-reg.co.uk',
'http://issuu.com',
'http://creativecommons.org',
'http://wsj.com',
'http://miibeian.gov.cn',
'http://ovh.net',
'http://go.com',
'http://imdb.com',
'http://nih.gov',
'http://secureserver.net',
'http://theguardian.com',
'http://forbes.com',
'http://msn.com',
'http://weibo.com',
'http://paypal.com',
'http://slideshare.net',
'http://google.co.jp',
'http://miitbeian.gov.cn',
'http://washingtonpost.com',
'http://wp.com',
'http://dropbox.com',
'http://domainactive.co',
'http://amazonaws.com',
'http://yelp.com',
'http://eventbrite.com',
'http://ebay.com',
'http://typepad.com',
'http://telegraph.co.uk',
'http://addtoany.com',
'http://reuters.com',
'http://macromedia.com',
'http://sourceforge.net',
'http://etsy.com',
'http://about.com',
'http://free.fr',
'http://usatoday.com',
'http://ameblo.jp',
'http://dailymail.co.uk',
'http://archive.org',
'http://constantcontact.com',
'http://aol.com',
'http://livejournal.com',
'http://google.co.uk',
'http://fc2.com',
'http://time.com',
'http://bing.com',
'http://icio.us',
'http://amazon.co.uk',
'http://mail.ru',
'http://latimes.com',
'http://yahoo.co.jp',
'http://eepurl.com',
'http://51.la',
'http://guardian.co.uk',
'http://npr.org',
'http://cpanel.net',
'http://harvard.edu',
'http://surveymonkey.com',
'http://taobao.com',
'http://1und1.de',
'http://bloomberg.com',
'http://xing.com',
'http://wikimedia.org',
'http://e-recht24.de',
'http://cdc.gov',
'http://cpanel.com',
'http://amazon.de',
'http://hostnet.nl',
'http://mit.edu',
'http://dailymotion.com',
'http://bbb.org',
'http://live.com',
'http://wired.com',
'http://stanford.edu',
'http://list-manage.com',
'http://joomla.org',
'http://webs.com',
'http://hatena.ne.jp',
'http://blogspot.co.uk',
'http://one.com',
'http://domainname.ru',
'http://elegantthemes.com',
'http://delicious.com',
'http://apache.org',
'http://bandcamp.com',
'http://163.com',
'http://kickstarter.com',
'http://networksolutions.com',
'http://amzn.to',
'http://homestead.com',
'http://rambler.ru',
'http://tripadvisor.com',
'http://nasa.gov',
'http://cnet.com',
'http://ovh.com',
'http://gnu.org',
'http://businessinsider.com',
'http://scribd.com',
'http://geocities.com',
'http://independent.co.uk',
'http://photobucket.com',
'http://bbc.com',
'http://disqus.com',
'http://amazon.co.jp',
'http://ted.com',
'http://un.org',
'http://imgur.com',
'http://pbs.org',
'http://trustpilot.com',
'http://domainname.de',
'http://google.fr',
'http://adition.com',
'http://opera.com',
'http://behance.net',
'http://cbsnews.com',
'http://mashable.com',
'http://tripod.com',
'http://wiley.com',
'http://who.int',
'http://deviantart.com',
'http://googleusercontent.com',
'http://ibm.com',
'http://ca.gov',
'http://nationalgeographic.com',
'http://whitehouse.gov',
'http://berkeley.edu',
'http://barnesandnoble.com',
'http://hibu.com',
'http://foxnews.com',
'http://theatlantic.com',
'http://google.ca',
'http://mijndomein.nl',
'http://loopia.se',
'http://google.es',
'http://sohu.com',
'http://techcrunch.com',
'http://namejet.com',
'http://rakuten.co.jp',
'http://loopia.com',
'http://github.io',
'http://visma.com',
'http://goodreads.com',
'http://nature.com',
'http://spotify.com',
'http://medium.com',
'http://cornell.edu',
'http://buzzfeed.com',
'http://usda.gov',
'http://google.it',
'http://ft.com',
'http://ifeng.com',
'http://squarespace.com',
'http://technorati.com',
'http://wixsite.com',
'http://engadget.com',
'http://epa.gov',
'http://cbc.ca',
'http://sciencedirect.com',
'http://sakura.ne.jp',
'http://doubleclick.net',
'http://blogspot.com.es',
'http://change.org',
'http://noaa.gov',
'http://economist.com',
'http://name.com',
'http://bizjournals.com',
'http://php.net',
'http://1and1.fr',
'http://sfgate.com',
'http://gravatar.com',
'http://loc.gov',
'http://ow.ly',
'http://sogou.com',
'http://vkontakte.ru',
'http://detik.com',
'http://prnewswire.com',
'http://meetup.com',
'http://blogspot.de',
'http://nps.gov',
'http://usnews.com',
'http://chicagotribune.com',
'http://businessweek.com',
'http://springer.com',
'http://slate.com',
'http://histats.com',
'http://1and1.com',
'http://umblr.com',
'http://newyorker.com',
'http://cbslocal.com',
'http://spiegel.de',
'http://baiyewang.com',
'http://abc.net.au',
'http://themeforest.net',
'http://about.me',
'http://nydailynews.com',
'http://hp.com',
'http://list-manage1.com',
'http://myshopify.com',
'http://100ye.com',
'http://wikia.com',
'http://umich.edu',
'http://google.com.au',
'http://marriott.com',
'http://xinhuanet.com',
'http://wufoo.com',
'http://webmd.com',
'http://mapquest.com',
'http://ustream.tv',
'http://rs6.net',
'http://foursquare.com',
'http://fda.gov',
'http://cnbc.com',
'http://house.gov',
'http://salenames.ru',
'http://away.ru',
'http://homes.ru',
'http://promopages.ru',
'http://home.pl',
'http://yale.edu',
'http://state.gov',
'http://columbia.edu', | 'http://bigcartel.com',
'http://acquirethisname.com',
'http://wp.me',
'http://cloudfront.net',
'http://unesco.org',
'http://ocn.ne.jp',
'http://gizmodo.com',
'http://skype.com',
'http://fb.me',
'http://upenn.edu',
'http://beian.gov.cn',
'http://a8.net',
'http://geocities.jp',
'http://storify.com',
'http://washington.edu',
'http://people.com.cn',
'http://businesswire.com',
'http://livedoor.jp',
'http://afternic.com',
'http://domainnameshop.com',
'http://line.me',
'http://dreamhost.com',
'http://senate.gov',
'http://naver.com',
'http://uk2.net',
'http://vice.com',
'http://hilton.com',
'http://haljl.com',
'http://domeneshop.no',
'http://irs.gov',
'http://zdnet.com',
'http://doi.org',
'http://smh.com.au',
'http://linksynergy.com',
'http://weather.com',
'http://hexun.com',
'http://booking.com',
'http://android.com',
'http://register.it',
'http://fortune.com',
'http://utexas.edu',
'http://marketwatch.com',
'http://theverge.com',
'http://indiatimes.com',
'http://wisc.edu',
'http://hostgator.com',
'http://fastcompany.com',
'http://bola.net',
'http://xiti.com',
'http://nic.tel',
'http://dribbble.com',
'http://clickbank.net',
'http://ox.ac.uk',
'http://gstatic.com',
'http://debian.org',
'http://samsung.com',
'http://ap.org',
'http://nhs.uk',
'http://shopify.com',
'http://enable-javascript.com',
'http://drupal.org',
'http://fb.com',
'http://mlb.com',
'http://wunderground.com',
'http://nazwa.pl',
'http://worldbank.org',
'http://census.gov',
'http://studiopress.com',
'http://netcraft.com',
'http://oracle.com',
'http://si.edu',
'http://bestfwdservice.com',
'http://sagepub.com',
'http://campaign-archive1.com',
'http://goo.ne.jp',
'http://campaign-archive2.com',
'http://directdomains.com',
'http://sciencemag.org',
'http://ranshao.com',
'http://mozilla.com',
'http://princeton.edu',
'http://alexa.com',
'http://alibaba.com',
'http://usgs.gov',
'http://houzz.com',
'http://youku.com',
'http://paginegialle.it',
'http://telnic.org',
'http://intel.com',
'http://google.nl',
'http://iqiyi.com',
'http://mailchimp.com',
'http://oxfordjournals.org',
'http://ftc.gov',
'http://prweb.com',
'http://jdoqocy.com',
'http://inc.com',
'http://cam.ac.uk',
'http://arstechnica.com',
'http://oecd.org',
'http://cisco.com',
'http://politico.com | 'http://ed.gov',
'http://phpbb.com',
'http://nbcnews.com',
'http://jiathis.com', | random_line_split |
broker.go | (chan net.Conn, pxyPoolSize),
lastPing: time.Now(),
writerShutdown: util.NewShutdown(),
readerShutdown: util.NewShutdown(),
managerShutdown: util.NewShutdown(),
shutdown: util.NewShutdown(),
broker: b,
plumbers: util.NewSet(0),
}
// register the control
if old := b.controllers.Add(authMsg.Id, bc); old != nil {
// old had been kicked out
// routine for shutdown the old one
go func(old *NodeController) {
// send bye message to avoid control reconnect
if err := util.PanicToError(func() { old.out <- new(Bye) }); err != nil {
log.Debug("send Bye message error: %v", err)
}
// change id to empty string
old.id = ""
// tell the old one to shutdown
old.shutdown.Begin()
}(old.(*NodeController))
}
// start four goroutines
go bc.writer()
go bc.manager()
go bc.reader()
go bc.stopper()
// send success message
util.PanicToError(func() { bc.out <- new(AccessResp) })
log.Debug("Broker::control authenticate with id: %v", authMsg.Id)
}
func (b *Broker) tunnel(pxyConn net.Conn, reqTunnel *RegTunnel) {
// authenticate firstly
if err := b.getAuthenticate(&reqTunnel.Auth); err != nil {
pxyConn.SetWriteDeadline(time.Now().Add(rwTimeout))
WriteMsg(pxyConn, &AccessResp{Error: err.Error()})
pxyConn.Close()
return
}
pxyConn.SetWriteDeadline(time.Now().Add(rwTimeout))
if err := WriteMsg(pxyConn, new(AccessResp)); err != nil {
pxyConn.Close()
return
}
// look up the control for this tunnel conn
var ctl *NodeController
if tmp, ok := b.controllers.Get(reqTunnel.Auth.Id); !ok {
log.Debug("Broker::tunnel registering new tunnel for %s", reqTunnel.Auth.Id)
pxyConn.Close()
return
} else {
ctl = tmp.(*NodeController)
}
log.Debug("Broker::tunnel registering new tunnel for %s", reqTunnel.Auth.Id)
ctl.regTunnel(pxyConn)
}
func (b *Broker) Broker(srcConn net.Conn, reqBrokerMsg *ReqBroker) {
b.broker(srcConn, reqBrokerMsg)
}
func (b *Broker) broker(srcConn net.Conn, reqBrokerMsg *ReqBroker) {
defer srcConn.Close()
// authenticate
if err := b.reqBrokerPermission(reqBrokerMsg); err != nil {
srcConn.SetWriteDeadline(time.Now().Add(rwTimeout))
WriteMsg(srcConn, &AccessResp{Error: err.Error()})
return
}
// look up the control connection for this tunnel
var dstCtl *NodeController
if tmp, ok := b.controllers.Get(reqBrokerMsg.DstId); !ok {
log.Debug("Broker::broker no control found for target control id: %s", reqBrokerMsg.DstId)
return
} else {
dstCtl = tmp.(*NodeController)
}
log.Debug("Broker:broker get dstConn for %s", dstCtl.id)
dstConn, err := dstCtl.getTunnel(reqBrokerMsg)
if err != nil {
srcConn.SetWriteDeadline(time.Now().Add(rwTimeout))
WriteMsg(srcConn, &AccessResp{Error: "failed to get tunnel connection of target control"})
return
}
defer dstConn.Close()
srcConn.SetWriteDeadline(time.Now().Add(rwTimeout))
if err := WriteMsg(srcConn, new(AccessResp)); err != nil {
log.Error("Broker::broker send broker success message error: %v", err)
return
}
srcConn.SetDeadline(time.Time{})
// join the public and tunnel connections
log.Debug("start joining srcConn, dstConn")
p := NewPlumber(srcConn, dstConn)
dstCtl.stopRWMutex.RLock()
if dstCtl.stopping {
dstCtl.stopRWMutex.RUnlock()
log.Debug("Broker::broker dstCtl %s is stopping", dstCtl.id)
return
}
dstCtl.plumbers.Add(p)
defer dstCtl.plumbers.Remove(p)
dstCtl.stopRWMutex.RUnlock()
bytes2Src, bytes2Dst := p.Pipe(bytesForRate)
log.Debug("Broker::broker bytes2Src :%d bytes2Dst:%d", bytes2Src, bytes2Dst)
}
type NodeController struct {
// id of the control
id string
// authMsg
authMsg *Auth
// main controller connection
ctlConn net.Conn
// broker of the controller
broker *Broker
// unlimited capacity set
plumbers *util.Set
stopping bool
stopRWMutex sync.RWMutex
// put a message in this channel to send it over conn to the controller
out chan Message
// read from this channel to get the next message sent to us over conn by the controller
in chan Message
// the last time we received a ping from the controller - for heartbeats
lastPing time.Time
// tunnel connections
tunnels chan net.Conn
// synchronizer for writer()
writerShutdown *util.Shutdown
// synchronizer for reader()
readerShutdown *util.Shutdown
// synchronizer for manager()
managerShutdown *util.Shutdown
// synchronizer for entire controller
shutdown *util.Shutdown
}
func (nc *NodeController) AuthMsg() *Auth {
return nc.authMsg
}
func (nc *NodeController) manager() {
defer func() {
if err := recover(); err != nil {
log.Error("NodeController::manager recover with error: %v, stack: %s", err, debug.Stack())
}
}()
// kill everything if the control manager stops
defer nc.shutdown.Begin()
// notify that manager() has shutdown
defer nc.managerShutdown.Complete()
// reaping timer for detecting heartbeat failure
pingCheck := time.NewTicker(time.Second)
defer pingCheck.Stop()
for {
select {
case <-pingCheck.C:
if time.Since(nc.lastPing) > (pingInterval + rwTimeout) {
log.Debug("NodeController::manager lost heartbeat")
return
}
case mRaw, ok := <-nc.in:
// c.in closes to indicate shutdown
if !ok {
log.Debug("NodeController::manager chan bc.in closed")
return
}
//log.Debug("NodeController::manager PING")
if _, ok := mRaw.(*Ping); ok {
nc.lastPing = time.Now()
// don't crash on panic
if err := util.PanicToError(func() { nc.out <- new(Pong) }); err != nil {
log.Debug("NodeController::manager send message to bc.out error: %v", err)
return
}
//log.Debug("NodeController::manager PONG")
}
}
}
}
func (nc *NodeController) writer() {
defer func() {
if err := recover(); err != nil {
log.Error("NodeController::writer recover with error: %v, stack: %s", err, debug.Stack())
}
}()
// kill everything if the writer() stops
defer nc.shutdown.Begin()
// notify that we've flushed all messages
defer nc.writerShutdown.Complete()
// write messages to the control channel
for m := range nc.out {
nc.ctlConn.SetWriteDeadline(time.Now().Add(rwTimeout))
if err := WriteMsg(nc.ctlConn, m); err != nil {
// bc.conn may be closed
log.Debug("NodeController::writer WriteMsg error: %v", err)
return
}
}
}
func (nc *NodeController) reader() {
defer func() {
if err := recover(); err != nil {
log.Error("NodeController::reader recover with error: %v, stack: %s", err, debug.Stack())
}
}()
// kill everything if the reader stops
defer nc.shutdown.Begin()
// notify that we're done
defer nc.readerShutdown.Complete()
// read messages from the control channel
for {
if message, err := ReadMsg(nc.ctlConn); err != nil {
log.Debug("NodeController::read message: %v", err)
return
} else {
// this can also panic during shutdown
if err := util.PanicToError(func() { nc.in <- message }); err != nil {
log.Debug("NodeController::reader bc.in <- message error: %v", err)
return
}
}
}
}
func (nc *NodeController) stopper() | {
defer func() {
if err := recover(); err != nil {
log.Error("NodeController::stopper recover with error: %v, stack: %s", err, debug.Stack())
}
}()
defer nc.shutdown.Complete()
// wait until we're instructed to shutdown
nc.shutdown.WaitBegin()
nc.stopRWMutex.Lock()
nc.stopping = true
nc.stopRWMutex.Unlock()
// close all plumbers
nc.plumbers.Each(func(elem interface{}) { elem.(*Plumber).Close() })
nc.plumbers.Clean()
| identifier_body | |
broker.go | :
b.control(conn, msg)
case *RegTunnel:
b.tunnel(conn, msg)
case *ReqBroker:
b.broker(conn, msg)
default:
conn.Close()
}
}(conn)
}
}
func (b *Broker) control(ctlConn net.Conn, authMsg *Auth) {
// authenticate firstly
if err := b.getAuthenticate(authMsg); err != nil {
ctlConn.SetWriteDeadline(time.Now().Add(rwTimeout))
WriteMsg(ctlConn, &AccessResp{Error: err.Error()})
ctlConn.Close()
return
}
// create the object
bc := &NodeController{
id: authMsg.Id,
authMsg: authMsg,
ctlConn: ctlConn,
out: make(chan Message),
in: make(chan Message),
tunnels: make(chan net.Conn, pxyPoolSize),
lastPing: time.Now(),
writerShutdown: util.NewShutdown(),
readerShutdown: util.NewShutdown(),
managerShutdown: util.NewShutdown(),
shutdown: util.NewShutdown(),
broker: b,
plumbers: util.NewSet(0),
}
// register the control
if old := b.controllers.Add(authMsg.Id, bc); old != nil {
// old had been kicked out
// routine for shutdown the old one
go func(old *NodeController) {
// send bye message to avoid control reconnect
if err := util.PanicToError(func() { old.out <- new(Bye) }); err != nil {
log.Debug("send Bye message error: %v", err)
}
// change id to empty string
old.id = ""
// tell the old one to shutdown
old.shutdown.Begin()
}(old.(*NodeController))
}
// start four goroutines
go bc.writer()
go bc.manager()
go bc.reader()
go bc.stopper()
// send success message
util.PanicToError(func() { bc.out <- new(AccessResp) })
log.Debug("Broker::control authenticate with id: %v", authMsg.Id)
}
func (b *Broker) tunnel(pxyConn net.Conn, reqTunnel *RegTunnel) {
// authenticate firstly
if err := b.getAuthenticate(&reqTunnel.Auth); err != nil {
pxyConn.SetWriteDeadline(time.Now().Add(rwTimeout))
WriteMsg(pxyConn, &AccessResp{Error: err.Error()})
pxyConn.Close()
return
}
pxyConn.SetWriteDeadline(time.Now().Add(rwTimeout))
if err := WriteMsg(pxyConn, new(AccessResp)); err != nil {
pxyConn.Close()
return
}
// look up the control for this tunnel conn
var ctl *NodeController
if tmp, ok := b.controllers.Get(reqTunnel.Auth.Id); !ok {
log.Debug("Broker::tunnel registering new tunnel for %s", reqTunnel.Auth.Id)
pxyConn.Close()
return
} else {
ctl = tmp.(*NodeController)
}
log.Debug("Broker::tunnel registering new tunnel for %s", reqTunnel.Auth.Id)
ctl.regTunnel(pxyConn)
}
func (b *Broker) Broker(srcConn net.Conn, reqBrokerMsg *ReqBroker) {
b.broker(srcConn, reqBrokerMsg)
}
func (b *Broker) broker(srcConn net.Conn, reqBrokerMsg *ReqBroker) {
defer srcConn.Close()
// authenticate
if err := b.reqBrokerPermission(reqBrokerMsg); err != nil {
srcConn.SetWriteDeadline(time.Now().Add(rwTimeout))
WriteMsg(srcConn, &AccessResp{Error: err.Error()})
return
}
// look up the control connection for this tunnel
var dstCtl *NodeController
if tmp, ok := b.controllers.Get(reqBrokerMsg.DstId); !ok {
log.Debug("Broker::broker no control found for target control id: %s", reqBrokerMsg.DstId)
return
} else {
dstCtl = tmp.(*NodeController)
}
log.Debug("Broker:broker get dstConn for %s", dstCtl.id)
dstConn, err := dstCtl.getTunnel(reqBrokerMsg)
if err != nil {
srcConn.SetWriteDeadline(time.Now().Add(rwTimeout))
WriteMsg(srcConn, &AccessResp{Error: "failed to get tunnel connection of target control"})
return
}
defer dstConn.Close()
srcConn.SetWriteDeadline(time.Now().Add(rwTimeout))
if err := WriteMsg(srcConn, new(AccessResp)); err != nil {
log.Error("Broker::broker send broker success message error: %v", err)
return
}
srcConn.SetDeadline(time.Time{})
// join the public and tunnel connections
log.Debug("start joining srcConn, dstConn")
p := NewPlumber(srcConn, dstConn)
dstCtl.stopRWMutex.RLock()
if dstCtl.stopping {
dstCtl.stopRWMutex.RUnlock()
log.Debug("Broker::broker dstCtl %s is stopping", dstCtl.id)
return
}
dstCtl.plumbers.Add(p)
defer dstCtl.plumbers.Remove(p)
dstCtl.stopRWMutex.RUnlock()
bytes2Src, bytes2Dst := p.Pipe(bytesForRate)
log.Debug("Broker::broker bytes2Src :%d bytes2Dst:%d", bytes2Src, bytes2Dst)
}
type NodeController struct {
// id of the control
id string
// authMsg
authMsg *Auth
// main controller connection
ctlConn net.Conn
// broker of the controller
broker *Broker
// unlimited capacity set
plumbers *util.Set
stopping bool
stopRWMutex sync.RWMutex
// put a message in this channel to send it over conn to the controller
out chan Message
// read from this channel to get the next message sent to us over conn by the controller
in chan Message
// the last time we received a ping from the controller - for heartbeats
lastPing time.Time
// tunnel connections
tunnels chan net.Conn
// synchronizer for writer()
writerShutdown *util.Shutdown
// synchronizer for reader()
readerShutdown *util.Shutdown
// synchronizer for manager()
managerShutdown *util.Shutdown
// synchronizer for entire controller
shutdown *util.Shutdown
}
func (nc *NodeController) AuthMsg() *Auth {
return nc.authMsg
}
func (nc *NodeController) manager() {
defer func() {
if err := recover(); err != nil {
log.Error("NodeController::manager recover with error: %v, stack: %s", err, debug.Stack())
}
}()
// kill everything if the control manager stops
defer nc.shutdown.Begin()
// notify that manager() has shutdown
defer nc.managerShutdown.Complete()
// reaping timer for detecting heartbeat failure
pingCheck := time.NewTicker(time.Second)
defer pingCheck.Stop()
for {
select {
case <-pingCheck.C:
if time.Since(nc.lastPing) > (pingInterval + rwTimeout) {
log.Debug("NodeController::manager lost heartbeat")
return
}
case mRaw, ok := <-nc.in:
// c.in closes to indicate shutdown
if !ok |
//log.Debug("NodeController::manager PING")
if _, ok := mRaw.(*Ping); ok {
nc.lastPing = time.Now()
// don't crash on panic
if err := util.PanicToError(func() { nc.out <- new(Pong) }); err != nil {
log.Debug("NodeController::manager send message to bc.out error: %v", err)
return
}
//log.Debug("NodeController::manager PONG")
}
}
}
}
func (nc *NodeController) writer() {
defer func() {
if err := recover(); err != nil {
log.Error("NodeController::writer recover with error: %v, stack: %s", err, debug.Stack())
}
}()
// kill everything if the writer() stops
defer nc.shutdown.Begin()
// notify that we've flushed all messages
defer nc.writerShutdown.Complete()
// write messages to the control channel
for m := range nc.out {
nc.ctlConn.SetWriteDeadline(time.Now().Add(rwTimeout))
if err := WriteMsg(nc.ctlConn, m); err != nil {
// bc.conn may be closed
log.Debug("NodeController::writer WriteMsg error: %v", err)
return
}
}
}
func (nc *NodeController) reader() {
defer func() {
if err := recover(); err != nil {
log.Error("NodeController::reader recover with error: %v, stack: %s", err, debug.Stack())
}
}()
// kill everything if the reader stops
defer nc.shutdown.Begin()
// notify that we're done
defer nc.readerShutdown.Complete()
// read messages from the control channel
for {
if message, err := ReadMsg(nc.ctlConn); err != nil {
log.Debug("NodeController::read message: %v", err)
return
} else {
| {
log.Debug("NodeController::manager chan bc.in closed")
return
} | conditional_block |
broker.go | err.Error()})
ctlConn.Close()
return
}
// create the object
bc := &NodeController{
id: authMsg.Id,
authMsg: authMsg,
ctlConn: ctlConn,
out: make(chan Message),
in: make(chan Message),
tunnels: make(chan net.Conn, pxyPoolSize),
lastPing: time.Now(),
writerShutdown: util.NewShutdown(),
readerShutdown: util.NewShutdown(),
managerShutdown: util.NewShutdown(),
shutdown: util.NewShutdown(),
broker: b,
plumbers: util.NewSet(0),
}
// register the control
if old := b.controllers.Add(authMsg.Id, bc); old != nil {
// old had been kicked out
// routine for shutdown the old one
go func(old *NodeController) {
// send bye message to avoid control reconnect
if err := util.PanicToError(func() { old.out <- new(Bye) }); err != nil {
log.Debug("send Bye message error: %v", err)
}
// change id to empty string
old.id = ""
// tell the old one to shutdown
old.shutdown.Begin()
}(old.(*NodeController))
}
// start four goroutines
go bc.writer()
go bc.manager()
go bc.reader()
go bc.stopper()
// send success message
util.PanicToError(func() { bc.out <- new(AccessResp) })
log.Debug("Broker::control authenticate with id: %v", authMsg.Id)
}
func (b *Broker) tunnel(pxyConn net.Conn, reqTunnel *RegTunnel) {
// authenticate firstly
if err := b.getAuthenticate(&reqTunnel.Auth); err != nil {
pxyConn.SetWriteDeadline(time.Now().Add(rwTimeout))
WriteMsg(pxyConn, &AccessResp{Error: err.Error()})
pxyConn.Close()
return
}
pxyConn.SetWriteDeadline(time.Now().Add(rwTimeout))
if err := WriteMsg(pxyConn, new(AccessResp)); err != nil {
pxyConn.Close()
return
}
// look up the control for this tunnel conn
var ctl *NodeController
if tmp, ok := b.controllers.Get(reqTunnel.Auth.Id); !ok {
log.Debug("Broker::tunnel registering new tunnel for %s", reqTunnel.Auth.Id)
pxyConn.Close()
return
} else {
ctl = tmp.(*NodeController)
}
log.Debug("Broker::tunnel registering new tunnel for %s", reqTunnel.Auth.Id)
ctl.regTunnel(pxyConn)
}
func (b *Broker) Broker(srcConn net.Conn, reqBrokerMsg *ReqBroker) {
b.broker(srcConn, reqBrokerMsg)
}
func (b *Broker) broker(srcConn net.Conn, reqBrokerMsg *ReqBroker) {
defer srcConn.Close()
// authenticate
if err := b.reqBrokerPermission(reqBrokerMsg); err != nil {
srcConn.SetWriteDeadline(time.Now().Add(rwTimeout))
WriteMsg(srcConn, &AccessResp{Error: err.Error()})
return
}
// look up the control connection for this tunnel
var dstCtl *NodeController
if tmp, ok := b.controllers.Get(reqBrokerMsg.DstId); !ok {
log.Debug("Broker::broker no control found for target control id: %s", reqBrokerMsg.DstId)
return
} else {
dstCtl = tmp.(*NodeController)
}
log.Debug("Broker:broker get dstConn for %s", dstCtl.id)
dstConn, err := dstCtl.getTunnel(reqBrokerMsg)
if err != nil {
srcConn.SetWriteDeadline(time.Now().Add(rwTimeout))
WriteMsg(srcConn, &AccessResp{Error: "failed to get tunnel connection of target control"})
return
}
defer dstConn.Close()
srcConn.SetWriteDeadline(time.Now().Add(rwTimeout))
if err := WriteMsg(srcConn, new(AccessResp)); err != nil {
log.Error("Broker::broker send broker success message error: %v", err)
return
}
srcConn.SetDeadline(time.Time{})
// join the public and tunnel connections
log.Debug("start joining srcConn, dstConn")
p := NewPlumber(srcConn, dstConn)
dstCtl.stopRWMutex.RLock()
if dstCtl.stopping {
dstCtl.stopRWMutex.RUnlock()
log.Debug("Broker::broker dstCtl %s is stopping", dstCtl.id)
return
}
dstCtl.plumbers.Add(p)
defer dstCtl.plumbers.Remove(p)
dstCtl.stopRWMutex.RUnlock()
bytes2Src, bytes2Dst := p.Pipe(bytesForRate)
log.Debug("Broker::broker bytes2Src :%d bytes2Dst:%d", bytes2Src, bytes2Dst)
}
type NodeController struct {
// id of the control
id string
// authMsg
authMsg *Auth
// main controller connection
ctlConn net.Conn
// broker of the controller
broker *Broker
// unlimited capacity set
plumbers *util.Set
stopping bool
stopRWMutex sync.RWMutex
// put a message in this channel to send it over conn to the controller
out chan Message
// read from this channel to get the next message sent to us over conn by the controller
in chan Message
// the last time we received a ping from the controller - for heartbeats
lastPing time.Time
// tunnel connections
tunnels chan net.Conn
// synchronizer for writer()
writerShutdown *util.Shutdown
// synchronizer for reader()
readerShutdown *util.Shutdown
// synchronizer for manager()
managerShutdown *util.Shutdown
// synchronizer for entire controller
shutdown *util.Shutdown
}
func (nc *NodeController) AuthMsg() *Auth {
return nc.authMsg
}
func (nc *NodeController) manager() {
defer func() {
if err := recover(); err != nil {
log.Error("NodeController::manager recover with error: %v, stack: %s", err, debug.Stack())
}
}()
// kill everything if the control manager stops
defer nc.shutdown.Begin()
// notify that manager() has shutdown
defer nc.managerShutdown.Complete()
// reaping timer for detecting heartbeat failure
pingCheck := time.NewTicker(time.Second)
defer pingCheck.Stop()
for {
select {
case <-pingCheck.C:
if time.Since(nc.lastPing) > (pingInterval + rwTimeout) {
log.Debug("NodeController::manager lost heartbeat")
return
}
case mRaw, ok := <-nc.in:
// c.in closes to indicate shutdown
if !ok {
log.Debug("NodeController::manager chan bc.in closed")
return
}
//log.Debug("NodeController::manager PING")
if _, ok := mRaw.(*Ping); ok {
nc.lastPing = time.Now()
// don't crash on panic
if err := util.PanicToError(func() { nc.out <- new(Pong) }); err != nil {
log.Debug("NodeController::manager send message to bc.out error: %v", err)
return
}
//log.Debug("NodeController::manager PONG")
}
}
}
}
func (nc *NodeController) writer() {
defer func() {
if err := recover(); err != nil {
log.Error("NodeController::writer recover with error: %v, stack: %s", err, debug.Stack())
}
}()
// kill everything if the writer() stops
defer nc.shutdown.Begin()
// notify that we've flushed all messages
defer nc.writerShutdown.Complete()
// write messages to the control channel
for m := range nc.out {
nc.ctlConn.SetWriteDeadline(time.Now().Add(rwTimeout))
if err := WriteMsg(nc.ctlConn, m); err != nil {
// bc.conn may be closed
log.Debug("NodeController::writer WriteMsg error: %v", err)
return
}
}
}
func (nc *NodeController) reader() {
defer func() {
if err := recover(); err != nil {
log.Error("NodeController::reader recover with error: %v, stack: %s", err, debug.Stack())
}
}()
// kill everything if the reader stops
defer nc.shutdown.Begin()
// notify that we're done
defer nc.readerShutdown.Complete()
// read messages from the control channel
for {
if message, err := ReadMsg(nc.ctlConn); err != nil {
log.Debug("NodeController::read message: %v", err)
return
} else {
// this can also panic during shutdown
if err := util.PanicToError(func() { nc.in <- message }); err != nil {
log.Debug("NodeController::reader bc.in <- message error: %v", err)
return
}
}
}
}
func (nc *NodeController) stopper() {
defer func() {
if err := recover(); err != nil {
log.Error("NodeController::stopper recover with error: %v, stack: %s", err, debug.Stack()) | }
}()
defer nc.shutdown.Complete()
| random_line_split | |
broker.go | (getAuthenticate AuthenticateFunc, reqBrokerPermission ReqBrokerPermission) *Broker {
return &Broker{
getAuthenticate: getAuthenticate,
reqBrokerPermission: reqBrokerPermission,
controllers: util.NewRegistry(),
}
}
func (b *Broker) ListenAddr() net.Addr {
return b.listener.Addr()
}
func (b *Broker) Run(listenerAddr string, tlsConfig *tls.Config) (err error) {
if b.listener, err = NewListener("tcp", listenerAddr, tlsConfig); err != nil {
return log.Error("listening %s error: %v", listenerAddr, err)
}
go b.daemon()
return nil
}
func (b *Broker) AllController(f func(id string, controller *NodeController)) {
b.controllers.All(func(key, value interface{}) { f(key.(string), value.(*NodeController)) })
}
func (b *Broker) EachController(f func(id string, controller *NodeController, stop *bool)) {
b.controllers.Each(func(key, value interface{}, stop *bool) { f(key.(string), value.(*NodeController), stop) })
}
func (b *Broker) daemon() {
for {
conn, ok := <-b.listener.ConnChan
if !ok {
log.Debug("listener.Conn closed")
return
}
go func(conn net.Conn) {
var rawMsg Message
var err error
conn.SetReadDeadline(time.Now().Add(rwTimeout))
if rawMsg, err = ReadMsg(conn); err != nil {
conn.Close()
return
}
conn.SetReadDeadline(time.Time{})
switch msg := rawMsg.(type) {
case *Auth:
b.control(conn, msg)
case *RegTunnel:
b.tunnel(conn, msg)
case *ReqBroker:
b.broker(conn, msg)
default:
conn.Close()
}
}(conn)
}
}
func (b *Broker) control(ctlConn net.Conn, authMsg *Auth) {
// authenticate firstly
if err := b.getAuthenticate(authMsg); err != nil {
ctlConn.SetWriteDeadline(time.Now().Add(rwTimeout))
WriteMsg(ctlConn, &AccessResp{Error: err.Error()})
ctlConn.Close()
return
}
// create the object
bc := &NodeController{
id: authMsg.Id,
authMsg: authMsg,
ctlConn: ctlConn,
out: make(chan Message),
in: make(chan Message),
tunnels: make(chan net.Conn, pxyPoolSize),
lastPing: time.Now(),
writerShutdown: util.NewShutdown(),
readerShutdown: util.NewShutdown(),
managerShutdown: util.NewShutdown(),
shutdown: util.NewShutdown(),
broker: b,
plumbers: util.NewSet(0),
}
// register the control
if old := b.controllers.Add(authMsg.Id, bc); old != nil {
// old had been kicked out
// routine for shutdown the old one
go func(old *NodeController) {
// send bye message to avoid control reconnect
if err := util.PanicToError(func() { old.out <- new(Bye) }); err != nil {
log.Debug("send Bye message error: %v", err)
}
// change id to empty string
old.id = ""
// tell the old one to shutdown
old.shutdown.Begin()
}(old.(*NodeController))
}
// start four goroutines
go bc.writer()
go bc.manager()
go bc.reader()
go bc.stopper()
// send success message
util.PanicToError(func() { bc.out <- new(AccessResp) })
log.Debug("Broker::control authenticate with id: %v", authMsg.Id)
}
func (b *Broker) tunnel(pxyConn net.Conn, reqTunnel *RegTunnel) {
// authenticate firstly
if err := b.getAuthenticate(&reqTunnel.Auth); err != nil {
pxyConn.SetWriteDeadline(time.Now().Add(rwTimeout))
WriteMsg(pxyConn, &AccessResp{Error: err.Error()})
pxyConn.Close()
return
}
pxyConn.SetWriteDeadline(time.Now().Add(rwTimeout))
if err := WriteMsg(pxyConn, new(AccessResp)); err != nil {
pxyConn.Close()
return
}
// look up the control for this tunnel conn
var ctl *NodeController
if tmp, ok := b.controllers.Get(reqTunnel.Auth.Id); !ok {
log.Debug("Broker::tunnel registering new tunnel for %s", reqTunnel.Auth.Id)
pxyConn.Close()
return
} else {
ctl = tmp.(*NodeController)
}
log.Debug("Broker::tunnel registering new tunnel for %s", reqTunnel.Auth.Id)
ctl.regTunnel(pxyConn)
}
func (b *Broker) Broker(srcConn net.Conn, reqBrokerMsg *ReqBroker) {
b.broker(srcConn, reqBrokerMsg)
}
func (b *Broker) broker(srcConn net.Conn, reqBrokerMsg *ReqBroker) {
defer srcConn.Close()
// authenticate
if err := b.reqBrokerPermission(reqBrokerMsg); err != nil {
srcConn.SetWriteDeadline(time.Now().Add(rwTimeout))
WriteMsg(srcConn, &AccessResp{Error: err.Error()})
return
}
// look up the control connection for this tunnel
var dstCtl *NodeController
if tmp, ok := b.controllers.Get(reqBrokerMsg.DstId); !ok {
log.Debug("Broker::broker no control found for target control id: %s", reqBrokerMsg.DstId)
return
} else {
dstCtl = tmp.(*NodeController)
}
log.Debug("Broker:broker get dstConn for %s", dstCtl.id)
dstConn, err := dstCtl.getTunnel(reqBrokerMsg)
if err != nil {
srcConn.SetWriteDeadline(time.Now().Add(rwTimeout))
WriteMsg(srcConn, &AccessResp{Error: "failed to get tunnel connection of target control"})
return
}
defer dstConn.Close()
srcConn.SetWriteDeadline(time.Now().Add(rwTimeout))
if err := WriteMsg(srcConn, new(AccessResp)); err != nil {
log.Error("Broker::broker send broker success message error: %v", err)
return
}
srcConn.SetDeadline(time.Time{})
// join the public and tunnel connections
log.Debug("start joining srcConn, dstConn")
p := NewPlumber(srcConn, dstConn)
dstCtl.stopRWMutex.RLock()
if dstCtl.stopping {
dstCtl.stopRWMutex.RUnlock()
log.Debug("Broker::broker dstCtl %s is stopping", dstCtl.id)
return
}
dstCtl.plumbers.Add(p)
defer dstCtl.plumbers.Remove(p)
dstCtl.stopRWMutex.RUnlock()
bytes2Src, bytes2Dst := p.Pipe(bytesForRate)
log.Debug("Broker::broker bytes2Src :%d bytes2Dst:%d", bytes2Src, bytes2Dst)
}
type NodeController struct {
// id of the control
id string
// authMsg
authMsg *Auth
// main controller connection
ctlConn net.Conn
// broker of the controller
broker *Broker
// unlimited capacity set
plumbers *util.Set
stopping bool
stopRWMutex sync.RWMutex
// put a message in this channel to send it over conn to the controller
out chan Message
// read from this channel to get the next message sent to us over conn by the controller
in chan Message
// the last time we received a ping from the controller - for heartbeats
lastPing time.Time
// tunnel connections
tunnels chan net.Conn
// synchronizer for writer()
writerShutdown *util.Shutdown
// synchronizer for reader()
readerShutdown *util.Shutdown
// synchronizer for manager()
managerShutdown *util.Shutdown
// synchronizer for entire controller
shutdown *util.Shutdown
}
func (nc *NodeController) AuthMsg() *Auth {
return nc.authMsg
}
func (nc *NodeController) manager() {
defer func() {
if err := recover(); err != nil {
log.Error("NodeController::manager recover with error: %v, stack: %s", err, debug.Stack())
}
}()
// kill everything if the control manager stops
defer nc.shutdown.Begin()
// notify that manager() has shutdown
defer nc.managerShutdown.Complete()
// reaping timer for detecting heartbeat failure
pingCheck := time.NewTicker(time.Second)
defer pingCheck.Stop()
for {
select {
case <-pingCheck.C:
if time.Since(nc.lastPing) > (pingInterval + rwTimeout) {
log.Debug("NodeController::manager lost heartbeat")
return
}
case mRaw, ok := <-nc.in:
// c.in closes to indicate shutdown
if !ok {
log.Debug("NodeController::manager chan bc.in closed")
return
}
//log.Debug("NodeController::manager PING")
if _, ok := mRaw.(*Ping); ok {
nc.lastPing = time.Now()
// don't crash on panic
if err := util.PanicToError(func() { nc.out <- new(Pong) }); err != nil {
| NewBroker | identifier_name | |
iso_spec.rs | IsoError> {
let mut selector = String::new();
let mut f2d_map = HashMap::new();
let mut in_buf = Cursor::new(data);
for f in &self.header_fields {
match f.parse(&mut in_buf, &mut f2d_map) {
Ok(_) => {
selector.extend(f.to_string(f2d_map.get(f.name()).unwrap()).chars());
}
Err(e) => {
return Err(IsoError { msg: e.msg });
}
}
}
debug!("computed header value for incoming message = {}", selector);
match self.get_message_from_header(selector.as_str()) {
Ok(msg) => {
Ok(msg)
}
Err(e) => Err(e)
}
}
}
/// This struct represents a parsed message for a given spec
pub struct IsoMsg {
// The spec associated with this IsoMsg
pub spec: &'static Spec,
/// The segment that the IsoMsg represents
pub msg: &'static MessageSegment,
/// field data map - name to raw value
pub fd_map: std::collections::HashMap<String, Vec<u8>>,
/// the bitmap on the iso message
pub bmp: bitmap::Bitmap,
}
/// Operations on IsoMsg
impl IsoMsg {
pub fn spec(&self) -> &'static Spec {
self.spec
}
/// Returns the value of a field by position in the bitmap
pub fn bmp_child_value(&self, pos: u32) -> Result<String, IsoError> {
let f = self.msg.fields.iter().find(|f| -> bool {
if f.name() == "bitmap" {
true
} else {
false
}
}).unwrap();
let cf = f.child_by_pos(pos);
match self.fd_map.get(cf.name()) {
None => {
Err(IsoError { msg: format!("no value for field at position {}", pos) })
}
Some(v) => {
Ok(cf.to_string(v))
}
}
}
/// Returns the value of a top level field like message_type
pub fn get_field_value(&self, name: &String) -> Result<String, IsoError> {
match self.msg.fields.iter().find(|f| -> bool {
if f.name() == name {
true
} else {
false
}
}) {
Some(f) => {
Ok(f.to_string(self.fd_map.get(name).unwrap()))
}
None => {
Err(IsoError { msg: format!("No such field : {}", name) })
}
}
}
/// sets a top-level field like message_type etc
pub fn set(&mut self, name: &str, val: &str) -> Result<(), IsoError> {
match self.msg.field_by_name(&name.to_string()) {
Ok(f) => {
self.fd_map.insert(f.name().clone(), f.to_raw(val));
Ok(())
}
Err(e) => Err(e)
}
}
/// Sets a field in the bitmap with the given value
pub fn set_on(&mut self, pos: u32, val: &str) -> Result<(), IsoError> {
match self.msg.field_by_name(&"bitmap".to_string()) {
Ok(f) => {
let cf = f.child_by_pos(pos);
self.fd_map.insert(cf.name().clone(), cf.to_raw(val));
self.bmp.set_on(pos);
Ok(())
}
Err(e) => Err(e)
}
}
/// Echoes (copies the value of the identical field from req_msg) for the given positions in the bitmap
pub fn echo_from(&mut self, req_msg: &IsoMsg, positions: &[u32]) -> Result<(), IsoError> {
match self.msg.field_by_name(&"bitmap".to_string()) {
Ok(f) => {
for pos in positions {
let cf = f.child_by_pos(*pos);
match req_msg.bmp_child_value(*pos) {
Ok(res) => {
debug!("echoing .. {}: {}", pos, res);
self.fd_map.insert(cf.name().clone(), cf.to_raw(res.as_str()));
self.bmp.set_on(*pos);
}
Err(e) => {
return Err(e);
}
}
}
Ok(())
}
Err(e) => Err(e)
}
}
/// Assembles the message into a Vec<u8> or an IsoError on failure
pub fn assemble(&self) -> Result<Vec<u8>, IsoError> {
let mut out_buf: Vec<u8> = Vec::new();
for f in &self.msg.fields {
match f.assemble(&mut out_buf, &self) {
Ok(_) => {}
Err(e) => {
return Err(IsoError { msg: e.msg });
}
}
}
Ok(out_buf)
}
/// Sets F52 from the provided clear PIN, using the pin format and key provided via cfg
pub fn set_pin(&mut self, pin: &str, pan: &str, cfg: &Config) -> Result<(), IsoError> {
if cfg.get_pin_fmt().is_none() || cfg.get_pin_key().is_none() {
return Err(IsoError { msg: format!("missing pin_format or key in call to set_pin") });
}
match generate_pin_block(&cfg.get_pin_fmt().as_ref().unwrap(), pin, pan, &hex::decode(cfg.get_pin_key().as_ref().unwrap().as_str()).unwrap()) {
Ok(v) => {
self.set_on(52, hex::encode(v).as_str())
}
Err(e) => {
Err(IsoError { msg: e.msg })
}
}
}
/// Sets F64 or F128 based on algo, padding and key provided via cfg
pub fn set_mac(&mut self, cfg: &Config) -> Result<(), IsoError> {
if cfg.get_mac_algo().is_none() || cfg.get_mac_padding().is_none() || cfg.get_mac_key().is_none() {
return Err(IsoError { msg: format!("missing mac_algo or padding or key in call to set_mac") });
}
if self.bmp.is_on(1) {
self.set_on(128, "0000000000000000")
} else {
self.set_on(64, "0000000000000000")
}.unwrap();
let data: Vec<u8> = match self.assemble() {
Ok(v) => {
v
}
Err(e) => {
return Err(e);
}
};
debug!("generating mac on: {}", hex::encode(&data));
match generate_mac(&cfg.get_mac_algo().as_ref().unwrap(), &cfg.get_mac_padding().as_ref().unwrap(),
&data[0..data.len() - 8].to_vec(), &hex::decode(cfg.get_mac_key().as_ref().unwrap()).unwrap()) {
Ok(v) => {
let pos: u32;
if self.bmp.is_on(1) {
pos = 128;
} else {
pos = 64;
}
self.set_on(pos, hex::encode(v).as_str()).unwrap_or_default();
Ok(())
}
Err(e) => {
Err(IsoError { msg: e.msg })
}
}
}
}
fn collect_children(f: &dyn Field, ordered_fields: &mut Vec<String>) {
ordered_fields.push(f.name().clone());
f.children().iter().for_each(|f| collect_children(*f, ordered_fields));
}
impl Display for IsoMsg {
fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), std::fmt::Error> {
let mut res = "".to_string();
let mut ordered_fields = vec![];
self.msg.fields.iter().for_each(|f| collect_children(f.as_ref(), &mut ordered_fields));
res = res + format!("\n{:20.40} : {:5} : {} ", "-Field-", "-Position-", "-Field Value-").as_str();
for f in ordered_fields {
if self.fd_map.contains_key(f.as_str()) {
let field = self.msg.field_by_name(&f).unwrap();
let field_value = &self.fd_map.get(f.as_str()).unwrap();
let mut pos_str: String = String::new();
if field.position() > 0 {
pos_str = format!("{:03}", field.position());
}
//debug!("** formatting {}",field.name());
res = res + format!("\n{:20.40} : {:^10} : {} ", f, pos_str.as_str(), field.to_string(field_value)).as_str();
}
}
f.write_str(&res).unwrap();
Ok(())
}
}
/// Returns a spec given its name
pub fn spec(_name: &str) -> &'static Spec {
//TODO:: handle case of multiple specs, for now just return the first
ALL_SPECS.iter().find_map(|(_k, v)| Some(v)).unwrap()
}
/// Returns an empty IsoMsg that can be used to create a message
pub fn new_msg(spec: &'static Spec, seg: &'static MessageSegment) -> IsoMsg | {
IsoMsg {
spec,
msg: seg,
fd_map: HashMap::new(),
bmp: Bitmap::new(0, 0, 0),
}
} | identifier_body | |
iso_spec.rs | }
/// This struct represents a segment in the Spec (a auth request, a response etc)
pub struct MessageSegment {
pub(in crate::iso8583) name: String,
#[allow(dead_code)]
pub(in crate::iso8583) id: u32,
pub(in crate::iso8583) selector: Vec<String>,
pub(in crate::iso8583) fields: Vec<Box<dyn Field>>,
}
impl From<&YMessageSegment> for MessageSegment {
fn from(yms: &YMessageSegment) -> Self {
let mut fields: Vec<Box<dyn Field>> = Vec::<Box<dyn Field>>::new();
yms.fields.iter().for_each(|f| {
fields.push(Into::<Box<dyn Field>>::into(f));
});
MessageSegment {
name: yms.name.clone(),
id: yms.id,
selector: yms.selector.iter().map(|s| s.clone()).collect(),
fields,
}
}
}
/// Operations on MessageSegment
impl MessageSegment {
/// Returns name of segment
pub fn name(&self) -> &str {
return self.name.as_str();
}
/// Returns a field given it's name if defined in the spec or a IsoError if the field is not found
pub fn field_by_name(&self, name: &String) -> Result<&dyn Field, IsoError> {
match self.fields.iter().find(|field| -> bool{
if field.name() == name {
true
} else {
false
}
}) {
None => {
//try bitmap
let bmp = self.field_by_name(&"bitmap".to_string()).unwrap();
Ok(bmp.child_by_name(name))
}
Some(f) => {
Ok(f.as_ref())
}
}
}
}
impl Spec {
pub fn name(&self) -> &str {
&self.name
}
/// Returns a message segment given its name or a IsoError if such a segment is not present
pub fn get_message(&self, name: &str) -> Result<&MessageSegment, IsoError> {
for msg in &self.messages {
if msg.name() == name {
return Ok(msg);
}
}
return Err(IsoError { msg: format!("{} message not found", name) });
}
/// Returns a message that corresponds to the given header value or an IsoError if such a selector
/// doesn't exist
pub fn get_message_from_header(&self, header_val: &str) -> Result<&MessageSegment, IsoError> {
for msg in &self.messages {
if msg.selector.contains(&header_val.to_string()) {
return Ok(msg);
}
}
return Err(IsoError { msg: format!("message not found for header - {}", header_val) });
}
/// Returns a segment by first parsing the header field and then matching the header value against
/// the selector
pub fn get_msg_segment(&'static self, data: &Vec<u8>) -> Result<&MessageSegment, IsoError> {
let mut selector = String::new();
let mut f2d_map = HashMap::new();
let mut in_buf = Cursor::new(data);
for f in &self.header_fields {
match f.parse(&mut in_buf, &mut f2d_map) {
Ok(_) => {
selector.extend(f.to_string(f2d_map.get(f.name()).unwrap()).chars());
}
Err(e) => {
return Err(IsoError { msg: e.msg });
}
}
}
debug!("computed header value for incoming message = {}", selector);
match self.get_message_from_header(selector.as_str()) {
Ok(msg) => {
Ok(msg)
}
Err(e) => Err(e)
}
}
}
/// This struct represents a parsed message for a given spec
pub struct IsoMsg {
// The spec associated with this IsoMsg
pub spec: &'static Spec,
/// The segment that the IsoMsg represents
pub msg: &'static MessageSegment,
/// field data map - name to raw value
pub fd_map: std::collections::HashMap<String, Vec<u8>>,
/// the bitmap on the iso message
pub bmp: bitmap::Bitmap,
}
/// Operations on IsoMsg
impl IsoMsg {
pub fn spec(&self) -> &'static Spec {
self.spec
}
/// Returns the value of a field by position in the bitmap
pub fn bmp_child_value(&self, pos: u32) -> Result<String, IsoError> {
let f = self.msg.fields.iter().find(|f| -> bool {
if f.name() == "bitmap" {
true
} else {
false
}
}).unwrap();
let cf = f.child_by_pos(pos);
match self.fd_map.get(cf.name()) {
None => {
Err(IsoError { msg: format!("no value for field at position {}", pos) })
}
Some(v) => {
Ok(cf.to_string(v))
}
}
}
/// Returns the value of a top level field like message_type
pub fn get_field_value(&self, name: &String) -> Result<String, IsoError> {
match self.msg.fields.iter().find(|f| -> bool {
if f.name() == name {
true
} else {
false
}
}) {
Some(f) => {
Ok(f.to_string(self.fd_map.get(name).unwrap()))
}
None => {
Err(IsoError { msg: format!("No such field : {}", name) })
}
}
}
/// sets a top-level field like message_type etc
pub fn set(&mut self, name: &str, val: &str) -> Result<(), IsoError> {
match self.msg.field_by_name(&name.to_string()) {
Ok(f) => {
self.fd_map.insert(f.name().clone(), f.to_raw(val));
Ok(())
}
Err(e) => Err(e)
}
}
/// Sets a field in the bitmap with the given value
pub fn set_on(&mut self, pos: u32, val: &str) -> Result<(), IsoError> {
match self.msg.field_by_name(&"bitmap".to_string()) {
Ok(f) => {
let cf = f.child_by_pos(pos);
self.fd_map.insert(cf.name().clone(), cf.to_raw(val));
self.bmp.set_on(pos);
Ok(())
}
Err(e) => Err(e)
}
}
/// Echoes (sets the value with the identical field in req_msg) for given positions in the bitmap
pub fn echo_from(&mut self, req_msg: &IsoMsg, positions: &[u32]) -> Result<(), IsoError> {
match self.msg.field_by_name(&"bitmap".to_string()) {
Ok(f) => {
for pos in positions {
let cf = f.child_by_pos(*pos);
match req_msg.bmp_child_value(*pos) {
Ok(res) => {
debug!("echoing .. {}: {}", pos, res);
self.fd_map.insert(cf.name().clone(), cf.to_raw(res.as_str()));
self.bmp.set_on(*pos);
}
Err(e) => {
return Err(e);
}
}
}
Ok(())
}
Err(e) => Err(e)
}
}
/// Assembles the message into a Vec<u8>, or an IsoError on failure
pub fn assemble(&self) -> Result<Vec<u8>, IsoError> {
let mut out_buf: Vec<u8> = Vec::new();
for f in &self.msg.fields {
match f.assemble(&mut out_buf, &self) {
Ok(_) => {}
Err(e) => {
return Err(IsoError { msg: e.msg });
}
}
}
Ok(out_buf)
}
/// Sets F52 based on the provided clear PIN, using the pin format and key provided via cfg
pub fn set_pin(&mut self, pin: &str, pan: &str, cfg: &Config) -> Result<(), IsoError> {
if cfg.get_pin_fmt().is_none() || cfg.get_pin_key().is_none() {
return Err(IsoError { msg: format!("missing pin_format or key in call to set_pin") });
}
match generate_pin_block(&cfg.get_pin_fmt().as_ref().unwrap(), pin, pan, &hex::decode(cfg.get_pin_key().as_ref().unwrap().as_str()).unwrap()) {
Ok(v) => {
self.set_on(52, hex::encode(v).as_str())
}
Err(e) => {
Err(IsoError { msg: e.msg })
} | }
/// Sets F64 or F128 based on algo, padding and key provided via cfg
pub fn set_mac(&mut self, cfg: &Config) -> Result<(), IsoError> {
if cfg.get_mac_algo().is_none() || cfg.get_mac_padding().is_none() || cfg.get_mac_key().is_none() {
return Err(IsoError { msg: format!("missing mac_algo or padding or key in call to set_mac") });
}
if self.bmp.is_on(1) {
self.set_on(128, "0000000000000000")
} else {
self.set_on(64, "0000000000000000")
| } | random_line_split |
iso_spec.rs | }
/// This struct represents a segment in the Spec (an auth request, a response, etc.)
pub struct MessageSegment {
pub(in crate::iso8583) name: String,
#[allow(dead_code)]
pub(in crate::iso8583) id: u32,
pub(in crate::iso8583) selector: Vec<String>,
pub(in crate::iso8583) fields: Vec<Box<dyn Field>>,
}
impl From<&YMessageSegment> for MessageSegment {
fn from(yms: &YMessageSegment) -> Self {
let mut fields: Vec<Box<dyn Field>> = Vec::<Box<dyn Field>>::new();
yms.fields.iter().for_each(|f| {
fields.push(Into::<Box<dyn Field>>::into(f));
});
MessageSegment {
name: yms.name.clone(),
id: yms.id,
selector: yms.selector.iter().map(|s| s.clone()).collect(),
fields,
}
}
}
/// Operations on MessageSegment
impl MessageSegment {
/// Returns name of segment
pub fn name(&self) -> &str {
return self.name.as_str();
}
/// Returns a field given its name if defined in the spec, or an IsoError if the field is not found
pub fn field_by_name(&self, name: &String) -> Result<&dyn Field, IsoError> {
match self.fields.iter().find(|field| -> bool{
if field.name() == name {
true
} else {
false
}
}) {
None => {
//try bitmap
let bmp = self.field_by_name(&"bitmap".to_string()).unwrap();
Ok(bmp.child_by_name(name))
}
Some(f) => {
Ok(f.as_ref())
}
}
}
}
impl Spec {
pub fn name(&self) -> &str {
&self.name
}
/// Returns a message segment given its name, or an IsoError if such a segment is not present
pub fn get_message(&self, name: &str) -> Result<&MessageSegment, IsoError> {
for msg in &self.messages {
if msg.name() == name {
return Ok(msg);
}
}
return Err(IsoError { msg: format!("{} message not found", name) });
}
/// Returns a message that corresponds to the given header value or an IsoError if such a selector
/// doesn't exist
pub fn get_message_from_header(&self, header_val: &str) -> Result<&MessageSegment, IsoError> {
for msg in &self.messages {
if msg.selector.contains(&header_val.to_string()) {
return Ok(msg);
}
}
return Err(IsoError { msg: format!("message not found for header - {}", header_val) });
}
/// Returns a segment by first parsing the header field and then matching the header value against
/// the selector
pub fn get_msg_segment(&'static self, data: &Vec<u8>) -> Result<&MessageSegment, IsoError> {
let mut selector = String::new();
let mut f2d_map = HashMap::new();
let mut in_buf = Cursor::new(data);
for f in &self.header_fields {
match f.parse(&mut in_buf, &mut f2d_map) {
Ok(_) => {
selector.extend(f.to_string(f2d_map.get(f.name()).unwrap()).chars());
}
Err(e) => {
return Err(IsoError { msg: e.msg });
}
}
}
debug!("computed header value for incoming message = {}", selector);
match self.get_message_from_header(selector.as_str()) {
Ok(msg) => {
Ok(msg)
}
Err(e) => Err(e)
}
}
}
/// This struct represents a parsed message for a given spec
pub struct IsoMsg {
// The spec associated with this IsoMsg
pub spec: &'static Spec,
/// The segment that the IsoMsg represents
pub msg: &'static MessageSegment,
/// field data map - name to raw value
pub fd_map: std::collections::HashMap<String, Vec<u8>>,
/// the bitmap on the iso message
pub bmp: bitmap::Bitmap,
}
/// Operations on IsoMsg
impl IsoMsg {
pub fn spec(&self) -> &'static Spec {
self.spec
}
/// Returns the value of a field by position in the bitmap
pub fn bmp_child_value(&self, pos: u32) -> Result<String, IsoError> {
let f = self.msg.fields.iter().find(|f| -> bool {
if f.name() == "bitmap" {
true
} else {
false
}
}).unwrap();
let cf = f.child_by_pos(pos);
match self.fd_map.get(cf.name()) {
None => {
Err(IsoError { msg: format!("no value for field at position {}", pos) })
}
Some(v) => {
Ok(cf.to_string(v))
}
}
}
/// Returns the value of a top level field like message_type
pub fn get_field_value(&self, name: &String) -> Result<String, IsoError> {
match self.msg.fields.iter().find(|f| -> bool {
if f.name() == name {
true
} else {
false
}
}) {
Some(f) => {
Ok(f.to_string(self.fd_map.get(name).unwrap()))
}
None => {
Err(IsoError { msg: format!("No such field : {}", name) })
}
}
}
/// sets a top-level field like message_type etc
pub fn set(&mut self, name: &str, val: &str) -> Result<(), IsoError> {
match self.msg.field_by_name(&name.to_string()) {
Ok(f) => {
self.fd_map.insert(f.name().clone(), f.to_raw(val));
Ok(())
}
Err(e) => Err(e)
}
}
/// Sets a field in the bitmap with the given value
pub fn set_on(&mut self, pos: u32, val: &str) -> Result<(), IsoError> {
match self.msg.field_by_name(&"bitmap".to_string()) {
Ok(f) => {
let cf = f.child_by_pos(pos);
self.fd_map.insert(cf.name().clone(), cf.to_raw(val));
self.bmp.set_on(pos);
Ok(())
}
Err(e) => Err(e)
}
}
/// Echoes (sets the value with the identical field in req_msg) for given positions in the bitmap
pub fn echo_from(&mut self, req_msg: &IsoMsg, positions: &[u32]) -> Result<(), IsoError> {
match self.msg.field_by_name(&"bitmap".to_string()) {
Ok(f) => {
for pos in positions {
let cf = f.child_by_pos(*pos);
match req_msg.bmp_child_value(*pos) {
Ok(res) => {
debug!("echoing .. {}: {}", pos, res);
self.fd_map.insert(cf.name().clone(), cf.to_raw(res.as_str()));
self.bmp.set_on(*pos);
}
Err(e) => {
return Err(e);
}
}
}
Ok(())
}
Err(e) => Err(e)
}
}
/// Assembles the message into a Vec<u8>, or an IsoError on failure
pub fn assemble(&self) -> Result<Vec<u8>, IsoError> {
let mut out_buf: Vec<u8> = Vec::new();
for f in &self.msg.fields {
match f.assemble(&mut out_buf, &self) {
Ok(_) => {}
Err(e) => {
return Err(IsoError { msg: e.msg });
}
}
}
Ok(out_buf)
}
/// Sets F52 based on the provided clear PIN, using the pin format and key provided via cfg
pub fn set_pin(&mut self, pin: &str, pan: &str, cfg: &Config) -> Result<(), IsoError> {
if cfg.get_pin_fmt().is_none() || cfg.get_pin_key().is_none() |
match generate_pin_block(&cfg.get_pin_fmt().as_ref().unwrap(), pin, pan, &hex::decode(cfg.get_pin_key().as_ref().unwrap().as_str()).unwrap()) {
Ok(v) => {
self.set_on(52, hex::encode(v).as_str())
}
Err(e) => {
Err(IsoError { msg: e.msg })
}
}
}
/// Sets F64 or F128 based on algo, padding and key provided via cfg
pub fn set_mac(&mut self, cfg: &Config) -> Result<(), IsoError> {
if cfg.get_mac_algo().is_none() || cfg.get_mac_padding().is_none() || cfg.get_mac_key().is_none() {
return Err(IsoError { msg: format!("missing mac_algo or padding or key in call to set_mac") });
}
if self.bmp.is_on(1) {
self.set_on(128, "0000000000000000")
} else {
self.set_on(64, "0000000000000000 | {
return Err(IsoError { msg: format!("missing pin_format or key in call to set_pin") });
} | conditional_block |
iso_spec.rs | _from_header(&self, header_val: &str) -> Result<&MessageSegment, IsoError> {
for msg in &self.messages {
if msg.selector.contains(&header_val.to_string()) {
return Ok(msg);
}
}
return Err(IsoError { msg: format!("message not found for header - {}", header_val) });
}
/// Returns a segment by first parsing the header field and then matching the header value against
/// the selector
pub fn get_msg_segment(&'static self, data: &Vec<u8>) -> Result<&MessageSegment, IsoError> {
let mut selector = String::new();
let mut f2d_map = HashMap::new();
let mut in_buf = Cursor::new(data);
for f in &self.header_fields {
match f.parse(&mut in_buf, &mut f2d_map) {
Ok(_) => {
selector.extend(f.to_string(f2d_map.get(f.name()).unwrap()).chars());
}
Err(e) => {
return Err(IsoError { msg: e.msg });
}
}
}
debug!("computed header value for incoming message = {}", selector);
match self.get_message_from_header(selector.as_str()) {
Ok(msg) => {
Ok(msg)
}
Err(e) => Err(e)
}
}
}
/// This struct represents a parsed message for a given spec
pub struct IsoMsg {
// The spec associated with this IsoMsg
pub spec: &'static Spec,
/// The segment that the IsoMsg represents
pub msg: &'static MessageSegment,
/// field data map - name to raw value
pub fd_map: std::collections::HashMap<String, Vec<u8>>,
/// the bitmap on the iso message
pub bmp: bitmap::Bitmap,
}
/// Operations on IsoMsg
impl IsoMsg {
pub fn spec(&self) -> &'static Spec {
self.spec
}
/// Returns the value of a field by position in the bitmap
pub fn bmp_child_value(&self, pos: u32) -> Result<String, IsoError> {
let f = self.msg.fields.iter().find(|f| -> bool {
if f.name() == "bitmap" {
true
} else {
false
}
}).unwrap();
let cf = f.child_by_pos(pos);
match self.fd_map.get(cf.name()) {
None => {
Err(IsoError { msg: format!("no value for field at position {}", pos) })
}
Some(v) => {
Ok(cf.to_string(v))
}
}
}
/// Returns the value of a top level field like message_type
pub fn get_field_value(&self, name: &String) -> Result<String, IsoError> {
match self.msg.fields.iter().find(|f| -> bool {
if f.name() == name {
true
} else {
false
}
}) {
Some(f) => {
Ok(f.to_string(self.fd_map.get(name).unwrap()))
}
None => {
Err(IsoError { msg: format!("No such field : {}", name) })
}
}
}
/// sets a top-level field like message_type etc
pub fn set(&mut self, name: &str, val: &str) -> Result<(), IsoError> {
match self.msg.field_by_name(&name.to_string()) {
Ok(f) => {
self.fd_map.insert(f.name().clone(), f.to_raw(val));
Ok(())
}
Err(e) => Err(e)
}
}
/// Sets a field in the bitmap with the given value
pub fn set_on(&mut self, pos: u32, val: &str) -> Result<(), IsoError> {
match self.msg.field_by_name(&"bitmap".to_string()) {
Ok(f) => {
let cf = f.child_by_pos(pos);
self.fd_map.insert(cf.name().clone(), cf.to_raw(val));
self.bmp.set_on(pos);
Ok(())
}
Err(e) => Err(e)
}
}
/// Echoes (sets the value with the identical field in req_msg) for given positions in the bitmap
pub fn echo_from(&mut self, req_msg: &IsoMsg, positions: &[u32]) -> Result<(), IsoError> {
match self.msg.field_by_name(&"bitmap".to_string()) {
Ok(f) => {
for pos in positions {
let cf = f.child_by_pos(*pos);
match req_msg.bmp_child_value(*pos) {
Ok(res) => {
debug!("echoing .. {}: {}", pos, res);
self.fd_map.insert(cf.name().clone(), cf.to_raw(res.as_str()));
self.bmp.set_on(*pos);
}
Err(e) => {
return Err(e);
}
}
}
Ok(())
}
Err(e) => Err(e)
}
}
/// Assembles the message into a Vec<u8>, or an IsoError on failure
pub fn assemble(&self) -> Result<Vec<u8>, IsoError> {
let mut out_buf: Vec<u8> = Vec::new();
for f in &self.msg.fields {
match f.assemble(&mut out_buf, &self) {
Ok(_) => {}
Err(e) => {
return Err(IsoError { msg: e.msg });
}
}
}
Ok(out_buf)
}
/// Sets F52 based on the provided clear PIN, using the pin format and key provided via cfg
pub fn set_pin(&mut self, pin: &str, pan: &str, cfg: &Config) -> Result<(), IsoError> {
if cfg.get_pin_fmt().is_none() || cfg.get_pin_key().is_none() {
return Err(IsoError { msg: format!("missing pin_format or key in call to set_pin") });
}
match generate_pin_block(&cfg.get_pin_fmt().as_ref().unwrap(), pin, pan, &hex::decode(cfg.get_pin_key().as_ref().unwrap().as_str()).unwrap()) {
Ok(v) => {
self.set_on(52, hex::encode(v).as_str())
}
Err(e) => {
Err(IsoError { msg: e.msg })
}
}
}
/// Sets F64 or F128 based on algo, padding and key provided via cfg
pub fn set_mac(&mut self, cfg: &Config) -> Result<(), IsoError> {
if cfg.get_mac_algo().is_none() || cfg.get_mac_padding().is_none() || cfg.get_mac_key().is_none() {
return Err(IsoError { msg: format!("missing mac_algo or padding or key in call to set_mac") });
}
if self.bmp.is_on(1) {
self.set_on(128, "0000000000000000")
} else {
self.set_on(64, "0000000000000000")
}.unwrap();
let data: Vec<u8> = match self.assemble() {
Ok(v) => {
v
}
Err(e) => {
return Err(e);
}
};
debug!("generating mac on: {}", hex::encode(&data));
match generate_mac(&cfg.get_mac_algo().as_ref().unwrap(), &cfg.get_mac_padding().as_ref().unwrap(),
&data[0..data.len() - 8].to_vec(), &hex::decode(cfg.get_mac_key().as_ref().unwrap()).unwrap()) {
Ok(v) => {
let pos: u32;
if self.bmp.is_on(1) {
pos = 128;
} else {
pos = 64;
}
self.set_on(pos, hex::encode(v).as_str()).unwrap_or_default();
Ok(())
}
Err(e) => {
Err(IsoError { msg: e.msg })
}
}
}
}
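// Editor's note: illustrative sketch only, not part of the original source. It shows
// how a response IsoMsg might be populated and serialized with the API above (set a
// top-level field, echo selected bitmap fields from the request, set a response code,
// then assemble). The message type "1110", the positions 2/3/4/11/39 and the value
// "000" are assumptions for illustration and are not taken from this spec.
#[allow(dead_code)]
fn example_build_response(req: &IsoMsg, mut resp: IsoMsg) -> Result<Vec<u8>, IsoError> {
    resp.set("message_type", "1110")?;     // top-level field
    resp.echo_from(req, &[2, 3, 4, 11])?;  // copy selected fields from the request
    resp.set_on(39, "000")?;               // response code in the bitmap
    resp.assemble()                        // serialize to raw bytes
}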
fn collect_children(f: &dyn Field, ordered_fields: &mut Vec<String>) {
ordered_fields.push(f.name().clone());
f.children().iter().for_each(|f| collect_children(*f, ordered_fields));
}
impl Display for IsoMsg {
fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), std::fmt::Error> {
let mut res = "".to_string();
let mut ordered_fields = vec![];
self.msg.fields.iter().for_each(|f| collect_children(f.as_ref(), &mut ordered_fields));
res = res + format!("\n{:20.40} : {:5} : {} ", "-Field-", "-Position-", "-Field Value-").as_str();
for f in ordered_fields {
if self.fd_map.contains_key(f.as_str()) {
let field = self.msg.field_by_name(&f).unwrap();
let field_value = &self.fd_map.get(f.as_str()).unwrap();
let mut pos_str: String = String::new();
if field.position() > 0 {
pos_str = format!("{:03}", field.position());
}
//debug!("** formatting {}",field.name());
res = res + format!("\n{:20.40} : {:^10} : {} ", f, pos_str.as_str(), field.to_string(field_value)).as_str();
}
}
f.write_str(&res).unwrap();
Ok(())
}
}
/// Returns a spec given its name
pub fn | spec | identifier_name | |
spatial.rs | pub fn from_pos(pos: Vec2f) -> Self {
Self {
x: (pos.x / CHUNK_WIDTH).floor() as i32,
y: (pos.y / CHUNK_HEIGHT).floor() as i32,
}
}
pub fn to_world_pos(self) -> Vec2f |
}
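// Editor's note: illustrative sketch only, not part of the original source. It
// demonstrates the world-position <-> chunk-coordinate mapping above: from_pos floors
// the position into chunk indices and to_world_pos returns that chunk's top-left
// corner. CHUNK_WIDTH/CHUNK_HEIGHT are the module constants; the sample position is
// an arbitrary assumption.
#[allow(dead_code)]
fn example_chunk_coords() {
    let pos = v2!(3.5 * CHUNK_WIDTH, -0.5 * CHUNK_HEIGHT);
    let coords = Chunk_Coords::from_pos(pos); // floors to chunk (3, -1)
    let origin = coords.to_world_pos();       // top-left corner of that chunk
    debug_assert!(origin.x <= pos.x && origin.y <= pos.y);
}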
pub struct World_Chunks {
chunks: HashMap<Chunk_Coords, World_Chunk>,
to_destroy: Event_Callback_Data,
}
#[derive(Default, Debug)]
pub struct World_Chunk {
pub colliders: Vec<Collider_Handle>,
}
impl World_Chunks {
pub fn new() -> Self {
Self {
chunks: HashMap::new(),
to_destroy: wrap_cb_data(Vec::<Entity>::new()),
}
}
pub fn init(&mut self, engine_state: &mut Engine_State) {
engine_state
.systems
.evt_register
.subscribe::<Evt_Entity_Destroyed>(
Box::new(|entity, to_destroy| {
with_cb_data(to_destroy.unwrap(), |to_destroy: &mut Vec<Entity>| {
to_destroy.push(entity);
});
}),
Some(self.to_destroy.clone()),
);
}
pub fn update(&mut self, ecs_world: &Ecs_World, phys_world: &Physics_World) {
trace!("world_chunks::update");
let mut to_remove = vec![];
with_cb_data(&mut self.to_destroy, |to_destroy: &mut Vec<Entity>| {
for &entity in to_destroy.iter() {
if let Some(collider) = ecs_world.get_component::<C_Collider>(entity) {
for (cld, handle) in
phys_world.get_all_colliders_with_handles(collider.phys_body_handle)
{
to_remove.push((handle, cld.position, cld.shape.extent()));
}
}
}
to_destroy.clear();
});
for (cld, pos, extent) in to_remove {
self.remove_collider(cld, pos, extent);
}
}
pub fn n_chunks(&self) -> usize {
self.chunks.len()
}
pub fn add_collider(&mut self, cld_handle: Collider_Handle, pos: Vec2f, extent: Vec2f) {
let mut chunks = vec![];
self.get_all_chunks_containing(pos, extent, &mut chunks);
for coords in chunks {
self.add_collider_coords(cld_handle, coords);
}
}
fn add_collider_coords(&mut self, cld_handle: Collider_Handle, coords: Chunk_Coords) {
let chunk = self
.chunks
.entry(coords)
.or_insert_with(World_Chunk::default);
debug_assert!(
!chunk.colliders.contains(&cld_handle),
"Duplicate collider {:?} in chunk {:?}!",
cld_handle,
coords
);
chunk.colliders.push(cld_handle);
}
pub fn remove_collider(&mut self, cld_handle: Collider_Handle, pos: Vec2f, extent: Vec2f) {
let mut chunks = vec![];
self.get_all_chunks_containing(pos, extent, &mut chunks);
for coords in chunks {
self.remove_collider_coords(cld_handle, coords);
}
}
fn remove_collider_coords(&mut self, cld_handle: Collider_Handle, coords: Chunk_Coords) {
let chunk = self.chunks.get_mut(&coords).unwrap_or_else(|| {
fatal!(
"Collider {:?} should be in chunk {:?}, but that chunk does not exist.",
cld_handle,
coords
)
});
let idx = chunk.colliders.iter().position(|&c| c == cld_handle);
if let Some(idx) = idx {
chunk.colliders.remove(idx);
if chunk.colliders.is_empty() {
self.chunks.remove(&coords);
}
} else {
lerr!(
"Collider {:?} not found in expected chunk {:?}.",
cld_handle,
coords
);
}
}
pub fn update_collider(
&mut self,
cld_handle: Collider_Handle,
prev_pos: Vec2f,
new_pos: Vec2f,
extent: Vec2f,
frame_alloc: &mut Temp_Allocator,
) {
trace!("world_chunks::update_collider");
let mut prev_coords = excl_temp_array(frame_alloc);
self.get_all_chunks_containing(prev_pos, extent, &mut prev_coords);
let prev_coords = unsafe { prev_coords.into_read_only() };
let mut new_coords = excl_temp_array(frame_alloc);
self.get_all_chunks_containing(new_pos, extent, &mut new_coords);
let new_coords = unsafe { new_coords.into_read_only() };
let mut all_chunks = excl_temp_array(frame_alloc);
// Pre-allocate enough memory to hold all the chunks; then `chunks_to_add` starts at index 0,
// while `chunks_to_remove` starts at index `new_coords.len()`.
// This works because we can have at most `new_coords.len()` chunks to add and `prev_coords.len()`
// chunks to remove.
unsafe {
all_chunks.alloc_additional_uninit(new_coords.len() + prev_coords.len());
}
let mut n_chunks_to_add = 0;
let mut n_chunks_to_remove = 0;
let chunks_to_add_offset = 0;
let chunks_to_remove_offset = new_coords.len();
// Find chunks to add and to remove in O(n).
// This algorithm assumes that both prev_coords and new_coords are sorted and deduped.
let mut p_idx = 0;
let mut n_idx = 0;
while p_idx < prev_coords.len() && n_idx < new_coords.len() {
let pc = prev_coords[p_idx];
let nc = new_coords[n_idx];
match pc.cmp(&nc) {
Ordering::Less => {
all_chunks[chunks_to_remove_offset + n_chunks_to_remove] = pc;
n_chunks_to_remove += 1;
p_idx += 1;
}
Ordering::Greater => {
all_chunks[chunks_to_add_offset + n_chunks_to_add] = nc;
n_chunks_to_add += 1;
n_idx += 1;
}
Ordering::Equal => {
p_idx += 1;
n_idx += 1;
}
}
}
if p_idx < prev_coords.len() {
let diff = prev_coords.len() - p_idx;
for i in 0..diff {
all_chunks[chunks_to_remove_offset + n_chunks_to_remove + i] =
prev_coords[p_idx + i];
}
n_chunks_to_remove += diff;
} else if n_idx < new_coords.len() {
let diff = new_coords.len() - n_idx;
for i in 0..diff {
all_chunks[chunks_to_add_offset + n_chunks_to_add + i] = new_coords[n_idx + i];
}
n_chunks_to_add += diff;
}
#[cfg(debug_assertions)]
{
let to_remove = all_chunks
.iter()
.cloned()
.skip(chunks_to_remove_offset)
.take(n_chunks_to_remove)
.collect::<HashSet<_>>();
let to_add = all_chunks
.iter()
.cloned()
.skip(chunks_to_add_offset)
.take(n_chunks_to_add)
.collect::<HashSet<_>>();
debug_assert_eq!(to_remove.intersection(&to_add).count(), 0);
}
for coord in all_chunks
.iter()
.skip(chunks_to_add_offset)
.take(n_chunks_to_add)
{
self.add_collider_coords(cld_handle, *coord);
}
for coord in all_chunks
.iter()
.skip(chunks_to_remove_offset)
.take(n_chunks_to_remove)
{
self.remove_collider_coords(cld_handle, *coord);
}
}
fn get_all_chunks_containing<T>(&self, pos: Vec2f, extent: Vec2f, coords: &mut T)
where
T: Extend<Chunk_Coords>,
{
trace!("get_all_chunks_containing");
#[cfg(debug_assertions)]
let mut chk_coords = vec![];
// We need to @Cleanup the -extent*0.5 offset we need to apply and make it consistent throughout the game!
let pos = pos - extent * 0.5;
let coords_topleft = Chunk_Coords::from_pos(pos);
coords.extend(Some(coords_topleft));
#[cfg(debug_assertions)]
chk_coords.push(coords_topleft);
let coords_botright = Chunk_Coords::from_pos(pos + extent);
// Note: we cycle y-major so the result is automatically sorted (as for Chunk_Coords::cmp)
for y in 0..=coords_botright.y - coords_topleft.y {
for x in 0..=coords_botright.x - coords_topleft.x {
if x == 0 && y == 0 {
continue;
}
coords.extend(Some(Chunk_Coords::from_pos(
pos + v2!(x as f32 * CHUNK_WIDTH, y as f32 * CHUNK_HEIGHT),
)));
#[cfg(debug_assertions)]
chk_coords.push(Chunk_Coords::from_pos(
pos | {
Vec2f {
x: self.x as f32 * CHUNK_WIDTH,
y: self.y as f32 * CHUNK_HEIGHT,
}
} | identifier_body |
spatial.rs | pub fn from_pos(pos: Vec2f) -> Self {
Self { | pub fn to_world_pos(self) -> Vec2f {
Vec2f {
x: self.x as f32 * CHUNK_WIDTH,
y: self.y as f32 * CHUNK_HEIGHT,
}
}
}
pub struct World_Chunks {
chunks: HashMap<Chunk_Coords, World_Chunk>,
to_destroy: Event_Callback_Data,
}
#[derive(Default, Debug)]
pub struct World_Chunk {
pub colliders: Vec<Collider_Handle>,
}
impl World_Chunks {
pub fn new() -> Self {
Self {
chunks: HashMap::new(),
to_destroy: wrap_cb_data(Vec::<Entity>::new()),
}
}
pub fn init(&mut self, engine_state: &mut Engine_State) {
engine_state
.systems
.evt_register
.subscribe::<Evt_Entity_Destroyed>(
Box::new(|entity, to_destroy| {
with_cb_data(to_destroy.unwrap(), |to_destroy: &mut Vec<Entity>| {
to_destroy.push(entity);
});
}),
Some(self.to_destroy.clone()),
);
}
pub fn update(&mut self, ecs_world: &Ecs_World, phys_world: &Physics_World) {
trace!("world_chunks::update");
let mut to_remove = vec![];
with_cb_data(&mut self.to_destroy, |to_destroy: &mut Vec<Entity>| {
for &entity in to_destroy.iter() {
if let Some(collider) = ecs_world.get_component::<C_Collider>(entity) {
for (cld, handle) in
phys_world.get_all_colliders_with_handles(collider.phys_body_handle)
{
to_remove.push((handle, cld.position, cld.shape.extent()));
}
}
}
to_destroy.clear();
});
for (cld, pos, extent) in to_remove {
self.remove_collider(cld, pos, extent);
}
}
pub fn n_chunks(&self) -> usize {
self.chunks.len()
}
pub fn add_collider(&mut self, cld_handle: Collider_Handle, pos: Vec2f, extent: Vec2f) {
let mut chunks = vec![];
self.get_all_chunks_containing(pos, extent, &mut chunks);
for coords in chunks {
self.add_collider_coords(cld_handle, coords);
}
}
fn add_collider_coords(&mut self, cld_handle: Collider_Handle, coords: Chunk_Coords) {
let chunk = self
.chunks
.entry(coords)
.or_insert_with(World_Chunk::default);
debug_assert!(
!chunk.colliders.contains(&cld_handle),
"Duplicate collider {:?} in chunk {:?}!",
cld_handle,
coords
);
chunk.colliders.push(cld_handle);
}
pub fn remove_collider(&mut self, cld_handle: Collider_Handle, pos: Vec2f, extent: Vec2f) {
let mut chunks = vec![];
self.get_all_chunks_containing(pos, extent, &mut chunks);
for coords in chunks {
self.remove_collider_coords(cld_handle, coords);
}
}
fn remove_collider_coords(&mut self, cld_handle: Collider_Handle, coords: Chunk_Coords) {
let chunk = self.chunks.get_mut(&coords).unwrap_or_else(|| {
fatal!(
"Collider {:?} should be in chunk {:?}, but that chunk does not exist.",
cld_handle,
coords
)
});
let idx = chunk.colliders.iter().position(|&c| c == cld_handle);
if let Some(idx) = idx {
chunk.colliders.remove(idx);
if chunk.colliders.is_empty() {
self.chunks.remove(&coords);
}
} else {
lerr!(
"Collider {:?} not found in expected chunk {:?}.",
cld_handle,
coords
);
}
}
pub fn update_collider(
&mut self,
cld_handle: Collider_Handle,
prev_pos: Vec2f,
new_pos: Vec2f,
extent: Vec2f,
frame_alloc: &mut Temp_Allocator,
) {
trace!("world_chunks::update_collider");
let mut prev_coords = excl_temp_array(frame_alloc);
self.get_all_chunks_containing(prev_pos, extent, &mut prev_coords);
let prev_coords = unsafe { prev_coords.into_read_only() };
let mut new_coords = excl_temp_array(frame_alloc);
self.get_all_chunks_containing(new_pos, extent, &mut new_coords);
let new_coords = unsafe { new_coords.into_read_only() };
let mut all_chunks = excl_temp_array(frame_alloc);
// Pre-allocate enough memory to hold all the chunks; then `chunks_to_add` starts at index 0,
// while `chunks_to_remove` starts at index `new_coords.len()`.
// This works because we can have at most `new_coords.len()` chunks to add and `prev_coords.len()`
// chunks to remove.
unsafe {
all_chunks.alloc_additional_uninit(new_coords.len() + prev_coords.len());
}
let mut n_chunks_to_add = 0;
let mut n_chunks_to_remove = 0;
let chunks_to_add_offset = 0;
let chunks_to_remove_offset = new_coords.len();
// Find chunks to add and to remove in O(n).
// This algorithm assumes that both prev_coords and new_coords are sorted and deduped.
let mut p_idx = 0;
let mut n_idx = 0;
while p_idx < prev_coords.len() && n_idx < new_coords.len() {
let pc = prev_coords[p_idx];
let nc = new_coords[n_idx];
match pc.cmp(&nc) {
Ordering::Less => {
all_chunks[chunks_to_remove_offset + n_chunks_to_remove] = pc;
n_chunks_to_remove += 1;
p_idx += 1;
}
Ordering::Greater => {
all_chunks[chunks_to_add_offset + n_chunks_to_add] = nc;
n_chunks_to_add += 1;
n_idx += 1;
}
Ordering::Equal => {
p_idx += 1;
n_idx += 1;
}
}
}
if p_idx < prev_coords.len() {
let diff = prev_coords.len() - p_idx;
for i in 0..diff {
all_chunks[chunks_to_remove_offset + n_chunks_to_remove + i] =
prev_coords[p_idx + i];
}
n_chunks_to_remove += diff;
} else if n_idx < new_coords.len() {
let diff = new_coords.len() - n_idx;
for i in 0..diff {
all_chunks[chunks_to_add_offset + n_chunks_to_add + i] = new_coords[n_idx + i];
}
n_chunks_to_add += diff;
}
#[cfg(debug_assertions)]
{
let to_remove = all_chunks
.iter()
.cloned()
.skip(chunks_to_remove_offset)
.take(n_chunks_to_remove)
.collect::<HashSet<_>>();
let to_add = all_chunks
.iter()
.cloned()
.skip(chunks_to_add_offset)
.take(n_chunks_to_add)
.collect::<HashSet<_>>();
debug_assert_eq!(to_remove.intersection(&to_add).count(), 0);
}
for coord in all_chunks
.iter()
.skip(chunks_to_add_offset)
.take(n_chunks_to_add)
{
self.add_collider_coords(cld_handle, *coord);
}
for coord in all_chunks
.iter()
.skip(chunks_to_remove_offset)
.take(n_chunks_to_remove)
{
self.remove_collider_coords(cld_handle, *coord);
}
}
fn get_all_chunks_containing<T>(&self, pos: Vec2f, extent: Vec2f, coords: &mut T)
where
T: Extend<Chunk_Coords>,
{
trace!("get_all_chunks_containing");
#[cfg(debug_assertions)]
let mut chk_coords = vec![];
// We need to @Cleanup the -extent*0.5 offset we need to apply and make it consistent throughout the game!
let pos = pos - extent * 0.5;
let coords_topleft = Chunk_Coords::from_pos(pos);
coords.extend(Some(coords_topleft));
#[cfg(debug_assertions)]
chk_coords.push(coords_topleft);
let coords_botright = Chunk_Coords::from_pos(pos + extent);
// Note: we cycle y-major so the result is automatically sorted (as for Chunk_Coords::cmp)
for y in 0..=coords_botright.y - coords_topleft.y {
for x in 0..=coords_botright.x - coords_topleft.x {
if x == 0 && y == 0 {
continue;
}
coords.extend(Some(Chunk_Coords::from_pos(
pos + v2!(x as f32 * CHUNK_WIDTH, y as f32 * CHUNK_HEIGHT),
)));
#[cfg(debug_assertions)]
chk_coords.push(Chunk_Coords::from_pos(
pos + v | x: (pos.x / CHUNK_WIDTH).floor() as i32,
y: (pos.y / CHUNK_HEIGHT).floor() as i32,
}
}
| random_line_split |
spatial.rs | pub fn from_pos(pos: Vec2f) -> Self {
Self {
x: (pos.x / CHUNK_WIDTH).floor() as i32,
y: (pos.y / CHUNK_HEIGHT).floor() as i32,
}
}
pub fn to_world_pos(self) -> Vec2f {
Vec2f {
x: self.x as f32 * CHUNK_WIDTH,
y: self.y as f32 * CHUNK_HEIGHT,
}
}
}
pub struct World_Chunks {
chunks: HashMap<Chunk_Coords, World_Chunk>,
to_destroy: Event_Callback_Data,
}
#[derive(Default, Debug)]
pub struct World_Chunk {
pub colliders: Vec<Collider_Handle>,
}
impl World_Chunks {
pub fn new() -> Self {
Self {
chunks: HashMap::new(),
to_destroy: wrap_cb_data(Vec::<Entity>::new()),
}
}
pub fn init(&mut self, engine_state: &mut Engine_State) {
engine_state
.systems
.evt_register
.subscribe::<Evt_Entity_Destroyed>(
Box::new(|entity, to_destroy| {
with_cb_data(to_destroy.unwrap(), |to_destroy: &mut Vec<Entity>| {
to_destroy.push(entity);
});
}),
Some(self.to_destroy.clone()),
);
}
pub fn update(&mut self, ecs_world: &Ecs_World, phys_world: &Physics_World) {
trace!("world_chunks::update");
let mut to_remove = vec![];
with_cb_data(&mut self.to_destroy, |to_destroy: &mut Vec<Entity>| {
for &entity in to_destroy.iter() {
if let Some(collider) = ecs_world.get_component::<C_Collider>(entity) {
for (cld, handle) in
phys_world.get_all_colliders_with_handles(collider.phys_body_handle)
{
to_remove.push((handle, cld.position, cld.shape.extent()));
}
}
}
to_destroy.clear();
});
for (cld, pos, extent) in to_remove {
self.remove_collider(cld, pos, extent);
}
}
pub fn | (&self) -> usize {
self.chunks.len()
}
pub fn add_collider(&mut self, cld_handle: Collider_Handle, pos: Vec2f, extent: Vec2f) {
let mut chunks = vec![];
self.get_all_chunks_containing(pos, extent, &mut chunks);
for coords in chunks {
self.add_collider_coords(cld_handle, coords);
}
}
fn add_collider_coords(&mut self, cld_handle: Collider_Handle, coords: Chunk_Coords) {
let chunk = self
.chunks
.entry(coords)
.or_insert_with(World_Chunk::default);
debug_assert!(
!chunk.colliders.contains(&cld_handle),
"Duplicate collider {:?} in chunk {:?}!",
cld_handle,
coords
);
chunk.colliders.push(cld_handle);
}
pub fn remove_collider(&mut self, cld_handle: Collider_Handle, pos: Vec2f, extent: Vec2f) {
let mut chunks = vec![];
self.get_all_chunks_containing(pos, extent, &mut chunks);
for coords in chunks {
self.remove_collider_coords(cld_handle, coords);
}
}
fn remove_collider_coords(&mut self, cld_handle: Collider_Handle, coords: Chunk_Coords) {
let chunk = self.chunks.get_mut(&coords).unwrap_or_else(|| {
fatal!(
"Collider {:?} should be in chunk {:?}, but that chunk does not exist.",
cld_handle,
coords
)
});
let idx = chunk.colliders.iter().position(|&c| c == cld_handle);
if let Some(idx) = idx {
chunk.colliders.remove(idx);
if chunk.colliders.is_empty() {
self.chunks.remove(&coords);
}
} else {
lerr!(
"Collider {:?} not found in expected chunk {:?}.",
cld_handle,
coords
);
}
}
pub fn update_collider(
&mut self,
cld_handle: Collider_Handle,
prev_pos: Vec2f,
new_pos: Vec2f,
extent: Vec2f,
frame_alloc: &mut Temp_Allocator,
) {
trace!("world_chunks::update_collider");
let mut prev_coords = excl_temp_array(frame_alloc);
self.get_all_chunks_containing(prev_pos, extent, &mut prev_coords);
let prev_coords = unsafe { prev_coords.into_read_only() };
let mut new_coords = excl_temp_array(frame_alloc);
self.get_all_chunks_containing(new_pos, extent, &mut new_coords);
let new_coords = unsafe { new_coords.into_read_only() };
let mut all_chunks = excl_temp_array(frame_alloc);
// Pre-allocate enough memory to hold all the chunks; then `chunks_to_add` starts at index 0,
// while `chunks_to_remove` starts at index `new_coords.len()`.
// This works because we can have at most `new_coords.len()` chunks to add and `prev_coords.len()`
// chunks to remove.
unsafe {
all_chunks.alloc_additional_uninit(new_coords.len() + prev_coords.len());
}
let mut n_chunks_to_add = 0;
let mut n_chunks_to_remove = 0;
let chunks_to_add_offset = 0;
let chunks_to_remove_offset = new_coords.len();
// Find chunks to add and to remove in O(n).
// This algorithm assumes that both prev_coords and new_coords are sorted and deduped.
let mut p_idx = 0;
let mut n_idx = 0;
while p_idx < prev_coords.len() && n_idx < new_coords.len() {
let pc = prev_coords[p_idx];
let nc = new_coords[n_idx];
match pc.cmp(&nc) {
Ordering::Less => {
all_chunks[chunks_to_remove_offset + n_chunks_to_remove] = pc;
n_chunks_to_remove += 1;
p_idx += 1;
}
Ordering::Greater => {
all_chunks[chunks_to_add_offset + n_chunks_to_add] = nc;
n_chunks_to_add += 1;
n_idx += 1;
}
Ordering::Equal => {
p_idx += 1;
n_idx += 1;
}
}
}
if p_idx < prev_coords.len() {
let diff = prev_coords.len() - p_idx;
for i in 0..diff {
all_chunks[chunks_to_remove_offset + n_chunks_to_remove + i] =
prev_coords[p_idx + i];
}
n_chunks_to_remove += diff;
} else if n_idx < new_coords.len() {
let diff = new_coords.len() - n_idx;
for i in 0..diff {
all_chunks[chunks_to_add_offset + n_chunks_to_add + i] = new_coords[n_idx + i];
}
n_chunks_to_add += diff;
}
#[cfg(debug_assertions)]
{
let to_remove = all_chunks
.iter()
.cloned()
.skip(chunks_to_remove_offset)
.take(n_chunks_to_remove)
.collect::<HashSet<_>>();
let to_add = all_chunks
.iter()
.cloned()
.skip(chunks_to_add_offset)
.take(n_chunks_to_add)
.collect::<HashSet<_>>();
debug_assert_eq!(to_remove.intersection(&to_add).count(), 0);
}
for coord in all_chunks
.iter()
.skip(chunks_to_add_offset)
.take(n_chunks_to_add)
{
self.add_collider_coords(cld_handle, *coord);
}
for coord in all_chunks
.iter()
.skip(chunks_to_remove_offset)
.take(n_chunks_to_remove)
{
self.remove_collider_coords(cld_handle, *coord);
}
}
fn get_all_chunks_containing<T>(&self, pos: Vec2f, extent: Vec2f, coords: &mut T)
where
T: Extend<Chunk_Coords>,
{
trace!("get_all_chunks_containing");
#[cfg(debug_assertions)]
let mut chk_coords = vec![];
// We need to @Cleanup the -extent*0.5 offset we need to apply and make it consistent throughout the game!
let pos = pos - extent * 0.5;
let coords_topleft = Chunk_Coords::from_pos(pos);
coords.extend(Some(coords_topleft));
#[cfg(debug_assertions)]
chk_coords.push(coords_topleft);
let coords_botright = Chunk_Coords::from_pos(pos + extent);
// Note: we cycle y-major so the result is automatically sorted (as for Chunk_Coords::cmp)
for y in 0..=coords_botright.y - coords_topleft.y {
for x in 0..=coords_botright.x - coords_topleft.x {
if x == 0 && y == 0 {
continue;
}
coords.extend(Some(Chunk_Coords::from_pos(
pos + v2!(x as f32 * CHUNK_WIDTH, y as f32 * CHUNK_HEIGHT),
)));
#[cfg(debug_assertions)]
chk_coords.push(Chunk_Coords::from_pos(
pos + | n_chunks | identifier_name |
utils.py | : bool = False):
logger = logging.getLogger()
if debug:
log_level = logging.DEBUG
else:
log_level = logging.INFO
logger.setLevel(log_level)
if not any(type(i) == logging.StreamHandler for i in logger.handlers):
sh = logging.StreamHandler()
sh.setLevel(log_level)
logger.addHandler(sh)
file_handlers = [i for i in logger.handlers if type(i) == logging.FileHandler]
for h in file_handlers:
logger.removeHandler(h)
os.makedirs(log_dir, exist_ok=True)
logger.info('Logging to {}'.format(log_dir))
fh = logging.FileHandler(os.path.join(log_dir, 'log.txt'))
fh.setLevel(log_level)
fh.setFormatter(
logging.Formatter(fmt='%(asctime)s %(name)-12s %(levelname)-8s %(message)s', datefmt='%m-%d %H:%M'))
logger.addHandler(fh)
# Only works if you have an SMTP server like postfix running on your box
if aws_config['email_to_send_alerts_to'] and not any(
type(i) == logging.handlers.SMTPHandler for i in logger.handlers):
eh = logging.handlers.SMTPHandler('localhost',
'logger@{}'.format(socket.getfqdn()),
aws_config['email_to_send_alerts_to'],
'Log Message from {} about {}'.format(socket.getfqdn(), log_dir))
eh.setLevel(logging.ERROR)
eh.setFormatter(
logging.Formatter('Hey there, heads up:\n\n%(name)-12s: %(levelname)-8s %(message)s'))
logger.addHandler(eh)
return logger
def tuple_of_tuples_to_padded_array(tup_of_tups: Tuple[Tuple[int, ...], ...], ctx, pad_amount=None):
'''
Converts a tuple of tuples into a PaddedArray (i.e. glorified pair of nd.Arrays for working with SequenceMask)
Pads to the length of the longest tuple in the outer tuple, unless pad_amount is specified.
'''
value_lengths = nd.array([len(i) for i in tup_of_tups], dtype='float32',
ctx=ctx) # float type to play nice with SequenceMask later
if pad_amount is not None and value_lengths.max().asscalar() < pad_amount:
tup_of_tups = list(tup_of_tups)
tup_of_tups[0] = tup_of_tups[0] + (0,) * (pad_amount - len(tup_of_tups[0]))
values = list(itertools.zip_longest(*tup_of_tups, fillvalue=0))
values = nd.array(values, dtype='int32', ctx=ctx).T[:, :pad_amount]
return PaddedArray(values, value_lengths)
def evaluate_loss(data_loader: AsyncDataLoader, model, loss_fxn):
with data_loader as data_loader:
total_loss = nd.zeros((1,), ctx=data_loader.ctx[0])
for split_batch, batch_length in tqdm(data_loader, total=data_loader.total_batches):
losses = [loss_fxn(model(batch.data), batch.label, model.data_encoder) for batch in split_batch]
loss_sums = nd.concat(*[loss.sum().as_in_context(data_loader.ctx[0]) for loss in losses], dim=0)
total_loss += nd.sum(loss_sums)
total_loss.wait_to_read()
return total_loss.asscalar() / len(data_loader)
def evaluate_FITB_accuracy(data_loader: AsyncDataLoader, model):
'''
Measures the accuracy of the model in indicating the correct variable
'''
with data_loader as data_loader:
correct = 0
for split_batch, batch_length in tqdm(data_loader, total=data_loader.total_batches):
batches_outputs = [(batch, model(batch.data)) for batch in split_batch]
for batch, output in batches_outputs:
predictions_labels = model.unbatchify(batch, output)
for prediction, label in predictions_labels:
correct += int(nd.dot(prediction, label).asscalar())
return correct / len(data_loader)
def evaluate_full_name_accuracy(data_loader: AsyncDataLoader, model):
'''
Measures the accuracy of the model in predicting the full true name, in batches
'''
logged_example = False
with data_loader as data_loader:
correct = 0
for split_batch, batch_length in tqdm(data_loader, total=data_loader.total_batches):
batches_outputs = [(batch, model(batch.data)) for batch in split_batch]
for batch, output in batches_outputs:
predictions_labels = model.unbatchify(batch, output)
for prediction, label in predictions_labels:
if not logged_example:
logger.info('Some example predictions:\n{}'.format(pprint.pformat(predictions_labels[:10])))
logged_example = True
if prediction == label:
correct += 1
return correct / len(data_loader)
def evaluate_subtokenwise_accuracy(data_loader: AsyncDataLoader, model):
'''
Measures the accuracy of the model in predicting each subtoken in the true names (with penalty for extra subtokens)
'''
logged_example = False
with data_loader as data_loader:
correct = 0
total = 0
for split_batch, batch_length in tqdm(data_loader, total=data_loader.total_batches):
batches_outputs = [(batch, model(batch.data)) for batch in split_batch]
for batch, output in batches_outputs:
predictions_labels = model.unbatchify(batch, output)
for prediction, label in predictions_labels:
if not logged_example:
logger.info('Some example predictions:\n{}'.format(pprint.pformat(predictions_labels[:10])))
logged_example = True
for i in range(min(len(prediction), len(label))):
if prediction[i] == label[i]:
correct += 1
total += max(len(prediction), len(label))
return correct / total
def evaluate_edit_distance(data_loader: AsyncDataLoader, model):
| true = []
for i in tqdm(range(0, math.ceil(len(dataset) / n_batch))):
data = dataset[n_batch * i:n_batch * (i + 1)]
graph, label = model.batchify(data, ctx)
output = model(graph)
predictions = nd.argmax(output, axis=2)
# Masking output to max(length_of_output, length_of_label)
output_preds = predictions.asnumpy()
output_lengths = []
for row in output_preds:
end_token_idxs = np.where(row == 0)[0]
if len(end_token_idxs):
output_lengths.append(int(min(end_token_idxs)))
else:
output_lengths.append(model.max_name_length)
output_lengths = nd.array(output_lengths, ctx=ctx)
mask_lengths = nd.maximum(output_lengths, label.value_lengths)
output = nd.SequenceMask(predictions, value=-1, use_sequence_length=True, sequence_length=mask_lengths,
axis=1).asnumpy().astype('int32')
labels = nd.SequenceMask(label.values, value=-1, use_sequence_length=True,
sequence_length=mask_lengths.astype('int32'), axis=1).asnumpy()
pred += [i for i in output.flatten().tolist() if i >= 0]
true += [i for i in labels.flatten().tolist() if i >= 0]
return metrics.f1_score(true, pred, average='weighted')
class FITBLoss(mx.gluon.HybridBlock):
def hybrid_forward(self, F, output, *args, **kwargs):
label, _ = args
loss = SigmoidBinaryCrossEntropyLoss()
return loss(output, label)
class VarNamingLoss(mx.gluon.HybridBlock):
def hybrid_forward(self, F, output, *args, **kwargs):
'''
Masks the outputs and returns the SoftMaxCrossEntropy loss
output is a (batch x max_name_length x len(all_node_name_subtokens)) tensor of name predictions for each graph
Note: last dimension of output are pre-softmax values - SoftmaxCrossEntropy does the softmax
'''
(label, _), data_encoder = args
softmax_xent = gluon.loss.SoftmaxCrossEntropyLoss(axis=2)
# Masking output to max(where_RNN_emitted_PAD_token, length_of_label)
output_preds = F.argmax(output, axis=2).asnumpy()
output_lengths = []
for row in output_preds:
end_token_idxs = np.where(row == data_encoder.all_node_name_subtokens['__PAD__'])[0]
if len(end_token_idxs):
output_lengths.append(int(min(end_token_idxs)))
else:
output_lengths.append(output_preds.shape[1])
output_lengths = F.array(output_lengths, ctx=output.context)
mask_lengths = F.maximum(output_lengths, label.value_lengths)
output = F.Sequence | '''
Measures the mean (over instances) of the characterwise edit distance (Levenshtein distance) between predicted and true names
'''
logged_example = False
with data_loader as data_loader:
cum_edit_distance = 0
for split_batch, batch_length in tqdm(data_loader, total=data_loader.total_batches):
batches_outputs = [(batch, model(batch.data)) for batch in split_batch]
for batch, output in batches_outputs:
predictions_labels = model.unbatchify(batch, output)
for prediction, label in predictions_labels:
if not logged_example:
logger.info('Some example predictions:\n{}'.format(pprint.pformat(predictions_labels[:10])))
logged_example = True
pred_name = ''.join(prediction)
real_name = ''.join(label)
cum_edit_distance += editdistance.eval(pred_name, real_name)
return cum_edit_distance / len(data_loader)
pred = [] | identifier_body |