'a'],
['figurare', 'verb', 'a'],
['figurina', 'noun', 'c'],
['fila', 'noun', 'a'],
['filante', 'pres_part', 'c'],
['filante', 'adjective', 'c'],
['filante', 'noun', 'c'],
['filare', 'verb', 'b'],
['filastrocca', 'noun', 'c'],
['file', 'noun', 'a'],
['filetto', 'noun', 'c'],
['film', 'noun', 'a'],
['filmato', 'past_part', 'b'],
['filmato', 'adjective', 'b'],
['filmato', 'noun', 'b'],
['filo', 'noun', 'a'],
['filosofia', 'noun', 'a'],
['filosofico', 'adjective', 'b'],
['filosofo', 'noun', 'b'],
['filtrare', 'verb', 'b'],
['filtro', 'noun', 'b'],
['finale', 'adjective', 'a'],
['finale', 'noun', 'a'],
['finalità', 'noun', 'b'],
['finalmente', 'adverb', 'a'],
['finanza', 'noun', 'b'],
['finanziamento', 'noun', 'b'],
['finanziare', 'verb', 'b'],
['finanziario', 'adjective', 'a'],
['finanziatore', 'adjective', 'c'],
['finanziatore', 'noun', 'c'],
['finché', 'conjunction', 'a'],
['fine', 'noun', 'a'],
['fine', 'adjective', 'b'],
['finestra', 'noun', 'a'],
['finestrino', 'noun', 'b'],
['fingere', 'verb', 'a'],
['finimondo', 'noun', 'c'],
['finire', 'verb', 'a'],
['finire', 'noun', 'a'],
['finito', 'past_part', 'b'],
['finito', 'adjective', 'b'],
['finlandese', 'adjective', 'c'],
['finlandese', 'noun', 'c'],
['fino', 'preposition', 'a'],
['fino', 'adverb', 'a'],
['finocchio', 'noun', 'c'],
['finora', 'adverb', 'b'],
['finta', 'noun', 'b'],
['finto', 'past_part', 'a'],
['finto', 'adjective', 'a'],
['fiocco', 'noun', 'c'],
['fionda', 'noun', 'c'],
['fioraio', 'noun', 'c'],
['fiore', 'noun', 'a'],
['fiorentino', 'adjective', 'b'],
['fiorentino', 'noun', 'b'],
['fiorito', 'past_part', 'c'],
['fiorito', 'adjective', 'c'],
['firma', 'noun', 'a'],
['firmare', 'verb', 'a'],
['fiscale', 'adjective', 'b'],
['fiscale', 'noun', 'b'],
['fisicamente', 'adverb', 'b'],
['fisico', 'adjective', 'a'],
['fisico', 'noun', 'a'],
['fissare', 'verb', 'a'],
['fisso', 'adjective', 'a'],
['fisso', 'adverb', 'a'],
['fisso', 'noun', 'a'],
['fitto', 'past_part', 'b'],
['fitto', 'adjective', 'b'],
['fitto', 'adverb', 'b'],
['fitto', 'noun', 'b'],
['fiume', 'noun', 'a'],
['fiuto', 'noun', 'c'],
['flash', 'noun', 'b'],
['flauto', 'noun', 'c'],
['flessibile', 'adjective', 'b'],
['flessibile', 'noun', 'b'],
['flora', 'noun', 'c'],
['fluido', 'adjective', 'b'],
['fluido', 'noun', 'b'],
['fluoro', 'noun', 'c'],
['flusso', 'noun', 'b'],
['foca', 'noun', 'c'],
['focaccia', 'noun', 'c'],
['fodera', 'noun', 'c'],
['foderare', 'verb', 'c'],
['foglia', 'noun', 'b'],
['foglio', 'noun', 'a'],
['fogna', 'noun', 'c'],
['folla', 'noun', 'b'],
['folle', 'adjective', 'b'],
['folle', 'noun', 'b'],
['follia', 'noun', 'b'],
['fondamentale', 'adjective', 'a'],
['fondamentale', 'noun', 'a'],
['fondamentalmente', 'adverb', 'b'],
['fondamento', 'noun', 'b'],
['fondare', 'verb', 'a'],
['fondatore', 'noun', 'b'],
['fondazione', 'noun', 'b'],
['fondere', 'verb', 'b'],
['fondo', 'adjective', 'loc-comando'],
['fondo', 'noun', 'loc-comando'],
['fondo', 'adverb', 'loc-comando'],
['fontana', 'noun', 'b'],
['fontanella', 'noun', 'c'],
['fonte', 'noun', 'a'],
['forare', 'verb', 'b'],
['forbice', 'noun', 'c'],
['forchetta', 'noun', 'c'],
['forcina', 'noun', 'c'],
['foresta', 'noun', 'b'],
['forestale', 'adjective', 'c'],
['forestale', 'noun', 'c'],
['forfora', 'noun', 'c'],
['forma', 'noun', 'a'],
['formaggino', 'noun', 'c'],
['formaggio', 'noun', 'b'],
['formale', 'adjective', 'b'],
['formare', 'verb', 'a'],
['formato', 'past_part', 'b'],
['formato', 'adjective', 'b'],
['formato', 'noun', 'b'],
['formazione', 'noun', 'a'],
['formula', 'noun', 'a'],
['formulare', 'verb', 'b'],
['fornace', 'noun', 'c'],
['fornaio', 'noun', 'c'],
['fornello', 'noun', 'b'],
['fornire', 'verb', 'a'],
['fornitore', 'adjective', 'b'],
['fornitore', 'noun', 'b'],
['forno', 'noun', 'b'],
['foro', 'noun', 'b'],
['forse', 'adverb', 'a'],
['forse', 'noun', 'a'],
['forte', 'adjective', 'a'],
['forte', 'adverb', 'a'],
['forte', 'noun', 'a'],
['fortemente', 'adverb', 'b'],
['fortuna', 'noun', 'a'],
['fortunatamente', 'adverb', 'b'],
['fortunato', 'adjective', 'b'],
['forum', 'noun', 'b'],
['forza', 'noun', 'a'],
['forzare', 'verb', 'b'],
['fosforescente', 'adjective', 'c'],
['fossa', 'noun', 'b'],
['fossetta', 'noun', 'c'],
['fosso', 'noun', 'c'],
['foto', 'noun', 'a'],
['fotografare', 'verb', 'b'],
['fotografia', 'noun', 'a'],
['fotografico', 'adjective', 'b'],
['fotografo', 'noun', 'b'],
['fottere', 'verb', 'b'],
['foulard', 'noun', 'c'],
['fra', 'preposition', 'a'],
['fracasso', 'noun', 'c'],
['fragile', 'adjective', 'b'],
['frammento', 'noun', 'b'],
['francamente', 'adverb', 'b'],
['francese', 'adjective', 'a'],
['francese', 'noun', 'a'],
['francobollo', 'noun', 'c'],
['frangia', 'noun', 'c'],
['frase', 'noun', 'a'],
['fratello', 'noun', 'a'],
['frazione', 'noun', 'b'],
['freccia', 'noun', 'b'],
['freddezza', 'noun', 'c'],
['freddo', 'adjective', 'a'],
['freddo', 'noun', 'a'],
['fregare', 'verb', 'a'],
['frenare', 'verb', 'b'],
['frenetico', 'adjective', 'b'],
['freno', 'noun', 'b'],
['frequentare', 'verb', 'a'],
['frequente', 'adjective', 'b'],
['frequenza', 'noun', 'b'],
['fresco', 'adjective', 'a'],
['fresco', 'noun', 'a'],
['fretta', 'noun', 'a'],
['frigo', 'noun', 'b'],
['frigorifero', 'adjective', 'b'],
['frigorifero', 'noun', 'b'],
['fringuello', 'noun', 'c'],
['frittata', 'noun', 'c'],
['fritto', 'past_part', 'c'],
['fritto', 'adjective', 'c'],
['fritto', 'noun', 'c'],
['friulano', 'adjective', 'c'],
['friulano', 'noun', 'c'],
['fronte', 'noun', 'a'],
['frontiera', 'noun', 'b'],
['frugare', 'verb', 'b'],
['frumento', 'noun', 'c'],
['fruscio', 'noun', 'c'],
['frusta', 'noun', 'c'],
['frutta', 'noun', 'b'],
['fruttivendolo', 'noun', 'c'],
['frutto', 'noun', 'a'],
['fucile', 'noun', 'b'],
['fuga', 'noun', 'a'],
['fuggire', 'verb', 'a'],
['fulmine', 'noun', 'b'],
['fumare', 'verb', 'a'],
['fumetto', 'noun', 'b'],
['fumo', 'noun', 'a'],
['fumo', 'adjective', 'a'],
['fune', 'noun', 'c'],
['funerale', 'noun', 'b'],
['funerale', 'adjective', 'b'],
['fungo', 'noun', 'b'],
['funzionale', 'adjective', 'b'],
['funzionale', 'noun', 'b'],
['funzionamento', 'noun', 'b'],
['funzionare', 'verb', 'a'],
['funzionario', 'noun', 'b'],
['funzione', 'noun', 'a'],
['fuoco', 'noun', 'loc-comando'],
['fuori', 'adverb', 'a'],
['fuori', 'preposition', 'a'],
['fuori', 'noun', 'a'],
['fuori', 'adjective', 'a'],
['furbo', 'adjective', 'b'],
['furbo', 'noun', 'b'],
['furfante', 'noun', 'c'],
['furgone', 'noun', 'b'],
['furia', 'noun', 'b'],
['furioso', 'adjective', 'b'],
['furto', 'noun', 'b'],
['fusione', 'noun', 'b'],
['fuso', 'past_part', 'b'],
['fuso', 'adjective', 'b'],
['fuso', 'noun', 'b'],
['futuro', 'adjective', 'a'],
['futuro', 'noun', 'a'],
['gabbia', 'noun', 'b'],
['galassia', 'noun', 'b'],
['galeotto', 'noun', 'c'],
['galera', 'noun', 'b'],
['galleggiare', 'verb', 'c'],
['galleria', 'noun', 'b'],
['gallese', 'adjective', 'c'],
['gallese', 'noun', 'c'],
['galletta', 'noun', 'c'],
['gallina', 'noun', 'b'],
['gallo', 'noun', 'c'],
['gamba', 'noun', 'a'],
['gambero', 'noun', 'c'],
['gambo', 'noun', 'c'],
['ganascia', 'noun', 'c'],
['gancio', 'noun', 'c'],
['gara', 'noun', 'a'],
['garage', 'noun', 'b'],
['garantire', 'verb', 'a'],
['garanzia', 'noun', 'b'],
['garbo', 'noun', 'c'],
['gargarismo', 'noun', 'c'],
['garofano', 'noun', 'c'],
['garza', 'noun', 'c'],
['gas', 'noun', 'a'],
['gasolio', 'noun', 'c'],
['gassosa', 'noun', 'c'],
['gastronomia', 'noun', 'c'],
['gatto', 'noun', 'a'],
['gavetta', 'noun', 'c'],
['gay', 'adjective', 'b'],
['gay', 'noun', 'b'],
['gazza', 'noun', 'c'],
['gelateria', 'noun', 'c'],
['gelatina', 'noun', 'c'],
['gelato', 'past_part', 'b'],
['gelato', 'adjective', 'b'],
['gelato', 'noun', 'b'],
['gelido', 'adjective', 'b'],
['gelo', 'noun', 'c'],
['gelosia', 'noun', 'b'],
['geloso', 'adjective', 'b'],
['gelsomino', 'noun', 'c'],
['gemello', 'adjective', 'b'],
['gemello', 'noun', 'b'],
['gemma', 'noun', 'c'],
['gene', 'noun', 'b'],
['generale', 'adjective', 'a'],
['generale', 'noun', 'a'],
['generalmente', 'adverb', 'b'],
['generare', 'verb', 'a'],
['generazione', 'noun', 'a'],
['genere', 'noun', 'a'],
['generico', 'adjective', 'b'],
['generico', 'noun', 'b'],
['generosità', 'noun', 'c'],
['generoso', 'adjective', 'b'],
['genetico', 'adjective', 'b'],
['gengiva', 'noun', 'c'],
['geniale', 'adjective', 'b'],
['genio', 'noun', 'b'],
['genitore', 'noun', 'a'],
['gennaio', 'noun', 'a'],
['genovese', 'adjective', 'c'],
['genovese', 'noun', 'c'],
['gente', 'noun', 'a'],
['gentile', 'adjective', 'a'],
['gentile', 'noun', 'a'],
['genuino', 'adjective', 'c'],
['geografico', 'adjective', 'b'],
['geografo', 'noun', 'c'],
['geometra', 'noun', 'c'],
['geometria', 'noun', 'c'],
['geometrico', 'adjective', 'c'],
['gesso', 'noun', 'b'],
['gestione', 'noun', 'a'],
['gestire', 'verb', 'a'],
['gesto', 'noun', 'a'],
['gestore', 'noun', 'b'],
['gettare', 'verb', 'a'],
['gettone', 'noun', 'c'],
['ghiaccio', 'noun', 'b'],
['ghiacciolo', 'noun', 'c'],
['ghianda', 'noun', 'c'],
['ghiro', 'noun', 'c'],
['gi', 'noun', 'c'],
['già', 'adverb', 'a'],
['giacca', 'noun', 'a'],
['giacere', 'verb', 'b'],
['giaguaro', 'noun', 'c'],
['giallo', 'adjective', 'a'],
['giallo', 'noun', 'a'],
['giapponese', 'adjective', 'a'],
['giapponese', 'noun', 'a'],
['giardinaggio', 'noun', 'c'],
['giardiniera', 'noun', 'c'],
['giardino', 'noun', 'a'],
['gigante', 'noun', 'b'],
['gigante', 'adjective', 'b'],
['gigantesco', 'adjective', 'b'],
['giglio', 'noun', 'b'],
['ginnastica', 'noun', 'b'],
['ginocchio', 'noun', 'a'],
['giocare', 'verb', 'a'],
['giocatore', 'noun', 'a'],
['giocattolo', 'noun', 'b'],
['gioco', 'noun', 'a'],
['gioia', 'noun', 'a'],
['gioiello', 'noun', 'b'],
['gioioso', 'adjective', 'c'],
['giordano', 'adjective', 'c'],
['giordano', 'noun', 'c'],
['giornale', 'noun', 'a'],
['giornale', 'adjective', 'a'],
['giornalino', 'noun', 'c'],
['giornalista', 'noun', 'a'],
['giornata', 'noun', 'a'],
['giorno', 'noun', 'a'],
['giostra', 'noun', 'c'],
['giovane', 'adjective', 'a'],
['giovane', 'noun', 'a'],
['giovanile', 'adjective', 'b'],
['giovedì', 'noun', 'b'],
['gioventù', 'noun', 'b'],
['giovinezza', 'noun', 'b'],
['giraffa', 'noun', 'c'],
['girare', 'verb', 'a'],
['giravite', 'noun', 'c'],
['giretto', 'noun', 'c'],
['giro', 'noun', 'a'],
['gironzolare', 'verb', 'c'],
['girotondo', 'noun', 'c'],
['gita', 'noun', 'b'],
['giù', 'adverb', 'a'],
['giù', 'adjective', 'a'],
['giubba', 'noun', 'c'],
['giubbotto', 'noun', 'c'],
['giudicare', 'verb', 'a'],
['giudice', 'noun', 'a'],
['giudiziario', 'adjective', 'b'],
['giudizio', 'noun', 'a'],
['giugno', 'noun', 'a'],
['giungere', 'verb', 'a'],
['giungla', 'noun', 'c'],
['giuramento', 'noun', 'b'],
['giurare', 'verb', 'a'],
['giuria', 'noun', 'c'],
['giuridico', 'adjective', 'b'],
['giustamente', 'adverb', 'b'],
['giustificare', 'verb', 'b'],
['giustizia', 'noun', 'a'],
['giusto', 'adjective', 'a'],
['giusto', 'noun', 'a'],
['giusto', 'adverb', 'a'],
['gli', 'pronoun', 'a'],
['glicine', 'noun', 'c'],
['global', 'adjective', 'b'],
['global', 'noun', 'b'],
['globale', 'adjective', 'b'],
['gloria', 'noun', 'b'],
['gnocco', 'noun', 'c'],
['gnomo', 'noun', 'c'],
['goal', 'noun', 'b'],
['gobbo', 'adjective', 'c'],
['gobbo', 'noun', 'c'],
['goccia', 'noun', 'b'],
['godere', 'verb', 'a'],
['gola', 'noun', 'b'],
['goloso', 'adjective', 'c'],
['gomito', 'noun', 'b'],
['gomitolo', 'noun', 'c'],
['gomma', 'noun', 'b'],
['gonfiare', 'verb', 'b'],
['gonfio', 'adjective', 'b'],
['gonfio', 'noun', 'b'],
['gonna', 'noun', 'b'],
['gorgonzola', 'noun', 'c'],
['gorilla', 'noun', 'c'],
['gossip', 'noun', 'b'],
['governare', 'verb', 'b'],
['governatore', 'noun', 'b'],
['governo', 'noun', 'a'],
['gradino', 'noun', 'b'],
['gradire', 'verb', 'b'],
['grado', 'noun', 'a'],
['graffiare', 'verb', 'c'],
['graffio', 'noun', 'c'],
['grafico', 'adjective', 'b'],
['grafico', 'noun', 'b'],
['grammatica', 'noun', 'b'],
['grammo', 'noun', 'b'],
['grana', 'noun', 'c'],
['granaio', 'noun', 'c'],
['granchio', 'noun', 'c'],
['grande', 'adjective', 'a'],
['grande', 'noun', 'a'],
['grandezza', 'noun', 'b'],
['grandine', 'noun', 'c'],
['grandioso', 'adjective', 'b'],
['grano', 'noun', 'b'],
['granturco', 'noun', 'c'],
['grappa', 'noun', 'c'],
['grasso', 'adjective', 'a'],
['grasso', 'noun', 'a'],
['gratis', 'adverb', 'b'],
['gratis', 'adjective', 'b'],
['grattare', 'verb', 'b'],
['grattugiato', 'past_part', 'c'],
['grattugiato', 'adjective', 'c'],
['gratuito', 'adjective', 'b'],
['grave', 'adjective', 'a'],
['grave', 'noun', 'a'],
['grave', 'adverb', 'a'],
['gravidanza', 'noun', 'b'],
['gravità', 'noun', 'b'],
['grazie', 'exclamation', 'a'],
['grazie', 'noun', 'a'],
['grazioso', 'adjective', 'c'],
['greco', 'adjective', 'a'],
['greco', 'noun', 'a'],
['grembiule', 'noun', 'c'],
['gridare', 'verb', 'a'],
['grido', 'noun', 'b'],
['grigio', 'adjective', 'a'],
['grigio', 'noun', 'a'],
['griglia', 'noun', 'c'],
['grinza', 'noun', 'c'],
['grissino', 'noun', 'c'],
['grossista', 'noun', 'c'],
['grosso', 'adjective', 'a'],
['grosso', 'noun', 'a'],
['grotta', 'noun', 'b'],
['gru', 'noun', 'c'],
['gruppo', 'noun', 'a'],
['guadagnare', 'verb', 'a'],
['guadagno', 'noun', 'b'],
['guaio', 'noun', 'b'],
['guaire', 'verb', 'c'],
['guancia', 'noun', 'b'],
['guanciale', 'noun', 'c'],
['guanciale', 'adjective', 'c'],
['guanto', 'noun', 'b'],
['guardare', 'verb', 'a'],
['guardaroba', 'noun', 'c'],
['guardia', 'noun', 'a'],
['guarire', 'verb', 'b'],
['guarnizione', 'noun', 'c'],
['guasto', 'noun', 'c'],
['guerra', 'noun', 'a'],
['guerriero', 'noun', 'b'],
['guerriero', 'adjective', 'b'],
['gufo', 'noun', 'c'],
['guida', 'noun', 'a'],
['guidare', 'verb', 'a'],
['guidatore', 'noun', 'c'],
['guinzaglio', 'noun', 'c'],
['gustare', 'verb', 'b'],
['gusto', 'noun', 'a'],
['gustoso', 'adjective', 'c'],
['hamburger', 'noun', 'c'],
['hobby', 'noun', 'b'],
['home', 'noun', 'b'],
['hotel', 'noun', 'b'],
['hyperlink', 'noun', 'b'],
['i', 'noun', 'c'],
['i', 'determiner', 'b'],
['icona', 'noun', 'b'],
['ics', 'noun', 'c'],
['idea', 'noun', 'a'],
['ideale', 'adjective', 'a'],
['ideale', 'noun', 'a'],
['ideare', 'verb', 'b'],
['identico', 'adjective', 'b'],
['identico', 'noun', 'b'],
['identificare', 'verb', 'a'],
['identificazione', 'noun', 'b'],
['identità', 'noun', 'a'],
['ideologia', 'noun', 'b'],
['ideologico', 'adjective', 'b'],
['idiota', 'adjective', 'a'],
['idiota', 'noun', 'a'],
['idraulico', 'adjective', 'b'],
['idraulico', 'noun', 'b'],
['idrico', 'adjective', 'b'],
['idrogeno', 'noun', 'b'],
['ieri', 'adverb', 'a'],
['ieri', 'noun', 'a'],
['igiene', 'noun', 'c'],
['ignorante', 'pres_part', 'b'],
['ignorante', 'adjective', 'b'],
['ignorante', 'noun', 'b'],
['ignoranza', 'noun', 'b'],
['ignorare', 'verb', 'a'],
['ignoto', 'adjective', 'b'],
['ignoto', 'noun', 'b'],
['il', 'determiner', 'a'],
['il', 'pronoun', 'a'],
['illecito', 'adjective', 'b'],
['illecito', 'noun', 'b'],
['illegale', 'adjective', 'b'],
['illegale', 'noun', 'b'],
['illegittimo', 'adjective', 'c'],
['illegittimo', 'noun', 'c'],
['illudere', 'verb', 'b'],
['illuminare', 'verb', 'b'],
['illuminato', 'past_part', 'b'],
['illuminato', 'adjective', 'b'],
['illuminato', 'noun', 'b'],
['illusione', 'noun', 'b'],
['illustrare', 'verb', 'b'],
['illustre', 'adjective', 'b'],
['imballare', 'verb', 'c'],
['imbarazzante', 'pres_part', 'b'],
['imbarazzante', 'adjective', 'b'],
['imbarazzato', 'past_part', 'b'],
['imbarazzato', 'adjective', 'b'],
['imbarazzo', 'noun', 'b'],
['imbattersi', 'verb', 'b'],
['imbecille', 'adjective', 'b'],
['imbecille', 'noun', 'b'],
['imbiancare', 'verb', 'c'],
['imbianchino', 'noun', 'c'],
['imbottigliare', 'verb', 'c'],
['imbrogliare', 'verb', 'c'],
['imbroglio', 'noun', 'c'],
['imbuto', 'noun', 'c'],
['imitare', 'verb', 'b'],
['immaginare', 'verb', 'a'],
['immaginare', 'noun', 'a'],
['immaginario', 'adjective', 'b'],
['immaginario', 'noun', 'b'],
['immaginazione', 'noun', 'b'],
['immagine', 'noun', 'a'],
['immaturo', 'adjective', 'c'],
['immediatamente', 'adverb', 'a'],
['immediato', 'adjective', 'b'],
['immediato', 'noun', 'b'],
['immenso', 'adjective', 'b'],
['immenso', 'noun', 'b'],
['immergere', 'verb', 'b'],
['immigrato', 'past_part', 'b'],
['immigrato', 'adjective', 'b'],
['immigrato', 'noun', 'b'],
['immobile', 'adjective', 'a'],
['immobile', 'noun', 'a'],
['immobiliare', 'adjective', 'b'],
['immobiliare', 'noun', 'b'],
['immondizia', 'noun', 'c'],
['impallidire', 'verb', 'c'],
['imparare', 'verb', 'a'],
['impastare', 'verb', 'c'],
['impatto', 'noun', 'b'],
['impaziente', 'adjective', 'c'],
['impaziente', 'noun', 'c'],
['impazzire', 'verb', 'b'],
['impedire', 'verb', 'a'],
['impegnare', 'verb', 'a'],
['impegnativo', 'adjective', 'b'],
['impegnato', 'past_part', 'c'],
['impegnato', 'adjective', 'c'],
['impegno', 'noun', 'a'],
['imperare', 'verb', 'b'],
['imperatore', 'noun', 'b'],
['imperiale', 'adjective', 'b'],
['imperiale', 'noun', 'b'],
['impermeabile', 'adjective', 'c'],
['impermeabile', 'noun', 'c'],
['impero', 'noun', 'b'],
['impero', 'adjective', 'b'],
['impianto', 'noun', 'a'],
['impiegare', 'verb', 'a'],
['impiegato', 'past_part', 'b'],
['impiegato', 'adjective', 'b'],
['impiegato', 'noun', 'b'],
['impiego', 'noun', 'b'],
['implicare', 'verb', 'b'],
['imporre', 'verb', 'a'],
['importante', 'pres_part', 'a'],
['importante', 'adjective', 'a'],
['importante', 'noun', 'a'],
['importanza', 'noun', 'a'],
['importare', 'verb', 'a'],
['importo', 'noun', 'b'],
['impossibile', 'adjective', 'a'],
['impossibile', 'noun', 'a'],
['impostare', 'verb', 'b'],
['impostazione', 'noun', 'b'],
['impreciso', 'adjective', 'c'],
['imprenditore', 'noun', 'b'],
['impresa', 'noun', 'a'],
['impressionante', 'pres_part', 'b'],
['impressionante', 'adjective', 'b'],
['impressionare', 'verb', 'b'],
['impressione', 'noun', 'a'],
['imprevisto', 'adjective', 'b'],
['imprevisto', 'noun', 'b'],
['imprigionare', 'verb', 'c'],
['improbabile', 'adjective', 'b'],
['impronta', 'noun', 'b'],
['improvvisamente', 'adverb',
"""Module containing the PyMDbScraper class."""
import json
import re
import requests
import time
from collections import defaultdict
from selectolax.parser import HTMLParser
from pymdb.exceptions import InvalidCompanyId
from pymdb.models import (
CompanyScrape,
CompanyCreditScrape,
CreditScrape,
NameCreditScrape,
NameScrape,
SearchResultName,
SearchResultTitle,
TitleScrape,
TitleTechSpecsScrape,
)
from pymdb.models.name import (
ACTOR,
_CREDIT_MAPPINGS,
)
from pymdb.utils import (
get_category,
get_company_id,
get_denomination,
get_episode_info,
get_name_id,
get_ref_marker,
get_title_id,
is_money_string,
remove_tags,
remove_tags_and_content,
split_by_br,
trim_name,
trim_year,
trim_money_string,
)
class PyMDbScraper:
"""Scrapes various information from IMDb web pages.
Contains functions for various IMDb pages and scrapes information into Python classes.
The rate limit defaults to 1000 ms.
"""
_rate_limit = 1000 # ms
_headers = {
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) ' +
'Chrome/77.0.3865.90 Safari/537.36',
'accept': 'text/html,application/xhtml+xml,application/xml'
}
def __init__(self, rate_limit=1000):
if rate_limit > 0:
self._rate_limit = rate_limit
else:
print(f'Invalid rate limit {rate_limit}, defaulting to {self._rate_limit}ms')
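# Example usage (illustrative only; 'tt0000000' is a placeholder title ID, and
# attribute access on the returned objects is assumed to mirror the keyword
# arguments used elsewhere in this module):
#
#   scraper = PyMDbScraper(rate_limit=2000)
#   title = scraper.get_title('tt0000000', include_taglines=True)
#   for credit in scraper.get_full_cast('tt0000000'):
#       print(credit.name_id, credit.job_title, credit.credit)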
def get_title(self, title_id, include_taglines=False):
"""Scrapes information from the IMDb web page for the specified title.
Uses the given title ID to request the IMDb page for the title and scrapes
the page's information into a new `TitleScrape` object. An optional argument
`include_taglines` allows an additional request to be made to gather all
taglines IMDb has for the title.
Args:
title_id (:obj:`str`): The title's ID used by IMDb prefixed with `tt`.
include_taglines (:obj:`bool`, optional): Specify if an extra request should be
made to get all the taglines for the title
Returns:
:class:`~.models.title.TitleScrape`: An object containing the page's information.
Raises:
HTTPError: If the request failed.
"""
request = f'https://www.imdb.com/title/{title_id}/'
tree = self._get_tree(request)
display_title = None
title_parent_id = None
rating = None
country = None
language = None
release_date = None
end_year = None
season_number = None
episode_number = None
taglines = []
plot = None
storyline = None
production_companies = []
top_cast = []
budget = None
budget_denomination = None
opening_weekend_gross = None
opening_weekend_date = None
usa_gross = None
worldwide_gross = None
# Get title text
title_node = tree.css_first('div.title_wrapper')
if title_node:
display_title_node = title_node.css_first('h1')
if display_title_node:
# Remove title year
title_year_node = display_title_node.css_first('span#titleYear')
if title_year_node:
title_year_node.decompose()
display_title = display_title_node.text().strip()
title_info_node = title_node.css_first('div.subtext')
if title_info_node:
# If this is a TV series, get the year the show ended
for link_node in title_info_node.css('a'):
if 'href' in link_node.attributes and 'releaseinfo' in link_node.attributes['href']:
series_dates_match = re.search(r'[\d]{4}[-–][\d]{4}', link_node.text())
if series_dates_match:
end_year_split = re.sub(r'[-–]', '\t', series_dates_match.group(0)).split('\t')
if len(end_year_split) > 1:
end_year = end_year_split[1]
break
# Get MPAA Rating
title_info_node.strip_tags(['span', 'a', 'time'])
rating = re.sub(r'(\s|,)*', '', title_info_node.text()).strip()
# Get title parent (if TV episode)
title_parent_node = tree.css_first('div.titleParent > a')
if title_parent_node:
title_parent_id = get_title_id(title_parent_node)
# Get plot
plot_node = tree.css_first('div.summary_text')
if plot_node:
plot = plot_node.text().strip()
# Get storyline
storyline_node = tree.css_first('div#titleStoryLine')
if storyline_node:
storyline_node = storyline_node.css_first('div > p > span')
if storyline_node:
storyline = storyline_node.text().strip()
# Get taglines
if include_taglines:
tagline_request = f'https://www.imdb.com/title/{title_id}/taglines'
tagline_tree = self._get_tree(tagline_request)
if not tagline_tree.css_first('div#no_content'):
for tagline_node in tagline_tree.css('div.soda'):
# TODO: should a Tagline object be created that stores the note for each tagline separately?
taglines.append(tagline_node.text().strip())
# Parse through text blocks
text_block_nodes = tree.css('div#titleDetails > div.txt-block')
for text_block_node in text_block_nodes:
text_block_id = text_block_node.css_first('h4.inline')
if text_block_id:
text_block_id = text_block_id.text().lower().strip()
text_block_text = text_block_node.text()
if 'country' in text_block_id:
country_node = text_block_node.css_first('a')
if country_node:
country = country_node.text().strip()
elif 'language' in text_block_id:
language_node = text_block_node.css_first('a')
if language_node:
language = language_node.text().strip()
elif 'release date' in text_block_id:
release_date_match = re.search(r'\d+?\s*\w+?\s*[\d]{4}', text_block_text)
if release_date_match:
release_date = release_date_match.group(0)
elif 'production co' in text_block_id:
companies = text_block_node.css('a')
for company in companies:
company_id = get_company_id(company)
if company_id:
production_companies.append(company_id)
# Box office info
elif 'budget' in text_block_id:
if is_money_string(text_block_text):
budget = trim_money_string(text_block_text)
budget_denomination = get_denomination(text_block_text)
elif 'opening weekend' in text_block_id:
if is_money_string(text_block_text):
opening_weekend_gross = trim_money_string(text_block_text)
opening_weekend_date_node = text_block_node.css_first('span')
if opening_weekend_date_node:
opening_weekend_date = opening_weekend_date_node.text().strip()
elif 'gross usa' in text_block_id:
if is_money_string(text_block_text):
usa_gross = trim_money_string(text_block_text)
elif 'worldwide gross' in text_block_id:
if is_money_string(text_block_text):
worldwide_gross = trim_money_string(text_block_text)
# Get top cast members
cast_node = tree.css_first('table.cast_list')
if cast_node:
for cast_member in cast_node.css('tr.odd, tr.even'):
cast_member_node = cast_member.css_first('td:nth-of-type(2) > a')
if cast_member_node:
character_credit = None
episode_count = None
episode_year_start = None
episode_year_end = None
character_node = cast_member.css_first('td.character')
if character_node:
# Check if there is episode information, save it, then remove it
episode_info_node = character_node.css_first('a.toggle-episodes')
if episode_info_node:
episode_count, episode_year_start, episode_year_end = get_episode_info(episode_info_node)
episode_info_node.decompose()
character_credit = re.sub(r'\s+', ' ', character_node.text().strip())
top_cast.append(
CreditScrape(
name_id=get_name_id(cast_member_node),
title_id=title_id,
job_title=ACTOR,
credit=character_credit,
episode_count=episode_count,
episode_year_start=episode_year_start,
episode_year_end=episode_year_end
))
# Get season and episode numbers if TV episode
heading_nodes = tree.css('div.bp_heading')
for heading_node in heading_nodes:
if 'Season' in heading_node.text():
heading_node_text = heading_node.text().lower()
season_number_match = re.search(r'season\s*\d+', heading_node_text)
if season_number_match:
season_number_match = re.search(r'\d+', season_number_match.group(0))
if season_number_match:
season_number = season_number_match.group(0)
episode_number_match = re.search(r'episode\s*\d+', heading_node_text)
if episode_number_match:
episode_number_match = re.search(r'\d+', episode_number_match.group(0))
if episode_number_match:
episode_number = episode_number_match.group(0)
return TitleScrape(
title_id=title_id,
display_title=display_title,
title_parent_id=title_parent_id,
mpaa_rating=rating,
country=country,
language=language,
release_date=release_date,
end_year=end_year,
season_number=season_number,
episode_number=episode_number,
taglines=taglines,
plot=plot,
storyline=storyline,
production_companies=production_companies,
top_cast=top_cast,
budget=budget,
budget_denomination=budget_denomination,
opening_weekend_gross=opening_weekend_gross,
opening_weekend_date=opening_weekend_date,
usa_gross=usa_gross,
worldwide_gross=worldwide_gross
)
def get_full_cast(self, title_id, include_episodes=False):
"""Scrapes the full cast of actors for a specified title.
Will scrape the full cast of actors for a title, each into their own `CreditScrape` object.
An optional argument `include_episodes` will also scrape each episode an actor is in
if the title is a TV series.
Args:
title_id (:obj:`str`): The title's ID used by IMDb prefixed with `tt`.
include_episodes (:obj:`bool`, optional): Specify if individual episodes of a
TV series should also be scraped.
Yields:
:class:`~.models.title.CreditScrape`: An object for each cast member in the title.
Raises:
HTTPError: If a request failed.
"""
request = f'https://www.imdb.com/title/{title_id}/fullcredits'
tree = self._get_tree(request)
cast_node = tree.css_first('table.cast_list').css('tr')
for cast_member in cast_node:
actor_node = cast_member.css_first('td.primary_photo + td > a')
if actor_node:
name_id = get_name_id(actor_node)
credit = None
episode_count = None
episode_year_start = None
episode_year_end = None
# Check if this is a TV series
toggle_episodes_node = cast_member.css_first('a.toggle-episodes')
if toggle_episodes_node:
episode_count, episode_year_start, episode_year_end = get_episode_info(toggle_episodes_node)
# Include all individual episodes an actor is in
if include_episodes:
ref_marker = get_ref_marker(toggle_episodes_node)
request = f'https://www.imdb.com/name/{name_id}/episodes/_ajax?title={title_id}' + \
f'&category=actor&ref_marker={ref_marker}&start_index=0'
episodes_tree = self._get_tree(request)
episode_nodes = episodes_tree.css('div.filmo-episodes')
for episode_node in episode_nodes:
episode_id = get_title_id(episode_node.css_first('a'))
episode_year = None
episode_credit = None
episode_info = episode_node.text().strip().split('...')
if len(episode_info) > 1:
episode_year_info = episode_info[0]
episode_credit = '...'.join(episode_info[1:]).strip()
else:
episode_year_info, = episode_info
episode_year_match = re.search(r'\([\d]{4}\)', episode_year_info)
if episode_year_match:
episode_year = episode_year_match.group(0).strip('()')
yield CreditScrape(
name_id=name_id,
title_id=episode_id,
job_title=ACTOR,
credit=episode_credit,
episode_count=None,
episode_year_start=episode_year,
episode_year_end=None
)
# Remove the TV series info from character node if exists
if toggle_episodes_node:
toggle_episodes_node.decompose()
# Get the actor's credits
character_node = cast_member.css_first('td.character')
if character_node:
credit = re.sub(r'(\s|\r|\n)+', ' ', character_node.text().strip())
yield CreditScrape(
name_id=name_id,
title_id=title_id,
job_title=ACTOR,
credit=credit,
episode_count=episode_count,
episode_year_start=episode_year_start,
episode_year_end=episode_year_end
)
def get_full_crew(self, title_id):
"""Scrapes the full list of credited crew people for a title, not including actors.
Will scrape all the credited crew members of a title, without the actors. For example, this will
include all directors, writers, producers, cinematographers, etc.
Args:
title_id (:obj:`str`): The title's ID used by IMDb prefixed with `tt`.
Yields:
:class:`~.models.title.CreditScrape`: An object for each credited crew member in the title.
Raises:
HTTPError: If the request failed.
"""
request = f'https://www.imdb.com/title/{title_id}/fullcredits'
tree = self._get_tree(request)
credits_node = tree.css_first('div#fullcredits_content')
if credits_node:
found_title = False
curr_title = None
for node in credits_node.iter():
if not found_title:
if node.tag == 'h4' and node.id != 'cast':
# Remove any extra info in a span next to the job title
title_span_node = node.css_first('span')
if title_span_node:
title_span_node.decompose()
title = node.text().strip()
if len(title) > 0:
found_title = True
curr_title = title.lower()
curr_title = re.sub(r'series', '', curr_title).strip()
if curr_title in _CREDIT_MAPPINGS:
curr_title = _CREDIT_MAPPINGS[curr_title]
else:
print(f'Found an unknown job title: {curr_title}, for {title_id}')
continue
else:
if node.tag == 'table':
content = node.css('tr')
for item in content:
name_node = item.css_first('td.name > a')
if name_node:
name_id = get_name_id(name_node)
credit = None
episode_count = None
episode_year_start = None
episode_year_end = None
credit_node = item.css_first('td.credit')
if credit_node:
credit = credit_node.text().strip()
# Grab episode count and years if TV series
episode_details_regex = r'\(\d+\s*episodes?,\s*\d{4}(-\d{4})?\)'
episode_details_match = re.search(episode_details_regex, credit)
if episode_details_match:
episode_count_details, episode_year_details = episode_details_match.group(0).strip('()').split(',')
episode_count_match = re.search(r'\d+', episode_count_details)
if episode_count_match:
episode_count = episode_count_match.group(0)
episode_year_split = episode_year_details.strip().split('-')
episode_year_start = episode_year_split[0]
if len(episode_year_split) > 1:
episode_year_end = episode_year_split[1]
credit = re.sub(episode_details_regex, '', credit).strip()
# Strip ending 'and' for a credit
if credit[-3:] ==
ae_target_ind:
ae_target_id = track.track_id
# if ae_attack_id is not None and ae_target_id is not None:
# return id_features_, output, ae_attack_id, ae_target_id
'''Deal with unconfirmed tracks, usually tracks with only one beginning frame'''
for i, idet in enumerate(u_detection):
if idet == ae_attack_ind:
ae_attack_ind = i
elif idet == ae_target_ind:
ae_target_ind = i
detections = [detections[i] for i in u_detection]
dists = matching.iou_distance(unconfirmed, detections)
matches, u_unconfirmed, u_detection = matching.linear_assignment(dists, thresh=0.7)
for itracked, idet in matches:
track = unconfirmed[itracked]
if idet == ae_attack_ind:
ae_attack_id = track.track_id
elif idet == ae_target_ind:
ae_target_id = track.track_id
return id_features_, output, ae_attack_id, ae_target_id, hm_index
def forwardFeatureMt(self, im_blob, img0, dets_, inds_, remain_inds_, attack_ids, attack_inds, target_ids,
target_inds, last_info, grad=True):
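# Re-runs the detector on im_blob (with gradients when grad=True), decodes a fresh
# set of detections, and aligns them to the previously computed dets_ via IoU plus
# Hungarian matching. The 3x3 neighborhood of id features is re-ordered back to the
# original detection indexing, and the association cascade is replayed from
# last_info; fail_ids counts attacked detections that are still assigned their
# corresponding track ID in the attacked tracker state, i.e. cases where the attack
# has not taken effect yet.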
width = img0.shape[1]
height = img0.shape[0]
inp_height = im_blob.shape[2]
inp_width = im_blob.shape[3]
c = np.array([width / 2., height / 2.], dtype=np.float32)
s = max(float(inp_width) / float(inp_height) * height, width) * 1.0
meta = {'c': c, 's': s,
'out_height': inp_height // self.opt.down_ratio,
'out_width': inp_width // self.opt.down_ratio}
im_blob.requires_grad = True
self.model.zero_grad()
if grad:
output = self.model(im_blob)[-1]
else:
with torch.no_grad():
output = self.model(im_blob)[-1]
hm = output['hm'].sigmoid()
wh = output['wh']
id_feature = output['id']
id_feature = F.normalize(id_feature, dim=1)
reg = output['reg'] if self.opt.reg_offset else None
dets_raw, inds = mot_decode(hm, wh, reg=reg, cat_spec_wh=self.opt.cat_spec_wh, K=self.opt.K)
dets = self.post_process(dets_raw.clone(), meta)
dets = self.merge_outputs([dets])[1]
dets_index = [i for i in range(len(dets))]
remain_inds = dets[:, 4] > self.opt.conf_thres
dets = dets[remain_inds]
ious = bbox_ious(np.ascontiguousarray(dets_[:, :4], dtype=np.float64),
np.ascontiguousarray(dets[:, :4], dtype=np.float64))
row_inds, col_inds = linear_sum_assignment(-ious)
match = True
if target_inds is not None:
for index, attack_ind in enumerate(attack_inds):
target_ind = target_inds[index]
if attack_ind not in row_inds or target_ind not in row_inds:
match = False
break
att_index = row_inds.tolist().index(attack_ind)
tar_index = row_inds.tolist().index(target_ind)
if ious[attack_ind, col_inds[att_index]] < 0.6 or ious[target_ind, col_inds[tar_index]] < 0.6:
match = False
break
else:
for index, attack_ind in enumerate(attack_inds):
if attack_ind not in row_inds:
match = False
break
att_index = row_inds.tolist().index(attack_ind)
if ious[attack_ind, col_inds[att_index]] < 0.8:
match = False
break
if not match:
dets = dets_
inds = inds_
remain_inds = remain_inds_
# assert match
id_features = []
for i in range(3):
for j in range(3):
id_feature_exp = _tranpose_and_gather_feat_expand(id_feature, inds, bias=(i - 1, j - 1)).squeeze(0)
id_features.append(id_feature_exp)
for i in range(len(id_features)):
id_features[i] = id_features[i][remain_inds]
fail_ids = 0
if not match:
return id_features, output, None
ae_attack_inds = []
ae_attack_ids = []
for i in range(len(row_inds)):
if ious[row_inds[i], col_inds[i]] > 0.6:
if row_inds[i] in attack_inds:
ae_attack_inds.append(col_inds[i])
index = attack_inds.tolist().index(row_inds[i])
ae_attack_ids.append(self.multiple_ori2att[attack_ids[index]])
# ae_attack_inds = [col_inds[row_inds == attack_ind] for attack_ind in attack_inds]
# ae_attack_inds = np.concatenate(ae_attack_inds)
id_features_ = [torch.zeros([len(dets_), id_features[0].size(1)]).to(id_features[0].device) for _ in range(len(id_features))]
for i in range(9):
id_features_[i][row_inds] = id_features[i][col_inds]
id_feature = _tranpose_and_gather_feat_expand(id_feature, inds)
id_feature = id_feature.squeeze(0)
id_feature = id_feature[remain_inds]
id_feature = id_feature.detach().cpu().numpy()
if len(dets) > 0:
'''Detections'''
detections = [STrack(STrack.tlbr_to_tlwh(tlbrs[:4]), tlbrs[4], f, 30) for
(tlbrs, f) in zip(dets[:, :5], id_feature)]
else:
detections = []
unconfirmed = copy.deepcopy(last_info['last_unconfirmed'])
strack_pool = copy.deepcopy(last_info['last_strack_pool'])
kalman_filter = copy.deepcopy(last_info['kalman_filter'])
STrack.multi_predict(strack_pool)
dists = matching.embedding_distance(strack_pool, detections)
dists = matching.fuse_motion(kalman_filter, dists, strack_pool, detections)
matches, u_track, u_detection = matching.linear_assignment(dists, thresh=0.7)
for itracked, idet in matches:
track = strack_pool[itracked]
det = detections[idet]
if dets_index[idet] in ae_attack_inds:
index = ae_attack_inds.index(dets_index[idet])
if track.track_id == ae_attack_ids[index]:
fail_ids += 1
''' Step 3: Second association, with IOU'''
dets_index = [dets_index[i] for i in u_detection]
detections = [detections[i] for i in u_detection]
r_tracked_stracks = [strack_pool[i] for i in u_track if strack_pool[i].state == TrackState.Tracked]
dists = matching.iou_distance(r_tracked_stracks, detections)
matches, u_track, u_detection = matching.linear_assignment(dists, thresh=0.5)
for itracked, idet in matches:
track = r_tracked_stracks[itracked]
det = detections[idet]
if dets_index[idet] in ae_attack_inds:
index = ae_attack_inds.index(dets_index[idet])
if track.track_id == ae_attack_ids[index]:
fail_ids += 1
'''Deal with unconfirmed tracks, usually tracks with only one beginning frame'''
dets_index = [dets_index[i] for i in u_detection]
detections = [detections[i] for i in u_detection]
dists = matching.iou_distance(unconfirmed, detections)
matches, u_unconfirmed, u_detection = matching.linear_assignment(dists, thresh=0.7)
for itracked, idet in matches:
track = unconfirmed[itracked]
if dets_index[idet] in ae_attack_inds:
index = ae_attack_inds.index(dets_index[idet])
if track.track_id == ae_attack_ids[index]:
fail_ids += 1
return id_features_, output, fail_ids
def CheckFit(self, dets, id_feature, attack_ids, attack_inds):
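# Replays the association cascade for the current detections against the saved
# ad_last_info tracker state and returns the indices (into attack_inds) of attacked
# targets whose detection still overlaps (IoU > 0.9) a detection that the replayed
# association assigns to an attacked track ID (for the 'multiple' attack, the
# specific mapped ID must also match); an empty list means no attacked target is
# still being tracked consistently.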
ad_attack_ids_ = [self.multiple_ori2att[attack_id] for attack_id in attack_ids] \
if self.opt.attack == 'multiple' else attack_ids
attack_dets = dets[attack_inds, :4]
ad_attack_dets = []
ad_attack_ids = []
if len(dets) > 0:
'''Detections'''
detections = [STrack(STrack.tlbr_to_tlwh(tlbrs[:4]), tlbrs[4], f, 30) for
(tlbrs, f) in zip(dets[:, :5], id_feature)]
else:
detections = []
unconfirmed = copy.deepcopy(self.ad_last_info['last_unconfirmed'])
strack_pool = copy.deepcopy(self.ad_last_info['last_strack_pool'])
kalman_filter = copy.deepcopy(self.ad_last_info['kalman_filter'])
STrack.multi_predict(strack_pool)
dists = matching.embedding_distance(strack_pool, detections)
dists = matching.fuse_motion(kalman_filter, dists, strack_pool, detections)
matches, u_track, u_detection = matching.linear_assignment(dists, thresh=0.7)
for itracked, idet in matches:
track = strack_pool[itracked]
det = detections[idet]
if track.track_id in ad_attack_ids_:
ad_attack_dets.append(det.tlbr)
ad_attack_ids.append(track.track_id)
''' Step 3: Second association, with IOU'''
detections = [detections[i] for i in u_detection]
r_tracked_stracks = [strack_pool[i] for i in u_track if strack_pool[i].state == TrackState.Tracked]
dists = matching.iou_distance(r_tracked_stracks, detections)
matches, u_track, u_detection = matching.linear_assignment(dists, thresh=0.5)
for itracked, idet in matches:
track = r_tracked_stracks[itracked]
det = detections[idet]
if track.track_id in ad_attack_ids_:
ad_attack_dets.append(det.tlbr)
ad_attack_ids.append(track.track_id)
'''Deal with unconfirmed tracks, usually tracks with only one beginning frame'''
detections = [detections[i] for i in u_detection]
dists = matching.iou_distance(unconfirmed, detections)
matches, u_unconfirmed, u_detection = matching.linear_assignment(dists, thresh=0.7)
for itracked, idet in matches:
track = unconfirmed[itracked]
det = detections[idet]
if track.track_id in ad_attack_ids_:
ad_attack_dets.append(det.tlbr)
ad_attack_ids.append(track.track_id)
if len(ad_attack_dets) == 0:
return []
ori_dets = np.array(attack_dets)
ad_dets = np.array(ad_attack_dets)
ious = bbox_ious(ori_dets.astype(np.float64), ad_dets.astype(np.float64))
row_ind, col_ind = linear_sum_assignment(-ious)
attack_index = []
for i in range(len(row_ind)):
if self.opt.attack == 'multiple':
if ious[row_ind[i], col_ind[i]] > 0.9 and self.multiple_ori2att[attack_ids[row_ind[i]]] == ad_attack_ids[col_ind[i]]:
attack_index.append(row_ind[i])
else:
if ious[row_ind[i], col_ind[i]] > 0.9:
attack_index.append(row_ind[i])
return attack_index
def update_attack_sg(self, im_blob, img0, **kwargs):
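# Single-target ('sg') attack update for one frame: runs the detector with
# gradients enabled on im_blob, decodes detections, gathers id features at a 3x3
# neighborhood of offsets, records each matched track's previous smooth (and
# adversarial) features, and runs the usual association cascade; kwargs carry the
# attack_id plus optional origin/attack track IDs.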
self.frame_id_ += 1
attack_id = kwargs['attack_id']
self_track_id_ori = kwargs.get('track_id', {}).get('origin', None)
self_track_id_att = kwargs.get('track_id', {}).get('attack', None)
activated_starcks = []
refind_stracks = []
lost_stracks = []
removed_stracks = []
width = img0.shape[1]
height = img0.shape[0]
inp_height = im_blob.shape[2]
inp_width = im_blob.shape[3]
c = np.array([width / 2., height / 2.], dtype=np.float32)
s = max(float(inp_width) / float(inp_height) * height, width) * 1.0
meta = {'c': c, 's': s,
'out_height': inp_height // self.opt.down_ratio,
'out_width': inp_width // self.opt.down_ratio}
''' Step 1: Network forward, get detections & embeddings'''
# with torch.no_grad():
im_blob.requires_grad = True
self.model.zero_grad()
output = self.model(im_blob)[-1]
hm = output['hm'].sigmoid()
wh = output['wh']
id_feature = output['id']
id_feature = F.normalize(id_feature, dim=1)
reg = output['reg'] if self.opt.reg_offset else None
dets_raw, inds = mot_decode(hm, wh, reg=reg, cat_spec_wh=self.opt.cat_spec_wh, K=self.opt.K)
id_features = []
for i in range(3):
for j in range(3):
id_feature_exp = _tranpose_and_gather_feat_expand(id_feature, inds, bias=(i - 1, j - 1)).squeeze(0)
id_features.append(id_feature_exp)
id_feature = _tranpose_and_gather_feat_expand(id_feature, inds)
id_feature = id_feature.squeeze(0)
dets = self.post_process(dets_raw.clone(), meta)
dets = self.merge_outputs([dets])[1]
remain_inds = dets[:, 4] > self.opt.conf_thres
dets = dets[remain_inds]
id_feature = id_feature[remain_inds]
for i in range(len(id_features)):
id_features[i] = id_features[i][remain_inds]
id_feature = id_feature.detach().cpu().numpy()
last_id_features = [None for _ in range(len(dets))]
last_ad_id_features = [None for _ in range(len(dets))]
dets_index = [i for i in range(len(dets))]
dets_ids = [None for _ in range(len(dets))]
tracks_ad = []
# import pdb; pdb.set_trace()
# vis
'''
for i in range(0, dets.shape[0]):
bbox = dets[i][0:4]
cv2.rectangle(img0, (bbox[0], bbox[1]),
(bbox[2], bbox[3]),
(0, 255, 0), 2)
cv2.imshow('dets', img0)
cv2.waitKey(0)
id0 = id0-1
'''
if len(dets) > 0:
'''Detections'''
detections = [STrack(STrack.tlbr_to_tlwh(tlbrs[:4]), tlbrs[4], f, 30) for
(tlbrs, f) in zip(dets[:, :5], id_feature)]
else:
detections = []
''' Add newly detected tracklets to tracked_stracks'''
unconfirmed = []
tracked_stracks = [] # type: list[STrack]
for track in self.tracked_stracks_:
if not track.is_activated:
unconfirmed.append(track)
else:
tracked_stracks.append(track)
''' Step 2: First association, with embedding'''
strack_pool = joint_stracks(tracked_stracks, self.lost_stracks_)
STrack.multi_predict(strack_pool)
dists = matching.embedding_distance(strack_pool, detections)
# dists = matching.gate_cost_matrix(self.kalman_filter, dists, strack_pool, detections)
dists = matching.fuse_motion(self.kalman_filter_, dists, strack_pool, detections)
matches, u_track, u_detection = matching.linear_assignment(dists, thresh=0.7)
# import pdb; pdb.set_trace()
for itracked, idet in matches:
track = strack_pool[itracked]
det = detections[idet]
assert last_id_features[dets_index[idet]] is None
assert last_ad_id_features[dets_index[idet]] is None
last_id_features[dets_index[idet]] = track.smooth_feat
last_ad_id_features[dets_index[idet]] = track.smooth_feat_ad
tracks_ad.append((track, dets_index[idet]))
if track.state == TrackState.Tracked:
track.update(detections[idet], self.frame_id_)
activated_starcks.append(track)
else:
track.re_activate_(det, self.frame_id_, new_id=False)
refind_stracks.append(track)
dets_ids[dets_index[idet]] = track.track_id
''' Step 3: Second association, with IOU'''
dets_index = [dets_index[i] for i in u_detection]
detections = [detections[i] for i in u_detection]
r_tracked_stracks = [strack_pool[i] for i in u_track if strack_pool[i].state == TrackState.Tracked]
dists = matching.iou_distance(r_tracked_stracks, detections)
# Repo: kmjohnson3/ML4MI_BootCamp, file: ImageSegmentation/ImageSegmentation.py
# coding: utf-8
# # Introduction
# This tutorial will give an example application of using deep learning for medical image segmentation. This example will demonstrate how to train a convolutional neural network for the purpose of lung segmentation in CT images. The tutorial will have 3 main parts:
# 1. Loading and examining data for model training
# 2. Creating, training, and evaluating a deep learning segmentation model
# 3. Making improvements to the model with skip connections
#
# Keep an eye out for questions through this demo to test your new DL knowledge and critical thinking. There are answers at the end of the document.
# ### Initial preparation
# These are some modules that we will definitely need throughout this example.
# In[ ]:
import os # operating system operations
import numpy as np # number crunching
np.random.seed(1) # set seed for random number generator
import keras # our deep learning library
import matplotlib.pyplot as plt # for plotting our results
# set plotting to be in-line and interactive
get_ipython().magic(u'matplotlib notebook')
# We will import other necessary modules as we go and need them
# # Part 1: Data Preparation
# All deep learning applications start with getting the data. In this case, the data has already been collected from subjects through CT scans and annotations have been made.
#
# Additionally, we have already created a function for loading in this data in an organized way and get it ready to feed into a deep learning model for training. The data is currently stored as DICOMs in a mess of directories. This function sorts through the directories and loads in the necessary images and masks.
#
# So, we can just import this function and load the data into our various needed arrays.
# In[ ]:
from Demo_Functions import GetLungSegData
trainX,trainY,valX,valY = GetLungSegData()
# Let's examine these arrays we've loaded.
# In[ ]:
print(trainX.shape)
print(trainY.shape)
print(valX.shape)
print(valY.shape)
# We have two sets of corresponding images and masks. There are 1299 slices of 256x256 images in the training set, and 144 in the validation set.
#
# Each of these sets has a 4th dimension that has length 1. Why?
#
# Keras, and most other deep learning libraries, expects images to be in color. That is, they have R,G, and B color channels. So images are expected to be passed in as 4 dimensional arrays. In this case, we are passing in grayscale images, so they will just have a single color channel instead of 3.
# ##### Question 1: What could be another use for having multiple input channels?
# Hint: Think MRI.
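# For illustration: Keras wants images shaped (samples, rows, cols, channels),
# so a grayscale stack needs an explicit singleton channel axis. The array below
# is an arbitrary stand-in, not the lung data.
# In[ ]:
gray_stack = np.zeros((4, 256, 256))       # stand-in grayscale slices
gray_stack = gray_stack[..., np.newaxis]   # shape becomes (4, 256, 256, 1)
print(gray_stack.shape)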
# At this point, it would be good to check that our data loaded correctly and the masks correspond to the input images. We'll using the python plotting package matplotlib to display a sample image and mask side by side for both the training and validation datasets.
#
# It's a good idea to try several different display indices to make sure all your data is lined up correctly.
# In[ ]:
# pick a random index for display
disp_ind = 1
# make a figure
plt.figure()
# concatenate the input and target image together
disp = np.c_[trainX[disp_ind,...,0],
trainY[disp_ind,...,0]]
# display image (with grayscale)
plt.imshow(disp,cmap='gray')
plt.show()
# repeat for validation set
disp_ind = 55
plt.figure()
# concatenate the input and target image together
disp = np.c_[valX[disp_ind,...,0],
valY[disp_ind,...,0]]
# display image (with grayscale)
plt.imshow(disp,cmap='gray')
plt.show()
# Looks good!
# ## Our data is now ready for training!
# # Part 2: Building a segmentation network
#
# We will build a deep convolutional neural network layer by layer, using Keras' high-level libraries that are relatively easy to work with to create exactly the network that we want.
#
# For this segmentation problem, the most common and effective networks follow a style known as 'convolutional encoder-decoder' or CED. This means that using convolutional layers we will downsample, or 'encode', our input data, and then upsample, or 'decode', back to our original input size. In this way, the convolutional layers will learn to create a mapping of our input images into a segmentation mask.
# One final note before we build the model. The filters (or 'kernels') are initialized in the background by some random distribution before training. Different distributions can greatly affect how quickly the model learns, or whether it converges at all. Each task can require different initialization distributions and usually requires playing around with different options. For the models we are using today, we already did this work for you and found that the He Normal distribution is most effective (He et al., http://arxiv.org/abs/1502.01852). We will set this parameter in all the convolutional layers.
# In[ ]:
init = 'he_normal'
# Now, let's build a segmentation model!
#
# First, import some layers we will use:
# In[ ]:
from keras.layers import Input
from keras.layers import Conv2D
from keras.layers import ZeroPadding2D
# also, import the Model function for building the model
from keras.models import Model
# We first need an input layer. Our input layer just needs the shape of the input we are providing. The shape dimensions are [sample,row,column,channel].
#
# For this 2D network, our samples are different slices. We don't need to provide this dimension to the input layer, since we will feed those samples in as batches during training. But we need the rest of the dimensions.
#
# Keep in mind that Python uses 0-indexing. So `[1:]` means collect all the parts of the array except the first one.
# In[ ]:
# create our input layer by giving it an input shape
inp = Input(shape=trainX.shape[1:])
# Now, we will add on convolutional layers
# The syntax for adding layers to our network is:
#
# `newlayer = LayerType(layer_parameters)(input_layer)`
#
# newlayer: the variable that stores the current output of the network.
# LayerType: the type of the new layer we are adding onto the network, in this case Conv2D layers.
# layer_parameters: the inputs we provide to define the new layer. For Conv2D layers, this is given as (number of filters, size of filters, and type of nonlinearity applied to the layer).
# input_layer: the previous layer that our new layer is going to be connected to.
#
# So for example: `x = Conv2D(10,(3,3), activation='relu')(inp)` creates a 2D convolutional layer with 10 filters that are 3x3 in size. The non-linearity (activation) is a Rectified Linear Unit, and it takes 'inp' as an input and gives its output as x.
# Without further ado, let's make a convolutional neural network!
# In[ ]:
# add on a couple convolutional layers
x = Conv2D(10,kernel_size=(3,3),padding='same',activation='relu',kernel_initializer=init)(inp)
x = Conv2D(20, kernel_size=(3,3),padding='same',activation='relu',kernel_initializer=init)(x)
# We will have to use some specific zero padding
# to keep our layer sizes friendly for this segmentation model
# make a zero padding layer that does 1 pad of zeros
# on all sides
x = ZeroPadding2D(padding=(1,1))(x)
# Add a strided convolution layer
x = Conv2D(30, kernel_size=(4,4),
strides=(2,2),
activation='relu',
kernel_initializer=init)(x)
# Now repeat the process for the second downsampling block
x = Conv2D(30, kernel_size=(3,3),padding='same',activation='relu',kernel_initializer=init)(x)
x = Conv2D(40, kernel_size=(3,3),padding='same',activation='relu',kernel_initializer=init)(x)
x = ZeroPadding2D(padding=(1,1))(x)
x = Conv2D(40, kernel_size=(4,4),
strides=(2,2),
activation='relu',
kernel_initializer=init)(x)
# We've now done 2 downsampling stages.
# Next comes the decoding side of the network; skip connections are added
# later, in Part 3.
# The first couple of layers here look like the ones above.
x = Conv2D(50, kernel_size=(3,3),padding='same',activation='relu',kernel_initializer=init)(x)
x = Conv2D(50, kernel_size=(3,3),padding='same',activation='relu',kernel_initializer=init)(x)
# now, we will reverse the downsampling using Transposed Convolutions, also
# incorrectly but commonly called Deconvolution
from keras.layers import Conv2DTranspose
# This is now the decoding side of the network
# The syntax is identical. However, we need the decoding side of the network to end
# up with the same output size as our images, so the
# precise order and size of layers matter
x = Conv2DTranspose(40, kernel_size=(4,4),
strides=(2,2),
activation='relu',
kernel_initializer=init)(x)
# Mixing in regular Conv2D layers is sometimes necessary
# for getting layer shapes to work out
x = Conv2D(40, kernel_size=(3,3),activation='relu',kernel_initializer=init)(x)
x = Conv2DTranspose(30, kernel_size=(3,3),padding='same',activation='relu',kernel_initializer=init)(x)
x = Conv2DTranspose(30, kernel_size=(3,3),padding='same',activation='relu',kernel_initializer=init)(x)
# Do all that again
x = Conv2DTranspose(20, kernel_size=(4,4),strides=(2,2),activation='relu',kernel_initializer=init)(x)
x = Conv2D(20, kernel_size=(3,3),activation='relu',kernel_initializer=init)(x)
x = Conv2D(10, kernel_size=(3,3),padding='same',activation='relu',kernel_initializer=init)(x)
x = Conv2D(10, kernel_size=(3,3),padding='same',activation='relu',kernel_initializer=init)(x)
# Final output layer
out = Conv2D(1,kernel_size=(1,1),activation='sigmoid',kernel_initializer=init)(x)
# Make the model using the input and output layers
SegModel = Model(inp,out)
# In[ ]:
# Print a summary of the model we just made
SegModel.summary()
# ### Compiling the model
#
# Compiling the model is the final step before training.
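# A minimal sketch of what the compile-and-train step might look like for this
# binary segmentation task; the optimizer, loss, batch size, and epoch count
# below are assumptions for illustration, not values from the original demo.
# In[ ]:
SegModel.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
history = SegModel.fit(trainX, trainY, batch_size=16, epochs=5, validation_data=(valX, valY))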
== 0x0000034E: funcName = "XAudioSetDuckerLevel"
elif id == 0x0000034F: funcName = "XAudioIsDuckerEnabled"
elif id == 0x00000350: funcName = "XAudioGetDuckerLevel"
elif id == 0x00000351: funcName = "XAudioGetDuckerThreshold"
elif id == 0x00000352: funcName = "XAudioSetDuckerThreshold"
elif id == 0x00000353: funcName = "XAudioGetDuckerAttackTime"
elif id == 0x00000354: funcName = "XAudioSetDuckerAttackTime"
elif id == 0x00000355: funcName = "XAudioGetDuckerReleaseTime"
elif id == 0x00000356: funcName = "XAudioSetDuckerReleaseTime"
elif id == 0x00000357: funcName = "XAudioGetDuckerHoldTime"
elif id == 0x00000358: funcName = "XAudioSetDuckerHoldTime"
elif id == 0x00000359: funcName = "DevAuthShouldAlwaysEnforce"
elif id == 0x0000035A: funcName = "XAudioGetUnderrunCount"
elif id == 0x0000035B: funcName = "DrvSetAudioLatencyCallback"
elif id == 0x0000035C: funcName = "XVoicedIsActiveProcess"
elif id == 0x0000035D: funcName = "KeExecuteOnProtectedStack"
elif id == 0x0000035E: funcName = "XeKeysVerifyPIRSSignature"
elif id == 0x0000035F: funcName = "XeCryptAesCtr"
elif id == 0x00000360: funcName = "XeCryptAesCbcMac"
elif id == 0x00000361: funcName = "XeCryptAesDmMac"
elif id == 0x00000362: funcName = "EmaExecute"
elif id == 0x00000363: funcName = "XeKeysGetTruncatedSecondaryConsoleId"
elif id == 0x00000364: funcName = "ExFreeDebugPool"
elif id == 0x00000365: funcName = "VdQueryVideoCapabilities"
elif id == 0x00000366: funcName = "UsbdGetDeviceRootPortType"
elif id == 0x00000367: funcName = "VdGet3dVideoFormat"
elif id == 0x00000368: funcName = "VdGetWSS2Data"
elif id == 0x00000369: funcName = "VdSet3dVideoFormat"
elif id == 0x0000036A: funcName = "VdSetWSS2Data"
elif id == 0x0000036B: funcName = "XexReserveCodeBuffer"
elif id == 0x0000036C: funcName = "XexCommitCodeBuffer"
elif id == 0x0000036D: funcName = "RtlSetVectoredExceptionHandler"
elif id == 0x0000036E: funcName = "RtlClearVectoredExceptionHandler"
elif id == 0x0000036F: funcName = "XAudioSetProcessFrameCallback"
elif id == 0x00000370: funcName = "UsbdGetRootHubDeviceNode"
elif id == 0x00000371: funcName = "UsbdGetPortDeviceNode"
elif id == 0x00000372: funcName = "UsbdGetNatalHub"
elif id == 0x00000373: funcName = "UsbdGetNatalHardwareVersion"
elif id == 0x00000374: funcName = "UsbdNatalHubRegisterNotificationCallback"
elif id == 0x00000375: funcName = "KeCallAndBlockOnDpcRoutine"
elif id == 0x00000376: funcName = "KeCallAndWaitForDpcRoutine"
elif id == 0x00000377: funcName = "TidDeviceRequest"
elif id == 0x00000378: funcName = "DmPrintData"
elif id == 0x00000379: funcName = "VdSetStudioRGBMode"
elif id == 0x0000037A: funcName = "UsbdTitleDriverResetAllUnrecognizedPorts"
elif id == 0x0000037B: funcName = "UsbdTitleDriverSetUnrecognizedPort"
elif id == 0x0000037C: funcName = "UsbdResetEndpoint"
elif id == 0x0000037D: funcName = "UsbdSetTimer"
elif id == 0x0000037E: funcName = "UsbdCancelTimer"
elif id == 0x0000037F: funcName = "UsbdQueueIsochTransfer"
elif id == 0x00000380: funcName = "KeSetPageRelocationCallback"
elif id == 0x00000381: funcName = "XexRegisterUsermodeModule"
elif id == 0x00000383: funcName = "TitleDeviceAuthRequest"
elif id == 0x00000384: funcName = "KeRegisterSwapNotification"
elif id == 0x00000385: funcName = "XInputdGetFailedConnectionOrBind"
elif id == 0x00000386: funcName = "XInputdSetFailedConnectionOrBindCallback"
elif id == 0x00000388: funcName = "XInputdSetMinMaxAuthDelay"
elif id == 0x00000389: funcName = "VgcHandler_SetHandlers"
elif id == 0x0000038A: funcName = "VvcHandlerCancelTransfers"
elif id == 0x0000038B: funcName = "VvcHandlerRetrieveVoiceExtension"
elif id == 0x0000038D: funcName = "MmResetLowestAvailablePages"
elif id == 0x00000394: funcName = "VeSetHandlers"
elif id == 0x00000395: funcName = "HalConfigureVeDevice"
elif id == 0x00000396: funcName = "XeCryptSha224Init"
elif id == 0x00000397: funcName = "XeCryptAesCreateKeySchedule"
elif id == 0x00000398: funcName = "XeCryptAesEncryptOne"
elif id == 0x00000399: funcName = "XeCryptAesDecryptOne"
elif id == 0x0000039A: funcName = "XeCryptAesCbcEncrypt"
elif id == 0x0000039B: funcName = "XeCryptAesCbcDecrypt"
elif id == 0x0000039C: funcName = "XeCryptAesGcmInitialize"
elif id == 0x0000039D: funcName = "XeCryptAesGcmUpdate"
elif id == 0x0000039E: funcName = "XeCryptAesGcmFinalize"
elif id == 0x0000039F: funcName = "XeCryptEccGetCurveParameters"
elif id == 0x000003A0: funcName = "XeCryptEccEcdhGenerateKeypair"
elif id == 0x000003A1: funcName = "XeCryptEccEcdhExponentiate"
elif id == 0x000003A2: funcName = "XeCryptEccEcdsaGenerateSignature"
elif id == 0x000003A3: funcName = "XeCryptEccEcdsaVerifySignature"
return funcName
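# Note: these long ID-to-name tables could equivalently be written as a dict
# lookup instead of an elif chain; an illustrative sketch only (not part of
# the original script):
#
#     HV_SYSCALL_NAMES = {0x00000000: "HvxGetVersions", 0x00000001: "HvxStartupProcessors"}
#     funcName = HV_SYSCALL_NAMES.get(id, "%s_%08X" % (libName, id))
#
# The explicit chains are kept below to mirror the original export/syscall listing.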
def syscallNameGen(libName, version, id):
funcName = "%s_%08X" % (libName, id)
if id == 0x00000000: funcName = "HvxGetVersions"
elif id == 0x00000001: funcName = "HvxStartupProcessors"
elif id == 0x00000002: funcName = "HvxQuiesceProcessor"
elif id == 0x00000003: funcName = "HvxFlushEntireTb"
elif id == 0x00000004: funcName = "HvxFlushSingleTb"
elif id == 0x00000005: funcName = "HvxRelocateAndFlush"
elif id == 0x00000006: funcName = "HvxGetSpecialPurposeRegister"
elif id == 0x00000007: funcName = "HvxSetSpecialPurposeRegister"
elif id == 0x00000008: funcName = "HvxGetSocRegister"
elif id == 0x00000009: funcName = "HvxSetSocRegister"
elif id == 0x0000000A: funcName = "HvxSetTimeBaseToZero"
elif id == 0x0000000B: funcName = "HvxZeroPage"
elif id == 0x0000000C: funcName = "HvxFlushDcacheRange"
elif id == 0x0000000D: funcName = "HvxPostOutput"
elif id == 0x0000000E: funcName = "HvxEnablePPUPerformanceMonitor"
elif id == 0x0000000F: funcName = "HvxGetImagePageTableEntry"
elif id == 0x00000010: funcName = "HvxSetImagePageTableEntry"
elif id == 0x00000011: funcName = "HvxCreateImageMapping"
elif id == 0x00000012: funcName = "HvxMapImagePage"
elif id == 0x00000013: funcName = "HvxCompleteImageMapping"
elif id == 0x00000014: funcName = "HvxLoadImageData"
elif id == 0x00000015: funcName = "HvxFinishImageDataLoad"
elif id == 0x00000016: funcName = "HvxStartResolveImports"
elif id == 0x00000017: funcName = "HvxResolveImports"
elif id == 0x00000018: funcName = "HvxFinishImageLoad"
elif id == 0x00000019: funcName = "HvxAbandonImageLoad"
elif id == 0x0000001A: funcName = "HvxUnmapImagePages"
elif id == 0x0000001B: funcName = "HvxUnmapImage"
elif id == 0x0000001C: funcName = "HvxUnmapImageRange"
elif id == 0x0000001D: funcName = "HvxCreateUserMode"
elif id == 0x0000001E: funcName = "HvxDeleteUserMode"
elif id == 0x0000001F: funcName = "HvxFlushUserModeTb"
elif id == 0x00000020: funcName = "HvxSetPowerMode"
elif id == 0x00000021: funcName = "HvxShadowBoot"
elif id == 0x00000022: funcName = "HvxBlowFuses"
elif id == 0x00000023: funcName = "HvxFsbInterrupt"
elif id == 0x00000024: funcName = "HvxLockL2"
elif id == 0x00000025: funcName = "HvxDvdAuthBuildNVPage"
elif id == 0x00000026: funcName = "HvxDvdAuthVerifyNVPage"
elif id == 0x00000027: funcName = "HvxDvdAuthRecordAuthenticationPage"
elif id == 0x00000028: funcName = "HvxDvdAuthRecordXControl"
elif id == 0x00000029: funcName = "HvxDvdAuthGetAuthPage"
elif id == 0x0000002A: funcName = "HvxDvdAuthVerifyAuthPage"
elif id == 0x0000002B: funcName = "HvxDvdAuthGetNextLBAIndex"
elif id == 0x0000002C: funcName = "HvxDvdAuthVerifyLBA"
elif id == 0x0000002D: funcName = "HvxDvdAuthClearDiscAuthInfo"
elif id == 0x0000002E: funcName = "HvxKeysInitialize"
elif id == 0x0000002F: funcName = "HvxKeysGetKeyProperties"
elif id == 0x00000030: funcName = "HvxKeysGetStatus"
elif id == 0x00000031: funcName = "HvxKeysGenerateRandomKey"
elif id == 0x00000032: funcName = "HvxKeysGetFactoryChallenge"
elif id == 0x00000033: funcName = "HvxKeysSetFactoryResponse"
elif id == 0x00000034: funcName = "HvxKeysSaveBootLoader"
elif id == 0x00000035: funcName = "HvxKeysSaveKeyVault"
elif id == 0x00000036: funcName = "HvxKeysSetKey"
elif id == 0x00000037: funcName = "HvxKeysGetKey"
elif id == 0x00000038: funcName = "HvxKeysGetDigest"
elif id == 0x00000039: funcName = "HvxKeysRsaPrvCrypt"
elif id == 0x0000003A: funcName = "HvxKeysHmacSha"
elif id == 0x0000003B: funcName = "HvxKeysAesCbc"
elif id == 0x0000003C: funcName = "HvxKeysDes2Cbc"
elif id == 0x0000003D: funcName = "HvxKeysDesCbc"
elif id == 0x0000003E: funcName = "HvxKeysObscureKey"
elif id == 0x0000003F: funcName = "HvxKeysSaveSystemUpdate"
elif id == 0x00000040: funcName = "HvxKeysExecute"
elif id == 0x00000041: funcName = "HvxDvdAuthTestMode"
elif id == 0x00000042: funcName = "HvxEnableTimebase"
elif id == 0x00000043: funcName = "HvxHdcpCalculateMi"
elif id == 0x00000044: funcName = "HvxHdcpCalculateAKsvSignature"
elif id == 0x00000045: funcName = "HvxHdcpCalculateBKsvSignature"
elif id == 0x00000046: funcName = "HvxSetRevocationList"
elif id == 0x00000047: funcName = "HvxEncryptedReserveAllocation"
elif id == 0x00000048: funcName = "HvxEncryptedReleaseAllocation"
elif id == 0x00000049: funcName = "HvxEncryptedEncryptAllocation"
elif id == 0x0000004A: funcName = "HvxEncryptedSweepAddressRange"
elif id == 0x0000004B: funcName = "HvxKeysExCreateKeyVault"
elif id == 0x0000004C: funcName = "HvxKeysExLoadKeyVault"
elif id == 0x0000004D: funcName = "HvxKeysExSaveKeyVault"
elif id == 0x0000004E: funcName = "HvxKeysExSetKey"
elif id == 0x0000004F: funcName = "HvxKeysExGetKey"
elif id == 0x00000050: funcName = "HvxGetUpdateSequence"
elif id == 0x00000051: funcName = "HvxSecurityInitialize"
elif id == 0x00000052: funcName = "HvxSecurityLoadSettings"
elif id == 0x00000053: funcName = "HvxSecuritySaveSettings"
elif id == 0x00000054: funcName = "HvxSecuritySetDetected"
elif id == 0x00000055: funcName = "HvxSecurityGetDetected"
elif id == 0x00000056: funcName = "HvxSecuritySetActivated"
elif id == 0x00000057: funcName = "HvxSecurityGetActivated"
elif id == 0x00000058: funcName = "HvxSecuritySetStat"
elif id == 0x00000059: funcName = "HvxGetProtectedFlags"
elif id == 0x0000005A: funcName = "HvxSetProtectedFlag"
elif id == 0x0000005B: funcName = "HvxDvdAuthGetAuthResults"
elif id == 0x0000005C: funcName = "HvxDvdAuthSetDriveAuthResult"
elif id == 0x0000005D: funcName = "HvxDvdAuthSetDiscAuthResult"
elif id == 0x0000005E: funcName = "HvxImageTransformImageKey"
elif id == 0x0000005F: funcName = "HvxImageXexHeader"
elif id == 0x00000060: funcName = "HvxRevokeLoad"
elif id == 0x00000061: funcName = "HvxRevokeSave"
elif id == 0x00000062: funcName = "HvxRevokeUpdate"
elif id == 0x00000063: funcName = "HvxDvdAuthGetMediaId"
elif id == 0x00000064: funcName = "HvxKeysLoadKeyVault"
elif id == 0x00000065: funcName = "HvxXexActivationGetNonce"
elif id == 0x00000066: funcName = "HvxXexActivationSetLicense"
elif id == 0x00000067: funcName = "HvxXexActivationVerifyOwnership"
elif | |
from nutils import *
from nutils.testing import *
from nutils.elementseq import References
from nutils.topology import Topology
import numpy
import copy
import sys
import pickle
import subprocess
import base64
import itertools
import os
import unittest
def as_rounded_list(data):
return numpy.round(data, 5).tolist()
def pairwise(items):
return [[i, j] for i, j in zip(items[:-1], items[1:])]
def subdiv(V):
V = iter(V)
items = [next(V)]
for v in V:
items += [(items[-1] + v) / 2, v]
return items
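# For example, pairwise([0, 1, 2]) == [[0, 1], [1, 2]] and
# subdiv([0, 1, 2]) == [0, 0.5, 1, 1.5, 2] (a midpoint inserted between each
# pair of consecutive values); the refinement tests below use these helpers to
# build their expected vertex grids.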
class CommonAssertions:
def assertVertices(self, topo, desired_coords):
assert len(desired_coords) == len(topo)
bezier = topo.sample('bezier', 2)
actual_coords_flat = as_rounded_list(bezier.eval(self.geom))
for ielem, desired_elem_coords in enumerate(desired_coords):
actual_elem_coords = numpy.take(actual_coords_flat, bezier.getindex(ielem), axis=0)
self.assertEqual(actual_elem_coords.tolist(), desired_elem_coords)
def assertTake(self, topo, selection):
# Like `assertVertices` but using `self.desired_vertices[selection]`.
selection = numpy.asarray(selection, dtype=int).ravel()
verts = [self.desired_vertices[i] for i in selection]
self.assertVertices(topo, verts)
def assertCompressed(self, topo, mask):
# Like `assertVertices` but using `self.desired_vertices[mask]`.
mask = numpy.asarray(mask, dtype=bool).ravel()
assert mask.size == self.desired_nelems
verts = [verts for select, verts in zip(mask, self.desired_vertices) if select]
self.assertVertices(topo, verts)
def assertUnorderedVertices(self, topo, desired_coords):
assert len(desired_coords) == len(topo)
bezier = topo.sample('bezier', 2)
actual_coords_flat = as_rounded_list(bezier.eval(self.geom))
actual_coords = []
for ielem, desired_elem_coords in enumerate(desired_coords):
actual_elem_coords = numpy.take(actual_coords_flat, bezier.getindex(ielem), axis=0)
actual_coords.append(actual_elem_coords.tolist())
self.assertEqual(sorted(actual_coords), sorted(desired_coords))
class CommonTests(CommonAssertions):
def test_empty_like(self):
empty = self.topo.empty_like()
self.assertEqual(len(empty), 0)
self.assertEqual(empty.spaces, self.desired_spaces)
self.assertEqual(empty.space_dims, self.desired_space_dims)
self.assertEqual(empty.ndims, self.desired_ndims)
def test_spaces(self):
self.assertEqual(self.topo.spaces, self.desired_spaces)
def test_space_dims(self):
self.assertEqual(self.topo.space_dims, self.desired_space_dims)
def test_ndims(self):
self.assertEqual(self.topo.ndims, self.desired_ndims)
def test_len(self):
self.assertEqual(len(self.topo), self.desired_nelems)
def test_references(self):
assert len(self.desired_references) == self.desired_nelems
self.assertSequenceEqual(self.topo.references, self.desired_references)
def test_elements(self):
# This sort of tests the `self.topo.transforms` by evaluating `self.geom`
# and comparing with `self.desired_vertices`.
self.assertVertices(self.topo, self.desired_vertices)
def test_get_groups_nonexistent(self):
self.assertFalse(self.topo.get_groups('nonexistent'))
def test_getitem_empty(self):
with self.assertRaises(KeyError):
self.topo['nonexistent']
def test_take(self):
self.assertFalse(self.topo.take([]))
for ielem in range(self.desired_nelems):
self.assertTake(self.topo.take([ielem]), [ielem])
def test_take_invalid_indices(self):
with self.assertRaisesRegex(ValueError, '^expected a one-dimensional array$'):
self.topo.take(numpy.array(0))
with self.assertRaisesRegex(ValueError, '^expected a one-dimensional array$'):
self.topo.take(numpy.array([[0, 1], [2, 3]]))
def test_compress(self):
self.assertFalse(self.topo.compress([False]*self.desired_nelems))
for ielem in range(self.desired_nelems):
self.assertTake(self.topo.compress([i == ielem for i in range(self.desired_nelems)]), [ielem])
def test_slice_invalid_dim(self):
with self.assertRaisesRegex(IndexError, '^dimension index out of range$'):
self.topo.slice(slice(None), self.desired_ndims)
def test_f_index(self):
self.assertEqual(self.topo.sample('gauss', 0).eval(self.topo.f_index).tolist(), list(range(self.desired_nelems)))
def test_unit_integral(self):
self.assertAlmostEqual(self.topo.integral(function.J(self.geom), degree=0).eval(), sum(self.desired_volumes))
def test_unit_integrate(self):
self.assertAlmostEqual(self.topo.integrate(function.J(self.geom), degree=0), sum(self.desired_volumes))
def test_unit_integrate_elementwise(self):
self.assertEqual(as_rounded_list(self.topo.integrate_elementwise(function.J(self.geom), degree=0)), self.desired_volumes)
def test_refine_spaces_none(self):
self.assertEqual(self.topo.refine_spaces([]), self.topo)
def test_invalid_intersections(self):
with self.assertRaises(ValueError):
self.topo & Topology.empty(tuple('other' + space for space in self.desired_spaces), self.desired_space_dims, self.desired_ndims)
with self.assertRaises(ValueError):
self.topo & Topology.empty(self.desired_spaces, tuple(dim + 1 for dim in self.desired_space_dims), self.desired_ndims)
with self.assertRaises(ValueError):
self.topo & Topology.empty(self.desired_spaces, self.desired_space_dims, self.desired_ndims + 1)
def test_invalid_unions(self):
with self.assertRaises(ValueError):
self.topo | Topology.empty(tuple('other' + space for space in self.desired_spaces), self.desired_space_dims, self.desired_ndims)
with self.assertRaises(ValueError):
self.topo | Topology.empty(self.desired_spaces, tuple(dim + 1 for dim in self.desired_space_dims), self.desired_ndims)
with self.assertRaises(ValueError):
self.topo | Topology.empty(self.desired_spaces, self.desired_space_dims, self.desired_ndims + 1)
def test_select(self):
if self.desired_ndims == 0:
return
if self.desired_nelems:
centers = numpy.stack([numpy.mean(v, axis=0) for v in self.desired_vertices])
center = numpy.mean(centers, axis=0)
center = centers[numpy.argmin(((centers - center)**2).sum(1).round(5))]
else:
center = numpy.zeros(self.desired_ndims)
direction = 1 / (self.desired_ndims - numpy.arange(self.desired_ndims))
for i in range(self.desired_ndims):
desired_selection, = numpy.where([(numpy.sum((numpy.array(v) - center) * direction, axis=1) > 0).any() for v in self.desired_vertices])
desired_vertices = [self.desired_vertices[i] for i in desired_selection]
self.assertVertices(self.topo.select(((self.geom - center) * direction).sum()), desired_vertices)
direction = numpy.roll(direction, shift=1)
class ConformingTests:
@property
def edge_map(self):
# Mapping from edge vertices to pairs of element and edge indices based on
# `self.desired_references` and `self.desired_vertices`.
assert len(self.desired_references) == len(self.desired_vertices) == self.desired_nelems
edge_map = {}
for ielem, (ref, verts) in enumerate(zip(self.desired_references, self.desired_vertices)):
local_verts = as_rounded_list(ref.vertices)
for iedge, (trans, edge) in enumerate(ref.edges):
local_edge_verts = as_rounded_list(trans.apply(edge.vertices))
edge_verts = tuple(tuple(verts[local_verts.index(v)]) for v in local_edge_verts)
edge_map.setdefault(edge_verts, set()).add((ielem, iedge))
return edge_map
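# Keys of edge_map are tuples of (rounded) global edge-vertex coordinates;
# values are sets of (element index, local edge index) pairs sharing that
# edge, so a boundary edge has one entry and an interface edge has two.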
@property
def connectivity(self):
assert len(self.desired_references) == self.desired_nelems
connectivity = [[-1] * ref.nedges for ref in self.desired_references]
for sides in self.edge_map.values():
assert len(sides) <= 2
if len(sides) == 2:
(ielem1, iedge1), (ielem2, iedge2) = sides
connectivity[ielem1][iedge1] = ielem2
connectivity[ielem2][iedge2] = ielem1
return connectivity
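# Entries left at -1 are boundary edges; interior edges store the index of the
# neighbouring element on the other side.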
def test_connectivity(self):
self.assertEqual(list(map(list, self.topo.connectivity)), self.connectivity)
def test_boundary_all_spaces(self):
boundary_vertices = [list(map(list, verts)) for verts, sides in self.edge_map.items() if len(sides) == 1]
self.assertUnorderedVertices(self.topo.boundary, boundary_vertices)
def test_interfaces_all_spaces(self):
interface_vertices = [list(map(list, verts)) for verts, sides in self.edge_map.items() if len(sides) == 2]
self.assertUnorderedVertices(self.topo.interfaces, interface_vertices)
def test_basis_std_degree1(self):
basis = self.topo.basis('std', degree=1)
values, verts = self.topo.sample('bezier', 2).eval([basis, self.geom])
dofs_to_verts = {}
verts_to_dofs = {}
for val, vert in zip(map(as_rounded_list, values), (tuple(as_rounded_list(v)) for v in verts)):
self.assertCountEqual(val, [1]+[0]*(len(val)-1))
dof = val.index(1)
if dof in dofs_to_verts:
self.assertEqual(dofs_to_verts[dof], vert)
else:
dofs_to_verts[dof] = vert
if vert in verts_to_dofs:
self.assertEqual(verts_to_dofs[vert], dof)
else:
verts_to_dofs[vert] = dof
self.assertEqual(sorted(dofs_to_verts), list(range(len(basis))))
self.assertEqual(sorted(verts_to_dofs), sorted(set(tuple(v) for e in self.desired_vertices for v in e)))
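# The checks above establish that the degree-1 'std' basis is nodal: at every
# element corner exactly one basis function evaluates to 1 (the rest to 0),
# and the dof <-> vertex correspondence is one-to-one.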
class NewTopologyRefine(TestCase, CommonAssertions):
# Tests for default implementations of `Topology.refine_*`.
def setUp(self):
super().setUp()
class TestTopo(Topology):
def __init__(self, real):
self.real = real
super().__init__(real.spaces, real.space_dims, real.references)
def refine_spaces(self, spaces):
return TestTopo(self.real.refine_spaces(spaces))
def sample(self, ischeme, degree):
return self.real.sample(ischeme, degree)
topo, self.geom = mesh.newrectilinear([4, 2], spaces=['X', 'Y'])
self.topo = TestTopo(topo)
@staticmethod
def mkverts(XX, YY):
return [[[x, y] for x in X for y in Y] for X in pairwise(XX) for Y in pairwise(YY)]
def test_refine_count_iter(self):
refine_iter = iter(self.topo.refine_iter)
X, Y = range(5), range(3)
for i in range(3):
desired = self.mkverts(X, Y)
self.assertVertices(self.topo.refine_count(i), desired)
self.assertVertices(self.topo.refine(i), desired)
self.assertVertices(next(refine_iter), desired)
X, Y = subdiv(X), subdiv(Y)
def test_refine_spaces(self):
# We only test `Topology.refine` because `Topology.refine_spaces` is
# abstract.
self.assertVertices(self.topo.refine(['X']), self.mkverts(subdiv(range(5)), range(3)))
self.assertVertices(self.topo.refine(['Y']), self.mkverts(range(5), subdiv(range(3))))
def test_refine_spaces_count(self):
self.assertVertices(self.topo.refine(dict(X=1, Y=2)), self.mkverts(subdiv(range(5)), subdiv(subdiv(range(3)))))
def test_refine_count_negative(self):
with self.assertRaisesRegex(ValueError, '^Negative counts are invalid.$'):
self.topo.refine_count(-1)
with self.assertRaisesRegex(ValueError, '^Negative counts are invalid.$'):
self.topo.refine(-1)
with self.assertRaisesRegex(ValueError, '^Negative counts are invalid.$'):
self.topo.refine_spaces_count(dict(X=-1))
def test_refine_unknown_space(self):
with self.assertRaisesRegex(ValueError, '^This topology does not have space Z.$'):
self.topo.refine_spaces(['Z'])
class NewTopologyTake(TestCase, CommonAssertions):
# Tests for default implementations of `Topology.take` and
# `Topology.compress`.
def setUp(self):
super().setUp()
class TestTopo(Topology):
def __init__(self, real):
self.real = real
super().__init__(real.spaces, real.space_dims, real.references)
def sample(self, ischeme, degree):
return self.real.sample(ischeme, degree)
topo, self.geom = mesh.newrectilinear([4, 2], spaces=['X', 'Y'])
self.topo = TestTopo(topo)
self.desired_vertices = [[[x, y] for x in X for y in Y] for X in pairwise(range(5)) for Y in pairwise(range(3))]
def test_take(self):
self.assertTake(self.topo.take([1, 3, 4]), [1, 3, 4])
self.assertTake(self.topo.take(numpy.array([1, 3, 4])), [1, 3, 4])
def test_getitem(self):
self.assertTake(self.topo[[1, 3, 4]], [1, 3, 4])
self.assertTake(self.topo[numpy.array([1, 3, 4])], [1, 3, 4])
def test_take_empty(self):
self.assertTake(self.topo.take([]), [])
self.assertTake(self.topo.take(numpy.array([], dtype=int)), [])
# Test whether an empty float array is allowed.
self.assertTake(self.topo.take(numpy.array([])), [])
def test_take_invalid_array(self):
with self.assertRaisesRegex(ValueError, '^expected a one-dimensional array$'):
self.topo.take(numpy.array([[1, 2], [3, 4]]))
with self.assertRaises(TypeError):
self.topo.take(numpy.array([1, 2], dtype=float))
def test_compress(self):
self.assertTake(self.topo.compress([False, True, False, True, True, False, False, False]), [1, 3, 4])
def test_compress_invalid_array(self):
with self.assertRaisesRegex(ValueError, '^expected a one-dimensional array$'):
self.topo.compress([[False, True]]*4)
with self.assertRaisesRegex(ValueError, '^length of mask does not match number of elements$'):
self.topo.compress([False])
class NewTopologySlice(TestCase, CommonAssertions):
# Tests for default implementation of `Topology.__getitem__`.
def setUp(self):
super().setUp()
self.topo, self.geom = mesh.newrectilinear([4, 2], spaces=['X', 'Y'])
self.desired_vertices = [[[x, y] for x in X for y in Y] for X in pairwise(range(5)) for Y in pairwise(range(3))]
self.idx = numpy.arange(8).reshape(4, 2)
def test_slice(self):
self.assertTake(self.topo.slice(slice(None), 0), self.idx)
self.assertTake(self.topo.slice(slice(None), 1), self.idx)
self.assertTake(self.topo.slice(slice(2, None), 0), self.idx[2:])
self.assertTake(self.topo.slice(slice(None, 1), 1), self.idx[:, :1])
def test_getitem(self):
self.assertTake(self.topo[:], self.idx)
self.assertTake(self.topo[:, :], self.idx)
self.assertTake(self.topo[..., :], self.idx)
self.assertTake(self.topo[..., :, :], self.idx)
self.assertTake(self.topo[:, ..., :], self.idx)
self.assertTake(self.topo[:, :, ...], self.idx)
self.assertTake(self.topo[2:], self.idx[2:])
self.assertTake(self.topo[2:, 1:], self.idx[2:, 1:])
def test_getitem_multiple_ellipsis(self):
with self.assertRaisesRegex(Exception, '^only one ellipsis is allowed$'):
self.topo[..., :, ...]
def test_getitem_too_many_indices(self):
with self.assertRaisesRegex(Exception, '^too many indices'):
self.topo[:, :, :]
def test_slice_invalid_dimensions(self):
with self.assertRaises(IndexError):
self.topo.slice(slice(None), -1)
with self.assertRaises(IndexError):
self.topo.slice(slice(None), 2)
class NewTopologyBoundaryInterfaces(TestCase):
def setUp(self):
super().setUp()
self.topo1, self.geom = mesh.line([0, 1, 2], space='X')
self.topo0 = self.topo1.boundary_spaces(['X'])
def test_boundary_0d(self):
with self.assertRaisesRegex(ValueError, '^A 0D topology has no boundary.$'):
self.topo0.boundary_spaces(['X'])
def test_interfaces_0d(self):
with self.assertRaisesRegex(ValueError, '^A 0D topology has no interfaces.$'):
self.topo0.interfaces_spaces(['X'])
def test_boundary_empty_spaces(self):
with self.assertRaisesRegex(ValueError, '^A 0D topology has no boundary.$'):
self.topo0.boundary_spaces([])
def test_interfaces_empty_spaces(self):
with self.assertRaisesRegex(ValueError, '^A 0D topology has no interfaces.$'):
self.topo0.interfaces_spaces([])
def test_boundary_unknown_space(self):
with self.assertRaisesRegex(ValueError, '^This topology does not have space Y.$'):
self.topo1.boundary_spaces(['Y'])
def test_interfaces_unknown_space(self):
with self.assertRaisesRegex(ValueError, '^This topology does not have space Y.$'):
self.topo1.interfaces_spaces(['Y'])
def test_basis_0d(self):
basis = self.topo0.basis('std', degree=0)
sampled = self.topo0.sample('bezier', 2).eval(basis)
self.assertEqual(as_rounded_list(sampled), [[1.], [1.]])
class NewEmpty(TestCase, CommonTests, ConformingTests):
def setUp(self):
super().setUp()
self.desired_spaces = 'a', 'b'
self.desired_space_dims = 1, 2
self.desired_ndims = 3
self.topo = Topology.empty(self.desired_spaces, self.desired_space_dims, self.desired_ndims)
self.geom = function.concatenate([function.rootcoords(space, dim) for space, dim in zip(self.desired_spaces, self.desired_space_dims)])
self.desired_nelems = 0
self.desired_volumes = []
self.desired_references = []
self.desired_vertices = []
def test_opposite(self):
self.assertEqual(len(~self.topo), 0)
def test_intersection(self):
atrans = transformseq.IndexTransforms(1, 1, 0)
btrans = transformseq.IndexTransforms(2, 1, 1)
other = topology.SimplexTopology('a', numpy.array([[0, 1]]), atrans, atrans) * topology.SimplexTopology('b', numpy.array([[0, 1, 2]]), btrans, btrans)
self.assertEqual(self.topo & other, self.topo)
self.assertEqual(other & | |

from __future__ import absolute_import
from __future__ import print_function as _
import pysb.core
from pysb.generator.bng import BngGenerator, format_complexpattern
import os
import subprocess
import re
import itertools
import sympy
import sympy.parsing.sympy_parser as sympy_parser
import numpy
import tempfile
import abc
from warnings import warn
import shutil
import collections
import pysb.pathfinder as pf
import tokenize
from pysb.logging import get_logger, EXTENDED_DEBUG
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
try:
from future_builtins import zip
except ImportError:
pass
# Alias basestring under Python 3 for forwards compatibility
try:
basestring
except NameError:
basestring = str
def set_bng_path(dir):
""" Deprecated. Use pysb.pathfinder.set_path() instead. """
warn("Function %s() is deprecated; use pysb.pathfinder.set_path() "
"instead" % set_bng_path.__name__, category=DeprecationWarning,
stacklevel=2)
pf.set_path('bng', dir)
class BngInterfaceError(RuntimeError):
"""BNG reported an error"""
pass
class BngBaseInterface(object):
""" Abstract base class for interfacing with BNG """
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def __init__(self, model=None, verbose=False, cleanup=False,
output_prefix=None, output_dir=None,
model_additional_species=None,
model_population_maps=None):
self._logger = get_logger(__name__,
model=model,
log_level=verbose)
self._base_file_stem = 'pysb'
self.cleanup = cleanup
self.output_prefix = 'tmpBNG' if output_prefix is None else \
output_prefix
if model:
self.generator = BngGenerator(
model, additional_initials=model_additional_species,
population_maps=model_population_maps
)
self.model = self.generator.model
self._check_model()
else:
self.generator = None
self.model = None
self.base_directory = tempfile.mkdtemp(prefix=self.output_prefix,
dir=output_dir)
self._logger.debug('{} instantiated in directory {}'.format(
self.__class__, self.base_directory)
)
def __enter__(self):
return self
@abc.abstractmethod
def __exit__(self):
return
def _delete_tmpdir(self):
shutil.rmtree(self.base_directory)
def _check_model(self):
"""
Checks a model has at least one initial condition and rule, raising
an exception if not
"""
if not self.model.rules:
raise NoRulesError()
if (
not self.model.initials
and not any(r.is_synth() for r in self.model.rules)
):
raise NoInitialConditionsError()
@classmethod
def _bng_param(cls, param):
"""
Ensures a BNG console parameter is in the correct format
Strings are double quoted and booleans are mapped to [0,1]. Other
types are currently used verbatim.
Parameters
----------
param :
An argument to a BNG action call
"""
if isinstance(param, basestring):
return '"%s"' % param
elif isinstance(param, bool):
return 1 if param else 0
elif isinstance(param, (collections.Sequence, numpy.ndarray)):
return list(param)
return param
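# For example, _bng_param("ode") returns '"ode"', _bng_param(True) returns 1,
# and _bng_param((0, 100)) returns [0, 100]; other values pass through as-is.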
@abc.abstractmethod
def action(self, action, **kwargs):
"""
Generates code to execute a BNG action command
Parameters
----------
action: string
The name of the BNG action function
kwargs: kwargs, optional
Arguments and values to supply to BNG
"""
return
@classmethod
def _format_action_args(cls, **kwargs):
"""
Formats a set of arguments for BNG
Parameters
----------
kwargs: kwargs, optional
Arguments and values to supply to BNG
"""
if kwargs:
action_args = ','.join('%s=>%s' % (k, BngConsole._bng_param(v))
for k, v in kwargs.items())
else:
action_args = ''
return action_args
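# For example, _format_action_args(method="ode", t_end=100) returns
# 'method=>"ode",t_end=>100', which BngConsole.action() below wraps into
# 'action simulate({method=>"ode",t_end=>100})'.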
@property
def base_filename(self):
"""
Returns the base filename (without extension) for BNG output files
"""
return os.path.join(self.base_directory, self._base_file_stem)
@property
def bng_filename(self):
"""
Returns the BNG command list (.bngl) filename (does not check
whether the file exists)
"""
return self.base_filename + '.bngl'
@property
def net_filename(self):
"""
Returns the BNG network filename (does not check whether the file
exists)
"""
return self.base_filename + '.net'
def read_netfile(self):
"""
Reads a BNG network file as a string. Note that you must execute
network generation separately before attempting this, or the file will
not be found.
:return: Contents of the BNG network file as a string
"""
self._logger.debug('Reading BNG netfile: %s' % self.net_filename)
with open(self.net_filename, 'r') as net_file:
output = net_file.read()
return output
def read_simulation_results(self):
"""
Read the results of a BNG simulation as a numpy array
Returns
-------
numpy.ndarray
Simulation results in a 2D matrix (time on Y axis,
species/observables/expressions on X axis depending on
simulation type)
"""
return self.read_simulation_results_multi([self.base_filename])[0]
@staticmethod
def read_simulation_results_multi(base_filenames):
"""
Read the results of multiple BNG simulations
Parameters
----------
base_filenames: list of str
A list of filename stems to read simulation results in from,
including the full path but not including any file extension.
Returns
-------
list of numpy.ndarray
List of simulation results, each in a 2D matrix (time on Y axis,
species/observables/expressions on X axis depending on
simulation type)
"""
list_of_yfulls = []
for base_filename in base_filenames:
names = ['time']
# Read concentrations data
try:
cdat_arr = numpy.loadtxt(base_filename + '.cdat', skiprows=1, ndmin=2)
# -1 for time column
names += ['__s%d' % i for i in range(cdat_arr.shape[1] - 1)]
except IOError:
cdat_arr = None
# Read groups data
try:
with open(base_filename + '.gdat', 'r') as f:
# Exclude \# and time column
names += f.readline().split()[2:]
# Exclude first column (time)
gdat_arr = numpy.loadtxt(f, ndmin=2)
if cdat_arr is None:
cdat_arr = numpy.ndarray((len(gdat_arr), 0))
else:
gdat_arr = gdat_arr[:, 1:]
except IOError:
if cdat_arr is None:
raise BngInterfaceError('Need at least one of .cdat file or '
'.gdat file to read simulation '
'results')
gdat_arr = numpy.ndarray((len(cdat_arr), 0))
yfull_dtype = list(zip(names, itertools.repeat(float)))
yfull = numpy.ndarray(len(cdat_arr), yfull_dtype)
yfull_view = yfull.view(float).reshape(len(yfull), -1)
yfull_view[:, :cdat_arr.shape[1]] = cdat_arr
yfull_view[:, cdat_arr.shape[1]:] = gdat_arr
list_of_yfulls.append(yfull)
return list_of_yfulls
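# Each returned array is structured: yfull['time'] holds the time points and
# the remaining fields ('__s0', ..., plus any observable/expression names read
# from the .gdat header) hold the corresponding trajectories.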
class BngConsole(BngBaseInterface):
""" Interact with BioNetGen through BNG Console """
def __init__(self, model=None, verbose=False, cleanup=True,
output_dir=None, output_prefix=None, timeout=30,
suppress_warnings=False, model_additional_species=None):
super(BngConsole, self).__init__(
model, verbose, cleanup, output_prefix, output_dir,
model_additional_species=model_additional_species
)
try:
import pexpect
except ImportError:
raise ImportError("Library 'pexpect' is required to use "
"BNGConsole, please install it to continue.\n"
"It is not currently available on Windows.")
if suppress_warnings:
warn("suppress_warnings is deprecated and has no effect. Adjust "
"the log level with the verbose argument instead.",
category=DeprecationWarning,
stacklevel=2)
# Generate BNGL file
if self.model:
with open(self.bng_filename, mode='w') as bng_file:
bng_file.write(self.generator.get_content())
# Start BNG Console and load BNGL
bng_path = pf.get_path('bng')
bng_exec_path = '%s --console' % bng_path
if not bng_path.endswith('.bat'):
bng_exec_path = 'perl %s' % bng_exec_path
self.console = pexpect.spawn(bng_exec_path,
cwd=self.base_directory,
timeout=timeout)
self._console_wait()
if self.model:
self.console.sendline('load %s' % self.bng_filename)
self._console_wait()
def __exit__(self, exc_type, exc_val, exc_tb):
"""
In console mode, commands have already been executed, so we simply
close down the console and erase the temporary directory if applicable.
"""
self.console.sendline('done')
self.console.close()
if self.cleanup:
self._delete_tmpdir()
def _console_wait(self):
"""
Wait for BNG console to process the command, and return the output
:return: BNG console output from the previous command
"""
self.console.expect('BNG>')
# Python 3 requires explicit conversion of 'bytes' to 'str'
console_msg = self.console.before.decode('utf-8')
if "ERROR:" in console_msg:
raise BngInterfaceError(console_msg)
elif "WARNING:" in console_msg:
self._logger.warning(console_msg)
else:
self._logger.debug(console_msg)
return console_msg
def generate_network(self, overwrite=False):
"""
Generates a network in BNG and returns the network file contents as
a string
Parameters
----------
overwrite: bool, optional
Overwrite existing network file, if any
"""
self.action('generate_network', overwrite=overwrite)
return self.read_netfile()
def action(self, action, **kwargs):
"""
Generates a BNG action command and executes it through the console,
returning any console output
Parameters
----------
action : string
The name of the BNG action function
kwargs : kwargs, optional
Arguments and values to supply to BNG
"""
# Process BNG arguments into a string
action_args = self._format_action_args(**kwargs)
# Execute the command via the console
if action_args == '':
cmd = 'action %s()' % action
else:
cmd = 'action %s({%s})' % (action, action_args)
self._logger.debug(cmd)
self.console.sendline(cmd)
# Wait for the command to execute and return the result
return self._console_wait()
def load_bngl(self, bngl_file):
"""
Load a BNGL file in the BNG console
Parameters
----------
bngl_file : string
The filename of a .bngl file
"""
cmd = 'load %s' % bngl_file
self._logger.debug(cmd)
self.console.sendline(cmd)
self._console_wait()
self._base_file_stem = os.path.splitext(os.path.basename(bngl_file))[0]
class BngFileInterface(BngBaseInterface):
def __init__(self, model=None, verbose=False, output_dir=None,
output_prefix=None, cleanup=True,
model_additional_species=None,
model_population_maps=None):
super(BngFileInterface, self).__init__(
model, verbose, cleanup, output_prefix, output_dir,
model_additional_species=model_additional_species,
model_population_maps=model_population_maps
)
self._init_command_queue()
def _init_command_queue(self):
"""
Initializes the BNG command queue
"""
self.command_queue = StringIO()
self.command_queue.write('begin actions\n')
def __exit__(self, exc_type, exc_val, exc_tb):
"""
In file interface mode, we close the command queue buffer (whether
or not it's been executed) and erase the temporary directory if
applicable.
"""
self.command_queue.close()
if self.cleanup:
self._delete_tmpdir()
def execute(self, reload_netfile=False, skip_file_actions=True):
"""
Executes all BNG commands in the command queue.
Parameters
----------
reload_netfile: bool or str
If true, attempts to reload an existing .net file from a
previous execute() iteration. If a string, the filename
specified in the string is supplied to BNG's readFile (which can be
any file type BNG supports, such as .net or .bngl).
This is useful for running multiple actions in a row,
where results need to be read into PySB before a new series of
actions is executed.
skip_file_actions: bool
Only used if the previous argument is not False. Set this
argument to True to ignore any actions block in the loaded file.
| |
r"""
This module provides the main preprocessor engine.
"""
import sys
import os
import os.path
import shutil
#import re
import datetime
import importlib
import logging
from pylatexenc import latexwalker
from . import __version__
logger = logging.getLogger(__name__)
from .fixes.builtin.remaining_pragmas import ReportRemainingPragmas
from .fixes.builtin.skip import SkipPragma
from ._lpp_parsing import _LPPLatexWalker #, LatexCodeRecomposer, _LPPParsingState
def get_datetime_now_tzaware():
utc_dt = datetime.datetime.now(datetime.timezone.utc)
return utc_dt.astimezone()
_PROCESSED_BY_HEADING = r"""
% Automatically processed by latexpp v{version} on {today}
% See https://github.com/phfaist/latexpp
""".lstrip()
class _TemporarilySetSysPath:
def __init__(self, dir):
self.dir = dir
def __enter__(self):
self.oldsyspath = sys.path
if self.dir:
sys.path = [self.dir] + sys.path
return self
def __exit__(self, typ, value, traceback):
if self.dir:
sys.path = self.oldsyspath
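# Used in install_fixes_from_config() below so that importlib can resolve fix
# modules that live next to the lppconfig.yml file (i.e. in config_dir).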
class LatexPreprocessor:
r"""
Main preprocessor class.
This class collects together various fixes and applies them to a LaTeX
document or parts of the document.
Arguments:
- `output_dir` is the folder where the resulting processed document should
be placed.
- `main_doc_fname` is the main document that we need to process.
- `main_doc_output_fname` is the name to give to the processed main document
inside the `output_dir` folder.
- `config_dir` is the root directory (when using the command-line `latexpp`
tool, this is where the `lppconfig.yml` resides). Relative paths
specified to some helpers such as :py:meth:`copy_file()` are interpreted
as relative to this directory.
The fixes can be installed directly via a configuration data structure with
:py:meth:`install_fixes_from_config()` (as extracted from a YaML reader from
a `lppconfig.yml` file, for instance), or fix instances can be installed
manually with :py:meth:`install_fix()`.
Initialization tasks should be run by calling :py:meth:`initialize()` after
all fixes have been installed, but before :py:meth:`execute_main()` (or
friends) are called.
The actual processing is performed by calling one of
:py:meth:`execute_main()`, :py:meth:`execute_file()`, or
:py:meth:`execute_string()`. These parse the corresponding LaTeX code into
nodes and runs all fixes.
After calling the `execute_*()` methods as required, you should call
:py:meth:`finalize()` to finish the processing and carry out final tasks
that the fixes need to do at the end. You'll also get a warning for files
that are in the output directory but that weren't generated by `latexpp`,
etc.
This preprocessor class also exposes several methods that are intended for
individual fixes' convenience. These are :py:meth:`make_latex_walker()`,
:py:meth:`create_subpreprocessor()`, :py:meth:`check_autofile_up_to_date()`,
:py:meth:`register_output_file()`, :py:meth:`copy_file()` and
:py:meth:`open_file()`. See their doc below.
Attributes:
.. py:attribute:: parent_preprocessor
This attribute is used for sub-preprocessors. See
:py:meth:`create_subpreprocessor()`.
Methods:
"""
def __init__(self, *,
output_dir='_latexpp_output',
main_doc_fname=None,
main_doc_output_fname=None,
config_dir=None):
super().__init__()
self.output_dir = os.path.realpath(os.path.abspath(output_dir))
self.main_doc_fname = main_doc_fname
self.main_doc_output_fname = main_doc_output_fname
# directory relative to which to search for custom python fixes:
self.config_dir = config_dir
# version of output_dir for displaying purposes
self.display_output_dir = output_dir.rstrip('/') + '/'
self.latex_context = latexwalker.get_default_latex_context_db()
self.latex_context.add_context_category('latexpp-categories-marker-end',
macros=[], prepend=True)
self.fixes = []
self.initialized = False
self.output_files = []
self.omit_processed_by = False
self.add_preamble_comment_start = '\n%%%\n'
self.add_preamble_comment_end = '\n%%%\n'
# set to non-None if this is a sub-preprocessor of a main preprocessor
self.parent_preprocessor = None
def install_fix(self, fix, *, prepend=False):
r"""
Register the given fix instance to be run after (respectively before if
`prepend=True`) the existing list of fixes.
The type of `fix` must be a subclass of
:py:class:`latexpp.fix.BaseFix`.
"""
# sanity check -- make sure custom fix classes don't forget to call
# their superclass constructor.
if not getattr(fix, '_basefix_constr_called', False):
raise RuntimeError("Fix class {}.{} does not call its superclass constructor"
.format(fix.__class__.__module__, fix.__class__.__name__))
if prepend:
self.fixes.insert(0, fix)
else:
self.fixes.append(fix)
fix.set_lpp(self)
def install_fixes_from_config(self, lppconfig_fixes):
r"""
Load all the fixes from the given configuration data structure. The
`lppconfig_fixes` are a list of dictionaries with keys 'name' and
'config'. It's the same as what you specify in the `lppconfig.yml` in
the `fixes:` configuration.
This automatically calls `install_fix()` for all the loaded fixes.
"""
for fixconfig in lppconfig_fixes:
if isinstance(fixconfig, str):
fixconfig = {'name': fixconfig}
fixname = fixconfig['name']
modname, clsname = fixname.rsplit('.', maxsplit=1)
# allow package to be in current working directory
with _TemporarilySetSysPath(dir=self.config_dir):
mod = importlib.import_module(modname)
if clsname not in mod.__dict__:
raise ValueError("Module ‘%s’ does not provide a class named ‘%s’"%(
modname, clsname))
cls = mod.__dict__[clsname]
self.install_fix(cls(**fixconfig.get('config', {})))
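# Example of a fixes entry handled here (hypothetical fix name, for
# illustration only): the plain string 'mypkg.myfixes.MyFix', or the mapping
#   {'name': 'mypkg.myfixes.MyFix', 'config': {'option': True}}
# which ends up calling install_fix(MyFix(option=True)).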
def initialize(self):
r"""
Perform essential initialization tasks.
Must be called after all fixes are installed, but before
:py:meth:`execute_main()` is called.
"""
logger.debug("initializing preprocessor and fixes")
if not os.path.isdir(self.output_dir):
self._do_ensure_destdir(self.output_dir, self.display_output_dir)
if not self.parent_preprocessor:
self._warn_if_output_dir_nonempty()
for fix in self.fixes:
fix.initialize()
#
# Now check if the fixes have macro/env/specials specs to add. Do this
# after initialize() so that fixes have the opportunity to determine
# what specs they need.
#
for fixn, fix in enumerate(self.fixes):
specs = fix.specs()
if specs:
self.latex_context.add_context_category(
'lppfix{:02d}:{}.{}'.format(fixn, fix.__class__.__module__,
fix.__class__.__name__),
insert_before='latexpp-categories-marker-end',
**specs
)
self.initialized = True
def finalize(self):
r"""
Calls the `finalize()` routine on all fixes. Fixes have the opportunity to
finish up stuff after the document has been processed.
Must be called after :py:meth:`execute_main()` is called.
"""
logger.debug("finalizing preprocessor and fixes")
for fix in self.fixes:
fix.finalize()
if self.parent_preprocessor:
# report other new files
self.parent_preprocessor.output_files += self.output_files
else:
# produce a warning for alien files in output directory
self._warn_alien_files()
def _warn_alien_files(self):
r"""
Check for any files that are in the output directory but that haven't been
generated by us.
"""
our_files_norm = [
os.path.relpath(os.path.realpath(os.path.join(self.output_dir, x)),
self.output_dir)
for x in self.output_files
] # in case output_files has a structure with symlinks, canonicalize
# paths relative to output_dir
logger.debug("Our output files are: %r", our_files_norm)
alien_files = []
for (dirpath, dirnames, filenames) in self._os_walk_output_dir():
for fn in filenames:
ofn = os.path.relpath(os.path.join(dirpath, fn), self.output_dir)
if ofn not in our_files_norm:
alien_files.append(ofn)
if alien_files:
logger.warning("The following files were found in the output directory, "
"but they were not generated by latexpp:\n%s\n",
"\n".join(' {}'.format(x) for x in alien_files))
def execute_main(self):
r"""
Main execution routine. Call this to process the main document with all our
installed fixes.
"""
self.execute_file(self.main_doc_fname,
output_fname=self.main_doc_output_fname)
def _resolve_source_fname(self, fname):
if self.config_dir:
return os.path.join(self.config_dir, fname)
return fname
def execute_file(self, fname, *, output_fname, omit_processed_by=False):
r"""
Process an input file named `fname`, apply all the fixes, and write the
output to `output_fname`. The output file name `output_fname` is
relative to the output directory.
Unless `omit_processed_by` is set to `True`, the output file will start
with a brief comment stating that it was the result of preprocessing by
*latexpp*.
"""
with open(self._resolve_source_fname(fname), 'r') as f:
s = f.read()
outdata = self.execute_string(s, input_source='file ‘{}’'.format(fname))
self.register_output_file(output_fname)
with open(os.path.join(self.output_dir, output_fname), 'w') as f:
f.write(outdata)
def execute_string(self, s, *, pos=0, input_source=None, omit_processed_by=False):
r"""
Parse the string `s` as LaTeX code, apply all installed fixes, and return
the preprocessed LaTeX code.
The `input_source` argument is a short descriptive string of the source
of the LaTeX content for error messages (e.g., the file name).
Unless `omit_processed_by` is set to `True`, the output file will start
with a brief comment stating that it was the result of preprocessing by
*latexpp*.
"""
if self.omit_processed_by:
omit_processed_by = True
lw = self.make_latex_walker(s)
try:
(nodelist, pos, len_) = lw.get_latex_nodes(pos=pos)
except latexwalker.LatexWalkerParseError as e:
if input_source and not e.input_source:
e.input_source = input_source
raise
newnodelist = self.preprocess(nodelist)
newstr = ''.join(n.to_latex() for n in newnodelist)
if not omit_processed_by:
return (
_PROCESSED_BY_HEADING.format(
version=__version__,
today=get_datetime_now_tzaware().strftime("%a, %d-%b-%Y %H:%M:%S %Z%z")
)
+ newstr
)
return newstr
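# Unless omitted, the returned string is prefixed with the
# _PROCESSED_BY_HEADING comment defined above, e.g.:
#   % Automatically processed by latexpp v<version> on <date>
#   % See https://github.com/phfaist/latexpp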
def preprocess(self, nodelist):
r"""
Run all the installed fixes on the given list of nodes `nodelist`.
"""
if not self.initialized:
raise RuntimeError("You forgot to call LatexPreprocessor.initialize()")
newnodelist = list(nodelist)
#
# Execute %%!lpp skip pragmas as a built-in fix before all other fixes
#
skip_pragma_fix = SkipPragma()
skip_pragma_fix.set_lpp(self)
newnodelist = skip_pragma_fix.preprocess(newnodelist)
#
# do add_preamble if necessary
#
for j in range(len(newnodelist)):
n = newnodelist[j]
if n is not None and n.isNodeType(latexwalker.LatexEnvironmentNode) \
and n.environmentname == 'document':
# here is where we should insert preamble instructions.
add_preamble = ''
for fix in self.fixes:
p = fix.add_preamble()
if p:
add_preamble += p
if not add_preamble.strip():
# no preamble to add, all ok
break
add_preamble = self.add_preamble_comment_start + add_preamble + \
self.add_preamble_comment_end
# and insert preamble before document. TODO: mark nodes with
# "lpp_ignore" to inhibit further processing; see TODO below.
try:
lw = self.make_latex_walker(add_preamble)
preamble_nodes = lw.get_latex_nodes()[0]
except latexwalker.LatexWalkerParseError as e:
logger.error("Internal error: can't parse latex code that "
"fixes want to include:\n%r\n%s", s, e)
raise
newnodelist[j:j] | |
"when calling `create_manual_journal_history_record`"
)
# verify the required parameter 'history_records' is set
if history_records is None:
raise ValueError(
"Missing the required parameter `history_records` "
"when calling `create_manual_journal_history_record`"
)
collection_formats = {}
path_params = {
"ManualJournalID": manual_journal_id,
}
query_params = []
header_params = {
"xero-tenant-id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = history_records
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# HTTP header `Content-Type`
header_params["Content-Type"] = self.api_client.select_header_content_type(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/ManualJournals/{ManualJournalID}/History")
try:
return self.api_client.call_api(
url,
"PUT",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="HistoryRecords",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(
error, self, "create_manual_journal_history_record"
)
def create_manual_journals(
self,
xero_tenant_id,
manual_journals,
summarize_errors=empty,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Creates one or more manual journals # noqa: E501
OAuth2 scope: accounting.transactions
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param ManualJournals manual_journals: ManualJournals array with ManualJournal object in body of request (required)
:param bool summarize_errors: If false return 200 OK and mix of successfully created objects and any with validation errors
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: ManualJournals
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `create_manual_journals`"
)
# verify the required parameter 'manual_journals' is set
if manual_journals is None:
raise ValueError(
"Missing the required parameter `manual_journals` "
"when calling `create_manual_journals`"
)
collection_formats = {}
path_params = {}
query_params = []
if summarize_errors is not empty:
query_params.append(("summarizeErrors", summarize_errors))
header_params = {
"xero-tenant-id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = manual_journals
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# HTTP header `Content-Type`
header_params["Content-Type"] = self.api_client.select_header_content_type(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/ManualJournals")
try:
return self.api_client.call_api(
url,
"PUT",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="ManualJournals",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "create_manual_journals")
def create_overpayment_allocations(
self,
xero_tenant_id,
overpayment_id,
allocations,
summarize_errors=empty,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Creates a single allocation for a specific overpayment # noqa: E501
OAuth2 scope: accounting.transactions
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str overpayment_id: Unique identifier for an Overpayment (required)
:param Allocations allocations: Allocations array with Allocation object in body of request (required)
:param bool summarize_errors: If false return 200 OK and mix of successfully created objects and any with validation errors
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: Allocations
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `create_overpayment_allocations`"
)
# verify the required parameter 'overpayment_id' is set
if overpayment_id is None:
raise ValueError(
"Missing the required parameter `overpayment_id` "
"when calling `create_overpayment_allocations`"
)
# verify the required parameter 'allocations' is set
if allocations is None:
raise ValueError(
"Missing the required parameter `allocations` "
"when calling `create_overpayment_allocations`"
)
collection_formats = {}
path_params = {
"OverpaymentID": overpayment_id,
}
query_params = []
if summarize_errors is not empty:
query_params.append(("summarizeErrors", summarize_errors))
header_params = {
"xero-tenant-id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = allocations
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# HTTP header `Content-Type`
header_params["Content-Type"] = self.api_client.select_header_content_type(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Overpayments/{OverpaymentID}/Allocations")
try:
return self.api_client.call_api(
url,
"PUT",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="Allocations",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(
error, self, "create_overpayment_allocations"
)
def create_overpayment_history(
self,
xero_tenant_id,
overpayment_id,
history_records,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Creates a history record for a specific overpayment # noqa: E501
OAuth2 scope: accounting.transactions
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str overpayment_id: Unique identifier for an Overpayment (required)
:param HistoryRecords history_records: HistoryRecords containing an array of HistoryRecord objects in body of request (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: HistoryRecords
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `create_overpayment_history`"
)
# verify the required parameter 'overpayment_id' is set
if overpayment_id is None:
raise ValueError(
"Missing the required parameter `overpayment_id` "
"when calling `create_overpayment_history`"
)
# verify the required parameter 'history_records' is set
if history_records is None:
raise ValueError(
"Missing the required parameter `history_records` "
"when calling `create_overpayment_history`"
)
collection_formats = {}
path_params = {
"OverpaymentID": overpayment_id,
}
query_params = []
header_params = {
"xero-tenant-id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = history_records
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# HTTP header `Content-Type`
header_params["Content-Type"] = self.api_client.select_header_content_type(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Overpayments/{OverpaymentID}/History")
try:
return self.api_client.call_api(
url,
"PUT",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="HistoryRecords",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "create_overpayment_history")
def create_payment(
self,
xero_tenant_id,
payment,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Creates a single payment for invoice or credit notes # noqa: E501
OAuth2 scope: accounting.transactions
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param Payment payment: Request body with a single Payment object (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: Payments
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `create_payment`"
)
# verify the required parameter 'payment' is set
if payment is None:
raise ValueError(
"Missing the required parameter `payment` "
"when calling `create_payment`"
)
collection_formats = {}
path_params = {}
query_params = []
header_params = {
"xero-tenant-id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = payment
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# HTTP header `Content-Type`
header_params["Content-Type"] = self.api_client.select_header_content_type(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Payments")
try:
return self.api_client.call_api(
url,
"POST",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="Payments",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "create_payment")
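# Usage sketch (assumes an already-configured instance of this generated API
# class and a Payment model object; names are illustrative, not verified):
#   payments = accounting_api.create_payment(xero_tenant_id, payment)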
def create_payment_history(
self,
xero_tenant_id,
payment_id,
history_records,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Creates a history record for a specific payment # noqa: E501
OAuth2 scope: accounting.transactions
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str payment_id: Unique identifier for a Payment (required)
:param HistoryRecords history_records: HistoryRecords containing an array of HistoryRecord objects in body of request (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: HistoryRecords
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `create_payment_history`"
)
# verify the required parameter 'payment_id' is set
if payment_id is None:
raise ValueError(
"Missing the required parameter `payment_id` "
"when calling `create_payment_history`"
)
# verify the required parameter 'history_records' is set
if history_records is None:
raise ValueError(
"Missing the required parameter `history_records` "
"when calling `create_payment_history`"
)
collection_formats = {}
path_params = {
"PaymentID": payment_id,
}
query_params = []
header_params = {
"xero-tenant-id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = history_records
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# HTTP header `Content-Type`
header_params["Content-Type"] = self.api_client.select_header_content_type(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Payments/{PaymentID}/History")
try:
return self.api_client.call_api(
url,
"PUT",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="HistoryRecords",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "create_payment_history")
def create_payment_service(
self,
xero_tenant_id,
payment_services,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Creates a payment service # noqa: E501
OAuth2 scope: paymentservices
:param str
import torch
import sys, math
import numpy as np
import torch.jit as jit
import warnings
import torch.nn.init as init
import torch.nn as nn
from distutils.util import strtobool
from models.base import Model
from models.utils import *
from models.ssm.ssm import TransitionFunction
from models.ssm.inference import RNN_STInf, Attention_STInf
from models.iefs.gated import GatedTransition
from models.iefs.moe import MofE
from pyro.distributions import Normal, Independent, Categorical, LogNormal, LowRankMultivariateNormal
from typing import List, Tuple
from torch import Tensor
from collections import namedtuple
from torch.autograd import Variable
from argparse import ArgumentParser
class SDMM_InferenceNetwork(nn.Module):
def __init__(self, dim_base, dim_data, dim_treat, dim_hidden, dim_stochastic, dim_subtype, post_approx = 'diag', \
rank = 5, use_bn = False, nl = 'tanh', combiner_type = 'standard', bidirectional = False):
super(SDMM_InferenceNetwork, self).__init__()
self.dim_base = dim_base
self.dim_data = dim_data
self.dim_treat = dim_treat
self.dim_hidden = dim_hidden
self.dim_stochastic = dim_stochastic
self.dim_subtype = dim_subtype
self.use_bn = use_bn
if self.use_bn:
print ('using bn in inf. network')
self.bn = nn.LayerNorm(dim_hidden, elementwise_affine=False)
if nl == 'relu':
print ('using relu in inf. network')
self.nonlinearity = torch.relu
else:
print ('using tanh in inf. network')
self.nonlinearity = torch.tanh
self.inf_rnn = nn.GRU(dim_data+dim_treat, dim_hidden, 1, batch_first = True, bidirectional=bidirectional)
self.hid_zg_b = nn.Linear(dim_base+dim_data+dim_treat, dim_hidden)
self.base_h1 = nn.Linear(dim_base+dim_data+dim_treat, dim_hidden)
# combiner type, posterior approximation type, and rank
self.combiner_type = combiner_type
self.post_approx= post_approx
self.rank = rank
if bidirectional:
self.hid_zg = nn.Linear(dim_hidden*2, dim_hidden)
self.hid_rnn_zt = nn.Linear(dim_hidden*2, dim_hidden)
else:
self.hid_zg = nn.Linear(dim_hidden, dim_hidden)
self.hid_rnn_zt = nn.Linear(dim_hidden, dim_hidden)
self.mu_zg = nn.Linear(dim_hidden, dim_subtype)
self.sigma_zg = nn.Linear(dim_hidden, dim_subtype)
self.hid_zg_zt = nn.Linear(dim_subtype, dim_hidden)
self.hid_ztm1_zt= nn.Linear(dim_stochastic, dim_hidden)
if self.combiner_type == 'standard' or self.combiner_type == 'masked':
self.mu_z1 = nn.Linear(dim_hidden, dim_stochastic)
self.mu_zt = nn.Linear(dim_hidden, dim_stochastic)
if self.post_approx == 'diag':
self.sigma_z1 = nn.Linear(dim_hidden, dim_stochastic)
self.sigma_zt = nn.Linear(dim_hidden, dim_stochastic)
elif self.post_approx == 'low_rank':
self.sigma_z1 = nn.Linear(dim_hidden, (dim_stochastic*rank)+dim_stochastic)
self.sigma_zt = nn.Linear(dim_hidden, (dim_stochastic*rank)+dim_stochastic)
else:
raise ValueError('bad setting for post_approx:'+str(post_approx))
elif self.combiner_type == 'pog':
assert self.post_approx == 'diag','bad post_approx'
self.mu_zt = nn.Linear(dim_hidden, dim_stochastic)
self.sigma_zt = nn.Linear(dim_hidden, dim_stochastic)
self.mu_zt2 = nn.Linear(dim_hidden, dim_stochastic)
self.sigma_zt2 = nn.Linear(dim_hidden, dim_stochastic)
self.mu_zt3 = nn.Linear(dim_hidden, dim_stochastic)
self.sigma_zt3 = nn.Linear(dim_hidden, dim_stochastic)
else:
raise ValueError('Bad assignment to inference_type')
def reparam_dist(self, mu, sigma):
if self.post_approx == 'diag':
dist = Independent(Normal(mu, sigma), 1)
elif self.post_approx == 'low_rank':
if sigma.dim()==2:
W = sigma[...,self.dim_stochastic:].view(sigma.shape[0], self.dim_stochastic, self.rank)
elif sigma.dim()==3:
W = sigma[...,self.dim_stochastic:].view(sigma.shape[0], sigma.shape[1], self.dim_stochastic, self.rank)
else:
raise NotImplementedError()
D = sigma[...,:self.dim_stochastic]
dist = LowRankMultivariateNormal(mu, W, D)
else:
raise ValueError('should not be here')
return torch.squeeze(dist.rsample((1,))), dist
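# The pogN helper below implements a product-of-Gaussians combination: for
# diagonal Gaussians N(mu_i, sigma_i^2), the renormalised product is again
# Gaussian, with
#     1 / sigma^2 = sum_i 1 / sigma_i^2          (precisions add)
#     mu          = sigma^2 * sum_i mu_i / sigma_i^2
# The code computes the same quantities via products of the other variances,
# which avoids dividing by each sigma_i^2 explicitly.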
def pogN(self, muN, sigN):
sigsqN = [sig.pow(2)+1e-8 for sig in sigN]
sigsqNm1 = [np.prod(sigsqN[:i] + sigsqN[i+1:]) for i,sig in enumerate(sigsqN)]
sigmasq = np.prod(sigsqN) / np.sum(sigsqNm1)
muDsigsq = [muN[i] / sigsqN[i] for i in range(len(muN))]
mu = (np.sum(muDsigsq))*sigmasq
sigma = sigmasq.pow(0.5)
return mu, sigma
def combiner_fxn(self, prev_hid, current_hid, rnn_mask, mu1fxn, sig1fxn, mu2fxn = None, \
sig2fxn = None, mu3fxn = None, sig3fxn = None, global_hid = None):
if self.combiner_type =='standard' or self.combiner_type == 'masked':
if self.combiner_type == 'standard':
if global_hid is not None:
out = 1/3.*(prev_hid+current_hid+global_hid)
else:
out = 0.5*(prev_hid+current_hid)
else:
if global_hid is not None:
out = rnn_mask*(1/3.*(prev_hid+current_hid+global_hid)) + (1-rnn_mask)*prev_hid
else:
out = rnn_mask*(0.5*(prev_hid+current_hid)) + (1-rnn_mask)*prev_hid
if self.use_bn:
h1 = self.nonlinearity(self.bn(out))
else:
h1 = self.nonlinearity(out)
mu, sigma = mu1fxn(h1), torch.nn.functional.softplus(sig1fxn(h1))
elif self.combiner_type == 'pog':
if self.use_bn:
h1 = self.nonlinearity(self.bn(prev_hid))
h2 = self.nonlinearity(self.bn(current_hid))
if global_hid is not None:
h3 = self.nonlinearity(self.bn(global_hid))
else:
h1 = self.nonlinearity(prev_hid)
h2 = self.nonlinearity(current_hid)
if global_hid is not None:
h3 = self.nonlinearity(global_hid)
mu1, sig1 = mu1fxn(h1), torch.nn.functional.softplus(sig1fxn(h1))
mu2, sig2 = mu2fxn(h2), torch.nn.functional.softplus(sig2fxn(h2))
muN = [mu1, mu2]; sigN = [sig1, sig2]
if global_hid is not None:
mu3, sig3 = mu3fxn(h3), torch.nn.functional.softplus(sig3fxn(h3))
muN.append(mu3); sigN.append(sig3)
mu, sigma = self.pogN(muN, sigN)
else:
raise ValueError('bad combiner type')
return mu, sigma
def forward(self, x, a, m, b):
rnn_mask = (m[:,1:].sum(-1)>1)*1.
inp = torch.cat([x[:,1:,:], a[:,:-1,:]], -1)
m_t, _, lens = get_masks(m[:,1:,:])
pdseq = torch.nn.utils.rnn.pack_padded_sequence(inp, lens, batch_first=True, enforce_sorted = False)
out_pd, _ = self.inf_rnn(pdseq)
out, _ = torch.nn.utils.rnn.pad_packed_sequence(out_pd, batch_first=True)
# Infer global latent variable
hid_zg = torch.tanh(self.hid_zg(out).sum(1)/lens[...,None] + self.hid_zg_b(torch.cat([b, x[:,0,:], a[:,0,:]],-1)))
zg_mu = self.mu_zg(hid_zg)
zg_sigma = torch.nn.functional.softplus(self.sigma_zg(hid_zg))
q_zg = Independent(Normal(zg_mu, zg_sigma), 1)
Z_g = torch.squeeze(q_zg.rsample((1,)))
# Infer per-time-step variables in the DMM
hid_zg_zt = self.hid_zg_zt(Z_g)
hid_rnn_zt = self.hid_rnn_zt(out)
hid_base = self.base_h1(torch.cat([x[:,0,:], b, a[:,0,:]],-1)) ## test this out
if self.combiner_type == 'standard' or self.combiner_type == 'masked':
mu, sigma = self.combiner_fxn(hid_base, hid_rnn_zt[:,0,:], rnn_mask[:,[0]], self.mu_z1, self.sigma_z1, global_hid=hid_zg_zt) # change to self.mu_zt, self.sigma_zt if necessary
else:
mu, sigma = self.combiner_fxn(hid_base, hid_rnn_zt[:,0,:], rnn_mask[:,[0]], self.mu_zt, self.sigma_zt, \
self.mu_zt2, self.sigma_zt2, self.mu_zt3, self.sigma_zt3, global_hid=hid_zg_zt)
z, _ = self.reparam_dist(mu, sigma)
meanlist = [mu[:,None,:]]
sigmalist= [sigma[:,None,:]]
zlist = [z[:,None,:]]
for t in range(1, out.shape[1]):
ztm1 = torch.squeeze(zlist[t-1])
hid_ztm1_zt= self.hid_ztm1_zt(ztm1)
if self.combiner_type == 'standard' or self.combiner_type == 'masked':
mu, sigma = self.combiner_fxn(hid_ztm1_zt, hid_rnn_zt[:,t,:], rnn_mask[:,[t]], self.mu_zt, self.sigma_zt, global_hid = hid_zg_zt)
else:
mu, sigma = self.combiner_fxn(hid_ztm1_zt, hid_rnn_zt[:,t,:], rnn_mask[:,[t]], self.mu_zt, self.sigma_zt, \
self.mu_zt2, self.sigma_zt2, self.mu_zt3, self.sigma_zt3, global_hid = hid_zg_zt)
z, _ = self.reparam_dist(mu, sigma)
meanlist += [mu[:,None,:]]
sigmalist += [sigma[:,None,:]]
zlist += [z[:,None,:]]
# q_zt = Independent(Normal(torch.cat(meanlist, 1), torch.cat(sigmalist, 1)), 1)
_,q_zt = self.reparam_dist(torch.cat(meanlist, 1), torch.cat(sigmalist, 1))
Z_t = torch.cat(zlist, 1)
return Z_g, q_zg, Z_t, q_zt
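# Rough shape conventions assumed by SDMM_InferenceNetwork.forward, inferred
# from the indexing above (a sketch rather than a guaranteed contract):
#   x: (batch, T, dim_data)    longitudinal observations
#   a: (batch, T, dim_treat)   treatments / interventions
#   m: (batch, T, dim_data)    observation mask, used to derive sequence lengths
#   b: (batch, dim_base)       static baseline covariates
# forward returns a sample and posterior for the global latent (Z_g, q_zg) and
# per-time-step samples Z_t of roughly shape (batch, T-1, dim_stochastic) with q_zt.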
class TransitionFxnSDMM(TransitionFunction):
def __init__(self, dim_stochastic, dim_subtype, dim_data, dim_treat, dim_hidden, ttype, dim_base=0, augmented=False, alpha1_type='linear', otype='linear', add_stochastic=False):
super(TransitionFxnSDMM, self).__init__(dim_stochastic, dim_data, dim_treat, dim_hidden, ttype, augmented)
self.augmented = augmented
dim_treat = dim_treat + dim_base
if self.augmented:
dim_input = dim_stochastic+dim_data
else:
dim_input = dim_stochastic
pre_mu_sig = ['lin', 'contractive', 'monotonic', 'logcell', 'logcellkill', 'treatment_exp', 'gated', 'syn_trt']
if self.ttype in pre_mu_sig:
self.pre_t_mu = nn.Linear(dim_subtype, dim_stochastic)
self.pre_t_sigma = nn.Linear(dim_subtype, dim_stochastic)
if self.ttype == 'lin':
self.t_mu = nn.Linear(dim_input+dim_subtype+dim_treat, dim_stochastic)
self.t_sigma = nn.Linear(dim_input+dim_subtype+dim_treat, dim_stochastic)
elif self.ttype == 'nl':
self.pre_t_mu = nn.Sequential(nn.Linear(dim_subtype, dim_hidden), nn.ReLU(True), nn.Linear(dim_hidden, dim_stochastic))
self.pre_t_sigma = nn.Sequential(nn.Linear(dim_subtype, dim_hidden), nn.ReLU(True), nn.Linear(dim_hidden, dim_stochastic))
tmodel = nn.Sequential(nn.Linear(dim_input+dim_subtype+dim_treat, dim_hidden),nn.ReLU(True))
self.t_mu = nn.Sequential(tmodel, nn.Linear(dim_hidden, dim_stochastic))
self.t_sigma = nn.Sequential(tmodel, nn.Linear(dim_hidden, dim_stochastic))
elif self.ttype == 'monotonic':
self.t_mu = MonotonicLayer(dim_stochastic, dim_treat, sign='positive')
self.t_sigma = nn.Linear(dim_stochastic+dim_treat, dim_stochastic)
elif self.ttype == 'logcell':
self.t_mu = LogCellTransition(dim_stochastic, dim_treat)
self.t_sigma = nn.Linear(dim_stochastic+dim_treat, dim_stochastic)
elif 'logcellkill' in self.ttype:
self.t_mu = LogCellKill(dim_stochastic, dim_treat, mtype=self.ttype)
self.t_sigma = nn.Linear(dim_stochastic+dim_treat, dim_stochastic)
elif self.ttype=='treatment_exp':
self.t_mu = TreatmentExponential(dim_stochastic, dim_treat)
self.t_sigma = nn.Linear(dim_stochastic+dim_treat, dim_stochastic)
elif self.ttype=='gated':
avoid_init = False
if dim_data != 16:
avoid_init = True
self.t_mu = GatedTransition(dim_stochastic, dim_treat, dim_hidden=dim_hidden, dim_subtype=dim_subtype, \
dim_input=dim_input+dim_subtype+dim_treat, avoid_init = avoid_init, otype=otype, alpha1_type=alpha1_type, add_stochastic=add_stochastic)
tmodel = nn.Sequential(nn.Linear(dim_input+dim_subtype+dim_treat, dim_hidden),nn.ReLU(True))
self.t_sigma = nn.Sequential(tmodel, nn.Linear(dim_hidden, dim_stochastic))
# self.t_sigma = nn.Linear(dim_subtype+dim_input+dim_treat, dim_stochastic)
elif self.ttype=='debug':
self.t_mu = Debug(dim_stochastic, dim_treat, dim_base)
self.t_sigma = nn.Linear(dim_stochastic+dim_treat, dim_stochastic)
elif self.ttype=='syn_trt':
self.t_mu = SyntheticTrtTransition(dim_stochastic, dim_treat)
self.t_sigma = nn.Linear(dim_stochastic+dim_treat, dim_stochastic)
else:
raise ValueError('bad ttype')
def apply(self, fxn, z, u, eps=0.):
if any(name in fxn.__class__.__name__ for name in ('Monotonic', 'LogCellTransition', 'LogCellKill', 'TreatmentExp', 'GatedTransition', 'Synthetic')):
return fxn(z, u)
else:
return fxn(z)
def get_prior_global(self):
return self.pre_t_mu, self.pre_t_sigma
class SDMM(Model):
def __init__(self, trial, **kwargs):
super(SDMM, self).__init__(trial)
self.save_hyperparameters()
def init_model(self):
dim_subtype = self.trial.suggest_categorical('dim_subtype',[4,16,48])
dim_stochastic = self.trial.suggest_int('dim_stochastic',16,128)
dim_hidden = self.trial.suggest_int('dim_hidden',100,500)
dim_base = self.hparams.dim_base
dim_data = self.hparams.dim_data
dim_treat = self.hparams.dim_treat
ttype = self.hparams.ttype
etype = self.hparams.etype
inftype = self.hparams.inftype
post_approx = self.hparams.post_approx
self.include_baseline= self.hparams.include_baseline
self.elbo_samples = self.hparams.elbo_samples
self.augmented = self.hparams.augmented
self.fixed_var = None
alpha1_type = self.hparams.alpha1_type
otype = self.hparams.otype
add_stochastic = self.hparams.add_stochastic
rank = self.hparams.rank; combiner_type = self.hparams.combiner_type
# Inference network
if inftype == 'rnn':
self.inf_network = SDMM_InferenceNetwork(dim_base, dim_data, dim_treat, dim_hidden, dim_stochastic, dim_subtype, \
post_approx = post_approx, rank = rank, combiner_type = combiner_type)
elif inftype=='rnn_bn':
self.inf_network = SDMM_InferenceNetwork(dim_base, dim_data, dim_treat, dim_hidden, dim_stochastic, dim_subtype, \
post_approx = post_approx, rank = rank, use_bn = True, combiner_type = combiner_type)
elif inftype=='rnn_relu':
self.inf_network = SDMM_InferenceNetwork(dim_base, dim_data, dim_treat, dim_hidden, dim_stochastic, dim_subtype, \
post_approx = post_approx, rank = rank, nl = 'relu', combiner_type = combiner_type)
elif inftype=='rnn_relu_bi':
self.inf_network = SDMM_InferenceNetwork(dim_base, dim_data, dim_treat, dim_hidden, dim_stochastic, dim_subtype, \
post_approx = post_approx, rank = rank, nl = 'relu', combiner_type = combiner_type, bidirectional=True)
else:
raise ValueError('Bad inference type')
# Emission function
if etype == 'lin':
self.e_mu = nn.Linear(dim_stochastic+dim_subtype, dim_data)
self.e_sigma= nn.Linear(dim_stochastic+dim_subtype, dim_data)
elif etype == 'nl':
emodel = nn.Sequential(nn.Linear(dim_stochastic+dim_subtype, dim_hidden), nn.ReLU(True))
self.e_mu = nn.Sequential(emodel, nn.Linear(dim_hidden, dim_data))
self.e_sigma= nn.Sequential(emodel, nn.Linear(dim_hidden, dim_data))
elif etype == 'identity':
self.e_mu = nn.Sequential()
self.e_sigma
# -- coding: UTF-8 --
from __future__ import print_function, division
import bisect
# coding=utf8
# pi = 3.14
# radius = float(raw_input("radius:"))
# area = pi * radius ** 2
# print area
# input = raw_input()
# print input * 3
# weight = float(raw_input('weight:'))
# height = float(raw_input('height:'))
# bmi = weight / height ** 2
#
# print format(bmi, '.2f')
# userInput = int(raw_input())
#
# hour = userInput / 60 ** 2
# minites = userInput / 60 - hour * 60
# seconds = userInput % 60
#
# print hour, minites, seconds
#
# import math
#
# a = float(raw_input())
# b = float(raw_input())
# c = float(raw_input())
#
# C = math.acos((a ** 2 + b ** 2 - c ** 2) / (2 * a * b))
#
# print format(C * 180 / math.pi, '.1f')
#
# i = 0
# i = (i + 1000) * (1 + 0.047)
# i = (i + 1000) * (1 + 0.047)
# i = (i + 1000) * (1 + 0.047)
# i = (i + 1000) * (1 + 0.047)
# i = (i + 1000) * (1 + 0.047)
# i = (i + 1000) * (1 + 0.047)
# i = (i + 1000) * (1 + 0.047)
# i = (i + 1000) * (1 + 0.047)
# i = (i + 1000) * (1 + 0.047)
# i = (i + 1000) * (1 + 0.047)
#
# print i
# points = int(raw_input("Points ahead: "))
# is_in_control = raw_input("Is the leading team in possession (Y/N): ")
# last_seconds = int(raw_input("Seconds remaining in the game: "))
#
# points -= 3
#
# if is_in_control == 'Y':
# points += 0.5
# else:
# points -= 0.5
#
# if points < 0:
# points = 0
#
#
# points = points ** 2
#
# if points > last_seconds:
# print "safe"
# else:
# print "unsafe"
#
#
# n = 5
# while n > 0:
# n -= 1
# if n < 3:
# break
# else:
# print n
#
# print n
#
# e = 1
# factorial = 1
# for i in range(1, 10):
# factorial *= i
# e += 1.0 / factorial
#
# print e
#
#
#
#
# pi = 0;
#
# for i in range(1,1000000):
# pi += (-1.0)**(i + 1) / (2 * i - 1)
#
# pi *= 4;
#
# print pi
#
#
# count = 0
# for i in range(100 ,1000):
# if not i % 17:
# count += 1
#
# print count
#
# n = 6
#
# while n != 1 :
# if n % 2 == 0 :
# n /= 2
# else:
# n = 3 * n + 1
# print n,
# for i in range(1 ,10):
# for j in range(1 ,10):
# if i >= j:
# print format(i * j, '3'),
# print
# while True:
# for x in range(6):
# y = 2 * x + 1
# if y > 9:
# break
# import math
#
# n = int(raw_input())
# sum = 0
#
# def is_prime(num):
# if num <= 1:
# return False
#
# for i in range(2, int(math.sqrt(num)) + 1):
# if num % i == 0:
# return False
# return True
#
#
# for i in range(n):
# if is_prime(i):
# sum += i
# print sum
# def is_runniam(year):
# if (year % 4 == 0 and year % 100 != 0) or year % 400 == 0:
# return True
# return False
#
# def days_per_month(month,year):
# if month == 0:
# year -= 1
# month = 12
# if month == 1 \
# or month == 3 \
# or month == 5 \
# or month == 7 \
# or month == 8 \
# or month == 10 \
# or month == 12:
# return 31
# elif month == 2:
# if is_runniam(year):
# return 29
# return 28
# else:
# return 30
#
# last_day = 6
# count = 0
# for year in range(1990, 2001):
# for month in range(1, 13):
#
# total_days = days_per_month(month - 1,year) + last_day
# if last_day == 0 and days_per_month(month -1 ,year) == 28:
# count += 1
# last_day = 0
# elif total_days == 35:
# count += 1
# last_day = 0
# else:
# if total_days > 35:
# last_day = total_days - 35
# else:
# last_day = total_days - 28
# print count
#
# import calendar
# count = 0
# for year in range(1901,2001):
# for month in range(1,13):
# if calendar.monthcalendar(year,month)[0].index(1) == 6:
# count += 1
# print count
#
# Problem:
# The number 197 is called a circular prime because every rotation of its digits
# (197, 971, 719) is prime. There are 13 such numbers below 100:
# 2, 3, 5, 7, 11, 13, 17, 31, 37, 71, 73, 79, 97.
# Count how many circular primes there are up to an arbitrary positive integer n.
#
# Input format:
# A positive integer n.
#
# Output format:
# The number of circular primes up to n.
#
# Sample input:
# 100
#
# Sample output:
# 13
import math
#
# def shift_number(number):
# if number < 10:
# return number
# temp = number / 10
# shifter = number % 10
# shift_one = shifter * 10 + temp
# return shift_one
#
#
#
# def is_prime(number):
# if number <= 1:
# return False
# for i in range(2 , int(math.sqrt(number)) + 1):
# if number % i == 0:
# return False
# return True
#
# def is_shift_prime(number):
# if number < 10 and is_prime(number):
# return True
#
# real_number = number
# if is_prime(real_number):
#
# for i in range(len(list(str(number))) - 1):
# number = shift_number(number)
# if is_prime(number):
# return True
# return False
#
#
# def counter(number):
# count = 0
# for i in range(1, number+1):
# if is_shift_prime(i):
# count += 1
# return count
#
# n = int(raw_input())
# print counter(n)
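# A compact, runnable Python 3 take on the circular-prime counter described
# above. It follows the same idea as the commented solution, but rotates every
# digit position, so it also works for numbers with more than two digits.
def _is_prime(n):
    if n < 2:
        return False
    for d in range(2, int(math.sqrt(n)) + 1):
        if n % d == 0:
            return False
    return True

def _digit_rotations(n):
    # all cyclic rotations of the decimal digits of n
    s = str(n)
    return [int(s[i:] + s[:i]) for i in range(len(s))]

def count_circular_primes(limit):
    # count numbers up to and including limit whose digit rotations are all prime
    return sum(1 for n in range(2, limit + 1) if all(_is_prime(r) for r in _digit_rotations(n)))

# Example: count_circular_primes(100) returns 13.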
# Strings
# fruit = 'banana'
# letter = fruit[1]
# print letter
# print len(fruit)
# print fruit[-1]
# Iterating over a string
# index = 0
# while index < len(fruit):
# letter = fruit[index]
# print letter
# index += 1
# for char in fruit:
# print char
# prefixes = 'JKLMNOPQ'
# suffix = 'ack'
#
# for letter in prefixes:
# print letter + suffix
# String slicing
# s = '<NAME>'
# print s[0:5]
# print s[6:12]
# The [n:m] operator returns the part of the string from the n-th to the m-th character,
# including the first but excluding the last.
# If you omit the first index the slice starts at 0; if you omit the last it runs to the end of the string.
# If n > m the result is an empty string.
# greeting = 'Hello World'
# new_greeting = 'J' + greeting[1:]
# print new_greeting
# Searching
#
def find(word, letter):
index = 0
while index < len(word):
if word[index] == letter:
return index
index += 1
return -1
# print find('hello', 'm')
# Looping and counting
#
# word = 'banana'
# count = 0
# for letter in word:
# if letter == "a":
# count += 1
# print count
# String methods
#
# word = 'banana'
# new_word = word.upper()
# # print new_word
# index = word.find('a')
# print index
# print word.find('an', 3, 5)
# Arguments: the character (or substring) to find, the start index, and the end index.
# The in operator
# print 'a' in 'banana'
# String comparison
#
# if word == 'banana':
# print 'equal'
# The > and < operators compare strings by ASCII (lexicographic) order.
###############################
#
#
# Case study
#
#
###############################
# Reading the word list
# fin = open('words.txt')
# print fin
# readline reads characters from the file until it reaches a newline and returns the result as a string:
# print fin.readline()
# line = fin.readline()
# word = line.strip()
# print word
# for line in fin:
# word = line.strip()
# print word
#
#
#
# line = fin.readline()
# line = fin.readline()
# print line
# Searching
def has_no_e(word):
for letter in word:
if letter == 'e':
return False
return True
# Returns False as soon as any character of word appears in forbidden, True otherwise.
# (a character-exclusion filter)
def avoids(word, forbidden):
for letter in word:
if letter in forbidden:
return False
return True
# True only if every character of word can be found in available.
# This can be used to reject illegal characters.
def uses_only(word, available):
for letter in word:
if letter not in available:
return False
return True
# Does word contain every character in required?
# Useful for checking that a word includes a required set of characters.
def uses_all(word, required):
for letter in required:
if letter not in word:
return False
return True
# print has_no_e("hello")
# print avoids('hello', 'm')
# print uses_only('hello', 'hello')
# print uses_all('Hello', 'ol')
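# Putting the word filters together (illustrative; assumes a local words.txt
# with one word per line, as in the file-reading example above):
#
#   count = 0
#   for line in open('words.txt'):
#       word = line.strip()
#       if has_no_e(word) and uses_all(word, 'ou'):
#           count += 1
#   print(count)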
# Is the string in non-decreasing (alphabetical) order?
#
# def is_abecedarian(word):
# previous = word[0]
# for c in word:
# if c < previous:
# return False
# previous = c
# return True
#
# print is_abecedarian('abcb')
#
#
# def is_abecedarian(word):
# if len(word) <= 1:
# return True
# if word[0] > word[1]:
# return False
# return is_abecedarian(word[1:])
#
# print is_abecedarian('abcb')
#
#
# def is_abecedarian(word):
# i = 0
# while i < len(word)-1:
# if word[i + 1] < word[i]:
# return False
# i += 1
# return True
#
# print is_abecedarian('abc')
# Lists
# a = 10
# b = 'str'
# lists = [a, b]
# a = 9
# print lists
# List methods
# t = ['a', 'b', 'c']
# t.append('d')
# print t
#
# t2 = ['e' ,'f']
# t.extend(t2)
# print t
# t.sort()
# print t
########
# map, filter, reduce
########
def add_all(t):
total = 0
for x in t:
total += x
return total
t = [1, 2, 3]
# print add_all(t)
# print sum(t)
#
def capitalize_all(t):
res = []
for s in t:
res.append(s.capitalize())
return res
def only_upper(t):
res = []
for s in t:
if s.isupper():
res.append(s)
return res
# print only_upper('asdF')
# The pop method modifies the list and returns the removed element; without an index it removes and returns the last one.
# Use the del operator when you do not need the removed value.
# The remove method deletes an element by value when you do not know its index; only the first occurrence is removed.
# All of the above also work with slices.
# t = ['a', 'b', 'c']
# del t[1]
# print t.pop(1)
# print t
# t = ['a', 'b', 'c', 'a']
# t.remove('a')
# print t
# Use the list() function to convert a str into a list
# s = 'spam'
# t = list(s)
# print t
# Use the split() method to break a string into words
# s = 'pinging for the fjords'
# t = s.split()
# print t
# # Using a delimiter
# s = 'spam-spam-spsm'
# delimiter = '-'
# s.split(delimiter)
# print s.split(delimiter)
#
# # join() is the inverse of split(): it takes a list of strings and concatenates the elements.
# t = ['pinging', 'for', 'the', 'fjords']
# delimiter = ' '
# print delimiter.join(t)
# a = [12, 14, 15]
# b = a
# b[0] = 19
# print a
#
#
# c = 'aaaa'
# d = c
# d = 'bb'
# print c
# def delete_head(t):
# del t[0]
#
if not rev:
ant_str += '; ' + enzyme + 'kc' + str(reaction_index) + '*S' + str(r[1][0]) + enzyme_end
kc.append('kc' + str(reaction_index))
else:
ant_str += '; ' + enzyme + 'kf' + str(reaction_index) + '*S' + str(r[1][0]) \
+ ' - kr' + str(reaction_index) + '*S' + str(r[2][0]) + enzyme_end
kf.append('kf' + str(reaction_index))
kr.append('kr' + str(reaction_index))
if r[0] == TReactionType.BIUNI:
# BiUni
ant_str += 'S' + str(r[1][0])
ant_str += ' + '
ant_str += 'S' + str(r[1][1])
ant_str += ' -> '
ant_str += 'S' + str(r[2][0])
rev = reversibility()
if not rev:
ant_str += '; ' + enzyme + 'kc' + str(reaction_index) + '*S' + str(r[1][0]) \
+ '*S' + str(r[1][1]) + enzyme_end
kc.append('kc' + str(reaction_index))
else:
ant_str += '; ' + enzyme + 'kf' + str(reaction_index) + '*S' + str(r[1][0]) \
+ '*S' + str(r[1][1]) + ' - kr' + str(reaction_index) + '*S' \
+ str(r[2][0]) + enzyme_end
kf.append('kf' + str(reaction_index))
kr.append('kr' + str(reaction_index))
if r[0] == TReactionType.UNIBI:
# UniBi
ant_str += 'S' + str(r[1][0])
ant_str += ' -> '
ant_str += 'S' + str(r[2][0])
ant_str += ' + '
ant_str += 'S' + str(r[2][1])
rev = reversibility()
if not rev:
ant_str += '; ' + enzyme + 'kc' + str(reaction_index) + '*S' + str(r[1][0]) + enzyme_end
kc.append('kc' + str(reaction_index))
else:
ant_str += '; ' + enzyme + 'kf' + str(reaction_index) + '*S' + str(r[1][0]) \
+ ' - kr' + str(reaction_index) + '*S' + str(r[2][0]) \
+ '*S' + str(r[2][1]) + enzyme_end
kf.append('kf' + str(reaction_index))
kr.append('kr' + str(reaction_index))
if r[0] == TReactionType.BIBI:
# BiBi
ant_str += 'S' + str(r[1][0])
ant_str += ' + '
ant_str += 'S' + str(r[1][1])
ant_str += ' -> '
ant_str += 'S' + str(r[2][0])
ant_str += ' + '
ant_str += 'S' + str(r[2][1])
rev = reversibility()
if not rev:
ant_str += '; ' + enzyme + 'kc' + str(reaction_index) + '*S' + str(r[1][0]) \
+ '*S' + str(r[1][1]) + enzyme_end
kc.append('kc' + str(reaction_index))
else:
ant_str += '; ' + enzyme + 'kf' + str(reaction_index) + '*S' + str(r[1][0]) \
+ '*S' + str(r[1][1]) + ' - kr' + str(reaction_index) + '*S' \
+ str(r[2][0]) + '*S' + str(r[2][1]) + enzyme_end
kf.append('kf' + str(reaction_index))
kr.append('kr' + str(reaction_index))
ant_str += '\n'
parameter_index = None
if 'deg' in kinetics[2]:
reaction_index += 1
parameter_index = reaction_index
for sp in floating_ids:
ant_str += 'J' + str(reaction_index) + ': S' + str(sp) + ' ->; ' + 'k' \
+ str(reaction_index) + '*' + 'S' + str(sp) + '\n'
reaction_index += 1
ant_str += '\n'
if kinetics[1] == 'trivial':
for each in kf:
ant_str += each + ' = 1\n'
for each in kr:
ant_str += each + ' = 1\n'
for each in kc:
ant_str += each + ' = 1\n'
if kinetics[1] == 'uniform':
for each in kf:
const = uniform.rvs(loc=kinetics[3][kinetics[2].index('kf')][0],
scale=kinetics[3][kinetics[2].index('kf')][1]
- kinetics[3][kinetics[2].index('kf')][0])
ant_str += each + ' = ' + str(const) + '\n'
for each in kr:
const = uniform.rvs(loc=kinetics[3][kinetics[2].index('kr')][0],
scale=kinetics[3][kinetics[2].index('kr')][1]
- kinetics[3][kinetics[2].index('kr')][0])
ant_str += each + ' = ' + str(const) + '\n'
for each in kc:
const = uniform.rvs(loc=kinetics[3][kinetics[2].index('kc')][0],
scale=kinetics[3][kinetics[2].index('kc')][1]
- kinetics[3][kinetics[2].index('kc')][0])
ant_str += each + ' = ' + str(const) + '\n'
if kinetics[1] == 'loguniform':
for each in kf:
const = loguniform.rvs(kinetics[3][kinetics[2].index('kf')][0],
kinetics[3][kinetics[2].index('kf')][1])
ant_str += each + ' = ' + str(const) + '\n'
for each in kr:
const = loguniform.rvs(kinetics[3][kinetics[2].index('kr')][0],
kinetics[3][kinetics[2].index('kr')][1])
ant_str += each + ' = ' + str(const) + '\n'
for each in kc:
const = loguniform.rvs(kinetics[3][kinetics[2].index('kc')][0],
kinetics[3][kinetics[2].index('kc')][1])
ant_str += each + ' = ' + str(const) + '\n'
if kinetics[1] == 'normal':
for each in kf:
while True:
const = norm.rvs(loc=kinetics[3][kinetics[2].index('kf')][0],
scale=kinetics[3][kinetics[2].index('kf')][1])
if const >= 0:
ant_str += each + ' = ' + str(const) + '\n'
break
for each in kr:
while True:
const = norm.rvs(loc=kinetics[3][kinetics[2].index('kr')][0],
scale=kinetics[3][kinetics[2].index('kr')][1])
if const >= 0:
ant_str += each + ' = ' + str(const) + '\n'
break
for each in kc:
while True:
const = norm.rvs(loc=kinetics[3][kinetics[2].index('kc')][0],
scale=kinetics[3][kinetics[2].index('kc')][1])
if const >= 0:
ant_str += each + ' = ' + str(const) + '\n'
break
if kinetics[1] == 'lognormal':
for each in kf:
const = lognorm.rvs(scale=kinetics[3][kinetics[2].index('kf')][0],
s=kinetics[3][kinetics[2].index('kf')][1])
ant_str += each + ' = ' + str(const) + '\n'
for each in kr:
const = lognorm.rvs(scale=kinetics[3][kinetics[2].index('kr')][0],
s=kinetics[3][kinetics[2].index('kr')][1])
ant_str += each + ' = ' + str(const) + '\n'
for each in kc:
const = lognorm.rvs(scale=kinetics[3][kinetics[2].index('kc')][0],
s=kinetics[3][kinetics[2].index('kc')][1])
ant_str += each + ' = ' + str(const) + '\n'
if 'deg' in kinetics[2]:
for _ in floating_ids:
if kinetics[1] == 'trivial':
ant_str += 'k' + str(parameter_index) + ' = 1\n'
if kinetics[1] == 'uniform':
const = uniform.rvs(loc=kinetics[3][kinetics[2].index('deg')][0],
scale=kinetics[3][kinetics[2].index('deg')][1]
- kinetics[3][kinetics[2].index('deg')][0])
ant_str += 'k' + str(parameter_index) + ' = ' + str(const) + '\n'
if kinetics[1] == 'loguniform':
const = loguniform.rvs(kinetics[3][kinetics[2].index('deg')][0],
kinetics[3][kinetics[2].index('deg')][1])
ant_str += 'k' + str(parameter_index) + ' = ' + str(const) + '\n'
if kinetics[1] == 'normal':
while True:
const = norm.rvs(loc=kinetics[3][kinetics[2].index('deg')][0],
scale=kinetics[3][kinetics[2].index('deg')][1])
if const >= 0:
ant_str += 'k' + str(parameter_index) + ' = ' + str(const) + '\n'
break
if kinetics[1] == 'lognormal':
const = lognorm.rvs(scale=kinetics[3][kinetics[2].index('deg')][0],
s=kinetics[3][kinetics[2].index('deg')][1])
ant_str += 'k' + str(parameter_index) + ' = ' + str(const) + '\n'
parameter_index += 1
ant_str += '\n'
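# Note on the `kinetics` argument used throughout this block, inferred from the
# indexing above (a description of what this code expects, not an external spec):
#   kinetics[1] : name of the sampling distribution ('trivial', 'uniform',
#                 'loguniform', 'normal' or 'lognormal')
#   kinetics[2] : list of parameter-type labels such as 'kf', 'kr', 'kc', 'deg';
#                 its length selects the parameterisation branch tested below
#   kinetics[3] : distribution parameters per type, indexed in parallel with
#                 kinetics[2] (e.g. [low, high] bounds or [scale, s] values)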
if len(kinetics[2]) == 12 or len(kinetics[2]) == 13:
kf0 = []
kr0 = []
kc0 = []
kf1 = []
kr1 = []
kc1 = []
kf2 = []
kr2 = []
kc2 = []
kf3 = []
kr3 = []
kc3 = []
reaction_index = None
for reaction_index, r in enumerate(reaction_list_copy):
ant_str += 'J' + str(reaction_index) + ': '
if r[0] == TReactionType.UNIUNI:
# UniUni
ant_str += 'S' + str(r[1][0])
ant_str += ' -> '
ant_str += 'S' + str(r[2][0])
rev = reversibility()
if not rev:
ant_str += '; ' + enzyme + 'kc0_' + str(reaction_index) + '*S' + str(r[1][0]) + enzyme_end
kc0.append('kc0_' + str(reaction_index))
else:
ant_str += '; ' + enzyme + 'kf0_' + str(reaction_index) + '*S' + str(r[1][0]) \
+ ' - kr0_' + str(reaction_index) + '*S' + str(r[2][0]) + enzyme_end
kf0.append('kf0_' + str(reaction_index))
kr0.append('kr0_' + str(reaction_index))
if r[0] == TReactionType.BIUNI:
# BiUni
ant_str += 'S' + str(r[1][0])
ant_str += ' + '
ant_str += 'S' + str(r[1][1])
ant_str += ' -> '
ant_str += 'S' + str(r[2][0])
rev = reversibility()
if not rev:
ant_str += '; ' + enzyme + 'kc1_' + str(reaction_index) + '*S' + str(r[1][0]) \
+ '*S' + str(r[1][1]) + enzyme_end
kc1.append('kc1_' + str(reaction_index))
else:
ant_str += '; ' + enzyme + 'kf1_' + str(reaction_index) + '*S' + str(r[1][0]) \
+ '*S' + str(r[1][1]) + ' - kr1_' + str(reaction_index) + '*S' \
+ str(r[2][0]) + enzyme_end
kf1.append('kf1_' + str(reaction_index))
kr1.append('kr1_' + str(reaction_index))
if r[0] == TReactionType.UNIBI:
# UniBi
ant_str += 'S' + str(r[1][0])
ant_str += ' -> '
ant_str += 'S' + str(r[2][0])
ant_str += ' + '
ant_str += 'S' + str(r[2][1])
rev = reversibility()
if not rev:
ant_str += '; ' + enzyme + 'kc2_' + str(reaction_index) + '*S' + str(r[1][0]) + enzyme_end
kc2.append('kc2_' + str(reaction_index))
else:
ant_str += '; ' + enzyme + 'kf2_' + str(reaction_index) + '*S' + str(r[1][0]) \
+ ' - kr2_' + str(reaction_index) + '*S' + str(r[2][0]) \
+ '*S' + str(r[2][1]) + enzyme_end
kf2.append('kf2_' + str(reaction_index))
kr2.append('kr2_' + str(reaction_index))
if r[0] == TReactionType.BIBI:
# BiBi
ant_str += 'S' + str(r[1][0])
ant_str += ' + '
ant_str += 'S' + str(r[1][1])
ant_str += ' -> '
ant_str += 'S' + str(r[2][0])
ant_str += ' + '
ant_str += 'S' + str(r[2][1])
rev = reversibility()
if not rev:
ant_str += '; ' + enzyme + 'kc3_' + str(reaction_index) + '*S' + str(r[1][0]) \
+ '*S' + str(r[1][1]) + enzyme_end
kc3.append('kc3_' + str(reaction_index))
else:
ant_str += '; ' + enzyme + 'kf3_' + str(reaction_index) + '*S' + str(r[1][0]) \
+ '*S' + str(r[1][1]) + ' - kr3_' + str(reaction_index) + '*S'
"""Graphical User Interface for Jupyter notebooks"""
# TODO: Refactor the whole GUI and move to submodule!
#from IPython.html.widgets import interact
#from IPython.html import widgets # IPython < 4
from __future__ import print_function, division
#import warnings
#import time
from collections import OrderedDict
import os.path
import os
import ast
import copy
import pickle
import numpy as np
import ipywidgets as widgets
#from traitlets import link
#from traitlets import Unicode
from IPython.display import display
from IPython.display import clear_output
#from IPython import get_ipython
from lmfit import Parameters
#from IPython.core.pylabtools import print_figure
#import base64
from pyrho.parameters import modelParams, modelList, modelLabels, protList, protParams, simList, simParams, unitLabels, stateLabs, simUnitLabels, protParamLabels, protUnitLabels, protParamNotes, PyRhOparameters
from pyrho.models import *
from pyrho.simulators import *
from pyrho.protocols import *
from pyrho.expdata import *
from pyrho.fitting import *
from pyrho.config import simAvailable, GUIdir, setupGUI #, dDir # For dataSet loading
from pyrho.utilities import *
from pyrho import config
from pyrho.config import verbose
__all__ = ['loadGUI']
# LaTeX in widget descriptions/labels
# FloatRangeSlider and IntRangeSlider
# An Output widget was added, which allows you to print and display within widgets - replace Popup
# A SelectMultiple widget was added
# Suppress widget warning
# Add placeholder attribute to text widgets
# Tooltip on toggle button
# Dropdown options can be a dict, tuple or list
### Enable these for NEURON!!!
#import neuron
#from neuron import h
#%precision %.6g
#import numpy as np
#np.set_printoptions(precision=6)
#pprint()
# Create lists and dictionaries of titles
modelTitles = ['Three-state model', 'Four-state model', 'Six-state model']
fitMethodsDict = OrderedDict([(m,i) for i,m in enumerate(methods)])
# State keys must be padded with a leading ' ' to avoid a widgets bug: https://github.com/ipython/ipython/issues/6469
#statesDict = OrderedDict([(' '+s,i) for i,s in enumerate(list(modelParams.keys()))]) # enumerate(modelList)
#statesDict = OrderedDict([(' 3',0), (' 4',1), (' 6',2)])
statesDict = OrderedDict([(s,i) for i,s in enumerate(list(modelParams))]) #.keys()
statesArray = modelList #statesArray = list(statesDict) #.keys() #[' 3', ' 4', ' 6'] # [u' 3',u' 4',u' 6'] ### Redundant!
TabGroups = {'Fit':0, 'Models':1, 'Protocols':2, 'Simulators':3}
#TabGroups = {'Models':0, 'Simulators':1, 'Protocols':2}
#clearDelay = 1.5 # Pause [s] before clearing text entry fields
# Structures for cross-referencing arrays of widgets to their corresponding parameters
# http://stackoverflow.com/questions/18809482/python-nesting-dictionary-ordereddict-from-collections
#mParamsK2I = OrderedDict([ (model,OrderedDict([(p,i) for i,p in enumerate(list(modelParams[model]))])) for model in modelList ])
#mParamsI2K = OrderedDict([ (model,list(modelParams[model])) for model in modelList ])
modelParamsList = OrderedDict([ (model, list(modelParams[model])) for model in modelList ])
#sParamsK2I = OrderedDict([ (sim,OrderedDict([(p,i) for i,p in enumerate(list(simParams[sim]))])) for sim in simList ])
#sParamsI2K = OrderedDict([ (sim,list(simParams[sim])) for sim in simList ])
simParamsList = OrderedDict([ (sim, list(simParams[sim])) for sim in simList ])
loadedSims = [ sim for sim in simList if simAvailable(sim) ]
#pParamsK2I = OrderedDict([ (prot,OrderedDict([(p,i) for i,p in enumerate(list(protParams[prot]))])) for prot in protList ])
#pParamsI2K = OrderedDict([ (prot,list(protParams[prot])) for prot in protList ])
protParamsList = OrderedDict([ (prot, list(protParams[prot])) for prot in protList ])
boolDict = OrderedDict([('True',True), ('False',False)])
### TODO: Replace GUI with object oriented code...
class ParamWidgets(object):
"""Common base class for all sets of parameter widgets"""
def __init__(self, pSet, type=None):
self.defaults = pSet
self.type = type
self.paramsK2I = OrderedDict([ (set, OrderedDict([(p,i) for i,p in enumerate(list(modelParams[set]))])) for set in statesArray ])
self.paramsI2K = OrderedDict([ (set, list(modelParams[set])) for set in statesArray ])
def __str__(self):
return "Parameter set: "+self.type
def getParams(self, pSet, widgetList, varyList=None, minList=None, maxList=None, exprList=None):
userParams = Parameters()
for i, param in enumerate(pSet):
if isinstance(pSet[param].value, list): ################################## Change to not number!!!
userParams.add(param, value=ast.literal_eval(widgetList[i].value)) # or http://stackoverflow.com/questions/5387208/convert-a-string-to-an-array
else:
userParams.add(param, value=widgetList[i].value)
if varyList is not None:
userParams[param].set(vary=varyList[i].value) #userParams[param].vary = varyList[i].value
if minList is not None:
userParams[param].set(min=minList[i].value)
if maxList is not None:
userParams[param].set(max=maxList[i].value)
if exprList is not None:
userParams[param].set(expr=exprList[i].value)
return userParams
def setGUIparams(self, pSet, widgetList, varyList=None, minList=None, maxList=None, exprList=None):
for i, param in enumerate(pSet):
print(i,param)
if isinstance(pSet[param].value, list): ################################## Change to not number!!!
widgetList[i].value = str(pSet[param].value)
else:
widgetList[i].value = pSet[param].value
if varyList is not None:
varyList[i].value = pSet[param].vary
if minList is not None:
minList[i].value = pSet[param].min
if maxList is not None:
maxList[i].value = pSet[param].max
if exprList is not None and pSet[param].expr is not None:
exprList[i].value = pSet[param].expr ### Change units handling
def setParams(self, params):
for p in params.keys():
#if p in self.__dict__:
self.__dict__[p] = params[p].value
#else:
# warnings.warn('Warning: "{p}" not found in {self}'.format(p,self))
def exportParams(self, params):
"""Export parameters to lmfit dictionary"""
for p in self.__dict__.keys():
params[p].value = self.__dict__[p]
return params
##### Not used yet... #####
def buildLayerTab(paramGroup):
pluginBoxes = [None for plugin in paramGroup] #{plugin: None for plugin in paramGroup}
pluginParamsBox = [None for plugin in paramGroup] #{plugin: None for plugin in paramGroup} # Left side container
pluginNotesBox = [None for plugin in paramGroup] #{plugin: None for plugin in paramGroup} # Right side container for figure and equations
#pluginStimHTML = [None for plugin in paramGroup]
pluginFigHTML = [None for plugin in paramGroup] #{plugin: None for plugin in paramGroup}
#eqBox = [None for m in range(len(modelParams))]
pluginValues = [[None for param in paramGroup[plugin]] for plugin in paramGroup] #{plugin:{param: None for param in paramGroup[plugin]} for plugin in paramGroup} # Array of parameter values
pluginUnits = [[None for param in paramGroup[plugin]] for plugin in paramGroup] #{plugin:{param: None for param in paramGroup[plugin]} for plugin in paramGroup} # Array of units
pluginParams = [[None for param in paramGroup[plugin]] for plugin in paramGroup] #{plugin:{param: None for param in paramGroup[plugin]} for plugin in paramGroup} # Array of parameter boxes
for pluginInd, plugin in enumerate(paramGroup):
#pluginInd = paramGroup.keys().index(plugin)
pSet = paramGroup[plugin]
for pInd, param in enumerate(pSet):
#paramInd = pSet.keys().index(param)
if isinstance(pSet[param].value, list): # list ==> Text
pluginValues[pluginInd][pInd] = widgets.Text(value=str(pSet[param].value), description=param) # np.asarray
elif isinstance(pSet[param].value, str): # str ==> Text
pluginValues[pluginInd][pInd] = widgets.Text(value=str(pSet[param].value), description=param)
elif isinstance(pSet[param].value, bool):
pluginValues[pluginInd][pInd] = widgets.Dropdown(options=boolDict,value=pSet[param].value,description=param)
else: # Numeric
if (pSet[param].min == None or pSet[param].min == -np.inf) or (pSet[param].max == None or pSet[param].max == np.inf): # No limits
pluginValues[pluginInd][pInd] = widgets.FloatText(value=pSet[param].value, description=param)
else: # Bounded # ==> widgets.FloatSlider() ?
pluginValues[pluginInd][pInd] = widgets.BoundedFloatText(value=pSet[param].value, min=pSet[param].min, max=pSet[param].max, description=param)
pluginValues[pluginInd][pInd].width = '150px'
if pSet[param].expr is None: # No units
pluginParams[pluginInd][pInd] = widgets.HBox(children=[pluginValues[pluginInd][pInd]])
else:
pluginUnits[pluginInd][pInd] = widgets.Dropdown(options=[pSet[param].expr],value=pSet[param].expr) ### Change units handling
pluginParams[pluginInd][pInd] = widgets.HBox(children=[pluginValues[pluginInd][pInd],pluginUnits[pluginInd][pInd]])
pluginFigHTML[pluginInd] = widgets.HTML()
#exampleProt = '{}{}6s.{}'.format(fDir,prot,'png')#saveFigFormat)
#if os.path.isfile(exampleProt):
# protFigHTML[pInd].value='<img src="{}" alt=Example {} width=200px>'.format(exampleProt,prot)
#else:
# protFigHTML[pInd].value='Example Figure'
pluginParamsBox[pluginInd] = widgets.Box(children=pluginParams[pluginInd])
pluginNotesBox[pluginInd] = widgets.HBox(children=[pluginFigHTML[pluginInd]])# simStimHTML[sInd] , ])#[figHTML[prot],eqBox[prot]])
pluginBoxes[pluginInd] = widgets.HBox(children=[pluginParamsBox[pluginInd],pluginNotesBox[pluginInd]])#modelBox
#display(protBoxes[pInd])
pluginBoxes[pluginInd].margin = '5px'
##### Plugin parameters tab #####
pluginParamsTabs = widgets.Tab(description='Plugin Settings', children=pluginBoxes)# \
pluginParamsTabs.margin = '5px'
return pluginParamsTabs
#pluginParamsTabs.on_trait_change(onChangeSimTab,'selected_index') # External
def loadGUI(IPythonWorkspace=None):
path = os.path.join(os.getcwd(), GUIdir)
if not os.path.isdir(path):
setupGUI(path)
if IPythonWorkspace is None:
pass
else:
globals().update(IPythonWorkspace)
##### Model fitting bar Functions #####
'''
def fitToggle(name, value):
if value == True:
fitBar.visible = True
#time.sleep(clearDelay) # Pause then clear the input field
#dataVar.value = ''
else:
fitBar.visible = False
#dataVar.value='<variable name>'
return
'''
#dataSet = None
def onDataLoad(name):
global dataSet
print('Loading: "', dataVar.value, '"...', end=' ')
#print(vars())
#print(globals())
if dataVar.value in vars(): ### locals()? # http://stackoverflow.com/questions/7969949/whats-the-difference-between-globals-locals-and-vars
dataSet = vars()[dataVar.value]
#useFitCheck.value = True
print('Successfully loaded from vars!')
elif dataVar.value in globals():
dataSet = globals()[dataVar.value] # eval(dataVar.value) ##### Is this safe?
#useFitCheck.value = True
print('Successfully loaded from globals!')
else:
# Try treating it as a file name instead?
fh = open(os.path.join(config.dDir, dataVar.value), "rb")
dataSet = pickle.load(fh)
fh.close()
print('Successfully loaded "{}"!'.format(path.join(config.dDir, dataVar.value)))
#dataSet = None
#useFitCheck.value = False
#warnings.warn('Warning! Variable: {} not found!'.format(dataVar.value))
#return dataSet
def onClickFitButton(b): # on_button_clicked
"""Main function to fit a model to a supplied data set"""
#global fitParamsPopup
#global clearOutput
if clearOutput.value:
clear_output()
#if 'fitParamsPopup' in vars() or 'fitParamsPopup' in globals():
# fitParamsPopup.close()
model = str(statesToFitButtons.value)
mInd = statesDict[model] #statesDict[' '+str(nStates)]
pSet = modelParams[modelList[mInd]]
initialParams = getGUIparams(pSet, pfValArr[mInd][:], varyList=fVaryArr[mInd][:], minList=pfMinArr[mInd][:], maxList=pfMaxArr[mInd][:], exprList=fExprArr[mInd][:])
fittedParams, miniObj = fitModels(dataSet, nStates=int(statesToFitButtons.value), params=initialParams, postFitOpt=runPostOpt.value, relaxFact=relaxFactWid.value, method=methods[fitMethods.value], postFitOptMethod=methods[postOptFitMethods.value])
#fitParamReport = widgets.TextareaWidget(description='Report:',value=fitRhO.reportParams())
#fitParamsPopup = widgets.PopupWidget(children=[fitParamReport],button_text='Fitted Parameters',description='Fitted {} state model parameters from: {}'.format(int(statesToFitButtons.value),dataVar.value))
#display(fitParamsPopup)
#[:]???
setGUIparams(fittedParams, modelParamsList[model], pfValArr[mInd], varyList=fVaryArr[mInd], minList=pfMinArr[mInd], maxList=pfMaxArr[mInd], exprList=fExprArr[mInd])
# if useFitCheck.value: # Set the run parameters too
# setGUIparams(fittedParams, modelParamsList[model], pValArr[mInd])
if runSSAcheck.value == True:
fitRhO = models[modelList[mInd]]()
fitRhO.updateParams(fittedParams)
fitRhO.plotRates()
characterise(fitRhO)
return
def onClickCharacteriseButton(b):
model = str(statesToFitButtons.value)
mInd = statesDict[model]
pSet = modelParams[modelList[mInd]]
fittedParams = getGUIparams(pSet, pfValArr[mInd][:], varyList=fVaryArr[mInd][:], minList=pfMinArr[mInd][:], maxList=pfMaxArr[mInd][:], exprList=fExprArr[mInd][:])
fitRhO = models[modelList[mInd]]()
fitRhO.updateParams(fittedParams)
fitRhO.plotRates()
characterise(fitRhO)
return
def onClickExportFitButton(b):
model = str(statesToFitButtons.value)
mInd = statesDict[model]
pSet = modelParams[modelList[mInd]]
fittedParams = getGUIparams(pSet, pfValArr[mInd][:], varyList=fVaryArr[mInd][:], minList=pfMinArr[mInd][:], maxList=pfMaxArr[mInd][:], exprList=fExprArr[mInd][:])
setGUIparams(fittedParams, modelParamsList[model], pValArr[mInd])
return
'''
##### NEURON bar functions #####
# def onHocLoad(name):
# print('Loading: "',hocFile.value, '"...', end=' ')
# try: # Load mechanism and set some appropriate parameters
# h = hoc.HocObject()
# h.xopen(hocFile.value)
# h.load_mechanisms() #h('nrn_load_dll("libnrnmech.so")')
# #from neuron import gui #
"""
Tests module rigid_3d
# Author: <NAME>
# $Id$
"""
from __future__ import unicode_literals
from __future__ import division
from builtins import range
#from past.utils import old_div
__version__ = "$Revision$"
#from copy import copy, deepcopy
import unittest
import numpy as np
import numpy.testing as np_test
#import scipy as sp
from pyto.geometry.affine_2d import Affine2D
from pyto.geometry.rigid_3d import Rigid3D
class TestRigid3D(np_test.TestCase):
"""
"""
def setUp(self):
"""
"""
self.ninit = 10
def testIdentity(self):
"""
Tests identity()
"""
ide = Rigid3D.identity()
np_test.assert_equal(ide.q, np.identity(3))
np_test.assert_equal(ide.s_scalar, 1)
ide = Rigid3D.identity(ndim=3)
np_test.assert_equal(ide.q, np.identity(3))
np_test.assert_equal(ide.s_scalar, 1)
def testS(self):
"""
Tests getS and setS
"""
r3d = Rigid3D()
r3d.s_scalar = 2.3
np_test.assert_equal(r3d.s, 2.3 * np.identity(3))
r3d = Rigid3D(scale=2.4)
np_test.assert_equal(r3d.s, 2.4 * np.identity(3))
r3d = Rigid3D.identity()
r3d.s = 3.4 * np.identity(3)
np_test.assert_equal(r3d.s_scalar, 3.4)
def testMakeScalar(self):
"""
Tests makeScalar()
"""
s = Rigid3D.makeScalar(s=np.identity(3))
np_test.assert_equal(s, 1)
q = Rigid3D.make_r_euler([1.,2.,3.])
s_scalar = 2.5
r3d = Rigid3D(q=q, scale=s_scalar)
q, p, s, m = r3d.decomposeQR(order='qr')
np_test.assert_almost_equal(Rigid3D.makeScalar(s=s, check=False), 2.5)
def testGl(self):
"""
Tests getGl and segGl
"""
# test getGl()
r3d = Rigid3D.identity()
np_test.assert_equal(r3d.gl, np.identity(3))
r3d.s_scalar = 1.7
np_test.assert_equal(r3d.gl, 1.7 * np.identity(3))
r3d.s_scalar = 2.7
np_test.assert_equal(r3d.gl, 2.7 * np.identity(3))
r3d.q = np.array([[0,0,1], [0,1,0], [-1,0,0]])
np_test.assert_equal(r3d.gl, 2.7 * np.array(
[[0,0,1], [0,1,0], [-1,0,0]]))
# test setGl()
r3d = Rigid3D()
r3d.gl = 2.6 * np.identity(3)
np_test.assert_equal(r3d.q, np.identity(3))
np_test.assert_equal(r3d.s_scalar, 2.6)
r3d.gl = 2.8 * np.array([[0,-1,0],[1,0,0],[0,0,1]])
np_test.assert_equal(r3d.q, np.array([[0,-1,0],[1,0,0],[0,0,1]]))
np_test.assert_equal(r3d.s_scalar, 2.8)
def testD(self):
"""
Tests setting d and makeD()
"""
r3d = Rigid3D(q=np.identity(3))
np_test.assert_almost_equal(r3d.d, [0,0,0])
r3d = Rigid3D()
np_test.assert_almost_equal(r3d.d, [0,0,0])
r3d = Rigid3D(q=np.identity(3), d=-1)
np_test.assert_almost_equal(r3d.d, [-1,-1,-1])
r3d = Rigid3D(q=np.identity(3), d=[1,2,3])
np_test.assert_almost_equal(r3d.d, [1,2,3])
r3d = Rigid3D(q=np.identity(3))
r3d.d = [2,3,4]
np_test.assert_almost_equal(r3d.d, [2,3,4])
r3d.d = Rigid3D.makeD(2)
np_test.assert_almost_equal(r3d.d, [2,2,2])
r3d.d = Rigid3D.makeD(d=[5,4,3])
np_test.assert_almost_equal(r3d.d, [5,4,3])
def testCompose(self):
"""
Tests compose()
"""
t1 = Rigid3D(q=np.identity(3), d=[1,2,3])
t2 = Rigid3D(q=np.identity(3))
com = Rigid3D.compose(t2, t1)
np_test.assert_equal(com.d, [1,2,3])
def test_make_r_axis(self):
"""
Tests make_r_axis()
"""
# pi/2 z
np_test.assert_almost_equal(
Rigid3D.make_r_axis(angle=np.pi/2, axis='z'),
[[0, -1, 0], [1, 0, 0], [0, 0, 1]])
np_test.assert_almost_equal(
Rigid3D.make_r_axis(angle=np.pi/3, axis='z'),
[[np.cos(np.pi/3), -np.sin(np.pi/3), 0],
[np.sin(np.pi/3), np.cos(np.pi/3), 0],
[0, 0, 1]])
# -pi/2 y
np_test.assert_almost_equal(
Rigid3D.make_r_axis(angle=-np.pi/2, axis='y'),
[[0, 0, -1], [0, 1, 0], [1, 0, 0]])
np_test.assert_almost_equal(
Rigid3D.make_r_axis(angle=-1., axis='y'),
[[np.cos(-1.), 0, np.sin(-1.)],
[0, 1, 0],
[-np.sin(-1.), 0, np.cos(-1.)]])
# pi x
np_test.assert_almost_equal(
Rigid3D.make_r_axis(angle=np.pi, axis='x'),
[[1, 0, 0], [0, -1, 0], [0, 0, -1]])
np_test.assert_almost_equal(
Rigid3D.make_r_axis(angle=0.5, axis='x'),
[[1, 0, 0],
[0, np.cos(0.5), -np.sin(0.5)],
[0, np.sin(0.5), np.cos(0.5)]])
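# The expected matrices above are the standard right-handed single-axis
# rotations, e.g. Rz(a) = [[cos a, -sin a, 0], [sin a, cos a, 0], [0, 0, 1]],
# with the analogous forms for Rx(a) and Ry(a).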
def testInverse(self):
"""
Tests inverse()
"""
q = Rigid3D.make_r_euler(angles=[1., 2., 3])
r3d = Rigid3D(q=q, d=[1,2,-4])
r3di = r3d.inverse()
com = Rigid3D.compose(r3d, r3di)
np_test.assert_almost_equal(com.q, np.identity(3))
np_test.assert_almost_equal(com.d, [0,0,0])
def test_shift_angle_range(self):
"""
Tests shift_angle_range()
"""
# default low
np_test.assert_almost_equal(Rigid3D.shift_angle_range(angle=1), 1)
np_test.assert_almost_equal(
Rigid3D.shift_angle_range(angle=2), 2)
np_test.assert_almost_equal(
Rigid3D.shift_angle_range(angle=4.), 4 - 2*np.pi)
np_test.assert_almost_equal(
Rigid3D.shift_angle_range(angle=6.), 6 - 2*np.pi)
np_test.assert_almost_equal(
Rigid3D.shift_angle_range(angle=12.), 12 - 4*np.pi)
np_test.assert_almost_equal(
Rigid3D.shift_angle_range(angle=-1), -1)
np_test.assert_almost_equal(
Rigid3D.shift_angle_range(angle=-5), -5 + 2*np.pi)
# given low
np_test.assert_almost_equal(
Rigid3D.shift_angle_range(angle=4., low=0), 4)
np_test.assert_almost_equal(
Rigid3D.shift_angle_range(angle=4., low=-2*np.pi), 4 - 2*np.pi)
def test_find_32_constr_ck_scale_1(self):
"""
Tests find_32_constr_ck(scale=1)
"""
# coord system-like points
x_cs = np.array([[0., 1, 0, 0],
[0, 0, 2, 0],
[0, 0, 0, 3]])
# identity
res = Rigid3D.find_32_constr_ck(
x=x_cs, y=x_cs[:2,:], scale=1, cm=False, use_jac=True)
np_test.assert_almost_equal(res.gl, np.identity(3))
np_test.assert_almost_equal(res.y, x_cs)
np_test.assert_almost_equal(res.optimizeResult.fun, 0, decimal=3)
# pi/2 rotation around z axis
y = np.array([[0., 0, -2, 0],
[0, 1, 0, 0],
[0, 0, 0, 3]])
r_desired = np.array([[0., -1, 0], [1, 0, 0], [0, 0, 1]])
res = Rigid3D.find_32_constr_ck(
x=x_cs, y=y[:2,:], scale=1, cm=False, use_jac=True)
np_test.assert_almost_equal(np.dot(res.gl, x_cs), y, decimal=3)
np_test.assert_almost_equal(res.gl, r_desired, decimal=3)
np_test.assert_almost_equal(res.optimizeResult.fun, 0, decimal=3)
# pi/2 rotation around z axis, 3 markers
y = np.array([[0., 0, -2, 0],
[0, 1, 0, 0],
[0, 0, 0, 3]])
r_desired = np.array([[0., -1, 0], [1, 0, 0], [0, 0, 1]])
res = Rigid3D.find_32_constr_ck(
x=x_cs[:,1:4], y=y[:2,1:4], scale=1, cm=False, use_jac=True)
np_test.assert_almost_equal(np.dot(res.gl, x_cs), y, decimal=3)
np_test.assert_almost_equal(res.gl, r_desired, decimal=3)
np_test.assert_almost_equal(res.optimizeResult.fun, 0, decimal=3)
# pi/6 rotation around z axis
r = Rigid3D.make_r_euler([0, np.pi/6, 0])
y = np.dot(r, x_cs)
res = Rigid3D.find_32_constr_ck(
x=x_cs, y=y[:2,:], scale=1, cm=False, use_jac=True)
np_test.assert_almost_equal(res.y[2,:], y[2,:], decimal=3)
np_test.assert_almost_equal(np.dot(res.gl, x_cs), y, decimal=3)
np_test.assert_almost_equal(res.gl, r, decimal=3)
np_test.assert_almost_equal(res.optimizeResult.fun, 0, decimal=3)
# pi/6 rotation around z axis, 3 markers
r = Rigid3D.make_r_euler([0, np.pi/6, 0])
y = np.dot(r, x_cs)
res = Rigid3D.find_32_constr_ck(
x=x_cs[:,1:4], y=y[:2,1:4], scale=1, cm=False, use_jac=True)
np_test.assert_almost_equal(res.y[2,:], y[2,1:4], decimal=3)
np_test.assert_almost_equal(np.dot(res.gl, x_cs), y, decimal=3)
np_test.assert_almost_equal(res.gl, r, decimal=3)
np_test.assert_almost_equal(res.optimizeResult.fun, 0, decimal=3)
# 8 pi/7 rotation around z axis
r = Rigid3D.make_r_euler([0, 8*np.pi/7, 0])
y = np.dot(r, x_cs)
res = Rigid3D.find_32_constr_ck(
x=x_cs, y=y[:2,:], scale=1, cm=False, use_jac=True)
np_test.assert_almost_equal(res.y[2,:], y[2,:], decimal=3)
np_test.assert_almost_equal(np.dot(res.gl, x_cs), y, decimal=3)
np_test.assert_almost_equal(res.gl, r, decimal=3)
np_test.assert_almost_equal(res.optimizeResult.fun, 0, decimal=3)
# pi/2 rotation around x axis
y = np.array([[0., 1, 0, 0],
[0, 0, 0, -3],
[0, 0, 2, 0]])
r_desired = np.array([[1., 0, 0], [0, 0, -1], [0, 1, 0]])
res = Rigid3D.find_32_constr_ck(
x=x_cs, y=y[:2,:], scale=1, cm=False, use_jac=True)
np_test.assert_almost_equal(np.dot(res.gl, x_cs), y, decimal=3)
np_test.assert_almost_equal(res.gl, r_desired, decimal=3)
np_test.assert_almost_equal(res.optimizeResult.fun, 0, decimal=3)
# pi/3 rotation around x axis
r = Rigid3D.make_r_euler([np.pi/3, 0, 0])
y = np.dot(r, x_cs)
res = Rigid3D.find_32_constr_ck(
x=x_cs, y=y[:2,:], scale=1, cm=False, use_jac=True)
np_test.assert_almost_equal(res.y[2,:], y[2,:], decimal=3)
np_test.assert_almost_equal(np.dot(res.gl, x_cs), y, decimal=3)
np_test.assert_almost_equal(res.gl, r, decimal=3)
np_test.assert_almost_equal(res.optimizeResult.fun, 0, decimal=3)
# pi/3 rotation around x axis, 3 markers
r = Rigid3D.make_r_euler([np.pi/3, 0, 0])
y = np.dot(r, x_cs)
res = Rigid3D.find_32_constr_ck(
x=x_cs[:,1:4], y=y[:2,1:4], scale=1, cm=False, use_jac=True)
np_test.assert_almost_equal(res.y[2,:], y[2,1:4], decimal=3)
np_test.assert_almost_equal(np.dot(res.gl, x_cs), y, decimal=3)
np_test.assert_almost_equal(res.gl, r, decimal=3)
np_test.assert_almost_equal(res.optimizeResult.fun, 0, decimal=3)
# 8 pi/9 rotation around x axis
r = Rigid3D.make_r_euler([8 * np.pi/9, 0, 0])
y = np.dot(r, x_cs)
res = Rigid3D.find_32_constr_ck(
x=x_cs, y=y[:2,:], scale=1, cm=False, use_jac=True)
np_test.assert_almost_equal(res.y[2,:], y[2,:], decimal=3)
np_test.assert_almost_equal(np.dot(res.gl, x_cs), y, decimal=3)
np_test.assert_almost_equal(res.gl, r, decimal=3)
np_test.assert_almost_equal(res.optimizeResult.fun, 0, decimal=3)
# pi/2 rotation around y axis
y = np.array([[0., 0, 0, 3],
[0, 0, 2, 0],
[0, -1, 0, 0]])
r_desired = np.array([[0., 0, 1], [0, 1, 0], [-1, 0, 0]])
res = Rigid3D.find_32_constr_ck(
x=x_cs, y=y[:2,:], scale=1, cm=False, use_jac=True)
np_test.assert_almost_equal(np.dot(res.gl, x_cs), y, decimal=3)
np_test.assert_almost_equal(res.gl, r_desired, decimal=3)
np_test.assert_almost_equal(res.optimizeResult.fun, 0, decimal=3)
# pi/5 rotation around y axis
r = Rigid3D.make_r_euler([np.pi/2, np.pi/5, -np.pi/2])
y = np.dot(r, x_cs)
res = Rigid3D.find_32_constr_ck(
x=x_cs, y=y[:2,:], scale=1, cm=False, use_jac=True)
np_test.assert_almost_equal(res.y[2,:], y[2,:], decimal=3)
np_test.assert_almost_equal(np.dot(res.gl, x_cs), y, decimal=3)
np_test.assert_almost_equal(res.gl, r, decimal=3)
np_test.assert_almost_equal(res.optimizeResult.fun, 0, decimal=3)
# 7 pi/5 rotation around y axis
r = Rigid3D.make_r_euler([np.pi/2, 7 * np.pi/5, -np.pi/2])
y = np.dot(r, x_cs)
res = Rigid3D.find_32_constr_ck(
x=x_cs, y=y[:2,:], scale=1, cm=False, use_jac=True)
np_test.assert_almost_equal(res.y[2,:], y[2,:], decimal=3)
np_test.assert_almost_equal(np.dot(res.gl, x_cs), y, decimal=3)
np_test.assert_almost_equal(res.gl, r, decimal=3)
np_test.assert_almost_equal(res.optimizeResult.fun, 0, decimal=3)
# identity, non-optimal initial
# doesn't find optimal
# cm=True improves but doesn't find optimal
# fine if optimizing scale
res = Rigid3D.find_32_constr_ck(
x=x_cs, y=x_cs[:2,:], scale=1, cm=False, use_jac=False,
init=[0.2, -0.4, 0.5, -0.1])
#np_test.assert_almost_equal(res.y, x_cs, decimal=3)
#np_test.assert_almost_equal(res.gl, np.identity(3), decimal=3)
#np_test.assert_almost_equal(res.optimizeResult.fun, 0, decimal=3)
# cm=True improves but doesn't find optimal
# fine if optimizing scale
res = Rigid3D.find_32_constr_ck(
x=x_cs, y=x_cs[:2,:], scale=1, cm=False, use_jac=True,
init=[0.2, -0.4, 0.5, np.sqrt(0.55)])
y = np.array([[0., 0, -2, 0],
[0, 1, 0, 0],
[0, 0, 0, 3]])
# fine when +np.sqrt(0.55)
# cm=True improves but doesn't find optimal
res = Rigid3D.find_32_constr_ck(
x=x_cs, y=y[:2,:], scale=1, cm=False, use_jac=False,
init=[0.2, -0.4, 0.5, -np.sqrt(0.55)])
# fails for 4, 5, 6 * pi/5, ok for 3 and 7
# small init changes don't help
# reducing z helps, the closer theta to pi the larger reduction
# cm=True improves but doesn't find optimal
# fine if optimizing scale
r = Rigid3D.make_r_euler([np.pi/2, 6 * np.pi/5, -np.pi/2])
res = Rigid3D.find_32_constr_ck(
x=x_cs, y=np.dot(r, x_cs)[:2,:], scale=1, cm=False, use_jac=True)
# pi around z (fi)
# fails after 1 iter when init=[1, 0, 0, 0] and cm=False
r = Rigid3D.make_r_euler([np.pi, 0, 0])
res = Rigid3D.find_32_constr_ck(
x=x_cs, y=np.dot(r, x_cs)[:2,:], scale=1, cm=False, use_jac=True,
init=[0.2, -0.4, 0.5, -np.sqrt(0.55)])
np_test.assert_almost_equal(res.y[2,:], np.dot(r, x_cs)[2,:], decimal=3)
np_test.assert_almost_equal(res.gl, r, decimal=3)
np_test.assert_almost_equal(res.optimizeResult.fun, 0, decimal=3)
# pi around z (psi)
# fails after 1 iter when init=[1, 0, 0, 0] and cm=False
r = Rigid3D.make_r_euler([0, 0, np.pi])
res = Rigid3D.find_32_constr_ck(
x=x_cs, y=np.dot(r, x_cs)[:2,:], scale=1, cm=False, use_jac=True,
init=[0.2, -0.4, 0.5, -np.sqrt(0.55)])
np_test.assert_almost_equal(res.y[2,:], np.dot(r, x_cs)[2,:], decimal=3)
np_test.assert_almost_equal(res.gl, r, decimal=3)
np_test.assert_almost_equal(res.optimizeResult.fun, 0, decimal=3)
# low z example 1
x = np.array([[2., 5.3, 7.2, 0.3, 4],
[4, 3.2, 6, 5.4, 1.2],
[0.5, 1.2, 0.3, 0.5, 0.1]])
angles = np.array([50, 40, 24]) * np.pi / 180
x_cm = x - x.mean(axis=-1).reshape((3,1))
r = Rigid3D.make_r_euler(angles, mode='x')
res = Rigid3D.find_32_constr_ck(
| |
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run MLM and coref. resolution loss pre-training for ReadTwice."""
import functools
import os
import time
from typing import Text
from absl import app
from absl import flags
from absl import logging
import tensorflow.compat.v1 as tf
from readtwice.models import checkpoint_utils
from readtwice.models import config
from readtwice.models import input_utils
from readtwice.models import losses
from readtwice.models import metric_utils
from readtwice.models import modeling
from readtwice.models import optimization
FLAGS = flags.FLAGS
## Required parameters
flags.DEFINE_string(
"source_model_config_file", None,
"The source config file corresponding to the ETC ReadItTwiceBERT model. "
"This specifies the model architecture. When first training the model, "
"this file will be copied to a `read_it_twice_bert_config.json` file in the "
"model directory, and future calls to this binary will use that file "
"instead, ignoring this flag.")
flags.DEFINE_string(
"source_model_config_base64", None,
"A source config json Base64 string corresponding to the ETC ReadItTwiceBERT "
"model. This has the same role as `source_model_config_file` and serves as "
"an alternative. Only one should be specified, not both. When first "
"training the model, this json config will be copied to a "
"`read_it_twice_bert_config.json` file in the model directory, and future calls "
"to this binary will use that file instead, ignoring this flag.")
flags.DEFINE_string(
"input_file", None,
"Input TF example files (can be a glob or comma separated).")
flags.DEFINE_string(
"output_dir", None,
"The output directory where the model checkpoints will be written.")
## Other parameters
flags.DEFINE_string(
"init_checkpoint", None,
"Initial checkpoint (usually from a pre-trained BERT model).")
## Parameters for the input pipeline -- `PretrainInputConfig`
flags.DEFINE_float(
"mlm_fraction_to_mask", 0.15,
"The fraction of tokens to mask for masked language model loss.")
flags.DEFINE_float(
"mlm_entity_fraction_to_mask", None,
"The fraction of entities to mask for masked language model loss.")
flags.DEFINE_string(
"mention_mask_mode", "whole_mention",
"Mentions masking strategy. Possible options: "
"`whole_mention`, `whole_entity` and `whole_entity_batch`.")
flags.DEFINE_integer(
"mlm_max_consecutive_masks", 5,
"Maximum number of consecutive tokens to mask at a time. The actual number "
"of consecutive masks will be uniformly sampled between 1 and this number "
"(both inclusive).")
flags.DEFINE_bool(
"mlm_use_whole_word", True,
"Whether to mask whole words for the MLM task instead of just WordPieces. "
"This requires the `is_continuation` feature to be present in the "
"tensorflow Examples.")
flags.DEFINE_integer(
"mask_token_id", 4,
"The token id of the mask token according to the WordPiece vocabulary.")
flags.DEFINE_integer(
"padding_token_id", 0,
"The token id of the padding token according to the WordPiece vocabulary.")
# Parameters for the training objective
flags.DEFINE_bool(
"enable_side_inputs", False,
"If True, enables read-it-twice model. Otherwise, the model becomes equivalent to the standard Transformer model."
)
flags.DEFINE_integer(
"num_replicas_concat", None,
"Number of replicas to gather summaries from. If None (default) then cross-replicas summaries are not used."
)
flags.DEFINE_enum(
"cross_block_attention_mode", "doc",
["block", "doc", "batch", "other_blocks"],
"The policy on how summaries between different "
"blocks are allowed to interact with each other.")
flags.DEFINE_string("extra_loss", None,
"Auxiliary loss to use. Options are sdp, spd_linear")
flags.DEFINE_integer("summary_num_layers", None,
"Number of layers for the summary prediction task.")
flags.DEFINE_integer(
"summary_num_cross_attention_heads", None,
"Number of attention heads for the summary prediction task.")
flags.DEFINE_bool(
"summary_enable_default_side_input", False,
"Add a default side input, which acts like a no-op attention, "
"effective allowing attention weights to sum up to something less than 1.")
flags.DEFINE_string("metrics_name", None, "Name for logging metrics.")
flags.DEFINE_bool("do_train", False, "Whether to run training.")
flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.")
flags.DEFINE_enum("optimizer", "adamw", ["adamw", "lamb"],
"The optimizer for training.")
flags.DEFINE_float("learning_rate", 1e-4, "The initial learning rate for Adam.")
flags.DEFINE_enum(
"learning_rate_schedule", "poly_decay", ["poly_decay", "inverse_sqrt"],
"The learning rate schedule to use. The default of "
"`poly_decay` uses tf.train.polynomial_decay, while "
"`inverse_sqrt` uses inverse sqrt of time after the warmup.")
flags.DEFINE_float("poly_power", 1.0,
"The power of poly decay if using `poly_decay` schedule.")
flags.DEFINE_integer("num_train_steps", 100000, "Number of training steps.")
flags.DEFINE_integer("num_warmup_steps", 10000, "Number of warmup steps.")
flags.DEFINE_integer("start_warmup_step", 0, "The starting step of warmup.")
flags.DEFINE_integer("save_checkpoints_steps", 1000,
"How often to save the model checkpoint.")
flags.DEFINE_integer("iterations_per_loop", 1000,
"How many steps to make in each estimator call.")
flags.DEFINE_integer("num_eval_epochs", 2, "Number of eval epochs.")
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
flags.DEFINE_bool(
"use_one_hot_embeddings", False,
"Whether to use one-hot multiplication instead of gather for embedding "
"lookups.")
flags.DEFINE_string(
"tpu_name", None,
"The Cloud TPU to use for training. This should be either the name "
"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
"url.")
flags.DEFINE_string(
"tpu_zone", None,
"[Optional] GCE zone where the Cloud TPU is located in. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
flags.DEFINE_string(
"gcp_project", None,
"[Optional] Project name for the Cloud TPU-enabled project. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
flags.DEFINE_string(
"tpu_job_name", None,
"Name of TPU worker binary. Only necessary if job name is changed from"
" default tpu_worker.")
def record_summary_host_fn(metrics_dir, metrics_name, **kwargs):
"""A host_fn function for the host_call in TPUEstimatorSpec.
Args:
metrics_dir: Directory where tf summary events should be written.
metrics_name: Name for the metrics collection
**kwargs: Contains tensors for which summaries are to be recorded. It must
contain a key of `global_step`.
Returns:
A summary op for each tensor to be recorded.
"""
# It's not documented, but when recording summaries via TPUEstimator,
# you need to pass in the global_step value to your host_call function.
global_step = kwargs.pop("global_step")[0]
mlm_loss_per_sample = kwargs.pop("mlm_loss_per_sample")
mlm_accuracy_per_sample = kwargs.pop("mlm_accuracy_per_sample")
mlm_weight_per_sample = kwargs.pop("mlm_weight_per_sample")
block_ids = kwargs.pop("block_ids")
mlm_loss_per_entity_sample = kwargs.pop("mlm_loss_per_entity_sample", None)
mlm_accuracy_per_entity_sample = kwargs.pop("mlm_accuracy_per_entity_sample",
None)
mlm_weight_per_entity_sample = kwargs.pop("mlm_weight_per_entity_sample",
None)
mlm_loss_per_non_entity_sample = kwargs.pop("mlm_loss_per_non_entity_sample",
None)
mlm_accuracy_per_non_entity_sample = kwargs.pop(
"mlm_accuracy_per_non_entity_sample", None)
mlm_weight_per_non_entity_sample = kwargs.pop(
"mlm_weight_per_non_entity_sample", None)
other_metrics = metric_utils.masked_lm_metrics(
mlm_loss_per_sample,
mlm_accuracy_per_sample,
mlm_weight_per_sample,
block_ids,
mlm_loss_per_entity_sample=mlm_loss_per_entity_sample,
mlm_accuracy_per_entity_sample=mlm_accuracy_per_entity_sample,
mlm_weight_per_entity_sample=mlm_weight_per_entity_sample,
mlm_loss_per_non_entity_sample=mlm_loss_per_non_entity_sample,
mlm_accuracy_per_non_entity_sample=mlm_accuracy_per_non_entity_sample,
mlm_weight_per_non_entity_sample=mlm_weight_per_non_entity_sample,
is_train=True,
metrics_name=metrics_name or "train_metrics")
with tf.compat.v2.summary.create_file_writer(metrics_dir).as_default():
with tf.compat.v2.summary.record_if(True):
for name, tensor in kwargs.items():
tf.compat.v2.summary.scalar(
name, tf.reduce_mean(tensor), step=global_step)
for name, tensor in other_metrics.items():
tf.compat.v2.summary.scalar(
name, tf.reduce_mean(tensor), step=global_step)
return tf.summary.all_v2_summary_ops()
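# Illustrative sketch (not from the original file): one plausible way
# `record_summary_host_fn` could be wired into a TPUEstimatorSpec host_call.
# The per-sample attribute names on `mlm_loss_output` are assumptions made
# for the example only.
#
#   host_call = (
#       functools.partial(
#           record_summary_host_fn,
#           metrics_dir=os.path.join(FLAGS.output_dir, "train_metrics"),
#           metrics_name=FLAGS.metrics_name),
#       dict(
#           global_step=tf.expand_dims(tf.train.get_or_create_global_step(), 0),
#           mlm_loss_per_sample=mlm_loss_output.mlm_loss_per_sample,
#           mlm_accuracy_per_sample=mlm_loss_output.mlm_accuracy_per_sample,
#           mlm_weight_per_sample=mlm_loss_output.mlm_weight_per_sample,
#           block_ids=block_ids))
#   output_spec = tf.estimator.tpu.TPUEstimatorSpec(
#       mode=mode, loss=total_loss, train_op=train_op, host_call=host_call)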
def model_fn_builder(model_config, padding_token_id, enable_side_inputs,
num_replicas_concat, cross_block_attention_mode,
extra_loss, summary_num_layers,
summary_num_cross_attention_heads,
summary_enable_default_side_input, init_checkpoint,
learning_rate, num_train_steps, num_warmup_steps, use_tpu,
use_one_hot_embeddings, optimizer, poly_power,
start_warmup_step, learning_rate_schedule, metrics_name):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
logging.info("*** Model: Params ***")
for name in sorted(params.keys()):
logging.info(" %s = %s", name, params[name])
logging.info("*** Model: Features ***")
for name in sorted(features.keys()):
logging.info(" name = %s, shape = %s", name, features[name].shape)
model = modeling.ReadItTwiceBertModel(
config=model_config, use_one_hot_embeddings=use_one_hot_embeddings)
# [batch_size, main_seq_length]
token_ids = features["token_ids"]
batch_size = tf.shape(token_ids)[0]
main_seq_length = tf.shape(token_ids)[1]
block_ids = features["block_ids"]
block_pos = features["block_pos"]
annotation_begins = features.get("annotation_begins")
annotation_ends = features.get("annotation_ends")
annotation_labels = features.get("annotation_labels")
# Do not attend padding tokens
# [batch_size, main_seq_length, main_seq_length]
att_mask = tf.tile(
tf.expand_dims(tf.not_equal(token_ids, padding_token_id), 1),
[1, main_seq_length, 1])
att_mask = tf.cast(att_mask, dtype=tf.int32)
main_output = model(
token_ids=token_ids,
training=(mode == tf.estimator.ModeKeys.TRAIN),
block_ids=block_ids,
block_pos=block_pos,
att_mask=att_mask,
annotation_begins=annotation_begins,
annotation_ends=annotation_ends,
annotation_labels=annotation_labels,
enable_side_inputs=enable_side_inputs,
num_replicas_concat=num_replicas_concat,
cross_block_attention_mode=cross_block_attention_mode)
mlm_loss_fn = losses.LanguageModelLoss(
model.get_token_embedding_table(),
hidden_size=model_config.hidden_size,
name="mlm_loss")
mlm_loss_output = mlm_loss_fn(
input_tensor=main_output.final_hidden_states,
label_ids=features["masked_lm_ids"],
positions=features["masked_lm_positions"],
label_weights=features["masked_lm_weights"],
mlm_is_entity_mask=features.get("mlm_is_entity_mask"),
mlm_is_not_entity_mask=features.get("mlm_is_not_entity_mask"),
padding_token_id=padding_token_id)
mlm_loss = mlm_loss_output.loss
loss_to_log = dict(mlm_loss=tf.expand_dims(mlm_loss, 0))
loss_weight_denominator = 1.0 + sum(extra_loss.values())
total_loss = mlm_loss * (1.0 / loss_weight_denominator)
for loss_name, loss_weight in extra_loss.items():
logging.info("EXTRA LOSS: %s with weight %.2f", loss_name,
loss_weight / loss_weight_denominator)
if model_config.summary_mode == "entity":
# entity label "1" corresponds to unknown entity
# there is no need to compute coreference resolution loss
# for these unknown entities.
labels_weight = tf.cast(
tf.logical_and(
tf.not_equal(
tf.expand_dims(main_output.local_summary.labels, 1), 1),
tf.not_equal(
tf.expand_dims(main_output.global_summary.labels, 0), 1)),
tf.float32)
else:
labels_weight = None
if loss_name == "sdp":
loss_fn = losses.BatchCoreferenceResolutionLoss(
apply_linear_layer=False)
loss_value = loss_fn(
main_output.local_summary.states,
main_output.local_summary.labels,
main_output.global_summary.states,
main_output.global_summary.labels,
labels_weight=labels_weight)
elif loss_name == "sdp_linear":
loss_fn = losses.BatchCoreferenceResolutionLoss(apply_linear_layer=True)
loss_value = loss_fn(
main_output.local_summary.states,
main_output.local_summary.labels,
main_output.global_summary.states,
main_output.global_summary.labels,
labels_weight=labels_weight)
elif loss_name == "spp_linear":
loss_fn = losses.BatchCoreferenceResolutionLoss(apply_linear_layer=True)
# Positive examples are blocks which go one after another in the
# original document.
labels_mask = tf.less_equal(
tf.abs(
tf.expand_dims(main_output.local_summary.block_pos, 1) -
tf.expand_dims(main_output.global_summary.block_pos, 0)), 1)
loss_value = loss_fn(
main_output.local_summary.states,
main_output.local_summary.labels,
main_output.global_summary.states,
main_output.global_summary.labels,
labels_mask=labels_mask,
labels_weight=labels_weight)
elif loss_name == "lm":
token_labels = tf.roll(token_ids, shift=-1, axis=1)
# [batch_size, global_batch_size]
token2side_input_att_mask = modeling.get_cross_block_att(
block_ids,
block_pos,
main_output.global_summary.block_ids,
main_output.global_summary.block_pos,
cross_block_attention_mode=cross_block_attention_mode,
cast_to_int32=False)
# We want to exclude the summary of the block itself
| |
in range(window_floor_distance + 80, Screen_Height + window_difference_y, window_difference_y):
for j in range(0, building_width + window_difference_x, 20):
building_window_surface1.blit(building_ornaments1[1], (j, i))
building_window_surface1.blit(building_ornaments1[3], (0, i))
for i in range(window_difference_y - window_karnisa_distance - 10, Screen_Height + window_difference_y,
window_difference_y):
for j in range(first_window_x - round(karnisa_length / 2) + 40, building_width + window_difference_x,
window_difference_x):
building_window_surface1.blit(window_karnisa_surface1, (j, i))
ball_list = []
life_item_list = []
safe_bar_list = []
danger_bar_list = []
cloud_list0 = []
cloud_list1 = []
building_list = []
building_window_surface_list = []
focus_has_lost = True
bar_create_condition = 0
danger_bar_once = 0
safe_bar_animation_condition_value = 0
danger_bar_animation_condition_value = 0
safe_bar_animation_value = 0
danger_bar_animation_value = 0
cloud_create_condition0 = 0
cloud_create_condition1 = 0
no_ball_state = True
ball_crashed = False
life_item_crashed = False
ball_move_step_x_keyboard_state = 0
ball_animation_value = 0
ball_crashed_delay_value = 0
life_item_crashed_delay_value = 0
building_left_right_selector = randrange(0, 2)
score = 0
life_items_gain = initial_life_items_gain
danger_item_collided = False
bar_move_step_value = bar_move_step
cloud_move_step0_value = cloud_move_step0
cloud_move_step1_value = cloud_move_step1
life_item_create_condition = initial_life_item_create_condition
ball_move_step_y_value = ball_move_step_y
ball_move_step_x_keyboard_value = ball_move_step_x_keyboard
if cloud_enabled:
for i in f_range(Screen_Height - cloud_create_frequency0, -cloud[2].get_height(),
-cloud_create_frequency0 - cloud_move_step0):
cloud_type = randrange(2, 4)
max_cloud_position = background.get_width() - cloud[2].get_width() + background_left
cloud_position = randrange(background_left - cloud[2].get_width(),
max_cloud_position + cloud[2].get_width())
cloud_list0.append([[cloud[cloud_type]], [cloud_position, i]])
cloud_list0.reverse()
for i in f_range(Screen_Height - cloud_create_frequency1, -cloud[0].get_height(),
-cloud_create_frequency1 - cloud_move_step1):
cloud_type = randrange(0, 2)
max_cloud_position = background.get_width() - cloud[0].get_width() + background_left
cloud_position = randrange(background_left - cloud[0].get_width(),
max_cloud_position + cloud[0].get_width())
cloud_list1.append([[cloud[cloud_type]], [cloud_position, i]])
cloud_list1.reverse()
if building_left_right_selector == 0:
building_list.append([pygame.transform.flip(building0, True, False), [background_left - building_width, 0], 15])
building_list.append([building1, [background_left + background.get_width(), 0], 15])
building_window_surface_list.append(
[pygame.transform.flip(building_window_surface0, True, False), [background_left - building_width, 0],
window_difference_y])
building_window_surface_list.append(
[building_window_surface1, [background_left + background.get_width(), 0], window_difference_y])
else:
building_list.append([pygame.transform.flip(building1, True, False), [background_left - building_width, 0], 15])
building_list.append([building0, [background_left + background.get_width(), 0], 15])
building_window_surface_list.append(
[pygame.transform.flip(building_window_surface1, True, False), [background_left - building_width, 0],
window_difference_y])
building_window_surface_list.append(
[building_window_surface0, [background_left + background.get_width(), 0], window_difference_y])
r_step = 0.1
g_step = -0.2
b_step = -0.3
pygame.mouse.set_visible(False)
clock = pygame.time.Clock()
print('\n' * 150)
while True:
clock.tick(Game_Speed)
background.fill((R, G, B))
screen.blit(background, (background_left, 0))
R, r_step = lerp(R, R_start, R_stop, r_step)
G, g_step = lerp(G, G_start, G_stop, g_step)
B, b_step = lerp(B, B_start, B_stop, b_step)
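# Descriptive note (added for clarity): the background R/G/B channels drift
# between their start/stop bounds each frame via the lerp helper defined
# earlier in the script, producing a slow day/night-style colour cycle; the
# returned step is assumed to flip sign when a bound is reached.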
if bar_create_condition == 0:
bar_type = randrange(0, 100)
bar_size = randrange(0, 100)
if bar_type < bar_type_condition or danger_bar_once >= max_danger_bar_once:
danger_bar_once = 0
life_item_create = randrange(0, 100)
if bar_size < safe_bar_size_condition0:
max_bar_position = background.get_width() - safe_bar_width0 + background_left
bar_position = randrange(background_left, max_bar_position)
safe_bar_list.append([safe_bar0, [bar_position, Screen_Height + 40]])
if life_item_create < life_item_create_condition:
max_life_item_position = bar_position + safe_bar_width0 - 40
life_item_position = randrange(bar_position, max_life_item_position)
if life_item_position < round(Screen_Width / 2):
life_item_type = 0
else:
life_item_type = 1
life_item_list.append([[life_item[life_item_type]], [life_item_position, Screen_Height]])
elif bar_size < safe_bar_size_condition1:
max_bar_position = background.get_width() - safe_bar_width1 + background_left
bar_position = randrange(background_left, max_bar_position)
safe_bar_list.append([safe_bar1, [bar_position, Screen_Height + 40]])
if life_item_create < life_item_create_condition:
max_life_item_position = bar_position + safe_bar_width1 - 40
life_item_position = randrange(bar_position, max_life_item_position)
if life_item_position < round(Screen_Width / 2):
life_item_type = 0
else:
life_item_type = 1
life_item_list.append([[life_item[life_item_type]], [life_item_position, Screen_Height]])
else:
max_bar_position = background.get_width() - safe_bar_width2 + background_left
bar_position = randrange(background_left, max_bar_position)
safe_bar_list.append([safe_bar2, [bar_position, Screen_Height + 40]])
if life_item_create < life_item_create_condition:
max_life_item_position = bar_position + safe_bar_width2 - 40
life_item_position = randrange(bar_position, max_life_item_position)
if life_item_position < round(Screen_Width / 2):
life_item_type = 0
else:
life_item_type = 1
life_item_list.append([[life_item[life_item_type]], [life_item_position, Screen_Height]])
else:
danger_bar_once += 1
if bar_size < danger_bar_size_condition0:
max_bar_position = background.get_width() - danger_bar_width0 + background_left
bar_position = randrange(background_left, max_bar_position)
danger_bar_list.append([danger_bar0, [bar_position, Screen_Height + 40]])
elif bar_size < danger_bar_size_condition1:
max_bar_position = background.get_width() - danger_bar_width1 + background_left
bar_position = randrange(background_left, max_bar_position)
danger_bar_list.append([danger_bar1, [bar_position, Screen_Height + 40]])
else:
max_bar_position = background.get_width() - danger_bar_width2 + background_left
bar_position = randrange(background_left, max_bar_position)
danger_bar_list.append([danger_bar2, [bar_position, Screen_Height + 40]])
if cloud_enabled and cloud_create_condition0 == 0:
cloud_type = randrange(2, 4)
max_cloud_position = background.get_width() - cloud[2].get_width() + background_left
cloud_position = randrange(background_left - cloud[2].get_width(),
max_cloud_position + cloud[2].get_width())
cloud_list0.append([[cloud[cloud_type]], [cloud_position, Screen_Height]])
if cloud_enabled and cloud_create_condition1 == 0:
cloud_type = randrange(0, 2)
max_cloud_position = background.get_width() - cloud[0].get_width() + background_left
cloud_position = randrange(background_left - cloud[0].get_width(),
max_cloud_position + cloud[0].get_width())
cloud_list1.append([[cloud[cloud_type]], [cloud_position, Screen_Height]])
for i in cloud_list0:
screen.blit(i[0][0], (round(i[1][0]), round(i[1][1])))
i[1][1] -= cloud_move_step0_value
for i in cloud_list1:
screen.blit(i[0][0], (round(i[1][0]), round(i[1][1])))
i[1][1] -= cloud_move_step1_value
for i in safe_bar_list[::-1]:
screen.blit(i[0][safe_bar_animation_value], (round(i[1][0]), round(i[1][1])))
if no_ball_state and i[1][1] <= round(initial_ball_position * Screen_Height) and life_items_gain >= 0:
ball_list.append(ball[ball_animation_value])
ball_list.append([i[1][0] - 20 + round(i[0][0].get_width() / 2), i[1][1] - 40])
if pygame.mouse.get_focused(): pygame.mouse.set_pos(ball_list[1])
no_ball_state = False
danger_item_collided = False
i[1][1] -= bar_move_step_value
if life_items_gain < 0 and (not ball_crashed):
pygame.quit()
print("Game is over!\nYour score is " + str(
round(score * 0.1)) + ".\nThank you for playing this mini-game.\n\n\nPress ENTER to exit..." + '\n' * 8)
input()
break
for i in danger_bar_list:
screen.blit(i[0][danger_bar_animation_value], (round(i[1][0]), round(i[1][1])))
i[1][1] -= bar_move_step_value
if not no_ball_state:
if not ball_crashed: ball_list[0] = ball[ball_animation_value]
screen.blit(ball_list[0], (round(ball_list[1][0]), round(ball_list[1][1])))
for i in life_item_list:
screen.blit(i[0][0], (round(i[1][0]), round(i[1][1])))
i[1][1] -= bar_move_step_value
for i in building_list, building_window_surface_list:
for j in range(0, 2):
i[j][1][1] -= bar_move_step_value % i[j][2]
if i[j][1][1] <= -i[j][2]:
i[j][1][1] += i[j][2]
screen.blit(i[j][0], (round(i[j][1][0]), round(i[j][1][1])))
screen.blit(top_spike_surface, (background_left, 0))
screen.blit(bottom_spike_surface, (background_left, Screen_Height - 20))
screen.blit(score_display, (10, Screen_Height - 50))
screen.blit(font.render(str(round(score * 0.1)), True, (0, 0, 0)), (51, Screen_Height - 39))
screen.blit(font.render(str(round(score * 0.1)), True, (38, 255, 125)), (50, Screen_Height - 40))
screen.blit(life_display, (Screen_Width - 150, Screen_Height - 47))
screen.blit(font.render(str(life_items_gain), True, (0, 0, 0)), (Screen_Width - 99, Screen_Height - 39))
screen.blit(font.render(str(life_items_gain), True, (255, 127, 39)), (Screen_Width - 100, Screen_Height - 40))
pygame.display.update()
bar_create_condition += bar_move_step_value
if bar_create_condition > bar_create_frequency:
bar_create_condition = 0
safe_bar_animation_condition_value += 1
if safe_bar_animation_condition_value > safe_bar_animation_condition:
safe_bar_animation_value += 1
if safe_bar_animation_value > 1: safe_bar_animation_value = 0
safe_bar_animation_condition_value = 0
danger_bar_animation_condition_value += 1
if danger_bar_animation_condition_value > danger_bar_animation_condition:
danger_bar_animation_value += 1
if danger_bar_animation_value > 1: danger_bar_animation_value = 0
danger_bar_animation_condition_value = 0
cloud_create_condition0 += cloud_move_step0_value
if cloud_create_condition0 > cloud_create_frequency0:
cloud_create_condition0 = 0
cloud_create_condition1 += cloud_move_step1_value
if cloud_create_condition1 > cloud_create_frequency1:
cloud_create_condition1 = 0
if ball_crashed:
ball_crashed_delay_value += 1
if ball_crashed_delay_value > ball_crashed_delay:
ball_crashed_delay_value = 0
ball_crashed = False
ball_list.clear()
no_ball_state = True
if life_item_crashed:
life_item_crashed_delay_value += 1
if life_item_crashed_delay_value > life_item_crashed_delay:
life_item_crashed_delay_value = 0
life_item_crashed = False
i = 0
while i < len(life_item_list):
if life_item_list[i][0][0] == life_item[2] or life_item_list[i][0][0] == life_item[3]:
del life_item_list[i]
i -= 1
i += 1
for i in safe_bar_list, danger_bar_list, cloud_list0, cloud_list1, life_item_list:
j = 0
while j < len(i):
if i[j][1][1] + i[j][0][0].get_height() <= 0:
del i[j]
j -= 1
j += 1
pygame.event.pump()
keystate = pygame.key.get_pressed()
event = pygame.event.poll()
if not pygame.mouse.get_focused():
focus_has_lost = True
if Focus_Locked:
if not no_ball_state:
pygame.mouse.set_pos(ball_list[1])
else:
pygame.mouse.set_pos((0, 0))
if keystate[K_ESCAPE]:
pygame.quit()
break
if (not no_ball_state) and (not ball_crashed):
if ball_move_step_x_keyboard_enabled and keystate[K_LEFT]:
if ball_list[1][0] - ball_move_step_x_keyboard_value > background_left:
ball_list[1][0] -= ball_move_step_x_keyboard_value
ball_move_step_x_keyboard_state = -1
if ball_move_step_x_keyboard_state == -1: ball_move_step_x_keyboard_value += ball_move_step_x_keyboard_acceleration
ball_animation_value += 1
if ball_animation_value > 5: ball_animation_value = 0
else:
ball_list[1][0] = background_left
if pygame.mouse.get_focused(): pygame.mouse.set_pos(ball_list[1])
elif ball_move_step_x_keyboard_enabled and keystate[K_RIGHT]:
if ball_list[1][0] + ball_move_step_x_keyboard_value < background_left + background.get_width() - 40:
ball_list[1][0] += ball_move_step_x_keyboard_value
ball_move_step_x_keyboard_state = +1
if ball_move_step_x_keyboard_state == +1: ball_move_step_x_keyboard_value += ball_move_step_x_keyboard_acceleration
ball_animation_value -= 1
if ball_animation_value < 0: ball_animation_value = 5
else:
ball_list[1][0] = background_left + background.get_width() - 40
if pygame.mouse.get_focused(): pygame.mouse.set_pos(ball_list[1])
elif ball_move_step_x_mouse_enabled and event.type == MOUSEMOTION:
if Focus_Locked or (not focus_has_lost):
mouse_pos_x = pygame.mouse.get_pos()[0]
if mouse_pos_x < ball_list[1][0]:
if mouse_pos_x >= background_left:
ball_list[1][0] = mouse_pos_x
ball_animation_value += 1
if ball_animation_value > 5: ball_animation_value = 0
else:
ball_list[1][0] = background_left
if pygame.mouse.get_focused(): pygame.mouse.set_pos(ball_list[1])
elif mouse_pos_x > ball_list[1][0]:
if mouse_pos_x <= background_left + background.get_width() - 40:
ball_list[1][0] = mouse_pos_x
ball_animation_value -= 1
if ball_animation_value < 0: ball_animation_value = 5
else:
ball_list[1][0] = background_left + background.get_width() - 40
if pygame.mouse.get_focused(): pygame.mouse.set_pos(ball_list[1])
else:
if pygame.mouse.get_focused(): pygame.mouse.set_pos(ball_list[1])
focus_has_lost = False
else:
ball_move_step_x_keyboard_state = 0
ball_move_step_x_keyboard_value = ball_move_step_x_keyboard
if (not no_ball_state) or ball_crashed:
ball_collided = False
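# Descriptive note (added for clarity): the loop below checks whether the
# ball lands on a safe bar this frame -- the bar must have cleared the top
# spikes, the ball's next vertical position must reach the bar's top edge,
# and the ball must lie within the bar's horizontal extent; on a hit the
# ball is snapped onto the bar and its fall speed is reset.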
for i in safe_bar_list:
if i[1][1] + bar_move_step_value > 60 and i[1][1] - 40 < ball_list[1][
1] + ball_move_step_y_value and ceil(i[1][1] - 40 + bar_move_step_value - ball_list[1][1]) >= 0 and \
i[1][0] - 20 <= ball_list[1][0] < i[1][0] + i[0][0].get_width() - 20:
ball_list[1][1] = i[1][1] - 40
ball_collided = True
ball_move_step_y_value = ball_move_step_y
for j in life_item_list:
if | |
<reponame>Stratoscale/zadarapy
# Copyright 2019 Zadara Storage, Inc.
# Originally authored by <NAME> - https://github.com/jwbrown77
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from zadarapy.validators import verify_snapshot_id, verify_boolean, \
verify_field, verify_start_limit, verify_cg_id, verify_policy_id, \
verify_volume_id, verify_snaprule_id, verify_remote_vpsa_id, \
verify_mirror_id
def get_all_mirrors(session, start=None, limit=None, return_type=None,
**kwargs):
"""
Retrieves details for all mirror jobs configured on the VPSA.
:type session: zadarapy.session.Session
:param session: A valid zadarapy.session.Session object. Required.
:type start: int
:param start: The offset to start displaying mirror jobs from. Optional.
:type: limit: int
:param limit: The maximum number of mirror jobs to return. Optional.
:type return_type: str
:param return_type: If this is set to the string 'json', this function
will return a JSON string. Otherwise, it will return a Python
dictionary. Optional (will return a Python dictionary by default).
:rtype: dict, str
:returns: A dictionary or JSON data set as a string depending on
return_type parameter.
"""
parameters = verify_start_limit(start, limit)
path = '/api/mirror_jobs.json'
return session.get_api(path=path, parameters=parameters,
return_type=return_type, **kwargs)
def get_mirror(session, mirror_id, return_type=None, **kwargs):
"""
Retrieves details for the specified mirror job configured on the VPSA.
:type session: zadarapy.session.Session
:param session: A valid zadarapy.session.Session object. Required.
:type mirror_id: str
:param mirror_id: The mirror job 'job_name' value as returned by
create_volume_mirror. For example: 'srcjvpsa-00000001'. Required.
:type return_type: str
:param return_type: If this is set to the string 'json', this function
will return a JSON string. Otherwise, it will return a Python
dictionary. Optional (will return a Python dictionary by default).
:rtype: dict, str
:returns: A dictionary or JSON data set as a string depending on
return_type parameter.
"""
verify_mirror_id(mirror_id)
path = '/api/mirror_jobs/{0}.json'.format(mirror_id)
return session.get_api(path=path, return_type=return_type, **kwargs)
def pause_mirror(session, mirror_id, return_type=None, **kwargs):
"""
Pauses a mirror job. This should only be initiated from the source VPSA.
e.g. the mirror job ID should start with "srcjvpsa-".
:type session: zadarapy.session.Session
:param session: A valid zadarapy.session.Session object. Required.
:type mirror_id: str
:param mirror_id: The mirror job 'job_name' value as returned by
get_all_mirrors. For example: 'srcjvpsa-00000001'. Required.
:type return_type: str
:param return_type: If this is set to the string 'json', this function
will return a JSON string. Otherwise, it will return a Python
dictionary. Optional (will return a Python dictionary by default).
:rtype: dict, str
:returns: A dictionary or JSON data set as a string depending on
return_type parameter.
"""
verify_mirror_id(mirror_id)
path = '/api/mirror_jobs/{0}/pause.json'.format(mirror_id)
return session.post_api(path=path, return_type=return_type, **kwargs)
def resume_paused_mirror(session, mirror_id, return_type=None, **kwargs):
"""
Resumes a paused mirror job. This should only be initiated from the
source VPSA. e.g. the mirror job ID should start with "srcjvpsa-".
:type session: zadarapy.session.Session
:param session: A valid zadarapy.session.Session object. Required.
:type mirror_id: str
:param mirror_id: The mirror job 'job_name' value as returned by
get_all_mirrors. For example: 'srcjvpsa-00000001'. Required.
:type return_type: str
:param return_type: If this is set to the string 'json', this function
will return a JSON string. Otherwise, it will return a Python
dictionary. Optional (will return a Python dictionary by default).
:rtype: dict, str
:returns: A dictionary or JSON data set as a string depending on
return_type parameter.
"""
verify_mirror_id(mirror_id)
path = '/api/mirror_jobs/{0}/continue.json'.format(mirror_id)
return session.post_api(path=path, return_type=return_type, **kwargs)
def break_mirror(session, mirror_id, return_type=None, **kwargs):
"""
Breaks a mirror job. This can be initiated from either the source or
destination VPSA. A broken mirror can be reconnected if all appropriate
snapshots still exist on both systems to resume the relationship - this
possibility can be ascertained by calling get_suggested_mirrors and
issuing a resume_broken_mirror using the right information.
:type session: zadarapy.session.Session
:param session: A valid zadarapy.session.Session object. Required.
:type mirror_id: str
:param mirror_id: The mirror job 'job_name' value as returned by
get_all_mirrors. For example: 'srcjvpsa-00000001' or
'dstjvpsa-00000001'. Required.
:type return_type: str
:param return_type: If this is set to the string 'json', this function
will return a JSON string. Otherwise, it will return a Python
dictionary. Optional (will return a Python dictionary by default).
:rtype: dict, str
:returns: A dictionary or JSON data set as a string depending on
return_type parameter.
"""
verify_mirror_id(mirror_id)
path = '/api/mirror_jobs/{0}/break.json'.format(mirror_id)
return session.post_api(path=path, return_type=return_type, **kwargs)
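# Usage sketch (illustrative only, not part of the library): breaking a mirror
# job and later attempting to reconnect it, as described in the docstring
# above. `session` is assumed to be an authenticated zadarapy.session.Session;
# get_suggested_mirrors and resume_broken_mirror are defined elsewhere in this
# module and their exact arguments are omitted here.
#
#   break_mirror(session, 'srcjvpsa-00000001')
#   # ... later, check whether the relationship can still be resumed:
#   # suggestions = get_suggested_mirrors(session, ...)
#   # resume_broken_mirror(session, ...)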
def get_all_remote_vpsas(session, start=None, limit=None, return_type=None,
**kwargs):
"""
Retrieves details for all remote VPSAs with which this VPSA has a
relationship.
:type session: zadarapy.session.Session
:param session: A valid zadarapy.session.Session object. Required.
:type start: int
:param start: The offset to start displaying remote VPSAs from. Optional.
:type: limit: int
:param limit: The maximum number of remote VPSAs to return. Optional.
:type return_type: str
:param return_type: If this is set to the string 'json', this function
will return a JSON string. Otherwise, it will return a Python
dictionary. Optional (will return a Python dictionary by default).
:rtype: dict, str
:returns: A dictionary or JSON data set as a string depending on
return_type parameter.
"""
parameters = verify_start_limit(start, limit)
path = '/api/remote_vpsas.json'
return session.get_api(path=path, parameters=parameters,
return_type=return_type, **kwargs)
def get_remote_vpsa(session, rvpsa_id, return_type=None, **kwargs):
"""
Retrieves details for a single remote VPSA.
:type session: zadarapy.session.Session
:param session: A valid zadarapy.session.Session object. Required.
:type rvpsa_id: str
:param rvpsa_id: The remote VPSA 'name' value as returned by
get_all_remote_vpsas. For example: 'rvpsa-00000001'. Required.
:type return_type: str
:param return_type: If this is set to the string 'json', this function
will return a JSON string. Otherwise, it will return a Python
dictionary. Optional (will return a Python dictionary by default).
:rtype: dict, str
:returns: A dictionary or JSON data set as a string depending on
return_type parameter.
"""
verify_remote_vpsa_id(rvpsa_id)
path = '/api/remote_vpsas/{0}.json'.format(rvpsa_id)
return session.get_api(path=path, return_type=return_type, **kwargs)
def discover_remote_vpsa(session, ip_address, username, password, public,
return_type=None, **kwargs):
"""
Establishes a relationship with a remote VPSA for the purposes of
mirroring volume snapshots.
:type session: zadarapy.session.Session
:param session: A valid zadarapy.session.Session object. Required.
:type ip_address: str
:param ip_address: The IP address of the remote VPSA. Required.
:type username: str
:param username: The login username for the administrative user of the
remote VPSA (same as what's used to log into that VPSA's GUI).
Required.
:type password: str
:param password: The login password for the administrative user of the
remote VPSA (same as what's used to log into that VPSA's GUI).
Required.
:type public: str
:param public: If set to 'YES', establishing the relationship and future
mirror jobs will occur over the VPSA's public IP/interface (the VPSA
must have a valid public IP configured). If 'NO', the relationship and
mirror jobs will occur using the same IP as connecting to the storage
- in this case the VPSA must be able to route to the remote VPSA in
question via the VPSA's defined default gateway. Required.
:type return_type: str
:param return_type: If this is set to the string 'json', this function
will return a JSON string. Otherwise, it will return a Python
dictionary. Optional (will return a Python dictionary by default).
:rtype: dict, str
:returns: A dictionary or JSON data set as a string depending on
return_type parameter.
"""
username = verify_field(username, "VPSA username")
password = verify_field(password, "<PASSWORD>")
public = verify_boolean(public, "public")
body_values = {'user': username, 'password': password, 'ip': ip_address,
'isPublic': public}
path = '/api/remote_vpsas/discover.json'
return session.post_api(path=path, body=body_values,
return_type=return_type, **kwargs)
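# Usage sketch (illustrative only): establishing a relationship with a remote
# VPSA over its public interface. The IP address and credentials below are
# placeholders, not real values.
#
#   discover_remote_vpsa(session, ip_address='203.0.113.10',
#                        username='admin', password='example-password',
#                        public='YES')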
def refresh_remote_vpsa(session, rvpsa_id, return_type=None, **kwargs):
"""
Refreshes information about a remote VPSA - such as discovering new pools
and updating how much free space remote pools have.
:type session: zadarapy.session.Session
:param session: A valid zadarapy.session.Session object. Required.
:type rvpsa_id: str
:param rvpsa_id: The remote VPSA 'name' value as returned by
get_all_remote_vpsas. For example: 'rvpsa-00000001'. Required.
:type return_type: str
:param return_type: If this is set to the string 'json', this function
will return a JSON string. Otherwise, it will return a Python
dictionary. Optional (will return a Python dictionary by default).
:rtype: | |
"icon": "account-supervisor-circle",
"path": self.current_path
+ ais_global.G_DRIVE_SHARED_WITH_ME,
}
)
break
for item in self.folders_json:
if self.current_path.endswith(":"):
path = self.current_path + item["Path"]
else:
path = self.current_path + "/" + item["Path"]
l_icon = "file-outline"
if item["IsDir"]:
l_icon = "folder-google-drive"
else:
if "MimeType" in item:
if item["MimeType"].startswith("text/"):
l_icon = "file-document-outline"
elif item["MimeType"].startswith("audio/"):
l_icon = "music-circle"
elif item["MimeType"].startswith("video/"):
l_icon = "file-video-outline"
items_info.append({"name": item["Path"][:50], "icon": l_icon, "path": path})
self.hass.states.set(
"sensor.ais_drives", self.current_path, {"files": items_info}
)
if say:
jlen = len(self.folders_json)
self.say(get_pozycji_variety(jlen))
# call from bookmarks now (since we have files from folder) we need to play the file
if self.file_path is not None:
self.hass.services.call(
"ais_drives_service",
"browse_path",
{"path": self.file_path, "seek_position": self.seek_position},
)
def get_icon(self, entry):
if entry.is_dir():
return "folder"
elif entry.name.lower().endswith(".txt"):
return "file-document-outline"
elif entry.name.lower().endswith((".mp3", ".wav", ".mp4", ".flv")):
return "music-circle"
def browse_path(self, call):
"""Load subfolders for the selected folder."""
if "path" not in call.data:
_LOGGER.error("No path")
return
self.file_path = None
self.seek_position = 0
say = True
if "file_path" in call.data:
self.file_path = call.data["file_path"]
say = False
if "seek_position" in call.data:
self.seek_position = call.data["seek_position"]
say = False
self._browse_path(call.data["path"], say)
def _browse_path(self, path, say):
if len(path.strip()) == 0:
self.say("Wybierz pozycję do przeglądania")
if path == "..":
# check if this is cloud drive
if self.is_rclone_path(self.current_path):
if self.current_path == G_CLOUD_PREFIX:
self.current_path = G_LOCAL_FILES_ROOT
elif self.current_path == G_CLOUD_PREFIX + self.rclone_remote_from_path(
self.current_path
):
self.current_path = G_CLOUD_PREFIX
elif self.current_path.count("/") == 0:
k = self.current_path.rfind(":")
self.current_path = self.current_path[: k + 1]
else:
if self.rclone_is_dir(self.current_path):
k = self.current_path.rfind("/")
self.current_path = self.current_path[:k]
else:
k = self.current_path.rfind("/")
self.current_path = self.current_path[:k]
if self.current_path.count("/") > 0:
k = self.current_path.rfind("/")
self.current_path = self.current_path[:k]
else:
k = self.current_path.rfind(":")
self.current_path = self.current_path[: k + 1]
# local drive
else:
if os.path.isfile(self.current_path):
k = self.current_path.rfind(
"/" + os.path.basename(self.current_path)
)
self.current_path = self.current_path[:k]
k = self.current_path.rfind("/" + os.path.basename(self.current_path))
self.current_path = self.current_path[:k]
else:
self.current_path = path
if self.current_path.startswith(G_CLOUD_PREFIX):
self.rclone_browse(self.current_path, say)
self.selected_item_idx = 0
return
if self.current_path == G_LOCAL_FILES_ROOT:
self.display_root_items(say)
self.selected_item_idx = 0
return
if os.path.isdir(self.current_path):
self.display_current_items(say)
self.selected_item_idx = 0
return
else:
# file was selected, check mimetype and play if possible
self.play_file(say)
def is_rclone_path(self, path):
if path.startswith(G_CLOUD_PREFIX):
return True
return False
def rclone_remote_from_path(self, path):
remote = path.replace(G_CLOUD_PREFIX, "")
k = remote.find(":")
remote = remote[: k + 1]
return remote
def rclone_fix_permissions(self):
command = 'su -c "chmod -R 777 /sdcard/rclone"'
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
# process.wait()
def rclone_append_listremotes(self):
remotes = rclone_get_remotes_long()
self.display_current_remotes(remotes)
if len(remotes) == 0:
self.say(
"Nie masz żadnych dysków zdalnych. "
"Dodaj połączenie do dysku zdalnego za pomocą konfiguratora w aplikacji."
)
else:
self.say(
"Mamy "
+ get_pozycji_variety(len(remotes))
+ " Wybierz dysk który mam przeglądać."
)
def rclone_browse_folder(self, path, say):
if ais_global.G_DRIVE_SHARED_WITH_ME in path:
if ais_global.G_DRIVE_SHARED_WITH_ME + "/" in path:
path = path.replace(ais_global.G_DRIVE_SHARED_WITH_ME + "/", "")
else:
path = path.replace(ais_global.G_DRIVE_SHARED_WITH_ME, "")
rclone_cmd = [
"rclone",
"lsjson",
path,
G_RCLONE_CONF,
"--drive-formats=txt",
"--drive-shared-with-me",
]
else:
rclone_cmd = [
"rclone",
"lsjson",
path,
G_RCLONE_CONF,
"--drive-formats=txt",
]
proc = subprocess.run(
rclone_cmd, encoding="utf-8", stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
# will wait for the process to complete and then we are going to return the output
if "" != proc.stderr:
self.say("Nie można pobrać zawartości folderu " + path + " " + proc.stderr)
else:
self.folders_json = json.loads(proc.stdout)
self.display_current_remote_items(say)
def rclone_copy_and_read(self, path, item_path):
# clear .temp files
files = os.listdir(G_LOCAL_FILES_ROOT + "/.temp/")
for file in files:
os.remove(os.path.join(G_LOCAL_FILES_ROOT + "/.temp/", file))
if ais_global.G_DRIVE_SHARED_WITH_ME in path:
rclone_cmd = [
"rclone",
"copy",
path.replace(ais_global.G_DRIVE_SHARED_WITH_ME, ""),
G_LOCAL_FILES_ROOT + "/.temp/",
G_RCLONE_CONF,
"--drive-formats=txt",
"--drive-shared-with-me",
]
else:
rclone_cmd = [
"rclone",
"copy",
path,
G_LOCAL_FILES_ROOT + "/.temp/",
G_RCLONE_CONF,
"--drive-formats=txt",
]
proc = subprocess.run(
rclone_cmd, encoding="utf-8", stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
if "" != proc.stderr:
self.say("Nie udało się pobrać pliku " + proc.stderr)
else:
try:
with open(G_LOCAL_FILES_ROOT + "/.temp/" + item_path) as file:
self.say(file.read())
except Exception as e:
self.say("Nie udało się otworzyć pliku ")
def rclone_play_the_stream(self):
_audio_info = {
"NAME": os.path.basename(self.rclone_url_to_stream),
"MEDIA_SOURCE": ais_global.G_AN_LOCAL,
"ALBUM_NAME": os.path.basename(os.path.dirname(self.current_path)),
"media_content_id": self.rclone_url_to_stream,
"lookup_url": self.current_path,
"media_position_ms": self.seek_position,
}
_audio_info = json.dumps(_audio_info)
# to set the stream image and title
self.hass.services.call(
"media_player",
"play_media",
{
"entity_id": "media_player.wbudowany_glosnik",
"media_content_type": "ais_content_info",
"media_content_id": _audio_info,
},
)
# seek position
self.seek_position = 0
def check_kill_process(self, pstring):
for line in os.popen("ps ax | grep " + pstring + " | grep -v grep"):
fields = line.split()
pid = fields[0]
os.kill(int(pid), signal.SIGKILL)
def rclone_serve_and_play_the_stream(self, path, item_path):
# serve and play
if ais_global.G_DRIVE_SHARED_WITH_ME in path:
path = path.replace(ais_global.G_DRIVE_SHARED_WITH_ME, "")
rclone_cmd = (
"rclone serve http '" + path + "' " + G_RCLONE_CONF + " --addr=:8080"
)
self.rclone_url_to_stream = G_RCLONE_URL_TO_STREAM + str(item_path)
import pexpect
try:
if self.rclone_pexpect_stream is not None:
self.rclone_pexpect_stream.kill(0)
self.check_kill_process("rclone")
self.rclone_pexpect_stream = pexpect.spawn(rclone_cmd)
# expect() returns the index of the matched pattern; compare that index,
# not the spawn object itself
expect_index = self.rclone_pexpect_stream.expect(
["Serving on", "Failed to", pexpect.EOF], timeout=10
)
_LOGGER.info(str(self.rclone_pexpect_stream.before, "utf-8"))
if expect_index == 0:
_LOGGER.info("Serving stream")
elif expect_index == 1:
_LOGGER.info("Problem, kill rclone")
self.rclone_pexpect_stream.kill(0)
self.check_kill_process("rclone")
elif expect_index == 2:
_LOGGER.info("EOF")
self.rclone_play_the_stream()
except Exception as e:
_LOGGER.info("Rclone: " + str(e))
def rclone_is_dir(self, path):
# check if path is dir or file
for item in self.folders_json:
if path.endswith(item["Path"]):
return item["IsDir"]
return True
def rclone_browse(self, path, say):
if path == G_CLOUD_PREFIX:
self.rclone_append_listremotes()
return
is_dir = None
mime_type = ""
item_name = ""
item_path = ""
# check what was selected file or folder
for item in self.folders_json:
# now check if item is a dictionary
if path.endswith(item["Path"]):
item_path = item["Path"]
is_dir = item["IsDir"]
item_name = item["Name"]
if "MimeType" in item:
mime_type = item["MimeType"]
break
if is_dir is None:
is_dir = True
if is_dir:
# browse the cloud drive
path = path.replace(G_CLOUD_PREFIX, "", 1)
self.say("Pobieram")
self.rclone_browse_folder(path, say)
else:
self.dispalay_current_path()
# file was selected, check the MimeType
# "MimeType":"audio/mp3" and "text/plain" are supported
path = path.replace(G_CLOUD_PREFIX, "")
if mime_type is None:
mime_type = ""
if (
mime_type.startswith("audio/")
or mime_type.startswith("video/")
or mime_type.startswith("application/")
):
# StreamTask().execute(fileItem);
self.say("Pobieram i odtwarzam: " + str(item_name))
self.rclone_serve_and_play_the_stream(path, item_path)
elif mime_type.startswith("text/"):
self.say("Pobieram i czytam: " + str(item_name))
self.rclone_copy_and_read(path, item_path)
else:
self.say("Jeszcze nie obsługuję plików typu: " + str(mime_type))
def sync_locations(self, call):
if "source_path" not in call.data:
_LOGGER.error("No source_path")
return []
if "dest_path" not in call.data:
_LOGGER.error("No dest_path")
return []
if "say" in call.data:
say = call.data["say"]
else:
say = False
if say:
self.say(
"Synchronizuję lokalizację "
+ call.data["source_path"]
+ " z "
+ call.data["dest_path"]
+ " modyfikuję tylko "
+ call.data["source_path"]
)
rclone_cmd = [
"rclone",
"sync",
call.data["source_path"],
call.data["dest_path"],
"--transfers=1",
"--stats=0",
G_RCLONE_CONF,
]
proc = subprocess.run(
rclone_cmd, encoding="utf-8", stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
# will wait for the process to complete and then we are going to return the output
if "" != proc.stderr:
self.say("Błąd podczas synchronizacji: " + proc.stderr)
else:
self.say("Synchronizacja zakończona.")
def play_next(self, call):
state = self.hass.states.get("sensor.ais_drives")
attr = state.attributes
files = attr.get("files", [])
l_idx = 0
i = 0
for f in files:
i = i + 1
if f["path"].replace(G_LOCAL_FILES_ROOT, "") == state.state:
l_idx = i
if l_idx == len(files):
l_idx = min(2, len(files))
self._browse_path(files[l_idx]["path"], True)
def play_prev(self, call):
state = self.hass.states.get("sensor.ais_drives")
attr = state.attributes
files = attr.get("files", [])
l_idx = 0
i = 0
for f in files:
i = i + 1
if f["path"].replace(G_LOCAL_FILES_ROOT, "") == state.state:
l_idx = i
if l_idx == 3:
l_idx = len(files) - 1
else:
l_idx = l_idx - 2
self._browse_path(files[l_idx]["path"], True)
def get_item_name(self, path):
path = path.rstrip(":")
if path.count("/") > 0:
name = path.split("/").pop()
else:
name = path.split(":").pop()
name = name.replace(":", "")
name = name.replace("-", " ")
return name
def remote_next_item(self, say):
state = self.hass.states.get("sensor.ais_drives")
attr = state.attributes
files = attr.get("files", [])
if len(state.state) == 0:
if self.selected_item_idx == len(files) - 1:
self.selected_item_idx = 0
else:
self.selected_item_idx = self.selected_item_idx + 1
else:
if len(files) == 2:
self.say("brak pozycji")
return
if self.selected_item_idx == len(files) - 1:
self.selected_item_idx = 2
else:
self.selected_item_idx = max(self.selected_item_idx + 1, 2)
if say:
name = self.get_item_name(files[self.selected_item_idx]["path"])
self.say(name)
def remote_prev_item(self, say):
state = self.hass.states.get("sensor.ais_drives")
attr = state.attributes
files = attr.get("files", [])
if len(state.state) == 0:
if self.selected_item_idx < 1:
self.selected_item_idx = len(files) - 1
else:
self.selected_item_idx = self.selected_item_idx - 1
else:
if len(files) == 2:
self.say("brak pozycji")
return
if self.selected_item_idx < 3:
self.selected_item_idx = len(files) - 1
else:
self.selected_item_idx = self.selected_item_idx - 1
if | |
<filename>plugins/son-mano-service-lifecycle-management/test/test_slm.py
# Copyright (c) 2015 SONATA-NFV, 2017 5GTANGO
# ALL RIGHTS RESERVED.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Neither the name of the SONATA-NFV, 5GTANGO
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# This work has been performed in the framework of the SONATA project,
# funded by the European Commission under Grant number 671517 through
# the Horizon 2020 and 5G-PPP programmes. The authors would like to
# acknowledge the contributions of their colleagues of the SONATA
# partner consortium (www.sonata-nfv.eu).
#
# This work has been performed in the framework of the 5GTANGO project,
# funded by the European Commission under Grant number 761493 through
# the Horizon 2020 and 5G-PPP programmes. The authors would like to
# acknowledge the contributions of their colleagues of the 5GTANGO
# partner consortium (www.5gtango.eu).
import unittest
import time
import json
import yaml
import threading
import logging
import uuid
import son_mano_slm.slm_helpers as tools
from unittest import mock
from multiprocessing import Process
from son_mano_slm.slm import ServiceLifecycleManager
from sonmanobase.messaging import ManoBrokerRequestResponseConnection
from collections import namedtuple
logging.basicConfig(level=logging.INFO)
logging.getLogger('amqp-storm').setLevel(logging.INFO)
LOG = logging.getLogger("son-mano-plugins:slm_test")
logging.getLogger("son-mano-base:messaging").setLevel(logging.INFO)
logging.getLogger("son-mano-base:plugin").setLevel(logging.INFO)
LOG.setLevel(logging.INFO)
class testSlmFunctionality(unittest.TestCase):
"""
Tests the tasks that the SLM should perform in the service
life cycle of the network services.
"""
slm_proc = None
uuid = '1'
corr_id = '1ba347d6-6210-4de7-9ca3-a383e50d0330'
########################
#SETUP
########################
def setUp(self):
def on_register_trigger(ch, method, properties, message):
return json.dumps({'status': 'OK', 'uuid': self.uuid})
#vnfcounter for when needed
self.vnfcounter = 0
#Generate a new corr_id for every test
self.corr_id = str(uuid.uuid4())
#a new SLM in another process for each test
self.slm_proc = ServiceLifecycleManager(start_running=False)
#We make a spy connection to listen to the different topics on the broker
self.manoconn_spy = ManoBrokerRequestResponseConnection('son-plugin.SonSpy')
#we need a connection to simulate messages from the gatekeeper
self.manoconn_gk = ManoBrokerRequestResponseConnection('son-plugin.SonGateKeeper')
#we need a connection to simulate messages from the infrastructure adaptor
self.manoconn_ia = ManoBrokerRequestResponseConnection('son-plugin.SonInfrastructureAdapter')
#Some threading events that can be used during the tests
self.wait_for_first_event = threading.Event()
self.wait_for_first_event.clear()
#The uuid that can be assigned to the plugin
self.uuid = '1'
def tearDown(self):
#Killing the slm
self.slm_proc.manoconn.stop_connection()
self.slm_proc.manoconn.stop_threads()
try:
del self.slm_proc
except:
pass
#Killing the connection with the broker
self.manoconn_spy.stop_connection()
self.manoconn_gk.stop_connection()
self.manoconn_ia.stop_connection()
self.manoconn_spy.stop_threads()
self.manoconn_gk.stop_threads()
self.manoconn_ia.stop_threads()
del self.manoconn_spy
del self.manoconn_gk
del self.manoconn_ia
del self.wait_for_first_event
########################
#GENERAL
########################
def createGkNewServiceRequestMessage(self, correctlyFormatted=True):
"""
This method helps create messages for the service request packets.
If the message needs to be wrongly formatted, the NSD part of the
request is removed.
"""
path_descriptors = '/plugins/son-mano-service-lifecycle-management/test/test_descriptors/'
nsd_descriptor = open(path_descriptors + 'sonata-demo.yml', 'r')
vnfd1_descriptor = open(path_descriptors + 'firewall-vnfd.yml', 'r')
vnfd2_descriptor = open(path_descriptors + 'iperf-vnfd.yml', 'r')
vnfd3_descriptor = open(path_descriptors + 'tcpdump-vnfd.yml', 'r')
#import the nsd and vnfds that form the service
if correctlyFormatted:
service_request = {'NSD': yaml.load(nsd_descriptor), 'VNFD1': yaml.load(vnfd1_descriptor), 'VNFD2': yaml.load(vnfd2_descriptor), 'VNFD3': yaml.load(vnfd3_descriptor)}
else:
service_request = {'VNFD1': yaml.load(vnfd1_descriptor), 'VNFD2': yaml.load(vnfd2_descriptor), 'VNFD3': yaml.load(vnfd3_descriptor)}
return yaml.dump(service_request)
#Method that terminates the timer that waits for an event
def firstEventFinished(self):
self.wait_for_first_event.set()
#Method that starts a timer, waiting for an event
def waitForFirstEvent(self, timeout=5, msg="Event timed out."):
if not self.wait_for_first_event.wait(timeout):
self.fail(msg)
def dummy(self, ch, method, properties, message):
"""
Sometimes we need a callback function (cbf) for an async_call without actually using it.
"""
return
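#A hedged sketch (not part of the original test suite) of how the spy
#connection set up above could verify a message on the broker. It assumes
#ManoBrokerRequestResponseConnection exposes subscribe() and call_async()
#with the signatures used elsewhere in these tests; the topic name
#'service.instances.create' matches the one used in the tests below.
#
# def example_spy_usage(self):
#     def on_message(ch, method, properties, message):
#         LOG.info("Spy caught a message: %s", message)
#         self.firstEventFinished()
#     self.manoconn_spy.subscribe(on_message, 'service.instances.create')
#     self.manoconn_gk.call_async(self.dummy,
#                                 'service.instances.create',
#                                 msg=self.createGkNewServiceRequestMessage(),
#                                 correlation_id=self.corr_id)
#     self.waitForFirstEvent(msg="No message was caught by the spy.")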
#############################################################
#TEST1: test validate_deploy_request
#############################################################
def test_validate_deploy_request(self):
"""
The method validate_deploy_request is used to check whether the
received message that requests the deployment of a new service is
correctly formatted.
"""
#Setup
service_dict = {}
service_id = str(uuid.uuid4())
corr_id = str(uuid.uuid4())
service_dict[service_id] = {'original_corr_id':corr_id}
# #SUBTEST1: Check a correctly formatted message
# message = self.createGkNewServiceRequestMessage()
# service_dict[service_id]['payload'] = yaml.load(message)
# self.slm_proc.set_services(service_dict)
# self.slm_proc.validate_deploy_request(service_id)
# result = self.slm_proc.get_services()
# self.assertEqual({'status': result[service_id]['status'],
# 'error': result[service_id]['error']},
# {'status': 'INSTANTIATING', 'error': None},
# msg="outcome and expected result not equal SUBTEST1.")
#SUBTEST2: Check a message that is not a dictionary
message = "test message"
service_dict[service_id]['payload'] = message
self.slm_proc.set_services(service_dict)
self.slm_proc.validate_deploy_request(service_id)
result = self.slm_proc.get_services()
expected_message = "Request " + corr_id + ": payload is not a dict."
expected_response = {'status': 'ERROR', 'error': expected_message}
self.assertEqual({'status': result[service_id]['status'],
'error': result[service_id]['error']},
expected_response,
msg="outcome and expected result not equal SUBTEST2.")
#SUBTEST3: Check a message that contains no NSD
message = self.createGkNewServiceRequestMessage()
loaded_message = yaml.load(message)
del loaded_message['NSD']
service_dict[service_id]['payload'] = loaded_message
self.slm_proc.set_services(service_dict)
self.slm_proc.validate_deploy_request(service_id)
result = self.slm_proc.get_services()
expected_message = "Request " + corr_id + ": NSD is not a dict."
expected_response = {'status': 'ERROR', 'error': expected_message}
self.assertEqual({'status': result[service_id]['status'],
'error': result[service_id]['error']},
expected_response,
msg="outcome and expected result not equal SUBTEST3.")
#SUBTEST4: The number of VNFDs must be the same as listed in the NSD
message = self.createGkNewServiceRequestMessage()
loaded_message = yaml.load(message)
loaded_message['NSD']['network_functions'].append({})
service_dict[service_id]['payload'] = loaded_message
self.slm_proc.set_services(service_dict)
self.slm_proc.validate_deploy_request(service_id)
result = self.slm_proc.get_services()
expected_message = "Request " + corr_id + ": # of VNFDs doesn't match NSD."
expected_response = {'status': 'ERROR', 'error': expected_message}
self.assertEqual({'status': result[service_id]['status'],
'error': result[service_id]['error']},
expected_response,
msg="outcome and expected result not equal SUBTEST4.")
#SUBTEST5: VNFDs can not be empty
message = self.createGkNewServiceRequestMessage()
loaded_message = yaml.load(message)
loaded_message['VNFD1'] = None
service_dict[service_id]['payload'] = loaded_message
self.slm_proc.set_services(service_dict)
self.slm_proc.validate_deploy_request(service_id)
result = self.slm_proc.get_services()
expected_message = "Request " + corr_id + ": empty VNFD."
expected_response = {'status': 'ERROR', 'error': expected_message}
self.assertEqual({'status': result[service_id]['status'],
'error': result[service_id]['error']},
expected_response,
msg="outcome and expected result not equal SUBTEST5.")
###########################################################
#TEST2: Test start_next_task
###########################################################
def test_start_next_task(self):
"""
This method tests the start_next_task method
"""
#Setup
service_dict = {}
service_id = str(uuid.uuid4())
corr_id = str(uuid.uuid4())
orig_corr_id = str(uuid.uuid4())
service_dict[service_id] = {'corr_id':corr_id,
'original_corr_id': orig_corr_id,
'pause_chain': True,
'kill_chain': False}
#SUBTEST1: Check if next task is correctly called
message = self.createGkNewServiceRequestMessage()
#Add a task to the list
task_list = ['validate_deploy_request']
#Create the ledger
service_dict[service_id]['schedule'] = task_list
service_dict[service_id]['payload'] = yaml.load(message)
#Run the method
self.slm_proc.set_services(service_dict)
self.slm_proc.start_next_task(service_id)
#wait for the task to finish
time.sleep(0.1)
result = self.slm_proc.get_services()
#Check result
generated_response = {'status': result[service_id]['status'],
'error': result[service_id]['error']}
expected_response = {'status': 'INSTANTIATING', 'error': None}
self.assertEqual(generated_response,
expected_response,
msg="outcome and expected result not equal SUBTEST1.")
#Setup
service_dict = {}
service_id = str(uuid.uuid4())
corr_id = str(uuid.uuid4())
orig_corr_id = str(uuid.uuid4())
service_dict[service_id] = {'corr_id':corr_id,
'original_corr_id': orig_corr_id,
'pause_chain': False,
'kill_chain': False}
#SUBTEST2: Check behavior if there is no next task
message = self.createGkNewServiceRequestMessage()
#Add a task to the list
task_list = []
#Create the ledger
service_dict[service_id]['schedule'] = task_list
service_dict[service_id]['payload'] = yaml.load(message)
#Run the method
self.slm_proc.set_services(service_dict)
self.slm_proc.start_next_task(service_id)
#wait for the task to finish
time.sleep(0.1)
result = self.slm_proc.get_services()
#Check result: if successful, service_id will not be a key in result
self.assertFalse(service_id in result.keys(),
msg="key is part of ledger in SUBTEST2.")
# ###############################################################
# #TEST3: Test service_instance_create
# ###############################################################
# def test_service_instance_create(self):
# """
# This method tests the service_instance_create method of the SLM
# """
# #Setup
# message = self.createGkNewServiceRequestMessage()
# corr_id = str(uuid.uuid4())
# topic = "service.instances.create"
# prop_dict = {'reply_to': topic,
# 'correlation_id': corr_id,
# 'app_id': "Gatekeeper"}
# properties = namedtuple('properties', prop_dict.keys())(*prop_dict.values())
# schedule = self.slm_proc.service_instance_create('foo',
# 'bar',
# properties,
# message)
# #Check result: since we don't know how many of the tasks
# #were completed by the time we got the result, we only check
# #the final elements in the tasklist
# #The last 7 elements from the generated result
# generated_result = schedule[-7:]
# #The expected last 7 elements in the list
# expected_result = ['SLM_mapping', 'ia_prepare', 'vnf_deploy',
# 'vnf_chain', 'wan_configure',
# 'instruct_monitoring', 'inform_gk']
# self.assertEqual(generated_result,
# expected_result,
# msg='lists are not equal')
###############################################################
#TEST4: Test resp_topo
###############################################################
# def test_resp_topo(self):
# """
# This method tests the resp_topo method.
# """
# #Setup
# #Create topology message
# first = {'vim_uuid':str(uuid.uuid4()), 'memory_used':5, 'memory_total':12, 'core_used':4, 'core_total':6}
# second = {'vim_uuid':str(uuid.uuid4()), 'memory_used':3, 'memory_total':5, 'core_used':4, 'core_total':5}
# third = {'vim_uuid':str(uuid.uuid4()), 'memory_used':6, 'memory_total':7, 'core_used':2, 'core_total':12}
# topology_message = [first, second, third]
# payload = yaml.dump(topology_message)
# #Create ledger
# service_dict = {}
# service_id = str(uuid.uuid4())
# corr_id = str(uuid.uuid4())
# service_dict[service_id] = {'act_corr_id':corr_id,
# 'infrastructure': {'topology':None},
# 'schedule': ['get_ledger'],
# 'original_corr_id':corr_id,
# 'pause_chain': True,
# 'kill_chain': False}
# self.slm_proc.set_services(service_dict)
# #Create properties
# topic = "infrastructure.management.compute.list"
# prop_dict = {'reply_to': topic,
# 'correlation_id': corr_id,
# 'app_id': 'InfrastructureAdaptor'}
# properties = namedtuple('properties', prop_dict.keys())(*prop_dict.values())
# #Run method
# self.slm_proc.resp_topo('foo', 'bar', properties, payload)
# #Check result
# azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/operations/job_operations.py
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
import uuid
from .. import models
class JobOperations(object):
"""JobOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.config = config
def get_statistics(
self, account_name, job_identity, custom_headers=None, raw=False, **operation_config):
"""Gets statistics of the specified job.
:param account_name: The Azure Data Lake Analytics account to execute
job operations on.
:type account_name: str
:param job_identity: Job Information ID.
:type job_identity: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`JobStatistics
<azure.mgmt.datalake.analytics.job.models.JobStatistics>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/Jobs/{jobIdentity}/GetStatistics'
path_format_arguments = {
'accountName': self._serialize.url("account_name", account_name, 'str', skip_quote=True),
'adlaJobDnsSuffix': self._serialize.url("self.config.adla_job_dns_suffix", self.config.adla_job_dns_suffix, 'str', skip_quote=True),
'jobIdentity': self._serialize.url("job_identity", job_identity, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('JobStatistics', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
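# A hedged usage sketch (not part of the generated client). It assumes the
# client is built through DataLakeAnalyticsJobManagementClient from this
# package and that these operations are exposed on its `job` attribute; the
# account name and job id below are placeholders.
#
#   from azure.mgmt.datalake.analytics.job import DataLakeAnalyticsJobManagementClient
#   client = DataLakeAnalyticsJobManagementClient(credentials, 'azuredatalakeanalytics.net')
#   stats = client.job.get_statistics('my-adla-account', 'job-guid')
#   print(stats)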
def get_debug_data_path(
self, account_name, job_identity, custom_headers=None, raw=False, **operation_config):
"""Gets the job debug data information specified by the job ID.
:param account_name: The Azure Data Lake Analytics account to execute
job operations on.
:type account_name: str
:param job_identity: JobInfo ID.
:type job_identity: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`JobDataPath
<azure.mgmt.datalake.analytics.job.models.JobDataPath>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/Jobs/{jobIdentity}/GetDebugDataPath'
path_format_arguments = {
'accountName': self._serialize.url("account_name", account_name, 'str', skip_quote=True),
'adlaJobDnsSuffix': self._serialize.url("self.config.adla_job_dns_suffix", self.config.adla_job_dns_suffix, 'str', skip_quote=True),
'jobIdentity': self._serialize.url("job_identity", job_identity, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('JobDataPath', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def build(
self, account_name, parameters, custom_headers=None, raw=False, **operation_config):
"""Builds (compiles) the specified job in the specified Data Lake
Analytics account for job correctness and validation.
:param account_name: The Azure Data Lake Analytics account to execute
job operations on.
:type account_name: str
:param parameters: The parameters to build a job.
:type parameters: :class:`JobInformation
<azure.mgmt.datalake.analytics.job.models.JobInformation>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`JobInformation
<azure.mgmt.datalake.analytics.job.models.JobInformation>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/BuildJob'
path_format_arguments = {
'accountName': self._serialize.url("account_name", account_name, 'str', skip_quote=True),
'adlaJobDnsSuffix': self._serialize.url("self.config.adla_job_dns_suffix", self.config.adla_job_dns_suffix, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'JobInformation')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('JobInformation', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def cancel(
self, account_name, job_identity, custom_headers=None, raw=False, **operation_config):
"""Cancels the running job specified by the job ID.
:param account_name: The Azure Data Lake Analytics account to execute
job operations on.
:type account_name: str
:param job_identity: JobInfo ID to cancel.
:type job_identity: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/Jobs/{jobIdentity}/CancelJob'
path_format_arguments = {
'accountName': self._serialize.url("account_name", account_name, 'str', skip_quote=True),
'adlaJobDnsSuffix': self._serialize.url("self.config.adla_job_dns_suffix", self.config.adla_job_dns_suffix, 'str', skip_quote=True),
'jobIdentity': self._serialize.url("job_identity", job_identity, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get(
self, account_name, job_identity, custom_headers=None, raw=False, **operation_config):
"""Gets the job information for the specified job ID.
:param account_name: The Azure Data Lake Analytics account to execute
job operations on.
:type account_name: str
:param job_identity: JobInfo ID.
:type job_identity: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`JobInformation
<azure.mgmt.datalake.analytics.job.models.JobInformation>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/Jobs/{jobIdentity}'
path_format_arguments = {
'accountName': self._serialize.url("account_name", account_name, 'str', skip_quote=True),
'adlaJobDnsSuffix': self._serialize.url("self.config.adla_job_dns_suffix", self.config.adla_job_dns_suffix, 'str', skip_quote=True),
'jobIdentity': self._serialize.url("job_identity", job_identity, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('JobInformation', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create(
self, account_name, job_identity, parameters, custom_headers=None, raw=False, **operation_config):
"""Submits a job to the specified Data Lake Analytics account.
:param account_name: The Azure Data Lake Analytics account to execute
job operations on.
:type account_name: str
:param job_identity: The job ID (a GUID) for the job being submitted.
:type job_identity: str
:param parameters: The parameters to submit a job.
:type parameters: :class:`JobInformation
<azure.mgmt.datalake.analytics.job.models.JobInformation>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`JobInformation
<azure.mgmt.datalake.analytics.job.models.JobInformation>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/Jobs/{jobIdentity}'
path_format_arguments = {
'accountName': self._serialize.url("account_name", account_name, 'str', skip_quote=True),
'adlaJobDnsSuffix': self._serialize.url("self.config.adla_job_dns_suffix", self.config.adla_job_dns_suffix, 'str', skip_quote=True),
'jobIdentity': self._serialize.url("job_identity", job_identity, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'JobInformation')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('JobInformation', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
for a in [ax, ratio_ax]:
a.set_xlim(0, 80)
auto_ticks(a, 'x', nbins=5, minor=2)
if ratio_ax.is_last_row():
ratio_ax.set_xlabel('Centrality %')
ax.set_ylim(plot['ylim'])
ax.set_ylabel(plot['ylabel'])
if plot.get('legend'):
ax.legend(
[handles[t][s] for t in ['model', 'expt'] for s in systems],
[fmt.format(parse_system(s)[1]/1000)
for fmt in ['', '{} TeV'] for s in systems],
ncol=2, loc='upper left', bbox_to_anchor=(0, .94),
columnspacing=0, handletextpad=0
)
ax.text(
.5, 1 if ax.is_first_row() else .97, plot['title'],
transform=ax.transAxes, ha='center', va='top',
size=plt.rcParams['axes.labelsize']
)
ratio_ax.axhline(
1,
linewidth=plt.rcParams['ytick.major.width'], color='0.5',
zorder=-100
)
ratio_ax.axhspan(.9, 1.1, color='0.93', zorder=-200)
ratio_ax.set_ylim(.85, 1.15)
ratio_ax.set_ylabel('Ratio')
ratio_ax.text(
ratio_ax.get_xlim()[1], .9, '±10%',
color='.5', zorder=-50,
ha='right', va='bottom',
size=plt.rcParams['xtick.labelsize']
)
set_tight(fig)
@plot
def observables_expt_only():
"""
Observables plots of experimental data only.
"""
plots = _observables_plots()
ylim = {
'Yields': (2, 1e5),
'Flow cumulants': (0, .15),
'Mean $p_T$': (0, 1.7),
'Mean $p_T$ fluctuations': (0, .045),
}
for n, p in enumerate(plots):
p['ylim'] = ylim[p['title']]
if p['title'] == 'Flow cumulants':
move_index = n
p.update(legend=True)
plots.insert(1, plots.pop(move_index))
ncols = int(len(plots)/2)
fig, axes = plt.subplots(
nrows=2, ncols=ncols,
figsize=figsize(1.1, aspect=1.5/ncols),
gridspec_kw=dict(
height_ratios=[p.get('height_ratio', 1) for p in plots[::ncols]]
)
)
for plot, ax in zip(plots, axes.flat):
labels = {}
handles = {}
for system, (obs, subobs, opts) in itertools.product(
systems, plot['subplots']
):
try:
dset = expt.data[system][obs][subobs]
except KeyError:
continue
x = dset['x']
yexp = dset['y']
yerr = dset['yerr']
yerrstat = yerr.get('stat')
yerrsys = yerr.get('sys', yerr.get('sum'))
scale = opts.get('scale')
if scale is not None:
yexp = yexp*scale
if yerrstat is not None:
yerrstat = yerrstat*scale
if yerrsys is not None:
yerrsys = yerrsys*scale
color = obs_color(obs, subobs)
fill_markers = {'PbPb2760': True, 'PbPb5020': False}[system]
c = darken(color, .15)
h = ax.errorbar(
x, yexp, yerr=yerrstat, fmt='o',
capsize=0, color=c,
mec=c, mfc=(c if fill_markers else '.9'),
mew=((.2 if fill_markers else .6) *
plt.rcParams['lines.linewidth']),
zorder=1000
)
if system not in handles:
handles[system] = h
ax.fill_between(
x, yexp - yerrsys, yexp + yerrsys,
facecolor='.9', zorder=-10,
)
if 'label' in opts and (obs, subobs) not in labels:
labels[obs, subobs] = ax.text(
x[-1] + 3, yexp[-1],
opts['label'],
color=darken(color), ha='left', va='center'
)
if plot.get('yscale') == 'log':
ax.set_yscale('log')
ax.minorticks_off()
else:
auto_ticks(ax, 'y', nbins=4, minor=2)
ax.set_xlim(0, 80)
auto_ticks(ax, 'x', nbins=5, minor=2)
if ax.is_last_row():
ax.set_xlabel('Centrality %')
ax.set_ylim(plot['ylim'])
ax.set_ylabel(plot['ylabel'])
if plot.get('legend'):
ax.legend(
*zip(*[(handles[s], format_system(s)) for s in systems]),
loc='upper left', bbox_to_anchor=(0, .94),
handletextpad=0
)
ax.text(
.5, 1 if ax.is_first_row() else .97, plot['title'],
transform=ax.transAxes, ha='center', va='top',
size=plt.rcParams['axes.labelsize']
)
set_tight(fig)
@plot
def find_map():
"""
Find the maximum a posteriori (MAP) point and compare emulator predictions
to experimental data.
"""
from scipy.optimize import minimize
chain = mcmc.Chain()
fixed_params = {
'trento_p': 0.,
'etas_min': .08,
'etas_hrg': .3,
'model_sys_err': .1,
}
opt_params = [k for k in chain.keys if k not in fixed_params]
def full_x(x):
x = dict(zip(opt_params, x), **fixed_params)
return [x[k] for k in chain.keys]
res = minimize(
lambda x: -chain.log_posterior(full_x(x))[0],
x0=np.median(chain.load(*opt_params, thin=1000), axis=0),
tol=1e-8,
bounds=[
(a + 1e-6*(b - a), b - 1e-6*(b - a))
for (a, b), k in zip(chain.range, chain.keys)
if k in opt_params
]
)
logging.debug('optimization result:\n%s', res)
width = max(map(len, chain.keys)) + 2
logging.info(
'MAP params:\n%s',
'\n'.join(
k.ljust(width) + str(x) for k, x in zip(chain.keys, full_x(res.x))
)
)
pred = chain._predict(np.atleast_2d(full_x(res.x)))
plots = _observables_plots()
fig, axes = plt.subplots(
nrows=2*len(plots), ncols=len(systems),
figsize=figsize(1.1, aspect=1.7),
gridspec_kw=dict(
height_ratios=list(itertools.chain.from_iterable(
(p.get('height_ratio', 1), .4) for p in plots
))
)
)
for (plot, system), ax, ratio_ax in zip(
itertools.product(plots, systems), axes[::2].flat, axes[1::2].flat
):
for obs, subobs, opts in plot['subplots']:
color = obs_color(obs, subobs)
scale = opts.get('scale')
x = model.data[system][obs][subobs]['x']
y = pred[system][obs][subobs][0]
if scale is not None:
y = y*scale
ax.plot(x, y, color=color)
if 'label' in opts:
ax.text(
x[-1] + 3, y[-1],
opts['label'],
color=darken(color), ha='left', va='center'
)
try:
dset = expt.data[system][obs][subobs]
except KeyError:
continue
x = dset['x']
yexp = dset['y']
yerr = dset['yerr']
yerrstat = yerr.get('stat')
yerrsys = yerr.get('sys', yerr.get('sum'))
if scale is not None:
yexp = yexp*scale
if yerrstat is not None:
yerrstat = yerrstat*scale
if yerrsys is not None:
yerrsys = yerrsys*scale
ax.errorbar(
x, yexp, yerr=yerrstat, fmt='o',
capsize=0, color='.25', zorder=1000
)
ax.fill_between(
x, yexp - yerrsys, yexp + yerrsys,
color='.9', zorder=-10
)
ratio_ax.plot(x, y/yexp, color=color)
if plot.get('yscale') == 'log':
ax.set_yscale('log')
ax.minorticks_off()
else:
auto_ticks(ax, 'y', nbins=4, minor=2)
for a in [ax, ratio_ax]:
a.set_xlim(0, 80)
auto_ticks(a, 'x', nbins=5, minor=2)
ax.set_xticklabels([])
ax.set_ylim(plot['ylim'])
if ax.is_first_row():
ax.set_title(format_system(system))
elif ax.is_last_row():
ax.set_xlabel('Centrality %')
if ax.is_first_col():
ax.set_ylabel(plot['ylabel'])
if ax.is_last_col():
ax.text(
1.02, .5, plot['title'],
transform=ax.transAxes, ha='left', va='center',
size=plt.rcParams['axes.labelsize'], rotation=-90
)
ratio_ax.axhline(1, lw=.5, color='0.5', zorder=-100)
ratio_ax.axhspan(0.9, 1.1, color='0.95', zorder=-200)
ratio_ax.set_ylim(0.8, 1.2)
ratio_ax.set_yticks(np.arange(80, 121, 20)/100)
ratio_ax.set_ylabel('Ratio')
set_tight(fig, rect=[0, 0, .97, 1])
def format_ci(samples, ci=.9):
"""
Compute the median and a credible interval for an array of samples and
return a TeX-formatted string.
"""
cil, cih = mcmc.credible_interval(samples, ci=ci)
median = np.median(samples)
ul = median - cil
uh = cih - median
# decide precision for formatting numbers
# this is NOT general but it works for the present data
if abs(median) < .05 or (uh + ul) < abs(median) < .5:
precision = 3
elif abs(median) < 5:
precision = 2
else:
precision = 1
fmt = str(precision).join(['{:#.', 'f}'])
return ''.join([
'$', fmt.format(median),
'_{-', fmt.format(ul), '}',
'^{+', fmt.format(uh), '}$'
])
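# Worked example (a sketch, not from the original analysis): for 10^5 samples
# drawn from a normal distribution with mean .15 and width .01, the 90%
# credible interval is roughly +/- 1.645*sigma, so format_ci returns a string
# like '$0.150_{-0.016}^{+0.016}$'.
#
#   samples = np.random.normal(.15, .01, 10**5)
#   print(format_ci(samples))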
def _posterior(
params=None, ignore=None,
scale=1, pad_subplots=-.1, rect_r=1, rect_t=.99,
cmap=None
):
"""
Triangle plot of posterior marginal and joint distributions.
"""
chain = mcmc.Chain()
if params is None and ignore is None:
params = set(chain.keys)
elif params is not None:
params = set(params)
elif ignore is not None:
params = set(chain.keys) - set(ignore)
keys, labels, ranges = map(list, zip(*(
i for i in zip(chain.keys, chain.labels, chain.range)
if i[0] in params
)))
ndim = len(params)
data = chain.load(*keys).T
cmap = plt.get_cmap(cmap)
cmap.set_bad('white')
line_color = cmap(.8)
fill_color = cmap(.5, alpha=.1)
fig, axes = plt.subplots(
nrows=ndim, ncols=ndim,
sharex='col', sharey='row',
figsize=figsize(.15*scale*ndim, aspect=1)
)
for samples, key, lim, ax in zip(data, keys, ranges, axes.diagonal()):
counts, edges = np.histogram(samples, bins=50, range=lim)
x = (edges[1:] + edges[:-1]) / 2
y = .85 * (lim[1] - lim[0]) * counts / counts.max() + lim[0]
# smooth histogram with monotonic cubic interpolation
interp = PchipInterpolator(x, y)
x = np.linspace(x[0], x[-1], 10*x.size)
y = interp(x)
ax.plot(x, y, linewidth=1, color=line_color)
ax.fill_between(x, lim[0], y, color=fill_color, zorder=-10)
ax.set_xlim(lim)
ax.set_ylim(lim)
if key == 'dmin3':
samples = samples**(1/3)
ax.annotate(
format_ci(samples), (.62, .92), xycoords='axes fraction',
ha='center', va='bottom', fontsize=fontsize['large']
)
for ny, nx in zip(*np.tril_indices_from(axes, k=-1)):
axes[ny][nx].hist2d(
data[nx], data[ny], bins=100,
range=(ranges[nx], ranges[ny]),
cmap=cmap, cmin=1
)
axes[nx][ny].set_axis_off()
for ax in axes.flat:
ax.tick_params(length=2/3*plt.rcParams['xtick.major.size'])
for key, label, axb, axl in zip(keys, labels, axes[-1], axes[:, 0]):
for axis in [axb.xaxis, axl.yaxis]:
axis.set_label_text(
label.replace(r'\ [', '$\n$['),
)
axis.set_tick_params(labelsize=fontsize['tiny'])
if key == 'dmin3':
ticks = [0., 1.2, 1.5, 1.7]
axis.set_ticklabels(list(map(str, ticks)))
axis.set_ticks([t**3 for t in ticks])
else:
axis.set_major_locator(ticker.LinearLocator(3))
if axis.axis_name == 'x' and any(
len(str(round(x, 5))) > 4 for x in axis.get_ticklocs()
):
for t in axis.get_ticklabels():
t.set_rotation(30)
axb.get_xticklabels()[0].set_horizontalalignment('left')
axb.get_xticklabels()[-1].set_horizontalalignment('right')
axl.get_yticklabels()[0].set_verticalalignment('bottom')
axl.get_yticklabels()[-1].set_verticalalignment('top')
set_tight(
fig, pad=0, w_pad=pad_subplots, h_pad=pad_subplots,
rect=(0, 0, rect_r, rect_t)
)
@plot
def posterior():
_posterior(ignore={'etas_hrg'})
@plot
def posterior_shear():
_posterior(
scale=1.35, pad_subplots=.1, rect_t=.97,
params={'etas_min', 'etas_slope', 'etas_crv'}
)
@plot
def posterior_bulk():
_posterior(
scale=1.35, pad_subplots=.1, rect_t=.97,
params={'zetas_max', 'zetas_width', 'zetas_t0'}
)
@plot
def posterior_p():
"""
Distribution of trento p parameter with annotations for other models.
"""
plt.figure(figsize=figsize(.8, .35))
ax = plt.axes()
data = mcmc.Chain().load('trento_p').ravel()
counts, edges = np.histogram(data, bins=50)
x = (edges[1:] + edges[:-1]) / 2
y = counts / counts.max()
interp = PchipInterpolator(x, y)
x = np.linspace(x[0], x[-1], 10*x.size)
y = interp(x)
ax.plot(x, y, color=plt.cm.Blues(0.8))
ax.fill_between(x, y, color=plt.cm.Blues(0.15), zorder=-10)
ax.set_xlabel('$p$')
for spine in ax.spines.values():
spine.set_visible(False)
for label, x, err in [
('KLN', -.67, .01),
('EKRT /\nIP-Glasma', 0, .1),
('Wounded\nnucleon', 1, None),
]:
args = ([x], [0], 'o') if err is None else ([x - err, x + err], [0, 0])
ax.plot(*args, lw=4, ms=4, color=offblack, alpha=.58, clip_on=False)
if label.startswith('EKRT'):
x -= .29
ax.text(x, .05, label, va='bottom', ha='center')
ax.text(.1, .8, format_ci(data))
ax.set_xticks(np.arange(-10, 11, 5)/10)
ax.set_xticks(np.arange(-75, 76, 50)/100, minor=True)
xm = 1.2
ax.set_xlim(-xm, xm)
ax.add_artist(
patches.FancyArrowPatch(
(-xm, 0), (xm, 0),
linewidth=plt.rcParams['axes.linewidth'],
arrowstyle=patches.ArrowStyle.CurveFilledAB(
head_length=3, head_width=1.5
),
facecolor=offblack, edgecolor=offblack,
clip_on=False, zorder=100
)
)
ax.set_yticks([])
ax.set_ylim(0, 1.01*y.max())
set_tight(pad=0)
def _region(ax, name, cmap=plt.cm.Blues, legend=False, title=False):
"""
Visual estimate (posterior median and credible region) of
temperature-dependent shear or bulk viscosity.
"""
var, keys, function, ymax = dict(
shear=(
'eta',
['min', 'slope', 'crv'],
lambda T, m, s, c: m + s*(T - Tc)*(T/Tc)**c,
from django.shortcuts import render, redirect, get_object_or_404
from django.http import HttpResponse
from django.contrib.auth.decorators import login_required
from django.views.generic import CreateView, UpdateView
from django.core.paginator import Paginator, EmptyPage
from django.views.decorators.http import require_http_methods
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth import authenticate, login, logout
from django.contrib import messages
from django.urls import reverse_lazy, reverse
from django.db.models import Q
from django.utils.decorators import method_decorator
from lazysignup.decorators import (
allow_lazy_user,
require_nonlazy_user,
require_lazy_user,
)
from lazysignup.models import LazyUser
from lazysignup.utils import is_lazy_user
from .forms import (
IssueFormCreate,
AddDeveloper,
IssueFormUpdate,
CreateUserForm,
AddComment,
)
from .models import *
from django.contrib.auth.mixins import UserPassesTestMixin
from .decorators import group_required, group_excluded
from django.contrib.auth.models import Group
def load_users(request):
project_pk = request.GET.get("project")
users = User.objects.filter(
Q(project__pk=project_pk) | Q(leader_project_set__pk=project_pk)
).distinct()
return render(
request,
"issue_tracker/hr/user_assigned_dropdown_list_options.html",
{"users": users},
)
@allow_lazy_user
def set_demo_user(request):
if is_lazy_user(request.user) and not request.user.groups.filter(
name__in=("developer", "leader")
):
# Adding to groups
my_group1 = Group.objects.get(name="leader")
my_group1.user_set.add(request.user)
my_group2 = Group.objects.get(name="developer")
my_group2.user_set.add(request.user)
# Creating demo projects
# (username="admin", is_superuser=True) stands for superadmin user
admin_user = User.objects.get(username="admin", is_superuser=True)
project1 = Project.objects.create(
name="Demo Project1",
description="This project is made only for demo purposes",
leader=request.user,
)
project1.developer.add(request.user)
project1.developer.add(admin_user)
project2 = Project.objects.create(
name="Demo Project2",
description="This project is made only for demo purposes",
leader=admin_user,
)
project2.developer.add(request.user)
project2.developer.add(admin_user)
return redirect("issue_tracker:main")
return redirect("issue_tracker:main")
@allow_lazy_user
def sign_in(request):
if request.user.is_authenticated and not is_lazy_user(request.user):
return redirect("issue_tracker:main")
if request.method == "POST":
username = request.POST["username"]
password = request.POST["password"]
user = authenticate(username=username, password=password)
if user is not None:
login(request, user)
return redirect("issue_tracker:main")
else:
messages.info(request, "Wrong password or username")
return redirect(request.path)
else:
return render(request, "issue_tracker/sign_in.html")
@allow_lazy_user
@login_required(login_url="issue_tracker:sign-in")
def main(request):
return render(request, "issue_tracker/index.html")
@allow_lazy_user
def sign_up(request):
if request.user.is_authenticated and not is_lazy_user(request.user):
return redirect("issue_tracker:main")
form = CreateUserForm()
if request.method == "POST":
form = CreateUserForm(request.POST)
if form.is_valid():
form.save()
return redirect("issue_tracker:sign-in")
context = {"form": form}
return render(request, "issue_tracker/sign_up.html", context)
@method_decorator(group_required("developer", "leader"), name="get")
class Add_comment(CreateView):
def get(self, request, *args, **kwargs):
projects = Project.objects.filter(
Q(leader__pk=self.request.user.pk) | Q(developer__pk=self.request.user.pk)
)
issue = Issue.objects.filter(pk=self.kwargs["pk"], project__in=projects).first()
if issue:
return super().get(request, *args, **kwargs)
else:
return HttpResponse("You have no access to this comment")
model = Comment
form_class = AddComment
template_name = "issue_tracker/add_comment.html"
success_url = reverse_lazy("issue_tracker:main")
def get_success_url(self):
comment_issue_pk = self.kwargs["pk"]
return reverse(
"issue_tracker:issue-details-comments", kwargs={"pk": comment_issue_pk}
)
def get_form_kwargs(self):
comment_issue_pk = self.kwargs["pk"]
kwargs = super(Add_comment, self).get_form_kwargs()
kwargs["request"] = self.request
kwargs["comment_issue_pk"] = comment_issue_pk
return kwargs
def get_context_data(self, **kwargs):
context = super(Add_comment, self).get_context_data(**kwargs)
context["issue"] = Issue.objects.get(pk=self.kwargs["pk"])
return context
@login_required(login_url="issue_tracker:sign-in")
@group_required("admin")
@require_http_methods(["GET"])
def delete_comment(request, pk):
comment = Comment.objects.filter(pk=pk).first()
if comment:
comment.delete()
return redirect("issue_tracker:issue-details-comments", pk=comment.issue.pk)
else:
return HttpResponse("This comment does not exist")
@login_required(login_url="issue_tracker:sign-in")
@group_required("leader", "admin")
@require_http_methods(["GET"])
def developer_application_deny(request, pk):
application = DeveloperApplication.objects.filter(pk=pk).first()
if request.user.groups.filter(name="admin").exists():
# For admin user
if application:
application.delete()
else:
return HttpResponse("This application does not exist")
return redirect("issue_tracker:manage-developers-applications-list")
elif request.user.groups.filter(name="leader").exists():
# For leader user
if application:
if application.project.leader.pk == request.user.pk:
application.delete()
else:
return HttpResponse("This application does not exist")
return redirect("issue_tracker:manage-developers-applications-list")
@login_required(login_url="issue_tracker:sign-in")
@group_required("leader", "admin")
@require_http_methods(["GET"])
def developer_application_accept(request, pk):
application = DeveloperApplication.objects.filter(pk=pk).first()
if request.user.groups.filter(name="admin").exists():
if application:
application.project.developer.add(application.applicant)
application.delete()
my_group = Group.objects.get(name="developer")
my_group.user_set.add(application.applicant)
else:
return HttpResponse("This application does not exist")
return redirect("issue_tracker:manage-developers-applications-list")
elif request.user.groups.filter(name="leader").exists():
if application:
if application.project.leader.pk == request.user.pk:
application.project.developer.add(application.applicant)
application.delete()
my_group = Group.objects.get(name="developer")
my_group.user_set.add(application.applicant)
else:
return HttpResponse("This application does not exist")
return redirect("issue_tracker:manage-developers-applications-list")
@login_required(login_url="issue_tracker:sign-in")
@group_required("leader", "admin")
@require_http_methods(["GET"])
def manage_developers_applications_list(request):
context = {}
if request.user.groups.filter(name__in=("admin",)):
applications = (
DeveloperApplication.objects.all()
.select_related("project", "applicant")
.order_by("pk")
)
elif request.user.groups.filter(name__in=("leader",)):
applications = (
DeveloperApplication.objects.filter(project__leader=request.user)
.select_related("project", "applicant")
.order_by("pk")
)
paginator = Paginator(applications, 3, allow_empty_first_page=True)
page_number = request.GET.get("page")
page_obj = paginator.get_page(page_number)
if request.GET.get("search_query"):
search_query = request.GET.get("search_query")
context["search_query"] = str(search_query)
query = applications.filter(
Q(project__name__icontains=search_query)
| Q(applicant__username__icontains=search_query)
| Q(project__description__icontains=search_query)
).order_by("pk")
paginator = Paginator(query, 3, allow_empty_first_page=True)
page_number = request.GET.get("page")
page_obj = paginator.get_page(page_number)
context["page_obj"] = page_obj
context["applications"] = applications
return render(
request, "issue_tracker/manage_developers_applications_list.html", context
)
@login_required(login_url="issue_tracker:sign-in")
@group_required("admin")
@require_http_methods(["GET"])
def leader_application_deny(request, pk):
application = LeaderApplication.objects.filter(pk=pk).first()
if application:
application.delete()
else:
return HttpResponse("This application does not exist")
return redirect("issue_tracker:manage-leaders-applications-list")
@login_required(login_url="issue_tracker:sign-in")
@group_required("admin")
@require_http_methods(["GET"])
def leader_application_accept(request, pk):
application = LeaderApplication.objects.filter(pk=pk).first()
if application:
project_pk = application.project.pk
project = Project.objects.get(pk=application.project.pk)
if project.leader:
previous_leader = project.leader
Project.objects.filter(pk=project_pk).update(leader=application.applicant)
application.delete()
if previous_leader:
leader_group = Group.objects.get(name="leader")
# If previous_leader no longer leads any project, remove them from the leader group
if not previous_leader.leader_project_set.all():
leader_group.user_set.remove(previous_leader)
my_group = Group.objects.get(name="leader")
my_group.user_set.add(application.applicant)
else:
return HttpResponse("This application does not exist")
return redirect("issue_tracker:manage-leaders-applications-list")
@login_required(login_url="issue_tracker:sign-in")
@group_required("admin")
@require_http_methods(["GET"])
def manage_leaders_applications_list(request):
context = {}
applications = (
LeaderApplication.objects.all().select_related("project", "applicant")
).order_by("pk")
paginator = Paginator(applications, 3, allow_empty_first_page=True)
page_number = request.GET.get("page")
page_obj = paginator.get_page(page_number)
if request.GET.get("search_query"):
search_query = request.GET.get("search_query")
context["search_query"] = str(search_query)
query = applications.filter(
Q(project__name__icontains=search_query)
| Q(applicant__username__icontains=search_query)
| Q(project__description__icontains=search_query)
).order_by("pk")
paginator = Paginator(query, 3, allow_empty_first_page=True)
page_number = request.GET.get("page")
page_obj = paginator.get_page(page_number)
context["page_obj"] = page_obj
context["applications"] = applications
return render(
request, "issue_tracker/manage_leaders_applications_list.html", context
)
@login_required(login_url="issue_tracker:sign-in")
@group_excluded("admin")
@require_http_methods(["GET"])
def project_apply_developer(request, pk):
project = Project.objects.filter(pk=pk).first()
if not project:
return HttpResponse("This project does not exist.")
developer_pks = project.developer.values_list("pk", flat=True)
is_applied_already = DeveloperApplication.objects.filter(
project=project, applicant=request.user
).first()
if not (request.user.pk in developer_pks) and not is_applied_already:
DeveloperApplication.objects.create(applicant=request.user, project=project)
return redirect("issue_tracker:apply-project-list-all")
if is_applied_already:
return HttpResponse(
"You have already applied to be a developer in this project."
)
return HttpResponse("You are already a developer in this project.")
@login_required(login_url="issue_tracker:sign-in")
@group_excluded("admin")
@require_http_methods(["GET"])
def project_apply_leader(request, pk):
project = Project.objects.filter(pk=pk).first()
user_is_not_already_leader = False
try:
leader_pk = project.leader.pk
user_is_not_already_leader = request.user.pk != leader_pk
except AttributeError:
user_is_not_already_leader = True
is_applied_already = LeaderApplication.objects.filter(
project=project, applicant=request.user
).first()
if project and user_is_not_already_leader and not is_applied_already:
LeaderApplication.objects.create(applicant=request.user, project=project)
return redirect("issue_tracker:apply-project-list-all")
if is_applied_already:
return HttpResponse(
"You have already applied to be the leader of this project."
)
return HttpResponse("You are the leader of this project or the project does not exist.")
@login_required(login_url="issue_tracker:sign-in")
@group_excluded("admin")
@require_http_methods(["GET", "POST"])
def project_apply(request, pk):
project = Project.objects.filter(pk=pk).first()
if project:
context = {"pk": pk, "project": project}
return render(request, "issue_tracker/project_apply.html", context)
return HttpResponse("That project does not exist")
@login_required(login_url="issue_tracker:sign-in")
@group_excluded("admin")
@require_http_methods(["GET"])
def apply_project_list_all(request):
context = {}
projects = (
Project.objects.all().select_related("leader").prefetch_related("developer")
).order_by("pk")
paginator = Paginator(projects, 5, allow_empty_first_page=True)
page_number = request.GET.get("page")
page_obj = paginator.get_page(page_number)
if request.GET.get("search_query"):
search_query = request.GET.get("search_query")
context["search_query"] = str(search_query)
query = projects.filter(
Q(name__icontains=search_query) | Q(description__icontains=search_query)
).order_by("pk")
paginator = Paginator(query, 5, allow_empty_first_page=True)
page_number = request.GET.get("page")
page_obj = paginator.get_page(page_number)
context["page_obj"] = page_obj
return render(request, "issue_tracker/apply_project_list_all.html", context)
@method_decorator(group_required("developer", "leader"), name="get")
class Update_issue(UpdateView):
model = Issue
form_class = IssueFormUpdate
template_name = "issue_tracker/update_issue.html"
def get(self, request, *args, **kwargs):
pk = self.kwargs["pk"]
issue = Issue.objects.filter(
pk=pk, project__leader__pk=self.request.user.pk
).first()
if issue:
return super().get(request, *args, **kwargs)
else:
return HttpResponse("You have no access to this issue")
def get_object(self):
pk = self.kwargs["pk"]
issue = Issue.objects.filter(
pk=pk, project__leader__pk=self.request.user.pk
).first()
if issue:
return issue
def form_valid(self, form):
# If there are no changes, the issue will not be updated
updated_instance = form.save(commit=False)
original_instance = Issue.objects.get(pk=self.kwargs["pk"])
original_list = [
original_instance.title,
original_instance.creator,
original_instance.project,
original_instance.priority,
original_instance.status,
original_instance.type,
original_instance.description,
original_instance.user_assigned
]
updated_list = [
updated_instance.title,
updated_instance.creator,
updated_instance.project,
updated_instance.priority,
updated_instance.status,
updated_instance.type,
updated_instance.description,
updated_instance.user_assigned
]
if original_list == updated_list:
return super(Update_issue, self).form_invalid(form)
return super(Update_issue, self).form_valid(form)
def get_success_url(self):
issue = self.get_object()
issue_project_pk = issue.project.pk
return reverse(
"issue_tracker:manage-project-issues-list", kwargs={"pk": issue_project_pk}
)
def get_form_kwargs(self):
kwargs = super(Update_issue, self).get_form_kwargs()
kwargs["pk"] = self.kwargs["pk"]
kwargs["request"] = self.request
return kwargs
@method_decorator(group_required("developer", "leader"), name="get")
class Add_issue(CreateView):
model = Issue
form_class = IssueFormCreate
template_name = "issue_tracker/add_issue.html"
success_url = reverse_lazy("issue_tracker:main")
def get_form_kwargs(self):
kwargs = super(Add_issue, self).get_form_kwargs()
kwargs["request"] = self.request
return kwargs
def get_success_url(self):
return reverse("issue_tracker:issue-details", args=(self.object.pk,))
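# A hedged sketch (assumption; the app's urls.py is not part of this file) of
# how a few of these views could be wired. The route names come from the
# reverse()/redirect() calls above; the pattern strings and the names used
# for the class-based views are hypothetical.
#
#   from django.urls import path
#   from . import views
#
#   app_name = "issue_tracker"
#   urlpatterns = [
#       path("", views.main, name="main"),
#       path("sign-in/", views.sign_in, name="sign-in"),
#       path("sign-up/", views.sign_up, name="sign-up"),
#       path("issues/add/", views.Add_issue.as_view(), name="add-issue"),
#   ]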
@login_required(login_url="issue_tracker:sign-in")
@group_required("admin")
@require_http_methods(["GET"])
def all_projects(request):
context = {}
projects = Project.objects.all()
context = {"projects": projects}
return render(request, "issue_tracker/all_projects.html", context)
@login_required(login_url="issue_tracker:sign-in")
@group_excluded("admin")
@group_required("leader", "developer")
@require_http_methods(["GET"])
def my_projects(request):
context = {}
projects = (
Project.objects.filter(
Q(leader__pk=request.user.pk) | Q(developer__pk=request.user.pk)
)
.distinct()
.select_related("leader")
.prefetch_related("developer")
)
context = {"projects": projects}
return render(request, "issue_tracker/my_projects.html", context)
@login_required(login_url="issue_tracker:sign-in")
@group_required("leader", "admin")
@require_http_methods(["GET"])
def manage_projects_list(request):
context = {}
if request.user.groups.filter(name__in=("admin",)):
projects = Project.objects.all()
elif request.user.groups.filter(name__in=("leader",)):
projects = (
Project.objects.filter(leader__pk=request.user.pk)
.select_related("leader")
.prefetch_related("developer", "leader")
)
context = {"projects": projects}
return render(request, "issue_tracker/manage_projects_list.html", context)
@login_required(login_url="issue_tracker:sign-in")
@group_required("leader", "admin")
@require_http_methods(["GET"])
def manage_project_details(request, pk):
if request.user.groups.filter(name__in=("admin",)):
project_instance = Project.objects.filter(pk=pk).first()
elif request.user.groups.filter(name__in=("leader",)):
project_instance = Project.objects.filter(pk=pk, leader=request.user.pk).first()
if project_instance:
project = Project.objects.get(pk=pk)
context = {"project": project}
return render(request, "issue_tracker/manage_project_details.html", context)
return HttpResponse("You are not allowed to see this project")
@login_required(login_url="issue_tracker:sign-in")
@group_required("leader", "admin")
@require_http_methods(["GET", "POST"])
def manage_project_developers(request, pk):
if request.user.groups.filter(name__in=("admin",)):
project_instance = (
Project.objects.filter(pk=pk).prefetch_related("developer").first()
)
elif request.user.groups.filter(name__in=("leader",)):
project_instance = (
Project.objects.filter(pk=pk, leader=request.user.pk)
.prefetch_related("developer")
.first()
)
if project_instance:
developers_before = project_instance.developer.all()
form = AddDeveloper(instance=project_instance, request=request)
if request.method == "POST":
form = AddDeveloper(
request.POST, instance=project_instance, request=request
)
if form.is_valid():
new_project = form.save(commit=False)
new_project.developer.set(list(form.cleaned_data["developer"]))
developers_now = new_project.developer.all()
# Symmetric difference between the two sets: which developers changed on the list
# Then check whether each changed user still holds a developer position in any project
changed_developers = set(list(developers_now)) ^ set(
list(developers_before)
)
developer_group = Group.objects.get(name="developer")
for user in changed_developers:
# User is no longer a developer in any project; remove them from the group
if not user.project_set.all():
developer_group.user_set.remove(user)
# User is still a developer; add them to the group
else:
developer_group.user_set.add(user)
new_project.save()
return redirect(request.path)
else:
print(form.errors)
context = {"project": project_instance, "form": form}
return render(request, "issue_tracker/manage_project_developers.html", context)
return HttpResponse("You are not allowed to see this project")
@login_required(login_url="issue_tracker:sign-in")
@group_required("leader", "admin")
@require_http_methods(["GET"])
def manage_project_issues_list(request, pk):
if request.user.groups.filter(name__in=("admin",)):
project_instance = Project.objects.filter(pk=pk).first()
elif request.user.groups.filter(name__in=("leader",)):
project_instance = Project.objects.filter(pk=pk, leader=request.user.pk).first()
if project_instance:
context = {}
# Repository: zhanghaicang/DeepMRF
import layers
import os
import logging
import numpy as np
import tensorflow as tf
import util
from util import RunMode
np.set_printoptions(threshold=np.nan)
PADDING_FULL_LEN = 500
#PADDING_FULL_LEN = None
#PADDING_FULL_LEN = 250
class Resnet:
def __init__(self, sess, dataset, train_config, model_config):
self.sess = sess
self.dataset = dataset
self.train_config = train_config
self.model_config = model_config
self.input_tfrecord_files = tf.placeholder(tf.string, shape=[None])
self.keep_prob = tf.placeholder(tf.float32)
self.training = tf.placeholder(tf.bool)
self.x1d_channel_dim = model_config['1d']['channel_dim']
self.x2d_channel_dim = model_config['2d']['channel_dim']
def resn1d(self, x1d, reuse=False):
with tf.variable_scope('resn_1d', reuse=reuse) as scope:
act = tf.nn.relu
filters_1d = self.model_config['1d']['filters']
kernel_size_1d = self.model_config['1d']['kernel_size']
block_num_1d = self.model_config['1d']['block_num']
#kernel_initializer = tf.glorot_normal_initializer()
kernel_initializer = tf.variance_scaling_initializer()
bias_initializer = tf.zeros_initializer()
if self.train_config.l2_reg <= 0.0:
kernel_regularizer = None
else:
#kernel_regularizer = tf.contrib.layers.l2_regularizer(scale=self.train_config.l2_reg)
kernel_regularizer = tf.contrib.layers.l1_l2_regularizer(
scale_l1=0.0,
scale_l2=self.train_config.l2_reg)
bias_regularizer = None
prev_1d = tf.layers.conv1d(inputs=x1d, filters=filters_1d,
kernel_size=kernel_size_1d, strides=1, padding='same',
kernel_initializer=kernel_initializer, bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer, use_bias=True)
for i in np.arange(block_num_1d):
#prev_1d=tf.layers.batch_normalization(prev_1d, training=self.training)
conv_1d = act(prev_1d)
conv_1d = tf.layers.conv1d(inputs=conv_1d, filters=filters_1d,
kernel_size=kernel_size_1d, strides=1, padding='same',
kernel_initializer=kernel_initializer, bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer, use_bias=True)
#conv_1d=tf.layers.batch_normalization(conv_1d, training=self.training)
conv_1d = act(conv_1d)
conv_1d = tf.layers.conv1d(inputs=conv_1d, filters=filters_1d,
kernel_size=kernel_size_1d, strides=1, padding='same',
kernel_initializer=kernel_initializer, bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer, use_bias=True)
prev_1d = tf.add(conv_1d, prev_1d)
#prev_1d = act(prev_1d)
logits = tf.layers.conv1d(inputs=prev_1d, filters=self.model_config['1d_label_size'],
kernel_size=kernel_size_1d, strides=1, padding='same',
kernel_initializer=kernel_initializer, bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer, use_bias=True)
return logits
def resn2d(self, x1d, x2d, reuse=False):
with tf.variable_scope('resn_1d_2d', reuse=reuse) as scope:
act = tf.nn.relu
filters_1d = self.model_config['1d']['filters']
kernel_size_1d = self.model_config['1d']['kernel_size']
block_num_1d = self.model_config['1d']['block_num']
filters_2d = self.model_config['2d']['filters']
kernel_size_2d = self.model_config['2d']['kernel_size']
block_num_2d = self.model_config['2d']['block_num']
#kernel_initializer = tf.glorot_normal_initializer()
kernel_initializer = tf.variance_scaling_initializer()
bias_initializer = tf.zeros_initializer()
if self.train_config.l2_reg <= 0.0:
kernel_regularizer = None
else:
#kernel_regularizer = tf.contrib.layers.l2_regularizer(scale=self.train_config.l2_reg)
kernel_regularizer = tf.contrib.layers.l1_l2_regularizer(
scale_l1=0.0,
scale_l2=self.train_config.l2_reg)
bias_regularizer = None
prev_1d = tf.layers.conv1d(inputs=x1d, filters=filters_1d,
kernel_size=kernel_size_1d, strides=1, padding='same',
kernel_initializer=kernel_initializer, bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer, use_bias=True)
for i in np.arange(block_num_1d):
conv_1d = act(prev_1d)
conv_1d = tf.layers.conv1d(inputs=conv_1d, filters=filters_1d,
kernel_size=kernel_size_1d, strides=1, padding='same',
kernel_initializer=kernel_initializer, bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer, use_bias=True)
conv_1d = act(conv_1d)
conv_1d = tf.layers.conv1d(inputs=conv_1d, filters=filters_1d,
kernel_size=kernel_size_1d, strides=1, padding='same',
kernel_initializer=kernel_initializer, bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer, use_bias=True)
prev_1d = tf.add(conv_1d, prev_1d)
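# Note: the block below tiles the 1D features into a pairwise (L x L) map.
# After the einsum with a (1, L) tensor of ones and the two transposes,
# left_1d[b, i, j, :] carries the features of position i and right_1d[b, i, j, :]
# those of position j, so concatenating them with x2d gives the input for each pair.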
out_1d = tf.expand_dims(prev_1d, axis=3)
ones = tf.ones((1, PADDING_FULL_LEN))
left_1d = tf.einsum('abcd,de->abce', out_1d, ones)
left_1d = tf.transpose(left_1d, perm=[0,1,3,2])
right_1d = tf.transpose(left_1d, perm=[0,2,1,3])
input_2d = tf.concat([x2d, left_1d, right_1d], axis=3)
prev_2d = tf.layers.conv2d(inputs=input_2d, filters=filters_2d,
kernel_size=kernel_size_2d, strides=(1,1), padding='same',
kernel_initializer=kernel_initializer, bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer, use_bias=True)
for i in np.arange(block_num_2d):
conv_2d = act(prev_2d)
conv_2d = tf.layers.conv2d(inputs=conv_2d, filters=filters_2d,
kernel_size=kernel_size_2d, strides=(1,1), padding='same',
kernel_initializer=kernel_initializer, bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer, use_bias=True)
conv_2d = act(conv_2d)
conv_2d = tf.layers.conv2d(inputs=conv_2d, filters=filters_2d,
kernel_size=kernel_size_2d, strides=(1,1), padding='same',
kernel_initializer=kernel_initializer, bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer, use_bias=True)
prev_2d = tf.add(conv_2d, prev_2d)
logits = tf.layers.conv2d(inputs=prev_2d, filters=self.model_config['2d_label_size'],
kernel_size=kernel_size_2d, strides=(1,1), padding='same',
kernel_initializer=kernel_initializer, bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer, use_bias=True)
#logits_tran = tf.transpose(logits, perm=[0, 2, 1, 3])
#logits = (logits + logits_tran) / 2.0
return logits
def resn_mrf(self, x1d, x2d, y1d, reuse=False):
with tf.variable_scope('resn_1d_2d', reuse=reuse) as scope:
act = tf.nn.relu
filters_1d = self.model_config['1d']['filters']
kernel_size_1d = self.model_config['1d']['kernel_size']
block_num_1d = self.model_config['1d']['block_num']
filters_2d = self.model_config['2d']['filters']
kernel_size_2d = self.model_config['2d']['kernel_size']
block_num_2d = self.model_config['2d']['block_num']
kernel_initializer = tf.glorot_normal_initializer()
bias_initializer = tf.zeros_initializer()
kernel_regularizer = tf.contrib.layers.l1_l2_regularizer(
scale_l1=0.0,
scale_l2=self.train_config.l2_reg)
bias_regularizer = tf.contrib.layers.l1_l2_regularizer(
scale_l1=0.0,
scale_l2=self.train_config.l2_reg)
prev_1d = tf.layers.conv1d(inputs=x1d, filters=filters_1d,
kernel_size=kernel_size_1d, strides=1, padding='same',
kernel_initializer=kernel_initializer, bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer, use_bias=True)
for i in np.arange(block_num_1d):
conv_1d = act(prev_1d)
conv_1d = tf.layers.conv1d(inputs=conv_1d, filters=filters_1d,
kernel_size=kernel_size_1d, strides=1, padding='same',
kernel_initializer=kernel_initializer, bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer, use_bias=True)
conv_1d = act(conv_1d)
conv_1d = tf.layers.conv1d(inputs=conv_1d, filters=filters_1d,
kernel_size=kernel_size_1d, strides=1, padding='same',
kernel_initializer=kernel_initializer, bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer, use_bias=True)
prev_1d = tf.add(conv_1d, prev_1d)
#mrf_1d = tf.layers.conv1d(inputs=prev_1d, filters=self.model_config['1d_label_size'],
logits_1d = tf.layers.conv1d(inputs=prev_1d, filters=19,
kernel_size=kernel_size_1d, strides=1, padding='same',
kernel_initializer=kernel_initializer, bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer, use_bias=True)
#out_1d = tf.expand_dims(prev_1d, axis=3)
out_1d = tf.expand_dims(x1d, axis=3)
ones = tf.ones((1, PADDING_FULL_LEN))
left_1d = tf.einsum('abcd,de->abce', out_1d, ones)
left_1d = tf.transpose(left_1d, perm=[0,1,3,2])
right_1d = tf.transpose(left_1d, perm=[0,2,1,3])
input_2d = tf.concat([x2d, left_1d, right_1d], axis=3)
prev_2d = tf.layers.conv2d(inputs=input_2d, filters=filters_2d,
kernel_size=kernel_size_2d, strides=(1,1), padding='same',
kernel_initializer=kernel_initializer, bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer, use_bias=True)
for i in np.arange(block_num_2d):
conv_2d = act(prev_2d)
conv_2d = tf.layers.conv2d(inputs=conv_2d, filters=filters_2d,
kernel_size=kernel_size_2d, strides=(1,1), padding='same',
kernel_initializer=kernel_initializer, bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer, use_bias=True)
conv_2d = act(conv_2d)
conv_2d = tf.layers.conv2d(inputs=conv_2d, filters=filters_2d,
kernel_size=kernel_size_2d, strides=(1,1), padding='same',
kernel_initializer=kernel_initializer, bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer, use_bias=True)
prev_2d = tf.add(conv_2d, prev_2d)
#logits = tf.layers.conv2d(inputs=prev_2d, filters=self.model_config['2d_label_size'],
logits_2d = tf.layers.conv2d(inputs=prev_2d, filters=19*19,
kernel_size=kernel_size_2d, strides=(1,1), padding='same',
kernel_initializer=kernel_initializer, bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer, use_bias=True)
#logits_2d=tf.reshape(logits, [-1, PADDING_FULL_LEN, PADDING_FULL_LEN, 20, 20])
logits_2d=tf.reshape(logits_2d, [-1, PADDING_FULL_LEN, PADDING_FULL_LEN, 19, 19])
logits_2d_sym=tf.transpose(logits_2d, perm=[0,2,1,4,3])
logits_2d = 0.5 * (logits_2d + logits_2d_sym)
#MRF part
#hot_encode = tf.one_hot(tf.cast(y1d, tf.int32), depth=self.model_config['1d_label_size'])
hot_encode = tf.one_hot(tf.cast(y1d, tf.int32), depth=19)
hot_encode_e = tf.expand_dims(hot_encode, axis=2)
#?, 500, 1, 20
ones = tf.ones((1, PADDING_FULL_LEN))
#1, 500
v0=tf.einsum('DieB,ej->DjiB', hot_encode_e, ones)
v1=tf.expand_dims(v0, axis=-1)
v2=tf.matmul(logits_2d, v1)
v3=tf.squeeze(v2, axis=-1)
v4=tf.transpose(v3, perm=[0,3,1,2])
diag=tf.zeros_like(v4[:,:,:,0])
v5=tf.matrix_set_diag(v4, tf.zeros_like(v4[:,:,:,0]))
v6=tf.transpose(v5, perm=[0,2,3,1])
mrf_2d_reduced=tf.reduce_sum(v6, axis=2)
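# Descriptive note: for each position i and label a, mrf_2d_reduced[b, i, a] sums the
# pairwise couplings logits_2d[b, i, j, a, y1d[b, j]] over all partners j != i (the
# diagonal was zeroed above). Added to logits_1d, this appears to form the MRF
# conditional (pseudo-likelihood style) logits; tf.pad below appends a zero logit
# as the 20th class.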
self.mrf_1d = logits_1d
self.mrf_2d = logits_2d
mrf_logits = tf.add(logits_1d, mrf_2d_reduced)
mrf_logits = tf.pad(mrf_logits, [[0,0], [0,0], [0, 1]])
return mrf_logits
def predict1d(self, output_dir, in_model_path):
self.x1d, self.x2d, self.y1d, self.y2d,\
self.size, self.name, self.iterator = self.build_input()
with tf.device('/gpu:{}'.format(self.train_config.gpu_label)):
logits = self.resn1d(self.x1d)
self.pred = tf.nn.softmax(logits)
#tf.global_variables_initializer().run()
saver = tf.train.Saver()
saver.restore(self.sess, in_model_path)
def test(test_name):
self.sess.run(self.iterator.initializer,\
feed_dict={self.input_tfrecord_files:self.dataset.get_chunks(test_name)})
acc = []
total = 0
while True:
try:
pred, y1d, size, name = self.sess.run([self.pred, self.y1d, self.size, self.name],
feed_dict={self.training:False})
for y_, pred_, size_, name_ in zip(y1d, pred, size, name):
acc_ = util.calc_acc1d(pred_[:size_], y_[:size_])
acc.append(acc_)
total += 1
np.savez('{}/{}.npz'.format(
self.train_config.output_dir, name_), target=y_[:size_], pred=pred_[:size_])
except tf.errors.OutOfRangeError:
break
acc_ = np.mean(np.array(acc))
logging.info('{:s} total= {} 1d_acc= {}'.format(test_name, total, acc_))
test('casp12')
test('cameo')
def evaluate1d(self, mode):
self.sess.run(self.iterator.initializer,\
feed_dict={self.input_tfrecord_files:self.dataset.get_chunks(mode)})
acc = []
total = 0
while True:
try:
pred, y1d, size, name = self.sess.run([self.pred, self.y1d, self.size, self.name],
feed_dict={self.training:False})
for y_, pred_, size_, name_ in zip(y1d, pred, size, name):
acc_ = util.calc_acc1d(pred_[:size_], y_[:size_])
acc.append(acc_)
total += 1
except tf.errors.OutOfRangeError:
break
acc_ = np.mean(np.array(acc))
logging.info('{:s} total= {} 1d_acc= {}'.format(mode, total, acc_))
return
def evaluate2d(self, mode, epoch):
self.sess.run(self.iterator.initializer,\
feed_dict={self.input_tfrecord_files:self.dataset.get_chunks(mode)})
acc = []
total = 0
save_dir = '{}/pred_{}'.format(self.train_config.out_pred_dir, epoch)
if not os.path.exists(save_dir):
os.makedirs(save_dir)
while True:
try:
pred, y1d, size, name = self.sess.run([self.pred, self.y1d, self.size, self.name])
for y_, pred_, size_, name_ in zip(y1d, pred, size, name):
save_path = '{}/{}.pred'.format(save_dir, name_)
np.savez(save_path, pred=pred_[:size_, :size_], y=y_[:size_])
#inference.infer_map(pred_[:size_,:size_], y_[:size_], name)
except tf.errors.OutOfRangeError:
break
#logging.info('{:s} total= {} 1d_acc= {}'.format(mode, total, acc_))
return
def evaluate_mrf(self, mode, epoch):
self.sess.run(self.iterator.initializer,\
feed_dict={self.input_tfrecord_files:self.dataset.get_chunks(mode)})
acc = []
total = 0
save_dir = '{}/pred_{}'.format(self.train_config.out_pred_dir, epoch)
if not os.path.exists(save_dir):
os.makedirs(save_dir)
acc = []
total = 0
while True:
try:
pred, y1d, size, name, mrf_1d, mrf_2d =\
self.sess.run([self.pred, self.y1d, self.size, self.name, self.mrf_1d, self.mrf_2d])
for y_, pred_, size_, name_, mrf_1d_, mrf_2d_ in zip(y1d, pred, size, name,
mrf_1d, mrf_2d):
save_path = '{}/{}.pred'.format(save_dir, name_)
np.savez(save_path, mrf_1d=mrf_1d_[:size_], mrf_2d=mrf_2d_[:size_, :size_], y=y_[:size_])
acc_ = util.calc_acc1d(pred_[:size_], y_[:size_])
acc.append(acc_)
total += 1
except tf.errors.OutOfRangeError:
break
acc_ = np.mean(np.array(acc))
logging.info('{:s} total= {} 1d_acc= {}'.format(mode, total, acc_))
return
def build_input(self):
with tf.device('/cpu:0'):
def parser(record):
keys_to_features = {
'x1d' :tf.FixedLenFeature([], tf.string),
'x2d' :tf.FixedLenFeature([], tf.string),
'y1d' :tf.FixedLenFeature([], tf.string),
'y2d' :tf.FixedLenFeature([], tf.string),
'size':tf.FixedLenFeature([], tf.int64),
'name':tf.FixedLenFeature([], tf.string)
}
parsed = tf.parse_single_example(record, keys_to_features)
size = parsed['size']
name = parsed['name']
x1d = tf.decode_raw(parsed['x1d'], tf.float32)
x2d = tf.decode_raw(parsed['x2d'] ,tf.float32)
y1d = tf.decode_raw(parsed['y1d'] ,tf.int16)
y2d = tf.decode_raw(parsed['y2d'] ,tf.int16)
x1d = tf.reshape(x1d, tf.stack([size, -1]))
x2d = tf.reshape(x2d, tf.stack([size, size, -1]))
y1d = tf.reshape(y1d, tf.stack([size, -1]))
y2d = tf.reshape(y2d, tf.stack([size, size]))
return x1d, x2d, y1d, y2d, size, name
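# Per-example shapes after parsing: x1d (size, x1d_channels) float32,
# x2d (size, size, x2d_channels) float32, y1d (size, 1) int16 (squeezed later),
# y2d (size, size) int16; `size` is the sequence length stored in the record.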
def filter_fn(x1d, x2d, y1d, y2d, size, name):
return tf.size(y1d) <= PADDING_FULL_LEN
dataset = tf.data.TFRecordDataset(self.input_tfrecord_files)
dataset = dataset.map(parser, num_parallel_calls=8)
dataset = dataset.shuffle(buffer_size=256)
dataset = dataset.filter(filter_fn)
dataset = dataset.padded_batch(self.train_config.batch_size,
padded_shapes=(
[PADDING_FULL_LEN, self.x1d_channel_dim],
[PADDING_FULL_LEN, PADDING_FULL_LEN, self.x2d_channel_dim],
[PADDING_FULL_LEN, 1],
[PADDING_FULL_LEN, PADDING_FULL_LEN],
[],[]),
padding_values=(0.0, 0.0, np.int16(-1), np.int16(-1), np.int64(PADDING_FULL_LEN), ""))
#dataset = dataset.batch(1)
dataset = dataset.prefetch(512)
iterator = dataset.make_initializable_iterator()
x1d, x2d, y1d, y2d, size, name = iterator.get_next()
#x1d=tf.reshape(x1d, [1, -1, self.x1d_channel_dim])
#y1d=tf.reshape(y1d, [1, -1])
return x1d, x2d, tf.squeeze(y1d, [2]), y2d, size, name, iterator
#return x1d, x2d, y1d, y2d, size, name, iterator
def train(self):
self.x1d, self.x2d, self.y1d, self.y2d,\
self.size, self.name, self.iterator = self.build_input()
with tf.device('/gpu:{}'.format(self.train_config.gpu_label)):
if self.train_config.model_type == 'resn2d':
logits = self.resn2d(self.x1d, self.x2d)
self.pred = tf.nn.softmax(logits)
labels = tf.one_hot(tf.cast(self.y2d, tf.int32), depth=self.model_config['2d_label_size'])
mask = tf.greater_equal(self.y2d, 0)
labels = tf.boolean_mask(labels, mask)
logits = tf.boolean_mask(logits, mask)
if self.train_config.model_type == 'resn_mrf':
logits = self.resn_mrf(self.x1d, self.x2d, self.y1d)
self.pred = tf.nn.softmax(logits)
labels = tf.one_hot(tf.cast(self.y1d, tf.int32), depth=self.model_config['1d_label_size'])
mask = tf.greater_equal(self.y1d, 0)
labels = tf.boolean_mask(labels, mask)
logits = tf.boolean_mask(logits, mask)
elif self.train_config.model_type == 'resn1d':
logits = self.resn1d(self.x1d)
self.pred = tf.nn.softmax(logits)
labels = tf.one_hot(tf.cast(self.y1d, tf.int32), depth=self.model_config['1d_label_size'])
mask = tf.greater_equal(self.y1d, 0)
labels = tf.boolean_mask(labels, mask)
logits = tf.boolean_mask(logits, mask)
log_loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits_v2(labels = labels, logits = logits))
reg_loss = tf.losses.get_regularization_loss()
self.loss = log_loss + reg_loss
if self.train_config.mrf_reg > 0.0:
mrf_1d_reg = tf.reduce_mean(tf.reduce_sum(tf.square(self.mrf_1d),axis=[1,2]))
mrf_2d_reg = tf.reduce_mean(tf.reduce_sum(tf.square(self.mrf_2d),axis=[1,2,3,4]))
mrf_reg_loss = mrf_1d_reg + mrf_2d_reg
self.loss += self.train_config.mrf_reg * mrf_reg_loss
compress_type=zipfile.ZIP_DEFLATED)
except Exception as err:
# Add error message for the failed batch job
putBatchMng({
"batchId" : str(batchId),
"reqId" : queryData['empId'],
"status" : "BAT-0003" , # failed
"errMsg" : str(err)
})
jobStatus = 1
print err
finally:
if excelZip != None:
excelZip.close()
# Add success message
if jobStatus == 0 :
putBatchMng({
"batchId" : str(batchId),
"reqId" : queryData['empId'],
"status" : "BAT-0002" , # completed
"errMsg" : ""
})
print "종료"
@approvalsApi.route('/api/approvals/salements', methods=['GET'])
def salements():
form_data = json.loads(request.args.get("formData"))
searchDate = getParameter(form_data , "startDate").split(' - ')
selType = getParameter(form_data , "selType")
noType = getParameter(form_data , "noType")
startDate = paramEscape(searchDate[0])
endDate = paramEscape(searchDate[1])
queryData = {
'merchantId': getParameter(form_data , "submerchantId"),
'serviceId': getParameter(form_data , "serviceId"),
'approvalStatus': getParameter(form_data , "status"),
'saleMethod': getParameter(form_data,"saleTypeDetail"),
'prodType': getParameter(form_data,"saleType"),
'orderNo': noType == "1" and getParameter(form_data, "approvalNo") or "",
'approvalNo' : noType == "2" and getParameter(form_data, "approvalNo") or "",
'svcConnId': getParameter(form_data,"svcConnId"),
'amount': paramEscape(getParameter(form_data,"amount")),
'startDate': startDate,
'endDate': endDate,
'merchantNm': selType == "0" and getParameter(form_data, "selName") or "",
'serviceNm': selType == "1" and getParameter(form_data, "selName") or "",
'offset': setStringToNumber(request.args.get("start")),
'limit': setStringToNumber(request.args.get("length")),
'excelAllFlag':'',
}
result_data = getApiData("/approvals/salements" ,queryData)
return json.dumps(result_data)
@approvalsApi.route('/api/approvals/salementsMonth', methods=['GET'])
def salementsMonth():
form_data = json.loads(request.args.get("formData"))
searchDate = getParameter(form_data , "startDate").split(' - ')
selType = getParameter(form_data , "selType")
noType = getParameter(form_data , "noType")
startDate = paramEscape(searchDate[0])
endDate = paramEscape(searchDate[1])
queryData = {
'merchantId': getParameter(form_data , "submerchantId"),
'serviceId': getParameter(form_data , "serviceId"),
'approvalStatus': getParameter(form_data , "status"),
'saleMethod': getParameter(form_data,"saleTypeDetail"),
'prodType': getParameter(form_data,"saleType"),
'orderNo': noType == "1" and getParameter(form_data, "approvalNo") or "",
'approvalNo' : noType == "2" and getParameter(form_data, "approvalNo") or "",
'svcConnId': getParameter(form_data,"svcConnId"),
'amount': paramEscape(getParameter(form_data,"amount")),
'startDate': startDate,
'endDate': endDate,
'merchantNm': selType == "0" and getParameter(form_data, "selName") or "",
'serviceNm': selType == "1" and getParameter(form_data, "selName") or "",
'offset': setStringToNumber(request.args.get("start")),
'limit': setStringToNumber(request.args.get("length")),
'excelAllFlag':'',
}
result_data = getApiData("/approvals/salementsMonth" ,queryData)
return json.dumps(result_data)
@approvalsApi.route('/api/approvals/salements/excelAll', methods=['GET'])
def salementsExcelAll():
form_data = {}
searchDate = getParameter(form_data , "startDate").split(' - ')
selType = getParameter(form_data , "selType")
noType = getParameter(form_data , "noType")
startDate = paramEscape(searchDate[0])
endDate = paramEscape(searchDate[1])
queryData = {
'merchantId': getParameter(form_data , "submerchantId"),
'serviceId': getParameter(form_data , "serviceId"),
'approvalStatus': getParameter(form_data , "status"),
'saleMethod': getParameter(form_data,"saleTypeDetail"),
'prodType': getParameter(form_data,"saleType"),
'orderNo': noType == "1" and getParameter(form_data, "approvalNo") or "",
'approvalNo' : noType == "2" and getParameter(form_data, "approvalNo") or "",
'svcConnId': getParameter(form_data,"svcConnId"),
'amount': paramEscape(getParameter(form_data,"amount")),
'startDate': startDate,
'endDate': endDate,
'merchantNm': selType == "0" and getParameter(form_data, "selName") or "",
'serviceNm': selType == "1" and getParameter(form_data, "selName") or "",
'offset': 0,
'limit': EXCEL_FILE_DOWNLOAD_COUNT,
'excelAllFlag': '1',
'empId' : session['empId']
}
rootPath = current_app.root_path
t1 = threading.Thread(target=makeSalementsExcelFile,args=[queryData,rootPath])
t1.daemon = True
t1.start()
return "엑셀 작업요청"
def makeSalementsExcelFile(queryData,rootPath):
excelZip = None
jobStatus = 0
batchId = None
try:
fileCnt = 1
makeTime = str(int(round(time.time()*1000)))
uploads = os.path.join(rootPath, "fileDownload" , "excel" , makeTime)
if not os.path.isdir(uploads):
os.makedirs(uploads)
zipFileName = u'판매정산내역_'+ datetime.datetime.now().strftime('%Y%m%d') +'.zip'
# Add job-start message
batchId = postBatchMng({
"reqId" : queryData['empId'],
"status" : "BAT-0001" , # in progress
"filePath" : os.path.join(uploads ,zipFileName),
"content" : "판매 정산 엑셀 배치작업",
"errMsg" : ""
})["data"]["batchId"]
fileName = '판매정산내역_' + datetime.datetime.now().strftime('%Y%m%d') + '_' + str(fileCnt) + '.xlsx'
workbook = xlsxwriter.Workbook(os.path.join(uploads ,setUnicodeEncodeTypeToEucKr(fileName)))
worksheet = workbook.add_worksheet()
money_format = workbook.add_format()
money_format.set_num_format('_- #,##0_-;[red]- #,##0_-;_- "-"_-;_-@_-')
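# The custom Excel number format above shows positives with a thousands separator,
# negatives in red with a leading minus sign, zero as "-", and text unchanged.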
row = 0
worksheet.write(row, 0 ,"거래처명")
worksheet.write(row, 1 ,"서비스명")
worksheet.write(row, 2 ,"연동아이디")
worksheet.write(row, 3 ,"상태")
worksheet.write(row, 4 ,"상품명")
worksheet.write(row, 5 ,"상품ID")
worksheet.write(row, 6 ,"상품구분")
worksheet.write(row, 7 ,"거래일")
worksheet.write(row, 8 ,"거래시간")
worksheet.write(row, 9 ,"주문번호")
worksheet.write(row, 10 ,"승인번호")
worksheet.write(row, 11 ,"권종")
worksheet.write(row, 12 ,"거래건수")
worksheet.write(row, 13 ,"거래금액")
worksheet.write(row, 14 ,"비고")
while True :
searchData = getData("/approvals/salements" ,queryData)
for data in searchData["resultList"]:
row += 1
saleCnt = long(data["saleCnt"])
saleAmt = long(data["saleAmt"]) * saleCnt
if data["approvalStatus"] == "SSTS-0002":
saleAmt = saleAmt * -1
worksheet.write(row, 0 ,data["submerchantNm"])
worksheet.write(row, 1 ,data["serviceNm"])
worksheet.write(row, 2 ,data["svcConnId"])
worksheet.write(row, 3 ,data["approvalStatusNm"])
worksheet.write(row, 4 ,data["prodNm"])
worksheet.write(row, 5 ,data["prodCd"])
worksheet.write(row, 6 ,data["prodTypeNm"])
worksheet.write(row, 7 ,parseDate(data["approvalDt"] ,'%Y-%m-%d %H:%M:%S' ,'%Y-%m-%d'))
worksheet.write(row, 8 ,parseDate(data["approvalDt"] ,'%Y-%m-%d %H:%M:%S' ,'%H:%M:%S'))
worksheet.write(row, 9 ,data["orderNo"])
worksheet.write(row, 10 ,data["approvalNo"])
worksheet.write_number(row, 11, long(data["saleAmt"]), money_format)
worksheet.write_number(row, 12, saleCnt, money_format)
worksheet.write_number(row, 13, saleAmt, money_format)
worksheet.write(row, 14 ,data["desc01"])
if row >= EXCEL_FILE_MAKE_LIMT_COUNT :
row = 0
fileCnt += 1
fileName = '판매정산내역_' + datetime.datetime.now().strftime('%Y%m%d') + '_' + str(fileCnt) + '.xlsx'
# Recreate the file once (DB fetch count * 2) rows have been generated
workbook.close()
workbook = xlsxwriter.Workbook(os.path.join(uploads ,setUnicodeEncodeTypeToEucKr(fileName)))
worksheet = workbook.add_worksheet()
worksheet.write(row, 0 ,"거래처명")
worksheet.write(row, 1 ,"서비스명")
worksheet.write(row, 2 ,"연동아이디")
worksheet.write(row, 3 ,"상태")
worksheet.write(row, 4 ,"상품명")
worksheet.write(row, 5 ,"상품ID")
worksheet.write(row, 6 ,"상품구분")
worksheet.write(row, 7 ,"거래일")
worksheet.write(row, 8 ,"거래시간")
worksheet.write(row, 9 ,"주문번호")
worksheet.write(row, 10 ,"승인번호")
worksheet.write(row, 11 ,"권종")
worksheet.write(row, 12 ,"거래건수")
worksheet.write(row, 13 ,"거래금액")
worksheet.write(row, 14 ,"비고")
queryData["offset"] = queryData["offset"] + EXCEL_FILE_DOWNLOAD_COUNT
if len(searchData["resultList"]) < EXCEL_FILE_DOWNLOAD_COUNT :
break
workbook.close()
excelZip = zipfile.ZipFile(os.path.join(uploads ,zipFileName),'w')
for folder, subfolders, files in os.walk(uploads):
for file in files:
if file.endswith('.xlsx'):
excelZip.write(os.path.join(folder ,file), setUnicodeFormatToEucKr(file), compress_type=zipfile.ZIP_DEFLATED)
except Exception as err:
# Add error message for the failed batch job
putBatchMng({
"batchId" : str(batchId),
"reqId" : queryData['empId'],
"status" : "BAT-0003" , # failed
"errMsg" : str(err)
})
jobStatus = 1
print err
finally:
if excelZip != None:
excelZip.close()
if jobStatus == 0 :
putBatchMng({
"batchId" : str(batchId),
"reqId" : queryData['empId'],
"status" : "BAT-0002" , # 진행중
"errMsg" : ""
})
#성공 메시지 추가
print "성공"
@approvalsApi.route('/api/approvals/salements/monthExcelAll', methods=['GET'])
def salementsMonthExcelAll():
form_data = {}
searchDate = getParameter(form_data , "startDate").split(' - ')
selType = getParameter(form_data , "selType")
noType = getParameter(form_data , "noType")
startDate = paramEscape(searchDate[0])
endDate = paramEscape(searchDate[1])
queryData = {
'merchantId': getParameter(form_data , "submerchantId"),
'serviceId': getParameter(form_data , "serviceId"),
'approvalStatus': getParameter(form_data , "status"),
'saleMethod': getParameter(form_data,"saleTypeDetail"),
'prodType': getParameter(form_data,"saleType"),
'orderNo': noType == "1" and getParameter(form_data, "approvalNo") or "",
'approvalNo' : noType == "2" and getParameter(form_data, "approvalNo") or "",
'svcConnId': getParameter(form_data,"svcConnId"),
'amount': paramEscape(getParameter(form_data,"amount")),
'startDate': startDate,
'endDate': endDate,
'merchantNm': selType == "0" and getParameter(form_data, "selName") or "",
'serviceNm': selType == "1" and getParameter(form_data, "selName") or "",
'offset': 0,
'limit': EXCEL_FILE_DOWNLOAD_COUNT,
'excelAllFlag': '1',
'empId' : session['empId']
}
rootPath = current_app.root_path
t1 = threading.Thread(target=makeSalementsMonthExcelFile,args=[queryData,rootPath])
t1.daemon = True
t1.start()
return "엑셀 작업요청"
def makeSalementsMonthExcelFile(queryData,rootPath):
excelZip = None
jobStatus = 0
batchId = None
try:
fileCnt = 1
makeTime = str(int(round(time.time()*1000)))
uploads = os.path.join(rootPath, "fileDownload" , "excel" , makeTime)
if not os.path.isdir(uploads):
os.makedirs(uploads)
zipFileName = u'판매정산내역_월별_'+ datetime.datetime.now().strftime('%Y%m%d') +'.zip'
# Add job-start message
batchId = postBatchMng({
"reqId" : queryData['empId'],
"status" : "BAT-0001" , # in progress
"filePath" : os.path.join(uploads ,zipFileName),
"content" : "판매 정산 월별 요약 엑셀 배치작업",
"errMsg" : ""
})["data"]["batchId"]
fileName = '판매정산내역_월별_' + datetime.datetime.now().strftime('%Y%m%d') + '_' + str(fileCnt) + '.xlsx'
workbook = xlsxwriter.Workbook(os.path.join(uploads ,setUnicodeEncodeTypeToEucKr(fileName)))
worksheet = workbook.add_worksheet()
money_format = workbook.add_format()
money_format.set_num_format('_- #,##0_-;[red]- #,##0_-;_- "-"_-;_-@_-')
row = 0
worksheet.write(row, 0 ,"거래처명")
worksheet.write(row, 1 ,"서비스명")
worksheet.write(row, 2 ,"연동아이디")
worksheet.write(row, 3 ,"상태")
worksheet.write(row, 4 ,"상품명")
worksheet.write(row, 5 ,"상품ID")
worksheet.write(row, 6 ,"상품구분")
worksheet.write(row, 7 ,"권종")
worksheet.write(row, 8 ,"거래일")
worksheet.write(row, 9 ,"거래건수")
worksheet.write(row, 10 ,"거래금액")
while True :
searchData = getData("/approvals/salementsMonth" ,queryData)
for data in searchData["resultList"]:
row += 1
saleCnt = long(data["saleCnt"])
saleAmt = long(data["saleAmt"]) * saleCnt
if data["approvalStatus"] == "SSTS-0002":
saleAmt = saleAmt * -1
worksheet.write(row, 0 ,data["submerchantNm"])
worksheet.write(row, 1 ,data["serviceNm"])
worksheet.write(row, 2 ,data["svcConnId"])
worksheet.write(row, 3 ,data["approvalStatusNm"])
worksheet.write(row, 4 ,data["prodNm"])
worksheet.write(row, 5 ,data["prodCd"])
worksheet.write(row, 6 ,data["prodTypeNm"])
worksheet.write_number(row, 7, long(data["saleAmt"]), money_format)
worksheet.write(row, 8 ,parseDate(data["dealDt"] + "01" ,'%Y%m%d' ,'%Y-%m-%d'))
worksheet.write_number(row, 9, saleCnt, money_format)
worksheet.write_number(row, 10, saleAmt, money_format)
if row >= EXCEL_FILE_MAKE_LIMT_COUNT :
row = 0
fileCnt += 1
fileName = '판매정산내역_월별_' + datetime.datetime.now().strftime('%Y%m%d') + '_' + str(fileCnt) + '.xlsx'
# Recreate the file once (DB fetch count * 2) rows have been generated
workbook.close()
workbook = xlsxwriter.Workbook(os.path.join(uploads ,setUnicodeEncodeTypeToEucKr(fileName)))
worksheet = workbook.add_worksheet()
worksheet.write(row, 0 ,"거래처명")
worksheet.write(row, 1 ,"서비스명")
worksheet.write(row, 2 ,"연동아이디")
worksheet.write(row, 3 ,"상태")
worksheet.write(row, 4 ,"상품명")
worksheet.write(row, 5 ,"상품ID")
worksheet.write(row, 6 ,"상품구분")
worksheet.write(row, 7 ,"권종")
worksheet.write(row, 8 ,"거래일")
worksheet.write(row, 9 ,"거래건수")
worksheet.write(row, 10 ,"거래금액")
queryData["offset"] = queryData["offset"] + EXCEL_FILE_DOWNLOAD_COUNT
if len(searchData["resultList"]) < EXCEL_FILE_DOWNLOAD_COUNT :
break
workbook.close()
excelZip = zipfile.ZipFile(os.path.join(uploads ,zipFileName),'w')
for folder, subfolders, files in os.walk(uploads):
for file in files:
if file.endswith('.xlsx'):
excelZip.write(os.path.join(folder ,file), setUnicodeFormatToEucKr(file), compress_type=zipfile.ZIP_DEFLATED)
except Exception as err:
# Add error message for the failed batch job
putBatchMng({
"batchId" : str(batchId),
"reqId" : queryData['empId'],
"status" : "BAT-0003" , # failed
"errMsg" : str(err)
})
jobStatus = 1
print err
finally:
if excelZip != None:
excelZip.close()
if jobStatus == 0 :
putBatchMng({
"batchId" : str(batchId),
"reqId" : queryData['empId'],
"status" : "BAT-0002" , # | |
"""
File : utils.py
Start Date : 19971217
Refactor Date : 20180514
Description : General utility functions
$Id: utils.py 953 2018-05-14 05:28:21Z phillips.ryan $
"""
__version__ = '$Revision: 953 $'
__copyright__ = 'Copyright (c) Ryan Phillips 2018'
__author__ = '<NAME>; <NAME>'
__author_email__ = '<EMAIL>; '
__maintainer__ = '<NAME> aka Tidanium'
__maintainer_email__ = '<EMAIL>'
__license__ = """
MIT License
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import re, threading, os, subprocess, sys, smtplib, asyncio, configparser
from . import log
from .. import commands
config = configparser.ConfigParser(allow_no_value=True, comment_prefixes=(';'), inline_comment_prefixes=(';'))
try:
config.read(f'{commands.Main.relpath}/config/config.ini', encoding='utf-8')
except KeyError as e:
log.logEvent.critical(e)
except configparser.MissingSectionHeaderError as e:
log.logEvent.critical(e)
except configparser.ParsingError as e:
log.logEvent.critical(e)
class WorkdirError(Exception):
"""An operation involving WORDKIR could not be completed."""
class Main:
def __init__(self):
self.loop = commands.loop # to keep from having to import ..commands for every file
        self.p = re.compile(r'[^\d-]*(-?[\d]+(\.[\d]*)?([eE][+-]?[\d]+)?)')

    def atoi(self, obj: str):
        """Parse the leading number out of obj; returns an int or float, or None if no number is found."""
        m = self.p.match(obj)
        if m:
            result = m.group(1)
            if "." in result or "e" in result or "E" in result:
                return float(result)
            return int(result)
        return None
class Config:
def getConfigVariable(section: str, option: str):
section = section.upper()
option = option.lower()
try: get = config.getfloat(section, option)
except ValueError:
try: get = config.getboolean(section, option)
except ValueError:
try: get = config.get(section, option)
except: raise KeyError
except configparser.NoSectionError as e:
log.logEvent.info(e)
except configparser.NoOptionError as e:
log.logEvent.info(e)
except configparser.ParsingError as e:
log.logEvent.info(e)
if type(get) not in [int, float, bool]:
s = ' '.join(get.split())
else:
s = get
return s
def setConfigVariable(section:str, option:str, toSet=None):
section = section.upper()
option = option.lower()
if not config.has_section(section):
log.logEvent.info(f'Config = Added [{section}]'); return config.add_section(section)
elif config.has_section(section) and not config.has_option(section, option):
s = ''
for sect in config.sections():
if sect != 'DEFAULT' and sect != section:
s += f'[{sect}]\n'
for opt in config.options(sect):
val = config.get(sect, opt)
s += f'{opt}={val}\n'
for sect in config.sections():
if sect == section:
s += f'[{sect}]\n'
for opt in config.options(section):
val = config.get(section, opt)
s += f'{opt}={val}\n'
s += f'{option}={toSet}\n'
with open('config/config.ini','w') as f:
f.write(s); log.logEvent.info(f'Overwrote config.ini with:\n{s}')
f.close()
del sect, opt, s; return log.logEvent.info('Cleaned up via `del sect, opt, s`')
elif config.has_option(section, option) and toSet != None:
oldOpt = config.get(section, option)
config.set(section, option, toSet)
log.logEvent.info(f'Changed [{section}]{option} from {oldOpt} to {toSet}')
del oldOpt; log.logEvent.info('Cleaned up via `del oldOpt`')
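# Illustrative usage (hypothetical section/option names, not part of the original file):
#   value = Config.getConfigVariable('general', 'timeout')   # returns float, bool or str
#   Config.setConfigVariable('general', 'timeout', '30')     # updates an existing option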
class Stack:
"""General purpose stack object."""
def __init__(self):
self.stack = []
def __str__(self):
return f'{self.stack}'
def __len__(self):
return len(self.stack)
def __getitem__(self, item):
return self.stack[item]
def push(self, obj):
self.stack.append(obj)
def pop(self):
obj = self.stack[-1]
del self.stack[-1]
return obj
def top(self):
if len(self.stack) == 0:
return None
else:
return self.stack[-1]
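# Illustrative usage (not part of the original file):
#   s = Stack()
#   s.push('a'); s.push('b')
#   s.top()   # -> 'b'
#   s.pop()   # -> 'b'; the stack now holds ['a']
#   len(s)    # -> 1; top() returns None once the stack is empty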
def trickySplit(line, delim):
"""trickySplit(line, delim) - split line by delimiter delim, but ignoring delimiters found inside (), [], {}, ''' and "".
eg: trickySplit("email(root,'hi there'),system('echo hi, mum')", ',')
would return: ["email(root,'hi there')", "system('echo hi, mum')"]
"""
parenCnt = 0 # ()
curlyCnt = 0 # {}
squareCnt = 0 # []
doubleqCnt = 0 # ""
quoteCnt = 0 # ''
splitList = [] # split strings
current = ''  # current split string
for c in line:
if c == '(':
parenCnt += 1
elif c == ')':
parenCnt -= 1
elif c == '{':
curlyCnt += 1
elif c == '}':
curlyCnt -= 1
elif c == '[':
squareCnt += 1
elif c == ']':
squareCnt -= 1
elif c == '"':
doubleqCnt = 1 - doubleqCnt
elif c == '\'':
quoteCnt = 1 - quoteCnt
elif c == delim:
if parenCnt == 0 and curlyCnt == 0 and squareCnt == 0 and doubleqCnt == 0 and quoteCnt == 0:
splitList.append(current)
current = ''
continue
current += c
if len(current) > 0:
splitList.append(current)
return splitList
def quoteArgs(l):
"""quoteArgs(l) - cycle through list of strings, if the string looks like a
function call (eg: "blah(a, b, c)") then put quotes around each of the
arguments. [Useful if you want to pass the string to eval()]. eg: the
previous example would be converted to 'blah("a", "b", "c")'."""
newList = []
sre = re.compile(r"([\t ]*[A-Za-z0-9_]*[\t ]*\()(.*)([\t ]*\)[\t ]*)")
for s in l:
inx = sre.search(s)
if inx != None:
argLine = inx.group(2)
argList = str.split(argLine, ',')
newCmd = inx.group(1)
i = 0
for a in argList:
a = str.strip(a)
if re.search("[\"'].*[\"']$", a) is None:
a = f'"{a}"'
if i > 0:
newCmd += ','
newCmd += a
i += 1
newCmd += inx.group(3)
newList.append(newCmd)
else:
newList.append(s)
return newList
def charPresent(s, chars):
"""charpresent(s, chars) - returns 1 if ANY of the characters present in the string
chars is found in the string s. If none are found, 0 is returned."""
for c in chars:
if str.find(s, c) != -1:
return True
return False
def stripQuote(s):
"""stripquote(s) - strips start & end of string s of whitespace then
strips " or ' from start & end of string if found - repeats stripping
" and ' until none left."""
    if not isinstance(s, str):
        return s
    s = s.strip()
    while len(s) > 0 and s[0] in ("'", '"') and s[-1] in ("'", '"'):
        s = s[1:-1]
    return s
def atom(ch:str):
"""atom(ch) - ascii-to-multiplier - converts ascii char to a time miltiplier.
eg: s=seconds, m=minutes, h=hours, d=days, w=weeks, c=calendar=months, y=years"""
ch=ch.lower()
if ch == 's':
mult = 1
elif ch =='m':
mult = 60
elif ch == 'h':
mult = 60*60
elif ch == 'd':
mult = 60*60*24
elif ch == 'w':
mult = 60*60*24*7
elif ch == 'c':
mult = 60*60*24*30
elif ch == 'y':
mult = 60*60*24*365
else:
mult = None
return mult
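# Illustrative values (not part of the original file):
#   atom('s') -> 1, atom('m') -> 60, atom('h') -> 3600, atom('d') -> 86400,
#   atom('w') -> 604800, atom('c') -> 2592000, atom('y') -> 31536000, atom('?') -> None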
def valToSeconds(value):
    """Convert a time string to seconds.
    Return None if parsing failed."""
    if not re.search('[mshdwcyMSHDWCY]', value):
        # No unit suffix: treat the value as a plain number of seconds.
        try:
            return int(float(value))
        except (TypeError, ValueError):
            return None
    timeCh = value[-1]
    value = value[:-1]
    mult = atom(timeCh)
    if mult is None:
        return None
    elif mult == 0:
        return 0
    num = Main().atoi(value)
    if num is None:
        return None
    return num * mult
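# Illustrative values, assuming the corrected logic above:
#   valToSeconds('90')  -> 90       (plain number of seconds)
#   valToSeconds('2h')  -> 7200
#   valToSeconds('3d')  -> 259200
#   valToSeconds('foo') -> None     (not a number)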
class safe:
def __init__(self):
self.systemCallSemaphore = threading.Semaphore()
def safePopen(self, cmd, mode):
"""A thread-safe wrapper for os.popen() which did not appear to like
being called simultaneously from multiple threads. Obviously only
allows one thread at a time to call os.popen().
NOTE: safe_pclose() _must_ be called or the semaphore will never be
released."""
self.systemCallSemaphore.acquire()
try:
r = os.popen(cmd, mode)
        except:
            # Release the lock before re-raising so other callers are not blocked forever.
            self.systemCallSemaphore.release()
            raise
return r
def safePclose(self, fh):
"""Close the file handler and release the semaphore."""
try:
fh.close()
        except:
            self.systemCallSemaphore.release()
            raise
self.systemCallSemaphore.release()
def safeGetStatusOutput(self, cmd):
"""A thread-safe wrapper for commands.getstatusoutput() which did not
appear to like being called simultaneously from multiple threads.
Semaphore locking allows only one call to commands.getstatusoutput()
to be executed at any one time.
NOTE: It is still not known whether a call to commands.getstatusoutput
and popen() [and os.system() for that matter] can be called
simultaneously. If not, a global semaphore will have to be used to
protect them all. UPDATE: This appears to be the case, so a global
'systemcall' semaphore is now used."""
self.systemCallSemaphore.acquire()
| |
"▁1924": 8312,
"mani": 8313,
"110": 8314,
"wald": 8315,
"▁2013)": 8316,
"▁Sud": 8317,
"iyah": 8318,
"link": 8319,
"▁Kras": 8320,
"▁Kansas": 8321,
"vic": 8322,
"▁Kenn": 8323,
"wat": 8324,
"▁McLaren": 8325,
"▁viol": 8326,
"lı": 8327,
"▁Bryan": 8328,
"▁Mot": 8329,
"esh": 8330,
"▁Mind": 8331,
"▁Flores": 8332,
"het": 8333,
"fish": 8334,
"▁290": 8335,
"ds": 8336,
"▁cap": 8337,
"amine": 8338,
"▁Turbo": 8339,
"ming": 8340,
"▁1910": 8341,
"hida": 8342,
"că": 8343,
"▁full": 8344,
"▁Lor": 8345,
"Can": 8346,
"▁Dead": 8347,
"▁Madonna": 8348,
"uan": 8349,
"▁Tik": 8350,
"IFF": 8351,
"rop": 8352,
"hou": 8353,
"llis": 8354,
"▁245": 8355,
"zne": 8356,
"fax": 8357,
"▁Focus": 8358,
"▁pô": 8359,
"lat": 8360,
"▁Cesar": 8361,
"hiya": 8362,
"▁Blanco": 8363,
"24.": 8364,
"▁Mumbai": 8365,
"▁1919": 8366,
"elo": 8367,
"TU": 8368,
"fur": 8369,
"▁1927": 8370,
"nova": 8371,
"tá": 8372,
"▁Ata": 8373,
"ssum": 8374,
"▁Mill": 8375,
"▁dô": 8376,
"▁Barry": 8377,
"▁Side": 8378,
"CES": 8379,
"BU": 8380,
"nê": 8381,
"▁Marcos": 8382,
"LG": 8383,
"▁sam": 8384,
"oto": 8385,
"▁Ek": 8386,
"ore": 8387,
"tte": 8388,
"▁Llo": 8389,
"▁Pablo": 8390,
"95%": 8391,
"Real": 8392,
"ppy": 8393,
"lê": 8394,
"Miss": 8395,
"▁Elite": 8396,
"▁Schi": 8397,
"game": 8398,
"aux": 8399,
"▁Cru": 8400,
"raz": 8401,
"▁Square": 8402,
"▁Stefan": 8403,
"▁AH": 8404,
"▁Dara": 8405,
"▁Chart": 8406,
"▁5.5": 8407,
"▁Morris": 8408,
"sio": 8409,
"▁Woman": 8410,
"RG": 8411,
"▁Watson": 8412,
"▁6-": 8413,
"rani": 8414,
"ere": 8415,
"von": 8416,
"▁Carol": 8417,
"▁33.": 8418,
"abad": 8419,
"▁Gordon": 8420,
"▁Bach": 8421,
"▁Hub": 8422,
"jar": 8423,
"▁Koh": 8424,
"▁Blanc": 8425,
"▁Jaguar": 8426,
"oso": 8427,
"ari": 8428,
"▁Math": 8429,
"▁540": 8430,
"▁Arka": 8431,
"das": 8432,
"▁XVI": 8433,
"-32": 8434,
"▁157": 8435,
"▁4.4": 8436,
"▁Deportivo": 8437,
"Fe": 8438,
"▁181": 8439,
"▁Ala": 8440,
"▁Thom": 8441,
"▁México": 8442,
"wn": 8443,
"▁Pla": 8444,
"▁12,5": 8445,
"▁Bee": 8446,
"▁not": 8447,
"▁ESP": 8448,
"▁2011)": 8449,
"usta": 8450,
"▁gara": 8451,
"ted": 8452,
"Don": 8453,
"PAC": 8454,
"▁Tele": 8455,
"279": 8456,
"IFA": 8457,
"▁opera": 8458,
"▁Shir": 8459,
"gam": 8460,
"▁Way": 8461,
"kull": 8462,
"▁Sz": 8463,
"▁Cloud": 8464,
"▁Med": 8465,
"▁Salvador": 8466,
"▁Hack": 8467,
"▁1911": 8468,
"▁item": 8469,
"▁Under": 8470,
"▁Yak": 8471,
"▁Jazz": 8472,
"Indonesia": 8473,
"▁Mono": 8474,
"▁Joachim": 8475,
"omi": 8476,
"▁Patriot": 8477,
"FP": 8478,
"333": 8479,
"▁Universal": 8480,
"OP": 8481,
"▁199": 8482,
"▁đươ": 8483,
"ade": 8484,
"▁Law": 8485,
"vă": 8486,
"▁Marine": 8487,
"nach": 8488,
"▁Valle": 8489,
"▁anime": 8490,
"/16": 8491,
"Gazprom": 8492,
"pat": 8493,
"anada": 8494,
"rella": 8495,
"▁1913": 8496,
"▁Batman": 8497,
"▁Vatican": 8498,
"uki": 8499,
"3,0": 8500,
"▁Film": 8501,
"▁Gill": 8502,
"▁Copenhagen": 8503,
"▁Mina": 8504,
"champ": 8505,
"▁192": 8506,
"▁Jag": 8507,
"/04/": 8508,
"use": 8509,
"▁Trip": 8510,
"egi": 8511,
"cea": 8512,
"▁10,5": 8513,
"▁Ruby": 8514,
"▁176": 8515,
"▁Arte": 8516,
"▁151": 8517,
"▁154": 8518,
"▁Pyr": 8519,
"tamine": 8520,
"olla": 8521,
"gno": 8522,
"Ỳ": 8523,
"▁1908": 8524,
"▁Hugh": 8525,
"▁Story": 8526,
"▁Dur": 8527,
"▁Borussia": 8528,
"berry": 8529,
"▁Athletic": 8530,
"▁Coco": 8531,
"▁Luxembourg": 8532,
"▁Fre": 8533,
"▁Antoine": 8534,
"▁Last": 8535,
"▁Marca": 8536,
"▁Pana": 8537,
"10%": 8538,
"▁Prime": 8539,
"venta": 8540,
"pti": 8541,
"▁Bot": 8542,
"▁Liberty": 8543,
"nell": 8544,
"hr": 8545,
"▁Dam": 8546,
"key": 8547,
"▁VE": 8548,
"▁Seattle": 8549,
"3.7": 8550,
"dele": 8551,
"▁Company": 8552,
"▁Talk": 8553,
"Per": 8554,
"tze": 8555,
"wal": 8556,
"▁Viss": 8557,
"▁Pradesh": 8558,
"▁1923": 8559,
"▁Franklin": 8560,
"▁Dick": 8561,
"sida": 8562,
"▁CCTV": 8563,
"747": 8564,
"shop": 8565,
"cti": 8566,
"▁179": 8567,
"▁VR": 8568,
"odon": 8569,
"▁Mir": 8570,
"▁Grant": 8571,
"▁Sociedad": 8572,
"roy": 8573,
"bé": 8574,
"nsa": 8575,
"▁Valley": 8576,
"lac": 8577,
"▁Maz": 8578,
"1994": 8579,
"▁Zara": 8580,
"dle": 8581,
"▁Luz": 8582,
"ovina": 8583,
"▁Gray": 8584,
"▁out": 8585,
"▁Stanford": 8586,
"ders": 8587,
"mata": 8588,
"▁CÔNG": 8589,
"▁189": 8590,
"lata": 8591,
"▁Pac": 8592,
"▁tè": 8593,
"ké": 8594,
"▁blockchain": 8595,
"▁Lac": 8596,
"lea": 8597,
"sit": 8598,
"▁4.5": 8599,
"NB": 8600,
"▁Mus": 8601,
"▁Cos": 8602,
"▁Not": 8603,
"dou": 8604,
"▁KI": 8605,
"hir": 8606,
"▁bons": 8607,
"▁cy": 8608,
"▁Gri": 8609,
"360": 8610,
"IF": 8611,
"▁Dyna": 8612,
"▁Marshall": 8613,
"ction": 8614,
"▁Barbara": 8615,
"▁Senegal": 8616,
"▁bra": 8617,
"▁184": 8618,
"▁Meghan": 8619,
"▁INS": 8620,
"▁Josh": 8621,
"▁Sat": 8622,
"cla": 8623,
"xt": 8624,
"sam": 8625,
"180": 8626,
"Ga": 8627,
"▁Marina": 8628,
"/05/20": 8629,
"▁Kwa": 8630,
"lene": 8631,
"▁Set": 8632,
"hev": 8633,
"nza": 8634,
"▁Lazada": 8635,
"Eco": 8636,
"BR": 8637,
"coli": 8638,
"color": 8639,
"kawa": 8640,
"▁DV": 8641,
"bí": 8642,
"FE": 8643,
"vina": 8644,
"▁mobile": 8645,
"▁Friday": 8646,
"eth": 8647,
"emon": 8648,
"▁Astra": 8649,
"▁Friedrich": 8650,
"▁Linda": 8651,
"56%": 8652,
"▁kil": 8653,
"rif": 8654,
"té": 8655,
"▁Silver": 8656,
"229": 8657,
"One": 8658,
"iso": 8659,
"▁Musta": 8660,
"▁Mem": 8661,
"lux": 8662,
"▁Vivo": 8663,
"rik": 8664,
"▁Ok": 8665,
"▁un": 8666,
"rett": 8667,
"▁2023": 8668,
"yun": 8669,
"30%": 8670,
"▁6.5": 8671,
"ges": 8672,
"▁Ander": 8673,
"core": 8674,
"icky": 8675,
"▁Apollo": 8676,
"▁Arch": 8677,
"idea": 8678,
"▁Word": 8679,
"▁Rup": 8680,
"▁Pit": 8681,
"▁Kyoto": 8682,
"▁Sarkozy": 8683,
"▁His": 8684,
"▁2018-2019": 8685,
"▁Stewart": 8686,
"sberg": 8687,
"AO": 8688,
"▁2008)": 8689,
"neo": 8690,
"▁Coupe": 8691,
"▁Virgin": 8692,
"af": 8693,
"1972": 8694,
"▁Player": 8695,
"▁Neuro": 8696,
"▁Mand": 8697,
"25%": 8698,
"▁Hali": 8699,
"▁Philadelphia": 8700,
"▁Mae": 8701,
"3.6": 8702,
"RI": 8703,
"▁Kal": 8704,
"INA": 8705,
"285": 8706,
"▁Pil": 8707,
"Australia": 8708,
"mur": 8709,
"Gra": 8710,
"ém": 8711,
"SU": 8712,
"rica": 8713,
"▁post": 8714,
"eau": 8715,
"lands": 8716,
"corp": 8717,
"▁Number": 8718,
"▁164": 8719,
"▁Klo": 8720,
"▁Blog": 8721,
"▁Lauren": 8722,
"enberg": 8723,
"▁mono": 8724,
"end": 8725,
"▁04.": 8726,
"▁Typ": 8727,
"yon": 8728,
"▁AMD": 8729,
"▁Natal": 8730,
"WD": 8731,
"▁fashion": 8732,
"007": 8733,
"▁Kush": 8734,
"jah": 8735,
"may": 8736,
"For": 8737,
"1988": 8738,
"▁2014-2015": 8739,
"We": 8740,
"▁×": 8741,
"▁Sunny": 8742,
"yana": 8743,
"▁Cali": 8744,
"▁172": 8745,
"▁Wid": 8746,
"▁1905": 8747,
"▁Milo": 8748,
"▁Hea": 8749,
"teng": 8750,
"▁Thunder": 8751,
"iki": 8752,
"770": 8753,
"▁1926": 8754,
"tone": 8755,
"▁Classic": 8756,
"As": 8757,
"▁Year": 8758,
"zie": 8759,
"▁Slim": 8760,
"▁Think": 8761,
"gay": 8762,
"▁Christine": 8763,
"490": 8764,
"hd": 8765,
"▁Over": 8766,
"▁(?)": 8767,
"llo": 8768,
"▁440": 8769,
"▁Adidas": 8770,
"entia": 8771,
"gabe": 8772,
"▁512": 8773,
"4.9": 8774,
"▁201": 8775,
"▁it": 8776,
"ras": 8777,
"▁1916": 8778,
"Pe": 8779,
"YG": 8780,
"▁Ghost": 8781,
"780": 8782,
"bb": 8783,
"▁Komp": 8784,
"▁SAR": 8785,
"▁Thompson": 8786,
"graph": 8787,
"▁Angola": 8788,
"chet": 8789,
"ries": 8790,
"▁Let": 8791,
"▁Optim": 8792,
"▁Vigo": 8793,
"XL": 8794,
"140": 8795,
"ego": 8796,
"▁OK": 8797,
"▁Tem": 8798,
"▁Field": 8799,
"▁Dallas": 8800,
"Cal": 8801,
"▁Sparta": 8802,
"▁Delta": 8803,
"190": 8804,
"▁183": 8805,
"▁TA": 8806,
"▁4000": 8807,
"▁Like": 8808,
"▁gra": 8809,
"zuki": 8810,
"15%": 8811,
"pô": 8812,
"▁Virus": 8813,
"▁Pet": 8814,
"▁Asi": 8815,
"▁Islam": 8816,
"▁Citi": 8817,
"▁DAP": 8818,
"▁Who": 8819,
"baby": 8820,
"VR": 8821,
"▁Oregon": 8822,
"▁Bing": 8823,
"▁Ud": 8824,
"▁Kru": 8825,
"▁nua": 8826,
"▁Gin": 8827,
"▁Steel": 8828,
"▁name": 8829,
"enburg": 8830,
"▁Sant": 8831,
"▁Hunter": 8832,
"ies": 8833,
"ovi": 8834,
"vir": 8835,
"ssen": 8836,
"▁Sunday": 8837,
"1991": 8838,
"▁Albi": 8839,
"GIA": 8840,
"sir": 8841,
"▁mon": 8842,
"▁Display": 8843,
"▁Evan": 8844,
"▁Gonzalo": 8845,
"ulatus": 8846,
"787": 8847,
"▁Nie": 8848,
"▁OnePlus": 8849,
"tier": 8850,
"▁197": 8851,
"▁dó": 8852,
"eye": 8853,
"ald": 8854,
"▁3.1": 8855,
"▁SI": 8856,
"▁202": 8857,
"▁640": 8858,
"▁177": 8859,
"▁Kry": 8860,
"▁Ris": 8861,
"oth": 8862,
"▁Einstein": 8863,
"▁195": 8864,
"nas": 8865,
"gram": 8866,
"ught": 8867,
"IL": 8868,
"▁161": 8869,
"▁EP": 8870,
"▁Aquino": 8871,
"▁Solo": 8872,
"gun": 8873,
"▁PCI": 8874,
"IG": 8875,
"pte": 8876,
"cap": 8877,
"▁10-11": 8878,
"▁Electric": 8879,
"orum": 8880,
"nor": 8881,
"NHK": 8882,
"plu": 8883,
"-01": 8884,
"nel": 8885,
"ulia": 8886,
"lica": 8887,
"▁Carne": 8888,
"▁Review": 8889,
"▁Aga": 8890,
"sion": 8891,
"▁Chao": 8892,
"▁Hava": 8893,
"▁Tumblr": 8894,
"2002": 8895,
"▁Caroline": 8896,
"▁Fen": 8897,
"▁marathon": 8898,
"▁LO": 8899,
"▁Mig": 8900,
"MIT": 8901,
"▁Alzheimer": 8902,
"▁MAI": 8903,
"-2019": 8904,
"▁Qi": 8905,
"▁1915": 8906,
"1990": 8907,
"stre": 8908,
"sula": 8909,
"▁This": 8910,
"▁Vista": 8911,
"▁chr": 8912,
"bie": 8913,
"▁Suu": 8914,
"▁Gon": 8915,
"▁Find": 8916,
"▁(2016)": 8917,
"▁2018)": 8918,
"783": 8919,
"▁Hello": 8920,
"▁macro": 8921,
"nal": 8922,
"1992": 8923,
"▁Fas": 8924,
"SCI": 8925,
"170": 8926,
"▁Gigi": 8927,
"1,2": 8928,
"▁Lukas": 8929,
"ttoman": 8930,
"▁~": 8931,
"▁Recep": 8932,
"inus": 8933,
"▁Beau": 8934,
"gata": 8935,
"gra": 8936,
"sto": 8937,
"▁Amar": 8938,
"ose": 8939,
"ré": 8940,
"▁Ninja": 8941,
"▁Florence": 8942,
"▁Sci": 8943,
"PD": 8944,
"▁5-": 8945,
"thal": 8946,
"toma": 8947,
"▁Sofia": 8948,
"▁Speed": 8949,
"▁Otto": 8950,
"oglu": 8951,
| |
<reponame>prismtolley/SoleboxAccountGenerator
#### made by: rtuna#4321 | @rTunaboss
#### Working on Python 3.8.0
print(r'''
____ ____ _ _ ______ __ __ _
| _ \ / __ \ | \ | ||___ / /\ \ \ / /(_)
| |_) || | | || \| | / / / \ \ \_/ / _ ___
| _ < | | | || . ` | / / / /\ \ \ / | | / _ \
| |_) || |__| || |\ | / /__ / ____ \ | | _ | || (_) |
|____/ \____/ |_| \_|/_____|/_/ \_\|_|(_)|_| \___/
''')
print(" • made by: rtuna#4321 | @rTunaboss")
print(" • for personal use only")
print('-------------------------------------\n')
#################### Settings [Feel free to modify this] ####################
how_many = None
# how_many = 1
while not how_many:
try:
how_many = int(input("How many accounts would you like to create?\n"))
except ValueError:
print("This is not an integer. Try again...")
jigFirstAndLast = False #or True
jigFirst = False #or True
jigPhone = True #or False
jigFirstLineAddress = True #or False
#TODO ^for some reason if you set this to False, the account generation stops working (Fake Success)
jigSecondLineAddress = True #or False
#TODO Also make sure you fill in everything in the userdata.json file.
#-------------------------------- DO NOT MODIFY THE CODE BELOW UNLESS YOU KNOW WHAT YOU'RE DOING --------------------------------#
#################### Importing necessary libraries ####################
try:
import requests
from bs4 import BeautifulSoup as bs
from names import get_first_name, get_last_name
import random
import time
import datetime
import threading
import cfscrape
import json
import os
from colorama import Fore, Style, init
from discord_webhook import DiscordWebhook, DiscordEmbed
except:
print('[FATAL ERROR] -> "Some dependencies are not installed."')
print('!!! Make sure you read and do EVERYTHING in the "Before running" section of the README.md file on Github !!!')
print('Available from:\thttps://github.com/rtunaboss/SoleboxAccountGenerator')
input()
quit()
init(autoreset=True)
class logger:
print_lock = threading.Lock()
#################### Defining non-account specific functions ####################
def gettime():
now = str(datetime.datetime.now())
now = now.split(' ')[1]
threadname = threading.currentThread().getName()
threadname = str(threadname).replace('Thread', 'Task')
now = '[' + str(now) + ']' + ' ' + '[' + str(threadname) + ']'
return now
def send_webhook(webhook_url, email, passwd):
hook = DiscordWebhook(url=webhook_url, username="rTuna's Solebox Gen", avatar_url='https://avatars1.githubusercontent.com/u/38296319?s=460&v=4')
color=15957463
embed = DiscordEmbed(
title = 'Account successfully created!',
color=color,
url='https://github.com/rtunaboss/SoleboxAccountGenerator',
)
embed.set_footer(text=f'BONZAY Solebox • {datetime.datetime.now().strftime("%Y-%m-%d %H:%M")}',icon_url='https://cdn.discordapp.com/attachments/527830358767566848/622854816120569887/Bonzay.png')
embed.add_embed_field(name='Username', value=f'{email}')
embed.add_embed_field(name='Password', value=f'||{passwd}||', inline=False)
hook.add_embed(embed)
hook.execute()
def loadProxyUserPass(filename):
global proxyList
with open(filename + '.txt') as f:
file_content = f.read()
file_rows = file_content.split('\n')
for i in range(0, len(file_rows)):
if ':' in file_rows[i]:
tmp = file_rows[i]
tmp = tmp.split(':')
proxies = {'http': 'http://' + tmp[2] + ':' + tmp[3] + '@' + tmp[0] + ':' + tmp[1] + '/',
'https': 'http://' + tmp[2] + ':' + tmp[3] + '@' + tmp[0] + ':' + tmp[1] + '/'}
proxyList.append(proxies)
def loadProxyIpAuth(filename):
with open(filename + '.txt') as f:
file_content = f.read()
tmp = file_content.split('\n')
for n in range(0, len(tmp)):
if ':' in tmp[n]:
temp = tmp[n]
proxies = {'http': 'http://' + temp, 'https': 'http://' + temp}
proxyList.append(proxies)
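# Expected proxies file line formats (inferred from the parsing above):
#   loadProxyUserPass: host:port:username:password   (one proxy per line)
#   loadProxyIpAuth:   host:port                      (IP-authenticated proxies)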
def saveEmail(email, passwd):
with open('valid_emails.txt', 'a') as f:
f.write(f'{email}:{passwd}\n')
def saveNoShipEmail(email, passwd):
with open('no_ship_addy_emails.txt', 'a') as f:
f.write(f'{email}:{passwd}\n')
def getStoken(s):
try:
with logger.print_lock:
print(gettime() + ' [STATUS] -> Trying to scrape stoken...')
index_url = 'https://www.solebox.com/en/my-account/'
index_r = s.get(url=index_url, headers=headers)
if 'captcha.js' in index_r.text:
print(Fore.RED + gettime() + ' [ERROR] -> Encountered Cloudflare.')
return
if index_r.status_code == 200:
soup = bs(index_r.text, 'lxml')
stoken = soup.find('input', {'name': 'stoken'})['value']
with logger.print_lock:
print(Fore.GREEN + Style.BRIGHT + gettime() + f' [SUCCESS] -> Successfully scraped stoken: {stoken} !')
return stoken
else:
with logger.print_lock:
print(Fore.RED + gettime() + ' [ERROR] -> Bad request. Satus code %d, unable to get stoken...' % index_r.status_code)
return
except:
with logger.print_lock:
print(Fore.RED + gettime() + ' [ERROR] -> Unable to get stoken.')
def scrapeCountryIds():
country_data = {}
with logger.print_lock:
print(gettime() + ' [STATUS] -> Scraping country IDs...')
s = cfscrape.create_scraper()
r = s.get(url='https://www.solebox.com/', headers=headers)
soup = bs(r.text, 'lxml')
countrySelection = soup.find('select', {'id':'invCountrySelect'})
countryValues = countrySelection.contents
for val in countryValues:
# scraped info is separated by new lines, which we want to skip
if val == '\n':
continue
else:
country_id = val['value']
country_name = val.text
country_data[country_name] = country_id
with open('countrydata.json', 'w') as f:
json.dump(country_data, f)
with logger.print_lock:
print(Fore.GREEN + Style.BRIGHT + gettime() + ' [SUCCESS] -> Country IDs scraped!')
def getCountryId(country_name):
with open('countrydata.json', 'r') as f:
country_data = json.loads(f.read())
try:
country_id = country_data[country_name]
return country_id
except:
print(Fore.RED + gettime() + ' [ERROR] -> Error getting country_id, check your country name in userdata.json!')
#################### Loading data and initializing other later used variables ####################
with open('useragents.txt', 'r') as f:
# with open('commonagents.txt', 'r') as f:
useragents = f.read()
useragents = useragents.split('\n')
with open('userdata.json', 'r') as f:
userData = json.loads(f.read())
webhook_url = userData['webhook_url']
firstName = userData['firstName']
if firstName == '':
with logger.print_lock:
print(gettime() + ' [ERROR] -> Check your userdata.json, you forgot to fill in your firstName!')
input()
quit()
lastName = userData['lastName']
if lastName == '':
with logger.print_lock:
print(gettime() + ' [ERROR] -> Check your userdata.json, you forgot to fill in your lastName!')
input()
quit()
phoneNum = userData['phoneNum']
if phoneNum == '':
with logger.print_lock:
print(gettime() + ' [ERROR] -> Check your userdata.json, you forgot to fill in your phoneNum!')
input()
quit()
passwd = userData['passwd']
if passwd == '':
with logger.print_lock:
print(gettime() + ' [ERROR] -> Check your userdata.json, you forgot to fill in your passwd!')
input()
quit()
addyFirstLine = userData['addyFirstLine']
if addyFirstLine == '':
with logger.print_lock:
print(gettime() + ' [ERROR] -> Check your userdata.json, you forgot to fill in your addyFirstLine!')
input()
quit()
houseNum = userData['houseNum']
if houseNum == '':
with logger.print_lock:
print(gettime() + ' [ERROR] -> Check your userdata.json, you forgot to fill in your houseNum!')
input()
quit()
zipcode = userData['zipcode']
if zipcode == '':
with logger.print_lock:
print(gettime() + ' [ERROR] -> Check your userdata.json, you forgot to fill in your zipcode!')
input()
quit()
city = userData['city']
if city == '':
with logger.print_lock:
print(gettime() + ' [ERROR] -> Check your userdata.json, you forgot to fill in your city!')
input()
quit()
country_name = userData['country_name']
if country_name == '':
with logger.print_lock:
print(gettime() + ' [ERROR] -> Check your userdata.json, you forgot to fill in your country_name!')
input()
quit()
stateUS = userData['stateUS']
if len(stateUS) > 2:
with logger.print_lock:
print(gettime() + ' [ERROR] -> Check your State settings! Correct formatting: "NY" or "TX"')
addySecondLine = userData['addySecondLine']
catchall = userData['catchall']
if catchall == '':
catchall = 'gmail.com'
if '@' in catchall:
catchall = catchall.replace('@', '')
country_id = getCountryId(country_name)
if country_id == None:
input()
quit()
headers = {
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
'accept-encoding': 'gzip, deflate, br',
'accept-language': 'en-GB,en-US;q=0.9,en;q=0.8,cs;q=0.7,de;q=0.6',
# 'cache-control': 'max-age=0',
'content-type':'application/x-www-form-urlencoded',
'upgrade-insecure-requests': '1',
'sec-fetch-mode': 'navigate',
'sec-fetch-site': 'none',
'sec-fetch-user': '?1',
}
linetwolist = ['apt', 'apartment', 'dorm', 'suite', 'unit', 'house', 'unt', 'room', 'floor']
#################### Main function ####################
def generateAccount():
########## Initializing a session & getting stoken ##########
useragent = random.choice(useragents)
headers['user-agent'] = useragent
# headers['user-agent'] = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:53.0) Gecko/20100101 Firefox/53.0'
with logger.print_lock:
print(gettime() + ' [STATUS] -> Account generation has started...')
# s = cfscrape.create_scraper()
s = requests.Session()
if proxyList:
proxy_is_bad = True
while proxy_is_bad:
s.proxies = random.choice(proxyList)
with logger.print_lock:
print(gettime() + ' [STATUS] -> Checking proxy...')
test = s.get('https://www.solebox.com/', headers=headers)
if test.status_code in (302, 200):
with logger.print_lock:
print(Fore.GREEN + Style.BRIGHT + gettime() + ' [SUCCESS] -> Proxy working...')
proxy_is_bad = False
elif 'captcha.js' in test.text:
with logger.print_lock:
print(Fore.RED + gettime() + ' [ERROR] -> Encountered Cloudflare, rotating proxy...')
else:
with logger.print_lock:
print(Fore.RED + gettime() + ' [ERROR] -> Proxy banned, rotating proxy...')
time.sleep(1)
stoken = getStoken(s)
if stoken is None:
return
time.sleep(1)
s.get(url='https://www.solebox.com/en/open-account/', headers=headers)
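# Hitting the open-account page first presumably lets the session pick up the cookies/state needed before the registration POST below.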
########## Jigging info ##########
global firstName, lastName, phoneNum, jiggedFirstLineAddress, jiggedSecondLineAddress
if jigFirstAndLast:
firstName = get_first_name()
lastName = get_last_name()
elif jigFirst:
firstName = get_first_name()
if jigPhone:
phoneNum = f'+1{random.randint(300,999)}{random.randint(300,999)}{random.randint(300,999)}'
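# Note: this jig yields '+1' followed by nine random digits (three groups of three); a North American number normally has ten digits after the country code, so a fourth digit group may be intended here.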
if jigFirstLineAddress:
jiggedFirstLineAddress = f'{2*(chr(random.randint(97,97+25)).upper() + chr(random.randint(97,97+25)).upper())} {addyFirstLine}'
else:
jiggedFirstLineAddress = addyFirstLine
if jigSecondLineAddress:
jiggedSecondLineAddress = f'{random.choice(linetwolist)} {random.randint(1,20)}{chr(random.randint(97,97+25)).upper()}'
else:
jiggedSecondLineAddress = addySecondLine
email = f'{get_first_name()}{random.randint(1,9999999)}@{catchall}'
time.sleep(0.5)
with logger.print_lock:
print(gettime() + ' [STATUS] -> Trying to create an account...')
########## Configuring payload for registering and POSTing it to create an account ##########
register_payload = {
'stoken': stoken,
'lang': '1',
'listtype': '',
'actcontrol': 'register',
'fnc': 'registeruser',
'cl': 'register',
'lgn_cook' : 0,
'reloadaddress': '',
'blshowshipaddress': 1,
'option' : 3,
'invadr[oxuser__oxsal]': random.choice(['MR', 'MRS']), # MR OR MRS
'invadr[oxuser__oxfname]': firstName,
'invadr[oxuser__oxlname]': lastName,
'invadr[oxuser__oxstreet]': jiggedFirstLineAddress,
'invadr[oxuser__oxstreetnr]': houseNum,
'invadr[oxuser__oxaddinfo]': jiggedSecondLineAddress,
'invadr[oxuser__oxzip]': zipcode,
'invadr[oxuser__oxcity]': city,
fileBinary[56:imageHeaderAddress]
paletteLength = len(paletteData)
paletteEntries = paletteLength/4
for i in xrange(paletteLength, 0, -4): ## Iterate backwards through the palette data, seeking magenta.
if paletteData[i-4:i] == 'fc1f':
paletteData = paletteData[:i-4] + '0000' + paletteData[i:] ## Replace magenta with full transparency.
break
for i in xrange(paletteLength, 0, -4): ## Iterate backwards through the palette data, seeking lime green.
if paletteData[i-4:i] == '83e0':
paletteData = paletteData[:i-4] + '3000' + paletteData[i:] ## Replace lime green with the drop-shadow.
break
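## Note: fileBinary appears to be a string of hex characters (two characters per byte), which is why the byte offsets and lengths below are doubled.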
imageDataOffset = int(fileBinary[imageHeaderAddress + 16:imageHeaderAddress + 24], 16)
imageData = fileBinary[(imageDataOffset - 4)*2:] ## 0x260 = 608, 608 - the offset of 8 lost with the file identifier = 600.
paletteHeader = paletteType + '00000000' + "{0:0{1}X}".format(paletteEntries, 4) # This is in the format that would appear in a file, but without the paletteDataOffset.
else:
## No palette. Return just the image data.
imageData = fileBinary[120:] ## 0x40 -> 64. 2(64 - 4) = 120
imageHeader = width + height + "{0:0{1}X}".format(imageType, 8) # This is in the format that would appear in a file, but without the imageDataOffset.
## The returned imageData will be a bytearray, except for cases with conversion errors, in which case it will be a string.
return (status, imageHeader, imageData, paletteHeader, paletteData) # status, imageHeader, paletteHeader and paletteData are strings; imageData may be a bytearray (see note above).
def buildTextureDumpPath( datFileObj, imageDataOffset, imageType, extension ):
""" Creates a save/destination path for new image files being dumped from the program.
Only used for dumping images from a globally loaded DAT file (not banners). """
sourceDatFilename = os.path.basename( datFileObj.path ).split('_')[-1]
newFileName = sourceDatFilename + '_' + uHex(imageDataOffset + 0x20) + '_' + str(imageType)
# Get the Game ID if this file was loaded from a disc.
if datFileObj.source == 'disc' and globalDiscDetails['gameId'] != '':
# Means an ISO has been loaded, and (looking at the file path) the current dat is not from an outside standalone file.
gameID = globalDiscDetails['gameId']
else: gameID = 'No Associated Disc'
# Construct the destination file path, and create the folders if they don't already exist.
destinationFolder = texDumpsFolder + '\\' + gameID + '\\' + sourceDatFilename + '\\'
if not os.path.exists( destinationFolder ): os.makedirs( destinationFolder )
return destinationFolder + newFileName + extension
def updateEntryHex( event, widget=None ):
""" Updates hex data in a hex entry field to the currently loaded DAT file.
Able to update multiple locations in the file if widget.offset is a list of offsets. """
# Get the entry widget containing details on this edit
if not widget:
widget = event.widget
# Validate the input
newHex = widget.get().zfill( widget.byteLength * 2 ).upper() # Pads the string with zeroes to the left if not enough characters
if not validHex( newHex ):
msg( 'The entered text is not valid hexadecimal!' )
return
# Confirm whether updating is necessary by checking if this is actually new data for any of the offset locations
if type( widget.offsets ) == list:
for offset in widget.offsets:
currentFileHex = hexlify( globalDatFile.getData(offset, widget.byteLength) ).upper()
if currentFileHex != newHex: # Found a difference
break
else: # The loop above didn't break; no change found
return # No change to be updated
else: # The offsets attribute is just a single value (the usual case)
currentFileHex = hexlify( globalDatFile.getData(widget.offsets, widget.byteLength) ).upper()
if currentFileHex == newHex:
return # No change to be updated
# Get the data as a bytearray, and check for other GUI components that may need to be updated
newData = bytearray.fromhex( newHex )
valueEntryWidget = getattr( widget, 'valueEntryWidget', None )
formatting = getattr( widget, 'formatting', None )
decodedValue = None
if len( newData ) != widget.byteLength: # Thanks to the zfill above, this should only happen if the hex entry is too long
msg( 'The new value must be ' + str( widget.byteLength * 2 ) + ' characters (' + str( widget.byteLength ) + ' bytes) long.' )
return
if valueEntryWidget and formatting:
# Check that the appropriate value can be decoded from this hex (if formatting is available)
try:
decodedValue = struct.unpack( '>' + formatting, newData )[0] # unpack returns a tuple; keep just the single decoded value
except Exception as err:
# Construct and display an error message for the user
dataTypes = { '?': 'a boolean', 'b': 'a signed character', 'B': 'an unsigned character', # 1-byte
'h': 'a signed short (halfword)', 'H': 'an unsigned short', # 2-bytes
'i': 'a signed integer', 'I': 'an unsigned integer', 'f': 'a float' } # 4-bytes
if formatting in dataTypes:
expectedLength = struct.calcsize( formatting )
msg( 'The entered value is invalid for {} value (should be {} byte(s)).'.format( dataTypes[formatting], expectedLength ) )
else: # I tried
msg( 'The entered value is invalid.' )
print err
return
# Change the background color of the widget, to show that changes have been made to it and are pending saving.
widget.configure( background='#faa' )
# If this entry has a color swatch associated with it, redraw it.
colorSwatchWidget = getattr( widget, 'colorSwatch', None )
if colorSwatchWidget:
#print 'recreating color swatch image with', newHex
widget.colorSwatch.renderCircle( newHex )
# Add the widget to a list, to keep track of what widgets need to have their background restored to white when saving.
global editedDatEntries
editedDatEntries.append( widget )
# Update the hex shown in the widget (in case the user-entered value was zfilled; i.e. was not long enough)
widget.delete( 0, 'end' )
widget.insert( 0, newHex )
# Update the data shown in the neighboring, decoded value widget
if decodedValue is not None:
valueEntryWidget.delete( 0, 'end' )
valueEntryWidget.insert( 0, decodedValue )
valueEntryWidget.configure( background='#faa' )
editedDatEntries.append( valueEntryWidget )
# Replace the data in the file for each location
updateName = widget.updateName.replace( '\n', ' ' )
descriptionOfChange = updateName + ' modified in ' + globalDatFile.fileName
if type( widget.offsets ) == list:
for offset in widget.offsets:
globalDatFile.updateData( offset, newData, descriptionOfChange )
else: # The offsets attribute is just a single value (the usual case)
globalDatFile.updateData( widget.offsets, newData, descriptionOfChange )
updateProgramStatus( updateName + ' Updated' )
def updateEntryValue( event ):
""" Formats a value in an entry field and updates it into the currently loaded DAT file.
Able to update multiple locations in the file if widget.offset is a list of offsets. """
if event.__class__ == HexEditDropdown:
widget = event
else:
widget = event.widget
# Validate the entered value by making sure it can be correctly encoded
try:
formatting = widget.formatting
if formatting == 'f':
newHex = hexlify( struct.pack( '>f', float(widget.get()) ) ).upper()
else:
newHex = hexlify( struct.pack( '>' + formatting, int(widget.get()) ) ).upper()
except Exception as err:
# Construct and display an error message for the user
dataTypes = { '?': 'a boolean', 'b': 'a signed character', 'B': 'an unsigned character', # 1-byte
'h': 'a signed short (halfword)', 'H': 'an unsigned short', # 2-bytes
'i': 'a signed integer', 'I': 'an unsigned integer', 'f': 'a float' } # 4-bytes
if formatting in dataTypes:
msg( 'The entered value is invalid for {} value.'.format( dataTypes[formatting] ) )
else: # I tried
msg( 'The entered value is invalid.' )
print err
return
# Confirm whether updating is necessary by checking if this is actually new data for any of the offset locations
if type( widget.offsets ) == list:
for offset in widget.offsets:
currentFileHex = hexlify( globalDatFile.getData(offset, widget.byteLength) ).upper()
if currentFileHex != newHex: # Found a difference
break
else: # The loop above didn't break; no change found
return # No change to be updated
else: # The offsets attribute is just a single value (the usual case)
currentFileHex = hexlify( globalDatFile.getData(widget.offsets, widget.byteLength) ).upper()
if currentFileHex == newHex:
return # No change to be updated
# Change the background color of the widget, to show that changes have been made to it and are pending saving.
if event.__class__ == HexEditDropdown:
widget.configure( style='Edited.TMenubutton' )
else:
widget.configure( background='#faa' )
# Add the widget to a list, to keep track of what widgets need to have their background restored to white when saving.
global editedDatEntries
editedDatEntries.append( widget )
# Update the data shown in the neighboring widget
hexEntryWidget = getattr( widget, 'hexEntryWidget', None )
if hexEntryWidget:
hexEntryWidget.delete( 0, 'end' )
hexEntryWidget.insert( 0, newHex )
hexEntryWidget.configure( background='#faa' )
editedDatEntries.append( hexEntryWidget )
# Replace the data in the file for each location
newData = bytearray.fromhex( newHex )
updateName = widget.updateName.replace( '\n', ' ' )
descriptionOfChange = updateName + ' modified in ' + globalDatFile.fileName
if type( widget.offsets ) == list:
for offset in widget.offsets:
globalDatFile.updateData( offset, newData, descriptionOfChange )
else: # The offsets attribute is just a single value (the usual case)
globalDatFile.updateData( widget.offsets, newData, descriptionOfChange )
updateProgramStatus( updateName + ' Updated' )
def updateDiscDetails( event ):
offset = event.widget.offset # In this case, these ARE counting the file header
maxLength = event.widget.maxByteLength
targetFile = event.widget.targetFile # Defines the file this disc detail resides in. Will be a string of either 'opening.bnr' or 'boot.bin'
# Return if the Shift key was held while pressing Enter (indicating the user wants a line break).
modifierKeysState = event.state # An int. Check individual bits for mod key status'; http://infohost.nmt.edu/tcc/help/pubs/tkinter/web/event-handlers.html
shiftDetected = (modifierKeysState & 0x1)
# coding=UTF-8
#
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import (print_function, division, absolute_import,
unicode_literals)
import difflib
import os.path
import shutil
import sys
import tempfile
import unittest
import xml.etree.ElementTree as etree
import defcon
from fontTools.misc.py23 import open
from glyphsLib.builder.constants import GLYPHS_PREFIX
from glyphsLib.interpolation import (
build_designspace, set_weight_class, set_width_class, build_stylemap_names
)
from glyphsLib.classes import GSInstance, GSCustomParameter
def makeFamily(familyName):
m1 = makeMaster(familyName, "Regular", weight=90.0)
m2 = makeMaster(familyName, "Black", weight=190.0)
instances = {
"data": [
makeInstance("Regular", weight=("Regular", 400, 90)),
makeInstance("Semibold", weight=("Semibold", 600, 128)),
makeInstance("Bold", weight=("Bold", 700, 151), is_bold=True),
makeInstance("Black", weight=("Black", 900, 190)),
],
}
return [m1, m2], instances
def makeMaster(familyName, styleName, weight=None, width=None):
m = defcon.Font()
m.info.familyName, m.info.styleName = familyName, styleName
if weight is not None:
m.lib[GLYPHS_PREFIX + "weightValue"] = weight
if width is not None:
m.lib[GLYPHS_PREFIX + "widthValue"] = width
return m
def makeInstance(name, weight=None, width=None, is_bold=None, is_italic=None,
linked_style=None):
inst = GSInstance()
inst.name = name
if weight is not None:
# Glyphs 2.3 stores the instance weight in two to three places:
# 1. as a textual weightClass (such as “Bold”; no value defaults to
# "Regular");
# 2. (optional) as numeric customParameters.weightClass (such as 700),
# which corresponds to OS/2.usWeightClass where 100 means Thin,
# 400 means Regular, 700 means Bold, and 900 means Black;
# 3. as numeric interpolationWeight (such as 66.0), which typically is
# the stem width but can be anything that works for interpolation
# (no value defaults to 100).
weightName, weightClass, interpolationWeight = weight
if weightName is not None:
inst.weightClass = weightName
if weightClass is not None:
inst.customParameters["weightClass"] = weightClass
if interpolationWeight is not None:
inst.interpolationWeight = interpolationWeight
if width is not None:
# Glyphs 2.3 stores the instance width in two places:
# 1. as a textual widthClass (such as “Condensed”; no value defaults
# to "Medium (normal)");
# 2. as numeric interpolationWidth (such as 79), which typically is
# a percentage of whatever the font designer considers “normal”
# but can be anything that works for interpolation (no value
# defaults to 100).
widthClass, interpolationWidth = width
if widthClass is not None:
inst.widthClass = widthClass
if interpolationWidth is not None:
inst.interpolationWidth = interpolationWidth
# TODO: Support custom axes; need to triple-check how these are encoded in
# Glyphs files. Glyphs 3 will likely overhaul the representation of axes.
if is_bold is not None:
inst.isBold = is_bold
if is_italic is not None:
inst.isItalic = is_italic
if linked_style is not None:
inst.linkStyle = linked_style
return inst
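# Example (mirroring the tuples used throughout these tests): makeInstance("Semibold", weight=("Semibold", 600, 128)) yields an instance whose weightClass is "Semibold", whose customParameters["weightClass"] is 600 and whose interpolationWeight is 128.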
class DesignspaceTest(unittest.TestCase):
def build_designspace(self, masters, instances):
master_dir = tempfile.mkdtemp()
try:
designspace, _ = build_designspace(
masters, master_dir, os.path.join(master_dir, "out"), instances)
with open(designspace, mode="r", encoding="utf-8") as f:
result = f.readlines()
finally:
shutil.rmtree(master_dir)
return result
def expect_designspace(self, masters, instances, expectedFile):
actual = self.build_designspace(masters, instances)
path, _ = os.path.split(__file__)
expectedPath = os.path.join(path, "data", expectedFile)
with open(expectedPath, mode="r", encoding="utf-8") as f:
expected = f.readlines()
if os.path.sep == '\\':
# On Windows, the test must not fail because of a difference between
# forward and backward slashes in filename paths.
# The failure happens because of line 217 of "mutatorMath\ufo\document.py"
# > pathRelativeToDocument = os.path.relpath(fileName, os.path.dirname(self.path))
expected = [line.replace('filename="out/', 'filename="out\\') for line in expected]
if actual != expected:
for line in difflib.unified_diff(
expected, actual,
fromfile=expectedPath, tofile="<generated>"):
sys.stderr.write(line)
self.fail("*.designspace file is different from expected")
def test_basic(self):
masters, instances = makeFamily("DesignspaceTest Basic")
self.expect_designspace(masters, instances,
"DesignspaceTestBasic.designspace")
def test_inactive_from_exports(self):
# Glyphs.app recognizes exports=0 as a flag for inactive instances.
# https://github.com/googlei18n/glyphsLib/issues/129
masters, instances = makeFamily("DesignspaceTest Inactive")
for inst in instances["data"]:
if inst.name != "Semibold":
inst.exports = False
self.expect_designspace(masters, instances,
"DesignspaceTestInactive.designspace")
def test_familyName(self):
masters, instances = makeFamily("DesignspaceTest FamilyName")
customFamily = makeInstance("Regular", weight=("Bold", 600, 151))
customFamily.customParameters["familyName"] = "Custom Family"
instances["data"] = [
makeInstance("Regular", weight=("Regular", 400, 90)),
customFamily,
]
self.expect_designspace(masters, instances,
"DesignspaceTestFamilyName.designspace")
def test_fileName(self):
masters, instances = makeFamily("DesignspaceTest FamilyName")
customFileName = makeInstance("Regular", weight=("Bold", 600, 151))
customFileName.customParameters["fileName"] = "Custom FileName"
instances["data"] = [
makeInstance("Regular", weight=("Regular", 400, 90)),
customFileName,
]
self.expect_designspace(masters, instances,
"DesignspaceTestFileName.designspace")
def test_noRegularMaster(self):
# Currently, fonttools.varLib fails to build variable fonts
# if the default axis value does not happen to be at the
# location of one of the interpolation masters.
# glyphsLib tries to work around this downstream limitation.
masters = [
makeMaster("NoRegularMaster", "Thin", weight=26),
makeMaster("NoRegularMaster", "Black", weight=190),
]
instances = {"data": [
makeInstance("Black", weight=("Black", 900, 190)),
makeInstance("Regular", weight=("Regular", 400, 90)),
makeInstance("Bold", weight=("Thin", 100, 26)),
]}
doc = etree.fromstringlist(self.build_designspace(masters, instances))
weightAxis = doc.find('axes/axis[@tag="wght"]')
self.assertEqual(weightAxis.attrib["minimum"], "100.0")
self.assertEqual(weightAxis.attrib["default"], "100.0") # not 400
self.assertEqual(weightAxis.attrib["maximum"], "900.0")
def test_postscriptFontName(self):
master = makeMaster("PSNameTest", "Master")
thin, black = makeInstance("Thin"), makeInstance("Black")
instances = {"data": [thin, black]}
black.customParameters["postscriptFontName"] = "PSNameTest-Superfat"
d = etree.fromstringlist(self.build_designspace([master], instances))
def psname(doc, style):
inst = doc.find('instances/instance[@stylename="%s"]' % style)
return inst.attrib.get('postscriptfontname')
self.assertIsNone(psname(d, "Thin"))
self.assertEqual(psname(d, "Black"), "PSNameTest-Superfat")
def test_instanceOrder(self):
# The generated *.designspace file should place instances
# in the same order as they appear in the original source.
# https://github.com/googlei18n/glyphsLib/issues/113
masters, instances = makeFamily("DesignspaceTest InstanceOrder")
instances["data"] = [
makeInstance("Black", weight=("Black", 900, 190)),
makeInstance("Regular", weight=("Regular", 400, 90)),
makeInstance("Bold", weight=("Bold", 700, 151), is_bold=True),
]
self.expect_designspace(masters, instances,
"DesignspaceTestInstanceOrder.designspace")
def test_twoAxes(self):
# In NotoSansArabic-MM.glyphs, the regular width only contains
# parameters for the weight axis. For the width axis, glyphsLib
# should use 100 as default value (just like Glyphs.app does).
familyName = "DesignspaceTest TwoAxes"
masters = [
makeMaster(familyName, "Regular", weight=90),
makeMaster(familyName, "Black", weight=190),
makeMaster(familyName, "Thin", weight=26),
makeMaster(familyName, "ExtraCond", weight=90, width=70),
makeMaster(familyName, "ExtraCond Black", weight=190, width=70),
makeMaster(familyName, "ExtraCond Thin", weight=26, width=70),
]
instances = {
"data": [
makeInstance("Thin", weight=("Thin", 100, 26)),
makeInstance("Regular", weight=("Regular", 400, 90)),
makeInstance("Semibold", weight=("Semibold", 600, 128)),
makeInstance("Black", weight=("Black", 900, 190)),
makeInstance("ExtraCondensed Thin",
weight=("Thin", 100, 26),
width=("Extra Condensed", 70)),
makeInstance("ExtraCondensed",
weight=("Regular", 400, 90),
width=("Extra Condensed", 70)),
makeInstance("ExtraCondensed Black",
weight=("Black", 900, 190),
width=("Extra Condensed", 70)),
]
}
self.expect_designspace(masters, instances,
"DesignspaceTestTwoAxes.designspace")
def test_variationFontOrigin(self):
# Glyphs 2.4.1 introduced a custom parameter “Variation Font Origin”
# to specify which master should be considered the origin.
# https://glyphsapp.com/blog/glyphs-2-4-1-released
masters = [
makeMaster("Family", "Thin", weight=26),
makeMaster("Family", "Regular", weight=100),
makeMaster("Family", "Medium", weight=111),
makeMaster("Family", "Black", weight=190),
]
instances = {
"data": [
makeInstance("Black", weight=("Black", 900, 190)),
makeInstance("Medium", weight=("Medium", 444, 111)),
makeInstance("Regular", weight=("Regular", 400, 100)),
makeInstance("Thin", weight=("Thin", 100, 26)),
],
"Variation Font Origin": "Medium",
}
doc = etree.fromstringlist(self.build_designspace(masters, instances))
medium = doc.find('sources/source[@stylename="Medium"]')
self.assertEqual(medium.find("lib").attrib["copy"], "1")
weightAxis = doc.find('axes/axis[@tag="wght"]')
self.assertEqual(weightAxis.attrib["default"], "444.0")
def test_designspace_name(self):
master_dir = tempfile.mkdtemp()
try:
designspace_path, _ = build_designspace(
[
makeMaster("Family Name", "Regular", weight=100),
makeMaster("Family Name", "Bold", weight=190),
], master_dir, os.path.join(master_dir, "out"), {})
# no shared base style name, only write the family name
self.assertEqual(os.path.basename(designspace_path),
"FamilyName.designspace")
designspace_path, _ = build_designspace(
[
makeMaster("Family Name", "Italic", weight=100),
makeMaster("Family Name", "Bold Italic", weight=190),
], master_dir, os.path.join(master_dir, "out"), {})
# 'Italic' is the base style; append to designspace name
self.assertEqual(os.path.basename(designspace_path),
"FamilyName-Italic.designspace")
finally:
shutil.rmtree(master_dir)
WEIGHT_CLASS_KEY = GLYPHS_PREFIX + "weightClass"
WIDTH_CLASS_KEY = GLYPHS_PREFIX + "widthClass"
class SetWeightWidthClassesTest(unittest.TestCase):
def test_no_weight_class(self):
ufo = defcon.Font()
# name here says "Bold", however no explicit weightClass
# is assigned
set_weight_class(ufo, makeInstance("Bold"))
# the default OS/2 weight class is set
self.assertEqual(ufo.info.openTypeOS2WeightClass, 400)
# non-empty value is stored in the UFO lib even if same as default
self.assertEqual(ufo.lib[WEIGHT_CLASS_KEY], "Regular")
def test_weight_class(self):
ufo = defcon.Font()
data = makeInstance(
"Bold",
weight=("Bold", None, 150)
)
set_weight_class(ufo, data)
self.assertEqual(ufo.info.openTypeOS2WeightClass, 700)
self.assertEqual(ufo.lib[WEIGHT_CLASS_KEY], "Bold")
def test_explicit_default_weight(self):
ufo = defcon.Font()
data = makeInstance(
"Regular",
weight=("Regular", None, 100)
)
set_weight_class(ufo, data)
# the default OS/2 weight class is set
self.assertEqual(ufo.info.openTypeOS2WeightClass, 400)
# non-empty value is stored in the UFO lib even if same as default
self.assertEqual(ufo.lib[WEIGHT_CLASS_KEY], "Regular")
def test_no_width_class(self):
ufo = defcon.Font()
# no explicit widthClass set, instance name doesn't matter
set_width_class(ufo, makeInstance("Normal"))
# the default OS/2 width class is set
self.assertEqual(ufo.info.openTypeOS2WidthClass, 5)
# non-empty value is stored in the UFO lib even if same as default
self.assertEqual(ufo.lib[WIDTH_CLASS_KEY], "Medium (normal)")
def test_width_class(self):
return cls(DateTime2Type(precision=prec))
def write_info(self, w):
w.put_byte(self._typ.precision)
def write(self, w, value):
if value is None:
w.put_byte(0)
else:
if value.tzinfo:
if not w.session.use_tz:
raise tds_base.DataError('Timezone-aware datetime is used without specifying use_tz')
value = value.astimezone(w.session.use_tz).replace(tzinfo=None)
w.put_byte(self.size)
self._write_time(w, Time.from_pytime(value), self._typ.precision)
self._write_date(w, Date.from_pydate(value))
def read_fixed(self, r, size):
time = self._read_time(r, size - 3, self._typ.precision)
date = self._read_date(r)
dt = DateTime2(date=date, time=time)
res = dt.to_pydatetime()
if r.session.tzinfo_factory is not None:
tzinfo = r.session.tzinfo_factory(0)
res = res.replace(tzinfo=tzinfo)
return res
def read(self, r):
size = r.get_byte()
if size == 0:
return None
return self.read_fixed(r, size)
class DateTimeOffsetSerializer(BaseDateTime73Serializer):
type = tds_base.SYBMSDATETIMEOFFSET
def __init__(self, typ):
super(DateTimeOffsetSerializer, self).__init__(precision=typ.precision,
size=self._precision_to_len[typ.precision] + 5)
self._typ = typ
@classmethod
def from_stream(cls, r):
prec = r.get_byte()
return cls(DateTimeOffsetType(precision=prec))
def write_info(self, w):
w.put_byte(self._typ.precision)
def write(self, w, value):
if value is None:
w.put_byte(0)
else:
utcoffset = value.utcoffset()
value = value.astimezone(_utc).replace(tzinfo=None)
w.put_byte(self.size)
self._write_time(w, Time.from_pytime(value), self._typ.precision)
self._write_date(w, Date.from_pydate(value))
w.put_smallint(int(tds_base.total_seconds(utcoffset)) // 60)
def read_fixed(self, r, size):
time = self._read_time(r, size - 5, self._typ.precision)
date = self._read_date(r)
offset = r.get_smallint()
dt = DateTimeOffset(date=date, time=time, offset=offset)
return dt.to_pydatetime()
def read(self, r):
size = r.get_byte()
if size == 0:
return None
return self.read_fixed(r, size)
class MsDecimalSerializer(BaseTypeSerializer):
type = tds_base.SYBDECIMAL
_max_size = 17
_bytes_per_prec = [
#
# precision can't be 0, but using a value > 0 ensures no
# crash if some bug leaves it at 0...
#
1,
5, 5, 5, 5, 5, 5, 5, 5, 5,
9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
13, 13, 13, 13, 13, 13, 13, 13, 13,
17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
]
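# _bytes_per_prec maps a decimal precision (list index) to the number of bytes used for the packed value on the wire: 1 byte for the unused precision 0, then 5, 9, 13 or 17 bytes for precisions up to 38. _info_struct below packs the TYPE_INFO triple (size, precision, scale) as three unsigned bytes.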
_info_struct = struct.Struct('BBB')
def __init__(self, precision=18, scale=0):
if precision > 38:  # checked before indexing _bytes_per_prec, which only covers precisions up to 38
raise tds_base.DataError('Precision of decimal value is out of range')
super(MsDecimalSerializer, self).__init__(precision=precision,
scale=scale,
size=self._bytes_per_prec[precision])
def __repr__(self):
return 'MsDecimal(scale={}, prec={})'.format(self.scale, self.precision)
@classmethod
def from_value(cls, value):
sql_type = DecimalType.from_value(value)
return cls(scale=sql_type.scale, precision=sql_type.precision)
@classmethod
def from_stream(cls, r):
size, prec, scale = r.unpack(cls._info_struct)
return cls(scale=scale, precision=prec)
def write_info(self, w):
w.pack(self._info_struct, self.size, self.precision, self.scale)
def write(self, w, value):
with decimal.localcontext() as context:
context.prec = 38
if value is None:
w.put_byte(0)
return
if not isinstance(value, decimal.Decimal):
value = decimal.Decimal(value)
value = value.normalize()
scale = self.scale
size = self.size
w.put_byte(size)
val = value
positive = 1 if val > 0 else 0
w.put_byte(positive) # sign
if not positive:
val *= -1
size -= 1
val *= 10 ** scale
for i in range(size):
w.put_byte(int(val % 256))
val //= 256
assert val == 0
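# In short, the wire format written above is: one length byte, one sign byte (1 = positive), then the absolute value scaled by 10**scale and emitted least-significant byte first (little-endian base 256).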
def _decode(self, positive, buf):
val = _decode_num(buf)
val = decimal.Decimal(val)
with decimal.localcontext() as ctx:
ctx.prec = 38
if not positive:
val *= -1
val /= 10 ** self._scale
return val
def read_fixed(self, r, size):
positive = r.get_byte()
buf = tds_base.readall(r, size - 1)
return self._decode(positive, buf)
def read(self, r):
size = r.get_byte()
if size <= 0:
return None
return self.read_fixed(r, size)
class Money4Serializer(BasePrimitiveTypeSerializer):
type = tds_base.SYBMONEY4
declaration = 'SMALLMONEY'
def read(self, r):
return decimal.Decimal(r.get_int()) / 10000
def write(self, w, val):
val = int(val * 10000)
w.put_int(val)
Money4Serializer.instance = Money4Serializer()
class Money8Serializer(BasePrimitiveTypeSerializer):
type = tds_base.SYBMONEY
declaration = 'MONEY'
_struct = struct.Struct('<lL')
def read(self, r):
hi, lo = r.unpack(self._struct)
val = hi * (2 ** 32) + lo
return decimal.Decimal(val) / 10000
def write(self, w, val):
val *= 10000
hi = int(val // (2 ** 32))
lo = int(val % (2 ** 32))
w.pack(self._struct, hi, lo)
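# MONEY travels as a 64-bit count of 1/10000 currency units; write() splits that count into a signed high word and an unsigned low word to match the '<lL' layout that read() reassembles above.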
Money8Serializer.instance = Money8Serializer()
class MoneyNSerializer(BaseTypeSerializerN):
type = tds_base.SYBMONEYN
subtypes = {
4: Money4Serializer.instance,
8: Money8Serializer.instance,
}
class MsUniqueSerializer(BaseTypeSerializer):
type = tds_base.SYBUNIQUE
declaration = 'UNIQUEIDENTIFIER'
instance = None
def __repr__(self):
return 'MsUniqueSerializer()'
@classmethod
def from_stream(cls, r):
size = r.get_byte()
if size != 16:
raise tds_base.InterfaceError('Invalid size of UNIQUEIDENTIFIER field')
return cls.instance
def write_info(self, w):
w.put_byte(16)
def write(self, w, value):
if value is None:
w.put_byte(0)
else:
w.put_byte(16)
w.write(value.bytes_le)
@staticmethod
def read_fixed(r, size):
return uuid.UUID(bytes_le=tds_base.readall(r, size))
def read(self, r):
size = r.get_byte()
if size == 0:
return None
if size != 16:
raise tds_base.InterfaceError('Invalid size of UNIQUEIDENTIFIER field')
return self.read_fixed(r, size)
MsUniqueSerializer.instance = MsUniqueSerializer()
def _variant_read_str(r, size):
collation = r.get_collation()
r.get_usmallint()
return r.read_str(size, collation.get_codec())
def _variant_read_nstr(r, size):
r.get_collation()
r.get_usmallint()
return r.read_str(size, ucs2_codec)
def _variant_read_decimal(r, size):
prec, scale = r.unpack(VariantSerializer.decimal_info_struct)
return MsDecimalSerializer(precision=prec, scale=scale).read_fixed(r, size)
def _variant_read_binary(r, size):
r.get_usmallint()
return tds_base.readall(r, size)
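# In the helpers above, the discarded get_usmallint() is the 2-byte maximum-length property that precedes (var)char / (var)binary data inside a SQL_VARIANT; for the char types, the collation read before it supplies the decoding codec.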
class VariantSerializer(BaseTypeSerializer):
type = tds_base.SYBVARIANT
declaration = 'SQL_VARIANT'
decimal_info_struct = struct.Struct('BB')
_type_map = {
tds_base.GUIDTYPE: lambda r, size: MsUniqueSerializer.instance.read_fixed(r, size),
tds_base.BITTYPE: lambda r, size: BitSerializer.instance.read(r),
tds_base.INT1TYPE: lambda r, size: TinyIntSerializer.instance.read(r),
tds_base.INT2TYPE: lambda r, size: SmallIntSerializer.instance.read(r),
tds_base.INT4TYPE: lambda r, size: IntSerializer.instance.read(r),
tds_base.INT8TYPE: lambda r, size: BigIntSerializer.instance.read(r),
tds_base.DATETIMETYPE: lambda r, size: DateTimeSerializer.instance.read(r),
tds_base.DATETIM4TYPE: lambda r, size: SmallDateTimeSerializer.instance.read(r),
tds_base.FLT4TYPE: lambda r, size: RealSerializer.instance.read(r),
tds_base.FLT8TYPE: lambda r, size: FloatSerializer.instance.read(r),
tds_base.MONEYTYPE: lambda r, size: Money8Serializer.instance.read(r),
tds_base.MONEY4TYPE: lambda r, size: Money4Serializer.instance.read(r),
tds_base.DATENTYPE: lambda r, size: MsDateSerializer(DateType()).read_fixed(r),
tds_base.TIMENTYPE: lambda r, size: MsTimeSerializer(TimeType(precision=r.get_byte())).read_fixed(r, size),
tds_base.DATETIME2NTYPE: lambda r, size: DateTime2Serializer(
DateTime2Type(precision=r.get_byte())).read_fixed(r, size),
tds_base.DATETIMEOFFSETNTYPE: lambda r, size: DateTimeOffsetSerializer(
DateTimeOffsetType(precision=r.get_byte())).read_fixed(r, size),
tds_base.BIGVARBINTYPE: _variant_read_binary,
tds_base.BIGBINARYTYPE: _variant_read_binary,
tds_base.NUMERICNTYPE: _variant_read_decimal,
tds_base.DECIMALNTYPE: _variant_read_decimal,
tds_base.BIGVARCHRTYPE: _variant_read_str,
tds_base.BIGCHARTYPE: _variant_read_str,
tds_base.NVARCHARTYPE: _variant_read_nstr,
tds_base.NCHARTYPE: _variant_read_nstr,
}
@classmethod
def from_stream(cls, r):
size = r.get_int()
return VariantSerializer(size)
def write_info(self, w):
w.put_int(self.size)
def read(self, r):
size = r.get_int()
if size == 0:
return None
type_id = r.get_byte()
prop_bytes = r.get_byte()
type_factory = self._type_map.get(type_id)
if not type_factory:
r.session.bad_stream('Variant type invalid', type_id)
return type_factory(r, size - prop_bytes - 2)
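# Note: size counts everything after the 4-byte total-length field, so the value payload read by each helper is size minus the base-type byte, the property-length byte, and the prop_bytes of type-specific properties.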
def write(self, w, val):
if val is None:
w.put_int(0)
return
raise NotImplementedError
class TableType(SqlTypeMetaclass):
"""
Used to serialize table valued parameters
spec: https://msdn.microsoft.com/en-us/library/dd304813.aspx
"""
def __init__(self, typ_schema, typ_name, columns):
"""
@param typ_schema: Schema where TVP type defined
@param typ_name: Name of TVP type
@param columns: List of column types
"""
if len(typ_schema) > 128:
raise ValueError("Schema part of TVP name should be no longer than 128 characters")
if len(typ_name) > 128:
raise ValueError("Name part of TVP name should be no longer than 128 characters")
if columns is not None:
if len(columns) > 1024:
raise ValueError("TVP cannot have more than 1024 columns")
if len(columns) < 1:
raise ValueError("TVP must have at least one column")
self._typ_dbname = '' # dbname should always be empty string for TVP according to spec
self._typ_schema = typ_schema
self._typ_name = typ_name
self._columns = columns
def __repr__(self):
return 'TableType(s={},n={},cols={})'.format(
self._typ_schema, self._typ_name, repr(self._columns)
)
def get_declaration(self):
assert not self._typ_dbname
if self._typ_schema:
full_name = '{}.{}'.format(self._typ_schema, self._typ_name)
else:
full_name = self._typ_name
return '{} READONLY'.format(full_name)
@property
def typ_schema(self):
return self._typ_schema
@property
def typ_name(self):
return self._typ_name
@property
def columns(self):
return self._columns
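# A minimal usage sketch (hypothetical names): TableType(typ_schema='dbo', typ_name='CategoryTableType', columns=[...]) validates that 1-1024 columns are given and declares itself as 'dbo.CategoryTableType READONLY' via get_declaration().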
class TableValuedParam(SqlValueMetaclass):
"""
Used to represent a value of table-valued parameter
"""
def __init__(self, type_name=None, columns=None, rows=None):
# parsing type name
self._typ_schema = ''
self._typ_name = ''
if type_name:
parts = type_name.split('.')
if len(parts) > 2:
raise ValueError('Type name should consist of at most 2 parts, e.g. dbo.MyType')
self._typ_name = parts[-1]
if len(parts) > 1:
self._typ_schema = parts[0]
self._columns = columns
self._rows = rows
@property
def typ_name(self):
return self._typ_name
@property
def typ_schema(self):
return self._typ_schema
@property
def columns(self):
return self._columns
@property
def rows(self):
return self._rows
def is_null(self):
return self._rows is None
def peek_row(self):
try:
rows = iter(self._rows)
except TypeError:
raise tds_base.DataError('rows should be iterable')
try:
row = next(rows)
except StopIteration:
# no rows
raise tds_base.DataError("Cannot infer columns from rows for TVP because there are no rows")
else:
# put row back
self._rows = itertools.chain([row], rows)
return row
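# A minimal usage sketch (hypothetical values): TableValuedParam(type_name='dbo.CategoryTableType', rows=[(1, 'books'), (2, 'games')]) splits the type name into schema 'dbo' and name 'CategoryTableType'; passing rows=None makes is_null() true, and peek_row() lets a serializer inspect the first row without consuming the rest of the iterator.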
class TableSerializer(BaseTypeSerializer):
"""
Used to serialize table valued parameters
spec: https://msdn.microsoft.com/en-us/library/dd304813.aspx
"""
type = tds_base.TVPTYPE
def read(self, r):
""" According to spec TDS does not support output TVP values """
raise NotImplementedError
@classmethod
def from_stream(cls, r):
""" According to spec TDS does not support output TVP values """
raise NotImplementedError
def __init__(self, table_type, columns_serializers):
super(TableSerializer, self).__init__()
self._table_type = table_type
self._columns_serializers = columns_serializers
@property
def table_type(self):
return self._table_type
def __repr__(self):
return 'TableSerializer(t={},c={})'.format(
repr(self._table_type), repr(self._columns_serializers)
)
def write_info(self, w):
"""
Writes TVP_TYPENAME structure
spec: https://msdn.microsoft.com/en-us/library/dd302994.aspx
@param w: TdsWriter
@return:
"""
w.write_b_varchar("") # db_name, should be empty
w.write_b_varchar(self._table_type.typ_schema)
w.write_b_varchar(self._table_type.typ_name)
def write(self, w, val):
"""
Writes remaining part of TVP_TYPE_INFO structure, resuming from TVP_COLMETADATA
specs:
https://msdn.microsoft.com/en-us/library/dd302994.aspx
https://msdn.microsoft.com/en-us/library/dd305261.aspx
https://msdn.microsoft.com/en-us/library/dd303230.aspx
@param w: TdsWriter
@param val: TableValuedParam or None
@return:
"""
if val.is_null():
w.put_usmallint(tds_base.TVP_NULL_TOKEN)
else:
columns = self._table_type.columns
w.put_usmallint(len(columns))
for i, column in enumerate(columns):
w.put_uint(column.column_usertype)
w.put_usmallint(column.flags)
# TYPE_INFO structure: https://msdn.microsoft.com/en-us/library/dd358284.aspx
serializer = self._columns_serializers[i]
type_id
Data |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| |
/ MIC Value /
/ +-+-+-+-+-+-+-+-+
| | Padding |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Args:
code (int): parameter code
cbit (bool): critical bit
clen (int): length of contents
Keyword Args:
desc (pcapkit.const.hip.parameter.Parameter): parameter type
length (int): remaining packet length
version (Literal[1, 2]): HIP protocol version
Returns:
DataType_Param_Payload_MIC: Parsed parameter data.
"""
_next = self._read_unpack(1)
_resv = self._read_fileng(3)
_data = self._read_fileng(4)
_micv = self._read_fileng(clen-8)
payload_mic = dict(
type=desc,
critical=cbit,
length=clen,
next=TP_PROTO.get(_next),
data=_data,
value=_micv,
)
_plen = length - clen
if _plen:
self._read_fileng(_plen)
return payload_mic
def _read_para_transaction_id(self, code, cbit, clen, *, desc, length, version): # pylint: disable=unused-argument
"""Read HIP ``TRANSACTION_ID`` parameter.
Structure of HIP ``TRANSACTION_ID`` parameter [:rfc:`6078`]:
.. code-block:: text
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Type | Length |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Identifier /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ | Padding |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Args:
code (int): parameter code
cbit (bool): critical bit
clen (int): length of contents
Keyword Args:
desc (pcapkit.const.hip.parameter.Parameter): parameter type
length (int): remaining packet length
version (Literal[1, 2]): HIP protocol version
Returns:
DataType_Param_Transaction_ID: Parsed parameter data.
"""
_tsid = self._read_unpack(clen)
transaction_id = dict(
type=desc,
critical=cbit,
length=clen,
id=_tsid,
)
_plen = length - clen
if _plen:
self._read_fileng(_plen)
return transaction_id
def _read_para_overlay_id(self, code, cbit, clen, *, desc, length, version): # pylint: disable=unused-argument
"""Read HIP ``OVERLAY_ID`` parameter.
Structure of HIP ``OVERLAY_ID`` parameter [:rfc:`6079`]:
.. code-block:: text
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Type | Length |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Identifier /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ | Padding |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Args:
code (int): parameter code
cbit (bool): critical bit
clen (int): length of contents
Keyword Args:
desc (pcapkit.const.hip.parameter.Parameter): parameter type
length (int): remaining packet length
version (Literal[1, 2]): HIP protocol version
Returns:
DataType_Param_Overlay_ID: Parsed parameter data.
"""
_olid = self._read_unpack(clen)
overlay_id = dict(
type=desc,
critical=cbit,
length=clen,
id=_olid,
)
_plen = length - clen
if _plen:
self._read_fileng(_plen)
return overlay_id
def _read_para_route_dst(self, code, cbit, clen, *, desc, length, version): # pylint: disable=unused-argument
"""Read HIP ``ROUTE_DST`` parameter.
Structure of HIP ``ROUTE_DST`` parameter [:rfc:`6028`]:
.. code-block:: text
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Type | Length |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Flags | Reserved |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| |
| HIT #1 |
| |
| |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
. . .
. . .
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| |
| HIT #n |
| |
| |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Args:
code (int): parameter code
cbit (bool): critical bit
clen (int): length of contents
Keyword Args:
desc (pcapkit.const.hip.parameter.Parameter): parameter type
length (int): remaining packet length
version (Literal[1, 2]): HIP protocol version
Returns:
DataType_Param_Route_Dst: Parsed parameter data.
Raises:
ProtocolError: If the parameter is malformed.
"""
if (clen - 4) % 16 != 0:
raise ProtocolError(f'HIPv{version}: [ParamNo {code}] invalid format')
_flag = self._read_binary(2)
_resv = self._read_fileng(2)
_addr = list()
for _ in range((clen - 4) // 16):
_addr.append(ipaddress.ip_address(self._read_fileng(16)))
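# Each HIT is read as a raw 16-byte value, so ipaddress.ip_address() exposes it as an IPv6Address object.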
route_dst = dict(
type=desc,
critical=cbit,
length=clen,
flags=dict(
symmetric=bool(int(_flag[0], base=2)),
must_follow=bool(int(_flag[1], base=2)),
),
ip=tuple(_addr),
)
return route_dst
def _read_para_hip_transport_mode(self, code, cbit, clen, *, desc, length, version):
"""Read HIP ``HIP_TRANSPORT_MODE`` parameter.
Structure of HIP ``HIP_TRANSPORT_MODE`` parameter [:rfc:`6261`]:
.. code-block:: text
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Type | Length |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Port | Mode ID #1 |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Mode ID #2 | Mode ID #3 |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Mode ID #n | Padding |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Args:
code (int): parameter code
cbit (bool): critical bit
clen (int): length of contents
Keyword Args:
desc (pcapkit.const.hip.parameter.Parameter): parameter type
length (int): remaining packet length
version (Literal[1, 2]): HIP protocol version
Returns:
DataType_Param_Transport_Mode: Parsed parameter data.
Raises:
ProtocolError: If ``clen`` is **NOT** a multiple of ``2``.
"""
if clen % 2 != 0:
raise ProtocolError(f'HIPv{version}: [ParamNo {code}] invalid format')
_port = self._read_unpack(2)
_mdid = list()
for _ in range((clen - 2) // 2):
_mdid.append(_TP_MODE_ID.get(self._read_unpack(2)))
hip_transport_mode = dict(
type=desc,
critical=cbit,
length=clen,
port=_port,
id=tuple(_mdid),
)
_plen = length - clen
if _plen:
self._read_fileng(_plen)
return hip_transport_mode
def _read_para_hip_mac(self, code, cbit, clen, *, desc, length, version): # pylint: disable=unused-argument
"""Read HIP ``HIP_MAC`` parameter.
Structure of HIP ``HIP_MAC`` parameter [:rfc:`7401`]:
.. code-block:: text
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Type | Length |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| |
| HMAC |
/ /
/ +-------------------------------+
| | Padding |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Args:
code (int): parameter code
cbit (bool): critical bit
clen (int): length of contents
Keyword Args:
desc (pcapkit.const.hip.parameter.Parameter): parameter type
length (int): remaining packet length
version (Literal[1, 2]): HIP protocol version
Returns:
DataType_Param_HMAC: Parsed parameter data.
"""
_hmac = self._read_fileng(clen)
hip_mac = dict(
type=desc,
critical=cbit,
length=clen,
hmac=_hmac,
)
_plen = length - clen
if _plen:
self._read_fileng(_plen)
return hip_mac
def _read_para_hip_mac_2(self, code, cbit, clen, *, desc, length, version): # pylint: disable=unused-argument
"""Read HIP ``HIP_MAC_2`` parameter.
Structure of HIP ``HIP_MAC_2`` parameter [:rfc:`7401`]:
.. code-block:: text
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Type | Length |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| |
| HMAC |
/ /
/ +-------------------------------+
| | Padding |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Args:
code (int): parameter code
cbit (bool): critical bit
clen (int): length of contents
Keyword Args:
desc (pcapkit.const.hip.parameter.Parameter): parameter type
length (int): remaining packet length
version (Literal[1, 2]): HIP protocol version
Returns:
DataType_Param_HMAC_2: Parsed parameter data.
"""
_hmac = self._read_fileng(clen)
hip_mac_2 = dict(
type=desc,
critical=cbit,
length=clen,
hmac=_hmac,
)
_plen = length - clen
if _plen:
self._read_fileng(_plen)
return hip_mac_2
def _read_para_hip_signature_2(self, code, cbit, clen, *, desc, length, version): # pylint: disable=unused-argument
"""Read HIP ``HIP_SIGNATURE_2`` parameter.
Structure of HIP ``HIP_SIGNATURE_2`` parameter [:rfc:`7401`]:
.. code-block:: text
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Type | Length |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| SIG alg | Signature /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ | Padding |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Args:
code (int): parameter code
cbit (bool): critical bit
clen (int): length of contents
Keyword Args:
desc (pcapkit.const.hip.parameter.Parameter): parameter type
length (int): remaining packet length
version (Literal[1, 2]): HIP protocol version
Returns:
DataType_Param_Signature_2: Parsed parameter data.
"""
_algo = self._read_unpack(2)
_sign = self._read_fileng(clen-2)
hip_signature_2 = dict(
type=desc,
critical=cbit,
length=clen,
algorithm=_HI_ALGORITHM.get(_algo),
signature=_sign,
)
_plen = length - clen
if _plen:
self._read_fileng(_plen)
return hip_signature_2
def _read_para_hip_signature(self, code, cbit, clen, *, desc, length, version): # pylint: disable=unused-argument
"""Read HIP ``HIP_SIGNATURE`` parameter.
Structure of HIP ``HIP_SIGNATURE`` parameter [:rfc:`7401`]:
.. code-block:: text
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Type | Length |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| SIG alg | Signature /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ | Padding |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Args:
code (int): parameter code
cbit (bool): critical bit
clen (int): length of contents
Keyword Args:
desc (pcapkit.const.hip.parameter.Parameter): parameter type
length (int): remaining packet length
version (Literal[1, 2]): HIP protocol version
Returns:
DataType_Param_Signature: Parsed parameter data.
"""
_algo = self._read_unpack(2)
_sign = self._read_fileng(clen-2)
hip_signature = dict(
import json
from multiprocessing import Process
import os
import LeapRecord
from AnyPy import AnyPy
from config.Configuration import env
from BVHAnimation import bvh_animation
from resources.pymo.pymo.parsers import BVHParser as Pymo_BVHParser
from gooey.gui import application
from gooey.gui import processor
from gooey.gui.containers import application as containers_application
from gooey.python_bindings import gooey_decorator, gooey_parser
# strings for the actions in the Gooey side-menu
ACTION_RECORD = 'Record'
ACTION_ANYBODY = 'AnyBody'
ACTION_CONVERTER = 'Converter'
ACTION_ANIMATION = 'Animation'
EXECUTED_COMMAND = 'command'
class GooeyModification:
# overwrite the Gooey default stop method to cancel Leap Motion recording
def on_stop(self):
"""Overload the stop method to allow stopping of Leap Motion Recording"""
executed_command = LeapGui.StoredArgs().load().stored_args[EXECUTED_COMMAND]
if executed_command == ACTION_RECORD:
return self.clientRunner.stop_leap()
self.clientRunner.stop()
def stop_leap(self):
if not self._process.stdin.closed:
"""Send Keyboard Interrupt to stop Leap Motion recording and parse bvh / interpolation"""
self._process.stdin.write(b"\n")
self._process.stdin.close()
return
self.stop()
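# stdin is already closed, so fall back to Gooey's regular stop() behaviour.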
# overwrite classes and methods of the stop button
containers_application.GooeyApplication.onStopExecution = on_stop
processor.ProcessController.stop_leap = stop_leap
containers_application.ProcessController = processor.ProcessController
application.GooeyApplication = containers_application.GooeyApplication
gooey_decorator.application = application
# create alias
Gooey = gooey_decorator.Gooey
GooeyParser = gooey_parser.GooeyParser
class LeapGui:
"""Implementation of the Gooey including some adaptions"""
Gooey = GooeyModification.Gooey
GooeyParser = GooeyModification.GooeyParser
@staticmethod
@Gooey(program_name="Leap Motion Recorder (c) <NAME>",
sidebar_title='Actions',
# return_to_config=True,
image_dir='config/gooey',
tabbed_groups=True,
default_size=(1000, 700))
def parse_args():
""" Use GooeyParser to build up the arguments we will use in our script
Save the arguments in a default json file so that we can retrieve them
every time we run the script.
"""
# load default values from json (saved from last run)
stored_args = LeapGui.StoredArgs().load()
parser = LeapGui.GooeyParser(description='Record Leap Motion data and export to BVH or AnyBody')
env.add_parser(parser)
subs = parser.add_subparsers(help='Tools', dest='command')
# === record === #
record_parser = subs.add_parser(ACTION_RECORD, help='Leap Recording')
# settings Group
settings_group = record_parser.add_argument_group(
"Settings",
gooey_options={
'show_border': True,
'columns': 1
}
)
basis_group = settings_group.add_mutually_exclusive_group(
required=True,
gooey_options={
'initial_selection': 0
})
settings_group.add_argument('frames_per_second',
metavar='Frames per second',
action='store',
default=stored_args.get(ACTION_RECORD, 'frames_per_second', '30'),
gooey_options={
'validator': {
'test': '1 <= int(user_input) <= 150',
'message': 'Must be between 1 and 150'
}
}
)
settings_group.add_argument('-show_animation',
metavar='Animate',
help='Show motion animation after recording',
action='store_true')
basis_group.add_argument('-anybody_basis',
metavar='Calculate joint angles to AnyBody basis',
action='store_true')
basis_group.add_argument('-firstframe_basis',
metavar='Calculate joint angles to initial recorded basis (first frame)',
action='store_true')
# bvh Group
bvh_group = record_parser.add_argument_group(
"BVH Export",
gooey_options={
'show_border': True,
'columns': 1
}
)
bvh_group.add_argument('-bvh',
metavar='Write BVH-File',
action='store_true')
bvh_group.add_argument('-bvh_path',
metavar='BVH File',
action='store',
default=stored_args.get(
ACTION_RECORD, 'bvh_path', LeapGui.StoredArgs.path('../output/BVH/RightHand.bvh')),
widget='FileSaver',
help='Choose location, where to save the BVH File')
bvh_group.add_argument('channels',
metavar='BVH Channels',
action='store',
default=stored_args.get(ACTION_RECORD, 'channels', 'rotation'),
widget='Dropdown',
help='Rotation: (X,Y,Z) rotation only (default)\n'
'Position: (X,Y,Z) rotation and position for all channels',
choices=['rotation', 'position'],
gooey_options={
'validator': {
'test': 'user_input != "Select Option"',
'message': 'Choose a channel setting'
}
})
# interpol Group
interpol_group = record_parser.add_argument_group(
"Interpolation Vector",
gooey_options={
'show_border': True,
'columns': 1
}
)
interpol_group.add_argument('-anybody',
metavar='Write interpolation files for AnyBody',
action='store_true')
interpol_group.add_argument('-anybody_template_path',
metavar='AnyBody templates',
action='store',
default=stored_args.get(
ACTION_RECORD, 'anybody_template_path',
LeapGui.StoredArgs.path('config/anybody_templates')),
widget='DirChooser',
help='Source directory that contains *.template files for AnyBody')
interpol_group.add_argument('-anybody_output_path',
metavar='Output directory',
action='store',
default=stored_args.get(
ACTION_RECORD, 'anybody_output_path',
LeapGui.StoredArgs.path('../output/Anybody')),
widget='DirChooser',
help='Output directory for interpolation files')
# # c3d Group
# c3d_group = record_parser.add_argument_group(
# "C3D",
# gooey_options={
# 'show_border': True,
# 'columns': 2
# }
# )
# c3d_group.add_argument('-c3d',
# metavar='Write C3D-File',
# action='store_true')
#
# c3d_group.add_argument('-c3d_filename',
# metavar=' ',
# action='store',
# default=stored_args.get(ACTION_RECORD, 'c3d_filename', 'RightHand'),
# help='Filename')
#
# c3d_group.add_argument('-c3d_path',
# metavar=' ',
# action='store',
# default=stored_args.get(
# ACTION_RECORD, 'c3d_path', LeapGui.StoredArgs.path('../output/C3D')),
# widget='DirChooser',
# help='Output directory for c3d file')
# === anybody === #
anybody_parser = subs.add_parser(ACTION_ANYBODY, help='Anybody Simulation')
anybody_group = anybody_parser.add_argument_group(
"Source files",
"Make a selection for the source files used for the AnyBody analysis",
gooey_options={
'show_border': True,
'columns': 1
}
)
anybody_file_group = anybody_group.add_mutually_exclusive_group(
required=True,
gooey_options={
'initial_selection': 0
})
anybody_file_group.add_argument('-any_interpol_files',
metavar='Use existing vector files (in {})'.format(AnyPy.INTERPOL_DIR),
help='Use interpolation vector files from AnyBody project default directory',
action='store_true')
anybody_file_group.add_argument('-any_bvh_file',
metavar='Source of the *.bvh file',
action='store',
default=stored_args.get(
ACTION_ANYBODY, 'any_bvh_file',
LeapGui.StoredArgs.path('../output/BVH/RightHand.bvh')),
widget='FileChooser',
help='Choose a bvh file to be converted to the interpolation vector files')
anybody_file_group.add_argument('-any_files_dir',
metavar='Source (.any)',
action='store',
default=stored_args.get(
ACTION_ANYBODY, 'any_files_dir',
LeapGui.StoredArgs.path('../output/Anybody')),
widget='DirChooser',
help='Source directory that contains interpolation *.any files for AnyBody')
anybody_group.add_argument('any_main_file',
metavar='Source of HAND.Main.any',
action='store',
default=stored_args.get(ACTION_ANYBODY, 'any_main_file', ''),
widget='FileChooser',
help='Choose the main AnyBody file for the calculation')
anybody_group.add_argument('-start_frame',
metavar='Start Frame',
help='default: 1',
action='store',
# default=stored_args.get(ACTION_ANYBODY, 'start_frame', '1'),
gooey_options={
'validator': {
'test': '1 <= int(user_input)',
'message': 'Must be greater or equal than 1'
}
},
type=int)
anybody_group.add_argument('-end_frame',
metavar='End Frame',
help='default: end',
action='store',
# default=stored_args.get(ACTION_ANYBODY, 'end_frame', 'end'),
gooey_options={
'validator': {
'test': '("end" in user_input.lower()) or (1 <= int(user_input))',
'message': 'Must be a positive value and greater than the start frame'
}
})
operation_group = anybody_parser.add_argument_group(
"Operations",
"Select which operations should be executed by AnyBody",
gooey_options={
'show_border': True,
'columns': 1
}
)
operation_group.add_argument('-load',
metavar='Load AnyBody model',
action='store_true')
operation_group.add_argument('-initial_conditions',
metavar='Calc initial conditions',
action='store_true')
operation_group.add_argument('-kinematic',
metavar='Calc kinematic analysis',
action='store_true')
operation_group.add_argument('-inverse_dynamics',
metavar='Calc inverse dynamics',
action='store_true')
operation_group.add_argument('-nstep',
metavar='Time steps',
help='Number of equally spaced time steps\n'
'(leave empty for not changing the setting)',
action='store',
# default=stored_args.get(ACTION_ANYBODY, 'nstep', '50'),
gooey_options={
'validator': {
'test': '1 <= int(user_input)',
'message': 'Must be greater or equal than 1'
}
},
type=int)
# operation_group.add_argument('-order',
# metavar='Order of B-spline interpolation',
# help='Interpolates between the data points with a B-spline using this order'
# '\n(leave empty for using the default value)',
# action='store',
# # default=stored_args.get(ACTION_ANYBODY, 'order', '4'),
# gooey_options={
# 'validator': {
# 'test': '1 <= int(user_input)',
# 'message': 'Must be greater or equal than 1'
# }
# },
# type=int)
result_group = anybody_parser.add_argument_group(
"Results",
gooey_options={
'show_border': True,
'columns': 1
}
)
result_group.add_argument('-plot',
metavar='Open the plot after the analysis',
action='store_true')
result_group.add_argument('-output_file_path',
metavar='Save .anydata.h5 file',
action='store',
default=stored_args.get(
ACTION_ANYBODY, 'output_file_path',
LeapGui.StoredArgs.path('../output/Anybody/FreeHand.anydata.h5')),
widget='FileSaver',
help='Save .anydata.h5 file to save the analysis results')
result_group.add_argument('-replay_output',
metavar='Open AnyBody and load the results',
action='store_true')
# === converter === #
converter_parser = subs.add_parser(ACTION_CONVERTER, help='Convert a BVH file into .any files')
converter_group = converter_parser.add_argument_group(
"Converter",
"Convert a BVH-File in .any-Files",
gooey_options={
'show_border': True,
'columns': 1
}
)
converter_group.add_argument('bvh_file',
metavar='Source: *.bvh',
action='store',
default=stored_args.get(
ACTION_CONVERTER, 'bvh_file',
LeapGui.StoredArgs.path('../output/BVH/RightHand.bvh')),
widget='FileChooser',
help='Source bvh-file to convert')
# converter_group.add_argument('-any_file',
# metavar='Convert to .any files',
# action='store_true')
# converter_group.add_argument('-c3d',
# metavar='Convert to .c3d files',
# action='store_true')
converter_group.add_argument('file_dir',
metavar='Target: store files',
action='store',
default=stored_args.get(
ACTION_CONVERTER, 'file_dir', LeapGui.StoredArgs.path('../output')),
widget='DirChooser',
help='Directory to store the converted files')
# === bvh animation === #
animation_parser = subs.add_parser(ACTION_ANIMATION, help='Show an animation for a BVH file')
animation_group = animation_parser.add_argument_group(
"Animation",
"Select a BVH file to be animated",
gooey_options={
'show_border': True,
'columns': 1
}
)
animation_group.add_argument('bvh_animation',
metavar='BVH file path',
action='store',
default=stored_args.get(
ACTION_ANIMATION, 'bvh_animation',
LeapGui.StoredArgs.path('../output/BVH/RightHand.bvh')),
widget='FileChooser')
# start the UI and save arguments to json for next run
stored_args.save(parser.parse_args())
class StoredArgs:
"""class for loading and saving arguments from/to json, also to handle default values"""
def __init__(self):
self.stored_args = {}
self.loaded_actions = {}
# get the script name without the extension & use it to build up the json filename
script_name = os.path.splitext(os.path.basename(__file__))[0]
self.args_file = "{}-args.json".format(script_name)
def load(self):
# Read in the prior arguments as a dictionary
if os.path.isfile(self.args_file):
with open(self.args_file) as data_file:
self.stored_args = json.load(data_file)
self.loaded_actions = self.stored_args.keys()
return self
def get(self, action, arg, default):
return self.stored_args[action].get(arg) if action in self.loaded_actions else default
@staticmethod
def path(relative_path):
return os.path.normpath(os.path.join(os.getcwd(), relative_path))
def save(self, args):
# Store the values of the arguments to the environment to access them in the code
env.save_arguments(args)
# Store the values of the arguments so we have them next time we run
with open(self.args_file, 'w') as data_file:
# Using vars(args) returns the data as a dictionary
self.stored_args[EXECUTED_COMMAND] = args.command
self.stored_args[args.command] = vars(args)
json.dump(self.stored_args, data_file)
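# Sketch of the JSON written above (hypothetical values): one top-level entry under the
# EXECUTED_COMMAND key holding the last command name, plus one entry per command with all
# of its argument values, e.g.
# {"anybody": {"command": "anybody", "any_bvh_file": "../output/BVH/RightHand.bvh", "start_frame": 1, ...}, ...}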
@staticmethod
def run():
"""Open the Gooey GUI and then run the selected action with the chosen arguments"""
LeapGui.parse_args()
# Record, Anybody, Converter
if env.config.command == ACTION_RECORD:
from GuiControl import GuiControl
gui = GuiControl()
gui.set_windows_record()
import time
countdown = 5
for ii in range(countdown):
print("Record starting in {} seconds ...".format(countdown-ii))
time.sleep(1)
LeapRecord.start_recording()
gui.end_record()
print("End of recording\n")
if env.config.show_animation:
print("Loading the animation ...")
p = Process(target=bvh_animation.animate)
p.start()
# wait for bvh_animation to be closed
p.join()
return True
if env.config.command == ACTION_ANYBODY:
from LogWatcher import log_watcher
anypy = AnyPy(env.config.any_main_file, env.config.any_files_dir)
log_watcher.start(os.path.join(anypy.any_path, anypy.LOG_FILE))
run_status = anypy.run()
log_watcher.stop()
if not run_status:
# AnyBody operations were not successful
return False
if env.config.replay_output:
anypy.post_operations()
if env.config.plot:
anypy.plot()
return True
if env.config.command == ACTION_CONVERTER:
from AnyWriter import AnyWriter
| |
v in list(SMCOGs.items()):
ID = k
if v != 'none':
out.write("%s\tnote\t%s\n" % (ID, v))
return bbDomains, bbSubType, BackBone
def GetClusterGenes(input, GFF, genome, annotations):
# load clusters into InterLap
interClust = bed2interlapNames(input)
# load GFF3 into Dictionary
Genes = {}
Genes = gff2dict(GFF, genome, Genes)
# loop through genes and check if in Clusters
dictClusters = {}
for k, v in natsorted(Genes.items()):
if v['type'] == 'mRNA':
if v['location'] in interClust[v['contig']]:
best_hit = list(interClust[v['contig']].find(v['location']))[0]
clusterName = best_hit[2]
if not clusterName in dictClusters:
dictClusters[clusterName] = v['ids']
else:
dictClusters[clusterName] += v['ids']
# write the output file
with open(annotations, 'w') as annotout:
for k, v in list(dictClusters.items()):
for i in v:
annotout.write("%s\tnote\tantiSMASH:%s\n" % (i, k))
return dictClusters
def splitFASTA(input, outputdir):
if not os.path.isdir(outputdir):
os.makedirs(outputdir)
with open(input, 'r') as InputFasta:
SeqRecords = SeqIO.parse(InputFasta, 'fasta')
for record in SeqRecords:
name = str(record.id)
outputfile = os.path.join(outputdir, name+'.fa')
with open(outputfile, 'w') as output:
SeqIO.write(record, output, 'fasta')
def genomeStats(input):
from Bio.SeqUtils import GC
lengths = []
GeeCee = []
Genes = 0
tRNA = 0
Prots = 0
locus_tag = ''
organism = None
isolate = None
strain = None
uniqueIso = None
with open(input, 'r') as gbk:
SeqRecords = SeqIO.parse(gbk, 'genbank')
for record in SeqRecords:
lengths.append(len(record.seq))
GeeCee.append(str(record.seq))
organism = record.annotations['organism'].replace(
' Unclassified.', '')
for f in record.features:
if f.type == "source":
isolate = f.qualifiers.get("isolate", [None])[0]
strain = f.qualifiers.get("strain", [None])[0]
if f.type == "CDS":
Prots += 1
if f.type == "gene":
Genes += 1
if Genes == 1:
locus_tag = f.qualifiers.get("locus_tag")[
0].split('_')[0]
if f.type == "tRNA":
tRNA += 1
if strain:
log.info("working on %s %s" % (organism, strain))
uniqueIso = strain.replace(' ', '')
elif isolate:
log.info("working on %s %s" % (organism, isolate))
uniqueIso = isolate.replace(' ', '')
else:
log.info("working on %s" % organism)
GenomeSize = sum(lengths)
LargestContig = max(lengths)
ContigNum = len(lengths)
AvgContig = int(round(GenomeSize / ContigNum))
pctGC = round(GC("".join(GeeCee)), 2)
# now get N50
lengths.sort()
nlist = []
for x in lengths:
nlist += [x]*x
if len(nlist) % 2 == 0:
medianpos = int(len(nlist) / 2)
N50 = int((nlist[medianpos] + nlist[medianpos-1]) / 2)
else:
medianpos = int(len(nlist) / 2)
N50 = int(nlist[medianpos])
# return values in a list
return [organism, uniqueIso, locus_tag, "{0:,}".format(GenomeSize)+' bp', "{0:,}".format(LargestContig)+' bp', "{0:,}".format(AvgContig)+' bp', "{0:,}".format(ContigNum), "{0:,}".format(N50)+' bp', "{:.2f}".format(pctGC)+'%', "{0:,}".format(Genes), "{0:,}".format(Prots), "{0:,}".format(tRNA)]
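# The N50 loop above materializes one list element per base pair (nlist), which is
# memory-heavy for large assemblies. A minimal sketch of an equivalent, memory-lean
# computation under the usual N50 definition (length of the contig at which the running
# sum of sorted contig lengths reaches half the genome size); it may differ from the
# averaging done above when the midpoint falls exactly between two contigs.
def calc_n50(lengths):
    total = sum(lengths)
    running = 0
    for contig_len in sorted(lengths, reverse=True):
        running += contig_len
        if running * 2 >= total:
            return contig_len
    return 0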
def MEROPS2dict(input):
dict = {}
with open(input, 'r') as fasta:
for line in fasta:
if line.startswith('>'):
cols = line.split(' ')
ID = cols[0].replace('>', '')
family = cols[1].replace('\n', '')
dict[ID] = family
return dict
def getEggNogfromNote(input):
dict = {}
with open(input, 'r') as gbk:
SeqRecords = SeqIO.parse(gbk, 'genbank')
for record in SeqRecords:
for f in record.features:
if f.type == 'CDS':
try:
ID = f.qualifiers['locus_tag'][0]
except KeyError:
log.debug("%s has no locus_tag, skipping")
continue
for k, v in list(f.qualifiers.items()):
if k == 'note':
notes = v[0].split('; ')
for i in notes:
if i.startswith('EggNog:'):
hit = i.replace('EggNog:', '')
if not ID in dict:
dict[ID] = hit
return dict
def getStatsfromNote(input, word, Database):
dict = {}
meropsDict = MEROPS2dict(os.path.join(Database, 'merops.formatted.fa'))
with open(input, 'r') as gbk:
SeqRecords = SeqIO.parse(gbk, 'genbank')
for record in SeqRecords:
for f in record.features:
if f.type == 'CDS':
try:
ID = f.qualifiers['locus_tag'][0]
except KeyError:
log.debug("%s has no locus_tag, skipping")
continue
for k, v in list(f.qualifiers.items()):
if k == 'note':
notes = v[0].split('; ')
for i in notes:
if i.startswith(word+':'):
hit = i.replace(word+':', '')
if hit.startswith('MER'): # change to family name
hit = meropsDict.get(hit)
if not hit in dict:
dict[hit] = [ID]
else:
dict[hit].append(ID)
return dict
def getSMBackbones(input):
dict = {'NRPS': 0, 'PKS': 0, 'Hybrid': 0}
with open(input, 'r') as gbk:
for record in SeqIO.parse(gbk, 'genbank'):
for f in record.features:
if f.type == 'CDS':
product = f.qualifiers['product'][0]
if not product == 'hypothetical protein':
if product == "Hybrid PKS-NRPS":
dict['Hybrid'] += 1
if product == "Nonribosomal Peptide Synthase (NRPS)":
dict['NRPS'] += 1
if 'Polyketide synthase (PKS)' in product:
dict['PKS'] += 1
return dict
def parseGOterms(input, folder, genome):
with open(os.path.join(folder, 'associations.txt'), 'a') as assoc:
with open(os.path.join(folder, genome+'.txt'), 'w') as terms:
with open(input, 'r') as gbk:
SeqRecords = SeqIO.parse(gbk, 'genbank')
for record in SeqRecords:
for f in record.features:
if f.type == 'CDS':
try:
ID = f.qualifiers['locus_tag'][0]
except KeyError:
log.debug("%s has no locus_tag, skipping")
continue
GOS = []
for k, v in list(f.qualifiers.items()):
if k == 'note':
notes = v[0].split('; ')
for i in notes:
if i.startswith('GO'):
go_term = i.split(' ')[1]
GOS.append(go_term)
if GOS:
assoc.write("%s\t%s\n" % (ID, ";".join(GOS)))
terms.write("%s\n" % ID)
def getStatsfromDbxref(input, word):
dict = {}
with open(input, 'r') as gbk:
SeqRecords = SeqIO.parse(gbk, 'genbank')
for record in SeqRecords:
for f in record.features:
if f.type == 'CDS':
try:
ID = f.qualifiers['locus_tag'][0]
except KeyError:
log.debug("%s has no locus_tag, skipping")
continue
for k, v in list(f.qualifiers.items()):
if k == 'db_xref':
for i in v:
if i.startswith(word+':'):
hit = i.replace(word+':', '')
if not hit in dict:
dict[hit] = [ID]
else:
dict[hit].append(ID)
return dict
def getGBKannotation(input, Database):
'''
Loop through a GenBank (GBK) file, pull out funannotate functional annotations,
and return a list of dictionaries, one per annotation class.
'''
# convert merops on the fly, need database
meropsDict = MEROPS2dict(os.path.join(Database, 'merops.formatted.fa'))
SMs = {'NRPS': 0, 'PKS': 0, 'Hybrid': 0}
pfams = {}
iprs = {}
nogs = {}
cogs = {}
merops = {}
cazys = {}
secreted = {}
membrane = {}
buscos = {}
secmet = {}
with open(input, 'r') as infile:
for record in SeqIO.parse(infile, 'genbank'):
for f in record.features:
locusTag, ID, Parent = (None,)*3
if f.type == 'CDS':
locusTag, ID, Parent = getID(f, f.type)
if not ID:
continue
product = f.qualifiers['product'][0]
if product == "Hybrid PKS-NRPS":
SMs['Hybrid'] += 1
if product == "Nonribosomal Peptide Synthase (NRPS)":
SMs['NRPS'] += 1
if 'Polyketide synthase (PKS)' in product:
SMs['PKS'] += 1
for k, v in list(f.qualifiers.items()):
if k == 'db_xref':
for i in v:
if i.startswith('PFAM:'):
hit = i.replace('PFAM:', '')
if not hit in pfams:
pfams[hit] = [ID]
else:
pfams[hit].append(ID)
elif i.startswith('InterPro:'):
hit = i.replace('InterPro:', '')
if not hit in iprs:
iprs[hit] = [ID]
else:
iprs[hit].append(ID)
if k == 'note':
notes = v[0].split('; ')
for i in notes:
if i.startswith('EggNog:'):
hit = i.replace('EggNog:', '')
if not ID in nogs:
nogs[ID] = hit
elif i.startswith('BUSCO:'):
hit = i.replace('BUSCO:', '')
if not hit in buscos:
buscos[hit] = [ID]
else:
buscos[hit].append(ID)
elif i.startswith('MEROPS:'): # change to family name
hit = i.replace('MEROPS:', '')
hit = meropsDict.get(hit)
if not hit in merops:
merops[hit] = [ID]
else:
merops[hit].append(ID)
elif i.startswith('CAZy:'):
hit = i.replace('CAZy:', '')
if not hit in cazys:
cazys[hit] = [ID]
else:
cazys[hit].append(ID)
elif i.startswith('COG:'):
hit = i.replace('COG:', '')
hits = hit.split(',')
for x in hits:
if not x in cogs:
cogs[x] = [ID]
else:
cogs[x].append(ID)
elif i.startswith('SECRETED:'):
hit = i.replace('SECRETED:', '')
if not hit in secreted:
secreted[hit] = [ID]
else:
secreted[hit].append(ID)
elif i.startswith('TransMembrane:'):
hit = i.replace('TransMembrane:', '')
if not hit in membrane:
membrane[hit] = [ID]
else:
membrane[hit].append(ID)
elif i.startswith('antiSMASH:'):
hit = i.replace('antiSMASH:', '')
if not hit in secmet:
secmet[hit] = [ID]
else:
secmet[hit].append(ID)
return [pfams, iprs, nogs, buscos, merops, cazys, cogs, secreted, membrane, secmet, SMs]
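# Usage sketch (hypothetical paths): the list returned above unpacks in this order.
# pfams, iprs, nogs, buscos, merops, cazys, cogs, secreted, membrane, secmet, sm_counts = \
#     getGBKannotation('annotated_genome.gbk', '/path/to/funannotate_db')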
def annotationtable(input, Database, HeaderNames, InterProDict, output):
'''
Create a tsv annotation table from a GenBank file, capturing all
annotation in a parsable format that can be imported into Excel.
'''
from collections import OrderedDict
def _sortDict(d):
return (d[1]['contig'], d[1]['location'][0])
# convert merops on the fly, need database
meropsDict = MEROPS2dict(os.path.join(Database, 'merops.formatted.fa'))
# get new/unique note names
uniqueNotes = OrderedDict()
for x in HeaderNames:
if not x in ['BUSCO', 'CAZy', 'COG', 'EggNog', 'SECRETED', 'GO', 'MEROPS', 'TransMembrane']:
uniqueNotes[x] = []
# load genbank into funannotate dictionary (required as we need transcript/cds/etc)
Genes = {}
with open(input, 'r') as gbk:
for record in SeqIO.parse(gbk, 'genbank'):
for f in record.features:
gb_feature_add2dict(f, record, Genes)
SeqRecords = SeqIO.to_dict(SeqIO.parse(input, 'genbank'))
sGenes = natsorted(Genes.items(), key=_sortDict)
sortedGenes = OrderedDict(sGenes)
# input should be a fully annotated GBK file from funannotate
with open(output, 'w') as outfile:
header = ['GeneID', 'TranscriptID', 'Feature', 'Contig', 'Start',
'Stop', 'Strand', 'Name', 'Product', 'Alias/Synonyms', 'EC_number',
'BUSCO', 'PFAM', 'InterPro', 'EggNog', 'COG', 'GO Terms',
| |
label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history.
:param int timeout_seconds: Timeout for the list/watch call.
:return: JsonWatchEvent
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method watch_limit_range_list" % key
)
params[key] = val
del params['kwargs']
resource_path = '/api/v1/watch/limitranges'.replace('{format}', 'json')
method = 'GET'
path_params = {}
query_params = {}
if 'pretty' in params:
query_params['pretty'] = params['pretty']
if 'label_selector' in params:
query_params['labelSelector'] = params['label_selector']
if 'field_selector' in params:
query_params['fieldSelector'] = params['field_selector']
if 'watch' in params:
query_params['watch'] = params['watch']
if 'resource_version' in params:
query_params['resourceVersion'] = params['resource_version']
if 'timeout_seconds' in params:
query_params['timeoutSeconds'] = params['timeout_seconds']
header_params = {}
form_params = {}
files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = []
response = self.api_client.call_api(resource_path, method,
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=files,
response_type='JsonWatchEvent',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
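# Usage sketch (hypothetical client object): the same method can be called synchronously
# with the documented query parameters, e.g. to poll for changes after a known
# resourceVersion with a bounded timeout.
# events = api.watch_limit_range_list(label_selector='tier=backend',
#                                     resource_version='12345',
#                                     timeout_seconds=60)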
def watch_namespaced_namespace_list(self, **kwargs):
"""
watch individual changes to a list of Namespace
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.watch_namespaced_namespace_list(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str pretty: If 'true', then the output is pretty printed.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history.
:param int timeout_seconds: Timeout for the list/watch call.
:return: JsonWatchEvent
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method watch_namespaced_namespace_list" % key
)
params[key] = val
del params['kwargs']
resource_path = '/api/v1/watch/namespaces'.replace('{format}', 'json')
method = 'GET'
path_params = {}
query_params = {}
if 'pretty' in params:
query_params['pretty'] = params['pretty']
if 'label_selector' in params:
query_params['labelSelector'] = params['label_selector']
if 'field_selector' in params:
query_params['fieldSelector'] = params['field_selector']
if 'watch' in params:
query_params['watch'] = params['watch']
if 'resource_version' in params:
query_params['resourceVersion'] = params['resource_version']
if 'timeout_seconds' in params:
query_params['timeoutSeconds'] = params['timeout_seconds']
header_params = {}
form_params = {}
files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = []
response = self.api_client.call_api(resource_path, method,
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=files,
response_type='JsonWatchEvent',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def watch_namespaced_endpoints_list(self, namespace, **kwargs):
"""
watch individual changes to a list of Endpoints
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.watch_namespaced_endpoints_list(namespace, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history.
:param int timeout_seconds: Timeout for the list/watch call.
:return: JsonWatchEvent
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method watch_namespaced_endpoints_list" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `watch_namespaced_endpoints_list`")
resource_path = '/api/v1/watch/namespaces/{namespace}/endpoints'.replace('{format}', 'json')
method = 'GET'
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = {}
if 'pretty' in params:
query_params['pretty'] = params['pretty']
if 'label_selector' in params:
query_params['labelSelector'] = params['label_selector']
if 'field_selector' in params:
query_params['fieldSelector'] = params['field_selector']
if 'watch' in params:
query_params['watch'] = params['watch']
if 'resource_version' in params:
query_params['resourceVersion'] = params['resource_version']
if 'timeout_seconds' in params:
query_params['timeoutSeconds'] = params['timeout_seconds']
header_params = {}
form_params = {}
files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = []
response = self.api_client.call_api(resource_path, method,
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=files,
response_type='JsonWatchEvent',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def watch_namespaced_endpoints(self, namespace, name, **kwargs):
"""
watch changes to an object of kind Endpoints
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.watch_namespaced_endpoints(namespace, name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str name: name of the Endpoints (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history.
:param int timeout_seconds: Timeout for the list/watch call.
:return: JsonWatchEvent
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'name', 'pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method watch_namespaced_endpoints" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `watch_namespaced_endpoints`")
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `watch_namespaced_endpoints`")
resource_path = '/api/v1/watch/namespaces/{namespace}/endpoints/{name}'.replace('{format}', 'json')
method = | |
import collections
import os
import traceback
from datetime import datetime, timedelta
import pandas as pd
from openpyxl.styles import PatternFill
import config
from openpyxl import load_workbook
import numpy as np
import xlrd
def get_date_index(date, dates_values, lookback_index=0):
if isinstance(dates_values[0], str):
dates_values = [datetime.strptime(x, '%Y-%m-%d') for x in dates_values]
elif isinstance(dates_values[0], np.datetime64):
dates_values = [x.astype('M8[ms]').astype('O') for x in dates_values]
if len(dates_values) > 1:
if dates_values[0] > dates_values[1]: # if dates decreasing rightwards or downwards
date_index = next((index for (index, item) in enumerate(dates_values) if item < date), 0)
# adjusted_lookback = date_item - lookback_period
# lookback_index = next((
# index for (index, item) in enumerate(dates_values[date_index:]) if item <= adjusted_lookback), 0)
return date_index + lookback_index
else: # if dates increasing rightwards or downwards
date_index = next((index for (index, item) in enumerate(dates_values) if item > date), -1)
# adjusted_lookback = date_item - lookback_period
# lookback_index = next((
# index for (index, item) in enumerate(dates_values[date_index:]) if item > adjusted_lookback), -1)
return date_index - lookback_index # TODO Fix lookback index is a date here, convert before calling method
else:
return 0
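# Illustrative call (hypothetical dates): with a newest-first date list, the index
# returned is the position of the most recent entry strictly before `date`.
# dates = ['2020-03-31', '2019-12-31', '2019-09-30']
# get_date_index(datetime(2020, 1, 15), dates)  # -> 1, i.e. '2019-12-31'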
def slice_series_dates(series, from_date, to_date):
date_idx_from = get_date_index(from_date, series.index)
date_idx_to = get_date_index(to_date, series.index)
return series[date_idx_from:date_idx_to]
def save_into_csv(filename, df, sheet_name='Sheet1', startrow=None,
overwrite_sheet=False, concat=False,
**to_excel_kwargs):
# ignore [engine] parameter if it was passed
if 'engine' in to_excel_kwargs:
to_excel_kwargs.pop('engine')
writer = pd.ExcelWriter(filename, engine='openpyxl')
try:
# try to open an existing workbook
writer.book = load_workbook(filename)
# get the last row in the existing Excel sheet
# if it was not specified explicitly
if startrow is None and sheet_name in writer.book.sheetnames:
startrow = writer.book[sheet_name].max_row
# TODO Not working yet
if concat and sheet_name in writer.book.sheetnames:
try:
sheet_df = pd.read_excel(filename, sheet_name,
index_col=[0, 1, 2] if config.balance_sheet_name in sheet_name else [0, 1])
print(sheet_df.to_string())
idx = writer.book.sheetnames.index(sheet_name)
writer.book.remove(writer.book.worksheets[idx])
writer.book.create_sheet(sheet_name, idx)
df = pd.concat([df, sheet_df], axis=1)
df = df.reindex(sorted(df.columns, reverse=True), axis=1)
except Exception:
traceback.print_exc()
# truncate sheet
if overwrite_sheet and sheet_name in writer.book.sheetnames:
# index of [sheet_name] sheet
idx = writer.book.sheetnames.index(sheet_name)
# remove [sheet_name]
writer.book.remove(writer.book.worksheets[idx])
# create an empty sheet [sheet_name] using old index
writer.book.create_sheet(sheet_name, idx)
# copy existing sheets
writer.sheets = {ws.title: ws for ws in writer.book.worksheets}
except FileNotFoundError:
# file does not exist yet, we will create it
pass
if startrow is None:
startrow = 0
# write out the new sheet
df.to_excel(writer, sheet_name, startrow=startrow, **to_excel_kwargs)
# save the workbook
writer.save()
def read_df_from_csv(path, sheet_name='Sheet1'):
if os.path.exists(path):
workbook = xlrd.open_workbook(path, on_demand=True)
sheets = workbook.sheet_names()
if sheet_name not in sheets:
return pd.DataFrame()
else:
xls = pd.ExcelFile(path)
return pd.read_excel(xls, sheet_name, index_col=0)
return pd.DataFrame()
def read_entry_from_pickle(path, x, y, lookback_index=0):
if os.path.exists(path):
df: pd.DataFrame = pd.read_pickle(filepath_or_buffer=path)
if isinstance(y, datetime): # if the input is a date...
date_index = get_date_index(date=y, dates_values=df.index.values, lookback_index=lookback_index)
return df[x].iloc[date_index]
elif isinstance(x, datetime):
date_index = get_date_index(date=x, dates_values=df.columns, lookback_index=lookback_index)
reduced_df = df.iloc[:, date_index]
for el in list(y):
if el in reduced_df.index:
reduced_df = reduced_df.loc[el]
else:
return np.nan
return reduced_df
elif isinstance(y, list) and isinstance(y[0], datetime):
to_return = pd.Series()
for date in y:
date_index = get_date_index(date=date, dates_values=df.index, lookback_index=lookback_index)
reduced_df = df.iloc[date_index, :]
for el in ([x] if not isinstance(x, list) else x):
if el in reduced_df.index:
reduced_df = reduced_df.loc[el]
else:
to_return[date] = np.nan
to_return[date] = reduced_df
return to_return
elif isinstance(x, list) and isinstance(x[0], datetime):
to_return = pd.Series()
for date in x:
date_index = get_date_index(date=date, dates_values=df.columns, lookback_index=lookback_index)
reduced_df = df.iloc[:, date_index]
if reduced_df.index.isin([tuple(y)]).any():
reduced_df = reduced_df.loc[tuple(y)]
to_return[date] = reduced_df
else:
to_return[date] = np.nan
return to_return
else:
return df[x].loc[y]
else:
return np.nan
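# Usage sketch (hypothetical file and labels): with a pickled DataFrame whose index is a
# DatetimeIndex and whose columns are line items, passing a datetime as `y` returns the
# value of column `x` at the stored date that get_date_index resolves (the most recent
# date before `y` when the index runs newest-first).
# revenue = read_entry_from_pickle('AAPL.pkl', 'Total Revenue', datetime(2020, 12, 31))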
def read_entry_from_csv(path, x, y, sheet_name='Sheet1', lookback_index=0, skip_first_sheet=False):
if os.path.exists(path):
# ticker = Path(path).stem
if config.balance_sheet_name in sheet_name:
index_col = [0, 1, 2]
elif config.income_statement_name in sheet_name or config.cash_flow_statement_name in sheet_name:
index_col = [0, 1]
else:
index_col = [0]
df = pd.read_excel(pd.ExcelFile(path), sheet_name, index_col=index_col)
if isinstance(y, datetime): # if the input is a date...
# if isinstance(df.index, pd.DatetimeIndex):
date_index = get_date_index(date=y, dates_values=df.index.values, lookback_index=lookback_index)
# print('The {} for {} on {}, lookback {}, is {}'.format(x, ticker, y, lookback_index, df[x].iloc[date_index]))
return df[x].iloc[date_index]
elif isinstance(x, datetime):
date_index = get_date_index(date=x, dates_values=df.columns, lookback_index=lookback_index)
reduced_df = df.iloc[:, date_index]
for el in list(y):
if el in reduced_df.index:
reduced_df = reduced_df.loc[el]
else:
# print('The {} for {} on {}, lookback {}, is {}'.format(y, ticker, x, lookback_index, np.nan))
return np.nan
# print('The {} for {} on {}, lookback {}, is {}'.format(y, ticker, x, lookback_index, reduced_df))
return reduced_df
else:
# print('The {}/{} for {} is {}'.format(x, y, ticker, df[x].loc[y]))
return df[x].loc[y]
else:
# print('The entry is {}'.format(np.nan))
return np.nan
def read_dates_from_csv(path, sheet_name):
if os.path.exists(path):
sheets = xlrd.open_workbook(path, on_demand=True).sheet_names()
if sheet_name not in sheets:
return []
xls = pd.ExcelFile(path)
df = pd.read_excel(xls, sheet_name, index_col=0)
ls = []
for col in df.columns:
try:
ls.append(datetime.strptime(col, '%Y-%m-%d'))
except (TypeError, ValueError):
# column label is not a 'YYYY-MM-DD' date string; skip it
continue
return ls
else:
return []
def get_stock_universe(index='in_directory', date=datetime.now()):
'''
:param index: 'in_directory' (default), 'DJIA', or 'RUSSELL3000'; other indices (NASDAQ, S&P1500, S&P500, S&P100, RUSSELL2000, FTSE100) are not wired up here yet
:return: list of ticker symbols in the selected universe as of `date`
'''
if index == 'in_directory':
return [os.path.splitext(file)[0]
for root, dirs, files in os.walk(config.FINANCIAL_STATEMENTS_DIR_PATH)
for file in files]
if index == 'DJIA':
    index_file_name = os.path.join(config.MARKET_TICKERS_DIR_PATH, 'Dow-Jones-Stock-Tickers.xlsx')
elif index == 'RUSSELL3000':
    index_file_name = os.path.join(config.MARKET_TICKERS_DIR_PATH, 'Russell-3000-Stock-Tickers.xlsx')
else:
    raise ValueError('Unsupported index: {}'.format(index))
excel_df = pd.read_excel(index_file_name, index_col=0)
date_index = get_date_index(date=date, dates_values=excel_df.index)
tickers = excel_df.iloc[date_index]
return tickers.to_list()
def slice_resample_merge_returns(returns: list, from_date=None, to_date=None, lookback=None,
frequency: str = 'Monthly'):
'''
:param returns: list of return series; each element may be a ticker string, a pd.Series, or a pd.DataFrame
:param from_date: start date of the slice (takes precedence over lookback if both are given)
:param to_date: end date of the slice; defaults to the shortest common history across the inputs
:param lookback: number of periods (int, interpreted with `frequency`) or a timedelta
:param frequency: resampling frequency, e.g. 'Daily', 'Weekly', 'Monthly', 'Yearly'
:return: DataFrame of merged, resampled returns between the requested dates
'''
# Load each asset returns and merge
returns_copy = []
for retrn in returns:
if isinstance(retrn, str):
path = '{}/{}.xlsx'.format(config.STOCK_PRICES_DIR_PATH, retrn)
series = read_df_from_csv(path)['Adj Close'].pct_change().rename(retrn)
returns_copy.append(series)
elif isinstance(retrn, pd.Series):
returns_copy.append(retrn)
elif isinstance(retrn, pd.DataFrame):
for col in retrn.columns:
returns_copy.append(retrn[col])
else:
raise Exception
# Resample based on desired frequency and merge
merged_returns = pd.DataFrame()
for retrn in returns_copy:
resampled_returns = retrn.resample(frequency[0]).apply(lambda x: ((x + 1).cumprod() - 1).last("D"))
# Resample usually resets date to beginning of day, so we re-do the end of day trick:
resampled_returns.index = resampled_returns.index + timedelta(days=1) - timedelta(seconds=1)
merged_returns = merged_returns.join(resampled_returns.to_frame(), how='outer')
# Cutoff based on from_date and to_date
# Go over returns list because merged would have nans
to_date = min([series.index[-1] for series in returns_copy]) if to_date is None else to_date
to_date_idx = get_date_index(date=to_date, dates_values=merged_returns.index)
if from_date is not None: # from_date has precedence over lookback if both are not none
from_date_idx = get_date_index(date=from_date, dates_values=merged_returns.index)
elif lookback is not None:
if isinstance(lookback, int) and frequency is not None:
period_to_int = {'D': 1, 'W': 7, 'M': 30.5, 'Y': 365.25}
lookback = timedelta(days=int(period_to_int[frequency] * lookback))
elif not isinstance(lookback, timedelta):
raise Exception
from_date_idx = get_date_index(date=to_date - lookback, dates_values=merged_returns.index)
else:
from_date_idx = 0
merged_returns = merged_returns.iloc[from_date_idx:to_date_idx]
for col in merged_returns.columns:
merged_returns[col] = merged_returns[col].apply(lambda y: 0 if isinstance(y, np.ndarray) else y)
return merged_returns
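# Usage sketch (hypothetical tickers): merge two assets' price files from
# config.STOCK_PRICES_DIR_PATH into monthly returns over a 12-month lookback.
# monthly = slice_resample_merge_returns(['AAPL', 'MSFT'], lookback=12, frequency='Monthly')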
def unflatten(dictionary):
resultDict = dict()
for key, value in dictionary.items():
parts = key.split("_")
d = resultDict
for part in parts[:-1]:
try:
if part not in d:
d[part] = dict()
d = d[part]
except:
continue
try:
d[parts[-1]] = value
except:
continue
return resultDict
def flatten_dict(d, parent_key='', sep='_'):
items = []
for k, v in d.items():
new_key = parent_key + sep + k if parent_key else k
if isinstance(v, collections.abc.MutableMapping):
items.extend(flatten_dict(v, new_key, sep=sep).items())
else:
items.append((new_key, v))
return dict(items)
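# Round-trip sketch: flatten_dict joins nested keys with '_' and unflatten splits them
# back, so the keys themselves must not contain the separator.
# nested = {'assets': {'current': {'cash': 100}}, 'revenue': 500}
# flat = flatten_dict(nested)   # {'assets_current_cash': 100, 'revenue': 500}
# unflatten(flat) == nested     # True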
def save_pretty_excel(path, financials_dictio, with_pickle=True):
for sheet_name in [config.income_statement_name, config.cash_flow_statement_name]:
for sheet_period, sheet_dict in financials_dictio.items():
# keep 2 levels for the Income Statement and Cash Flow i.e.
# Operating Expenses -> Research and Development Expense -> Value
# Cash Flow from Financing Activities -> Dividend Payments -> Value
diction = collections.OrderedDict({i: collections.OrderedDict({
(j.split('_')[1], j.split('_')[-1]): sheet_dict[i][j]
for j in sheet_dict[i].keys() if j.split('_')[0] in sheet_name # sheet name
}) for i in sheet_dict.keys()}) # ) # date
df = pd.DataFrame.from_dict(diction)
df = df.reindex(sorted(df.columns, reverse=True), axis=1)
df.dropna(axis=0, how='all', inplace=True)
df = df.loc[:, df.any()]
if not df.empty:
if with_pickle:
stock = path.split('/')[-1].split('.xlsx')[0]
pickly_path = os.path.join(config.FINANCIAL_STATEMENTS_DIR_PATH_PICKLE, sheet_period,
sheet_name, '{}.pkl'.format(stock))
df.to_pickle(pickly_path)
save_into_csv(path, df, '{} ({})'.format(sheet_name, sheet_period))
# this is to standardize cumulated 3 6 9 12 months
# for quarterly_statement in [config.cash_flow_statement_quarterly, config.income_statement_quarterly]:
#
# try:
# quarterly_df = pd.read_excel(path, quarterly_statement, index_col=[0, 1])
# except:
# pass
# temp_statements = config.income_statements if quarterly_statement == config.income_statement_quarterly else config.cash_flow_statements
# for i, item in enumerate(temp_statements):
# try:
# smaller_period_df = pd.read_excel(path, item, index_col=[0, 1])
# bigger_period_df = pd.read_excel(path, temp_statements[i + 1], index_col=[0, 1])
# quarterly_df = pd.concat([quarterly_df,
| |
import copy
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity as cosine
try:  # sklearn removed linear_assignment_ in 0.23+; fall back to scipy's solver
    from sklearn.utils.linear_assignment_ import linear_assignment
except ImportError:
    from scipy.optimize import linear_sum_assignment
    def linear_assignment(cost): return np.column_stack(linear_sum_assignment(cost))
class Tracker(object):
def __init__(self, opt):
self.opt = opt
self.reset()
self.nID = 10000
self.alpha = 0.1
def init_track(self, results):
for item in results:
if item['score'] > self.opt.new_thresh:
self.id_count += 1
# active and age are never used in the paper
item['active'] = 1
item['age'] = 1
item['tracking_id'] = self.id_count
if not ('ct' in item):
bbox = item['bbox']
item['ct'] = [(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2]
self.tracks.append(item)
self.nID = 10000
self.embedding_bank = np.zeros((self.nID, 128))
self.cat_bank = np.zeros((self.nID), dtype=int)
def reset(self):
self.id_count = 0
self.nID = 10000
self.tracks = []
self.embedding_bank = np.zeros((self.nID, 128))
self.cat_bank = np.zeros((self.nID), dtype=int)
self.tracklet_ages = np.zeros((self.nID), dtype=int)
self.alive = []
def step(self, results_with_low, public_det=None):
results = [item for item in results_with_low if item['score'] >= self.opt.track_thresh]
# first association
N = len(results)
M = len(self.tracks)
self.alive = []
track_boxes = np.array([[track['bbox'][0], track['bbox'][1],
track['bbox'][2], track['bbox'][3]] for track in self.tracks], np.float32) # M x 4
det_boxes = np.array([[item['bbox'][0], item['bbox'][1],
item['bbox'][2], item['bbox'][3]] for item in results], np.float32) # N x 4
box_ious = self.bbox_overlaps_py(det_boxes, track_boxes)
dets = np.array(
[det['ct'] + det['tracking'] for det in results], np.float32) # N x 2
track_size = np.array([((track['bbox'][2] - track['bbox'][0]) * \
(track['bbox'][3] - track['bbox'][1])) \
for track in self.tracks], np.float32) # M
track_cat = np.array([track['class'] for track in self.tracks], np.int32) # M
item_size = np.array([((item['bbox'][2] - item['bbox'][0]) * \
(item['bbox'][3] - item['bbox'][1])) \
for item in results], np.float32) # N
item_cat = np.array([item['class'] for item in results], np.int32) # N
tracks = np.array(
[pre_det['ct'] for pre_det in self.tracks], np.float32) # M x 2
dist = (((tracks.reshape(1, -1, 2) - \
dets.reshape(-1, 1, 2)) ** 2).sum(axis=2)) # N x M
if self.opt.dataset == 'youtube_vis':
invalid = ((dist > track_size.reshape(1, M)) + \
(dist > item_size.reshape(N, 1)) + (box_ious < self.opt.overlap_thresh)) > 0
else:
invalid = ((dist > track_size.reshape(1, M)) + \
(dist > item_size.reshape(N, 1)) + \
(item_cat.reshape(N, 1) != track_cat.reshape(1, M)) + (box_ious < self.opt.overlap_thresh)) > 0
dist = dist + invalid * 1e18
if self.opt.hungarian:
item_score = np.array([item['score'] for item in results], np.float32) # N
dist[dist > 1e18] = 1e18
matched_indices = linear_assignment(dist)
else:
matched_indices = greedy_assignment(copy.deepcopy(dist))
unmatched_dets = [d for d in range(dets.shape[0]) \
if not (d in matched_indices[:, 0])]
unmatched_tracks = [d for d in range(tracks.shape[0]) \
if not (d in matched_indices[:, 1])]
if self.opt.hungarian:
matches = []
for m in matched_indices:
if dist[m[0], m[1]] > 1e16:
unmatched_dets.append(m[0])
unmatched_tracks.append(m[1])
else:
matches.append(m)
matches = np.array(matches).reshape(-1, 2)
else:
matches = matched_indices
ret = []
for m in matches:
track = results[m[0]]
track['tracking_id'] = self.tracks[m[1]]['tracking_id']
track['age'] = 1
track['active'] = self.tracks[m[1]]['active'] + 1
if 'embedding' in track:
self.alive.append(track['tracking_id'])
self.embedding_bank[self.tracks[m[1]]['tracking_id'] - 1, :] = self.alpha * track['embedding'] \
+ (1 - self.alpha) * self.embedding_bank[
self.tracks[m[1]][
'tracking_id'] - 1,
:]
self.cat_bank[self.tracks[m[1]]['tracking_id'] - 1] = track['class']
ret.append(track)
if self.opt.public_det and len(unmatched_dets) > 0:
# Public detection: only create tracks from provided detections
pub_dets = np.array([d['ct'] for d in public_det], np.float32)
dist3 = ((dets.reshape(-1, 1, 2) - pub_dets.reshape(1, -1, 2)) ** 2).sum(
axis=2)
matched_dets = [d for d in range(dets.shape[0]) \
if not (d in unmatched_dets)]
dist3[matched_dets] = 1e18
for j in range(len(pub_dets)):
i = dist3[:, j].argmin()
if dist3[i, j] < item_size[i]:
dist3[i, :] = 1e18
track = results[i]
if track['score'] > self.opt.new_thresh:
self.id_count += 1
track['tracking_id'] = self.id_count
track['age'] = 1
track['active'] = 1
ret.append(track)
else:
# Private detection: create tracks for all un-matched detections
for i in unmatched_dets:
track = results[i]
if track['score'] > self.opt.new_thresh:
if 'embedding' in track:
max_id, max_cos = self.get_similarity(track['embedding'], False, track['class'])
if max_cos >= 0.3 and self.tracklet_ages[max_id - 1] < self.opt.window_size:
track['tracking_id'] = max_id
track['age'] = 1
track['active'] = 1
self.embedding_bank[track['tracking_id'] - 1, :] = self.alpha * track['embedding'] \
+ (1 - self.alpha) * self.embedding_bank[track['tracking_id'] - 1,:]
else:
self.id_count += 1
track['tracking_id'] = self.id_count
track['age'] = 1
track['active'] = 1
self.embedding_bank[self.id_count - 1, :] = track['embedding']
self.cat_bank[self.id_count - 1] = track['class']
self.alive.append(track['tracking_id'])
ret.append(track)
else:
self.id_count += 1
track['tracking_id'] = self.id_count
track['age'] = 1
track['active'] = 1
ret.append(track)
self.tracklet_ages[:self.id_count] = self.tracklet_ages[:self.id_count] + 1
for track in ret:
self.tracklet_ages[track['tracking_id'] - 1] = 1
# second association
results_second = [item for item in results_with_low if item['score'] < self.opt.track_thresh]
self_tracks_second = [self.tracks[i] for i in unmatched_tracks if self.tracks[i]['active'] > 0]
second2original = [i for i in unmatched_tracks if self.tracks[i]['active'] > 0]
N = len(results_second)
M = len(self_tracks_second)
if N > 0 and M > 0:
track_boxes_second = np.array([[track['bbox'][0], track['bbox'][1],
track['bbox'][2], track['bbox'][3]] for track in self_tracks_second], np.float32) # M x 4
det_boxes_second = np.array([[item['bbox'][0], item['bbox'][1],
item['bbox'][2], item['bbox'][3]] for item in results_second], np.float32) # N x 4
box_ious_second = self.bbox_overlaps_py(det_boxes_second, track_boxes_second)
dets = np.array(
[det['ct'] + det['tracking'] for det in results_second], np.float32) # N x 2
track_size = np.array([((track['bbox'][2] - track['bbox'][0]) * \
(track['bbox'][3] - track['bbox'][1])) \
for track in self_tracks_second], np.float32) # M
track_cat = np.array([track['class'] for track in self_tracks_second], np.int32) # M
item_size = np.array([((item['bbox'][2] - item['bbox'][0]) * \
(item['bbox'][3] - item['bbox'][1])) \
for item in results_second], np.float32) # N
item_cat = np.array([item['class'] for item in results_second], np.int32) # N
tracks_second = np.array(
[pre_det['ct'] for pre_det in self_tracks_second], np.float32) # M x 2
dist = (((tracks_second.reshape(1, -1, 2) - \
dets.reshape(-1, 1, 2)) ** 2).sum(axis=2)) # N x M
invalid = ((dist > track_size.reshape(1, M)) + \
(dist > item_size.reshape(N, 1)) + \
(item_cat.reshape(N, 1) != track_cat.reshape(1, M)) + (box_ious_second < 0.3)) > 0
dist = dist + invalid * 1e18
matched_indices_second = greedy_assignment(copy.deepcopy(dist), 1e8)
unmatched_tracks_second = [d for d in range(tracks_second.shape[0]) \
if not (d in matched_indices_second[:, 1])]
matches_second = matched_indices_second
for m in matches_second:
track = results_second[m[0]]
track['tracking_id'] = self_tracks_second[m[1]]['tracking_id']
track['age'] = 1
track['active'] = self_tracks_second[m[1]]['active'] + 1
if 'embedding' in track:
self.alive.append(track['tracking_id'])
self.embedding_bank[self_tracks_second[m[1]]['tracking_id'] - 1, :] = self.alpha * track['embedding'] \
+ (1 - self.alpha) * self.embedding_bank[self_tracks_second[m[1]]['tracking_id'] - 1,:]
self.cat_bank[self_tracks_second[m[1]]['tracking_id'] - 1] = track['class']
ret.append(track)
unmatched_tracks = [second2original[i] for i in unmatched_tracks_second] + \
[i for i in unmatched_tracks if self.tracks[i]['active'] == 0]
# Never used
for i in unmatched_tracks:
track = self.tracks[i]
if track['age'] < self.opt.max_age:
track['age'] += 1
track['active'] = 1 # 0
bbox = track['bbox']
ct = track['ct']
v = [0, 0]
track['bbox'] = [
bbox[0] + v[0], bbox[1] + v[1],
bbox[2] + v[0], bbox[3] + v[1]]
track['ct'] = [ct[0] + v[0], ct[1] + v[1]]
ret.append(track)
for r_ in ret:
del r_['embedding']
self.tracks = ret
return ret
def get_similarity(self, feat, stat, cls):
max_id = -1
max_cos = -1
if stat:
nID = self.id_count
else:
nID = self.id_count
a = feat[None, :]
b = self.embedding_bank[:nID, :]
if len(b) > 0:
alive = np.array(self.alive, dtype=int) - 1
cosim = cosine(a, b)
cosim = np.reshape(cosim, newshape=(-1))
cosim[alive] = -2
cosim[nID - 1] = -2
cosim[np.where(self.cat_bank[:nID] != cls)[0]] = -2
max_id = int(np.argmax(cosim) + 1)
max_cos = np.max(cosim)
return max_id, max_cos
def bbox_overlaps_py(self, boxes, query_boxes):
"""
determine overlaps between boxes and query_boxes
:param boxes: n * 4 bounding boxes
:param query_boxes: k * 4 bounding boxes
:return: overlaps: n * k overlaps
"""
n_ = boxes.shape[0]
k_ = query_boxes.shape[0]
overlaps = np.zeros((n_, k_), dtype=np.float64)
for k in range(k_):
query_box_area = (query_boxes[k, 2] - query_boxes[k, 0] + 1) * (query_boxes[k, 3] - query_boxes[k, 1] + 1)
for n in range(n_):
iw = min(boxes[n, 2], query_boxes[k, 2]) - max(boxes[n, 0], query_boxes[k, 0]) + 1
if iw > 0:
ih = min(boxes[n, 3], query_boxes[k, 3]) - max(boxes[n, 1], query_boxes[k, 1]) + 1
if ih > 0:
box_area = (boxes[n, 2] - boxes[n, 0] + 1) * (boxes[n, 3] - boxes[n, 1] + 1)
all_area = float(box_area + query_box_area - iw * ih)
overlaps[n, k] = iw * ih / all_area
return overlaps
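# A minimal sketch of an equivalent vectorized IoU using numpy broadcasting (assumes the
# same inclusive +1 pixel convention as bbox_overlaps_py above); illustrative only, not
# part of the original tracker.
def bbox_overlaps_vectorized(boxes, query_boxes):
    boxes = np.asarray(boxes, dtype=np.float64)
    query_boxes = np.asarray(query_boxes, dtype=np.float64)
    # areas of each box set
    area_n = (boxes[:, 2] - boxes[:, 0] + 1) * (boxes[:, 3] - boxes[:, 1] + 1)
    area_k = (query_boxes[:, 2] - query_boxes[:, 0] + 1) * (query_boxes[:, 3] - query_boxes[:, 1] + 1)
    # pairwise intersection widths/heights via broadcasting, clipped at 0
    iw = np.clip(np.minimum(boxes[:, None, 2], query_boxes[None, :, 2])
                 - np.maximum(boxes[:, None, 0], query_boxes[None, :, 0]) + 1, 0, None)
    ih = np.clip(np.minimum(boxes[:, None, 3], query_boxes[None, :, 3])
                 - np.maximum(boxes[:, None, 1], query_boxes[None, :, 1]) + 1, 0, None)
    inter = iw * ih
    return inter / (area_n[:, None] + area_k[None, :] - inter)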
def greedy_assignment(dist, thresh=1e16):
matched_indices = []
if dist.shape[1] == 0:
return np.array(matched_indices, np.int32).reshape(-1, 2)
for i in range(dist.shape[0]):
j | |
"""Zookeeper Partitioner Implementation
:Maintainer: None
:Status: Unknown
:class:`SetPartitioner` implements a partitioning scheme using
Zookeeper for dividing up resources amongst members of a party.
This is useful when there is a set of resources that should only be
accessed by a single process at a time that multiple processes
across a cluster might want to divide up.
Example Use-Case
----------------
- Multiple workers across a cluster need to divide up a list of queues
so that no two workers own the same queue.
"""
from functools import partial
import logging
import os
import socket
from kazoo.exceptions import KazooException, LockTimeout
from kazoo.protocol.states import KazooState
from kazoo.recipe.watchers import PatientChildrenWatch
log = logging.getLogger(__name__)
class PartitionState(object):
"""High level partition state values
.. attribute:: ALLOCATING
The set needs to be partitioned, and may require an existing
partition set to be released before acquiring a new partition
of the set.
.. attribute:: ACQUIRED
The set has been partitioned and acquired.
.. attribute:: RELEASE
The set needs to be repartitioned, and the current partitions
must be released before a new allocation can be made.
.. attribute:: FAILURE
The set partition has failed. This occurs when the maximum
time to partition the set is exceeded or the Zookeeper session
is lost. The partitioner is unusable after this state and must
be recreated.
"""
ALLOCATING = "ALLOCATING"
ACQUIRED = "ACQUIRED"
RELEASE = "RELEASE"
FAILURE = "FAILURE"
class SetPartitioner(object):
"""Partitions a set amongst members of a party
This class will partition a set amongst members of a party such
that each member will be given zero or more items of the set and
each set item will be given to a single member. When new members
enter or leave the party, the set will be re-partitioned amongst
the members.
When the :class:`SetPartitioner` enters the
:attr:`~PartitionState.FAILURE` state, it is unrecoverable
and a new :class:`SetPartitioner` should be created.
Example:
.. code-block:: python
from kazoo.client import KazooClient
client = KazooClient()
client.start()
qp = client.SetPartitioner(
path='/work_queues', set=('queue-1', 'queue-2', 'queue-3'))
while 1:
if qp.failed:
raise Exception("Lost or unable to acquire partition")
elif qp.release:
qp.release_set()
elif qp.acquired:
for partition in qp:
# Do something with each partition
elif qp.allocating:
qp.wait_for_acquire()
**State Transitions**
When created, the :class:`SetPartitioner` enters the
:attr:`PartitionState.ALLOCATING` state.
:attr:`~PartitionState.ALLOCATING` ->
:attr:`~PartitionState.ACQUIRED`
Set was partitioned successfully, the partition list assigned
is accessible via list/iter methods or calling list() on the
:class:`SetPartitioner` instance.
:attr:`~PartitionState.ALLOCATING` ->
:attr:`~PartitionState.FAILURE`
Allocating the set failed either due to a Zookeeper session
expiration, or failure to acquire the items of the set within
the timeout period.
:attr:`~PartitionState.ACQUIRED` ->
:attr:`~PartitionState.RELEASE`
The members of the party have changed, and the set needs to be
repartitioned. :meth:`SetPartitioner.release` should be called
as soon as possible.
:attr:`~PartitionState.ACQUIRED` ->
:attr:`~PartitionState.FAILURE`
The current partition was lost due to a Zookeeper session
expiration.
:attr:`~PartitionState.RELEASE` ->
:attr:`~PartitionState.ALLOCATING`
The current partition was released and is being re-allocated.
"""
def __init__(self, client, path, set, partition_func=None,
identifier=None, time_boundary=30, max_reaction_time=1,
state_change_event=None):
"""Create a :class:`~SetPartitioner` instance
:param client: A :class:`~kazoo.client.KazooClient` instance.
:param path: The partition path to use.
:param set: The set of items to partition.
:param partition_func: A function to use to decide how to
partition the set.
:param identifier: An identifier to use for this member of the
party when participating. Defaults to the
hostname + process id.
:param time_boundary: How long the party members must be stable
before allocation can complete.
:param max_reaction_time: Maximum time to react to a change in
party membership.
:param state_change_event: An optional Event object that will be set
on every state change.
"""
# Used to differentiate two states with the same names in time
self.state_id = 0
self.state = PartitionState.ALLOCATING
self.state_change_event = state_change_event or \
client.handler.event_object()
self._client = client
self._path = path
self._set = set
self._partition_set = []
self._partition_func = partition_func or self._partitioner
self._identifier = identifier or '%s-%s' % (
socket.getfqdn(), os.getpid())
self._locks = []
self._lock_path = '/'.join([path, 'locks'])
self._party_path = '/'.join([path, 'party'])
self._time_boundary = time_boundary
self._max_reaction_time = max_reaction_time
self._acquire_event = client.handler.event_object()
# Create basic path nodes
client.ensure_path(path)
client.ensure_path(self._lock_path)
client.ensure_path(self._party_path)
# Join the party
self._party = client.ShallowParty(self._party_path,
identifier=self._identifier)
self._party.join()
self._state_change = client.handler.rlock_object()
client.add_listener(self._establish_sessionwatch)
# Now watch the party and set the callback on the async result
# so we know when we're ready
self._child_watching(self._allocate_transition, asynchronous=True)
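# A minimal sketch of a custom partition_func with the signature used by
# _allocate_transition below, i.e. (identifier, members, partition_set); it hands out
# items round-robin by the caller's position in the sorted member list. Illustrative
# only; the recipe's built-in partitioner behaves along these lines.
def example_round_robin_partitioner(identifier, members, partitions):
    members = sorted(members)
    i = members.index(identifier)
    return [item for n, item in enumerate(partitions) if n % len(members) == i]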
def __iter__(self):
"""Return the partitions in this partition set"""
for partition in self._partition_set:
yield partition
@property
def failed(self):
"""Corresponds to the :attr:`PartitionState.FAILURE` state"""
return self.state == PartitionState.FAILURE
@property
def release(self):
"""Corresponds to the :attr:`PartitionState.RELEASE` state"""
return self.state == PartitionState.RELEASE
@property
def allocating(self):
"""Corresponds to the :attr:`PartitionState.ALLOCATING`
state"""
return self.state == PartitionState.ALLOCATING
@property
def acquired(self):
"""Corresponds to the :attr:`PartitionState.ACQUIRED` state"""
return self.state == PartitionState.ACQUIRED
def wait_for_acquire(self, timeout=30):
"""Wait for the set to be partitioned and acquired
:param timeout: How long to wait before returning.
:type timeout: int
"""
self._acquire_event.wait(timeout)
def release_set(self):
"""Call to release the set
This method begins the step of allocating once the set has
been released.
"""
self._release_locks()
if self._locks: # pragma: nocover
# This shouldn't happen, it means we couldn't release our
# locks, abort
self._fail_out()
return
else:
with self._state_change:
if self.failed:
return
self._set_state(PartitionState.ALLOCATING)
self._child_watching(self._allocate_transition, asynchronous=True)
def finish(self):
"""Call to release the set and leave the party"""
self._release_locks()
self._fail_out()
def _fail_out(self):
with self._state_change:
self._set_state(PartitionState.FAILURE)
if self._party.participating:
try:
self._party.leave()
except KazooException: # pragma: nocover
pass
def _allocate_transition(self, result):
"""Called when in allocating mode, and the children settled"""
# Did we get an exception waiting for children to settle?
if result.exception: # pragma: nocover
self._fail_out()
return
children, async_result = result.get()
children_changed = self._client.handler.event_object()
def updated(result):
with self._state_change:
children_changed.set()
if self.acquired:
self._set_state(PartitionState.RELEASE)
with self._state_change:
# We can lose connection during processing the event
if not self.allocating:
return
# Remember the state ID to check later for race conditions
state_id = self.state_id
# updated() will be called when children change
async_result.rawlink(updated)
# Check whether the state has changed during the lock acquisition
# and abort the process if so.
def abort_if_needed():
if self.state_id == state_id:
if children_changed.is_set():
# The party has changed. Repartitioning...
self._abort_lock_acquisition()
return True
else:
return False
else:
if self.allocating or self.acquired:
# The connection was lost and user initiated a new
# allocation process. Abort it to eliminate race
# conditions with locks.
with self._state_change:
self._set_state(PartitionState.RELEASE)
return True
# Split up the set
partition_set = self._partition_func(
self._identifier, list(self._party), self._set)
# Proceed to acquire locks for the working set as needed
for member in partition_set:
lock = self._client.Lock(self._lock_path + '/' + str(member))
while True:
try:
# We mustn't lock without timeout because in that case we
# can get a deadlock if the party state will change during
# lock acquisition.
lock.acquire(timeout=self._max_reaction_time)
except LockTimeout:
if abort_if_needed():
return
except KazooException:
return self.finish()
else:
break
self._locks.append(lock)
if abort_if_needed():
return
# All locks acquired. Time for state transition.
with self._state_change:
if self.state_id == state_id and not children_changed.is_set():
self._partition_set = partition_set
self._set_state(PartitionState.ACQUIRED)
self._acquire_event.set()
return
if not abort_if_needed():
# This mustn't happen. Means a logical error.
self._fail_out()
def _release_locks(self):
"""Attempt to completely remove all the locks"""
self._acquire_event.clear()
for lock in self._locks[:]:
try:
lock.release()
except KazooException: # pragma: nocover
# We proceed to remove as many as possible, and leave
# the ones we couldn't remove
pass
else:
self._locks.remove(lock)
def _abort_lock_acquisition(self):
"""Called during lock acquisition if a party change occurs"""
self._release_locks()
if self._locks:
# This shouldn't happen, it means we couldn't release our
# locks, abort
self._fail_out()
return
self._child_watching(self._allocate_transition, asynchronous=True)
def _child_watching(self, func=None, asynchronous=False):
"""Called when children are being watched to stabilize
This actually returns immediately, child watcher spins up a
new thread/greenlet and waits for it to stabilize before
any callbacks might run.
"""
watcher = PatientChildrenWatch(self._client, self._party_path,
self._time_boundary)
asy = watcher.start()
if func is not None:
# We spin up the function in a separate thread/greenlet
# to ensure that the rawlinks it might use won't be
# blocked
if asynchronous:
func = partial(self._client.handler.spawn, func)
asy.rawlink(func)
return asy
def _establish_sessionwatch(self, state):
"""Register ourself to listen for session events, we shut down
if we become lost"""
with self._state_change:
if self.failed:
pass
elif state == KazooState.LOST:
self._client.handler.spawn(self._fail_out)
elif not self.release:
self._set_state(PartitionState.RELEASE)
return
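# Hedged usage sketch: the properties and methods above match the interface of
# a ZooKeeper set partitioner (e.g. kazoo's SetPartitioner). Assuming that
# interface, a typical consumer loop looks like the following (the path and
# item set are made-up values; kept commented so it never runs as part of this
# module).
#
# from kazoo.client import KazooClient
#
# client = KazooClient(hosts='127.0.0.1:2181')
# client.start()
# partitioner = client.SetPartitioner(path='/mypartition', set=('a', 'b', 'c'))
# while True:
#     if partitioner.failed:
#         raise Exception("Lost or unable to acquire partition")
#     elif partitioner.release:
#         partitioner.release_set()       # triggers re-allocation
#     elif partitioner.acquired:
#         for item in partitioner:        # iterate over the acquired partition
#             pass                        # ... process item ...
#     elif partitioner.allocating:
#         partitioner.wait_for_acquire()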
"""Form class groups.
#TODO: add references to literature.
"""
import logging
from math import floor, sqrt, log, log2
from sec_groups.tools.secgcd import (
extended_euclid_xgcd,
secure_gcd,
secure_xgcd,
secure_binary_xgcd,
secure_division,
)
from sec_groups.tools.bitlen import bit_length_integrated
from sec_groups.tools.repeat import secure_pow
from mpyc.runtime import mpc
import mpyc.gmpy as gmpy2
from sec_groups.tools.find_primes import find_primes_for_schnorr, _find_ike_prime
logger_cg = logging.getLogger("classgroups")
logger_cg.setLevel(logging.INFO)
def xgcd_(a, b):
"""Wraps extended euclid from secgcd module."""
return extended_euclid_xgcd(a, b)
def discriminant(f):
a, b, c = f[0], f[1], f[2]
return b ** 2 - 4 * a * c
def lincong(a, b, m):
"""Solve ax = b mod m
return mu, nu such that x = mu + nu n for all n in Z.
Based on Lipa Long, "Binary Quadratic Forms", 2019.
See: https://github.com/Chia-Network/vdf-competition/blob/master/classgroups.pdf
"""
g, d, e = xgcd_(a, m)
logger_cg.debug(f"In lincong, done xgcd: {g}, {d}, {e} = xgcd({a}, {m})")
q, r = divmod(b, g)
logger_cg.debug(f"In lincong, done {q}, {r} = division({b}, {g}).")
# L19 Thm. 7.1: Congruence has a solution iff gcd(a,m) | b.
if r != 0:
raise ValueError("The linear congruence has no solution")
else:
mu = (q * d) % m
logger_cg.debug(f"In lincong, done _, {mu} = division({q}*{d}, {m}).")
nu = m // g
return mu, nu
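# Illustrative check (example values chosen here, not taken from the source):
# solve 3*x = 6 (mod 9). gcd(3, 9) = 3 divides 6, so a solution exists and all
# solutions are mu + nu*n. The representative mu depends on the xgcd sign
# convention, so only the defining property is asserted.
#
# mu, nu = lincong(3, 6, 9)
# assert (3 * mu) % 9 == 6 % 9            # mu is a particular solution
# assert (3 * (mu + nu)) % 9 == 6 % 9     # stepping by nu stays a solution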
def secure_lincong(a, b, m):
"""Solve ax = b mod m
return mu, nu such that x = mu + nu n for all n in Z.
"""
g, d, e = secure_xgcd(a, m)
logger_cg.debug(f"In lincong, done secure_xgcd().")
# q = floor(b/g)
# q = b // g
# r = b % g
# q, r = secure_division(b, g)
q = b / g
r = 0
logger_cg.debug(f"In lincong, done secure_division(b, g).")
if isinstance(r, int) and r != 0:
raise ValueError("The congruence has no solution")
else:
# mu = (q * d) % m
_, mu = secure_division(q * d, m)
logger_cg.debug(f"In lincong, done secure_division(q*d, m).")
# nu = m // g
# nu, _ = secure_division(m, g)
nu = m / g
return mu, nu
def check_well_formed(f):
a, b, c = f[0], f[1], f[2]
disc = b ** 2 - 4 * a * c
if a > 0 and disc < 0:
pass
else:
raise ValueError(
f"Form ({a}, {b}, {c}) does not have a > 0 and discriminant < 0: a={a}, disc={disc} "
)
def check_reduced(f):
a, b, c = f[0], f[1], f[2]
if -a < b and b <= a: # check normalized
pass
else:
return False
if a <= c:
pass
else:
return False
if a == c:
if b >= 0:
pass
else:
return False
return True
def normalize(f):
a, b, c = f[0], f[1], f[2]
group = type(f)
check_well_formed(f)
r = (a - b) // (2 * a)
eta = (a, b + 2 * r * a, a * r ** 2 + b * r + c)
return group(eta)
def reduce_form(f):
group = type(f)
check_well_formed(f)
f = normalize(f)
while not check_reduced(f):
a, b, c = f[0], f[1], f[2]
s = (c + b) // (2 * c)
f = group((c, -1 * b + 2 * s * c, c * s ** 2 - b * s + a))
return f
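# Illustrative sketch (the concrete form-group class is defined elsewhere in
# this package; the tuple subclass below is only a stand-in so reduce_form can
# be exercised, which is why this stays commented):
#
# class _Form(tuple):
#     pass
#
# f = _Form((11, 49, 55))   # discriminant = 49**2 - 4*11*55 = -19 < 0 and a > 0
# g = reduce_form(f)        # should give the reduced form (1, 1, 5)
# assert check_reduced(g) and discriminant(g) == discriminant(f)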
@mpc.coroutine
async def secure_binary_reduce(f, size_b = None, leak_size_b = True):
"""Binary reduction algorithm by Agarwal and Frandsen.
Based on Algorithm 3 from AF06: 'A New GCD Algorithm for Quadratic Number
Rings with Unique Factorization' by <NAME> Frandsen, 2006 (Aarhus)
https://users-cs.au.dk/gudmund/Documents/38870030.pdf
Requires:
f is positive definite (iff discriminant < 0 and a > 0).
NB: Opening (leaking) size(b) is the default, to reduce the number of
iterations of the main loop. The alternative is to pass a size_b bound.
"""
def size(a):
# Requires non-negative values
return bit_length_integrated(mpc, a)
def right_action_S_on_f(f):
return [f[2], -f[1], f[0]]
def right_action_Tm_on_f(m, f):
fa, fb, fc = f[0], f[1], f[2]
return [fa, fb + 2 * m * fa, (m ** 2) * fa + m * fb + fc]
sec_grp = type(f)
await mpc.returnType(sec_grp)
secint = sec_grp.sectype_of_value
a, b, c = f[0], f[1], f[2]
if size_b:
n = size_b
elif not size_b and leak_size_b:
n = await mpc.output(size(b)) # TODO: find good bound for for-loop
else:
raise NotImplementedError
for i in range(n):
sgn_b = 1 - 2 * mpc.sgn(
b, l=n + 3, LT=True
) # TODO: check l; if n + 0, sgn_b produces incorrect values < -1
abs_b_gt_abs_2a = sgn_b * b > 2 * a
abs_a_gt_abs_c = a > c # a always positive, because f positive definite
ab_gt_0 = (sgn_b * sgn_b + sgn_b) // 2 # a always positive, because f positive definite
size_abs_b = size(sgn_b * b)
size_a = size(a)
# TODO: find bound for (bit-length of) j.
j = size_abs_b - size_a - 1
# take |j| to avoid negative secint exponents. 2**j is used when |B|>2|A| and original j is positive
sgn_j = 1 - 2 * mpc.sgn(j, l=n, LT=True)
abs_j = sgn_j * j
abs_j_bits = mpc.to_bits(abs_j, n)
m = secure_pow(2, abs_j_bits, secint)
m = mpc.if_else(ab_gt_0, -m, m)
a, b, c = mpc.if_else(
abs_b_gt_abs_2a,
right_action_Tm_on_f(m, (a, b, c)),
mpc.if_else(abs_a_gt_abs_c, right_action_S_on_f((a, b, c)), [a, b, c]),
)
print(f"Secure binary reduction: {round(100*i/n)}%", end="\r")
assert f.group.discriminant < 0
m = mpc.if_else(b > 0, secint(-1), secint(1))
abs_b_gt_a = mpc.abs(b) > a
a, b, c = mpc.if_else(abs_b_gt_a, right_action_Tm_on_f(m, (a, b, c)), [a, b, c])
a_gt_c = a > c
a, b, c = mpc.if_else(
abs_b_gt_a * a_gt_c, right_action_S_on_f((a, b, c)), [a, b, c]
)
a, b, c = mpc.if_else((b < 0) * (a == c), right_action_S_on_f((a, b, c)), [a, b, c])
a, b, c = mpc.if_else(
(b < 0) * (a == -b), right_action_Tm_on_f(1, (a, b, c)), [a, b, c]
)
return sec_grp((a, b, c))
def parteucl(a, b, L):
"""Extended partial Euclides following Cohen Section 5.4.
"""
# Step 1 Initialize
v = 0
d = a
v2 = 1
v3 = b
z = 0
while abs(v3) > L:
# Step 3 Euclidean step
q, t3 = d//v3, d%v3
t2 = v - q*v2
v = v2
d = v3
v2 = t2
v3 = t3
z = z+1
# Step 2 Finished?
if z % 2:
v2 = -v2
v3 = -v3
return d, v, v2, v3, z
def nudupl(f):
"""Square(f) following Cohen, Alg. 5.4.8.
"""
L = int(((abs(f.discriminant))/4)**(1/4))
a, b, c = f[0], f[1], f[2]
# Step 1 Euclidean step
d1, u, v = extended_euclid_xgcd(b, a)
A = a//d1
B = b//d1
C = (-c*u) % A
C1 = A-C
if C1 < C:
C = -C1
# Step 2 Partial reduction
d, v, v2, v3, z = parteucl(A, C, L)
# Step 3 Special case
if z==0:
g = (B*v3+c)//d
a2 = d**2
c2 = v3**2
b2 = b + (d+v3)**2 - a2 - c2
c2 = c2 + g*d1
else:
# Step 4 Final computations
e = (c*v + B*d)//A
g = (e*v2 - B)//v
b2 = e*v2 + v*g
if d1>1:
b2 = d1*b2
v = d1*v
v2 = d1*v2
a2 = d**2
c2 = v3**2
b2 = b2 + (d+v3)**2 - a2 - c2
a2 = a2 + e*v
c2 = c2 + g*v2
f2 = type(f)((a2, b2, c2))
return f2
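# Hedged consistency note (assumes the form-group class exposes its
# coefficients by indexing, as used above): for a reduced form f, nudupl(f)
# represents the square of f in the class group, so after reduction it should
# agree with the plain squaring routine defined below:
#
# assert tuple(reduce_form(nudupl(f))) == tuple(reduce_form(square(f)))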
def square(f):
"""Square form"""
group = type(f)
a, b, c = f[0], f[1], f[2]
mu, _ = lincong(b, c, a)
A = a ** 2
B = b - 2 * a * mu
C = mu ** 2 - (b * mu - c) // a
return group((A, B, C))
def secure_square(f):
"""Square form"""
sectype = type(f)
a, b, c = f[0], f[1], f[2]
mu, _ = secure_lincong(b, c, a)
A = a ** 2
B = b - 2 * a * mu
# C = mu ** 2 - (b * mu - c) // a
C = mu ** 2 - (b * mu - c) / a
return sectype((A, B, C))
def repeat_square(f, n):
new_f = f
for i in range(n):
new_f = reduce_form(square(new_f))
return new_f
def nucomp(phi1, phi2):
"""Nucomp algorithm for | |
if epsilon > rg.random(dtype=np.float32):
# Epsilon-greedy behaviour
# Turning to exploration
explotation = False
if explotation: # Exploitation step: sample all the expected values to choose greedily according to the rollout algorithm.
best_action, best_cost, res = sampler(env, h_mode=H, alpha=alpha, n_samples=N_samples, k=K,
lookahead=lookahead, min_obj=min_objective, seed=int(rg.random()*10**2))
else: # Option to take an exploration step
actions = list(env.action_set)
e_action = rg.choice(actions) # Choose a random action and sample its cost with a lookahead of 1.
best_action, best_cost, _ = sampler(env, h_mode=H, alpha=alpha, n_samples=N_samples, k=K,
lookahead=1, action_set=[e_action], min_obj=min_objective,
seed=int(rg.random()*10**2))
return best_action, best_cost
class Experiment():
"""
Class designed to run a complete rollout experiment, with options to generate graphs
and animations and to save results in pickle form.
As this is an RL-Rollout implementation, it needs a base heuristic or base policy to
call and to compare against while improving the min/max of the cost function.
-- This is the GPU-only version. It does not support a Heuristic as a function or object --
Parameters
----------
ENV : Environment Object
Reference to the environment object. This class is built for the EnvMakerForestFire
environment.
The following methods and variables are expected in env:
- action_set
- step()
- Encode()
- copy()
- make_checkpoint()
- load_checkpoint()
- frame()
- make_gif()
H : Function object
Object or function that references the heuristic to execute. These functions
must accept their input as a dict with at least the 'observation' and 'env'
keys. They must return an action.
H_mode : int
Inside rollout_sampler_gpu.Heuristic there is a set of heuristics addressed by
passing this argument. For a heuristic to work in this version, it needs to be written
inside the function compiled to the device. For the results to be accurate, it needs to
output the same values per state.
PI : Policy object
Here one can pass a policy already started. If None, a new one is generated.
N_TRAIN : int
Number of tests to run the experiment with .run() when constructing
a unique policy with rollout.
The result is the average of the costs over all the tests for each run.
Notice that inside a run every test starts with the same initial state.
Each test is executed with all the following variables.
N_STEPS : int
Number of steps that the environment takes. In the Helicopter environment, the variable
freeze makes the environment update every FREEZE steps. Therefore, the agent executes
N_STEPS * FREEZE steps in total.
N_SAMPLES : int
Number of samples required to calculate the expected value of the
cost function.
K : int
Number of steps to keep executing the heuristic.
LOOKAHEAD : int
Number of steps that the rollout algorithm can take in greedy form, forming a series of controls
that minimizes or maximizes the cost function. This increases the cost of the computation, and
all the controls except the first are discarded.
ALPHA : float
Discount factor for the cost function.
EPSILON : float
Probability in the range 0 < epsilon <= 1 of taking an exploration
action. This makes up the behaviour of an epsilon-greedy technique, with greedy
here being the heuristic H.
EPSILON_DECAY : float
Rate between 0 and 1 by which the value of epsilon decays every time it is used in the
rollout executions.
MIN_OBJECTIVE : bool
Variable to define whether the objective is to maximize or minimize the cost function.
This is problem/model dependent.
RUN_GIF : bool
Variable to control whether the last execution of run generates a frame for each
agent step, so that a .gif can be generated with the make_gif method.
Methods
-------
Experiment.run()
Experiment.run_multiple_LH()
Experiment.policy_test()
Experiment.make_graph()
Experiment.make_gif()
Experiment.pickle()
Dumps the sequence of costs obtained and the policy object.
Experiment.reset()
Cleans buffers for graphs and gifs. Restarts counters.
"""
def __init__(
self,
ENV,
H,
H_mode=0,
PI = None,
N_TRAIN = 10,
N_STEPS = 25,
N_SAMPLES = 29,
K = 100,
LOOKAHEAD = 1,
ALPHA = 0.99,
EPSILON = 0,
EPSILON_DECAY = 0.99,
MIN_OBJECTIVE = True,
RUN_GIF = False,
):
def check_ints(suspect):
assert (suspect >= 1) and isinstance(suspect, int),\
"This number must an integer of at least 1. {} = {} given instead.".format(type(suspect), suspect)
return suspect
def check_prob(suspect):
assert (suspect <= 1) and (suspect >= 0),\
"This value must be between 0 and 1. {} was given".format(suspect)
return suspect
# Saving references to objects and classes.
self.env = ENV
self.env_h = None # Var to env copy for applying the heuristic
self.H = H
self.H_mode = H_mode
self.min_obj = MIN_OBJECTIVE
assert isinstance(MIN_OBJECTIVE, bool), "Indicate with True/False whether minimizing is the objective. Invalid type {} passed".format(type(MIN_OBJECTIVE))
if PI is None:
# Creates a new policy
self.PI = Policy(min_objective=MIN_OBJECTIVE)
else:
self.PI = PI
# Loading variables
self.N_TRAIN = check_ints(N_TRAIN)
self.N_STEPS = check_ints(N_STEPS)
self.N_SAMPLES = check_ints(N_SAMPLES)
if K < 0:
self.K = -1
else:
self.K = check_ints(K)
self.LOOKAHEAD = check_ints(LOOKAHEAD)
self.alpha = check_prob(ALPHA)
self.epsilon_op = check_prob(EPSILON)
self.epsilon = check_prob(EPSILON)
self.epsilon_decay = check_prob(EPSILON_DECAY)
self.init_logger = False
self.last_time = 0
self.init_logger = self.logger("Logger initialized.",False)
self.logger(" - GPU Experiment -",False, False)
env_desc = "Environment Parameters -- Grid: {} Cost_f: '{}'\n Cost_Tree: {} Cost_Fire: {} Cost_hit: {}\n\
Cost_Empty: {} Cost_step: {} Cost_move: {}\n\
Min_obj: {} P_Fire: {} P_Tree: {}\n Steps_To_Update {}".format(ENV.grid.shape, ENV.reward_type, ENV.reward_tree, ENV.reward_fire, ENV.reward_hit,
ENV.reward_empty, ENV.reward_step, ENV.reward_move,
MIN_OBJECTIVE, ENV.p_fire, ENV.p_tree, ENV.moves_before_updating)
self.logger(env_desc,False,False)
# This class has its own random generator.
self.rg = np.random.Generator(np.random.SFC64())
self.runs_rollout_results = []
self.runs_rollout_results_step = []
self.runs_heu_results = []
self.runs_heu_results_step = []
self.runs_rollout_archive = []
self.runs_heu_archive = []
self.c_runs = 0
self.theres_run_gif = False
self.theres_test_gif = False
self.RUN_GIF = RUN_GIF
self.frames_run_r = []
self.frames_run_h = []
self.frames_test_r = []
self.run_h_tcell = []
self.run_r_tcell = []
self.mod = "Cost"
def __del__(self):
self.logger("The experiment is OVER!")
self.logfile.close()
del self.env_h
del self.env
del self.PI
return None
def reset(self):
# Free memory.
self.env.checkpoints = []
self.env_h.checkpoints = []
self.runs_rollout_results = []
self.runs_rollout_results_step = []
self.runs_heu_results = []
self.runs_heu_results_step = []
self.runs_rollout_archive = []
self.runs_heu_archive = []
self.frames_run_r = []
self.frames_run_h = []
self.frames_test_r = []
self.theres_run_gif = False
self.theres_test_gif = False
self.c_runs = 0
self.epsilon = self.epsilon_op
def run(self, GIF=None, GRAPH=True):
"""
Creates an initial state by resetting the environment and runs the full number of train
iterations. This updates the policy with more states or with better actions.
Parameters
----------
GIF : bool
Variable to indicate whether to generate frames for the last
train loop of the run; if the class was already initialized with this behavior, this
changes nothing. Default False.
GRAPH : bool
Draws and saves the graphs from the experiment. If a graph has not been generated and
one does not restart the class
"""
if not GIF is None:
RUN_GIF = GIF
else:
RUN_GIF = self.RUN_GIF
# Resetting env and storing the initial observations
observation = self.env.reset()
observation_1 = observation
#Making copy of the env to apply the heuristic
self.env_h = self.env.copy()
# Making checkpoints
checkpoint_env = self.env.make_checkpoint()
checkpoint_env_h = self.env_h.make_checkpoint()
# Lists to save the results from the N_TRAIN
RO_RESULTS=[]
H_RESULTS=[]
RO_RESULTS_C=[]
H_RESULTS_C=[]
# Measuring time of execution.
self.logger("Run {} - Metadata: {}\n |".format(self.c_runs, self.metadata_str), True, True, True)
# First loop to execute a rollout experiment.
for n_test in range(self.N_TRAIN):
# In order to compare the progress of the two environments,
# their random generators are reseeded with the same seed.
# They should advance identically over the whole run; the samples,
# being copies, create their own random generators, so they won't be
# affected by this.
M_SEED = int(self.rg.random()*10**4)
self.env.rg = np.random.Generator(np.random.SFC64(M_SEED))
self.env_h.rg = np.random.Generator(np.random.SFC64(M_SEED))
self.logger(" |-- Test : {} of {}".format(n_test+1, self.N_TRAIN))
# Making a checkpoint from the initial state
# Repository: kinect59/ad_examples
import os
import numpy as np
import logging
from aad.aad_globals import *
from aad.aad_support import *
from common.expressions import get_feature_meta_default, convert_feature_ranges_to_rules, \
get_max_len_in_rules, convert_conjunctive_rules_to_feature_ranges
from bayesian_ruleset.bayesian_ruleset import BayesianRuleset
def get_most_anomalous_subspace_indexes(model, n_top=30):
wd = np.multiply(model.w, model.d)
ordered_wd_idxs = np.argsort(-wd)[0:n_top] # sort in reverse order
# logger.debug("ordered_wd:\n%s" % str(wd[ordered_wd_idxs]))
return ordered_wd_idxs
def get_region_indexes_for_instances(x, model=None, n_top=-1):
region_idxs = np.array(model.get_region_ids(x))
# logger.debug("#region_idxs: %d" % len(region_idxs))
# logger.debug("region_idxs:\n%s" % str(list(region_idxs)))
if n_top < 0:
n_top = len(region_idxs)
wd = np.multiply(model.w[region_idxs], model.d[region_idxs])
ordered_wd_idxs = np.argsort(-wd)[0:min(n_top, len(wd))] # sort in reverse order
# logger.debug("ordered_wd_idxs:\n%s" % str(list(ordered_wd_idxs)))
return region_idxs[ordered_wd_idxs]
def get_region_volumes(model, region_indexes, feature_ranges):
volumes = np.zeros(len(region_indexes), dtype=np.float32)
d = feature_ranges.shape[0] # number of features
for i, ridx in enumerate(region_indexes):
region = model.all_regions[ridx].region
# logger.debug(str(region))
region_ranges = np.zeros(d, dtype=np.float32)
for j in range(feature_ranges.shape[0]):
rmin = feature_ranges[j][0] if np.isinf(region[j][0]) else region[j][0]
rmax = feature_ranges[j][1] if np.isinf(region[j][1]) else region[j][1]
# logger.debug("%d: %f, %f" % (j, rmin, rmax))
if rmax == rmin:
# logger.debug("%d: %f, %f" % (j, rmin, rmax))
# If the range of a variable is a single value, we just ignore it.
region_ranges[j] = 1.0
else:
region_ranges[j] = rmax - rmin
volumes[i] = np.prod(region_ranges)
# logger.debug("volumes:\n%s" % str(volumes))
return volumes
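# Worked toy example (hypothetical numbers): with two features and
# feature_ranges = [[0, 10], [0, 10]], a region ((-inf, 2), (3, 5)) has its
# open dimension clipped to the data range, so its volume is
# (2 - 0) * (5 - 3) = 4.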
def get_instances_for_description(x=None, labels=None, metrics=None, instance_indexes=None):
""" Returns indexes of instances for which we need descriptions
The instances are selected as follows:
- If the instance indexes are directly passed, then select those
- If instance indexes are not passed, then the queried indexes which are
labeled as true anomalies will be selected.
- If there are no queried instances, then indexes of all true anomalies will
be selected.
:param x: np.ndarray
The instances in *original feature* space
:param labels: np.array(int)
True labels
:param metrics: MetricsStructure
:param instance_indexes: np.array(int)
Indexes of instances whose region memberships need to be checked
:return:
"""
if instance_indexes is not None:
return instance_indexes
elif metrics is not None and metrics.queried is not None:
queried = np.array(metrics.queried)
eval_indexes = np.where(labels[queried] == 1)[0]
instance_indexes = queried[eval_indexes]
else:
instance_indexes = np.where(labels == 1)[0]
# logger.debug("instance_indexes: %d\n%s" % (len(instance_indexes), str(list(instance_indexes))))
return instance_indexes
def get_regions_for_description(x, instance_indexes=None, model=None, region_score_only=False, n_top=-1):
""" Get the set of candidate regions for describing the instances
Ensures that at most n_top of the most anomalous regions that an instance belongs to
will be present in the output list of regions
:param x: np.ndarray
The instances in *original feature* space
:param instance_indexes: np.array(int)
Indexes of instances whose region memberships need to be checked
:param model: Aad
AAD model
:param region_score_only: bool
If False, score regions by multiplying region anomaly scores with the corresponding weights
If True, score regions by only their anomaly scores
:param n_top: int
Number of top ranked regions (by score) per data instance to use
:return: np.array(int)
"""
# instance_region_idxs = np.array(model.get_region_ids(x[instance_indexes, :]))
# logger.debug(instance_region_idxs)
if region_score_only:
nwd = -model.d
else:
nwd = -np.multiply(model.w, model.d)
regions = set()
if n_top < 0:
n_top = len(nwd)
for i, inst in enumerate(instance_indexes):
inst_regs = np.array(model.get_region_ids(x[[inst], :]))
idxs = np.argsort(nwd[inst_regs])
ordered_inst_regs = inst_regs[idxs]
ordered_inst_regs = ordered_inst_regs[0:min(len(ordered_inst_regs), n_top)]
regions.update(ordered_inst_regs)
regions = list(regions)
# logger.debug("selected regions: %d\n%s" % (len(regions), str(regions)))
return np.array(regions, dtype=int)
def get_region_memberships(x, model=None,
instance_indexes=None, region_indexes=None):
""" Returns which regions the required instances belong to.
:param x: np.ndarray
The instances in *original feature* space
:param labels: np.array(int)
True labels
:param model: Aad
AAD model
:param metrics: MetricsStructure
:param instance_indexes: np.array(int)
Indexes of instances whose region memberships need to be checked
:param region_indexes: np.array(int)
Indexes of the candidate regions within which to contain the instances
:return: np.array(int), np.ndarray(int)
The first value is the list of instances which belong to any of the regions passed.
The second is a matrix of binary values, one row per instance. The columns correspond
to the regions. '1' indicates that an instance belongs to the corresponding region,
'0' otherwise.
"""
if instance_indexes is None or len(instance_indexes) == 0:
return None, None
nregions = len(region_indexes)
member_insts = list()
region_membership_indicators = list()
for i in instance_indexes:
inds = np.zeros(nregions, dtype=int)
for j, ridx in enumerate(region_indexes):
inds[j] = is_in_region(x[i, :], model.all_regions[ridx].region)
if np.sum(inds) > 0:
member_insts.append(i)
region_membership_indicators.append(np.reshape(inds, newshape=(1, nregions)))
else:
# logger.debug("No region selected for instance %d" % i)
pass
member_insts = np.array(member_insts, dtype=int)
# logger.debug("#region_indexes: %d, #instance_indexes: %d, #region_membership_indicators: %d" %
# (len(region_indexes), len(instance_indexes), len(region_membership_indicators)))
if len(region_membership_indicators) > 0:
region_membership_indicators = np.vstack(region_membership_indicators)
else:
region_membership_indicators = None
return member_insts, region_membership_indicators
def get_compact_regions(x, model=None, instance_indexes=None, region_indexes=None, volumes=None, p=1):
""" Returns the most compact set of regions among region_indexes that contain the required instances
:param x: np.ndarray
The instances in *original feature* space
:param model: Aad
AAD model
:param region_indexes: np.array(int)
Indexes of the candidate regions within which to contain the instances
:param volumes: np.array(float)
The volumes of the regions whose indexes are provided in region_indexes
:param p: int
Determines how much to penalize the size of the regions (based on their volumes).
If this is large, then bigger regions will be strongly discouraged from getting selected.
:return:
"""
import cvxopt
from cvxopt import glpk
member_insts, member_inds = get_region_memberships(x, model=model,
instance_indexes=instance_indexes,
region_indexes=region_indexes)
# logger.debug("anom indexes in selected regions (%d):\n%s" % (len(member_anoms), str(list(member_anoms))))
# logger.debug("member_inds (%s):\n%s" % (str(member_inds.shape), str(member_inds)))
nvars = member_inds.shape[1]
glpk.options['msg_lev'] = 'GLP_MSG_OFF'
c = cvxopt.matrix([float(v**p) for v in volumes], tc='d') # minimize total volume**p
# below states that each anomaly should be included in at least one region
G = cvxopt.matrix(-member_inds, tc='d')
h = cvxopt.matrix([-1] * member_inds.shape[0], tc='d')
bin_vars = [i for i in range(nvars)]
(status, soln) = cvxopt.glpk.ilp(c, G, h, B=set(bin_vars))
# logger.debug("ILP status: %s" % status)
if soln is not None:
soln = np.reshape(np.array(soln), newshape=(nvars,))
# logger.debug("ILP solution:\n%s" % str(soln))
idxs = np.where(soln == 1)[0]
if False:
logger.debug("\nregion_indexes: %d\n%s\nmember_insts: %d\n%s" %
(len(idxs), str(list(region_indexes[idxs])),
len(member_insts), str(list(member_insts))))
return region_indexes[idxs]
else:
return None
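# Toy illustration of the set-cover ILP above (made-up numbers): two instances,
# three candidate regions. Region 1 alone covers both instances at volume 4,
# while regions 0 and 2 together cover both at total volume 3, so the solver
# should select regions 0 and 2. Kept commented so the module has no
# import-time side effects.
#
# import numpy as np
# import cvxopt
# from cvxopt import glpk
# volumes = np.array([1.0, 4.0, 2.0])
# member_inds = np.array([[1., 1., 0.],   # instance 0 falls in regions 0 and 1
#                         [0., 1., 1.]])  # instance 1 falls in regions 1 and 2
# c = cvxopt.matrix(volumes, tc='d')
# G = cvxopt.matrix(-member_inds, tc='d')
# h = cvxopt.matrix([-1.0] * member_inds.shape[0], tc='d')
# status, soln = glpk.ilp(c, G, h, B=set(range(len(volumes))))
# # soln should be [1, 0, 1], i.e. regions 0 and 2 are chosen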
class InstancesDescriber(object):
def __init__(self, x, y, model, opts, sample_negative=False):
"""
:param x: np.ndarray
The instance matrix with ALL instances
:param y: np.array
:param model: Aad
:param opts: AadOpts
:param sample_negative: bool
"""
self.x = x
self.y = y
self.model = model
self.opts = opts
self.sample_negative = sample_negative
self.meta = None
def sample_instances(self, exclude, n):
s = np.ones(self.x.shape[0], dtype=np.int32)
s[exclude] = 0
s = np.where(s == 1)[0]
np.random.shuffle(s)
return s[:n]
def convert_regions_to_rules(self, regions, region_indexes=None):
if self.meta is None:
raise ValueError("must set metadata before calling this function")
rules, str_rules = convert_feature_ranges_to_rules(regions, self.meta)
if region_indexes is not None:
for rule, index in zip(rules, region_indexes):
rule.id = index
return rules, str_rules
def get_top_regions(self, instance_indexes):
""" Gets the regions having highest anomaly scores
:param instance_indexes: np.array
:return: tuple, list(map)
tuple: (region indexes, #instances among instance_indexes that fall in the region)
list(map): list of region extents where each region extent is a
map {feature index: feature range}
"""
region_indexes = get_regions_for_description(self.x, instance_indexes=instance_indexes,
model=self.model,
n_top=self.opts.describe_n_top
)
regions = [self.model.all_regions[ridx].region for ridx in region_indexes]
return region_indexes, regions
def describe(self, instance_indexes):
""" Generates descriptions for positive instances among those passed as input
:param instance_indexes: indexes of instances
:param sample_negative: Sample random instances and mark them as negative
Might help in avoiding false positives.
Number of sampled instances will be len(instance_indexes)
:return: Rules
"""
pass
class CompactDescriber(InstancesDescriber):
""" Generates compact descriptions for instances
This is different from the method get_compact_regions() in that it
reduces false positives by excluding negative examples while always
including positive examples.
"""
def __init__(self, x, y, model, opts, sample_negative=False):
InstancesDescriber.__init__(self, x, y, model, opts, sample_negative)
self.prec_threshold = 0.4
self.neg_penalty = 1.0
self.meta = get_feature_meta_default(x, y)
# will be used to compute volumes
self.feature_ranges = get_sample_feature_ranges(self.x)
def get_complexity(self, regions):
""" Gets the complexity of rules derived from feature ranges that define the regions
Compute the finite values in defining the regions. These finite values
become part of the rule. Fewer such values, smaller the rule in length.
E.g.: let a region be:
{0: (-inf, 2), 1: (3, 5), 2: (-inf, inf)}
This region will become a rule of length 3 and complexity = 2^(3-1) = 4:
feature0 <= 2 & feature1 > 3 & feature1 <= 5
:param regions: list of dict
:return: np.array
"""
complexity = np.zeros(len(regions), dtype=np.float32)
# | |
#!/usr/bin/env python
"""Convolutional Neural Network Training Functions
Functions for building and training a (UNET) Convolutional Neural Network on
images of Mars and binary ring targets.
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import pandas as pd
import h5py
from keras.models import Model
from keras.layers.core import Dropout, Reshape
from keras.regularizers import l2
from keras.optimizers import Adam
from keras.callbacks import EarlyStopping
from keras import backend as K
import deepmars.features.template_match_target as tmt
import deepmars.utils.processing as proc
import click
import logging
from pathlib import Path
from dotenv import find_dotenv, load_dotenv
import os
from joblib import Parallel, delayed
from tqdm import tqdm, trange
# Check Keras version - code will switch API if needed.
from keras import __version__ as keras_version
K.set_image_dim_ordering('tf')
k2 = True if keras_version[0] == '2' else False
# If Keras is v2.x.x, create Keras 1-syntax wrappers.
if not k2:
from keras.models import load_model
from keras.layers import merge, Input
from keras.layers.convolutional import (Convolution2D, MaxPooling2D,
UpSampling2D)
else:
from keras.models import load_model
from keras.layers import Concatenate, Input
from keras.layers.convolutional import (Conv2D, MaxPooling2D,
UpSampling2D)
def merge(layers, mode=None, concat_axis=None):
"""Wrapper for Keras 2's Concatenate class (`mode` is discarded)."""
return Concatenate(axis=concat_axis)(list(layers))
def Convolution2D(n_filters, FL, FLredundant, activation=None,
init=None, W_regularizer=None, border_mode=None):
"""Wrapper for Keras 2's Conv2D class."""
return Conv2D(n_filters, FL, activation=activation,
kernel_initializer=init,
kernel_regularizer=W_regularizer,
padding=border_mode)
minrad_ = 5
maxrad_ = 40
longlat_thresh2_ = 1.8
rad_thresh_ = 1.0
template_thresh_ = 0.5
target_thresh_ = 0.1
@click.group()
def dl():
log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=log_fmt)
# not used in this stub but often useful for finding various files
project_dir = Path(__file__).resolve().parents[2]
# find .env automagically by walking up directories until it's found, then
# load up the .env entries as environment variables
load_dotenv(find_dotenv())
import sys
sys.path.append(os.getenv("DM_ROOTDIR"))
pass
########################
def get_param_i(param, i):
"""Gets correct parameter for iteration i.
Parameters
----------
param : list
List of model hyperparameters to be iterated over.
i : integer
Hyperparameter iteration.
Returns
-------
Correct hyperparameter for iteration i.
"""
if len(param) > i:
return param[i]
else:
return param[0]
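# Example (hypothetical values): with param = [0.1, 0.2], iteration i = 1
# returns 0.2, while with param = [0.1] any i >= 1 falls back to 0.1.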
########################
def custom_image_generator(data, target, batch_size=32):
"""Custom image generator that manipulates image/target pairs to prevent
overfitting in the Convolutional Neural Network.
Parameters
----------
data : array
Input images.
target : array
Target images.
batch_size : int, optional
Batch size for image manipulation.
Yields
------
Manipulated images and targets.
"""
D, L, W = data.shape[0], data[0].shape[0], data[0].shape[1]
while True:
shuffle_index = np.arange(D)
# only shuffle once each loop through the data
np.random.shuffle(shuffle_index)
for i in np.arange(0, len(data), batch_size):
index = shuffle_index[i:i + batch_size]
d, t = data[index].copy(), target[index].copy()
# Random color inversion
# for j in np.where(np.random.randint(0, 2, batch_size) == 1)[0]:
# d[j][d[j] > 0.] = 1. - d[j][d[j] > 0.]
# Horizontal/vertical flips
for j in np.where(np.random.randint(0, 2, batch_size) == 1)[0]:
d[j], t[j] = np.fliplr(d[j]), np.fliplr(t[j]) # left/right
for j in np.where(np.random.randint(0, 2, batch_size) == 1)[0]:
d[j], t[j] = np.flipud(d[j]), np.flipud(t[j]) # up/down
# Random up/down & left/right pixel shifts, 90 degree rotations
npix = 15
# Horizontal shift
h = np.random.randint(-npix, npix + 1, batch_size)
# Vertical shift
v = np.random.randint(-npix, npix + 1, batch_size)
# 90 degree rotations
r = np.random.randint(0, 4, batch_size)
for j in range(batch_size):
d[j] = np.pad(d[j], ((npix, npix), (npix, npix), (0, 0)),
mode='constant')[npix + h[j]:L + h[j] + npix,
npix + v[j]:W + v[j] + npix, :]
sh, sv = slice(npix + h[j], L + h[j] + npix),\
slice(npix + v[j], W + v[j] + npix)
t[j] = np.pad(t[j], (npix,), mode='constant')[sh, sv]
d[j], t[j] = np.rot90(d[j], r[j]), np.rot90(t[j], r[j])
yield (d, t)
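# Hedged usage sketch: the generator is meant to be handed to Keras' fit loop.
# Names such as `model`, `X_train` and `Y_train`, and the step counts, are
# assumptions for illustration only.
#
# Keras 1 style:
# model.fit_generator(custom_image_generator(X_train, Y_train, batch_size=32),
#                     samples_per_epoch=len(X_train), nb_epoch=4)
# Keras 2 style:
# model.fit_generator(custom_image_generator(X_train, Y_train, batch_size=32),
#                     steps_per_epoch=len(X_train) // 32, epochs=4)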
def t2c(pred, csv, i,
minrad=minrad_,
maxrad=maxrad_,
longlat_thresh2=longlat_thresh2_,
rad_thresh=rad_thresh_,
template_thresh=template_thresh_,
target_thresh=target_thresh_):
return np.hstack([i,
tmt.template_match_t2c(pred, csv,
minrad=minrad,
maxrad=maxrad,
longlat_thresh2=longlat_thresh2,
rad_thresh=rad_thresh,
template_thresh=template_thresh,
target_thresh=target_thresh)])
def diagnostic(res, beta):
"""Calculate the metrics from the predictions compared to the CSV.
Parameters
------------
res: list of results containing:
image number, number of matched, number of existing craters, number of
detected craters, maximum radius detected, mean error in longitude,
mean error in latitude, mean error in radius, fraction of duplicates
in detections.
beta : int
Beta value when calculating F-beta score.
Returns
-------
dictionary : metrics stored in a dictionary
"""
counter, N_match, N_csv, N_detect,\
mrad, err_lo, err_la, err_r, frac_duplicates = np.array(res).T
# Keep only the images with at least one matched crater.
w = np.where(N_match > 0)
counter, N_match, N_csv, N_detect,\
mrad, err_lo, err_la, err_r, frac_dupes =\
counter[w], N_match[w], N_csv[w], N_detect[w],\
mrad[w], err_lo[w], err_la[w], err_r[w], frac_duplicates[w]
precision = N_match / (N_match + (N_detect - N_match))
recall = N_match / N_csv
fscore = (1 + beta**2) * (recall * precision) / \
(precision * beta**2 + recall)
diff = N_detect - N_match
frac_new = diff / (N_detect + diff)
frac_new2 = diff / (N_csv + diff)
frac_duplicates = frac_dupes
return dict(precision=precision,
recall=recall,
fscore=fscore,
frac_new=frac_new,
frac_new2=frac_new2,
err_lo=err_lo,
err_la=err_la,
err_r=err_r,
frac_duplicates=frac_duplicates,
maxrad=mrad,
counter=counter, N_match=N_match, N_csv=N_csv)
def get_metrics(data, craters_images, dim, model, name, beta=1, offset=0,
minrad=minrad_, maxrad=maxrad_,
longlat_thresh2=longlat_thresh2_,
rad_thresh=rad_thresh_, template_thresh=template_thresh_,
target_thresh=target_thresh_, rmv_oor_csvs=0):
"""Function that prints pertinent metrics at the end of each epoch.
Parameters
----------
data : hdf5
Input images.
craters : hdf5
Pandas arrays of human-counted crater data.
dim : int
Dimension of input images (assumes square).
model : keras model object
Keras model
beta : int, optional
Beta value when calculating F-beta score. Defaults to 1.
"""
X, Y = data[0], data[1]
craters, images = craters_images
# Get csvs of human-counted craters
csvs = []
# minrad, maxrad = 3, 50
cutrad, n_csvs = 0.8, len(X)
diam = 'Diameter (pix)'
for i in range(len(X)):
imname = images[i] # name = "img_{0:05d}".format(i)
found = False
for crat in craters:
if imname in crat:
csv = crat[imname]
found = True
if not found:
csvs.append([-2])
continue
# remove small/large/half craters
csv = csv[(csv[diam] < 2 * maxrad) & (csv[diam] > 2 * minrad)]
csv = csv[(csv['x'] + cutrad * csv[diam] / 2 <= dim)]
csv = csv[(csv['y'] + cutrad * csv[diam] / 2 <= dim)]
csv = csv[(csv['x'] - cutrad * csv[diam] / 2 > 0)]
csv = csv[(csv['y'] - cutrad * csv[diam] / 2 > 0)]
if len(csv) < 3: # Exclude csvs with few craters
csvs.append([-1])
else:
csv_coords = np.asarray((csv['x'], csv['y'], csv[diam] / 2)).T
csvs.append(csv_coords)
# Calculate custom metrics
print("csvs: {}".format(len(csvs)))
print("")
print("*********Custom Loss*********")
recall, precision, fscore = [], [], []
frac_new, frac_new2, mrad = [], [], []
err_lo, err_la, err_r = [], [], []
frac_duplicates = []
if isinstance(model, Model):
preds = None
# print(X[6].min(),X[6].max(),X.dtype,np.percentile(X[6],99))
preds = model.predict(X, verbose=1)
# save
h5f = h5py.File("predictions.hdf5", 'w')
h5f.create_dataset(name, data=preds)
print("Successfully generated and saved model predictions.")
else:
preds = model
# print(csvs)
countme = [i for i in range(n_csvs) if len(csvs[i]) >= 3]
print("Processing {} fields".format(len(countme)))
# preds contains a large number of predictions,
# so we run the template code in parallel.
res = Parallel(n_jobs=24,
verbose=5)(delayed(t2c)(preds[i], csvs[i], i,
minrad=minrad,
maxrad=maxrad,
longlat_thresh2=longlat_thresh2,
rad_thresh=rad_thresh,
template_thresh=template_thresh,
target_thresh=target_thresh)
for i in range(n_csvs) if len(csvs[i]) >= 3)
if len(res) == 0:
print("No valid results: ", res)
return None
# At this point we've processed the predictions with the template matching
# algorithm, now calculate the metrics from the data.
diag = diagnostic(res, beta)
print(len(diag["recall"]))
# print("binary XE score = %f" % model.evaluate(X, Y))
if len(diag["recall"]) > 3:
metric_data = [("N_match/N_csv (recall)", diag["recall"]),
("N_match/(N_match + (N_detect-N_match)) (precision)",
diag["precision"]),
("F_{} score".format(beta), diag["fscore"]),
("(N_detect - N_match)/N_detect" +
"(fraction of craters that are new)",
diag["frac_new"]),
("(N_detect - N_match)/N_csv (fraction" +
"of craters that are new, 2)", diag["frac_new2"])]
for fname, data in metric_data:
print("mean and std of %s = %f, %f" %
(fname, np.mean(data), np.std(data)))
for fname, data in [("fractional longitude diff", diag["err_lo"]),
("fractional latitude diff", diag["err_la"]),
("fractional radius diff", diag["err_r"]),
]:
print("median and IQR %s = %f, 25:%f, 75:%f" %
(fname,
np.median(data),
np.percentile(data, 25),
np.percentile(data, 75)))
print("""mean and std of maximum detected pixel radius in an image =
%f, %f""" % (np.mean(diag["maxrad"]), np.std(diag["maxrad"])))
print("""absolute maximum detected pixel radius over all images =
%f""" % np.max(diag["maxrad"]))
print("")
return diag
########################
def build_model(dim, learn_rate, lmbda, drop, FL, init, n_filters):
"""Function that builds the (UNET) convolutional neural network.
Parameters
----------
dim : int
Dimension of input images (assumes square).
learn_rate : float
Learning rate.
lmbda : float
Convolution2D regularization parameter.
drop : float
Dropout fraction.
FL : int
Filter length.
init : string
Weight initialization
#****************************
#* IMPORTS *
#****************************
from Tkinter import *
import tkMessageBox
from socket import*
import threading
import json
from time import sleep
import unicodedata
from pip.cmdoptions import editable
try:
import tkinter.ttk as ttk
except ImportError:
import Tkinter as tk
import ttk
import atexit
import sys
'''
CLASS DECLARATIONS
--------------------------------------
'''
'''
TIME ZONE MANAGER
--------------------------------------
'''
from datetime import datetime,tzinfo,timedelta
class Zone(tzinfo):
def __init__(self,offset,isdst,name):
self.offset = offset
self.isdst = isdst
self.name = name
def utcoffset(self, dt):
return timedelta(hours=self.offset) + self.dst(dt)
def dst(self, dt):
return timedelta(hours=1) if self.isdst else timedelta(0)
def tzname(self,dt):
return self.name
pass #end of zone
#EST = Zone(-5,False,'EST')
#print datetime.utcnow().strftime('%m/%d/%Y %H:%M:%S %Z')
'''
GMT = Zone(8,False,'GMT')
print datetime.now(GMT).strftime('%m/%d/%Y %I:%M:%S %p')
'''
#print datetime.now(EST).strftime('%m/%d/%Y %H:%M:%S %Z')
#t = datetime.strptime('2011-01-21 02:37:21','%Y-%m-%d %H:%M:%S')
#t = t.replace(tzinfo=GMT)
#print t
#print t.astimezone(EST)
'''
MONOCLIENT SERVER CONNECTIVITY
--------------------------------------
'''
class MonoClient():
def __init__(self):
#self.mime_result = {'type':'null','result':'null'}
self.HOST = '127.0.0.1' #'192.168.15.4'
self.PORT = 2224
try:
self.mono_socket = socket(AF_INET, SOCK_STREAM)
self.mono_socket.connect((self.HOST,self.PORT))
except:
self.showInfoMsg("Server Information","Server is unreachable. Please try again.")
print "Server is unreachable"
pass
#self.graph = GUI()
pass #end of construct
def send_request(self,request):
try:
data_json = json.dumps(request, ensure_ascii=False).encode('utf-8')
self.mono_socket.send(data_json)
return self.mono_socket.recv(131072)
except Exception as e:
print e
return "-143" # cannot reach the server
pass # end request
pass # end of class
#****************************
#* MAIN *
#****************************
''' CLASS GUI
---------------------------------------------------------------
'''
class GUI():
def __init__(self):
self.client = MonoClient()
self.broadcast_reciever = threading.Thread(target=self.fetch_broadcast)
self.client_lister = threading.Thread(target=self.fetch_clients)
self.graphics_render = threading.Thread(target=self.showLoginForm)
self.messenger = threading.Thread(target=self.fetch_messages)
self.private_records = []
print "BROADCAST SET"
self.auth_user = "USER"
self.GMT = Zone(8,False,'GMT') # +8 GMT ASIA TAIPEI
print datetime.now(self.GMT).strftime('%m/%d/%Y %I:%M:%S %p')
self.fetch_message_block = 1
# infinite loop all codes below will not be called
#self.showLoginForm()
self.graphics_render.start()
#self.rcv_brod = 0
#self.broadcast_reciever.start()
pass # end init
def showErrorMsg(self,title,message):
window = Tk()
window.wm_withdraw()
window.geometry("3x2+200+200")
tkMessageBox.showerror(title=title,message=message,parent=window)
def showInfoMsg(self,title,msg):
window = Tk()
window.wm_withdraw()
window.geometry("3x2+"+str(window.winfo_screenwidth()/2)+"+"+str(window.winfo_screenheight()/2))
tkMessageBox.showinfo(title=title, message=msg)
def authenticate(self):
request = {}
request['type'] = 'LOGIN'
global text_font
text_font = ('Calibri', '12')
request['username'] = self.txt_user.get()
request['password'] = self.txt_password.get()
data = self.client.send_request(request)
response = json.loads(data)
res = response['result']
if(res == "0"):
self.showErrorMsg("Account Error","Account not found.")
print "Account Not Existing"
elif(res == "-1"):
self.showErrorMsg("Account Error","Incorrect password.")
print "Wrong Password"
elif(res == "2"):
self.showInfoMsg("Account Information","Account is already online. Please use another account.")
print "Account is already online"
elif(res == "3"):
self.showErrorMsg("Account Error","Maximum client reached. Try again later.")
print "MAX CLIENT REACHED"
elif(res == "1"):
self.showInfoMsg("Account Information","Successfully Logged in!")
print "Login Success"
self.auth_user = self.txt_user.get()
self.frm_login.destroy()
self.rcv_brod = 1
self.showMainForm()
else:
self.showErrorMsg("Unknown Error","An error occured. Try again.")
print "An Error Occured"
pass
def register(self):
request = {}
request['type'] = 'REGISTER'
request['username'] = self.reg_username.get()
request['password'] = self.reg_password.get()
data = self.client.send_request(request)
response = json.loads(data)
res = response['result']
if(res == "-1"):
self.showInfoMsg("Account Information","Account already exists.")
print "Account Already Exists"
elif(res == "1"):
self.showInfoMsg("Account Information","Account created.")
print "Account Created"
self.frm_register.destroy()
self.showLoginForm()
else:
self.showErrorMsg("Unknown Error","An error occured. Try again.")
print "An Error Occured"
self.frm_register.destroy()
pass
def verifyPass(self):
username = self.reg_username.get()
passwrd = self.reg_password.get()
reenter = self.reg_confirm.get()
if(username==""):
self.showInfoMsg("Account Information","Please enter your username.")
elif(passwrd==""):
self.showInfoMsg("Account Information","Please enter your password.")
elif(reenter==""):
self.showInfoMsg("Account Information","Please re-enter your password.")
elif(passwrd==reenter):
self.fromRegToLogin()
else:
self.showErrorMsg("Account Error","Password not matched.")
pass
def broadcast(self,event):
msg = self.msgBox.get("1.0",END)
print "msg here: ", msg
bad_words = ['fuck', 'bitch', 'shit', 'damn', 'piss', 'asshole', 'slut', 'tangina', 'puta', 'gago', 'hudas', 'lintik', 'ulol', 'tarantado', 'buwisit',
'burat', 'kupal', 'leche', 'ungas', 'punyeta', 'hinayupak', 'pucha', 'pesteng yawa', 'pakshet', 'tanga']
index=0
ctr=0
while 1:
if(index==len(bad_words)):
break
if(bad_words[index] in msg.lower()):
ctr=1
break
index+=1
if ctr==1:
self.showErrorMsg("Content Error","Please avoid bad or foul words.")
else:
msg_nrm = unicodedata.normalize('NFKD', msg).encode('ascii','ignore').strip()
request = {}
request['type'] = 'BROADCAST'
request['sender'] = self.auth_user
request['content'] = msg_nrm
request['send_date'] = datetime.now(self.GMT).strftime('%m/%d/%Y %I:%M:%S %p')
while(1==1):
try:
data = self.client.send_request(request)
response = json.loads(data)
except:
self.showInfoMsg("Message Information","Retrying to send message.")
print "Retrying to send"
sleep(0.5)
continue
pass
break
try:
if(response['type'] == "BROADCAST"):
print response
self.msgBox.delete("0.0",END)
except Exception as e:
print e
#self.fetch_broadcast()
pass # end of broadcast
def fetch_messages(self):
request = {}
request['type'] = 'FETCH_PRIVATE'
while self.fetch_message_block==1:
# ok
while 1==1:
# ok
try:
data = self.client.send_request(request)
response = json.loads(data)
except Exception as e:
print "Retrieving Messages: ",e
sleep(0.5)
continue
pass
break
pass # end loop
if(response['type'] == "FETCH_PRIVATE"):
#print
try:
self.private_records = []
msg_counter = 0
while(msg_counter<(len(response)-1)):
line = response[str(msg_counter)]
arrange_me = json.loads(line)
msg_counter+=1
self.private_records.append(arrange_me)
pass
#print 'CHATBOX REFRESHED'
except:
self.showErrorMsg("Message Error","Cannot retrieve private messages.")
print "CANNOT RETRIEVED PRIVATE MESSAGES"
break
pass
pass # end of if
sleep(2)
pass # end of infinite loop
pass # end fetch
# this function refreshes the message box
def fetch_broadcast(self):
request = {}
request['type'] = 'FETCH_BROADCAST'
while 1==1:
# ok
sleep(1)
while 1==1:
# ok
try:
data = self.client.send_request(request)
response = json.loads(data)
except Exception as e:
print "Retrieving Messages: ",e
sleep(0.5)
continue
pass
break
pass # end loop
if(response['type'] == "FETCH_BROADCAST"):
#print
msg_counter = 0
public_message_string = ""
#message loop
while(msg_counter<(len(response)-1)):
line = response[str(msg_counter)]
arrange_me = json.loads(line)
msg_counter+=1
public_message_string += (arrange_me['send_date'] +" >>> [ "+arrange_me['sender'] + " ] : " +arrange_me['content'] + "\n")
pass # end of message loop
try:
self.publicList.configure(state='normal')
self.publicList.delete('1.0', END)
self.publicList.insert(END, public_message_string)
self.publicList.see(END)
self.publicList.configure(state='disabled')
#print 'CHATBOX REFRESHED'
except:
self.showErrorMsg("Message Error","Cannot retrieve messages.")
print "CANNOT RETRIEVED MESSAGES"
break
pass
pass # end of if
pass # end of infinite loop
pass # end fetch
def listClick(self,evt):
try:
selected_index = self.clientList.curselection()
select_string = self.clientList.get(selected_index)
st,name = select_string.split("-")
self.showPrivateMsgForm(name.strip())
except:
print "BAD INDEX at 255"
pass
pass
def fetch_clients(self):
request = {}
request['type'] = 'FETCH_CLIENTS'
while 1==1:
# ok
sleep(2)
while 1==1:
# ok
try:
data = self.client.send_request(request)
response = json.loads(data)
except Exception as e:
print "Retrieving CLIENTS: ",e
sleep(0.5)
continue
pass
break
pass # end loop
if(response['type'] == "FETCH_CLIENTS"):
#print
try:
self.publicList.configure(state='disabled')
#-0---------------Populate online client
user_count = (len(response) - 1)
self.clientList.delete(0,END)
x = 0
while(x < user_count):
user_item = response[str(x)]
user_state = "[ " + user_item['state'] + " ] - " + user_item['username']
self.clientList.insert(END,user_state)
x+=1
pass
#---------------------------------------
pass
#print 'CHATBOX REFRESHED'
except:
self.showErrorMsg("Account Error","Cannot retrieve client list.")
print "CANNOT RETRIEVED CLIENT LIST"
break
pass
pass # end of if
pass # end of infinite loop
pass # end fetch
def change_pass(self,old_pass,new_pass):
request = {}
if(old_pass.get()==""):
self.showInfoMsg("Acount Information", "Please enter your old password.")
return 0
elif(new_pass.get() == ""):
self.showInfoMsg("Account Information", "Please enter a valid new password.")
return 0
elif(new_pass.get() != self.change_confirm_pass.get()):
self.showErrorMsg("Account Error", "New password not matched.")
return 0
request['type'] = 'CHANGE_PASS'
request['user'] = self.auth_user
request['old_pass'] = <PASSWORD>()
request['new_pass'] = <PASSWORD>()
data = self.client.send_request(request)
response = json.loads(data)
res = response['result']
if(res=="-1"):
self.showErrorMsg("Acount Error","Old password not matched.")
elif(res=="1"):
self.showInfoMsg("Account Information", "Password sucessfully changed.")
print res
pass
def logout(self):
self.frm_public.destroy()
exit()
pass
def change_profile(self):
self.showInfoMsg("Application Information","No available process.")
def change_font(self, event):
global combo_box
print combo_box.get()
font = combo_box.get()
if(font=="Arial Black"):
self.publicList.configure(height=22)
text_font = (font,'9')
elif(font=="Cambria"):
self.publicList.configure(width=70)
self.publicList.configure(height=25)
text_font = (font,'10')
elif(font=="Arial"):
self.publicList.configure(width=70)
self.publicList.configure(height=25)
text_font = (font,'9')
else:
self.publicList.configure(height=20)
text_font = (font,'12')
print text_font
self.publicList.configure(font=text_font)
self.clientList.configure(font=text_font)
self.msgBox.configure(font=text_font)
def btn_pm(self):
self.showInfoMsg("Application Information","Double click the user you want to send private message.")
#****************************************************
# THEMES
#****************************************************
def theme1(self):
self.frm_public.configure(background='dodgerblue2')
print "theme1"
pass
def theme2(self):
self.frm_public.configure(background='springgreen2')
print "theme2"
pass
def theme3(self):
self.frm_public.configure(background='midnight blue')
print "theme3"
pass
def theme4(self):
self.frm_public.configure(background='dark slate gray')
print "theme4"
pass
def theme5(self):
self.frm_public.configure(background='Coral')
print "theme5"
pass
def default(self):
self.frm_public.configure(background='white smoke')
print "default"
pass
'''
------------------------------------------------------------------------------------------------------
UI MODULE
------------------------------------------------------------------------------------------------------
'''
def showLoginForm(self):
#createWindow("Login", "350x400+100+200")
self.frm_login = Tk()
self.frm_login.geometry("430x430+"+str((430/2)+(430/2))+"+"+str(430/2-70))
self.frm_login.title("Login")
self.frm_login.resizable(width="false", height="false")
#self.frm_login.geometry("430x430+100+200")
lbl1 = Label(self.frm_login, text="Login", width=10, height=3, fg="#1A4AA0", font="Calibri 19")
lbl1.pack(side=TOP)
usernameFrame = Frame(self.frm_login)
usernameFrame.pack()
lbl2 = Label(usernameFrame, text="Username:", width=10, fg="#1A4AA0", font="Calibri 14")
lbl2.pack(side=LEFT)
self.txt_user = Entry(usernameFrame, fg="#1A4AA0", font="Calibri 14")
self.txt_user.pack(side=LEFT)
passFrame = Frame(self.frm_login)
passFrame.pack()
lbl3 = Label(passFrame, text="Password:", width=10, height=3, fg="#1A4AA0", font="Calibri 14")
lbl3.pack(side=LEFT)
self.txt_password = Entry(passFrame, show="*", fg="#1A4AA0", font="Calibri 14")
self.txt_password.pack(side=LEFT)
buttonFrame = Frame(self.frm_login)
buttonFrame.pack(side=RIGHT, padx=25)
btnLogin = Button(buttonFrame, text="Login", height=1, width=12,
command=self.authenticate, fg="#F0F0F0", bg="#2A3540", font="Calibri 14")
btnLogin.pack(pady=5)
btnRegister = Button(buttonFrame, text="Register",
height=1, width=12, command=self.showRegisterForm,
fg="#F0F0F0", bg="#2A3540", font="Calibri 14")
btnRegister.pack(pady=5)
btnSettings = Button(buttonFrame, text="Connection Settings",
height=2, width=17, command=self.showConnectionForm,
fg="#F0F0F0", bg="#2A3540", font="Calibri 10")
#btnSettings.pack(pady=5)
self.frm_login.mainloop()
def showConnectionForm(self):
con_set =
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 3 13:50:18 2021
Script for computing the annual and seasonal probabilities of the WTs.
It will also calculate the occurrence prob. of the WTs with respect to:
1) Southern Annular Mode indices
2) El Niño-Southern Oscillation indices
3) Madden-Julian Oscillation phase
@author: danilocoutodsouza
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import pylab as pl
from matplotlib.colors import LinearSegmentedColormap
import matplotlib.gridspec as gridspec
import matplotlib.ticker as mticker
from matplotlib.ticker import (MultipleLocator, FixedLocator)
def get_data():
'''
Use Pandas to get data from the csv files.
The first file contains all dates from 1979 to 2010
with all corresponding Wts. The other files contain data
from the climate modes: Southern Oscillation Index (SOI),
Marshall Southern Annular Mode (SAM) index (station-based) and
Real-time Multivariate Madden-Julian Oscillation (MJO) series (RMM).
Sources:
SOI: https://www.ncdc.noaa.gov/teleconnections/enso/indicators/soi/
SAM: https://climatedataguide.ucar.edu/climate-data/marshall-southern-annular-mode-sam-index-station-based
RMM: http://www.bom.gov.au/climate/mjo/graphics/rmm.74toRealtime.txt
'''
# WT data
file = '../all_WT_dates2.txt'
data = pd.read_csv(file)
# ENSO data
data_soi= pd.read_csv('../SOI.txt')
# SAM data
data_sam= pd.read_csv('../SAM.txt')
# MJO data
data_mjo = pd.read_csv('../MJO.txt')
# Temporal range of data
years = np.arange(1979,2011)
mons = np.arange(1,13)
# month indices for SAM dataframe indexing
months = ["JAN","FEB","MAR","APR","MAY","JUN","JUL",
"AUG","SEP","OCT","NOV","DEC"]
# Update the 'data' dataframe to include all climate indices
for y in years:
for m in mons:
# --- SAM ---
# Get SAM index for respective year and month
tmp_sam = data_sam[data_sam['YEAR'] == y][months[m-1]].values[0]
# Attribute the SAM index to the original dataframe
data.loc[(data['year'] == y) & \
(data['month'] == m), ['SAM']] = tmp_sam
# --- SOI ---
# Get SOI index for respective year and month
tmp_soi = data_soi[(data_soi['Year'] == y) & \
(data_soi['Month'] == m)]['Value'].values[0]
# Attribute the SOI index to the original dataframe
data.loc[(data['year'] == y) & \
(data['month'] == m), ['SOI']] = tmp_soi
# --- Loop through days ----
days = data[(data['year']==y) & \
(data['month']==m)]['day']
# --- MJO ---
# Get MJO phase for respective day
for d in days:
tmp_mjo = data_mjo[(data_mjo['year'] == y) & \
(data_mjo['month'] == m) & \
(data_mjo['day'] == d)]['phase'].values[0]
data.loc[(data['year'] == y) & \
(data['month'] == m) & \
(data['day'] == d), ['MJO']] = tmp_mjo
return data
#------------------------
# ANNUAL PROBABILITY
#------------------------
def counter_total(data,wt):
'''
Counter for a given WT in the whole time series
'''
return len(data[data['WT'] == wt])
def counter_per_year(data,wt,year):
'''
Counter for a given WT in a given year
'''
return len(data[(data['year'] == year) & (data['WT'] == wt)])
def calc_probability_per_year(data,wt,year):
'''
Calculates the occurrence probability of a given WT in a given year
'''
ct_wt = counter_per_year(data,wt,year)
ct_tot = len(data[(data['year'] == year)])
return (ct_wt/ct_tot)*100
def make_annual_ts(data,wt):
'''
Make a time series of the annual occurrence probability of a given wt
'''
years = np.arange(1979,2011)
wt_ts = []
for year in years:
wt_ts.append(calc_probability_per_year(data,wt,year))
return wt_ts
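# Hypothetical usage (column names as produced by get_data above):
#
# data = get_data()
# ts = make_annual_ts(data, 1)   # yearly occurrence probabilities (%) of WT 1,
#                                # one value per year from 1979 to 2010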
#------------------------
# INTERANNUAL PROBABILITY
#------------------------
def calc_interannual_prob(data,wt):
'''
Calculates the occurrence probability of a given WT across all years in data
'''
# counter for all days in data
ct_tot = len(data)
# counter for WT occurrence within data
wt_ct_all_years = counter_total(data,wt)
return (wt_ct_all_years/ct_tot)*100
#------------------------
# SEASONAL PROBABILITY
#------------------------
def calc_seasonal_prob(data,wt, season):
'''
Calculates the occurrence probability of a given WT in a given season (DJF, MAM, JJA or SON)
'''
# months and seasons
DJF = [12,1,2]
MAM = [3,4,5]
JJA = [6,7,8]
SON = [9,10,11]
seasons_dict = {'DJF':DJF,'MAM':MAM,'JJA':JJA,'SON':SON}
# select season for indexing
s = seasons_dict[season]
# counters
ct_wt = len(data[(data['month'].isin(s)) & (data['WT'] == wt)])
ct_tot = len(data[(data['month'].isin(s))])
return (ct_wt/ct_tot)*100
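# Quick examples of the probability helpers (illustrative only; 'data' is the
# dataframe returned by get_data()):
#   calc_probability_per_year(data, wt=1, year=1983)  # % of days in 1983 classified as WT 1
#   calc_interannual_prob(data, wt=1)                 # % of all 1979-2010 days classified as WT 1
#   calc_seasonal_prob(data, wt=1, season='DJF')      # % of DJF days classified as WT 1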
#------------------------
# ENSO
#------------------------
def counter_ENSO(data,wt,phase):
'''
Count how many times a given WT occurs during El Niño or La Niña "events"
'''
if phase == 'Nino':
ct_wt = len(data[(data['SOI'] < -1) & (data['WT'] == wt)])
elif phase == 'Nina':
ct_wt = len(data[(data['SOI'] > 1) & (data['WT'] == wt)])
return ct_wt
def calc_probability_ENSO(data,wt,phase):
'''
Calculates the occurrence probability of a given WT for a given ENSO phase
'''
# counter for all days in data matching the given ENSO phase
if phase == 'Nino':
ct_tot = len(data[(data['SOI'] < -1)])
elif phase == 'Nina':
ct_tot = len(data[(data['SOI'] > 1)])
# counter for WT occurrence in data matching the given ENSO phase
ct_wt = counter_ENSO(data,wt,phase)
return (ct_wt/ct_tot)*100
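# Note on the thresholds above: for the SOI, sustained negative values indicate
# El Niño conditions and sustained positive values La Niña, so SOI < -1 / SOI > 1
# pick out the stronger warm/cold episodes. The +/-1 cutoff is a choice made by
# this script rather than a standard definition.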
#------------------------
# SAM
#------------------------
def counter_SAM(data,wt,phase):
'''
Count how many times a given WT occurs during positive/negative SAM "events"
'''
if phase == 'Neg':
ct_wt = len(data[(data['SAM'] < -2) & (data['WT'] == wt)])
elif phase == 'Pos':
ct_wt = len(data[(data['SAM'] > 2) & (data['WT'] == wt)])
return ct_wt
def calc_probability_SAM(data,wt,phase):
'''
Calculates the occurrence probability of a given WT for a given SAM phase
'''
# counter for all days in data matching the given SAM phase
if phase == 'Neg':
ct_tot = len(data[(data['SAM'] < -2)])
elif phase == 'Pos':
ct_tot = len(data[(data['SAM'] > 2)])
# counter for WT occurrence in data matching the given SAM phase
ct_wt = counter_SAM(data,wt,phase)
return (ct_wt/ct_tot)*100
#------------------------
# MJO
#------------------------
def counter_MJO(data,wt,phase):
'''
Count how many times a given WT occurs in a given MJO phase
'''
ct_wt = len(data[(data['MJO'] == phase) & (data['WT'] == wt)])
return ct_wt
def calc_probability_MJO(data,wt,phase):
'''
Calculates the occurrence probability of a given WT for a given MJO phase
'''
# counter for all days in data matching the given MJO phase
ct_tot = len(data[(data['MJO'] == phase)])
# counter for WT occurrence in data matching the given MJO phase
ct_wt = counter_MJO(data,wt,phase)
return (ct_wt/ct_tot)*100
#------------------------
# PLOTS
#------------------------
col1 = ['#f4f0f0','#e7d5be','#afcfdf','#497fc9','#485fb0']
col2 = ['#ffcdb2','#ffb4a2','#e5989b','#b5838d','#6d6875']
col3 = ['#6a040f','#9d0208','#d00000','#dc2f02','#e85d04', '#f48c06','#faa307']
col4 = ['#6a040f','#f3722c','#f8961e','#f9844a','#f9c74f',
'#90be6d','#43aa8b','#4d908e','#577590','#277da1']
col5 = ['#1a535c','#4ecdc4','#f7fff7','#ff6b6b','#ffe66d']
col6 = ['#d9ed92','#b5e48c','#99d98c','#76c893','#52b69a',
'#34a0a4','#168aad','#1a759f','#1e6091','#184e77']
col7 = ['#f0ead2','#dde5b6','#adc178','#a98467','#6c584c']
cmap = LinearSegmentedColormap.from_list(
'MyMap', col1, N=10)
def annotate_wts(ax):
# Loop over data dimensions and create text annotations.
ct = 1
for j in range(0,6):
for i in range(0,6):
ax.text(j, i, ct,
ha="center", va="center",
color="k", fontsize = 12)
ct += 1
def plot_annual_prob(data,ax,fig):
probs = []
for wt in range(1,37):
probs.append(calc_interannual_prob(data,wt))
probs = np.reshape(probs,(2,18),order='F')
cf1 = ax.imshow(probs, cmap=cmap)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
# colorbar
min_, max_ = np.amin(probs), np.amax(probs)
pos = ax.get_position()
cbar_ax = fig.add_axes([pos.x0, pos.y0-0.025, pos.width, pos.height/4])
cbar = plt.colorbar(cf1, ticks=[min_, max_],
cax=cbar_ax, orientation='horizontal')
cbar.ax.set_xticklabels(['Low', 'High'])
cbar.ax.tick_params(labelsize=14)
# annotate WTs
ct = 1
for j in range(0,18):
for i in range(0,2):
ax.text(j, i, ct,
ha="center", va="center",
color="k", fontsize = 12)
ct += 1
def plot_season_prob(data,season,ax):
probs = []
for wt in range(1,37):
probs.append(calc_seasonal_prob(data,wt, season))
probs = np.reshape(probs,(6,6),order='F')
ax.imshow(probs, cmap=cmap)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
annotate_wts(ax)
def plot_mjo_prob(data,phase,ax):
probs = []
for wt in range(1,37):
probs.append(calc_probability_MJO(data,wt, phase))
probs = np.reshape(probs,(6,6),order='F')
ax.imshow(probs, cmap=cmap)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
annotate_wts(ax)
def plot_soi_prob(data,phase,ax):
probs = []
for wt in range(1,37):
probs.append(calc_probability_ENSO(data,wt, phase))
probs = np.reshape(probs,(6,6),order='F')
ax.imshow(probs, cmap=cmap)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
annotate_wts(ax)
def plot_sam_prob(data,phase,ax):
probs = []
for wt in range(1,37):
probs.append(calc_probability_SAM(data,wt, phase))
probs = np.reshape(probs,(6,6),order='F')
ax.imshow(probs, cmap=cmap)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
annotate_wts(ax)
# Plot time series of probability
# 1) 5-year smoothed
# 2) All probs
def smooth_ts(ts):
means = []
for i in range(0, len(ts), 5):  # non-overlapping 5-year blocks (avoids an empty final slice, and a NaN mean, when len(ts) is a multiple of 5)
means.append(np.mean(ts[i:i+5]))
return means
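# smooth_ts() returns non-overlapping 5-element block means: for the 32-year
# series used here (1979-2010) that gives 7 values, the last block covering only
# 2009-2010, which matches the years[::5] x-coordinates used in plot_ts() below.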
def plot_ts(data,wt,ax):
years = np.arange(1979,2011)
ts = make_annual_ts(data,wt)
ax.plot(years,ts, linewidth=2,linestyle=(0, (5, 1)))
smoothed = smooth_ts(ts)
ax.plot(years[::5],smoothed, linewidth=2, color='k',alpha=0.9)
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
ax.xaxis.set_major_locator(FixedLocator(years[1::10]))
ax.xaxis.set_minor_locator(MultipleLocator(5))
plt.xticks(rotation=90)
# ---------------
def main(data):
# data = get_data()
fig = plt.figure(figsize=(19.5,12) , constrained_layout=False)
gs0 = gridspec.GridSpec(1, 2, wspace=0.07,hspace=0)
gs00 = gridspec.GridSpecFromSubplotSpec(5, 4, subplot_spec=gs0[0],
wspace=0,hspace=0.2)
gs01 = gridspec.GridSpecFromSubplotSpec(6, 6, subplot_spec=gs0[1],
wspace=0.05,hspace=0.05)
props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
# === Plot probabilities ===
axs00 = []
# Annual
axs00.append(plt.subplot(gs00[0, :4]))
ax = axs00[-1]
plot_annual_prob(data,ax,fig)
ax.text(0.45,1.07, 'Annual', fontsize = 16, transform=ax.transAxes)
# Seasons
for col,season in zip(range(4),['DJF','MAM','JJA','SON']):
axs00.append(plt.subplot(gs00[1, col]))
ax = axs00[-1]
plot_season_prob(data,season,ax)
ax.text(0.35,1.05, season, fontsize = 16, transform=ax.transAxes)
# MJO
phase = 1
for row in range(2,4):
for col in range(4):
axs00.append(plt.subplot(gs00[row, col]))
ax = axs00[-1]
plot_mjo_prob(data,phase,ax)
ax.text(0.3,1.05, 'MJO '+str(phase), fontsize = 16, transform=ax.transAxes)
phase += 1
# SOI
titles = ['SOI > 1', 'SOI < -1']
for col, phase, title in zip(range(2),['Nina','Nino'], titles):
axs00.append(plt.subplot(gs00[4, col]))
ax = axs00[-1]
plot_soi_prob(data,phase,ax)
ax.text(0.2,1.05, title, fontsize = 16, transform=ax.transAxes)
# SAM
titles = ['SAM > 2', 'SAM < -2']
for col, phase, title in zip(range(2,4),['Pos','Neg'], titles):
axs00.append(plt.subplot(gs00[4, col]))
ax = axs00[-1]
plot_sam_prob(data,phase,ax)
ax.text(0.2,1.05, title, fontsize = 16, transform=ax.transAxes)
# === Time series ===
axs01= []
for wt in range(1,37):
axs01.append(plt.subplot(gs01[wt-1]))
ax = axs01[-1]
plot_ts(data,wt,ax)
dict_lat_long = {
"1000": {"lat": 50.8427501, "lng": 4.3515499},
"1020": {"lat": 50.884218, "lng": 4.3580002},
"1030": {"lat": 50.8625502, "lng": 4.37966},
"1040": {"lat": 50.8335737, "lng": 4.3943477},
"1050": {"lat": 50.8235717, "lng": 4.3766927},
"1060": {"lat": 50.8288755, "lng": 4.3437338},
"1070": {"lat": 50.8238652, "lng": 4.2986584},
"1080": {"lat": 50.8569755, "lng": 4.3193694},
"1081": {"lat": 50.8638742, "lng": 4.3246335},
"1082": {"lat": 50.8652842, "lng": 4.2943085},
"1083": {"lat": 50.8747022, "lng": 4.3102195},
"1090": {"lat": 50.8747739, "lng": 4.3314996},
"1120": {"lat": 50.8936568, "lng": 4.3845296},
"1130": {"lat": 50.8927187, "lng": 4.4094535},
"1140": {"lat": 50.8719806, "lng": 4.4061878},
"1150": {"lat": 50.8286387, "lng": 4.4494347},
"1160": {"lat": 50.8108671, "lng": 4.4372273},
"1170": {"lat": 50.7881573, "lng": 4.4180065},
"1180": {"lat": 50.7904904, "lng": 4.3559193},
"1190": {"lat": 50.8125885, "lng": 4.3218906},
"1200": {"lat": 50.850308, "lng": 4.4278228},
"1210": {"lat": 50.8544587, "lng": 4.3708841},
"1300": {"lat": 50.7179009, "lng": 4.6192995},
"1301": {"lat": 50.7198592, "lng": 4.569574},
"1310": {"lat": 50.7345253, "lng": 4.4594749},
"1315": {"lat": 50.7168011, "lng": 4.7758129},
"1320": {"lat": 50.7798613, "lng": 4.7614181},
"1325": {"lat": 50.6785047, "lng": 4.6941205},
"1330": {"lat": 50.7109736, "lng": 4.5306325},
"1331": {"lat": 50.7346415, "lng": 4.5461847},
"1332": {"lat": 50.722754, "lng": 4.4950562},
"1340": {"lat": 50.6672105, "lng": 4.5859684},
"1341": {"lat": 50.6615232, "lng": 4.532},
"1342": {"lat": 50.6812199, "lng": 4.5601556},
"1348": {"lat": 50.6738567, "lng": 4.6151693},
"1350": {"lat": 50.6877943, "lng": 4.9719204},
"1357": {"lat": 50.7497791, "lng": 4.9829359},
"1360": {"lat": 50.6429684, "lng": 4.8008233},
"1367": {"lat": 50.6496669, "lng": 4.8897781},
"1370": {"lat": 50.7235257, "lng": 4.864936},
"1380": {"lat": 50.687284, "lng": 4.4706637},
"1390": {"lat": 50.7415171, "lng": 4.6796007},
"1400": {"lat": 50.5886574, "lng": 4.3308649},
"1401": {"lat": 50.6137787, "lng": 4.3533583},
"1402": {"lat": 50.5917437, "lng": 4.3871558},
"1404": {"lat": 50.6041731, "lng": 4.261999},
"1410": {"lat": 50.7053833, "lng": 4.4049735},
"1420": {"lat": 50.6969022, "lng": 4.3538379},
"1421": {"lat": 50.6560168, "lng": 4.3350676},
"1428": {"lat": 50.6443017, "lng": 4.3705251},
"1430": {"lat": 50.6793466, "lng": 4.1215403},
"1435": {"lat": 50.6347598, "lng": 4.6180981},
"1440": {"lat": 50.674025, "lng": 4.2695246},
"1450": {"lat": 50.5929852, "lng": 4.6236886},
"1457": {"lat": 50.637198, "lng": 4.6872076},
"1460": {"lat": 50.6326887, "lng": 4.2632352},
"1461": {"lat": 50.6497513, "lng": 4.2937473},
"1470": {"lat": 50.6233954, "lng": 4.5102727},
"1471": {"lat": 50.5977336, "lng": 4.434661},
"1472": {"lat": 50.6222503, "lng": 4.4043555},
"1473": {"lat": 50.6311704, "lng": 4.4430615},
"1474": {"lat": 50.627288, "lng": 4.4766613},
"1476": {"lat": 50.5804921, "lng": 4.4102122},
"1480": {"lat": 50.6767109, "lng": 4.1954896},
"1490": {"lat": 50.6214692, "lng": 4.5597931},
"1495": {"lat": 50.5605218, "lng": 4.5250276},
"1500": {"lat": 50.7324848, "lng": 4.2349129},
"1501": {"lat": 50.7379887, "lng": 4.2611018},
"1502": {"lat": 50.7010009, "lng": 4.2425767},
"1540": {"lat": 50.7233164, "lng": 4.0536565},
"1541": {"lat": 50.6997768, "lng": 3.9822484},
"1547": {"lat": 50.7068251, "lng": 3.9276615},
"1560": {"lat": 50.7660388, "lng": 4.4519958},
"1570": {"lat": 50.7465643, "lng": 3.9827122},
"1600": {"lat": 50.7842441, "lng": 4.2427430},
"1601": {"lat": 50.7882883, "lng": 4.2955941},
"1602": {"lat": 50.8054095, "lng": 4.2334883},
"1620": {"lat": 50.795533, "lng": 4.3079826},
"1630": {"lat": 50.7668322, "lng": 4.3455373},
"1640": {"lat": 50.7530988, "lng": 4.3812856},
"1650": {"lat": 50.7686913, "lng": 4.3120069},
"1651": {"lat": 50.7725332, "lng": 4.2783289},
"1652": {"lat": 50.7473028, "lng": 4.3274016},
"1653": {"lat": 50.7302243, "lng": 4.296757},
"1654": {"lat": 50.7478294, "lng": 4.2761035},
"1670": {"lat": 50.7360287, "lng": 4.1360863},
"1671": {"lat": 50.7812236, "lng": 4.1737326},
"1673": {"lat": 50.7353399, "lng": 4.1796177},
"1674": {"lat": 50.7404048, "lng": 4.1560806},
"1700": {"lat": 50.8572288, "lng": 4.2413289},
"1701": {"lat": 50.8340174, "lng": 4.2346965},
"1702": {"lat": 50.8745835, "lng": 4.2658227},
"1703": {"lat": 50.8378402, "lng": 4.2009828},
"1730": {"lat": 50.9054755, "lng": 4.2048959},
"1731": {"lat": 50.886979, "lng": 4.2833417},
"1740": {"lat": 50.8698353, "lng": 4.1808317},
"1741": {"lat": 50.849539, "lng": 4.165339},
"1742": {"lat": 50.8740836, "lng": 4.1347029},
"1745": {"lat": 50.9636894, "lng": 4.1823601},
"1750": {"lat": 50.8026788, "lng": 4.1645967},
"1755": {"lat": 50.7845808, "lng": 4.0873546},
"1760": {"lat": 50.8314294, "lng": 4.0879776},
"1761": {"lat": 50.8512275, "lng": 4.129167},
"1770": {"lat": 50.8625365, "lng": 4.0925208},
"1780": {"lat": 50.9068107, "lng": 4.3114128},
"1785": {"lat": 50.9451904, "lng": 4.2609401},
"1790": {"lat": 50.9036003, "lng": 4.1109971},
"1800": {"lat": 50.9377435, "lng": 4.4605278},
"1820": {"lat": 50.9205036, "lng": 4.5017642},
"1830": {"lat": 50.9125185, "lng": 4.4376749},
"1831": {"lat": 50.891301, "lng": 4.4468571},
"1840": {"lat": 51.0075124, "lng": 4.2704431},
"1850": {"lat": 50.9356235, "lng": 4.3785596},
"1851": {"lat": 50.9667658, "lng": 4.3834362},
"1852": {"lat": 50.9530611, "lng": 4.3638681},
"1853": {"lat": 50.9083671, "lng": 4.3398332},
"1860": {"lat": 50.9478621, "lng": 4.3302827},
"1861": {"lat": 50.9695502, "lng": 4.3085432},
"1880": {"lat": 50.9993245, "lng": 4.3508231},
"1910": {"lat": 50.9338827, "lng": 4.5605498},
"1930": {"lat": 50.8795024, "lng": 4.4826212},
"1932": {"lat": 50.8705648, "lng": 4.4435639},
"1933": {"lat": 50.8573308, "lng": 4.5167076},
"1950": {"lat": 50.851357, "lng": 4.4687138},
"1970": {"lat": 50.8479299, "lng": 4.4901061},
"1980": {"lat": 50.9802185, "lng": 4.4421349},
"1981": {"lat": 50.9936415, "lng": 4.5009462},
"1982": {"lat": 50.972874, "lng": 4.4976053},
"2000": {"lat": 51.2198771, "lng": 4.4011356},
"2018": {"lat": 51.2037695, "lng": 4.4112637},
"2020": {"lat": 51.1890846, "lng": 4.3836284},
"2030": {"lat": 51.2763963, "lng": 4.3624604},
"2040": {"lat": 51.3418306, "lng": 4.2964605},
"2050": {"lat": 51.2287575, "lng": 4.3740221},
"2060": {"lat": 51.2269388, "lng": 4.4276298},
"2070": {"lat": 51.2202012, "lng": 4.3220449},
"2100": {"lat": 51.2145255, "lng": 4.4731932},
"2110": {"lat": 51.2334311, "lng": 4.5270315},
"2140": {"lat": 51.2138078, "lng": 4.4439088},
"2150": {"lat": 51.1940393, "lng": 4.4866237},
"2160": {"lat": 51.2021676, "lng": 4.5219648},
"2170": {"lat": 51.2472392, "lng": 4.4403455},
"2180": {"lat": 51.2817354, "lng": 4.4299536},
"2200": {"lat": 51.1741777, "lng": 4.8290413},
"2220": {"lat": 51.0633847, "lng": 4.7210601},
"2221": {"lat": 51.0460108, "lng": 4.7625115},
"2222": {"lat": 51.1083726, "lng": 4.7731528},
"2223": {"lat": 51.0277811, "lng": 4.6960515},
"2230": {"lat": 51.0462622, "lng": 4.8832192},
"2235": {"lat": 51.0647242, "lng": 4.816508},
"2240": {"lat": 51.2021793, "lng": 4.6554212},
"2242": {"lat": 51.2264531, "lng": 4.7040702},
"2243": {"lat": 51.1957105, "lng": 4.68616},
"2250": {"lat": 51.1666078, "lng": 4.885151},
"2260": {"lat": 51.1086092, "lng": 4.8941289},
"2270": {"lat": 51.1356598, "lng": 4.7460081},
"2275": {"lat": 51.2574943, "lng": 4.8434786},
"2280": {"lat": 51.1832616, "lng": 4.7349959},
"2288": {"lat": 51.162442, "lng": 4.7314667},
"2290": {"lat": 51.2189405, "lng": 4.7601857},
"2300": {"lat": 51.3377191, "lng": 4.9344756},
"2310": {"lat": 51.3550139, "lng": 4.7706425},
"2320": {"lat": 51.403297, "lng": 4.7468218},
"2321": {"lat": 51.4516115, "lng": 4.7229316},
"2322": {"lat": 51.444058, "lng": 4.7794791},
"2323": {"lat": 51.4006985, "lng": 4.8105746},
"2328": {"lat": 51.4694102, "lng": 4.8031381},
"2330": {"lat": 51.361787, "lng": 4.861625},
"2340": {"lat": 51.3211024, "lng": 4.8288945},
"2350": {"lat": 51.3030836, "lng": 4.8834494},
"2360": {"lat": 51.3136124, "lng": 5.0073017},
"2370": {"lat": 51.3421481, "lng": 5.0768229},
"2380": {"lat": 51.3762015, "lng": 5.0185568},
"2381": {"lat": 51.4190551, "lng": 5.0004736},
"2382": {"lat": 51.4586402, "lng": 5.0592852},
"2387": {"lat": 51.4046963, "lng": 4.9044992},
"2390": {"lat": 51.3043858, "lng": 4.7236195},
"2400": {"lat": 51.2213789, "lng": 5.1962097},
"2430": {"lat": 51.0873823, "lng": 5.0242977},
"2431": {"lat": 51.0687686, "lng": 4.9699406},
"2440": {"lat": 51.1776625, "lng": 4.9957571},
"2450": {"lat": 51.121838, "lng": 5.0752598},
"2460": {"lat": 51.2324099, "lng": 4.9414896},
"2470": {"lat": 51.2686279, "lng": 5.0763087},
"2480": {"lat": 51.2455742, "lng": 5.1235191},
"2490": {"lat": 51.1578305, "lng": 5.2100666},
"2491": {"lat": 51.1338157, "lng": 5.160699},
"2500": {"lat": 51.1208918, "lng": 4.5911588},
"2520": {"lat": 51.1942577, "lng": 4.5905266},
"2530": {"lat": 51.1664397, "lng": 4.4971700},
"2531": {"lat": 51.1781205, "lng": 4.5339282},
"2540": {"lat": 51.1488217, "lng": 4.4785055},
"2547": {"lat": 51.1270385, "lng": 4.5003232},
"2550": {"lat": 51.1267179, "lng": 4.4401844},
"2560": {"lat": 51.1595224, "lng": 4.6737338},
"2570": {"lat": 51.097092, "lng": 4.5302842},
"2580": {"lat": 51.0565154, "lng": 4.6312078},
"2590": {"lat": 51.1025498, "lng": 4.6578677},
"2600": {"lat": 51.1923982, "lng": 4.4347991},
"2610": {"lat": 51.1652924, "lng": 4.3893798},
"2620": {"lat": 51.1463366, "lng": 4.3358751},
"2627": {"lat": 51.1250483, "lng": 4.3451903},
"2630": {"lat": 51.1340539, "lng": 4.3844742},
"2640": {"lat": 51.1728522, "lng": 4.4665225},
"2650": {"lat": 51.152982, "lng": 4.4380338},
"2660": {"lat": 51.1771171, "lng": 4.3532966},
"2800": {"lat": 51.0208132, "lng": 4.4737954},
"2801": {"lat": 51.0494135, "lng": 4.4090115},
"2811": {"lat": 51.0236637, "lng": 4.3986574},
"2812": {"lat": 51.013932, "lng": 4.5168262},
"2820": {"lat": 51.0169318, "lng": 4.5741123},
"2830": {"lat": 51.0463255, "lng": 4.3518478},
"2840": {"lat": 51.0910115, "lng": 4.4155779},
"2845": {"lat": 51.1071825, "lng": 4.3329557},
"2850": {"lat": 51.0924256, "lng": 4.3779474},
"2860": {"lat": 51.0614235, "lng": 4.5055555},
"2861": {"lat": 51.0639394, "lng": 4.5754857},
"2870": {"lat": 51.068976, "lng": 4.3050065},
"2880": {"lat": 51.092523, "lng": 4.2331087},
"2890": {"lat": 51.0558846, "lng": 4.2338458},
"2900": {"lat": 51.2706562, "lng": 4.5139892},
"2910": {"lat": 51.4406708, "lng": 4.464856},
"2920": {"lat": 51.398852, "lng": 4.4581628},
"2930": {"lat": 51.3133626, "lng": 4.4954295},
"2940": {"lat": 51.339028, "lng": 4.3722637},
"2950": {"lat": 51.3361728, "lng": 4.4481538},
"2960": {"lat": 51.3193688, "lng": 4.6111527},
"2970": {"lat": 51.2468546, "lng": 4.5864545},
"2980": {"lat": 51.2636928, "lng": 4.6912075},
"2990": {"lat": 51.4049021, "lng": 4.5741285},
"3000": {"lat": 50.8815197, "lng": 4.6967578},
"3001": {"lat": 50.8612986, "lng": 4.680773},
"3010": {"lat": 50.890366, "lng": 4.7360777},
"3012": {"lat": 50.912617, "lng": 4.7019807},
"3018": {"lat": 50.9338612, "lng": 4.6929276},
"3020": {"lat": 50.9091893, "lng": 4.6572688},
"3040": {"lat": 50.7858734, "lng": 4.611704},
"3050": {"lat": 50.8282606, "lng": 4.659534},
"3051": {"lat": 50.8048632, "lng": 4.6484154},
"3052": {"lat": 50.8247884, "lng": 4.7106019},
"3053": {"lat": 50.8085156, "lng": 4.6970956},
"3054": {"lat": 50.8261908, "lng": 4.6902508},
"3060": {"lat": 50.8632924, "lng": 4.6307637},
"3061": {"lat": 50.8440508, "lng": 4.5898822},
"3070": {"lat": 50.8855654, "lng": 4.5374772},
"3071": {"lat": 50.8968496, "lng": 4.5733837},
"3078": {"lat": 50.8761164, "lng": 4.5699968},
"3080": {"lat": 50.8257675, "lng": 4.5241553},
"3090": {"lat": 50.7681029, "lng": 4.5336216},
"3110": {"lat": 50.9535967, "lng": 4.7215113},
"3111": {"lat": 50.9515561, "lng": 4.7717425},
"3118": {"lat": 50.9748563, "lng": 4.7124551},
"3120": {"lat": 50.9956159, "lng": 4.7159551},
"3128": {"lat": 51.0049803, "lng": 4.7428591},
"3130": {"lat": 51.0034426, "lng": 4.7806054},
"3140": {"lat": | |
and self.times is not None:
_dict['times'] = self.times.to_dict()
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this CpuHealthStats object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'CpuHealthStats') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'CpuHealthStats') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class CpuHealthStatsTimes():
"""
CpuHealthStatsTimes.
:attr float idle: (optional) ms CPU is in idle.
:attr float irq: (optional) ms CPU is in irq.
:attr float nice: (optional) ms CPU is in nice.
:attr float sys: (optional) ms CPU is in sys.
:attr float user: (optional) ms CPU is in user.
"""
def __init__(self,
*,
idle: float = None,
irq: float = None,
nice: float = None,
sys: float = None,
user: float = None) -> None:
"""
Initialize a CpuHealthStatsTimes object.
:param float idle: (optional) ms CPU is in idle.
:param float irq: (optional) ms CPU is in irq.
:param float nice: (optional) ms CPU is in nice.
:param float sys: (optional) ms CPU is in sys.
:param float user: (optional) ms CPU is in user.
"""
self.idle = idle
self.irq = irq
self.nice = nice
self.sys = sys
self.user = user
@classmethod
def from_dict(cls, _dict: Dict) -> 'CpuHealthStatsTimes':
"""Initialize a CpuHealthStatsTimes object from a json dictionary."""
args = {}
if 'idle' in _dict:
args['idle'] = _dict.get('idle')
if 'irq' in _dict:
args['irq'] = _dict.get('irq')
if 'nice' in _dict:
args['nice'] = _dict.get('nice')
if 'sys' in _dict:
args['sys'] = _dict.get('sys')
if 'user' in _dict:
args['user'] = _dict.get('user')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a CpuHealthStatsTimes object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'idle') and self.idle is not None:
_dict['idle'] = self.idle
if hasattr(self, 'irq') and self.irq is not None:
_dict['irq'] = self.irq
if hasattr(self, 'nice') and self.nice is not None:
_dict['nice'] = self.nice
if hasattr(self, 'sys') and self.sys is not None:
_dict['sys'] = self.sys
if hasattr(self, 'user') and self.user is not None:
_dict['user'] = self.user
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this CpuHealthStatsTimes object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'CpuHealthStatsTimes') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'CpuHealthStatsTimes') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
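# Illustrative round trip (the numbers are made-up example values):
#   times = CpuHealthStatsTimes.from_dict({'idle': 131397203.0, 'user': 4880362.0})
#   times.to_dict()   # -> {'idle': 131397203.0, 'user': 4880362.0}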
class CreateCaBodyConfigOverride():
"""
Set `config_override` to create the root/initial enroll id and enroll secret as well
as enabling custom CA configurations (such as using postgres). See the [Fabric CA
configuration
file](https://hyperledger-fabric-ca.readthedocs.io/en/release-1.4/serverconfig.html)
for more information about each parameter.
The field `tlsca` is optional. The IBP console will copy the value of
`config_override.ca` into `config_override.tlsca` if `config_override.tlsca` is
omitted (which is recommended).
*The nested field **names** below are not case-sensitive.*.
:attr ConfigCACreate ca:
:attr ConfigCACreate tlsca: (optional)
"""
def __init__(self,
ca: 'ConfigCACreate',
*,
tlsca: 'ConfigCACreate' = None) -> None:
"""
Initialize a CreateCaBodyConfigOverride object.
:param ConfigCACreate ca:
:param ConfigCACreate tlsca: (optional)
"""
self.ca = ca
self.tlsca = tlsca
@classmethod
def from_dict(cls, _dict: Dict) -> 'CreateCaBodyConfigOverride':
"""Initialize a CreateCaBodyConfigOverride object from a json dictionary."""
args = {}
if 'ca' in _dict:
args['ca'] = ConfigCACreate.from_dict(_dict.get('ca'))
else:
raise ValueError('Required property \'ca\' not present in CreateCaBodyConfigOverride JSON')
if 'tlsca' in _dict:
args['tlsca'] = ConfigCACreate.from_dict(_dict.get('tlsca'))
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a CreateCaBodyConfigOverride object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'ca') and self.ca is not None:
_dict['ca'] = self.ca.to_dict()
if hasattr(self, 'tlsca') and self.tlsca is not None:
_dict['tlsca'] = self.tlsca.to_dict()
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this CreateCaBodyConfigOverride object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'CreateCaBodyConfigOverride') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'CreateCaBodyConfigOverride') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
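# Illustrative sketch (the keys inside 'ca' are placeholders; the real schema is
# the Fabric CA configuration file linked in the class docstring):
#   override = CreateCaBodyConfigOverride.from_dict({'ca': {...}})
#   override.to_dict()   # 'tlsca' omitted, so the IBP console copies 'ca' into it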
class CreateCaBodyResources():
"""
CPU and memory properties. This feature is not available if using a free Kubernetes
cluster.
:attr ResourceObject ca: This field requires the use of Fabric v1.4.* and
higher.
"""
def __init__(self,
ca: 'ResourceObject') -> None:
"""
Initialize a CreateCaBodyResources object.
:param ResourceObject ca: This field requires the use of Fabric v1.4.* and
higher.
"""
self.ca = ca
@classmethod
def from_dict(cls, _dict: Dict) -> 'CreateCaBodyResources':
"""Initialize a CreateCaBodyResources object from a json dictionary."""
args = {}
if 'ca' in _dict:
args['ca'] = ResourceObject.from_dict(_dict.get('ca'))
else:
raise ValueError('Required property \'ca\' not present in CreateCaBodyResources JSON')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a CreateCaBodyResources object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'ca') and self.ca is not None:
_dict['ca'] = self.ca.to_dict()
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this CreateCaBodyResources object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'CreateCaBodyResources') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'CreateCaBodyResources') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class CreateCaBodyStorage():
"""
Disk space properties. This feature is not available if using a free Kubernetes
cluster.
:attr StorageObject ca:
"""
def __init__(self,
ca: 'StorageObject') -> None:
"""
Initialize a CreateCaBodyStorage object.
:param StorageObject ca:
"""
self.ca = ca
@classmethod
def from_dict(cls, _dict: Dict) -> 'CreateCaBodyStorage':
"""Initialize a CreateCaBodyStorage object from a json dictionary."""
args = {}
if 'ca' in _dict:
args['ca'] = StorageObject.from_dict(_dict.get('ca'))
else:
raise ValueError('Required property \'ca\' not present in CreateCaBodyStorage JSON')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a CreateCaBodyStorage object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'ca') and self.ca is not None:
_dict['ca'] = self.ca.to_dict()
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this CreateCaBodyStorage object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'CreateCaBodyStorage') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'CreateCaBodyStorage') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class CreateOrdererRaftBodyResources():
"""
CPU and memory properties. This feature is not available if using a free Kubernetes
cluster.
:attr ResourceObject orderer: This field requires the use of Fabric v1.4.* and
higher.
:attr ResourceObject proxy: (optional) This field requires the use of Fabric
v1.4.* and higher.
"""
def __init__(self,
orderer: 'ResourceObject',
*,
proxy: 'ResourceObject' = None) -> None:
"""
Initialize a CreateOrdererRaftBodyResources object.
:param ResourceObject orderer: This field requires the use of Fabric v1.4.*
and higher.
:param ResourceObject proxy: (optional) This field requires the use of
Fabric v1.4.* and higher.
"""
self.orderer = orderer
self.proxy = proxy
@classmethod
def from_dict(cls, _dict: Dict) -> 'CreateOrdererRaftBodyResources':
"""Initialize a CreateOrdererRaftBodyResources object from a json dictionary."""
args = {}
if 'orderer' in _dict:
args['orderer'] = ResourceObject.from_dict(_dict.get('orderer'))
else:
raise ValueError('Required property \'orderer\' not present in CreateOrdererRaftBodyResources JSON')
if 'proxy' in _dict:
args['proxy'] = ResourceObject.from_dict(_dict.get('proxy'))
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a CreateOrdererRaftBodyResources object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'orderer') and self.orderer is not None:
_dict['orderer'] = self.orderer.to_dict()
if hasattr(self, 'proxy') and self.proxy is not None:
_dict['proxy'] = self.proxy.to_dict()
return _dict
# rle/tests/test_decode.py
"""Tests for decoding RLE data."""
from copy import deepcopy
from struct import pack
import numpy as np
import pytest
try:
from pydicom import dcmread
from pydicom.encaps import generate_pixel_data_frame
from pydicom.pixel_data_handlers.rle_handler import (
_parse_rle_header, _rle_decode_frame, _rle_decode_segment
)
from pydicom.pixel_data_handlers.util import (
pixel_dtype, reshape_pixel_array
)
from pydicom.uid import RLELossless
HAVE_PYDICOM = True
except ImportError:
HAVE_PYDICOM = False
from rle.data import get_indexed_datasets
from rle._rle import decode_segment, decode_frame, parse_header
from rle.utils import generate_frames, pixel_array
INDEX = get_indexed_datasets('1.2.840.10008.1.2.5')
HEADER_DATA = [
# (Number of segments, offsets)
(0, []),
(1, [64]),
(2, [64, 16]),
(8, [64, 16, 31, 55, 62, 110, 142, 551]),
(14, [64, 16, 31, 55, 62, 110, 142, 551, 641, 456, 43, 11, 6, 55]),
(15, [64, 16, 31, 55, 62, 110, 142, 551, 641, 456, 43, 11, 6, 55, 9821]),
]
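# For reference, the header built by these tests follows the DICOM RLE layout:
# 64 bytes holding 16 little-endian 32-bit values, i.e. the segment count followed
# by up to 15 segment offsets, zero-padded to the full 64 bytes. For example, two
# segments at offsets 64 and 16 encode as
#   pack('<L', 2) + pack('<2L', 64, 16) + b'\x00' * 52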
class TestParseHeader:
"""Tests for rle._rle.parse_header()."""
def test_invalid_header_length(self):
"""Test exception raised if header is not 64 bytes long."""
msg = r"The RLE header must be 64 bytes long"
for length in [0, 1, 63, 65]:
with pytest.raises(ValueError, match=msg):
parse_header(b'\x00' * length)
@pytest.mark.parametrize('nr_segments, offsets', HEADER_DATA)
def test_parse_header(self, nr_segments, offsets):
"""Test parsing header data."""
# Encode the header
header = bytearray()
header.extend(pack('<L', nr_segments))
header.extend(pack(f'<{len(offsets)}L', *offsets))
# Add padding
header.extend(b'\x00' * (64 - len(header)))
offsets += [0] * (15 - len(offsets))
assert len(header) == 64
assert offsets == parse_header(bytes(header))
class TestDecodeFrame:
"""Tests for rle._rle.decode_frame()."""
def as_bytes(self, offsets):
d = [len(offsets)] + offsets
d += [0] * (16 - len(d))
return pack("<16l", *d)
def test_bits_allocated_zero_raises(self):
"""Test exception raised for BitsAllocated 0."""
msg = (
r"The \(0028,0100\) 'Bits Allocated' value must be 8, 16, 32 or 64"
)
with pytest.raises(ValueError, match=msg):
decode_frame(b'\x00\x00\x00\x00', 1, 0, '<')
def test_bits_allocated_not_octal_raises(self):
"""Test exception raised for BitsAllocated not a multiple of 8."""
msg = (
r"The \(0028,0100\) 'Bits Allocated' value must be 8, 16, 32 or 64"
)
with pytest.raises(ValueError, match=msg):
decode_frame(b'\x00\x00\x00\x00', 1, 12, '<')
def test_bits_allocated_large_raises(self):
"""Test exception raised for BitsAllocated greater than 64."""
msg = (
r"The \(0028,0100\) 'Bits Allocated' value must be 8, 16, 32 or 64"
)
with pytest.raises(ValueError, match=msg):
decode_frame(b'\x00\x00\x00\x00', 1, 72, '<')
def test_insufficient_data_for_header_raises(self):
"""Test exception raised if insufficient data."""
msg = r"Frame is not long enough to contain RLE encoded data"
with pytest.raises(ValueError, match=msg):
decode_frame(b'\x00\x00\x00\x00', 1, 8, '<')
def test_no_data_raises(self):
"""Test exception raised if no data."""
msg = r"Frame is not long enough to contain RLE encoded data"
with pytest.raises(ValueError, match=msg):
decode_frame(b'', 1, 8, '<')
def test_invalid_first_offset_raises(self):
"""Test exception if invalid first offset."""
msg = r"Invalid segment offset found in the RLE header"
d = self.as_bytes([0])
with pytest.raises(ValueError, match=msg):
decode_frame(d, 1, 8, '<')
def test_insufficient_data_for_offsets_raises(self):
"""Test exception if invalid first offset."""
msg = r"Invalid segment offset found in the RLE header"
# Offset 64 with length 64
d = self.as_bytes([64])
with pytest.raises(ValueError, match=msg):
decode_frame(d, 1, 8, '<')
def test_non_increasing_offsets_raises(self):
"""Test exception if offsets not in increasing order."""
msg = r"Invalid segment offset found in the RLE header"
d = self.as_bytes([64, 70, 68])
with pytest.raises(ValueError, match=msg):
decode_frame(d, 1, 8, '<')
def test_invalid_samples_px_raises(self):
"""Test exception if samples per px not 1 or 3."""
msg = r"The \(0028,0002\) 'Samples per Pixel' must be 1 or 3"
d = self.as_bytes([64, 70])
with pytest.raises(ValueError, match=msg):
decode_frame(d + b'\x00' * 8, 1, 8, '<')
def test_insufficient_frame_literal(self):
"""Test segment with excess padding on lit."""
d = self.as_bytes([64])
assert decode_frame(d + b'\x00' * 8, 1, 8, '<') == b"\x00"
def test_insufficient_frame_copy(self):
"""Test segment withe excess padding on copy."""
d = self.as_bytes([64])
assert decode_frame(d + b'\xff\x00\x00', 1, 8, '<') == b"\x00"
def test_insufficient_segment_copy_raises(self):
"""Test exception if insufficient segment data on copy."""
msg = (
r"The end of the data was reached before the segment was "
r"completely decoded"
)
d = self.as_bytes([64])
with pytest.raises(ValueError, match=msg):
decode_frame(d + b'\xff', 8, 8, '<')
def test_insufficient_segment_literal_raises(self):
"""Test exception if insufficient segment data on literal."""
msg = (
r"The end of the data was reached before the segment was "
r"completely decoded"
)
d = self.as_bytes([64])
with pytest.raises(ValueError, match=msg):
decode_frame(d + b'\x0a' * 8, 12, 8, '<')
def test_invalid_byteorder_raises(self):
"""Test exception if invalid byteorder."""
header = (
b'\x01\x00\x00\x00'
b'\x40\x00\x00\x00'
)
header += (64 - len(header)) * b'\x00'
# 2 x 3 data
# 0, 64, 128, 160, 192, 255
data = b'\x05\x00\x40\x80\xA0\xC0\xFF'
# Ok with u8
decode_frame(header + data, 2 * 3, 8, '=')
msg = r"'byteorder' must be '>' or '<'"
with pytest.raises(ValueError, match=msg):
decode_frame(header + data, 1 * 3, 16, '=')
def test_decoded_segment_length_short(self):
"""Test exception if decoded segment length invalid."""
msg = r"The decoded segment length does not match the expected length"
d = self.as_bytes([64])
with pytest.raises(ValueError, match=msg):
decode_frame(d + b'\x00' * 8, 12, 8, '<')
def test_decoded_segment_length_long(self):
"""Test exception if decoded segment length invalid."""
msg = r"The decoded segment length does not match the expected length"
d = self.as_bytes([64, 72])
with pytest.raises(ValueError, match=msg):
decode_frame(d + b'\x00' * 20, 8, 16, '<')
def test_u8_1s(self):
"""Test decoding 8-bit, 1 sample/pixel."""
header = (
b'\x01\x00\x00\x00'
b'\x40\x00\x00\x00'
)
header += (64 - len(header)) * b'\x00'
# 2 x 3 data
# 0, 64, 128, 160, 192, 255
data = b'\x05\x00\x40\x80\xA0\xC0\xFF'
# Big endian
decoded = decode_frame(header + data, 2 * 3, 8, '>')
arr = np.frombuffer(decoded, np.dtype('uint8'))
assert [0, 64, 128, 160, 192, 255] == arr.tolist()
# Little-endian
decoded = decode_frame(header + data, 2 * 3, 8, '<')
arr = np.frombuffer(decoded, np.dtype('uint8'))
assert [0, 64, 128, 160, 192, 255] == arr.tolist()
def test_u8_3s(self):
"""Test decoding 8-bit, 3 sample/pixel."""
header = (
b'\x03\x00\x00\x00' # 3 segments
b'\x40\x00\x00\x00' # 64
b'\x47\x00\x00\x00' # 71
b'\x4E\x00\x00\x00' # 78
)
header += (64 - len(header)) * b'\x00'
# 2 x 3 data
# 0, 64, 128, 160, 192, 255
data = (
b'\x05\x00\x40\x80\xA0\xC0\xFF' # R
b'\x05\xFF\xC0\x80\x40\x00\xFF' # G
b'\x05\x01\x40\x80\xA0\xC0\xFE' # B
)
decoded = decode_frame(header + data, 2 * 3, 8, '<')
arr = np.frombuffer(decoded, np.dtype('uint8'))
# Ordered all R, all G, all B
assert [0, 64, 128, 160, 192, 255] == arr[:6].tolist()
assert [255, 192, 128, 64, 0, 255] == arr[6:12].tolist()
assert [1, 64, 128, 160, 192, 254] == arr[12:].tolist()
def test_u16_1s(self):
"""Test decoding 16-bit, 1 sample/pixel."""
header = (
b'\x02\x00\x00\x00'
b'\x40\x00\x00\x00'
b'\x47\x00\x00\x00'
)
header += (64 - len(header)) * b'\x00'
# 2 x 3 data
data = (
# 0, 1, 256, 255, 65280, 65535
b'\x05\x00\x00\x01\x00\xFF\xFF' # MSB
b'\x05\x00\x01\x00\xFF\x00\xFF' # LSB
)
# Big-endian output
decoded = decode_frame(header + data, 2 * 3, 16, '>')
arr = np.frombuffer(decoded, np.dtype('>u2'))
assert [0, 1, 256, 255, 65280, 65535] == arr.tolist()
# Little-endian output
decoded = decode_frame(header + data, 2 * 3, 16, '<')
arr = np.frombuffer(decoded, np.dtype('<u2'))
assert [0, 1, 256, 255, 65280, 65535] == arr.tolist()
def test_u16_3s(self):
"""Test decoding 16-bit, 3 sample/pixel."""
header = (
b'\x06\x00\x00\x00' # 6 segments
b'\x40\x00\x00\x00' # 64
b'\x47\x00\x00\x00' # 71
b'\x4E\x00\x00\x00' # 78
b'\x55\x00\x00\x00' # 85
b'\x5C\x00\x00\x00' # 92
b'\x63\x00\x00\x00' # 99
)
header += (64 - len(header)) * b'\x00'
# 2 x 3 data
data = (
# 0, 1, 256, 255, 65280, 65535
b'\x05\x00\x00\x01\x00\xFF\xFF' # MSB
b'\x05\x00\x01\x00\xFF\x00\xFF' # LSB
b'\x05\xFF\x00\x01\x00\xFF\x00' # MSB
b'\x05\xFF\x01\x00\xFF\x00\x00' # LSB
b'\x05\x00\x00\x01\x00\xFF\xFF' # MSB
b'\x05\x01\x01\x00\xFF\x00\xFE' # LSB
)
# Big-endian output
decoded = decode_frame(header + data, 2 * 3, 16, '>')
arr = np.frombuffer(decoded, np.dtype('>u2'))
assert [0, 1, 256, 255, 65280, 65535] == arr[:6].tolist()
assert [65535, 1, 256, 255, 65280, 0] == arr[6:12].tolist()
assert [1, 1, 256, 255, 65280, 65534] == arr[12:].tolist()
# Little-endian output
decoded = decode_frame(header + data, 2 * 3, 16, '<')
arr = np.frombuffer(decoded, np.dtype('<u2'))
assert [0, 1, 256, 255, 65280, 65535] == arr[:6].tolist()
assert [65535, 1, 256, 255, 65280, 0] == arr[6:12].tolist()
assert [1, 1, 256, 255, 65280, 65534] == arr[12:].tolist()
def test_u32_1s(self):
"""Test decoding 32-bit, 1 sample/pixel."""
header = (
b'\x04\x00\x00\x00' # 4 segments
b'\x40\x00\x00\x00' # 64 offset
b'\x47\x00\x00\x00' # 71 offset
b'\x4E\x00\x00\x00' # 78 offset
b'\x55\x00\x00\x00' # 85 offset
)
header += (64 - len(header)) * b'\x00'
# 2 x 3 data
data = (
# 0, 16777216, 65536, 256, 4294967295
b'\x05\x00\x01\x00\x00\x00\xFF' # MSB
b'\x05\x00\x00\x01\x00\x00\xFF'
b'\x05\x00\x00\x00\x01\x00\xFF'
b'\x05\x00\x00\x00\x00\x01\xFF' # LSB
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import io
import subprocess
import argparse
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
import datetime
import requests
import hashlib
import socket
import shutil
import errno
import json
import xml.etree.cElementTree as cElementTree
import funannotate.library as lib
def calcmd5(file):
md5local = None
with open(file, 'rb') as infile:
data = infile.read()
md5local = hashlib.md5(data).hexdigest()
return md5local
def calcmd5remote(url, max_file_size=100*1024*1024):
remote = urlopen(url)
hash = hashlib.md5()
total_read = 0
while True:
data = remote.read(4096)
total_read += 4096
if not data or total_read > max_file_size:
break
hash.update(data)
return hash.hexdigest()
def check4newDB(name, infoDB):
# compare the remote md5 with the value stored in the local database info
if '-' in name:
checkname = name.split('-')[0]
else:
checkname = name
if not checkname in infoDB:
lib.log.error("%s not found in database" % name)
return True
else:
oldmd5 = infoDB[checkname][5]
newmd5 = calcmd5remote(DBURL.get(name))
lib.log.debug("%s database, Old md5: %s; New md5: %s" %
(name, oldmd5, newmd5))
if oldmd5 == newmd5:
lib.log.info("%s database is current." % name)
return False
else:
lib.log.info("%s database is out of date, updating." % name)
return True
def download(url, name, wget=False):
if wget:
# download with wget
cmd = ['wget', '-O', name, '--no-check-certificate', '-t', '2', '-c', url]
subprocess.call(cmd)
else:
file_name = name
try:
u = urlopen(url)
f = open(file_name, 'wb')
meta = u.info()
file_size = 0
for x in meta.items():
if x[0].lower() == 'content-length':
file_size = int(x[1])
lib.log.info("Downloading: {0} Bytes: {1}".format(url, file_size))
file_size_dl = 0
block_sz = 8192
while True:
buffer = u.read(block_sz)
if not buffer:
break
file_size_dl += len(buffer)
f.write(buffer)
p = float(file_size_dl) / file_size
status = r"{0} [{1:.2%}]".format(file_size_dl, p)
status = status + chr(8)*(len(status)+1)
sys.stdout.write(status)
sys.stdout.flush()
f.close()
except socket.error as e:
if e.errno != errno.ECONNRESET:
raise
pass
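# Example calls as used elsewhere in this module (DBURL, defined outside this
# excerpt, appears to map database names to their download URLs):
#   download(DBURL.get('merops'), fasta)      # urllib download with a progress readout
#   download(filetsv, localtsv, wget=True)    # delegate to wget instead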
def download_busco_v5(dest, taxa=['Eukaryota'], wget=False):
baseurl = 'https://busco-data.ezlab.org/v5/data'
filetsv = '{}/file_versions.tsv'.format(baseurl)
infodir = os.path.join(dest, 'information')
placedir = os.path.join(dest, 'placement_files')
lineagedir = os.path.join(dest, 'lineages')
for d in [dest, infodir, placedir, lineagedir]:
if not os.path.isdir(d):
os.makedirs(d)
# get the file_versions tsv file to parse
localtsv = os.path.join(dest, os.path.basename(filetsv))
download(filetsv, localtsv, wget=wget)
localinfo = os.path.join(infodir, 'lineages_list.2019-11-27.txt')
files2download = []
if not os.path.isfile(localinfo):
files2download.append('{}/information/lineages_list.2019-11-27.txt'.format(baseurl))
# now parse the tsv file and build list of files to download
if not os.path.isfile(localtsv):
lib.log.error('Unable to download {}/file_version.tsv'.format(baseurl))
else:
with open(localtsv, 'r') as infile:
for line in infile:
line = line.rstrip()
odb, odb_date, sha, taxon, filetype = line.split('\t')
if taxon in taxa:
if filetype == 'placement_files':
localurl = os.path.join(placedir, filetype, '{}.{}.txt'.format(odb.split('.txt')[0], odb_date))
def wget(url, name):
# download with wget
cmd = ['wget', '-O', name, '-t', '2', '-c', url]
subprocess.call(cmd)
def meropsDB(info, force=False, args={}):
fasta = os.path.join(FUNDB, 'merops_scan.lib')
filtered = os.path.join(FUNDB, 'merops.formatted.fa')
database = os.path.join(FUNDB, 'merops.dmnd')
if os.path.isfile(fasta) and args.update and not force:
if check4newDB('merops', info):
force = True
if not os.path.isfile(fasta) or force:
lib.log.info('Downloading Merops database')
for x in [fasta, filtered, database]:
if os.path.isfile(x):
os.remove(x)
if args.wget:
wget(DBURL.get('merops'), fasta)
else:
download(DBURL.get('merops'), fasta)
md5 = calcmd5(fasta)
# reformat fasta headers
with open(filtered, 'w') as filtout:
with io.open(fasta, encoding="utf8", errors='ignore') as infile:
for line in infile:
if line.startswith('>'):
line = line.rstrip()
ID = line.split()[0]
family = line.split('#')[1]
filtout.write('{:} {:}\n'.format(ID, family))
else:
filtout.write(line)
lib.log.info('Building diamond database')
cmd = ['diamond', 'makedb', '--in',
'merops.formatted.fa', '--db', 'merops']
lib.runSubprocess(cmd, os.path.join(FUNDB), lib.log)
num_records = lib.countfasta(filtered)
info['merops'] = ('diamond', database, '12.0',
'2017-10-04', num_records, md5)
type, name, version, date, records, checksum = info.get('merops')
lib.log.info('MEROPS Database: version={:} date={:} records={:,}'.format(
version, date, records))
def uniprotDB(info, force=False, args={}):
'''
download swissprot/uniprot database, format for diamond, and output date of database
'''
fasta = os.path.join(FUNDB, 'uniprot_sprot.fasta')
database = os.path.join(FUNDB, 'uniprot.dmnd')
versionfile = os.path.join(FUNDB, 'uniprot.release-date.txt')
if os.path.isfile(fasta) and args.update and not force:
if check4newDB('uniprot-release', info):
force = True
if not os.path.isfile(fasta) or force:
lib.log.info('Downloading UniProtKB/SwissProt database')
for x in [fasta, fasta+'.gz', versionfile, database]:
if os.path.isfile(x):
os.remove(x)
if args.wget:
wget(DBURL.get('uniprot'), fasta+'.gz')
else:
download(DBURL.get('uniprot'), fasta+'.gz')
subprocess.call(
['gunzip', '-f', 'uniprot_sprot.fasta.gz'], cwd=os.path.join(FUNDB))
if args.wget:
wget(DBURL.get('uniprot-release'), versionfile)
else:
download(DBURL.get('uniprot-release'), versionfile)
md5 = calcmd5(versionfile)
unidate = ''
univers = ''
with io.open(versionfile, encoding="utf8", errors='ignore') as infile:
for line in infile:
if line.startswith('UniProtKB/Swiss-Prot Release'):
rest, datepart = line.split(' of ')
unidate = datetime.datetime.strptime(
datepart.rstrip(), "%d-%b-%Y").strftime("%Y-%m-%d")
univers = rest.split(' ')[-1]
lib.log.info('Building diamond database')
cmd = ['diamond', 'makedb', '--in',
'uniprot_sprot.fasta', '--db', 'uniprot']
lib.runSubprocess(cmd, os.path.join(FUNDB), lib.log)
num_records = lib.countfasta(
os.path.join(FUNDB, 'uniprot_sprot.fasta'))
info['uniprot'] = ('diamond', database, univers,
unidate, num_records, md5)
type, name, version, date, records, checksum = info.get('uniprot')
lib.log.info('UniProtKB Database: version={:} date={:} records={:,}'.format(
version, date, records))
def dbCANDB(info, force=False, args={}):
hmm = os.path.join(FUNDB, 'dbCAN.hmm')
familyinfo = os.path.join(FUNDB, 'dbCAN-fam-HMMs.txt')
versionfile = os.path.join(FUNDB, 'dbCAN.changelog.txt')
if os.path.isfile(hmm) and args.update and not force:
if check4newDB('dbCAN', info):
force = True
if not os.path.isfile(hmm) or force:
lib.log.info('Downloading dbCAN database')
for x in [os.path.join(FUNDB, 'dbCAN.tmp'), hmm, familyinfo, versionfile]:
if os.path.isfile(x):
os.remove(x)
if args.wget:
wget(DBURL.get('dbCAN'), os.path.join(FUNDB, 'dbCAN.tmp'))
wget(DBURL.get('dbCAN-tsv'), familyinfo)
wget(DBURL.get('dbCAN-log'), versionfile)
else:
download(DBURL.get('dbCAN'),
os.path.join(FUNDB, 'dbCAN.tmp'))
download(DBURL.get('dbCAN-tsv'), familyinfo)
download(DBURL.get('dbCAN-log'), versionfile)
md5 = calcmd5(os.path.join(FUNDB, 'dbCAN.tmp'))
num_records = 0
dbdate = ''
dbvers = ''
with open(hmm, 'w') as out:
with io.open(os.path.join(FUNDB, 'dbCAN.tmp'), encoding="utf8", errors='ignore') as input:
for line in input:
if line.startswith('NAME'):
num_records += 1
line = line.replace('.hmm\n', '\n')
out.write(line)
with io.open(versionfile, encoding="utf8", errors='ignore') as infile:
head = [next(infile) for x in range(2)]
dbdate = head[1].replace('# ', '').rstrip()
dbvers = head[0].split(' ')[-1].rstrip()
dbdate = datetime.datetime.strptime(
dbdate, "%m/%d/%Y").strftime("%Y-%m-%d")
lib.log.info('Creating dbCAN HMM database')
cmd = ['hmmpress', '-f', 'dbCAN.hmm']
lib.runSubprocess(cmd, os.path.join(FUNDB), lib.log)
info['dbCAN'] = ('hmmer3', hmm, dbvers, dbdate, num_records, md5)
os.remove(os.path.join(FUNDB, 'dbCAN.tmp'))
type, name, version, date, records, checksum = info.get('dbCAN')
lib.log.info('dbCAN Database: version={:} date={:} records={:,}'.format(
version, date, records))
def pfamDB(info, force=False, args={}):
hmm = os.path.join(FUNDB, 'Pfam-A.hmm')
familyinfo = os.path.join(FUNDB, 'Pfam-A.clans.tsv')
versionfile = os.path.join(FUNDB, 'Pfam.version')
if os.path.isfile(hmm) and args.update and not force:
if check4newDB('pfam-log', info):
force = True
if not os.path.isfile(hmm) or force:
for x in [hmm, hmm+'.gz', familyinfo, familyinfo+'.gz', versionfile, versionfile+'.gz']:
if os.path.isfile(x):
os.remove(x)
lib.log.info('Downloading Pfam database')
if args.wget:
wget(DBURL.get('pfam'), hmm+'.gz')
wget(DBURL.get('pfam-tsv'), familyinfo+'.gz')
wget(DBURL.get('pfam-log'), versionfile+'.gz')
else:
download(DBURL.get('pfam'), hmm+'.gz')
download(DBURL.get('pfam-tsv'), familyinfo+'.gz')
download(DBURL.get('pfam-log'), versionfile+'.gz')
subprocess.call(['gunzip', '-f', 'Pfam-A.hmm.gz'],
cwd=os.path.join(FUNDB))
subprocess.call(['gunzip', '-f', 'Pfam-A.clans.tsv.gz'],
cwd=os.path.join(FUNDB))
md5 = calcmd5(versionfile+'.gz')
subprocess.call(['gunzip', '-f', 'Pfam.version.gz'],
cwd=os.path.join(FUNDB))
num_records = 0
pfamdate = ''
pfamvers = ''
with io.open(versionfile, encoding="utf8", errors='ignore') as input:
for line in input:
if line.startswith('Pfam release'):
pfamvers = line.split(': ')[-1].rstrip()
if line.startswith('Pfam-A families'):
num_records = int(line.split(': ')[-1].rstrip())
if line.startswith('Date'):
pfamdate = line.split(': ')[-1].rstrip()
lib.log.info('Creating Pfam HMM database')
cmd = ['hmmpress', '-f', 'Pfam-A.hmm']
lib.runSubprocess(cmd, os.path.join(FUNDB), lib.log)
info['pfam'] = ('hmmer3', hmm, pfamvers, pfamdate, num_records, md5)
type, name, version, date, records, checksum = info.get('pfam')
lib.log.info('Pfam Database: version={:} date={:} records={:,}'.format(
version, date, records))
def repeatDB(info, force=False, args={}):
fasta = os.path.join(FUNDB, 'funannotate.repeat.proteins.fa')
filtered = os.path.join(FUNDB, 'funannotate.repeats.reformat.fa')
database = os.path.join(FUNDB, 'repeats.dmnd')
if os.path.isfile(fasta) and args.update and not force:
if check4newDB('repeats', info):
force = True
if not os.path.isfile(fasta) or force:
lib.log.info('Downloading Repeat database')
for x in [fasta, fasta+'.tar.gz', filtered, database]:
if os.path.isfile(x):
os.remove(x)
if args.wget:
wget(DBURL.get('repeats'), fasta+'.tar.gz')
else:
download(DBURL.get('repeats'), fasta+'.tar.gz')
md5 = calcmd5(fasta+'.tar.gz')
subprocess.call(
['tar', '-zxf', 'funannotate.repeat.proteins.fa.tar.gz'], cwd=os.path.join(FUNDB))
with open(filtered, 'w') as out:
with io.open(fasta, encoding="utf8", errors='ignore') as infile:
for line in infile:
# this repeat fasta file has messed up headers....
if line.startswith('>'):
line = line.replace('#', '_')
line = line.replace('/', '-')
line = line.replace('&', '')
out.write(line)
lib.log.info('Building diamond database')
cmd = ['diamond', 'makedb', '--in', 'funannotate.repeats.reformat.fa',
'--db', 'repeats', '-parse_seqids']
lib.runSubprocess(cmd, os.path.join(FUNDB), lib.log)
num_records = lib.countfasta(filtered)
info['repeats'] = ('diamond', database, '1.0', today, num_records, md5)
type, name, version, date, records, checksum = info.get('repeats')
lib.log.info('Repeat Database: version={:} date={:} records={:,}'.format(
version, date, records))
def outgroupsDB(info, force=False, args={}):
OutGroups = os.path.join(FUNDB, 'outgroups')
if os.path.isdir(OutGroups) and args.update and not force:
if check4newDB('outgroups', info):
force = True
if not os.path.isdir(OutGroups) or force:
lib.log.info('Downloading pre-computed BUSCO outgroups')
if os.path.isdir(os.path.join(FUNDB, 'outgroups')):
shutil.rmtree(os.path.join(FUNDB, 'outgroups'))
if args.wget:
wget(DBURL.get('outgroups'),
os.path.join(FUNDB, 'busco_outgroups.tar.gz'))
else:
download(DBURL.get('outgroups'),
os.path.join(FUNDB, 'busco_outgroups.tar.gz'))
md5 = calcmd5(os.path.join(FUNDB, 'busco_outgroups.tar.gz'))
subprocess.call(['tar', '-zxf', 'busco_outgroups.tar.gz'],
cwd=os.path.join(FUNDB))
num_records = len([name for name in os.listdir(
OutGroups) if os.path.isfile(os.path.join(OutGroups, name))])
info['busco_outgroups'] = (
'outgroups', OutGroups, '1.0', today, num_records, md5)
type, name, version, date, records, checksum = info.get('busco_outgroups')
lib.log.info('BUSCO outgroups: version={:} date={:} records={:,}'.format(
version, date, records))
def goDB(info, force=False, args={}):
goOBO = os.path.join(FUNDB, 'go.obo')
if os.path.isfile(goOBO) and args.update and not force:
if check4newDB('go-obo', info):
force = True
if not os.path.isfile(goOBO) or force:
lib.log.info('Downloading GO Ontology database')
for x in [goOBO]:
if os.path.isfile(x):
os.remove(x)
if args.wget:
wget(DBURL.get('go-obo'), goOBO)
else:
download(DBURL.get('go-obo'), goOBO)
md5 = calcmd5(goOBO)
num_records = 0
version = ''
with io.open(goOBO,
# repository: PhanterJR/phanterpwa
# -*- coding: utf-8 -*-
from ..helpers import (
DIV,
LABEL,
INPUT,
I,
SPAN,
TEXTAREA,
SELECT,
OPTION,
CONCATENATE,
UL,
LI,
A,
P
)
from ..xmlconstructor import XmlConstructor
class MaterializeInputText(XmlConstructor):
def __init__(self, inputname, label, id_input=None, default=None, error=None, **attributes):
if not default:
default = ""
else:
default = str(default)
if not error:
error = ""
else:
error = str(error)
if not id_input:
id_input = "input-%s" % inputname
attributes["_phanterpwa-materialize-input_name"] = inputname
attributes["_phanterpwa-materialize-input_id"] = id_input
if "_class" in attributes:
new_class = " ".join([attributes["_class"].strip(), "phanterpwa-materialize-input-text"])
attributes["_class"] = new_class
else:
attributes["_class"] = "phanterpwa-materialize-input-text"
self.input = INPUT(
_name=inputname,
_id=id_input,
_class="form-control",
_value=default,
_type="text"
)
new_content = [
DIV(I("check", _class="material-icons"),
_id="phanterpwa-materialize-input-check-%s" % inputname,
_class="phanterpwa-materialize-input-check"),
DIV(
self.input,
LABEL(
label,
_for=id_input),
_class="input-field"),
DIV(error,
_title=error,
_id="phanterpwa-materialize-input-error-%s" % inputname,
_class="phanterpwa-materialize-input-error%s" % (" actived" if error != "" else " deactived")),
]
XmlConstructor.__init__(self, 'div', False, *new_content, **attributes)
def disable(self):
self.input['_disabled'] = ""
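# Illustrative construction (a sketch; rendering/serialization is provided by the
# XmlConstructor base class, whose API is not shown in this excerpt):
#   field = MaterializeInputText('email', 'E-mail', error='Invalid address')
# builds the wrapper DIV with the check icon, the input-field block holding the
# INPUT and LABEL, and the error DIV (rendered as 'actived' since error is non-empty).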
class MaterializeChips(XmlConstructor):
def __init__(self, inputname, label, id_input=None, default="[]", error=None, **attributes):
if not default:
default = ""
else:
default = str(default)
if not error:
error = ""
else:
error = str(error)
if not id_input:
id_input = "input-%s" % inputname
attributes["_phanterpwa-materialize-input_name"] = inputname
attributes["_phanterpwa-materialize-input_id"] = id_input
if "_id" not in attributes:
attributes['_id'] = "phanterpwa-materialize-chips-jquery_plugin-%s" % inputname
if "_class" in attributes:
new_class = " ".join([attributes["_class"].strip(), "phanterpwa-materialize-input-chips"])
attributes["_class"] = new_class
else:
attributes["_class"] = "phanterpwa-materialize-input-chips"
new_content = [
DIV(I("check", _class="material-icons"),
_id="phanterpwa-materialize-input-check-%s" % inputname,
_class="phanterpwa-materialize-input-check"),
DIV(
DIV(
_id="phanterpwa-materialize-input-chips_%s" % inputname,
_class="phanterpwa-materialize-input-chips"),
INPUT(
_name=inputname,
_id=id_input,
_class="form-control",
_value=default,
_type="hidden"),
LABEL(
label,
_for=id_input),
_class="input-field"),
DIV(error,
_title=error,
_id="phanterpwa-materialize-input-error-%s" % inputname,
_class="phanterpwa-materialize-input-error%s" % (" actived" if error != "" else " deactived")),
DIV(_id="phanterpwa-materialize-input-chips-options-%s" % inputname, _class="phanterpwa-materialize-input-chips-options")
]
XmlConstructor.__init__(self, 'div', False, *new_content, **attributes)
class MaterializeSelectWithHideInput(XmlConstructor):
def __init__(self, inputname, label, id_input=None, default=None, error=None, options=None, **attributes):
if not default:
default = ""
else:
default = str(default)
if not error:
error = ""
else:
error = str(error)
if not id_input:
id_input = "input-%s" % inputname
attributes["_phanterpwa-materialize-input_name"] = inputname
self.general_id = id_input
self.general_name = inputname
self.id_button_add_new = "button-%s" % id_input
self.button_add_new = None
attributes["_phanterpwa-materialize-input_id"] = id_input
if "_class" in attributes:
new_class = " ".join([attributes["_class"].strip(), "phanterpwa-materialize-input-text"])
attributes["_class"] = new_class
else:
attributes["_class"] = "phanterpwa-materialize-input-text"
if options is None:
options = []
elif isinstance(options, (list, tuple)):
for x in options:
if not isinstance(x, (OPTION)):
raise TypeError("elements of options must is OPTION instance")
elif isinstance(options, (dict)):
new_options = [OPTION("Escolha uma opção", _value=" ", _disabled="", _selected="")]
for x in options:
new_options.append(OPTION(options[x], _value="%s" % x))
options = new_options
else:
raise TypeError("options must is tuple, list or None")
self._html_select = SELECT(
*options,
_id="select-%s" % id_input,
_class="phanterpwaformselect-withhiddeninput",
_target_input=id_input)
self._html_select_label = LABEL(label, _for="select-%s" % id_input)
self._html_select_concatenate = CONCATENATE(
self._html_select,
self._html_select_label
)
self._html_check = DIV(
I("check", _class="material-icons"),
_id="phanterpwa-materialize-input-check-%s" % inputname,
_class="phanterpwa-materialize-input-check actived-select")
attr_btn_switch = {}
attr_btn_switch["_target-switch"] = "switch-input-select-container-%s" % self.general_id
attr_btn_switch["_target-check"] = "phanterpwa-materialize-input-check-%s" % self.general_name
new_content = [
self._html_check,
DIV(
DIV(
INPUT(
_name=inputname,
_id=id_input,
_class="form-control",
_value=default,
_type="text"),
LABEL(
label,
_for=id_input),
DIV(
I("details", _class="material-icons"),
_class="phanterpwa-materilize-button-show-hidde-input-new doinput waves-effect waves-teal btn link",
**attr_btn_switch),
_id="switch-input-%s" % id_input,
_class="input-field switch-input"),
DIV(
self._html_select_concatenate,
_id="switch-select-%s" % id_input,
_class="input-field switch-select"),
_id="switch-input-select-container-%s" % id_input,
_class="switch-input-select-container actived-select"),
DIV(error,
_title=error,
_id="phanterpwa-materialize-input-error-%s" % inputname,
_class="phanterpwa-materialize-input-error%s" % (" actived" if error != "" else " deactived")),
]
XmlConstructor.__init__(self, 'div', False, *new_content, **attributes)
def addOptionInSelect(self, value, label_value, **attributes):
if "_value" in attributes:
del attributes['_value']
option = OPTION(label_value, _value=value, **attributes)
self._html_select.content.append(option)
def setButtonNew(self, label=None, **attributes):
if label is None:
label = I("add", _class="material-icons")
attributes['_id'] = self.id_button_add_new
attributes["_target-switch"] = "switch-input-select-container-%s" % self.general_id
attributes["_target-check"] = "phanterpwa-materialize-input-check-%s" % self.general_name
class_button = "phanterpwa-materilize-button-show-hidde-input-new waves-effect waves-teal btn link"
self._html_check.attributes["_class"] = " ".join(["hasbutton", self._html_check.attributes["_class"]])
if "_class" in attributes:
attributes["_class"] = " ".join([attributes["_class"], class_button])
else:
attributes["_class"] = class_button
self._html_select_concatenate.content = [
DIV(
DIV(
self._html_select,
self._html_select_label,
_class="phanterpwa-materialize-selectwithinput"),
DIV(
DIV(label, **attributes),
_class="phanterpwa-materialize-selectwithinput-button"),
_class="phanterpwa-materialize-select-and-button-container"
)
]
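# A minimal usage sketch for the class above; the field names and option values here
# are illustrative assumptions, not taken from the original module.
def _demo_select_with_hide_input():
    widget = MaterializeSelectWithHideInput(
        "country", "Country",
        options={"br": "Brazil", "us": "United States"})
    widget.addOptionInSelect("fr", "France")  # append one more OPTION to the SELECT
    widget.setButtonNew()                     # default label is the "add" material icon
    return widget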
class MaterializeInputTextMultiline(XmlConstructor):
def __init__(self, inputname, label, id_input=None, default=None, error=None, **attributes):
if not default:
default = ""
else:
default = str(default)
if not error:
error = ""
else:
error = str(error)
if not id_input:
id_input = "input-%s" % inputname
attributes["_phanterpwa-materialize-input_name"] = inputname
attributes["_phanterpwa-materialize-input_id"] = id_input
if "_class" in attributes:
new_class = " ".join([attributes["_class"].strip(), "phanterpwa-materialize-input-text"])
attributes["_class"] = new_class
else:
attributes["_class"] = "phanterpwa-materialize-input-text"
new_content = [
DIV(I("check", _class="material-icons"),
_id="phanterpwa-materialize-input-check-%s" % inputname,
_class="phanterpwa-materialize-input-check"),
DIV(
TEXTAREA(
_name=inputname,
_id=id_input,
_class="form-control materialize-textarea",
_value=default,
_type="text"),
LABEL(
label,
_for=id_input),
_class="input-field"),
DIV(error,
_title=error,
_id="phanterpwa-materialize-input-error-%s" % inputname,
_class="phanterpwa-materialize-input-error%s" % (" actived" if error != "" else " deactived")),
]
XmlConstructor.__init__(self, 'div', False, *new_content, **attributes)
class MaterializeInputHidden(XmlConstructor):
def __init__(self, inputname, label, id_input=None, default=None, error=None, **attributes):
if not default:
default = ""
else:
default = str(default)
if not error:
error = ""
else:
error = str(error)
if not id_input:
id_input = "input-%s" % inputname
attributes["_phanterpwa-materialize-input_name"] = inputname
attributes["_phanterpwa-materialize-input_id"] = id_input
if "_class" in attributes:
new_class = " ".join([attributes["_class"].strip(), "phanterpwa-materialize-input-hidden"])
attributes["_class"] = new_class
else:
attributes["_class"] = "phanterpwa-materialize-input-hidden"
new_content = [
DIV(I("check", _class="material-icons"),
_id="phanterpwa-materialize-input-check-%s" % inputname,
_class="phanterpwa-materialize-input-check"),
DIV(
INPUT(
_name=inputname,
_id=id_input,
_class="form-control",
_value=default,
_type="hidden"),
LABEL(
label,
_for=id_input),
_class="input-field"),
DIV(error,
_title=error,
_id="phanterpwa-materialize-input-error-%s" % inputname,
_class="phanterpwa-materialize-input-error%s" % (" actived" if error != "" else " deactived")),
]
XmlConstructor.__init__(self, 'div', False, *new_content, **attributes)
class MaterializeInputPassword(XmlConstructor):
def __init__(self, inputname, label, id_input=None, default=None, error="", **attributes):
if not default:
default = ""
else:
default = str(default)
if not error:
error = ""
else:
error = str(error)
if not id_input:
id_input = "input-%s" % inputname
attributes["_phanterpwa-materialize-input_name"] = inputname
attributes["_phanterpwa-materialize-input_id"] = id_input
if "_class" in attributes:
new_class = " ".join([attributes["_class"].strip(), "phanterpwa-materialize-input-password"])
attributes["_class"] = new_class
else:
attributes["_class"] = "phanterpwa-materialize-input-password"
new_content = [
DIV(I("check", _class="material-icons"),
_id="phanterpwa-materialize-input-check-%s" % inputname,
_class="phanterpwa-materialize-input-check"),
DIV(
INPUT(
_name=inputname,
_id=id_input,
_class="form-control",
_value=default,
_type="password"),
LABEL(
label,
_for=id_input),
_class="input-field"),
DIV(error,
_title=error,
_id="phanterpwa-materialize-input-error-%s" % inputname,
_class="phanterpwa-materialize-input-error%s" % (" actived" if error != "" else " deactived")),
]
XmlConstructor.__init__(self, 'div', False, *new_content, **attributes)
class MaterializeInputCheckBox(XmlConstructor):
def __init__(self, inputname, label, id_input=None, disabled=False, checked=False, filledIn=False, **attributes):
if not id_input:
id_input = "input-%s" % inputname
attributes["_phanterpwa-materialize-input_name"] = inputname
if "_class" in attributes:
new_class = " ".join([attributes["_class"].strip(), "phanterpwa-materialize-input-checkbox"])
attributes["_class"] = new_class
else:
attributes["_class"] = "phanterpwa-materialize-input-checkbox"
new_content = [
DIV(
P(
LABEL(
INPUT(
_name=inputname,
_id=id_input,
_class="form-control%s" % (" filled-in" if filledIn else ""),
_checked="checked" if checked else None,
_disabled="disabled" if disabled else None,
_type="checkbox"),
SPAN(label),
_for=id_input
)
),
_class="input-field-checkbox"),
]
XmlConstructor.__init__(self, 'div', False, *new_content, **attributes)
class MaterializeButtonForm(XmlConstructor):
def __init__(self, _id, label, **attributes):
self.label = label
initial_class = "phanterpwa-materialize-button-form-container"
attributes["_id"] = _id
self.button_attributes = attributes
if "_class" in self.button_attributes:
self.button_attributes["_class"] = " ".join([
self.button_attributes['_class'].strip(),
"btn phanterpwa-materialize-button-form link"])
else:
self.button_attributes["_class"] = "btn phanterpwa-materialize-button-form link"
if "_title" not in self.button_attributes:
if isinstance(self.label, str):
self.button_attributes["_title"] = self.label
XmlConstructor.__init__(self, 'div', False, _class=initial_class)
self._update_content()
def _update_content(self):
attributes = self.button_attributes
self.content = [
DIV(
DIV(self.label, **attributes),
_class="button-form")
]
class MaterializeSearchBar(XmlConstructor):
def __init__(
self,
inputname,
fields_to_select=None,
label="Pesquisar",
field_select_label="Campo de pesquisa",
**attributes
):
self.field_select = field_select_label
self.inputname = inputname
id_input = "phanterpwa-materialize-search_bar-input-%s" % inputname
if "_class" in attributes:
new_class = " ".join([attributes["_class"].strip(), "phanterpwa-materialize-search_bar"])
attributes["_class"] = new_class
else:
attributes["_class"] = "phanterpwa-materialize-search_bar"
self.select_search_container = CONCATENATE()
self.select_search = None
self.input_search = DIV(
DIV(
INPUT(
_name=inputname,
_id=id_input,
_class="form-control",
_type="text"),
LABEL(
label,
_for=id_input),
_class="input-field"),
DIV(
I("search", _class="material-icons"),
_source_search=id_input,
_source_select="phanterpwa-materialize-select-search-%s" % inputname,
_id="phanterpwa-materialize-search_bar-button-%s" % inputname,
_class="waves-effect waves-teal btn link materialize-search_bar-button"),
_id='materialize-search_bar-input-and-button-%s' % inputname,
_class='materialize-search_bar-input-and-button'
)
new_content = [
CONCATENATE(
DIV(
self.input_search,
self.select_search_container,
_class="row")
)
]
XmlConstructor.__init__(self, 'div', False, *new_content, **attributes)
def addOptionInSelect(self, value, label_value, **attributes):
if not self.select_search_container.content:
self.input_search.attributes["_class"] = " ".join([
"has_select col s12 m8 l8",
self.input_search.attributes["_class"]])
self.select_search = SELECT(
_id="phanterpwa-materialize-select-search-%s" % self.inputname,
_class="phanterpwa-materialize-select-search")
select_container = DIV(
self.select_search,
LABEL(self.field_select, _for="phanterpwa-materialize-select-search-%s" % self.inputname),
_class="input-field")
self.select_search_container.content = [
DIV(
select_container,
_class="phanterpwa-materialize-search_bar-select col s12 m4 l4")
]
self.select_search.append(OPTION(label_value, _value=value, **attributes))
def showSelect(self):
if not self.select_search_container.content and self.select_search is None:
self.input_search["_class"] = " ".join([
"has_select col s12 m8 l8",
self.input_search.attributes["_class"]])
self.select_search = SELECT(
_id="phanterpwa-materialize-select-search-%s" % self.inputname,
_class="phanterpwa-materialize-select-search")
select_container = DIV(
self.select_search,
LABEL(self.field_select, _for="phanterpwa-materialize-select-search-%s" % self.inputname),
_class="input-field")
self.select_search_container.content = [
DIV(
select_container,
_class="phanterpwa-materialize-search_bar-select col s12 m4 l4")
]
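# A minimal usage sketch for MaterializeSearchBar; names are illustrative assumptions.
# The first addOptionInSelect() call also creates the select column next to the search input.
def _demo_search_bar():
    bar = MaterializeSearchBar("products", label="Search", field_select_label="Search field")
    bar.addOptionInSelect("name", "Name")
    bar.addOptionInSelect("price", "Price")
    return bar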
class MaterializeFloatButton(XmlConstructor):
def __init__(self, icon_name="build", **attributes):
if "_class" in attributes:
new_class = " ".join([
attributes["_class"].strip(),
"phanterpwa-materialize-floating-action-button"
])
attributes["_class"] = new_class
else:
attributes["_class"] = "phanterpwa-materialize-floating-action-button"
self.buttons_floating = UL()
self.principal_icon = CONCATENATE(
A(
I(icon_name, _class="large material-icons"),
_class="btn-floating btn-large blue waves-effect waves-light")
)
new_content = [
DIV(
self.principal_icon,
self.buttons_floating,
_class="fixed-action-btn click-to-toggle")
]
XmlConstructor.__init__(self, 'div', False, *new_content, **attributes)
def addMaterializeActionButton(self, material_icon_name, color="blue", **attributes):
if '_class' in attributes:
attributes['_class'] = " ".join(["btn-floating %s" % color, attributes['_class']])
else:
| |
# This file is part of the Astrometry.net suite.
# Licensed under a 3-clause BSD style license - see LICENSE
from __future__ import print_function
import os
class NoPyfits(object):
pass
fitsio = pyfits = None
try:
import fitsio
pyfits = NoPyfits()
except:
try:
try:
import pyfits
except ImportError:
try:
from astropy.io import fits as pyfits
except ImportError:
raise ImportError("Cannot import either pyfits or astropy.io.fits")
except:
pyfits = NoPyfits()
import numpy as np
def pyfits_writeto(p, filename, **kwargs):
'''
*p*: HDUList object
*filename*: uh, the filename to write to
'''
# old pyfits versions (eg the one in Ubuntu 10.04)
# fail when used with python2.7 (warning.showwarning changed)
# so work-around pyfits printing a warning when it overwrites an
# existing file.
if os.path.exists(filename):
os.remove(filename)
p.writeto(filename, **kwargs)
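# A minimal usage sketch, assuming astropy is available; the filename is hypothetical.
def _demo_pyfits_writeto():
    from astropy.io import fits
    hdul = fits.HDUList([fits.PrimaryHDU(data=np.zeros((4, 4)))])
    pyfits_writeto(hdul, '/tmp/example.fits')  # silently overwrites any existing file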
def merge_tables(TT, columns=None):
assert(len(TT) > 0)
if columns in [None, 'minimal', 'fillzero']:
cols = set(TT[0].get_columns())
types = {}
if columns == 'fillzero':
for c in cols:
types[c] = TT[0].get(c).dtype
#print('col', c, 'is', types[c])
for T in TT[1:]:
if columns == 'minimal':
if len(cols.symmetric_difference(T.get_columns())):
cols = cols.intersection(T.get_columns())
continue
if columns == 'fillzero':
newcols = set(T.get_columns()) - cols
for c in newcols:
# Assume numpy arrays
types[c] = T.get(c).dtype
#print('col', c, 'is', types[c])
cols = cols.union(T.get_columns())
continue
# They must have the same set of columns
if len(cols.symmetric_difference(T.get_columns())):
print('Tables to merge must have the same set of columns.')
print('First table columns:', cols)
print('Target table columns:', T.get_columns())
print('Difference:', cols.symmetric_difference(T.get_columns()))
assert(len(cols.symmetric_difference(T.get_columns())) == 0)
cols = list(cols)
# Reorder the columns to match their order in TT[0].
ocols = []
for c in TT[0].get_columns():
if c in cols and not c in ocols:
ocols.append(c)
# (for fillzero) -- add the rest of the columns (not in TT[0])
for c in cols:
if not c in ocols:
ocols.append(c)
cols = ocols
else:
for i,T in enumerate(TT):
# ensure they all have the requested columns
if not set(columns).issubset(set(T.get_columns())):
print('Each table to be merged must have the requested columns')
print('Table', i, 'is missing columns:', set(columns)-set(T.get_columns()))
print('columns', columns)
print('T.columns', T.get_columns())
assert(False)
cols = columns
N = sum([len(T) for T in TT])
td = tabledata()
for col in cols:
if col.startswith('_'):
continue
if columns == 'fillzero':
vv = []
# Handle NxD(xEx...) arrays: find first extant array, record its shape
v0 = None
for T in TT:
if col in T.get_columns():
v0 = T.get(col)
break
shape = v0.shape[1:]
for T in TT:
if col in T.get_columns():
vv.append(T.get(col))
else:
vv.append(np.zeros((len(T),)+shape, types[col]))
V = np.concatenate(vv)
td.set(col, V)
continue
v0 = TT[0].getcolumn(col)
if isinstance(v0, np.ndarray):
V = np.concatenate([T.getcolumn(col) for T in TT])
elif type(v0) is list:
V = v0
for T in TT[1:]:
V.extend(T.getcolumn(col))
elif np.isscalar(v0):
#print('merge_tables: copying scalar from first table:', col, '=', v0)
V = v0
else:
raise RuntimeError("pyfits_utils.merge_tables: Don't know how to concatenate type: %s" % str(type(v0)))
td.set(col, V)
#td._columns = cols
assert(td._length == N)
return td
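# A minimal usage sketch; column names and values are illustrative assumptions.
# Tables sharing the same columns are concatenated row-wise into a new tabledata.
def _demo_merge_tables():
    t1 = tabledata()
    t1.set('x', np.array([1, 2, 3]))
    t2 = tabledata()
    t2.set('x', np.array([4, 5]))
    merged = merge_tables([t1, t2])
    return len(merged), merged.x  # (5, array([1, 2, 3, 4, 5]))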
def add_nonstructural_headers(fromhdr, tohdr):
for card in fromhdr.cards:
if ((card.keyword in ['SIMPLE','XTENSION', 'BITPIX', 'END', 'PCOUNT', 'GCOUNT',
'TFIELDS',]) or
card.keyword.startswith('NAXIS') or
card.keyword.startswith('TTYPE') or
card.keyword.startswith('TFORM')):
#card.key.startswith('TUNIT') or
#card.key.startswith('TDISP')):
#print('skipping card', card.key)
continue
cl = tohdr
if 'END' in cl.keys():
i = cl.index_of('END')
else:
i = len(cl)
cl.insert(i, pyfits.Card(card.keyword, card.value, card.comment))
def cut_array(val, I, name=None, to=None):
if type(I) is slice:
if to is None:
return val[I]
else:
val[I] = to
return
if isinstance(val, (np.ndarray, np.core.defchararray.chararray)):
# You can't slice a two-dimensional, length-zero, numpy array,
# with an empty array.
if len(val) == 0:
return val
if to is None:
# Indexing an array with an empty index array works, but
# ONLY if it is of integer or bool type.
# Check for __len__ because "I" can be a plain int too.
if hasattr(I, '__len__') and len(I) == 0:
return np.array([], val.dtype)
return val[I]
else:
val[I] = to
return
    inttypes = [int, np.int64, np.int32]  # builtin int also covers the removed np.int alias
if type(val) in [list,tuple] and type(I) in inttypes:
if to is None:
return val[I]
else:
val[I] = to
return
# HACK -- emulate numpy's boolean and int array slicing
# (when "val" is a normal python list)
    if type(I) is np.ndarray and hasattr(I, 'dtype') and ((I.dtype.type in [bool, np.bool_])
                                                          or (I.dtype == bool)):
try:
if to is None:
return [val[i] for i,b in enumerate(I) if b]
else:
for i,(b,t) in enumerate(zip(I,to)):
if b:
val[i] = t
return
except:
print('Failed to slice field', name)
#setattr(rtn, name, val)
#continue
if type(I) is np.ndarray and all(I.astype(int) == I):
if to is None:
return [val[i] for i in I]
else:
#[val[i] = t for i,t in zip(I,to)]
for i,t in zip(I,to):
val[i] = t
if (np.isscalar(I) and hasattr(I, 'dtype') and
I.dtype in inttypes):
if to is None:
return val[int(I)]
else:
val[int(I)] = to
return
if hasattr(I, '__len__') and len(I) == 0:
return []
print('Error slicing array:')
print('array is')
print(' type:', type(val))
print(' ', val)
print('cut is')
print(' type:', type(I))
print(' ', I)
raise Exception('Error in cut_array')
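# A minimal usage sketch of the numpy-style slicing emulation for plain lists;
# the values below are illustrative assumptions.
def _demo_cut_array():
    names = ['a', 'b', 'c', 'd']
    keep = np.array([True, False, True, False])
    return cut_array(names, keep, name='names')  # -> ['a', 'c']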
class tabledata(object):
class td_iter(object):
def __init__(self, td):
self.td = td
self.i = 0
def __iter__(self):
return self
def next(self):
if self.i >= len(self.td):
raise StopIteration
X = self.td[self.i]
self.i += 1
return X
# py3
__next__ = next
def __init__(self, header=None):
self._length = 0
self._header = header
self._columns = []
def __str__(self):
return 'tabledata object with %i rows and %i columns' % (len(self), len([k for k in self.__dict__.keys() if not k.startswith('_')]))
def __repr__(self):
if len(self) == 1:
vals = []
for k in self.columns():
v = self.get(k)
if (not np.isscalar(v)) and len(v) == 1:
v = v[0]
vals.append(v)
return '<tabledata object with %i rows and %i columns: %s>' % (
len(self), len(self.columns()), ', '.join(['%s=%s' % (k,v) for k,v in zip(self.columns(), vals)]))
return '<tabledata object with %i rows and %i columns: %s>' % (
len(self), len(self.columns()), ', '.join(self.columns()))
def about(self):
keys = [k for k in self.__dict__.keys() if not k.startswith('_')]
print('tabledata object with %i rows and %i columns:' % (len(self), len(keys)))
keys.sort()
for k in keys:
print(' ', k, end=' ')
v = self.get(k)
print('(%s)' % (str(type(v))), end=' ')
if np.isscalar(v):
print(v, end=' ')
elif hasattr(v, 'shape'):
print('shape', v.shape, end=' ')
elif hasattr(v, '__len__'):
print('length', len(v), end=' ')
else:
print(v, end=' ')
if hasattr(v, 'dtype'):
print('dtype', v.dtype, end='')
print()
def __setattr__(self, name, val):
object.__setattr__(self, name, val)
#print('set', name, 'to', val)
if (self._length == 0) and (not (name.startswith('_'))) and hasattr(val, '__len__') and len(val) != 0 and type(val) != str:
self._length = len(val)
if hasattr(self, '_columns') and not name in self._columns:
self._columns.append(name)
def set(self, name, val):
self.__setattr__(name, val)
def getcolumn(self, name):
try:
return self.__dict__[name]
except KeyError:
# try case-insensitive
for k,v in self.__dict__.items():
if k.lower() == name.lower():
return v
raise
#except:
# return self.__dict__[name.lower()]
def get(self, name):
return self.getcolumn(name)
# Returns the list of column names, as they were ordered in the input FITS or text table.
def get_columns(self, internal=False):
if internal:
return self._columns[:]
return [x for x in self._columns if not x.startswith('_')]
# Returns the original FITS header.
def get_header(self):
return self._header
def to_dict(self):
return dict([(k,self.get(k)) for k in self.columns()])
def to_np_arrays(self):
for col in self.get_columns():
self.set(col, np.array(self.get(col)))
def columns(self):
return [k for k in self.__dict__.keys() if not k.startswith('_')]
def __len__(self):
return self._length
def delete_column(self, c):
del self.__dict__[c]
self._columns.remove(c)
def rename(self, c_old, c_new):
setattr(self, c_new, getattr(self, c_old))
self.delete_column(c_old)
def __setitem__(self, I, O):
#### TEST
for name,val in self.__dict__.items():
if name.startswith('_'):
continue
cut_array(val, I, name, to=O.get(name))
return
####
if type(I) is slice:
print('I:', I)
# HACK... "[:]" -> slice(None, None, None)
if I.start is None and I.stop is None and I.step is None:
I = np.arange(len(self))
else:
I = np.arange(I.start, I.stop, I.step)
for name,val in self.__dict__.items():
if name.startswith('_'):
continue
# ?
if np.isscalar(val):
self.set(name, O.get(name))
continue
try:
val[I] = O.get(name)
except Exception:
# HACK -- emulate numpy's boolean and int array slicing...
ok = False
if not ok:
print('Error in slicing an astrometry.util.pyfits_utils.table_data object:')
import pdb; pdb.set_trace()
print('While setting member:', name)
print(' setting elements:', I)
print(' from obj', O)
print(' target type:', type(O.get(name)))
print(' dest type:', type(val))
print('index type:', type(I))
if hasattr(I, 'dtype'):
print(' index | |
import math
import torch
import copy
import torch.nn as nn
import torch.nn.functional as F
from transformers import BertTokenizer, BertModel
def linear_block(input_dim, hidden_dim):
linear = nn.Sequential(
nn.Linear(input_dim, hidden_dim),
nn.LeakyReLU(0.5))
return linear
class MLP(nn.Module):
def __init__(self, input_dim, hidden_dim, num_layers):
super(MLP, self).__init__()
self.num_layers = num_layers
self.hidden_size = hidden_dim
layers = []
for i in range(num_layers-1):
layers.extend(
linear_block(hidden_dim if i> 0 else input_dim, hidden_dim)
)
layers.extend([nn.Linear(hidden_dim, input_dim)])
self.model = nn.Sequential(*layers)
        ## initialize the model
for m in self.modules():
if isinstance(m, nn.Linear):
nn.init.kaiming_uniform_(m.weight, a=math.sqrt(5))
fan_in,_ = nn.init._calculate_fan_in_and_fan_out(m.weight)
bound = 1/math.sqrt(fan_in)
nn.init.uniform_(m.bias, -bound, bound)
def forward(self,x):
out = self.model(x)
return out
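# A minimal usage sketch; sizes are illustrative assumptions.
# The MLP maps input_dim -> hidden_dim (num_layers - 1 blocks) -> back to input_dim.
def _demo_mlp():
    mlp = MLP(input_dim=300, hidden_dim=128, num_layers=3)
    x = torch.randn(8, 300)  # a batch of 8 embedding vectors
    return mlp(x).shape      # torch.Size([8, 300])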
class SDSN(nn.Module):
"""docstring for SDSNA"""
# Replace simple dot product with SDSNA
# Scoring Lexical Entailment with a supervised directional similarity network
    def __init__(self, hidden_dim, num_layers):
        super(SDSN, self).__init__()
self.emb_dim = 300
self.hidden_dim = hidden_dim
self.num_layers = num_layers
self.map_linear_left = self.mlp(self.emb_dim, self.hidden_dim, self.num_layers)
self.map_linear_right = self.mlp(self.emb_dim, self.hidden_dim, self.num_layers)
self.final_linear = nn.Linear(2 * self.hidden_dim + self.emb_dim, 1)
def init_embs(self, w2v_weight):
self.embs = nn.Embedding.from_pretrained(w2v_weight, freeze=True)
def forward(self, inputs):
batch_size, _ = inputs.size()
left_w2v = self.embs(inputs[:,0])
right_w2v = self.embs(inputs[:,1])
left_trans = self.map_linear_left(left_w2v)
right_trans = self.map_linear_right(right_w2v)
def mlp(self, input_dim, hidden_dim, num_layers):
layers = []
for i in range(num_layers-1):
layers.extend(
linear_block(hidden_dim if i> 0 else input_dim, hidden_dim)
)
layers.extend([nn.Linear(hidden_dim, input_dim)])
return nn.Sequential(*layers)
class Word2Score(nn.Module):
"""docstring for Word2Score"""
def __init__(self, hidden_dim, num_layers):
super(Word2Score, self).__init__()
self.emb_dim = 300
self.hidden_dim = hidden_dim
self.num_layers = num_layers
self.map_linear_left = self.mlp(self.emb_dim, self.hidden_dim, self.num_layers)
self.map_linear_right = self.mlp(self.emb_dim, self.hidden_dim, self.num_layers)
def init_emb(self, w2v_weight):
self.embs = nn.Embedding.from_pretrained(w2v_weight, freeze=True)
def mlp(self, input_dim, hidden_dim, num_layers):
layers = []
for i in range(num_layers-1):
layers.extend(
linear_block(hidden_dim if i> 0 else input_dim, hidden_dim)
)
layers.extend([nn.Linear(hidden_dim, input_dim)])
return nn.Sequential(*layers)
def forward(self, inputs):
# inputs: [batch_size, 2]
batch_size, _ = inputs.size()
left_w2v = self.embs(inputs[:,0])
right_w2v = self.embs(inputs[:,1])
left_trans = self.map_linear_left(left_w2v)
right_trans = self.map_linear_right(right_w2v)
output = torch.einsum('ij,ij->i', [left_trans, right_trans])
left_norm = torch.norm(left_trans, dim=1).sum()
right_norm = torch.norm(right_trans, dim=1).sum()
return output, (left_norm+right_norm)
def inference(self, left_w2v, right_w2v):
left_trans = self.map_linear_left(left_w2v)
right_trans = self.map_linear_right(right_w2v)
output = torch.einsum('ij,ij->i', [left_trans, right_trans])
return output
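# A minimal usage sketch; vocabulary size and indices are illustrative assumptions.
def _demo_word2score():
    model = Word2Score(hidden_dim=128, num_layers=2)
    model.init_emb(torch.randn(1000, 300))   # pretrained word vectors, kept frozen
    pairs = torch.randint(0, 1000, (4, 2))   # four (left, right) word-index pairs
    scores, norm = model(pairs)              # scores: [4], norm: scalar regulariser term
    return scores, norm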
class MEAN_Max(nn.Module):
"""docstring for MEAN"""
def __init__(self, input_dim, hidden_dim):
super(MEAN_Max, self).__init__()
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.dropout_layer = nn.Dropout(0)
self.output_layer = nn.Sequential(
nn.Linear(input_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, input_dim)
)
def forward(self, embed_input_left, embed_input_right):
# input: [batch, context, seq, emb]
batch_size, num_context, seqlen, emb_dim = embed_input_left.size()
# [batch, context, seq, emb]
embed_input_left = self.dropout_layer(embed_input_left)
embed_input_right = self.dropout_layer(embed_input_right)
oe = torch.cat((embed_input_left, embed_input_right), 2)
oe = oe.mean(2)
oe = self.output_layer(oe)
oe = oe.max(1)[0]
return oe
class MEAN(nn.Module):
"""docstring for MEAN"""
def __init__(self, input_dim, hidden_dim):
super(MEAN, self).__init__()
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.dropout_layer = nn.Dropout(0)
self.output_layer = nn.Sequential(
nn.Linear(input_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, input_dim)
)
def forward(self, embed_input_left, embed_input_right):
# input: [batch, context, seq, emb]
batch_size, num_context, seqlen, emb_dim = embed_input_left.size()
# [batch, context, seq, emb]
embed_input_left = self.dropout_layer(embed_input_left)
embed_input_right = self.dropout_layer(embed_input_right)
oe = torch.cat((embed_input_left, embed_input_right), 2)
oe = oe.mean(2)
oe = self.output_layer(oe)
oe = oe.mean(1)
return oe
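# A minimal shape sketch for the context encoders above; all sizes are illustrative assumptions.
# Both arguments are [batch, num_context, seq_len, emb_dim] context-embedding tensors.
def _demo_mean_encoder():
    enc = MEAN(input_dim=300, hidden_dim=128)
    left = torch.randn(2, 5, 10, 300)
    right = torch.randn(2, 5, 10, 300)
    return enc(left, right).shape  # torch.Size([2, 300])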
class LSTM(nn.Module):
"""docstring for LSTM"""
def __init__(self, input_dim, hidden_dim):
super(LSTM, self).__init__()
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.dropout_layer = nn.Dropout(p=0)
self.left_context_encoder = nn.LSTM(input_dim, hidden_dim, 1, batch_first=True)
self.right_context_encoder = nn.LSTM(input_dim, hidden_dim, 1, batch_first=True)
self.output_layer = nn.Sequential(
nn.Linear(hidden_dim*2, hidden_dim*2),
nn.ReLU(),
nn.Linear(hidden_dim*2, input_dim)
)
def forward(self, embed_input_left, embed_input_right):
# input: [batch, context, seq, emb]
batch_size, num_context, seqlen, emb_dim = embed_input_left.size()
# [batch, context, seq, dim]
embed_input_left = embed_input_left.view(-1, seqlen, self.input_dim)
embed_input_left = self.dropout_layer(embed_input_left)
embed_input_right = embed_input_right.view(-1, seqlen, self.input_dim)
embed_input_right = self.dropout_layer(embed_input_right)
# hidden = (torch.zeros(1, batch_size*num_context, self.hidden_dim),
# torch.zeros(1, batch_size*num_context, self.hidden_dim))
output_left, (final_hidden_state_left, final_cell_state_left) = self.left_context_encoder(embed_input_left) #, hidden)
        output_right, (final_hidden_state_right, final_cell_state_right) = self.right_context_encoder(embed_input_right) #, hidden)
encode_context_left = final_hidden_state_left.view(-1, num_context, self.hidden_dim)
encode_context_right = final_hidden_state_right.view(-1, num_context, self.hidden_dim)
# concat + mean_pooling + fully_connect
oe = torch.cat((encode_context_left, encode_context_right), 2)
oe = self.output_layer(oe)
oe = oe.mean(1)
return oe
class SelfAttention(nn.Module):
"""docstring for SelfAttention"""
def __init__(self, input_dim, hidden_dim):
super(SelfAttention, self).__init__()
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.dropout_layer = nn.Dropout(0)
self.att_w = nn.Linear(input_dim, hidden_dim)
self.att_v = nn.Parameter(torch.rand(hidden_dim))
self.output_layer = nn.Sequential(
nn.Linear(hidden_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, input_dim)
)
def forward(self, embed_input_left, embed_input_right):
batch_size, num_context, seqlen, emb_dim = embed_input_left.size()
# [batch, context, seq, dim]
embed_input_left = self.dropout_layer(embed_input_left)
embed_input_right = self.dropout_layer(embed_input_right)
# [batch_size, context_num, seq_length, dim]
left_right_context = torch.cat((embed_input_left, embed_input_right),2)
#print(left_right_context.size())
att_weight = torch.matmul(self.att_w(left_right_context), self.att_v)
att_weight = nn.functional.softmax(att_weight, dim=2).view(batch_size, num_context, 2*seqlen, 1)
#print(att_weight.size())
oe = (left_right_context * att_weight).sum(2)
oe = self.output_layer(oe)
oe = oe.mean(1)
return oe ,att_weight
class HierAttention(nn.Module):
def __init__(self, input_dim, hidden_dim):
super(HierAttention, self).__init__()
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.dropout_layer = nn.Dropout(0)
self.att_w = nn.Linear(input_dim, hidden_dim)
self.att_v = nn.Parameter(torch.rand(hidden_dim))
self.att_h = nn.Linear(input_dim, hidden_dim)
self.att_hv = nn.Parameter(torch.rand(hidden_dim))
self.output_layer = nn.Sequential(
nn.Linear(input_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, input_dim)
)
def forward(self, embed_input_left, embed_input_right):
batch_size, num_context, seqlen, emb_dim = embed_input_left.size()
# [batch, context, seq, dim]
embed_input_left = self.dropout_layer(embed_input_left)
embed_input_right = self.dropout_layer(embed_input_right)
# [batch_size, context_num, seq_length, dim]
left_right_context = torch.cat((embed_input_left, embed_input_right),2)
#print(left_right_context.size())
att_weight = torch.matmul(self.att_w(left_right_context), self.att_v)
att_weight = nn.functional.softmax(att_weight, dim=2).view(batch_size, num_context, 2*seqlen, 1)
oe = (left_right_context * att_weight).sum(2)
#print(oe.size())
hier_att_weight = torch.matmul(self.att_h(oe), self.att_hv)
#print(hier_att_weight.size())
hier_att_weight = nn.functional.softmax(hier_att_weight, dim=1).view(batch_size, num_context, 1)
#print(hier_att_weight.size())
oe = (oe * hier_att_weight).sum(1)
oe = self.output_layer(oe)
return oe, att_weight, hier_att_weight
class HierAttentionEnsemble(nn.Module):
def __init__(self, input_dim, hidden_dim):
        super(HierAttentionEnsemble, self).__init__()
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.dropout_layer = nn.Dropout(0)
self.att_w = nn.Linear(input_dim, hidden_dim)
self.att_v = nn.Parameter(torch.rand(hidden_dim))
self.att_h = nn.Linear(input_dim, hidden_dim)
self.att_hv = nn.Parameter(torch.rand(hidden_dim))
self.output_layer = nn.Sequential(
nn.Linear(input_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, input_dim)
)
def forward(self, embed_input_left, embed_input_right):
batch_size, num_context, seqlen, emb_dim = embed_input_left.size()
# [batch, context, seq, dim]
embed_input_left = self.dropout_layer(embed_input_left)
embed_input_right = self.dropout_layer(embed_input_right)
# [batch_size, context_num, seq_length, dim]
left_right_context = torch.cat((embed_input_left, embed_input_right),2)
#print(left_right_context.size())
att_weight = torch.matmul(self.att_w(left_right_context), self.att_v)
att_weight = nn.functional.softmax(att_weight, dim=2).view(batch_size, num_context, 2*seqlen, 1)
oe = (left_right_context * att_weight).sum(2)
#print(oe.size())
hier_att_weight = torch.matmul(self.att_h(oe), self.att_hv)
#print(hier_att_weight.size())
hier_att_weight = nn.functional.softmax(hier_att_weight, dim=1).view(batch_size, num_context, 1)
#print(hier_att_weight.size())
oe = (oe * hier_att_weight).sum(1)
oe = self.output_layer(oe)
return oe, att_weight, hier_att_weight
class ATTENTION(nn.Module):
"""docstring for ATTENTION"""
def __init__(self, input_dim, hidden_dim):
super(ATTENTION, self).__init__()
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.dropout_layer = nn.Dropout(0)
self.left_context_encoder = nn.LSTM(input_dim, hidden_dim, 1, batch_first=True)
self.right_context_encoder = nn.LSTM(input_dim, hidden_dim, 1, batch_first=True)
self.att_w = nn.Linear(hidden_dim*2, hidden_dim)
self.att_v = nn.Parameter(torch.rand(hidden_dim))
self.output_layer = nn.Sequential(
nn.Linear(hidden_dim*2, hidden_dim*2),
nn.ReLU(),
nn.Linear(hidden_dim*2, input_dim)
)
def forward(self, embed_input_left, embed_input_right):
# input: [batch, context, seq, emb]
batch_size, num_context, seqlen, emb_dim = embed_input_left.size()
# [batch, context, seq, dim] -> [batch*context, seq, dim]
embed_input_left = embed_input_left.view(-1, seqlen, self.input_dim)
embed_input_left = self.dropout_layer(embed_input_left)
embed_input_right = embed_input_right.view(-1, seqlen, self.input_dim)
embed_input_right = self.dropout_layer(embed_input_right)
# hidden = (torch.zeros(1, batch_size*num_context, self.hidden_dim),
# torch.zeros(1, batch_size*num_context, self.hidden_dim))
output_left, (final_hidden_state_left, final_cell_state_left) = self.left_context_encoder(embed_input_left) #, hidden)
        output_right, (final_hidden_state_right, final_cell_state_right) = self.right_context_encoder(embed_input_right) #, hidden)
encode_context_left = final_hidden_state_left.view(-1, num_context, self.hidden_dim)
encode_context_right = final_hidden_state_right.view(-1, num_context, self.hidden_dim)
# concat + mean_pooling + fully_connect
oe = torch.cat((encode_context_left, encode_context_right), 2)
        #print(oe.size())
        att_weight = torch.matmul(self.att_w(oe), self.att_v)
        #print(att_weight.size())
        att_weight = nn.functional.softmax(att_weight, dim=1).view(batch_size, num_context, 1)
        #print(att_weight.size())
        oe = (oe * att_weight).sum(1)
        #print("--------")
oe = self.output_layer(oe)
return oe
class BertEncoder(nn.Module):
def __init__(self, bert_dir, model_type="base"):
super(BertEncoder, self).__init__()
self.model_type = model_type
self.model = BertModel.from_pretrained(bert_dir)
self.set_finetune("full")
def set_finetune(self, finetune_type):
if finetune_type == "none":
for param in self.model.parameters():
param.requires_grad = False
elif finetune_type == "full":
for param in self.model.parameters():
param.requires_grad = True
elif finetune_type == "last":
            for param in self.model.parameters():
                param.requires_grad = False
            for param in self.model.encoder.layer[-1].parameters():
                param.requires_grad = True
def forward(self, input_ids, mask=None):
# [batch_size, context_num, seq_length]
batch_size, context_num, seq_length = input_ids.size()
flat_input_ids = input_ids.reshape(-1, input_ids.size(-1))
flat_mask = mask.reshape(-1, mask.size(-1))
pooled_cls = self.model(input_ids = flat_input_ids, attention_mask=flat_mask)[1]
# [batch_size * context_num, dim]
#print(pooled_cls.size())
reshaped_pooled_cls = pooled_cls.view(batch_size, context_num, -1)
# [batch_size, context_num, dim]
output = reshaped_pooled_cls.mean(1)
# [batch_size, dim]
return output
def get_output_dim(self):
if self.model_type == "large":
return 1024
else:
return 768
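# A minimal usage sketch; the checkpoint name and tensor sizes are illustrative assumptions.
def _demo_bert_encoder():
    enc = BertEncoder("bert-base-uncased")           # any BERT checkpoint directory works
    input_ids = torch.randint(0, 30000, (2, 3, 16))  # [batch, context_num, seq_length]
    mask = torch.ones(2, 3, 16, dtype=torch.long)
    return enc(input_ids, mask).shape                # pooled CLS averaged over contexts: [2, 768]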
class Bert2Score(nn.Module):
def __init__(self, encoder, bert_dir, hidden_dim, drop_prob):
super(Bert2Score, self).__init__()
self.hidden_dim = hidden_dim
if "large" in encoder:
self.encoder = BertEncoder(bert_dir, "large")
else:
self.encoder = BertEncoder(bert_dir)
bert_dim = self.encoder.get_output_dim()
self.mlp1 = nn.Linear(bert_dim, hidden_dim)
self.mlp2 = nn.Linear(bert_dim, hidden_dim)
self.dropout = nn.Dropout(drop_prob)
def forward(self, input_ids, masks):
## input: [batch_size, 2, context, seq]
left_ids = input_ids[:,0,:,:]
right_ids = input_ids[:,1,:,:]
left_masks = masks[:,0,:,:]
right_masks = masks[:,1,:,:]
left_emb = self.encoder(left_ids, left_masks)
right_emb = self.encoder(right_ids, right_masks)
# [batch_size, hidden_dim]
| |
"""
    Script for reconstructing RELION compatible particles from full reconstructed tomograms
Input: - STAR file with next columns:
'_rlnMicrographName': tomogram that will be used for reconstruction
'_rlnImageName': tomograms used for picking
'_rlnCtfImage': (optional) CTF model subvolume
'_psSegImage': (optional) mask for particles within the reconstructed tomograms
'_rlnCoordinate{X,Y,Z}': {X,Y,Z} coordinates in Relion format
'_rlnAngle{Rot,Tilt,Psi}': (optional) {Rot,Tilt,Psi} angles
- Path to CTF subvolume, required in case not already included
- Particles gray-value pre-processing settings
Output: - The reconstructed particles
- An output particles STAR files
"""
__author__ = '<NAME>'
# ################ Package import
import gc
import sys
import time
import copy
import random
import pyseg as ps
import numpy as np
import multiprocessing as mp
from pyseg import sub, pexceptions
########## Global variables
ANGLE_NAMES = ['Rot', 'Tilt', 'Psi']
########################################################################################
# PARAMETERS
########################################################################################
####### Input data
ROOT_PATH = '../../../..'
# Input STAR file
in_star = ROOT_PATH + '/data/tutorials/synth_sumb/pick/out/fil_mb_sources_to_no_mb_targets_net_parts.star'
in_ctf = ROOT_PATH + '/data/tutorials/synth_sumb/rec/wedge_130_60.mrc'
# Required if '_psSegImage' not in input STAR
in_mask_norm = ROOT_PATH + '/data/tutorials/synth_sumb/rec/mask_sph_130_60.mrc'
####### Output data
out_part_dir = ROOT_PATH + '/data/tutorials/synth_sumb/rec/particles'
out_star = ROOT_PATH + '/data/tutorials/synth_sumb/rec/particles_rln.star'
####### Particles pre-processing settings
do_bin = 4 # 1
do_ang_prior = ['Tilt', 'Psi'] # ['Rot', 'Tilt', 'Psi']
do_ang_rnd = ['Rot']
do_noise = False
do_use_fg = True
do_norm = True
do_inv = True
####### Multiprocessing settings
mp_npr = 5 # 10
########################################################################################
# Local functions
########################################################################################
class Settings(object):
out_part_dir = None
out_star = None
do_bin = None
do_inv = False
do_ang_prior = None
do_ang_rnd = None
do_noise = None
do_use_fg = None
do_norm = None
in_mask_norm = None
in_ctf = None
def pr_worker(pr_id, star, sh_star, rows, settings, qu):
"""
    Function which implements the functionality for the parallel workers.
    Each worker processes a pre-split subset of rows of the input Star object
    :param pr_id: process ID
    :param star: Star object with input information
    :param sh_star: shared output Star object
    :param rows: list with Star rows to process for the worker
    :param settings: object with the settings
    :param qu: queue to store the output Star object
    :return: stores the reconstructed particles on disk and puts a Star object with the
    corresponding entries on the output queue
"""
# Mapping settings
out_part_dir = settings.out_part_dir
do_ang_prior = settings.do_ang_prior
do_ang_rnd = settings.do_ang_rnd
do_noise = settings.do_noise
do_use_fg = settings.do_use_fg
do_norm = settings.do_norm
do_inv = settings.do_inv
in_mask_norm = settings.in_mask_norm
hold_ctf = settings.in_ctf
tomo_bin = settings.do_bin
if in_mask_norm is not None:
mask_norm = ps.disperse_io.load_tomo(in_mask_norm)
# Making a copy of the shared object
rln_star = copy.deepcopy(sh_star)
# print '\tLoop for particles: '
count, n_rows = 0, len(rows)
for row in rows:
# print '\t\t\t+Reading the entry...'
# in_pick_tomo = star.get_element('_rlnImageName', row)
in_rec_tomo = star.get_element('_rlnMicrographName', row)
if hold_ctf is not None:
in_ctf = hold_ctf
else:
in_ctf = star.get_element('_rlnCtfImage', row)
x_pick = star.get_element('_rlnCoordinateX', row)
y_pick = star.get_element('_rlnCoordinateY', row)
z_pick = star.get_element('_rlnCoordinateZ', row)
rot, tilt, psi = None, None, None
rot_prior, tilt_prior, psi_prior = None, None, None
if star.has_column('_rlnAngleRot'):
rot = star.get_element('_rlnAngleRot', row)
if ANGLE_NAMES[0] in do_ang_prior:
rot_prior = rot
if ANGLE_NAMES[0] in do_ang_rnd:
rot = 180. * random.random()
if star.has_column('_rlnAngleTilt'):
tilt = star.get_element('_rlnAngleTilt', row)
if ANGLE_NAMES[1] in do_ang_prior:
tilt_prior = tilt
if ANGLE_NAMES[1] in do_ang_rnd:
tilt = 180. * random.random()
if star.has_column('_rlnAnglePsi'):
psi = star.get_element('_rlnAnglePsi', row)
if ANGLE_NAMES[2] in do_ang_prior:
psi_prior = psi
if ANGLE_NAMES[2] in do_ang_rnd:
psi = 180. * random.random()
# print '\t\t\t+Pre-processing input subvolume models...'
ctf_svol = ps.disperse_io.load_tomo(in_ctf, mmap=True)
sv_size, seg_svol = ctf_svol.shape, None
if do_noise and star.has_column('_psSegImage'):
in_seg = star.get_element('_psSegImage', row)
seg_svol = ps.disperse_io.load_tomo(in_seg, mmap=False) > 0
if sv_size != seg_svol.shape:
                print('ERROR: CTF model subvolume "' + in_ctf + '" and segmentation "' + \
                      in_seg + '" sizes do not match.')
print('Unsuccessfully terminated. (' + time.strftime("%c") + ')')
sys.exit(-1)
noise_mode = 'bg'
if do_use_fg: noise_mode = 'fg'
# print '\t\t\t+Reconstructing particle subvolume...'
rec_tomo = ps.disperse_io.load_tomo(in_rec_tomo, mmap=True)
# pick_tomo = ps.disperse_io.load_tomo(in_pick_tomo, mmap=True)
# tomo_bin = max(rec_tomo.shape) / max(pick_tomo.shape)
# x_rln, y_rln, z_rln = y_pick * tomo_bin, x_pick * tomo_bin, z_pick * tomo_bin
x_rln, y_rln, z_rln = x_pick * tomo_bin, y_pick * tomo_bin, z_pick * tomo_bin
try:
part_svol = ps.globals.get_sub_copy(rec_tomo, (x_rln, y_rln, z_rln), sv_size).astype(np.float32)
except ValueError:
print('\t\t\tWARNING: This particle was not reconstructed properly, Value Error raised!')
continue
if part_svol.shape != sv_size:
print('\t\t\tWARNING: This particle was not reconstructed properly ' + \
'(usually because is close to tomogram border), skipping to next...')
continue
if seg_svol is not None:
# print '\t\t\t+Padding BG with noise...'
part_svol = ps.globals.randomize_voxel_mask(part_svol, seg_svol, noise_mode)
if do_norm:
# print '\t\t\t+Gray-values normalization...'
if in_mask_norm is None:
part_svol = ps.sub.relion_norm(part_svol, seg_svol, inv=do_inv)
else:
part_svol = ps.sub.relion_norm(part_svol, mask_norm, inv=do_inv)
# Adding entry to particles STAR file
out_part = out_part_dir + '/particle_rln_' + str(row) + '.mrc'
ps.disperse_io.save_numpy(part_svol, out_part)
# Writing in the shared object
# print '\t\t-Process[' + str(pr_id) + '], Particle [' + str(count) + '/' + str(n_rows) + ']: ' + out_part
part_row = {'_rlnMicrographName': in_rec_tomo,
'_rlnCtfImage': in_ctf,
'_rlnImageName': out_part,
'_rlnCoordinateX': x_rln,
'_rlnCoordinateY': y_rln,
'_rlnCoordinateZ': z_rln}
if rln_star.has_column('_rlnAngleRot'):
part_row['_rlnAngleRot'] = rot
if rln_star.has_column('_rlnAngleTilt'):
part_row['_rlnAngleTilt'] = tilt
if rln_star.has_column('_rlnAnglePsi'):
part_row['_rlnAnglePsi'] = psi
if rln_star.has_column('_rlnAngleRotPrior'):
part_row['_rlnAngleRotPrior'] = rot_prior
if rln_star.has_column('_rlnAngleTiltPrior'):
part_row['_rlnAngleTiltPrior'] = tilt_prior
if rln_star.has_column('_rlnAnglePsiPrior'):
part_row['_rlnAnglePsiPrior'] = psi_prior
rln_star.add_row(**part_row)
count += 1
# Finishing the process
qu.put(rln_star)
sys.exit(pr_id)
########################################################################################
# MAIN ROUTINE
########################################################################################
# Print initial message
print('Reconstructing RELION compatible particles from full reconstructed tomograms.')
print('\tAuthor: ' + __author__)
print('\tDate: ' + time.strftime("%c") + '\n')
print('Options:')
print('\tInput STAR file: ' + in_star)
if in_ctf is not None:
print('\tInput CTF file: ' + in_ctf)
print('\tOutput directory for reconstructed particles: ' + out_part_dir)
print('\tOutput STAR file: ' + out_star)
print('\tParticles pre-processing settings: ')
if do_bin > 0:
print('\t\t-Particles picked with binning: ' + str(do_bin))
if len(do_ang_prior) > 0:
for ang_prior in do_ang_prior:
if ang_prior not in ['Rot', 'Tilt', 'Psi']:
print('ERROR: unrecognized angle: ' + ang_prior)
print('Unsuccessfully terminated. (' + time.strftime("%c") + ')')
sys.exit(-1)
print('\t\t-Adding prior for angles: ' + ang_prior)
if len(do_ang_rnd) > 0:
for ang_rnd in do_ang_rnd:
if ang_rnd not in ['Rot', 'Tilt', 'Psi']:
print('ERROR: unrecognized angle: ' + ang_rnd)
print('Unsuccessfully terminated. (' + time.strftime("%c") + ')')
sys.exit(-1)
print('\t\t-Setting random values for angles: ' + ang_rnd)
if do_norm:
print('\t\t-Applying relion normalization: ')
if in_mask_norm is not None:
print('\t\t\t-Tomogram for FG: ' + in_mask_norm)
if do_inv:
print('\t\t-Invert density values.')
if do_noise:
print('\t\t-Set gray-values in background (BG) randomly.')
if do_use_fg:
print('\t\t\t+Take FG values as reference.')
else:
print('\t\t\t+Take BG values as reference.')
print('\tMultiprocessing settings: ')
print('\t\t-Number processes: ' + str(mp_npr))
print('')
print('Loading input STAR file...')
star, rln_star = sub.Star(), sub.Star()
try:
star.load(in_star)
except pexceptions.PySegInputError as e:
print('ERROR: input STAR file could not be loaded because of "' + e.get_message() + '"')
print('Terminated. (' + time.strftime("%c") + ')')
sys.exit(-1)
if (in_ctf is None) and (not star.has_column('_rlnCtfImage')):
    print('ERROR: No CTF specified and the input STAR file does not contain a _rlnCtfImage column')
print('Terminated. (' + time.strftime("%c") + ')')
sys.exit(-1)
print('\tInitializing output relion STAR file: ')
rln_star.add_column(key='_rlnMicrographName')
rln_star.add_column(key='_rlnCtfImage')
rln_star.add_column(key='_rlnImageName')
rln_star.add_column(key='_rlnCoordinateX')
rln_star.add_column(key='_rlnCoordinateY')
rln_star.add_column(key='_rlnCoordinateZ')
if ANGLE_NAMES[0] in do_ang_prior:
if star.has_column(key='_rlnAngleRot'):
rln_star.add_column(key='_rlnAngleRot')
rln_star.add_column(key='_rlnAngleRotPrior')
else:
        print('ERROR: Prior Rot angle cannot be added since there is no Rot angle column in the input STAR file.')
print('Unsuccessfully terminated. (' + time.strftime("%c") + ')')
sys.exit(-1)
if ANGLE_NAMES[1] in do_ang_prior:
if star.has_column(key='_rlnAngleTilt'):
rln_star.add_column(key='_rlnAngleTilt')
rln_star.add_column(key='_rlnAngleTiltPrior')
else:
        print('ERROR: Prior Tilt angle cannot be added since there is no Tilt angle column in the input STAR file.')
print('Unsuccessfully terminated. (' + time.strftime("%c") + ')')
sys.exit(-1)
if ANGLE_NAMES[2] in do_ang_prior:
if star.has_column(key='_rlnAnglePsi'):
rln_star.add_column(key='_rlnAnglePsi')
rln_star.add_column(key='_rlnAnglePsiPrior')
else:
        print('ERROR: Prior Psi angle cannot be added since there is no Psi angle column in the input STAR file.')
print('Unsuccessfully terminated. (' + time.strftime("%c") + ')')
sys.exit(-1)
if ANGLE_NAMES[0] in do_ang_rnd:
if not rln_star.has_column(key='_rlnAngleRot'):
rln_star.add_column(key='_rlnAngleRot')
if ANGLE_NAMES[1] in do_ang_rnd:
if not rln_star.has_column(key='_rlnAngleTilt'):
rln_star.add_column(key='_rlnAngleTilt')
if ANGLE_NAMES[2] in do_ang_rnd:
if not rln_star.has_column(key='_rlnAnglePsi'):
rln_star.add_column(key='_rlnAnglePsi')
if do_norm and (not star.has_column('_psSegImage')) and (in_mask_norm is None):
print('ERROR: Unable to do gray-value normalization: ')
print('\tNeither \'_psSegImage\' column in input STAR file nor \'in_mask_norm\' input set.')
print('Unsuccessfully terminated. (' + time.strftime("%c") + ')')
sys.exit(-1)
print('\tInitializing multiprocessing with ' + str(mp_npr) + ' processes: ')
settings = Settings()
settings.out_part_dir = out_part_dir
settings.out_star = out_star
settings.do_bin = do_bin
settings.do_inv = do_inv
settings.do_ang_prior = do_ang_prior
settings.do_ang_rnd = do_ang_rnd
settings.do_noise = do_noise
settings.do_use_fg = do_use_fg
settings.do_norm = do_norm
settings.in_mask_norm = in_mask_norm
settings.in_ctf = in_ctf
processes = list()
qu = mp.Queue()
spl_ids = np.array_split(list(range(star.get_nrows())), mp_npr)
# Starting the processes
for pr_id in range(mp_npr):
pr = mp.Process(target=pr_worker, args=(pr_id, star, rln_star, spl_ids[pr_id], settings, qu))
pr.start()
processes.append(pr)
# Getting processes results
pr_results, stars = list(), list()
for pr in processes:
stars.append(qu.get())
for pr_id, pr in enumerate(processes):
pr.join()
pr_results.append(pr.exitcode)
if pr_id != pr_results[pr_id]:
print('ERROR: Process ' + str(pr_id) + ' ended incorrectly.')
print('Unsuccessfully terminated. (' + time.strftime("%c") + ')')
sys.exit(-1)
gc.collect()
# Merging output STAR files
rln_merged_star = sub.Star()
keys = stars[0].get_column_keys()
for key in keys:
rln_merged_star.add_column(key)
for star in | |
FAKE_HOST_2
vm_port1 = SamplePort(FAKE_PORT_1)
vm_port2 = SamplePort(FAKE_PORT_2)
vm = VM(FAKE_VM, ([vm_port1, vm_port2]))
event = SampleEvent(ovsvapp_const.VM_CREATED,
FAKE_HOST_1, FAKE_CLUSTER_MOID, vm)
self.agent.state = ovsvapp_const.AGENT_RUNNING
self.agent.sec_br = mock.Mock()
with mock.patch.object(self.agent.sec_br, 'dump_flows_for',
return_value='mock_flow') as mock_dump_flows:
self.agent.process_event(event)
self.assertTrue(mock_dump_flows.called)
for vnic in vm.vnics:
self.assertIn(vnic.port_uuid, self.agent.devices_to_filter)
self.assertIn(vnic.port_uuid, self.agent.cluster_other_ports)
self.assertNotIn(vnic.port_uuid, self.agent.cluster_host_ports)
def test_process_event_vm_create_nics_host(self):
self.agent.esx_hostname = FAKE_HOST_1
vm_port1 = SamplePort(FAKE_PORT_1)
vm_port2 = SamplePort(FAKE_PORT_2)
vm = VM(FAKE_VM, ([vm_port1, vm_port2]))
event = SampleEvent(ovsvapp_const.VM_CREATED,
FAKE_HOST_1, FAKE_CLUSTER_MOID, vm)
self.agent.state = ovsvapp_const.AGENT_RUNNING
self.agent.sec_br = mock.Mock()
with mock.patch.object(self.agent.sec_br, 'dump_flows_for',
return_value='mock_flow') as mock_dump_flows:
self.agent.process_event(event)
self.assertTrue(mock_dump_flows.called)
for vnic in vm.vnics:
self.assertIn(vnic.port_uuid, self.agent.devices_to_filter)
self.assertIn(vnic.port_uuid, self.agent.cluster_host_ports)
self.assertNotIn(vnic.port_uuid, self.agent.cluster_other_ports)
with mock.patch.object(self.agent.sec_br, 'dump_flows_for',
return_value='') as mock_dump_flows, \
mock.patch.object(self.agent.ovsvapp_rpc,
"get_ports_for_device",
return_value=True) as mock_get_ports:
self.agent.process_event(event)
self.assertTrue(mock_dump_flows.called)
self.assertTrue(mock_get_ports.called)
def test_process_event_vm_updated_nonhost(self):
self.agent.esx_hostname = FAKE_HOST_2
vm_port1 = SamplePort(FAKE_PORT_1)
port = self._build_port(FAKE_PORT_1)
self.agent.ports_dict[port['id']] = self.agent._build_port_info(
port)
vm = VM(FAKE_VM, [vm_port1])
event = SampleEvent(ovsvapp_const.VM_UPDATED,
FAKE_HOST_1, FAKE_CLUSTER_MOID, vm, True)
self.agent.state = ovsvapp_const.AGENT_RUNNING
self.agent.tenant_network_types = [p_const.TYPE_VLAN]
self.agent.process_event(event)
self.assertIn(FAKE_PORT_1, self.agent.cluster_other_ports)
def test_process_event_vm_delete_hosted_vm_vlan(self):
self.agent.esx_hostname = FAKE_HOST_1
self.agent.cluster_moid = FAKE_CLUSTER_MOID
self.agent.cluster_host_ports.add(FAKE_PORT_1)
self.agent.tenant_network_types = [p_const.TYPE_VLAN]
port = self._build_port(FAKE_PORT_1)
br = self._build_phys_brs(port)
self.agent.ports_dict[port['id']] = self.agent._build_port_info(
port)
vm_port = SamplePortUIDMac(FAKE_PORT_1, MAC_ADDRESS)
vm = VM(FAKE_VM, ([vm_port]))
event = SampleEvent(ovsvapp_const.VM_DELETED,
FAKE_HOST_1, FAKE_CLUSTER_MOID, vm)
self.agent.state = ovsvapp_const.AGENT_RUNNING
self.agent.net_mgr = fake_manager.MockNetworkManager("callback")
self._build_lvm(port)
self.agent.net_mgr.initialize_driver()
with mock.patch.object(self.agent.net_mgr.get_driver(),
"post_delete_vm",
) as mock_post_del_vm, \
mock.patch.object(self.LOG, 'debug'), \
mock.patch.object(self.agent.net_mgr.get_driver(),
"delete_network") as mock_del_net:
self.agent.process_event(event)
for vnic in vm.vnics:
self.assertNotIn(vnic.port_uuid, self.agent.cluster_host_ports)
self.assertTrue(mock_post_del_vm.called)
self.assertFalse(mock_del_net.called)
self.assertTrue(br.delete_flows.called)
def test_process_event_vm_delete_hosted_vm_vxlan(self):
self.agent.esx_hostname = FAKE_HOST_1
self.agent.cluster_host_ports.add(FAKE_PORT_1)
self.agent.tenant_network_types = [p_const.TYPE_VXLAN]
port = self._build_port(FAKE_PORT_1)
port['network_type'] = p_const.TYPE_VXLAN
self.agent.ports_dict[port['id']] = self.agent._build_port_info(
port)
vm_port = SamplePortUIDMac(FAKE_PORT_1, MAC_ADDRESS)
vm = VM(FAKE_VM, ([vm_port]))
event = SampleEvent(ovsvapp_const.VM_DELETED,
FAKE_HOST_1, FAKE_CLUSTER_MOID, vm)
self.agent.state = ovsvapp_const.AGENT_RUNNING
self.agent.net_mgr = fake_manager.MockNetworkManager("callback")
self.agent.net_mgr.initialize_driver()
with mock.patch.object(self.agent.net_mgr.get_driver(),
"post_delete_vm",
return_value=True) as (post_del_vm):
self.agent.process_event(event)
for vnic in vm.vnics:
self.assertNotIn(vnic.port_uuid, self.agent.cluster_host_ports)
self.assertTrue(post_del_vm.called)
def test_process_event_vm_delete_non_hosted_vm(self):
self.agent.esx_hostname = FAKE_HOST_2
self.agent.cluster_other_ports.add(FAKE_PORT_1)
self.agent.tenant_network_types = [p_const.TYPE_VLAN]
port = self._build_port(FAKE_PORT_1)
self.agent.ports_dict[port['id']] = self.agent._build_port_info(
port)
vm_port = SamplePortUIDMac(FAKE_PORT_1, MAC_ADDRESS)
vm = VM(FAKE_VM, ([vm_port]))
event = SampleEvent(ovsvapp_const.VM_DELETED,
FAKE_HOST_1, FAKE_CLUSTER_MOID, vm)
self.agent.net_mgr = fake_manager.MockNetworkManager("callback")
self.agent.net_mgr.initialize_driver()
self.agent.state = ovsvapp_const.AGENT_RUNNING
with mock.patch.object(self.agent.net_mgr.get_driver(),
"post_delete_vm",
return_value=True) as mock_post_del_vm, \
mock.patch.object(self.agent.net_mgr.get_driver(),
"delete_network") as mock_del_net:
self.agent.process_event(event)
for vnic in vm.vnics:
self.assertNotIn(vnic.port_uuid,
self.agent.cluster_other_ports)
self.assertTrue(mock_post_del_vm.called)
self.assertFalse(mock_del_net.called)
def test_notify_device_added_with_hosted_vm(self):
vm = VM(FAKE_VM, [])
host = FAKE_HOST_1
self.agent.esx_hostname = host
self.agent.state = ovsvapp_const.AGENT_RUNNING
with mock.patch.object(self.agent.ovsvapp_rpc,
"get_ports_for_device",
return_value=True) as mock_get_ports, \
mock.patch.object(self.LOG, 'exception'
) as mock_log_exception, \
mock.patch.object(time, "sleep") as mock_time_sleep:
self.agent._notify_device_added(vm, host)
self.assertTrue(mock_get_ports.called)
self.assertFalse(mock_time_sleep.called)
self.assertFalse(mock_log_exception.called)
def test_notify_device_added_rpc_exception(self):
vm = VM(FAKE_VM, [])
host = FAKE_HOST_1
self.agent.esx_hostname = host
self.agent.state = ovsvapp_const.AGENT_RUNNING
with mock.patch.object(self.agent.ovsvapp_rpc,
"get_ports_for_device",
side_effect=Exception()) as mock_get_ports, \
mock.patch.object(self.LOG, 'exception'
)as mock_log_exception, \
mock.patch.object(time, "sleep") as mock_time_sleep:
self.assertRaises(
error.OVSvAppNeutronAgentError,
self.agent._notify_device_added, vm, host)
self.assertTrue(mock_log_exception.called)
self.assertTrue(mock_get_ports.called)
self.assertFalse(mock_time_sleep.called)
def test_notify_device_added_with_retry(self):
vm = VM(FAKE_VM, [])
host = FAKE_HOST_1
self.agent.esx_hostname = host
self.agent.state = ovsvapp_const.AGENT_RUNNING
with mock.patch.object(self.agent.ovsvapp_rpc,
"get_ports_for_device",
return_value=False) as mock_get_ports, \
mock.patch.object(self.LOG, 'exception'
) as mock_log_exception, \
mock.patch.object(time, "sleep") as mock_time_sleep:
self.agent._notify_device_added(vm, host)
self.assertTrue(mock_get_ports.called)
self.assertTrue(mock_time_sleep.called)
self.assertFalse(mock_log_exception.called)
def test_notify_device_updated_migration_vlan(self):
host = FAKE_HOST_1
self.agent.esx_hostname = host
vm_port1 = SamplePort(FAKE_PORT_1)
vm = VM(FAKE_VM, [vm_port1])
port = self._build_port(FAKE_PORT_1)
self._build_phys_brs(port)
self.agent.ports_dict[port['id']] = self.agent._build_port_info(port)
self._build_lvm(port)
self.agent.state = ovsvapp_const.AGENT_RUNNING
self.agent.tenant_network_types = [p_const.TYPE_VLAN]
self.agent._add_ports_to_host_ports([FAKE_PORT_1])
with mock.patch.object(self.agent.ovsvapp_rpc,
"update_device_binding"
) as mock_update_device_binding, \
mock.patch.object(self.LOG, 'exception'
) as mock_log_exception:
self.agent._notify_device_updated(vm, FAKE_HOST_2, True)
self.assertNotIn(FAKE_PORT_1, self.agent.cluster_host_ports)
self.assertFalse(mock_update_device_binding.called)
self.assertFalse(mock_log_exception.called)
def test_notify_device_update_not_found(self):
host = FAKE_HOST_1
self.agent.esx_hostname = host
vm_port1 = SamplePort(FAKE_PORT_1)
vm = VM(FAKE_VM, [vm_port1])
port = self._build_port(FAKE_PORT_1)
self._build_phys_brs(port)
self._build_lvm(port)
self.agent.state = ovsvapp_const.AGENT_RUNNING
self.agent.tenant_network_types = [p_const.TYPE_VLAN]
br = self.agent.phys_brs[port['physical_network']]['br']
with mock.patch.object(self.agent.ovsvapp_rpc,
"update_device_binding"
):
self.agent._notify_device_updated(vm, host, True)
self.assertFalse(br.add_drop_flows.called)
self.agent.ports_dict[port['id']] = self.agent._build_port_info(port)
with mock.patch.object(self.agent.ovsvapp_rpc,
"update_device_binding"
):
self.agent._notify_device_updated(vm, host, True)
self.assertTrue(br.add_drop_flows.called)
def test_notify_device_updated_host_vlan(self):
host = FAKE_HOST_1
self.agent.esx_hostname = host
vm_port1 = SamplePort(FAKE_PORT_1)
vm = VM(FAKE_VM, [vm_port1])
port = self._build_port(FAKE_PORT_1)
self._build_phys_brs(port)
self.agent.ports_dict[port['id']] = self.agent._build_port_info(port)
self._build_lvm(port)
self.agent.state = ovsvapp_const.AGENT_RUNNING
self.agent.tenant_network_types = [p_const.TYPE_VLAN]
br = self.agent.phys_brs[port['physical_network']]['br']
with mock.patch.object(self.agent.ovsvapp_rpc,
"update_device_binding"
) as mock_update_device_binding:
self.agent._notify_device_updated(vm, host, True)
self.assertIn(FAKE_PORT_1, self.agent.cluster_host_ports)
self.assertTrue(mock_update_device_binding.called)
self.assertTrue(br.add_flows.called)
def test_notify_device_updated_vlan_rpc_exception(self):
host = FAKE_HOST_1
self.agent.esx_hostname = host
vm_port1 = SamplePort(FAKE_PORT_1)
vm = VM(FAKE_VM, [vm_port1])
port = self._build_port(FAKE_PORT_1)
br = self._build_phys_brs(port)
self.agent.ports_dict[port['id']] = self.agent._build_port_info(port)
self.agent.state = ovsvapp_const.AGENT_RUNNING
self.agent.tenant_network_types = [p_const.TYPE_VLAN]
with mock.patch.object(self.agent.ovsvapp_rpc,
"update_device_binding",
side_effect=Exception()
) as mock_update_device_binding, \
mock.patch.object(self.LOG, 'exception'
) as mock_log_exception:
self.assertRaises(
error.OVSvAppNeutronAgentError,
self.agent._notify_device_updated, vm, host, True)
self.assertIn(FAKE_PORT_1, self.agent.cluster_host_ports)
self.assertTrue(br.add_flows.called)
self.assertTrue(mock_update_device_binding.called)
self.assertTrue(mock_log_exception.called)
def test_notify_device_updated_host_vlan_multiple_nic(self):
host = FAKE_HOST_1
self.agent.esx_hostname = host
vm_port1 = SamplePort(FAKE_PORT_1)
vm_port2 = SamplePort(FAKE_PORT_2)
vm = VM(FAKE_VM, ([vm_port1, vm_port2]))
port1 = self._build_port(FAKE_PORT_1)
port2 = self._build_port(FAKE_PORT_2)
br1 = self._build_phys_brs(port1)
br2 = self._build_phys_brs(port2)
self.agent.ports_dict[port1['id']] = self.agent._build_port_info(port1)
self.agent.ports_dict[port2['id']] = self.agent._build_port_info(port2)
self._build_lvm(port1)
self._build_lvm(port2)
self.agent.state = ovsvapp_const.AGENT_RUNNING
self.agent.tenant_network_types = [p_const.TYPE_VLAN]
with mock.patch.object(self.agent.ovsvapp_rpc,
"update_device_binding"
) as mock_update_device_binding, \
mock.patch.object(self.LOG, 'exception'
) as mock_log_exception:
self.agent._notify_device_updated(vm, host, True)
self.assertIn(FAKE_PORT_1, self.agent.cluster_host_ports)
self.assertTrue(mock_update_device_binding.called)
self.assertFalse(mock_log_exception.called)
self.assertEqual(1, mock_update_device_binding.call_count)
self.assertTrue(br1.add_flows.called)
self.assertTrue(br2.add_flows.called)
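# Note on the helper below: it seeds the agent's vlan_manager with a local VLAN
# mapping for the port's network (duplicate registrations are silently ignored),
# so the _notify_device_updated tests around it can resolve the port's network;
# the literal '1234' is simply the segmentation id used by these fixtures.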
def _build_lvm(self, port):
try:
self.agent.vlan_manager.add(port['network_id'], port['lvid'],
port['network_type'],
port['physical_network'], '1234')
except vlanmanager.MappingAlreadyExists:
return None
def test_notify_device_updated_host_vxlan(self):
host = FAKE_HOST_1
self.agent.esx_hostname = host
vm_port1 = SamplePort(FAKE_PORT_1)
port1 = self._build_port(FAKE_PORT_1)
port1['network_type'] = p_const.TYPE_VXLAN
self.agent.ports_dict[port1['id']] = self.agent._build_port_info(port1)
vm = VM(FAKE_VM, [vm_port1])
self.agent.state = ovsvapp_const.AGENT_RUNNING
self.agent.tenant_network_types = [p_const.TYPE_VXLAN]
with mock.patch.object(self.agent.ovsvapp_rpc,
"update_device_binding"
) as mock_update_device_binding, \
mock.patch.object(self.LOG, 'exception'
) as mock_log_exception:
self.agent._notify_device_updated(vm, host, True)
self.assertIn(FAKE_PORT_1, self.agent.cluster_host_ports)
self.assertTrue(mock_update_device_binding.called)
self.assertFalse(mock_log_exception.called)
def test_notify_device_updated_vxlan_rpc_exception(self):
host = FAKE_HOST_1
self.agent.esx_hostname = host
vm_port1 = SamplePort(FAKE_PORT_1)
vm = VM(FAKE_VM, [vm_port1])
self.agent.state = ovsvapp_const.AGENT_RUNNING
self.agent.tenant_network_types = [p_const.TYPE_VXLAN]
with mock.patch.object(self.agent.ovsvapp_rpc,
"update_device_binding",
side_effect=Exception()
) as mock_update_device_binding, \
mock.patch.object(self.LOG, 'exception'
) as mock_log_exception:
self.assertRaises(
error.OVSvAppNeutronAgentError,
self.agent._notify_device_updated, vm, host, True)
self.assertIn(FAKE_PORT_1, self.agent.cluster_host_ports)
self.assertTrue(mock_update_device_binding.called)
self.assertTrue(mock_log_exception.called)
def test_map_port_to_common_model_vlan(self):
expected_port = self._build_port(FAKE_PORT_1)
self.agent.cluster_moid = FAKE_CLUSTER_MOID
self.agent.tenant_network_types = [p_const.TYPE_VLAN]
network, port = self.agent._map_port_to_common_model(expected_port)
expected_name = expected_port['network_id'] + "-" + FAKE_CLUSTER_MOID
self.assertEqual(expected_name, network.name)
self.assertEqual(expected_port['id'], port.uuid)
def test_map_port_to_common_model_vxlan(self):
expected_port = self._build_port(FAKE_PORT_1)
self.agent.cluster_moid = FAKE_CLUSTER_MOID
self.agent.tenant_network_types = [p_const.TYPE_VXLAN]
network, port = self.agent._map_port_to_common_model(expected_port, 1)
expected_name = expected_port['network_id'] + "-" + FAKE_CLUSTER_MOID
self.assertEqual(expected_name, network.name)
self.assertEqual(expected_port['id'], port.uuid)
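# The two mapping tests above show the naming convention exercised here:
# _map_port_to_common_model names the common-model network
# "<network_id>-<cluster_moid>" and carries the Neutron port id over as the
# common-model port uuid; the VXLAN variant additionally passes a second,
# local-VLAN-style argument (1 in this test).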
def test_device_create_cluster_mismatch(self):
self.agent.vcenter_id = FAKE_VCENTER
self.agent.cluster_id = FAKE_CLUSTER_2
with mock.patch.object(self.agent,
'_process_create_ports',
return_value=True) as mock_create_ports, \
mock.patch.object(self.LOG, 'debug') as mock_logger_debug:
self.agent.device_create(FAKE_CONTEXT,
device=DEVICE)
self.assertTrue(mock_logger_debug.called)
self.assertFalse(mock_create_ports.called)
def test_device_create_non_hosted_vm(self):
ports = [self._build_port(FAKE_PORT_1)]
self._build_phys_brs(ports[0])
self.agent.vcenter_id = FAKE_VCENTER
self.agent.cluster_id = FAKE_CLUSTER_1
self.agent.esx_hostname = FAKE_HOST_2
self.agent.tenant_network_types = [p_const.TYPE_VLAN]
self.agent.devices_up_list = []
self.agent.vlan_manager.mapping = {}
with mock.patch.object(self.agent.sg_agent, 'add_devices_to_filter'
) as mock_add_devices_fn, \
mock.patch.object(self.agent.sg_agent, 'ovsvapp_sg_update'
) as mock_sg_update_fn, \
mock.patch.object(self.agent.sg_agent, 'expand_sg_rules',
return_value=FAKE_SG_RULES
) as mock_expand_sg_rules, \
mock.patch.object(self.agent, '_provision_local_vlan'
) as mock_prov_local_vlan, \
mock.patch.object(self.LOG, 'debug') as mock_logger_debug:
self.agent.device_create(FAKE_CONTEXT,
device=DEVICE,
ports=ports,
sg_rules=mock.MagicMock())
self.assertTrue(mock_logger_debug.called)
mock_add_devices_fn.assert_called_with(ports)
self.assertIn(FAKE_PORT_1, self.agent.cluster_other_ports)
self.assertNotIn(FAKE_PORT_1, self.agent.cluster_host_ports)
self.assertFalse(self.agent.devices_up_list)
self.assertTrue(mock_sg_update_fn.called)
self.assertTrue(mock_expand_sg_rules.called)
self.assertTrue(mock_prov_local_vlan.called)
def test_device_create_hosted_vm_vlan(self):
ports = [self._build_port(FAKE_PORT_1)]
self._build_phys_brs(ports[0])
self.agent.vcenter_id = FAKE_VCENTER
self.agent.cluster_id = FAKE_CLUSTER_1
self.agent.cluster_moid = FAKE_CLUSTER_MOID
self.agent.esx_hostname = FAKE_HOST_1
self.agent.tenant_network_types = [p_const.TYPE_VLAN]
self.agent.devices_up_list = []
self.agent.vlan_manager.mapping = {}
self.agent.net_mgr = fake_manager.MockNetworkManager("callback")
self.agent.net_mgr.initialize_driver()
with mock.patch.object(self.agent.sg_agent, 'add_devices_to_filter'
) as mock_add_devices_fn, \
mock.patch.object(self.agent.sg_agent, 'ovsvapp_sg_update'
) as mock_sg_update_fn, \
mock.patch.object(self.agent.sg_agent, 'expand_sg_rules',
return_value=FAKE_SG_RULES
) as mock_expand_sg_rules, \
mock.patch.object(self.agent, '_provision_local_vlan'
) as mock_prov_local_vlan, \
mock.patch.object(self.LOG, 'debug') as mock_logger_debug:
self.agent.device_create(FAKE_CONTEXT,
device=DEVICE,
ports=ports,
sg_rules=mock.MagicMock())
self.assertTrue(mock_logger_debug.called)
self.assertNotIn(FAKE_PORT_1, self.agent.cluster_other_ports)
self.assertIn(FAKE_PORT_1, self.agent.cluster_host_ports)
self.assertEqual([FAKE_PORT_1], self.agent.devices_up_list)
mock_add_devices_fn.assert_called_with(ports)
self.assertTrue(mock_sg_update_fn.called)
self.assertTrue(mock_expand_sg_rules.called)
self.assertTrue(mock_prov_local_vlan.called)
def test_device_create_hosted_vm_vlan_sg_rule_missing(self):
ports = [self._build_port(FAKE_PORT_1)]
self._build_phys_brs(ports[0])
self.agent.vcenter_id = FAKE_VCENTER
self.agent.cluster_id = FAKE_CLUSTER_1
self.agent.cluster_moid = FAKE_CLUSTER_MOID
self.agent.esx_hostname = FAKE_HOST_1
self.agent.tenant_network_types = [p_const.TYPE_VLAN]
self.agent.devices_up_list = []
self.agent.vlan_manager.mapping = {}
self.agent.devices_to_filter = set()
self.agent.net_mgr = fake_manager.MockNetworkManager("callback")
self.agent.net_mgr.initialize_driver()
with mock.patch.object(self.agent.sg_agent, 'add_devices_to_filter'
) as mock_add_devices_fn, \
mock.patch.object(self.agent.sg_agent, 'ovsvapp_sg_update'
) as mock_sg_update_fn, \
mock.patch.object(self.agent.sg_agent, 'expand_sg_rules',
return_value=FAKE_SG_RULES_MISSING
) as mock_expand_sg_rules, \
mock.patch.object(self.agent, '_provision_local_vlan'
) as mock_prov_local_vlan, \
mock.patch.object(self.LOG, 'debug') as mock_logger_debug:
self.agent.device_create(FAKE_CONTEXT,
device=DEVICE,
ports=ports,
sg_rules=mock.MagicMock())
self.assertTrue(mock_logger_debug.called)
self.assertNotIn(FAKE_PORT_1, self.agent.cluster_other_ports)
self.assertIn(FAKE_PORT_1, self.agent.cluster_host_ports)
self.assertEqual([FAKE_PORT_1], self.agent.devices_up_list)
self.assertIn(FAKE_PORT_1, self.agent.devices_to_filter)
mock_add_devices_fn.assert_called_with(ports)
self.assertFalse(mock_sg_update_fn.called)
self.assertTrue(mock_expand_sg_rules.called)
self.assertTrue(mock_prov_local_vlan.called)
def test_device_create_hosted_vm_vlan_sg_rule_partial_missing(self):
ports = [self._build_port(FAKE_PORT_1)]
self._build_phys_brs(ports[0])
self.agent.vcenter_id = FAKE_VCENTER
self.agent.cluster_id = FAKE_CLUSTER_1
self.agent.cluster_moid = FAKE_CLUSTER_MOID
self.agent.esx_hostname = FAKE_HOST_1
self.agent.tenant_network_types = [p_const.TYPE_VLAN]
self.agent.devices_up_list = []
self.agent.devices_to_filter = set()
self.agent.vlan_manager.mapping = {}
self.agent.net_mgr = fake_manager.MockNetworkManager("callback")
self.agent.net_mgr.initialize_driver()
with mock.patch.object(self.agent.sg_agent, 'add_devices_to_filter'
) as mock_add_devices_fn, \
mock.patch.object(self.agent.sg_agent, 'ovsvapp_sg_update'
) as mock_sg_update_fn, \
mock.patch.object(self.agent.sg_agent, 'expand_sg_rules',
return_value=FAKE_SG_RULES_PARTIAL
) as mock_expand_sg_rules, \
mock.patch.object(self.agent, '_provision_local_vlan'
) as mock_prov_local_vlan, \
mock.patch.object(self.LOG, 'debug') as mock_logger_debug:
self.agent.device_create(FAKE_CONTEXT,
device=DEVICE,
ports=ports,
sg_rules=mock.MagicMock())
self.assertTrue(mock_logger_debug.called)
self.assertNotIn(FAKE_PORT_1, self.agent.cluster_other_ports)
self.assertIn(FAKE_PORT_1, self.agent.cluster_host_ports)
self.assertEqual([FAKE_PORT_1], self.agent.devices_up_list)
self.assertIn(FAKE_PORT_1, self.agent.devices_to_filter)
mock_add_devices_fn.assert_called_with(ports)
self.assertFalse(mock_sg_update_fn.called)
self.assertTrue(mock_expand_sg_rules.called)
self.assertTrue(mock_prov_local_vlan.called)
def test_device_create_hosted_vm_vxlan(self):
port = self._build_port(FAKE_PORT_1)
port['network_type'] = p_const.TYPE_VXLAN
ports = [port]
self.agent.vlan_manager.mapping = {}
self.agent.vcenter_id = FAKE_VCENTER
self.agent.cluster_id = FAKE_CLUSTER_1
self.agent.cluster_moid = FAKE_CLUSTER_MOID
self.agent.esx_hostname = FAKE_HOST_1
self.agent.tenant_network_types = [p_const.TYPE_VXLAN]
self.agent.vlan_manager.mapping = {}
self.agent.devices_to_filter = set()
self.agent.net_mgr = fake_manager.MockNetworkManager("callback")
self.agent.net_mgr.initialize_driver()
with mock.patch.object(self.agent, '_provision_local_vlan'
) as mock_prov_local_vlan, \
mock.patch.object(self.agent.sg_agent,
'add_devices_to_filter'
) as mock_add_devices_fn, \
mock.patch.object(self.agent.sg_agent, 'ovsvapp_sg_update'
) as mock_sg_update_fn, \
mock.patch.object(self.agent.sg_agent, 'expand_sg_rules',
return_value=FAKE_SG_RULES
) as mock_expand_sg_rules, \
mock.patch.object(self.agent.plugin_rpc, 'update_device_up'
) as mock_update_device_up, \
mock.patch.object(self.LOG, 'debug') as mock_logger_debug:
self.agent.device_create(FAKE_CONTEXT,
device=DEVICE,
ports=ports,
sg_rules=mock.MagicMock())
self.assertTrue(mock_prov_local_vlan.called)
self.assertTrue(mock_logger_debug.called)
self.assertNotIn(FAKE_PORT_1, self.agent.cluster_other_ports)
self.assertNotIn(FAKE_PORT_1, self.agent.devices_to_filter)
self.assertIn(FAKE_PORT_1, self.agent.cluster_host_ports)
mock_add_devices_fn.assert_called_with(ports)
self.assertTrue(mock_sg_update_fn.called)
self.assertTrue(mock_expand_sg_rules.called)
self.assertTrue(mock_update_device_up.called)
def test_device_create_hosted_vm_vxlan_sg_rule_missing(self):
port = self._build_port(FAKE_PORT_1)
port['network_type'] = p_const.TYPE_VXLAN
#! /usr/bin/env python
import os
import zipfile
import argparse
import sys
import logging
import time
import tempfile
import shutil
from logging import config
import requests
from ndexutil.config import NDExUtilConfig
import ndexbiogridloader
from ndexbiogridloader.exceptions import NdexBioGRIDLoaderError
import ndex2
from ndex2.client import Ndex2
import networkx as nx
from ndexutil.cytoscape import Py4CytoscapeWrapper
from ndexutil.cytoscape import DEFAULT_CYREST_API
from ndexutil.ndex import NDExExtraUtils
from tqdm import tqdm
logger = logging.getLogger(__name__)
TSV2NICECXMODULE = 'ndexutil.tsv.tsv2nicecx2'
LOG_FORMAT = "%(asctime)-15s %(levelname)s %(relativeCreated)dms " \
"%(filename)s::%(funcName)s():%(lineno)d %(message)s"
import json
import pandas as pd
import ndexutil.tsv.tsv2nicecx2 as t2n
class Formatter(argparse.ArgumentDefaultsHelpFormatter,
argparse.RawDescriptionHelpFormatter):
pass
ORGANISM_STYLE = 'organism_style.cx'
CHEMICAL_STYLE = 'chemical_style.cx'
ORGANISMLISTFILE = 'organism_list.txt'
"""
Name of the file, stored within this package, that lists
the networks to be downloaded
"""
CHEMICALSLISTFILE = 'chemicals_list.txt'
"""
Name of the file, stored within this package, that lists
the networks to be downloaded
"""
TESTSDIR = 'tests'
"""
Name of the test directory; used in the test_ndexloadtcga.py module
"""
DATADIR = 'biogrid_files'
"""
Name of the directory where the BioGRID archive files will be downloaded to and processed
"""
ORGANISM_LOAD_PLAN = 'organism_load_plan.json'
"""
Name of file containing json load plan
for biogrid protein-protein interactions
"""
CHEM_LOAD_PLAN = 'chem_load_plan.json'
"""
Name of file containing json load plan
for biogrid protein-chemical interactions
"""
def get_package_dir():
"""
Gets directory where package is installed
:return:
"""
return os.path.dirname(ndexbiogridloader.__file__)
def get_organism_style():
"""
Gets the style stored with this package
:return: path to file
:rtype: string
"""
return os.path.join(get_package_dir(), ORGANISM_STYLE)
def get_chemical_style():
"""
Gets the style stored with this package
:return: path to file
:rtype: string
"""
return os.path.join(get_package_dir(), CHEMICAL_STYLE)
def get_organism_load_plan():
"""
Gets the load plan stored with this package
:return: path to file
:rtype: string
"""
return os.path.join(get_package_dir(), ORGANISM_LOAD_PLAN)
def get_chemical_load_plan():
"""
Gets the load plan stored with this package
:return: path to file
:rtype: string
"""
return os.path.join(get_package_dir(), CHEM_LOAD_PLAN)
def get_organismfile():
"""
Gets the networks list stored with this package
:return: path to file
:rtype: string
"""
return os.path.join(get_package_dir(), ORGANISMLISTFILE)
def get_chemicalsfile():
"""
Gets the networks list stored with this package
:return: path to file
:rtype: string
"""
return os.path.join(get_package_dir(), CHEMICALSLISTFILE)
def _parse_arguments(desc, args):
"""
Parses command line arguments
:param desc:
:param args:
:return:
"""
parser = argparse.ArgumentParser(description=desc,
formatter_class=Formatter)
parser.add_argument('datadir', help='Directory where BioGRID data is downloaded to and processed from')
parser.add_argument('--profile', help='Profile in configuration '
'file to use to load '
'NDEx credentials, which means '
'configuration under [XXX] will be '
'used '
'(default '
'ndexbiogridloader)',
default='ndexbiogridloader')
parser.add_argument('--logconf', default=None,
help='Path to python logging configuration file in '
'this format: https://docs.python.org/3/library/'
'logging.config.html#logging-config-fileformat '
'Setting this overrides the -v parameter, which uses '
'the default logger. (default None)')
parser.add_argument('--conf', help='Configuration file to load '
'(default ~/' +
NDExUtilConfig.CONFIG_FILE + ')')
parser.add_argument('--verbose', '-v', action='count', default=0,
help='Increases verbosity of logger to standard '
'error for log messages in this module and '
'in ' + TSV2NICECXMODULE + '. Messages are '
'output at these python logging levels '
'-v = ERROR, -vv = WARNING, -vvv = INFO, '
'-vvvv = DEBUG, -vvvvv = NOTSET (default no '
'logging)')
parser.add_argument('--version', action='version',
version=('%(prog)s ' +
ndexbiogridloader.__version__))
parser.add_argument('--biogridversion',
help='Version of BioGRID Release. To see what '
'versions are available, visit: '
'https://downloads.thebiogrid.org/'
'BioGRID/Release-Archive/',
default='4.2.191')
parser.add_argument('--skipdownload', action='store_true',
help='If set, skips download of data from BioGRID and '
'assumes data already reside in the <datadir> '
'directory')
parser.add_argument('--skipupload', action='store_true',
help='If set, upload of networks to NDEx is skipped.'
'This is mainly for testing purposes')
parser.add_argument('--organismloadplan',
help='Use alternate organism load plan file',
default=get_organism_load_plan())
parser.add_argument('--organismfile', default=get_organismfile(),
help='File containing list of organisms to '
'upload to NDEx. By default the list '
'stored with this tool is used')
parser.add_argument('--chemicalloadplan',
help='Use alternate chemical load plan file',
default=get_chemical_load_plan())
parser.add_argument('--chemicalsfile', default=get_chemicalsfile(),
help='File containing list of chemicals to '
'upload to NDEx. By default the list '
'stored with this tool is used')
parser.add_argument('--organismstyle',
help='Use alternate organism style file',
default=get_organism_style())
parser.add_argument('--chemicalstyle',
help='Use alternate chemical style file',
default=get_chemical_style())
parser.add_argument('--noprogressbar', action='store_true',
help='If set, disables the tqdm progress '
'bar from displaying')
parser.add_argument('--maxretries', type=int, default=5,
help='Number of retries to attempt to upload '
'each network to NDEx')
parser.add_argument('--retry_sleep', type=int, default=30,
help='Number of seconds to wait between '
'retry of failed upload of network to NDEx')
parser.add_argument('--layout', default='-',
help='Specifies layout '
'algorithm to run. If Cytoscape is running '
'any layout from Cytoscape can be used. If '
'this flag is omitted or "-" is passed in '
'force-directed-cl from Cytoscape will '
'be used. If no Cytoscape is available, '
'"spring" from networkx is supported')
parser.add_argument('--cyresturl',
default=DEFAULT_CYREST_API,
help='URL of CyREST API. Default value '
'is default for locally running Cytoscape')
return parser.parse_args(args)
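# Illustrative only: a minimal invocation of the parser built above. The flag
# names all come from the add_argument() calls; the 'biogrid_data' directory is
# a made-up example value.
#
#   args = _parse_arguments('BioGRID loader', ['biogrid_data',
#                                              '--biogridversion', '4.2.191',
#                                              '--skipupload', '-vvv'])
#   # args.datadir == 'biogrid_data', args.skipupload is True, args.verbose == 3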
def _setup_logging(args):
"""
Sets up logging based on parsed command line arguments.
If args.logconf is set, use that configuration; otherwise look
at args.verbose and set logging for this module and the one
in ndexutil specified by the TSV2NICECXMODULE constant
:param args: parsed command line arguments from argparse
:raises AttributeError: If args is None or args.logconf is None
:return: None
"""
if args.logconf is None:
level = (50 - (10 * args.verbose))
logging.basicConfig(format=LOG_FORMAT,
level=level)
logging.getLogger(TSV2NICECXMODULE).setLevel(level)
logger.setLevel(level)
return
# logconf was set use that file
logging.config.fileConfig(args.logconf,
disable_existing_loggers=False)
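# For reference, the arithmetic above maps the repeatable -v flag onto Python
# logging levels, matching the --verbose help text:
#   (no -v) -> 50 CRITICAL, -v -> 40 ERROR, -vv -> 30 WARNING,
#   -vvv -> 20 INFO, -vvvv -> 10 DEBUG, -vvvvv -> 0 NOTSET.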
def _cvtfield(f):
"""
If str passed in via 'f' parameter is '-' then
return empty string otherwise return value of 'f'
:param f:
:return: empty string if 'f' is '-' otherwise return 'f'
:rtype: str
"""
if f is None or f != '-':
return f
return ''
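# Examples of the behaviour above:
#   _cvtfield('-')    -> ''    (a literal dash means "empty")
#   _cvtfield('text') -> 'text'
#   _cvtfield(None)   -> None  (None is passed through unchanged)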
class NdexBioGRIDLoader(object):
"""
Class to load content
"""
def __init__(self, args,
py4cyto=Py4CytoscapeWrapper(),
ndexextra=NDExExtraUtils()):
"""
:param args:
"""
self._args = args
self._datadir = os.path.abspath(args.datadir)
self._conf_file = args.conf
self._profile = args.profile
self._organism_load_plan = args.organismloadplan
self._chem_load_plan = args.chemicalloadplan
self._organism_style = args.organismstyle
self._chem_style = args.chemicalstyle
self._user = None
self._pass = None
self._server = None
self._ndex = None
self._biogrid_version = args.biogridversion
self._organism_file_name = os.path.join(self._datadir, 'organism.zip')
self._chemicals_file_name = os.path.join(self._datadir, 'chemicals.zip')
self._biogrid_organism_file_ext = '-' + self._biogrid_version + '.tab2.txt'
self._biogrid_chemicals_file_ext = '-' + self._biogrid_version + '.chemtab.txt'
self._skipdownload = args.skipdownload
self._network = None
self._py4 = py4cyto
self._ndexextra = ndexextra
def _load_chemical_style_template(self):
"""
Loads the CX network specified by self._chem_style into self._chem_style_template
:return:
"""
self._chem_style_template = ndex2.create_nice_cx_from_file(os.path.abspath(self._chem_style))
def _load_organism_style_template(self):
"""
Loads the CX network specified by self._organism_style into self._organism_style_template
:return:
"""
self._organism_style_template = ndex2.create_nice_cx_from_file(os.path.abspath(self._organism_style))
def _get_biogrid_organism_file_name(self, file_extension):
return 'BIOGRID-ORGANISM-' + self._biogrid_version + file_extension
def _get_download_url(self):
return 'https://downloads.thebiogrid.org/Download/BioGRID/Release-Archive/BIOGRID-' + \
self._biogrid_version + '/'
def _build_organism_file_url(self):
url = self._get_download_url() + self._get_biogrid_organism_file_name('.tab2.zip')
return url
def _get_chemicals_file_name(self, file_extension):
return 'BIOGRID-CHEMICALS-' + self._biogrid_version + file_extension
def _build_chemicals_file_url(self):
url = self._get_download_url() + self._get_chemicals_file_name('.chemtab.zip')
return url
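# With the default --biogridversion of 4.2.191, the two URL builders above yield:
#   https://downloads.thebiogrid.org/Download/BioGRID/Release-Archive/BIOGRID-4.2.191/BIOGRID-ORGANISM-4.2.191.tab2.zip
#   https://downloads.thebiogrid.org/Download/BioGRID/Release-Archive/BIOGRID-4.2.191/BIOGRID-CHEMICALS-4.2.191.chemtab.zip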
def _parse_config(self):
"""
Parses config
:return:
"""
ncon = NDExUtilConfig(conf_file=self._conf_file)
con = ncon.get_config()
self._user = con.get(self._profile, NDExUtilConfig.USER)
self._pass = con.get(self._profile, NDExUtilConfig.PASSWORD)
self._server = con.get(self._profile, NDExUtilConfig.SERVER)
def _get_biogrid_file_name(self, organism_entry):
return organism_entry[0] + self._biogrid_organism_file_ext
def _get_biogrid_chemicals_file_name(self, chemical_entry):
return chemical_entry[0] + self._biogrid_chemicals_file_ext
def _get_header(self, file_path):
with open(file_path, 'r') as f_read:
header_line = f_read.readline().strip()
header_line_split = header_line.split('\t')
return header_line_split, 0
def _download_file(self, url, local_file):
try:
response = requests.get(url)
if response.status_code // 100 == 2:
with open(local_file, "wb") as received_file:
received_file.write(response.content)
else:
return response.status_code
except requests.exceptions.RequestException as e:
logger.exception('Caught exception: ' + str(e))
return 2
return 0
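# Return-code convention used by _download_file and checked by the caller below:
# 0 on success, the HTTP status code when the response is not 2xx, and 2 when
# the request itself raises an exception.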
def _download_biogrid_files(self):
biogrid_organism_url = self._build_organism_file_url()
biogrid_chemicals_url = self._build_chemicals_file_url()
download_status = self._download_file(biogrid_organism_url, self._organism_file_name)
if download_status != 0:
return download_status
return self._download_file(biogrid_chemicals_url, self._chemicals_file_name)
def _get_organism_or_chemicals_file_content(self, type='organism'):
file_names = []
path_to_file = self._args.organismfile if type == 'organism' else self._args.chemicalsfile
with open(path_to_file, 'r') as f:
for cnt, line in enumerate(f):
line_split = line.strip().split('\t')
line_split[1] = line_split[1].replace('"', '')
file_names.append(line_split)
return file_names
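# The organism/chemicals list files read above are plain tab-separated text,
# one entry per line; the second column may be quoted, so the quotes are
# stripped before each line is returned as a list of its columns.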
def _unzip_biogrid_file(self, file_name, type='organism'):
try:
if type == 'organism':
with zipfile.ZipFile(self._organism_file_name, "r") as zip_ref:
extracted_file_path = zip_ref.extract(file_name, self._datadir)
else:
with zipfile.ZipFile(self._chemicals_file_name, "r") as zip_ref:
extracted_file_path = zip_ref.extract(file_name, self._datadir)
except Exception as e:
logger.exception('Caught exception: ' + str(e))
return 2, None
return 0, extracted_file_path
def _remove_biogrid_organism_file(self, file_name):
try:
os.remove(file_name)
except OSError as e:
logger.error('Caught error removing file: ' +
file_name + ' : ' + str(e))
return e.errno
return 0
def _get_header_for_generating_organism_tsv(self):
header = [
'Entrez Gene Interactor A',
'Entrez Gene Interactor B',
'Official Symbol Interactor A',
'Official Symbol Interactor B',
'Synonyms Interactor A',
'Synonyms Interactor B',
'Experimental System',
'Experimental System Type',
'Pubmed ID',
'Throughput',
'Score',
'Modification',
'Phenotypes',
'Qualifications',
'Organism Interactor A',
'Organism Interactor B'
]
return header
def _get_header_for_generating_chemicals_tsv(self):
header = [
'Entrez Gene ID',
'Official Symbol',
'Synonyms',
'Action',
'Interaction Type',
'Pubmed ID',
'Chemical Name',
'Chemical Synonyms',
'Chemical Source ID',
'Chemical Type'
]
return header
def _get_user_agent(self):
"""
:return:
"""
return 'biogrid/' + self._biogrid_version
def _create_ndex_connection(self):
"""
creates connection to ndex
:return:
"""
if self._ndex is None:
try:
self._ndex = Ndex2(host=self._server, username=self._user,
| |
other):
return not (self == other)
all_structs.append(getCritiqueBinaryInputArch_args)
getCritiqueBinaryInputArch_args.thrift_spec = (
None, # 0
(1, TType.STRING, 'problem', 'UTF8', None, ), # 1
(2, TType.STRUCT, 'inputs', [BinaryInputArchitecture, None], None, ), # 2
)
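# A note on the generated thrift_spec tuples used throughout this file: each
# entry is (field id, wire type, field name, type-specific args, default), and
# the spec is indexed by field id, so index 0 is None for argument structs
# (field ids start at 1) while result structs use id 0 for the 'success' field.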
class getCritiqueBinaryInputArch_result(object):
"""
Attributes:
- success
"""
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype219, _size216) = iprot.readListBegin()
for _i220 in range(_size216):
_elem221 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
self.success.append(_elem221)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('getCritiqueBinaryInputArch_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRING, len(self.success))
for iter222 in self.success:
oprot.writeString(iter222.encode('utf-8') if sys.version_info[0] == 2 else iter222)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(getCritiqueBinaryInputArch_result)
getCritiqueBinaryInputArch_result.thrift_spec = (
(0, TType.LIST, 'success', (TType.STRING, 'UTF8', False), None, ), # 0
)
class getCritiqueDiscreteInputArch_args(object):
"""
Attributes:
- problem
- inputs
"""
def __init__(self, problem=None, inputs=None,):
self.problem = problem
self.inputs = inputs
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.problem = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.inputs = DiscreteInputArchitecture()
self.inputs.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('getCritiqueDiscreteInputArch_args')
if self.problem is not None:
oprot.writeFieldBegin('problem', TType.STRING, 1)
oprot.writeString(self.problem.encode('utf-8') if sys.version_info[0] == 2 else self.problem)
oprot.writeFieldEnd()
if self.inputs is not None:
oprot.writeFieldBegin('inputs', TType.STRUCT, 2)
self.inputs.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(getCritiqueDiscreteInputArch_args)
getCritiqueDiscreteInputArch_args.thrift_spec = (
None, # 0
(1, TType.STRING, 'problem', 'UTF8', None, ), # 1
(2, TType.STRUCT, 'inputs', [DiscreteInputArchitecture, None], None, ), # 2
)
class getCritiqueDiscreteInputArch_result(object):
"""
Attributes:
- success
"""
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype226, _size223) = iprot.readListBegin()
for _i227 in range(_size223):
_elem228 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
self.success.append(_elem228)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('getCritiqueDiscreteInputArch_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRING, len(self.success))
for iter229 in self.success:
oprot.writeString(iter229.encode('utf-8') if sys.version_info[0] == 2 else iter229)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(getCritiqueDiscreteInputArch_result)
getCritiqueDiscreteInputArch_result.thrift_spec = (
(0, TType.LIST, 'success', (TType.STRING, 'UTF8', False), None, ), # 0
)
class getArchitectureScoreExplanation_args(object):
"""
Attributes:
- problem
- arch
"""
def __init__(self, problem=None, arch=None,):
self.problem = problem
self.arch = arch
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.problem = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.arch = BinaryInputArchitecture()
self.arch.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('getArchitectureScoreExplanation_args')
if self.problem is not None:
oprot.writeFieldBegin('problem', TType.STRING, 1)
oprot.writeString(self.problem.encode('utf-8') if sys.version_info[0] == 2 else self.problem)
oprot.writeFieldEnd()
if self.arch is not None:
oprot.writeFieldBegin('arch', TType.STRUCT, 2)
self.arch.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(getArchitectureScoreExplanation_args)
getArchitectureScoreExplanation_args.thrift_spec = (
None, # 0
(1, TType.STRING, 'problem', 'UTF8', None, ), # 1
(2, TType.STRUCT, 'arch', [BinaryInputArchitecture, None], None, ), # 2
)
class getArchitectureScoreExplanation_result(object):
"""
Attributes:
- success
"""
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype233, _size230) = iprot.readListBegin()
for _i234 in range(_size230):
_elem235 = ObjectiveSatisfaction()
_elem235.read(iprot)
self.success.append(_elem235)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('getArchitectureScoreExplanation_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
for iter236 in self.success:
iter236.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(getArchitectureScoreExplanation_result)
getArchitectureScoreExplanation_result.thrift_spec = (
(0, TType.LIST, 'success', (TType.STRUCT, [ObjectiveSatisfaction, None], False), None, ), # 0
)
class getPanelScoreExplanation_args(object):
"""
Attributes:
- problem
- arch
- panel
"""
def __init__(self, problem=None, arch=None, panel=None,):
self.problem = problem
self.arch = arch
self.panel = panel
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.problem = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.arch = BinaryInputArchitecture()
self.arch.read(iprot)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.panel = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('getPanelScoreExplanation_args')
if self.problem is not None:
oprot.writeFieldBegin('problem', TType.STRING, 1)
oprot.writeString(self.problem.encode('utf-8') if sys.version_info[0] == 2 else self.problem)
oprot.writeFieldEnd()
if self.arch is not None:
oprot.writeFieldBegin('arch', TType.STRUCT, 2)
self.arch.write(oprot)
oprot.writeFieldEnd()
if self.panel is not None:
oprot.writeFieldBegin('panel', TType.STRING, 3)
oprot.writeString(self.panel.encode('utf-8') if sys.version_info[0] == 2 else self.panel)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(getPanelScoreExplanation_args)
getPanelScoreExplanation_args.thrift_spec = (
None, # 0
(1, TType.STRING, 'problem', 'UTF8', None, ), # 1
(2, TType.STRUCT, 'arch', [BinaryInputArchitecture, None], None, ), # 2
(3, TType.STRING, 'panel', 'UTF8', None, ), # 3
)
class getPanelScoreExplanation_result(object):
"""
Attributes:
- success
"""
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype240, _size237) = iprot.readListBegin()
for _i241 in range(_size237):
_elem242 = ObjectiveSatisfaction()
_elem242.read(iprot)
self.success.append(_elem242)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_job_template), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = None
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.delete_job_template(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_delete_job_template_flattened_error_async():
client = TranscoderServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_job_template(
services.DeleteJobTemplateRequest(), name="name_value",
)
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.TranscoderServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = TranscoderServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.TranscoderServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = TranscoderServiceClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide scopes and a transport instance.
transport = transports.TranscoderServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = TranscoderServiceClient(
client_options={"scopes": ["1", "2"]}, transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.TranscoderServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = TranscoderServiceClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.TranscoderServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
transport = transports.TranscoderServiceGrpcAsyncIOTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
@pytest.mark.parametrize(
"transport_class",
[
transports.TranscoderServiceGrpcTransport,
transports.TranscoderServiceGrpcAsyncIOTransport,
],
)
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = TranscoderServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
assert isinstance(client.transport, transports.TranscoderServiceGrpcTransport,)
def test_transcoder_service_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.TranscoderServiceTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
def test_transcoder_service_base_transport():
# Instantiate the base transport.
with mock.patch(
"google.cloud.video.transcoder_v1beta1.services.transcoder_service.transports.TranscoderServiceTransport.__init__"
) as Transport:
Transport.return_value = None
transport = transports.TranscoderServiceTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
"create_job",
"list_jobs",
"get_job",
"delete_job",
"create_job_template",
"list_job_templates",
"get_job_template",
"delete_job_template",
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
with pytest.raises(NotImplementedError):
transport.close()
@requires_google_auth_gte_1_25_0
def test_transcoder_service_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.video.transcoder_v1beta1.services.transcoder_service.transports.TranscoderServiceTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.TranscoderServiceTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
@requires_google_auth_lt_1_25_0
def test_transcoder_service_base_transport_with_credentials_file_old_google_auth():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.video.transcoder_v1beta1.services.transcoder_service.transports.TranscoderServiceTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.TranscoderServiceTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
def test_transcoder_service_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
"google.cloud.video.transcoder_v1beta1.services.transcoder_service.transports.TranscoderServiceTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.TranscoderServiceTransport()
adc.assert_called_once()
@requires_google_auth_gte_1_25_0
def test_transcoder_service_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
TranscoderServiceClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id=None,
)
@requires_google_auth_lt_1_25_0
def test_transcoder_service_auth_adc_old_google_auth():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
TranscoderServiceClient()
adc.assert_called_once_with(
scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id=None,
)
@pytest.mark.parametrize(
"transport_class",
[
transports.TranscoderServiceGrpcTransport,
transports.TranscoderServiceGrpcAsyncIOTransport,
],
)
@requires_google_auth_gte_1_25_0
def test_transcoder_service_transport_auth_adc(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
adc.assert_called_once_with(
scopes=["1", "2"],
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class",
[
transports.TranscoderServiceGrpcTransport,
transports.TranscoderServiceGrpcAsyncIOTransport,
],
)
@requires_google_auth_lt_1_25_0
def test_transcoder_service_transport_auth_adc_old_google_auth(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus")
adc.assert_called_once_with(
scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.TranscoderServiceGrpcTransport, grpc_helpers),
(transports.TranscoderServiceGrpcAsyncIOTransport, grpc_helpers_async),
],
)
def test_transcoder_service_transport_create_channel(transport_class, grpc_helpers):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
create_channel.assert_called_with(
"transcoder.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
scopes=["1", "2"],
default_host="transcoder.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"transport_class",
[
transports.TranscoderServiceGrpcTransport,
transports.TranscoderServiceGrpcAsyncIOTransport,
],
)
def test_transcoder_service_grpc_transport_client_cert_source_for_mtls(transport_class):
cred = ga_credentials.AnonymousCredentials()
# Check ssl_channel_credentials is used if provided.
with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
mock_ssl_channel_creds = mock.Mock()
transport_class(
host="squid.clam.whelk",
credentials=cred,
ssl_channel_credentials=mock_ssl_channel_creds,
)
mock_create_channel.assert_called_once_with(
"squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_channel_creds,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
# is used.
with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
transport_class(
credentials=cred,
client_cert_source_for_mtls=client_cert_source_callback,
)
expected_cert, expected_key = client_cert_source_callback()
mock_ssl_cred.assert_called_once_with(
certificate_chain=expected_cert, private_key=expected_key
)
def test_transcoder_service_host_no_port():
client = TranscoderServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="transcoder.googleapis.com"
),
)
assert client.transport._host == "transcoder.googleapis.com:443"
def test_transcoder_service_host_with_port():
client = TranscoderServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="transcoder.googleapis.com:8000"
),
)
assert client.transport._host == "transcoder.googleapis.com:8000"
def test_transcoder_service_grpc_transport_channel():
channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.TranscoderServiceGrpcTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
assert transport._ssl_channel_credentials == None
def test_transcoder_service_grpc_asyncio_transport_channel():
channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.TranscoderServiceGrpcAsyncIOTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
assert transport._ssl_channel_credentials == None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[
transports.TranscoderServiceGrpcTransport,
transports.TranscoderServiceGrpcAsyncIOTransport,
],
)
def test_transcoder_service_transport_channel_mtls_with_client_cert_source(
transport_class,
):
with mock.patch(
"grpc.ssl_channel_credentials", autospec=True
) as grpc_ssl_channel_cred:
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[
transports.TranscoderServiceGrpcTransport,
transports.TranscoderServiceGrpcAsyncIOTransport,
],
)
def test_transcoder_service_transport_channel_mtls_with_adc(transport_class):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_job_path():
project = "squid"
location = "clam"
job = "whelk"
expected = "projects/{project}/locations/{location}/jobs/{job}".format(
project=project, location=location, job=job,
)
actual = TranscoderServiceClient.job_path(project, location, job)
assert expected == actual
def test_parse_job_path():
expected = {
"project": "octopus",
"location": "oyster",
"job": "nudibranch",
}
path = TranscoderServiceClient.job_path(**expected)
# Check that the path construction is reversible.
actual = TranscoderServiceClient.parse_job_path(path)
assert expected == actual
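# The two tests above exercise the same path template in both directions: with
# the sample values used here job_path() produces
# "projects/squid/locations/clam/jobs/whelk", and parse_job_path() recovers the
# project/location/job components from such a string.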
def test_job_template_path():
project = "cuttlefish"
location = "mussel"
job_template = "winkle"
expected = "projects/{project}/locations/{location}/jobTemplates/{job_template}".format(
project=project, location=location, job_template=job_template,
)
actual = TranscoderServiceClient.job_template_path(project, location, job_template)
assert expected == actual
def test_parse_job_template_path():
expected = {
"project": "nautilus",
"location": "scallop",
"job_template": "abalone",
}
path = TranscoderServiceClient.job_template_path(**expected)
# Check that the path construction is reversible.
actual = TranscoderServiceClient.parse_job_template_path(path)
assert expected == actual
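# The path tests above exercise the generated resource-path helpers on
# TranscoderServiceClient. A minimal sketch of how such helpers are commonly
# implemented is shown below; it is illustrative only and not copied from the
# library source:
#
#     import re
#
#     @staticmethod
#     def job_path(project: str, location: str, job: str) -> str:
#         return "projects/{project}/locations/{location}/jobs/{job}".format(
#             project=project, location=location, job=job,
#         )
#
#     @staticmethod
#     def parse_job_path(path: str) -> dict:
#         m = re.match(
#             r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/jobs/(?P<job>.+?)$",
#             path,
#         )
#         return m.groupdict() if m else {}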
def test_common_billing_account_path():
billing_account = "squid"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
actual = TranscoderServiceClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "clam",
}
path = TranscoderServiceClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = TranscoderServiceClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "whelk"
expected = "folders/{folder}".format(folder=folder,)
actual = TranscoderServiceClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "octopus",
}
path = TranscoderServiceClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = TranscoderServiceClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "oyster"
expected = "organizations/{organization}".format(organization=organization,)
actual = TranscoderServiceClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "nudibranch",
}
path = TranscoderServiceClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = TranscoderServiceClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "cuttlefish"
expected = "projects/{project}".format(project=project,)
actual = TranscoderServiceClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "mussel",
}
path = TranscoderServiceClient.common_project_path(**expected)
# Check that the path construction is reversible.
pageurl_tg, growth_percent, top_rank_tg, visit_date = datum_g6
top10g6.append({"pageUrl": pageurl_tg, "pageTitle": pagetitle_tg,
"pageViews": pageviews_tg, "rank": top_rank_tg, "percentage": growth_percent})
visitdate_6 = datum_g6[7]
for datum_p7 in data_tp7:
property_id, reg_domain, pageviews_tp, pagetitle_tp, pageurl_tp, trend_percent, top_rank_tp, visit_date = datum_p7
top10p7.append({"pageUrl": pageurl_tp, "pageTitle": pagetitle_tp,
"pageViews": pageviews_tp, "rank": top_rank_tp, "percentage": trend_percent})
for datum_g7 in data_tg7:
property_id, reg_domain, pageviews_tg, pagetitle_tg, pageurl_tg, growth_percent, top_rank_tg, visit_date = datum_g7
top10g7.append({"pageUrl": pageurl_tg, "pageTitle": pagetitle_tg,
"pageViews": pageviews_tg, "rank": top_rank_tg, "percentage": growth_percent})
visitdate_7 = datum_g7[7]
for datum_ in data_:
property_id, hostname, users, newUsers, returningUsers, pageviews, time_on_page, bounce_rate, sessions, aveSession, pagesPerSession, aveSessionDuration, visit_date, visit_weekday = datum_
if datum_[12] == visitdate_1:
output.append({"date": visit_date, "users": users, "pageViews": pageviews, "timeOnPage": time_on_page, "bounceRate": bounce_rate, "sessions": sessions, "aveSessionsPerUser": aveSession,
"pagesPerSession": pagesPerSession, "aveSessionDuration": aveSessionDuration, "newUsers": newUsers, "returningUsers": returningUsers, "topTenGrowth": top10g1, "topTenPageViews": top10p1})
if datum_[12] == visitdate_2:
output.append({"date": visit_date, "users": users, "pageViews": pageviews, "timeOnPage": time_on_page, "bounceRate": bounce_rate, "sessions": sessions, "aveSessionsPerUser": aveSession,
"pagesPerSession": pagesPerSession, "aveSessionDuration": aveSessionDuration, "newUsers": newUsers, "returningUsers": returningUsers, "topTenGrowth": top10g2, "topTenPageViews": top10p2})
if datum_[12] == visitdate_3:
output.append({"date": visit_date, "users": users, "pageViews": pageviews, "timeOnPage": time_on_page, "bounceRate": bounce_rate, "sessions": sessions, "aveSessionsPerUser": aveSession,
"pagesPerSession": pagesPerSession, "aveSessionDuration": aveSessionDuration, "newUsers": newUsers, "returningUsers": returningUsers, "topTenGrowth": top10g3, "topTenPageViews": top10p3})
if datum_[12] == visitdate_4:
output.append({"date": visit_date, "users": users, "pageViews": pageviews, "timeOnPage": time_on_page, "bounceRate": bounce_rate, "sessions": sessions, "aveSessionsPerUser": aveSession,
"pagesPerSession": pagesPerSession, "aveSessionDuration": aveSessionDuration, "newUsers": newUsers, "returningUsers": returningUsers, "topTenGrowth": top10g4, "topTenPageViews": top10p4})
if datum_[12] == visitdate_5:
output.append({"date": visit_date, "users": users, "pageViews": pageviews, "timeOnPage": time_on_page, "bounceRate": bounce_rate, "sessions": sessions, "aveSessionsPerUser": aveSession,
"pagesPerSession": pagesPerSession, "aveSessionDuration": aveSessionDuration, "newUsers": newUsers, "returningUsers": returningUsers, "topTenGrowth": top10g5, "topTenPageViews": top10p5})
if datum_[12] == visitdate_6:
output.append({"date": visit_date, "users": users, "pageViews": pageviews, "timeOnPage": time_on_page, "bounceRate": bounce_rate, "sessions": sessions, "aveSessionsPerUser": aveSession,
"pagesPerSession": pagesPerSession, "aveSessionDuration": aveSessionDuration, "newUsers": newUsers, "returningUsers": returningUsers, "topTenGrowth": top10g6, "topTenPageViews": top10p6})
if datum_[12] == visitdate_7:
output.append({"date": visit_date, "users": users, "pageViews": pageviews, "timeOnPage": time_on_page, "bounceRate": bounce_rate, "sessions": sessions, "aveSessionsPerUser": aveSession,
"pagesPerSession": pagesPerSession, "aveSessionDuration": aveSessionDuration, "newUsers": newUsers, "returningUsers": returningUsers, "topTenGrowth": top10g7, "topTenPageViews": top10p7})
# else:
# output.append({"date": visit_date, "users": users, "pageViews": pageviews, "timeOnPage": time_on_page, "bounceRate": bounce_rate, "sessions": sessions, "aveSessionsPerUser": aveSession, "pagesPerSession": pagesPerSession, "aveSessionDuration": aveSessionDuration, "newUsers": newUsers, "returningUsers": returningUsers, "topTenGrowth": [], "topTenPageViews": []})
jdata = {
"output": output
}
logging.info(jdata)
return jdata, datum_[0]
# Hourly data output transform function
def prepare_outtype3(data_):
output = []
for datum in data_:
property_id, hostname, users, newUsers, returningUsers, pageviews, time_on_page, bounce_rate, sessions, aveSession, pagesPerSession, aveSessionDuration, visit_hour, visit_weekday = datum
output.append({"visit_hour": visit_hour, "users": users, "pageViews": pageviews, "timeOnPage": time_on_page, "bounceRate": bounce_rate, "sessions": sessions, "aveSessionsPerUser": aveSession, "pagesPerSession": pagesPerSession, "aveSessionDuration": aveSessionDuration, "newUsers": newUsers, "returningUsers": returningUsers}
)
jdata = {
"output": output
}
logging.info(jdata)
return jdata, datum[0]
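# Illustrative shape of the data handled by prepare_outtype3 (values made up):
# each BigQuery row is a 14-tuple
#   (property_id, hostname, users, newUsers, returningUsers, pageviews,
#    time_on_page, bounce_rate, sessions, aveSession, pagesPerSession,
#    aveSessionDuration, visit_hour, visit_weekday)
# and becomes one entry in jdata["output"], e.g.
#   {"visit_hour": 14, "users": 120, "pageViews": 340, "sessions": 95, ...}
# The property_id of the last row is returned alongside jdata.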
# Weekly data output streaming
def update_output_type1(**context):
# Load output of respective agency's property into RDS
xcom_data_1 = context['ti'].xcom_pull(
task_ids='bigquery_fetch_type1_1_toppages')
xcom_data_2 = context['ti'].xcom_pull(
task_ids='bigquery_fetch_type1_1_topgrowth')
if xcom_data_1 or xcom_data_2:
data_out, uaid = prepare_outtype1(xcom_data_1, xcom_data_2)
dobs_data_ops.update_output(
data_out, uaid, dobs_constants.Analytics_TYPE1)
# xcom_data_2 = context['ti'].xcom_pull(task_ids='bigquery_fetch_type1_2')
# if xcom_data_2:
# data_out, uaid = prepare_outtype1(xcom_data_2)
# dobs_data_ops.update_output(data_out, uaid, dobs_constants.Analytics_TYPE1)
xcom_data_5 = context['ti'].xcom_pull(
task_ids='bigquery_fetch_type1_3_toppages')
xcom_data_6 = context['ti'].xcom_pull(
task_ids='bigquery_fetch_type1_3_topgrowth')
if xcom_data_5 or xcom_data_6:
data_out, uaid = prepare_outtype1(xcom_data_5, xcom_data_6)
dobs_data_ops.update_output(
data_out, uaid, dobs_constants.Analytics_TYPE1)
xcom_data_7 = context['ti'].xcom_pull(
task_ids='bigquery_fetch_type1_4_toppages')
xcom_data_8 = context['ti'].xcom_pull(
task_ids='bigquery_fetch_type1_4_topgrowth')
if xcom_data_7 or xcom_data_8:
data_out, uaid = prepare_outtype1(xcom_data_7, xcom_data_8)
dobs_data_ops.update_output(
data_out, uaid, dobs_constants.Analytics_TYPE1)
xcom_data_9 = context['ti'].xcom_pull(
task_ids='bigquery_fetch_type1_5_toppages')
xcom_data_10 = context['ti'].xcom_pull(
task_ids='bigquery_fetch_type1_5_topgrowth')
if xcom_data_9 or xcom_data_10:
data_out, uaid = prepare_outtype1(xcom_data_9, xcom_data_10)
dobs_data_ops.update_output(
data_out, uaid, dobs_constants.Analytics_TYPE1)
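# The pull/prepare/update pattern above could, hypothetically, be written as a
# loop over the property suffixes (suffix 2 is skipped because its fetch tasks
# are commented out). The function below is a sketch only and is not wired
# into the DAG:
def _update_output_type1_looped(**context):
    for n in (1, 3, 4, 5):
        pages = context['ti'].xcom_pull(
            task_ids='bigquery_fetch_type1_{}_toppages'.format(n))
        growth = context['ti'].xcom_pull(
            task_ids='bigquery_fetch_type1_{}_topgrowth'.format(n))
        if pages or growth:
            data_out, uaid = prepare_outtype1(pages, growth)
            dobs_data_ops.update_output(
                data_out, uaid, dobs_constants.Analytics_TYPE1)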
# Daily data output streaming
def update_output_type2(**context):
# Load output of respective agency's property into RDS
xcom_data_1 = context['ti'].xcom_pull(
task_ids='bigquery_fetch_type2_1')
xcom_data_2 = context['ti'].xcom_pull(
task_ids='bigquery_fetch_type2_1_tpgs_day1')
xcom_data_3 = context['ti'].xcom_pull(
task_ids='bigquery_fetch_type2_1_tgw_day1')
xcom_data_4 = context['ti'].xcom_pull(
task_ids='bigquery_fetch_type2_1_tpgs_day2')
xcom_data_5 = context['ti'].xcom_pull(
task_ids='bigquery_fetch_type2_1_tgw_day2')
xcom_data_6 = context['ti'].xcom_pull(
task_ids='bigquery_fetch_type2_1_tpgs_day3')
xcom_data_7 = context['ti'].xcom_pull(
task_ids='bigquery_fetch_type2_1_tgw_day3')
xcom_data_8 = context['ti'].xcom_pull(
task_ids='bigquery_fetch_type2_1_tpgs_day4')
xcom_data_9 = context['ti'].xcom_pull(
task_ids='bigquery_fetch_type2_1_tgw_day4')
xcom_data_10 = context['ti'].xcom_pull(
task_ids='bigquery_fetch_type2_1_tpgs_day5')
xcom_data_11 = context['ti'].xcom_pull(
task_ids='bigquery_fetch_type2_1_tgw_day5')
xcom_data_12 = context['ti'].xcom_pull(
task_ids='bigquery_fetch_type2_1_tpgs_day6')
xcom_data_13 = context['ti'].xcom_pull(
task_ids='bigquery_fetch_type2_1_tgw_day6')
xcom_data_14 = context['ti'].xcom_pull(
task_ids='bigquery_fetch_type2_1_tpgs_day7')
xcom_data_15 = context['ti'].xcom_pull(
task_ids='bigquery_fetch_type2_1_tgw_day7')
if xcom_data_1 and (xcom_data_2 or xcom_data_3 or xcom_data_4 or xcom_data_5 or xcom_data_6 or xcom_data_7 or xcom_data_8 or xcom_data_9 or xcom_data_10 or xcom_data_11 or xcom_data_12 or xcom_data_13 or xcom_data_14 or xcom_data_15):
data_out, uaid = prepare_outtype2(xcom_data_1, xcom_data_2, xcom_data_3, xcom_data_4, xcom_data_5, xcom_data_6, xcom_data_7,
xcom_data_8, xcom_data_9, xcom_data_10, xcom_data_11, xcom_data_12, xcom_data_13, xcom_data_14, xcom_data_15)
dobs_data_ops.update_output(
data_out, uaid, dobs_constants.Analytics_TYPE2)
# xcom_data_1 = context['ti'].xcom_pull(task_ids='bigquery_fetch_type2_2')
# xcom_data_2 = context['ti'].xcom_pull(task_ids='bigquery_fetch_type2_2_tpgs_day1')
# xcom_data_3 = context['ti'].xcom_pull(task_ids='bigquery_fetch_type2_2_tgw_day1')
# xcom_data_4 = context['ti'].xcom_pull(task_ids='bigquery_fetch_type2_2_tpgs_day2')
# xcom_data_5 = context['ti'].xcom_pull(task_ids='bigquery_fetch_type2_2_tgw_day2')
# xcom_data_6 = context['ti'].xcom_pull(task_ids='bigquery_fetch_type2_2_tpgs_day3')
# xcom_data_7 = context['ti'].xcom_pull(task_ids='bigquery_fetch_type2_2_tgw_day3')
# xcom_data_8 = context['ti'].xcom_pull(task_ids='bigquery_fetch_type2_2_tpgs_day4')
# xcom_data_9 = context['ti'].xcom_pull(task_ids='bigquery_fetch_type2_2_tgw_day4')
# xcom_data_10 = context['ti'].xcom_pull(task_ids='bigquery_fetch_type2_2_tpgs_day5')
# xcom_data_11 = context['ti'].xcom_pull(task_ids='bigquery_fetch_type2_2_tgw_day5')
# xcom_data_12 = context['ti'].xcom_pull(task_ids='bigquery_fetch_type2_2_tpgs_day6')
# xcom_data_13 = context['ti'].xcom_pull(task_ids='bigquery_fetch_type2_2_tgw_day6')
# xcom_data_14 = context['ti'].xcom_pull(task_ids='bigquery_fetch_type2_2_tpgs_day7')
# xcom_data_15 = context['ti'].xcom_pull(task_ids='bigquery_fetch_type2_2_tgw_day7')
# if xcom_data_1 and (xcom_data_2 or xcom_data_3 or xcom_data_4 or xcom_data_5 or xcom_data_6 or xcom_data_7 or xcom_data_8 or xcom_data_9 or xcom_data_10 or xcom_data_11 or xcom_data_12 or xcom_data_13 or xcom_data_14 or xcom_data_15):
# data_out, uaid = prepare_outtype2(xcom_data_1, xcom_data_2, xcom_data_3, xcom_data_4, xcom_data_5, xcom_data_6, xcom_data_7, xcom_data_8, xcom_data_9, xcom_data_10, xcom_data_11, xcom_data_12, xcom_data_13, xcom_data_14, xcom_data_15)
# dobs_data_ops.update_output(data_out, uaid, dobs_constants.Analytics_TYPE2)
xcom_data_1 = context['ti'].xcom_pull(
task_ids='bigquery_fetch_type2_3')
xcom_data_2 = context['ti'].xcom_pull(
task_ids='bigquery_fetch_type2_3_tpgs_day1')
xcom_data_3 = context['ti'].xcom_pull(
task_ids='bigquery_fetch_type2_3_tgw_day1')
xcom_data_4 = context['ti'].xcom_pull(
task_ids='bigquery_fetch_type2_3_tpgs_day2')
xcom_data_5 = context['ti'].xcom_pull(
task_ids='bigquery_fetch_type2_3_tgw_day2')
xcom_data_6 = context['ti'].xcom_pull(
task_ids='bigquery_fetch_type2_3_tpgs_day3')
xcom_data_7 = context['ti'].xcom_pull(
task_ids='bigquery_fetch_type2_3_tgw_day3')
xcom_data_8 = context['ti'].xcom_pull(
task_ids='bigquery_fetch_type2_3_tpgs_day4')
xcom_data_9 = context['ti'].xcom_pull(
task_ids='bigquery_fetch_type2_3_tgw_day4')
xcom_data_10 = context['ti'].xcom_pull(
task_ids='bigquery_fetch_type2_3_tpgs_day5')
xcom_data_11 = context['ti'].xcom_pull(
task_ids='bigquery_fetch_type2_3_tgw_day5')
xcom_data_12 = context['ti'].xcom_pull(
task_ids='bigquery_fetch_type2_3_tpgs_day6')
xcom_data_13 = context['ti'].xcom_pull(
task_ids='bigquery_fetch_type2_3_tgw_day6')
xcom_data_14 = context['ti'].xcom_pull(
task_ids='bigquery_fetch_type2_3_tpgs_day7')
xcom_data_15 = context['ti'].xcom_pull(
task_ids='bigquery_fetch_type2_3_tgw_day7')
if xcom_data_1 and (xcom_data_2 or xcom_data_3 or xcom_data_4 or xcom_data_5 or xcom_data_6 or xcom_data_7 or xcom_data_8 or xcom_data_9 or xcom_data_10 or xcom_data_11 or xcom_data_12 or xcom_data_13 or xcom_data_14 or xcom_data_15):
data_out, uaid = prepare_outtype2(xcom_data_1, xcom_data_2, xcom_data_3, xcom_data_4, xcom_data_5, xcom_data_6, xcom_data_7,
xcom_data_8, xcom_data_9, xcom_data_10, xcom_data_11, xcom_data_12, xcom_data_13, xcom_data_14, xcom_data_15)
dobs_data_ops.update_output(
data_out, uaid, dobs_constants.Analytics_TYPE2)
xcom_data_1 = context['ti'].xcom_pull(
task_ids='bigquery_fetch_type2_4')
xcom_data_2 = context['ti'].xcom_pull(
task_ids='bigquery_fetch_type2_4_tpgs_day1')
xcom_data_3 = context['ti'].xcom_pull(
task_ids='bigquery_fetch_type2_4_tgw_day1')
xcom_data_4 = context['ti'].xcom_pull(
task_ids='bigquery_fetch_type2_4_tpgs_day2')
xcom_data_5 = context['ti'].xcom_pull(
task_ids='bigquery_fetch_type2_4_tgw_day2')
xcom_data_6 = context['ti'].xcom_pull(
task_ids='bigquery_fetch_type2_4_tpgs_day3')
xcom_data_7 = context['ti'].xcom_pull(
task_ids='bigquery_fetch_type2_4_tgw_day3')
xcom_data_8 = context['ti'].xcom_pull(
task_ids='bigquery_fetch_type2_4_tpgs_day4')
xcom_data_9 = context['ti'].xcom_pull(
task_ids='bigquery_fetch_type2_4_tgw_day4')
xcom_data_10 = context['ti'].xcom_pull(
task_ids='bigquery_fetch_type2_4_tpgs_day5')
xcom_data_11 = context['ti'].xcom_pull(
task_ids='bigquery_fetch_type2_4_tgw_day5')
xcom_data_12 = context['ti'].xcom_pull(
task_ids='bigquery_fetch_type2_4_tpgs_day6')
xcom_data_13 = context['ti'].xcom_pull(
task_ids='bigquery_fetch_type2_4_tgw_day6')
xcom_data_14 = context['ti'].xcom_pull(
task_ids='bigquery_fetch_type2_4_tpgs_day7')
xcom_data_15 = context['ti'].xcom_pull(
task_ids='bigquery_fetch_type2_4_tgw_day7')
if xcom_data_1 and (xcom_data_2 or xcom_data_3 or xcom_data_4 or xcom_data_5 or xcom_data_6 or xcom_data_7 or xcom_data_8 or xcom_data_9 or xcom_data_10 or xcom_data_11 or xcom_data_12 or xcom_data_13 or xcom_data_14 or xcom_data_15):
data_out, uaid = prepare_outtype2(xcom_data_1, xcom_data_2, xcom_data_3, xcom_data_4, xcom_data_5, xcom_data_6, xcom_data_7,
xcom_data_8, xcom_data_9, xcom_data_10, xcom_data_11, xcom_data_12, xcom_data_13, xcom_data_14, xcom_data_15)
dobs_data_ops.update_output(
data_out, uaid, dobs_constants.Analytics_TYPE2)
xcom_data_1 = context['ti'].xcom_pull(
task_ids='bigquery_fetch_type2_5')
xcom_data_2 = context['ti'].xcom_pull(
task_ids='bigquery_fetch_type2_5_tpgs_day1')
xcom_data_3 = context['ti'].xcom_pull(
task_ids='bigquery_fetch_type2_5_tgw_day1')
xcom_data_4 = context['ti'].xcom_pull(
task_ids='bigquery_fetch_type2_5_tpgs_day2')
xcom_data_5 = context['ti'].xcom_pull(
task_ids='bigquery_fetch_type2_5_tgw_day2')
xcom_data_6 = context['ti'].xcom_pull(
task_ids='bigquery_fetch_type2_5_tpgs_day3')
xcom_data_7 = context['ti'].xcom_pull(
task_ids='bigquery_fetch_type2_5_tgw_day3')
xcom_data_8 = context['ti'].xcom_pull(
task_ids='bigquery_fetch_type2_5_tpgs_day4')
xcom_data_9 = context['ti'].xcom_pull(
task_ids='bigquery_fetch_type2_5_tgw_day4')
xcom_data_10 = context['ti'].xcom_pull(
task_ids='bigquery_fetch_type2_5_tpgs_day5')
xcom_data_11 = context['ti'].xcom_pull(
task_ids='bigquery_fetch_type2_5_tgw_day5')
xcom_data_12 = context['ti'].xcom_pull(
task_ids='bigquery_fetch_type2_5_tpgs_day6')
xcom_data_13 = context['ti'].xcom_pull(
task_ids='bigquery_fetch_type2_5_tgw_day6')
xcom_data_14 = context['ti'].xcom_pull(
task_ids='bigquery_fetch_type2_5_tpgs_day7')
xcom_data_15 = context['ti'].xcom_pull(
task_ids='bigquery_fetch_type2_5_tgw_day7')
if xcom_data_1 and (xcom_data_2 or xcom_data_3 or xcom_data_4 or xcom_data_5 or xcom_data_6 or xcom_data_7 or xcom_data_8 or xcom_data_9 or xcom_data_10 or xcom_data_11 or xcom_data_12 or xcom_data_13 or xcom_data_14 or xcom_data_15):
data_out, uaid = prepare_outtype2(xcom_data_1, xcom_data_2, xcom_data_3, xcom_data_4, xcom_data_5, xcom_data_6, xcom_data_7,
xcom_data_8, xcom_data_9, xcom_data_10, xcom_data_11, xcom_data_12, xcom_data_13, xcom_data_14, xcom_data_15)
dobs_data_ops.update_output(
data_out, uaid, dobs_constants.Analytics_TYPE2)
# Hourly data output streaming
def update_output_type3(**context):
# Load output of respective agency's property into RDS
xcom_data_1 = context['ti'].xcom_pull(
task_ids='bigquery_fetch_type3_1')
if xcom_data_1:
data_out, uaid = prepare_outtype3(xcom_data_1)
dobs_data_ops.update_output(
data_out, uaid, dobs_constants.Analytics_TYPE3)
# xcom_data_2 = context['ti'].xcom_pull(task_ids='bigquery_fetch_type3_2')
# if xcom_data_2:
# data_out, uaid = prepare_outtype3(xcom_data_2)
# dobs_data_ops.update_output(data_out, uaid, dobs_constants.Analytics_TYPE3)
xcom_data_3 = context['ti'].xcom_pull(
task_ids='bigquery_fetch_type3_3')
if xcom_data_3:
data_out, uaid = prepare_outtype3(xcom_data_3)
dobs_data_ops.update_output(
data_out, uaid, dobs_constants.Analytics_TYPE3)
xcom_data_4 = context['ti'].xcom_pull(
task_ids='bigquery_fetch_type3_4')
if xcom_data_4:
data_out, uaid = prepare_outtype3(xcom_data_4)
dobs_data_ops.update_output(
data_out, uaid, dobs_constants.Analytics_TYPE3)
xcom_data_5 = context['ti'].xcom_pull(
task_ids='bigquery_fetch_type3_5')
if xcom_data_5:
data_out, uaid = prepare_outtype3(xcom_data_5)
dobs_data_ops.update_output(
data_out, uaid, dobs_constants.Analytics_TYPE3)
# DAGs - Write data to RDS
update_type1_to_rds = python_operator.PythonOperator(
task_id='update_type1_to_rds',
python_callable=update_output_type1,
provide_context=True,
op_kwargs=None,
dag=dag,
)
update_type2_to_rds = python_operator.PythonOperator(
task_id='update_type2_to_rds',
python_callable=update_output_type2,
provide_context=True,
op_kwargs=None,
dag=dag,
)
update_type3_to_rds = python_operator.PythonOperator(
task_id='update_type3_to_rds',
python_callable=update_output_type3,
provide_context=True,
op_kwargs=None,
dag=dag,
)
# bigquery_fetch_type1_1
bigquery_data_type1 >> [bigquery_fetch_type1_1_toppages,
bigquery_fetch_type1_1_topgrowth,
# bigquery_fetch_type1_2
# bigquery_fetch_type1_3
bigquery_fetch_type1_3_toppages,
bigquery_fetch_type1_3_topgrowth,
# bigquery_fetch_type1_4
bigquery_fetch_type1_4_toppages,
bigquery_fetch_type1_4_topgrowth,
# bigquery_fetch_type1_5
bigquery_fetch_type1_5_toppages,
bigquery_fetch_type1_5_topgrowth] >> update_type1_to_rds
bigquery_data_type2 >> [bigquery_fetch_type2_1,
bigquery_fetch_type2_1_tpgs_day1,
bigquery_fetch_type2_1_tpgs_day2,
bigquery_fetch_type2_1_tpgs_day3,
bigquery_fetch_type2_1_tpgs_day4,
bigquery_fetch_type2_1_tpgs_day5,
bigquery_fetch_type2_1_tpgs_day6,
bigquery_fetch_type2_1_tpgs_day7,
bigquery_fetch_type2_1_tgw_day1,
bigquery_fetch_type2_1_tgw_day2,
bigquery_fetch_type2_1_tgw_day3,
bigquery_fetch_type2_1_tgw_day4,
bigquery_fetch_type2_1_tgw_day5,
bigquery_fetch_type2_1_tgw_day6,
bigquery_fetch_type2_1_tgw_day7,
# bigquery_fetch_type2_2
bigquery_fetch_type2_3,
bigquery_fetch_type2_3_tpgs_day1,
bigquery_fetch_type2_3_tpgs_day2,
bigquery_fetch_type2_3_tpgs_day3,
bigquery_fetch_type2_3_tpgs_day4,
bigquery_fetch_type2_3_tpgs_day5,
bigquery_fetch_type2_3_tpgs_day6,
bigquery_fetch_type2_3_tpgs_day7,
bigquery_fetch_type2_3_tgw_day1,
bigquery_fetch_type2_3_tgw_day2,
bigquery_fetch_type2_3_tgw_day3,
bigquery_fetch_type2_3_tgw_day4,
bigquery_fetch_type2_3_tgw_day5,
bigquery_fetch_type2_3_tgw_day6,
bigquery_fetch_type2_3_tgw_day7,
bigquery_fetch_type2_4,
bigquery_fetch_type2_4_tpgs_day1,
bigquery_fetch_type2_4_tpgs_day2,
bigquery_fetch_type2_4_tpgs_day3,
bigquery_fetch_type2_4_tpgs_day4,
bigquery_fetch_type2_4_tpgs_day5,
bigquery_fetch_type2_4_tpgs_day6,
bigquery_fetch_type2_4_tpgs_day7,
bigquery_fetch_type2_4_tgw_day1,
bigquery_fetch_type2_4_tgw_day2,
bigquery_fetch_type2_4_tgw_day3,
bigquery_fetch_type2_4_tgw_day4,
<reponame>bvoleti-p/svlearn
"""Utils for ML
"""
import numpy as np
import pandas as pd
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import make_scorer
from sklearn.metrics import auc
from scipy import interp
from matplotlib import pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_validate
from sklearn.model_selection import cross_val_predict
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import ShuffleSplit
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import f_regression
from sklearn.feature_selection import mutual_info_regression
from sklearn.feature_selection import f_classif
from sklearn.feature_selection import mutual_info_classif
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import RobustScaler
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.decomposition import PCA
from sklearn.decomposition import KernelPCA
from sklearn.cluster import KMeans
from sklearn.svm import LinearSVR
from sklearn.svm import SVR
from sklearn.svm import LinearSVC
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegression
import statsmodels
import statsmodels.api as sm
import statsmodels.formula.api as smf
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import LabelBinarizer
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
from IPython.display import display, Markdown, HTML
import category_encoders
from sklearn import svm
from sklearn.ensemble import IsolationForest
from sklearn.neighbors import LocalOutlierFactor
from sklearn.covariance import EllipticEnvelope
from itertools import cycle
__HTML = 'HTML'
__DISPLAY = 'DISPLAY'
__line_to_print = "________________________________________"
def print_func(value_to_print, mode=None):
"""Display or Print an object or string
Args:
value_to_print (Union[str, object]): Value to print
mode (optional[str]): Defaults to None.
Accepts either `DISPLAY` or `HTML`
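Example:
Illustrative usage; `df` stands for any pandas DataFrame you have loaded:
>>> print_func("Training finished")
>>> print_func(df.head(), mode='DISPLAY')
>>> print_func("<b>Training finished</b>", mode='HTML')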
"""
if(mode == __DISPLAY):
display(value_to_print)
elif(mode == __HTML):
display(HTML(value_to_print))
else:
print(value_to_print)
def print_separator():
"""Prints a separator line using 80 underscores
"""
print_func(__line_to_print + __line_to_print)
def print_new_line():
"""Prints a new line
"""
print_func("")
def get_dataframe_from_array(data_array, columns):
"""Convert ndarray to pd.DataFrame for the given list of columns
Args:
data_array (ndarray): Array to convert to pd.DataFrame
columns (Union[array-like]): Column Names for the pd.DataFrame
Returns:
pd.DataFrame
"""
df = pd.DataFrame(data_array, columns=columns)
print_func("-> Loaded dataframe of shape " + str(df.shape))
print_separator()
return df
def about_dataframe(df):
"""Describe DataFrame and show it's information
Args:
df (DataFrame): Pandas DataFrame to describe and info
"""
print_func("-> Data Describe\n")
print_func(df.describe(include = "all").transpose(), mode=__DISPLAY)
print_separator()
print_func("-> Data Info\n")
print_func(df.info(), mode=__DISPLAY)
print_separator()
null_values_info(df)
def null_values_info(df):
"""Show null value information of a DataFrame
Args:
df (DataFrame): Pandas DataFrame for which null values should be displayed
"""
df = df.copy()
kount_of_null_values = df.isnull().sum().sum()
if(kount_of_null_values > 0):
print_func("-> Null Values Info")
df_null_values_sum = df.isnull().sum()
html_to_display = "<table><tbody><tr>"
num_of_columns_with_null = 0
for idx, each_feature in enumerate(sorted(df_null_values_sum.keys())):
if(df_null_values_sum.loc[each_feature] > 0):
html_to_display = (html_to_display + "<td>" + each_feature +
"</td>" + "<td>" +
str(df_null_values_sum.loc[each_feature]) +
"</td>")
num_of_columns_with_null = num_of_columns_with_null + 1
if(num_of_columns_with_null%4 == 0):
html_to_display = html_to_display + "</tr><tr>"
html_to_display = html_to_display + "</tr></tbody></table>"
print_func(html_to_display, mode=__HTML)
else:
print_func("-> No Null Values")
print_separator()
def fill_null_values(df, column, value, row_index):
"""Fill null values in a dataframe column
Args:
df (DataFrame): Pandas DataFrame that will be updated
column (str): Column in the target dataframe that will be updated
value: (Union[int, str, object]): New value that will replace
null values
row_index (Union[Index, array-like]): Index of rows to be updated
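Example:
Illustrative usage; the 'Age' column is an assumption:
>>> na_rows = np.where(data['Age'].isnull())[0]
>>> utils.fill_null_values(data, column='Age',
>>>                        value=data['Age'].median(), row_index=na_rows)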
"""
num_of_rows = row_index.shape[0]
df.iloc[row_index, df.columns.get_loc(column)] = value
print_func("{0:d} rows updated for '".format(num_of_rows) + column
+ "' with '" + str(value) + "'")
def columns_info(df, cat_count_threshold, show_group_counts=False):
"""Prints and returns column info for a given dataframe
Args:
df (DataFrame): Pandas DataFrame
cat_count_threshold (int): If a column in the dataframe
has unique value count less than this threshold then it
will be tagged as 'categorical'
show_group_counts (boolean): If True then prints the individual
group counts for each column
Example:
>>> object_cat_cols, numeric_cat_cols, numeric_cols = utils.columns_info(
>>>     data, cat_count_threshold=5, show_group_counts=True)
"""
if(cat_count_threshold is None):
cat_count_threshold = 10
all_columns = df.columns
numeric_cat_columns = sorted(df._get_numeric_data().columns)
# https://stackoverflow.com/questions/29803093/
# check-which-columns-in-dataframe-are-categorical
object_cat_columns = sorted(list(set(all_columns)
- set(numeric_cat_columns)))
print_func("-> Columns will be tagged as categorical if number " +
" of categories are less than or equal to " +
str(cat_count_threshold))
print_separator()
kount = 0
selected_object_cat_columns = []
object_columns_not_identified_as_category = []
to_print = "-> Count of 'object' type categorical columns {0:d}\n"
to_print_detail = ""
for object_column in object_cat_columns:
if(df[object_column].unique().shape[0] <= cat_count_threshold):
if(show_group_counts):
to_print_detail = ( to_print_detail +
str(df.groupby(object_column)[object_column].count()) )
to_print_detail = ( to_print_detail +
"\n" +
"\n________________________________________\n\n" )
kount += 1
selected_object_cat_columns.append(object_column)
else:
object_columns_not_identified_as_category.append(object_column)
if(kount > 0):
print_func(to_print.format(kount))
print_func(selected_object_cat_columns)
print_new_line()
if(to_print_detail != ""):
print_func(to_print_detail)
print_separator()
if(len(object_columns_not_identified_as_category) > 0):
print_func("-> Count of 'object' type non categorical columns: " +
str(len(object_columns_not_identified_as_category)) +
"\n")
print_func(object_columns_not_identified_as_category)
print_new_line()
print_separator()
kount = 0
selected_numeric_cat_columns = []
numeric_columns = []
to_print = "-> Count of 'numeric' type categorical columns {0:d}\n"
to_print_detail = ""
for numeric_column in numeric_cat_columns:
if(df[numeric_column].unique().shape[0] <= cat_count_threshold):
if(show_group_counts):
to_print_detail = (to_print_detail +
str(df.groupby(numeric_column)[numeric_column].count()) )
to_print_detail = ( to_print_detail + "\n" +
"\n" + __line_to_print + "\n\n" )
kount += 1
selected_numeric_cat_columns.append(numeric_column)
else:
numeric_columns.append(numeric_column)
if(kount > 0):
print_func(to_print.format(kount))
print_func(selected_numeric_cat_columns)
print_new_line()
if(to_print_detail != ""):
print_func(to_print_detail)
print_separator()
if(len(numeric_columns) > 0):
print_func("Count of 'numeric' type columns: {0:d}\n".
format(len(numeric_columns)))
print_func(numeric_columns)
print_new_line()
print_separator()
return (selected_object_cat_columns,
selected_numeric_cat_columns,
numeric_columns)
def get_X_and_y(df, y_column):
"""Splits pd.dataframe into X (predictors) and y (response)
Args:
df (DataFrame): Pandas DataFrame
y_column (str): The response column name
Returns:
X (DataFrame): All columns except the response will be in X
y (Series): Only the response column from dataframe
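Example:
Illustrative usage; the 'Diabetes' response column is an assumption:
>>> X, y = utils.get_X_and_y(data, y_column='Diabetes')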
"""
X = df[[i for i in list(df.columns) if i != y_column]]
y = df[y_column]
print_func("-> X set to " + ', '.join(
df.columns[~df.columns.isin( [y_column] ) ] ))
print_func("-> y set to " + y_column)
print_separator()
return X, y
def __get_plot_attrs(**kwargs):
if 'hue_column' not in kwargs:
kwargs['hue_column'] = None
if 'split_plots_by' not in kwargs:
kwargs['split_plots_by'] = None
if 'height' not in kwargs:
kwargs['height'] = 4
if 'aspect' not in kwargs:
kwargs['aspect'] = 1
if 'kde' not in kwargs:
kwargs['kde']=True
return ( kwargs['hue_column'],
kwargs['split_plots_by'],
kwargs['height'],
kwargs['aspect'],
kwargs['kde'])
def count_plots(df, columns, **kwargs):
"""Count Plots using seaborn
Display Count plots for the given columns in a DataFrame
Args:
df (DataFrame): Pandas DataFrame
columns (array-like): Columns for which count plot has to be shown
kwargs (array[str]): Keyword Args
KeywordArgs:
hue_column (str): Color
split_plots_by (str): Split seaborn facetgrid by column such as Gender
height (float): Sets the height of plot
aspect (float): Determines the width of the plot based on height
Example:
>>> utils.count_plots(data, object_cat_cols, height=4, aspect=1.5)
"""
(hue_column,
split_plots_by,
height,
aspect,
kde) = __get_plot_attrs(**kwargs)
columns = pd.Series(columns)
i = 0
plt.ion()
columns = columns[~columns.isin([hue_column, split_plots_by])]
for each_col in columns:
order=df.groupby(each_col)
print_func("Count Plot for: " + str(each_col))
g = sns.catplot( x=each_col, hue=hue_column,
col=split_plots_by, kind="count",
data=df, order=order.indices.keys(),
height=height, aspect=aspect )
g.set_xticklabels(rotation=40)
plt.show(block=False)
print_new_line()
# display(HTML("<input type='checkbox' id='" + each_col +
# "' value='" + each_col + "'>" + each_col + "<br />"))
i = i + 1
def count_compare_plots(df1, df1_title, df2, df2_title, column, **kwargs):
"""Show Count Plots of two DataFrames for comparision
Can be used to compare how Fill NA affects the distribution of a column
Args:
Example:
The below example uses nhanes dataset.
>>> for each_column in object_cat_columns:
>>> data[each_column] = data[each_column].fillna(
>>> data.groupby(['Gender'])[each_column].ffill())
>>> for each_column in object_cat_columns:
>>> str_count_of_nas = str(len(
>>> data_raw.index[data_raw.isnull()[each_column]]))
>>> str_count_of_nas = ' (Count of NAs:' + str_count_of_nas + ')'
>>> utils.count_compare_plots(df1=data_raw,
>>> df1_title='Before Fill-NA' + str_count_of_nas,
>>> df2=data,
>>> df2_title='After Fill-NA',
>>> column=each_column,
>>> height=4,
>>> aspect=1.5,
>>> hue_column='Diabetes',
>>> split_plots_by='Gender')
"""
(hue_column,
split_plots_by,
height,
aspect,
kde) = __get_plot_attrs(**kwargs)
print_func("Count Plot for: " + str(column))
f, axes = plt.subplots(2)
g = sns.catplot( x=column, hue=hue_column, col=split_plots_by,
kind="count", data=df1, height=height, aspect=aspect )
g.set_xticklabels(rotation=40)
g.fig.suptitle(df1_title, fontsize=16)
###
g = sns.catplot( x=column, hue=hue_column, col=split_plots_by,
kind="count", data=df2, height=height, aspect=aspect)
g.set_xticklabels(rotation=40)
g.fig.suptitle(df2_title, fontsize=16)
####
plt.close(1)
plt.show()
print_new_line()
def dist_plots(df, columns, **kwargs):
"""Dist Plots using seaborn
Args:
df (DataFrame): Pandas DataFrame.
columns ([str]): Plot only for selected columns.
**kwargs: Keyword arguments.
Keyword Args:
hue_column (str): Color
split_plots_by (str): Split seaborn facetgrid by column such as Gender
height (float): Sets the height of plot
aspect (float): Determines the width of the plot based on height
Example:
>>> utils.dist_plots(data, numeric_cols, height=4, aspect=1.5,
>>> hue_column='class', kde=False)
Returns: Nothing
"""
kwargs['kde']=False
kde_plots(df, columns, **kwargs)
def kde_plots(df, columns, **kwargs):
"""KDE Plots using seaborn
Args:
df (DataFrame): DataFrame
columns ([str]): Plot only for selected columns.
**kwargs: Keyword arguments.
Keyword Args:
hue_column: for color coding
split_plots_by: split seaborn FacetGrid by column, example: Gender
height: sets the height of plot
aspect: determines the width of the plot based on height
Example:
>>> utils.kde_plots(data, numeric_cols, height=4, aspect=1.5,
>>> hue_column='class')
"""
(hue_column,
split_plots_by,
height,
aspect,
kde) = __get_plot_attrs(**kwargs)
columns = pd.Series(columns)
for each_col in columns:
if(kde):
print_func("KDE Plot for: " + str(each_col))
else:
print_func("Histogram for: " + str(each_col))
if(split_plots_by is None):
if(hue_column is None):
g = sns.FacetGrid(df[[each_col]],
height=height,
aspect=aspect)
else:
g = sns.FacetGrid(df[[each_col, hue_column]],
hue=hue_column,
height=height,
aspect=aspect)
else:
if(hue_column is None):
g = sns.FacetGrid(df[[each_col, split_plots_by]],
col=split_plots_by,
height=height,
aspect=aspect)
else:
g = sns.FacetGrid(df[[each_col, hue_column, split_plots_by]],
hue=hue_column,
col=split_plots_by,
height=height,
aspect=aspect)
g = (g.map(sns.distplot, each_col, hist=True, kde=kde))
g.add_legend()
plt.show()
print_new_line()
def kde_compare_plots(df1, df1_title, df2, df2_title, column, **kwargs):
"""Summary line.
Extended description of function.
Args:
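Example:
The example below is illustrative; the column and hue names are
assumptions and mirror the `count_compare_plots` example above.
>>> utils.kde_compare_plots(df1=data_raw,
>>>                         df1_title='Before Fill-NA',
>>>                         df2=data,
>>>                         df2_title='After Fill-NA',
>>>                         column='BMI',
>>>                         height=4,
>>>                         aspect=1.5,
>>>                         hue_column='Diabetes',
>>>                         split_plots_by='Gender')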
"""
(hue_column,
split_plots_by,
height,
aspect,
kde) = __get_plot_attrs(**kwargs)
print_func("Count Plot for: " + str(column))
f, axes = plt.subplots(2)
if(split_plots_by is None):
g = sns.FacetGrid(df1[[column, hue_column]], hue=hue_column,
col=split_plots_by, height=height,
aspect=aspect)
else:
g = sns.FacetGrid(df1[[column, hue_column, split_plots_by]],
hue=hue_column, col=split_plots_by, height=height,
aspect=aspect)
g = (g.map(sns.distplot, column, hist=True))
g.add_legend()
g.fig.suptitle(df1_title, fontsize=16)
####
if(split_plots_by is None):
g = sns.FacetGrid(df2[[column, hue_column]], hue=hue_column,
col=split_plots_by, height=height,
aspect=aspect)
else:
g = sns.FacetGrid(df2[[column, hue_column, split_plots_by]],
hue=hue_column, col=split_plots_by, height=height,
aspect=aspect)
g = (g.map(sns.distplot, column, hist=True))
g.add_legend()
g.fig.suptitle(df2_title, fontsize=16)
plt.close(1)
plt.show()
print_new_line()
def encode_columns(df, method, columns = []):
"""Summary line.
Extended description of function.
Args:
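Example:
Illustrative usage; the column names here are assumptions:
>>> data = utils.encode_columns(data, method='labelencoder', columns=['Gender'])
>>> data = utils.encode_columns(data, method='onehot', columns=['Ethnicity'])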
"""
kount = 0
df = df.copy()
for columnName in columns:
if(method == 'labelencoder'):
label_encoder = LabelEncoder()
df[columnName] = label_encoder.fit_transform(
df[columnName].astype(str))
print_func("-> Transformed [" + columnName +
"] using sklearn.LabelEncoder")
print_func("--> Classes: " + str(label_encoder.classes_))
elif(method == 'binary'):
label_binarizer = LabelBinarizer()
lb_results = label_binarizer.fit_transform(df[columnName])
print_func("-> Transformed [" + columnName +
"] using sklearn.LabelBinarizer")
if(label_binarizer.y_type_ == 'multiclass'):
print_func("--> Type of target data is: " +
label_binarizer.y_type_)
temp_df = pd.DataFrame(lb_results,
columns = label_binarizer.classes_,
index = df.index)
df = df.join(temp_df)
print_func("--> Added following columns to dataframe: " +
str(label_binarizer.classes_))
elif(method == 'onehot'):
one_hot_encoder = OneHotEncoder(sparse=False)
ohe_results = one_hot_encoder.fit_transform(df[[columnName]])
print_func("-> Transformed [" + columnName +
"] using sklearn.OneHotEncoder")
temp_df = pd.DataFrame(ohe_results,
columns = one_hot_encoder.get_feature_names())
df = pd.concat([df,temp_df],axis=1)
print_func("--> Added following columns to returned dataframe: | |
<reponame>fobrice/ansible-unity
#!/usr/bin/python
# Copyright: (c) 2021, DellEMC
"""Ansible module for managing User Quota on Unity"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: dellemc_unity_user_quota
short_description: Manage user quota on the Unity storage system
description:
- Managing User Quota on the Unity storage system includes
Create user quota,
Get user quota,
Modify user quota,
Delete user quota,
Create user quota for quota tree,
Modify user quota for quota tree and
Delete user quota for quota tree.
version_added: "1.2.0"
extends_documentation_fragment:
- dellemc.unity.dellemc_unity.unity
author:
- <NAME> (@panigs7) <<EMAIL>>
options:
filesystem_name:
description:
- The name of the filesystem for which the user quota is created.
- For creation of a user quota either filesystem_name or
filesystem_id is required.
type: str
filesystem_id:
description:
- The ID of the filesystem for which the user quota is created.
- For creation of a user quota either filesystem_id or
filesystem_name is required.
type: str
nas_server_name:
description:
- The name of the NAS server in which the filesystem is created.
- For creation of a user quota either nas_server_name or
nas_server_id is required.
type: str
nas_server_id:
description:
- The ID of the NAS server in which the filesystem is created.
- For creation of a user quota either nas_server_id or
nas_server_name is required.
type: str
hard_limit:
description:
- Hard limitation for a user on the total space available. If exceeded, user cannot write data.
- Value 0 implies no limit.
- One of the values of soft_limit and hard_limit can be 0, however, both cannot be 0
during creation or modification of user quota.
type: int
soft_limit:
description:
- Soft limitation for a user on the total space available. If exceeded,
notification will be sent to the user for the grace period mentioned, beyond
which the user cannot use space.
- Value 0 implies no limit.
- Both soft_limit and hard_limit cannot be 0 during creation or modification
of user quota.
type: int
cap_unit:
description:
- Unit of soft_limit and hard_limit size.
- It defaults to 'GB' if not specified.
choices: ['MB', 'GB', 'TB']
type: str
user_type:
description:
- Type of user creating a user quota.
- Mandatory while creating or modifying user quota.
choices: ['Unix', 'Windows']
type: str
win_domain:
description:
- Fully qualified or short domain name for Windows user type.
- Mandatory when user_type is 'Windows'.
type: str
user_name:
description:
- User name of the user quota when user_type is 'Windows' or 'Unix'.
- user_name must be specified along with win_domain when user_type is 'Windows'.
type: str
uid:
description:
- User ID of the user quota.
type: str
user_quota_id:
description:
- User quota ID generated after creation of a user quota.
type: str
tree_quota_id:
description:
- The ID of the quota tree.
- Either tree_quota_id or path to quota tree is required to
create/modify/delete user quota for a quota tree.
type: str
path:
description:
- The path to the quota tree.
- Either tree_quota_id or path to quota tree is required to
create/modify/delete user quota for a quota tree.
- Path must start with a forward slash '/'.
type: str
state:
description:
- The state option is used to mention the existence of the user quota.
type: str
required: True
choices: ['absent', 'present']
'''
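# The hard_limit/soft_limit options above are sized in cap_unit (MB/GB/TB,
# defaulting to GB). A minimal sketch of the implied conversion is shown below;
# it is illustrative only (the helper name and the 1024-based factors are
# assumptions, not part of this module):
#
#     def _limit_in_bytes(limit, cap_unit='GB'):
#         factors = {'MB': 1024 ** 2, 'GB': 1024 ** 3, 'TB': 1024 ** 4}
#         return None if limit is None else limit * factors[cap_unit]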
EXAMPLES = r'''
- name: Get user quota details by user quota id
dellemc_unity_user_quota:
unispherehost: "{{unispherehost}}"
username: "{{username}}"
password: "{{password}}"
verifycert: "{{verifycert}}"
user_quota_id: "userquota_171798700679_0_123"
state: "present"
- name: Get user quota details by user quota uid/user name
dellemc_unity_user_quota:
unispherehost: "{{unispherehost}}"
username: "{{username}}"
password: "{{password}}"
verifycert: "{{verifycert}}"
filesystem_name: "fs_2171"
nas_server_id: "nas_21"
user_name: "test"
state: "present"
- name: Create user quota for a filesystem with filesystem id
dellemc_unity_user_quota:
unispherehost: "{{unispherehost}}"
username: "{{username}}"
password: "{{password}}"
verifycert: "{{verifycert}}"
filesystem_id: "fs_2171"
hard_limit: 6
cap_unit: "TB"
soft_limit: 5
user_type: "UID"
uid: "111"
state: "present"
- name: Create user quota for a filesystem with filesystem name
dellemc_unity_user_quota:
unispherehost: "{{unispherehost}}"
username: "{{username}}"
password: "{{password}}"
verifycert: "{{verifycert}}"
filesystem_name: "Test_filesystem"
nas_server_name: "lgl<PASSWORD>"
hard_limit: 6
cap_unit: "TB"
soft_limit: 5
user_type: "UID"
uid: "111"
state: "present"
- name: Modify user quota limit usage by user quota id
dellemc_unity_user_quota:
unispherehost: "{{unispherehost}}"
username: "{{username}}"
password: "{{password}}"
verifycert: "{{verifycert}}"
user_quota_id: "userquota_171798700679_0_123"
hard_limit: 10
cap_unit: "TB"
soft_limit: 8
state: "present"
- name: Modify user quota by filesystem id and user quota uid/user_name
dellemc_unity_user_quota:
unispherehost: "{{unispherehost}}"
username: "{{username}}"
password: "{{password}}"
verifycert: "{{verifycert}}"
filesystem_id: "fs_2171"
user_type: "Windows"
win_domain: "prod"
user_name: "sample"
hard_limit: 12
cap_unit: "TB"
soft_limit: 10
state: "present"
- name: Delete user quota
dellemc_unity_user_quota:
unispherehost: "{{unispherehost}}"
username: "{{username}}"
password: "{{password}}"
verifycert: "{{verifycert}}"
filesystem_id: "fs_2171"
win_domain: "prod"
user_name: "sample"
state: "absent"
- name: Create user quota of a quota tree
dellemc_unity_user_quota:
unispherehost: "{{unispherehost}}"
username: "{{username}}"
password: "{{password}}"
verifycert: "{{verifycert}}"
tree_quota_id: "treequota_171798700679_4"
user_type: "Windows"
win_domain: "prod"
user_name: "sample"
soft_limit: 9
cap_unit: "TB"
state: "present"
- name: Create user quota of a quota tree by quota tree path
dellemc_unity_user_quota:
unispherehost: "{{unispherehost}}"
username: "{{username}}"
password: "{{password}}"
verifycert: "{{verifycert}}"
filesystem_id: "fs_2171"
path: "/sample"
user_type: "Unix"
user_name: "test"
hard_limit: 2
cap_unit: "TB"
state: "present"
- name: Modify user quota of a quota tree
dellemc_unity_user_quota:
unispherehost: "{{unispherehost}}"
username: "{{username}}"
password: "{{password}}"
verifycert: "{{verifycert}}"
tree_quota_id: "treequota_171798700679_4"
user_type: "Windows"
win_domain: "prod"
user_name: "sample"
soft_limit: 10
cap_unit: "TB"
state: "present"
- name: Modify user quota of a quota tree by quota tree path
dellemc_unity_user_quota:
unispherehost: "{{unispherehost}}"
username: "{{username}}"
password: "{{password}}"
verifycert: "{{verifycert}}"
filesystem_id: "fs_2171"
path: "/sample"
user_type: "Windows"
win_domain: "prod"
user_name: "sample"
hard_limit: 12
cap_unit: "TB"
state: "present"
- name: Delete user quota of a quota tree by quota tree path
dellemc_unity_user_quota:
unispherehost: "{{unispherehost}}"
username: "{{username}}"
password: "{{password}}"
verifycert: "{{verifycert}}"
filesystem_id: "fs_2171"
path: "/sample"
win_domain: "prod"
user_name: "sample"
state: "absent"
- name: Delete user quota of a quota tree by quota tree id
dellemc_unity_user_quota:
unispherehost: "{{unispherehost}}"
username: "{{username}}"
password: "{{password}}"
verifycert: "{{verifycert}}"
tree_quota_id: "treequota_171798700679_4"
win_domain: "prod"
user_name: "sample"
state: "absent"
'''
RETURN = r'''
changed:
description: Whether or not the resource has changed
returned: always
type: bool
get_user_quota_details:
description: Details of the user quota.
returned: When user quota exists
type: complex
contains:
filesystem:
description: Filesystem details for which the user quota is
created.
type: complex
contains:
UnityFileSystem:
description: Filesystem details for which the
user quota is created.
type: complex
contains:
id:
description: ID of the filesystem for
which the user quota is created.
type: str
name:
description: Name of filesystem.
type: str
nas_server:
description: Nasserver details where
filesystem is created.
type: complex
contains:
name:
description: Name of nasserver.
type: str
id:
description: ID of nasserver.
type: str
tree_quota:
description: Quota tree details for which the user quota is
created.
type: complex
contains:
UnityTreeQuota:
description: Quota tree details for which the user
quota is created.
type: complex
contains:
id:
description: ID of the quota tree.
type: str
path:
description: Path to quota tree
type: str
gp_left:
description: The grace period left after the soft limit
for the user quota is exceeded.
type: int
hard_limit:
description: Hard limitation for a user on the total space
available. If exceeded, user cannot write data.
type: int
hard_ratio:
description: The hard ratio is the ratio between the
hard limit size of the user quota
and the amount of storage actually consumed.
type: str
soft_limit:
description: Soft limitation for a user on the total space
available. If exceeded, notification will be
sent to user for the grace period mentioned, beyond
which user cannot use space.
type: int
soft_ratio:
description: The soft ratio is the ratio between
the soft limit size of the user quota
and the amount of storage actually consumed.
type: str
id:
description: User quota ID.
type: str
size_used:
description: Size of used space in the filesystem
by the user files.
type: int
state:
description: State of the user quota.
type: int
uid:
description: User ID of the user.
type: int
unix_name:
description: Unix user name for this user quota's uid.
type: str
windows_names:
description: Windows user name that maps to this quota's uid.
type: str
windows_sids:
description: Windows SIDs that map to this quota's uid.
A more complicated example of a characteristic 2 field::
sage: E = EllipticCurve(GF(2^4,'alpha'), [0,0,1,0,1])
sage: P = E((1,1))
sage: phi_v = EllipticCurveIsogeny(E, P); phi_v
Isogeny of degree 3 from Elliptic Curve defined by y^2 + y = x^3 + 1 over Finite Field in alpha of size 2^4 to Elliptic Curve defined by y^2 + y = x^3 over Finite Field in alpha of size 2^4
sage: phi_ker_poly = phi_v.kernel_polynomial()
sage: phi_ker_poly
x + 1
sage: ker_poly_list = phi_ker_poly.list()
sage: phi_k = EllipticCurveIsogeny(E, ker_poly_list)
sage: phi_k == phi_v
True
sage: phi_k.rational_maps()
((x^3 + x + 1)/(x^2 + 1), (x^3*y + x^2*y + x*y + x + y)/(x^3 + x^2 + x + 1))
sage: phi_v.rational_maps()
((x^3 + x + 1)/(x^2 + 1), (x^3*y + x^2*y + x*y + x + y)/(x^3 + x^2 + x + 1))
sage: phi_k.degree() == phi_v.degree() == 3
True
sage: phi_k.is_separable()
True
sage: phi_v(E(0))
(0 : 1 : 0)
sage: alpha = E.base_field().gen()
sage: Q = E((0, alpha*(alpha + 1)))
sage: phi_v(Q)
(1 : alpha^2 + alpha : 1)
sage: phi_v(P) == phi_k(P)
True
sage: phi_k(P) == phi_v.codomain()(0)
True
We can create an isogeny that has kernel equal to the full 2
torsion::
sage: E = EllipticCurve(GF(3), [0,0,0,1,1])
sage: ker_list = E.division_polynomial(2).list()
sage: phi = EllipticCurveIsogeny(E, ker_list); phi
Isogeny of degree 4 from Elliptic Curve defined by y^2 = x^3 + x + 1 over Finite Field of size 3 to Elliptic Curve defined by y^2 = x^3 + x + 1 over Finite Field of size 3
sage: phi(E(0))
(0 : 1 : 0)
sage: phi(E((0,1)))
(1 : 0 : 1)
sage: phi(E((0,2)))
(1 : 0 : 1)
sage: phi(E((1,0)))
(0 : 1 : 0)
sage: phi.degree()
4
We can also create trivial isogenies with the trivial kernel::
sage: E = EllipticCurve(GF(17), [11, 11, 4, 12, 10])
sage: phi_v = EllipticCurveIsogeny(E, E(0))
sage: phi_v.degree()
1
sage: phi_v.rational_maps()
(x, y)
sage: E == phi_v.codomain()
True
sage: P = E.random_point()
sage: phi_v(P) == P
True
sage: E = EllipticCurve(GF(31), [23, 1, 22, 7, 18])
sage: phi_k = EllipticCurveIsogeny(E, [1]); phi_k
Isogeny of degree 1 from Elliptic Curve defined by y^2 + 23*x*y + 22*y = x^3 + x^2 + 7*x + 18 over Finite Field of size 31 to Elliptic Curve defined by y^2 + 23*x*y + 22*y = x^3 + x^2 + 7*x + 18 over Finite Field of size 31
sage: phi_k.degree()
1
sage: phi_k.rational_maps()
(x, y)
sage: phi_k.codomain() == E
True
sage: phi_k.kernel_polynomial()
1
sage: P = E.random_point(); P == phi_k(P)
True
Velu and Kohel also work in characteristic 0::
sage: E = EllipticCurve(QQ, [0,0,0,3,4])
sage: P_list = E.torsion_points()
sage: phi = EllipticCurveIsogeny(E, P_list); phi
Isogeny of degree 2 from Elliptic Curve defined by y^2 = x^3 + 3*x + 4 over Rational Field to Elliptic Curve defined by y^2 = x^3 - 27*x + 46 over Rational Field
sage: P = E((0,2))
sage: phi(P)
(6 : -10 : 1)
sage: phi_ker_poly = phi.kernel_polynomial()
sage: phi_ker_poly
x + 1
sage: ker_poly_list = phi_ker_poly.list()
sage: phi_k = EllipticCurveIsogeny(E, ker_poly_list); phi_k
Isogeny of degree 2 from Elliptic Curve defined by y^2 = x^3 + 3*x + 4 over Rational Field to Elliptic Curve defined by y^2 = x^3 - 27*x + 46 over Rational Field
sage: phi_k(P) == phi(P)
True
sage: phi_k == phi
True
sage: phi_k.degree()
2
sage: phi_k.is_separable()
True
A more complicated example over the rationals (of odd degree)::
sage: E = EllipticCurve('11a1')
sage: P_list = E.torsion_points()
sage: phi_v = EllipticCurveIsogeny(E, P_list); phi_v
Isogeny of degree 5 from Elliptic Curve defined by y^2 + y = x^3 - x^2 - 10*x - 20 over Rational Field to Elliptic Curve defined by y^2 + y = x^3 - x^2 - 7820*x - 263580 over Rational Field
sage: P = E((16,-61))
sage: phi_v(P)
(0 : 1 : 0)
sage: ker_poly = phi_v.kernel_polynomial(); ker_poly
x^2 - 21*x + 80
sage: ker_poly_list = ker_poly.list()
sage: phi_k = EllipticCurveIsogeny(E, ker_poly_list); phi_k
Isogeny of degree 5 from Elliptic Curve defined by y^2 + y = x^3 - x^2 - 10*x - 20 over Rational Field to Elliptic Curve defined by y^2 + y = x^3 - x^2 - 7820*x - 263580 over Rational Field
sage: phi_k == phi_v
True
sage: phi_v(P) == phi_k(P)
True
sage: phi_k.is_separable()
True
We can also do this same example over the number field defined by
the irreducible two torsion polynomial of `E`::
sage: E = EllipticCurve('11a1')
sage: P_list = E.torsion_points()
sage: K.<alpha> = NumberField(x^3 - 2* x^2 - 40*x - 158)
sage: EK = E.change_ring(K)
sage: P_list = [EK(P) for P in P_list]
sage: phi_v = EllipticCurveIsogeny(EK, P_list); phi_v
Isogeny of degree 5 from Elliptic Curve defined by y^2 + y = x^3 + (-1)*x^2 + (-10)*x + (-20) over Number Field in alpha with defining polynomial x^3 - 2*x^2 - 40*x - 158 to Elliptic Curve defined by y^2 + y = x^3 + (-1)*x^2 + (-7820)*x + (-263580) over Number Field in alpha with defining polynomial x^3 - 2*x^2 - 40*x - 158
sage: P = EK((alpha/2,-1/2))
sage: phi_v(P)
(122/121*alpha^2 + 1633/242*alpha - 3920/121 : -1/2 : 1)
sage: ker_poly = phi_v.kernel_polynomial()
sage: ker_poly
x^2 - 21*x + 80
sage: ker_poly_list = ker_poly.list()
sage: phi_k = EllipticCurveIsogeny(EK, ker_poly_list)
sage: phi_k
Isogeny of degree 5 from Elliptic Curve defined by y^2 + y = x^3 + (-1)*x^2 + (-10)*x + (-20) over Number Field in alpha with defining polynomial x^3 - 2*x^2 - 40*x - 158 to Elliptic Curve defined by y^2 + y = x^3 + (-1)*x^2 + (-7820)*x + (-263580) over Number Field in alpha with defining polynomial x^3 - 2*x^2 - 40*x - 158
sage: phi_v == phi_k
True
sage: phi_k(P) == phi_v(P)
True
sage: phi_k == phi_v
True
sage: phi_k.degree()
5
sage: phi_v.is_separable()
True
The following example shows how to specify an isogeny from domain
and codomain::
sage: E = EllipticCurve('11a1')
sage: R.<x> = QQ[]
sage: f = x^2 - 21*x + 80
sage: phi = E.isogeny(f)
sage: E2 = phi.codomain()
sage: phi_s = EllipticCurveIsogeny(E, None, E2, 5)
sage: phi_s
Isogeny of degree 5 from Elliptic Curve defined by y^2 + y = x^3 - x^2 - 10*x - 20 over Rational Field to Elliptic Curve defined by y^2 + y = x^3 - x^2 - 7820*x - 263580 over Rational Field
sage: phi_s == phi
True
sage: phi_s.rational_maps() == phi.rational_maps()
True
However only cyclic normalized isogenies can be constructed this
way. So it won't find the isogeny [3]::
sage: E.isogeny(None, codomain=E,degree=9)
Traceback (most recent call last):
...
ValueError: The two curves are not linked by a cyclic normalized isogeny of degree 9
Also the presumed isogeny between the domain and codomain must be
normalized::
sage: E2.isogeny(None,codomain=E,degree=5)
Traceback (most recent call last):
...
ValueError: The two curves are not linked by a cyclic normalized isogeny of degree 5
sage: phihat = phi.dual(); phihat
Isogeny of degree 5 from Elliptic Curve defined by y^2 + y = x^3 - x^2 - 7820*x - 263580 over Rational Field to Elliptic Curve defined by y^2 + y = x^3 - x^2 - 10*x - 20 over Rational Field
sage: phihat.is_normalized()
False
Here is an example of the construction of an endomorphism with cyclic
kernel on a CM-curve::
sage: K.<i> = NumberField(x^2+1)
sage: E = EllipticCurve(K, [1,0])
sage: RK.<X> = K[]
sage: f = X^2 - 2/5*i + 1/5
sage: phi= E.isogeny(f)
sage: isom = phi.codomain().isomorphism_to(E)
sage: phi = isom * phi
sage: phi.codomain() == phi.domain()
True
sage: phi.rational_maps()
(((4/25*i + 3/25)*x^5 + (4/5*i - 2/5)*x^3 - x)/(x^4 + (-4/5*i + 2/5)*x^2 + (-4/25*i - 3/25)), ((11/125*i + 2/125)*x^6*y + (-23/125*i + 64/125)*x^4*y + (141/125*i + 162/125)*x^2*y + (3/25*i - 4/25)*y)/(x^6 + (-6/5*i + 3/5)*x^4 + (-12/25*i - 9/25)*x^2 + (2/125*i - 11/125)))
Domain
# TODO: test 2d+ dims and coos
# TODO: simplify with compress coords?
# TODO: allow -1 in dims to auto place *without* ind? one or other
# Make sure `ops` is a list
if isinstance(ops, (np.ndarray, sp.spmatrix)):
ops = (ops,)
dtype = common_type(*ops)
# Make sure dimensions and coordinates have been flattened.
if np.ndim(dims) > 1:
dims, inds = dim_map(dims, inds)
# Make sure `inds` is a list
elif np.ndim(inds) == 0:
inds = (inds,)
# Infer sparsity from list of ops
if sparse is None:
sparse = any(issparse(op) for op in ops)
# Create a sorted list of operators with their matching index
inds, ops = zip(*sorted(zip(inds, itertools.cycle(ops))))
inds, ops = set(inds), iter(ops)
# can't slice "coo" format so use "csr" if ownership specified
eye_kws = {'sparse': sparse,
'stype': "csr" if ownership else "coo",
'dtype': dtype}
def gen_ops():
cff_id = 1 # keeps track of compressing adjacent identities
cff_ov = 1 # keeps track of overlaying op on multiple dimensions
for ind, dim in enumerate(dims):
# check if op should be placed here
if ind in inds:
# check if need preceding identities
if cff_id > 1:
yield eye(cff_id, **eye_kws)
cff_id = 1 # reset cumulative identity size
# check if first subsystem in placement block
if cff_ov == 1:
op = next(ops)
sz_op = op.shape[0]
# final dim (of block or total) -> place op
if cff_ov * dim == sz_op or dim == -1:
yield op
cff_ov = 1
# accumulate sub-dims
else:
cff_ov *= dim
# check if midway through placing operator over several subsystems
elif cff_ov > 1:
cff_ov *= dim
# else accumulate adjacent identities
else:
cff_id *= dim
# check if trailing identity needed
if cff_id > 1:
yield eye(cff_id, **eye_kws)
return kron(*gen_ops(), stype=stype, coo_build=coo_build,
parallel=parallel, ownership=ownership)
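# A minimal usage sketch (relying on the same ``pauli``/``eye`` helpers and
# ``&`` kron shorthand that the docstring examples below use): place X on
# subsystem 1 of three qubits, with identities elsewhere.
#
#     >>> X1 = ikron(pauli('X'), dims=[2, 2, 2], inds=1)
#     >>> np.allclose(X1, eye(2) & pauli('X') & eye(2))
#     True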
@ensure_qarray
def _permute_dense(p, dims, perm):
"""Permute the subsytems of a dense array.
"""
p, perm = np.asarray(p), np.asarray(perm)
d = prod(dims)
if isop(p):
return (p.reshape([*dims, *dims])
.transpose([*perm, *(perm + len(dims))])
.reshape([d, d]))
return (p.reshape(dims)
.transpose(perm)
.reshape([d, 1]))
def _permute_sparse(a, dims, perm):
"""Permute the subsytems of a sparse matrix.
"""
perm, dims = np.asarray(perm), np.asarray(dims)
# New dimensions & stride (i.e. product of preceding dimensions)
new_dims = dims[perm]
odim_stride = np.multiply.accumulate(dims[::-1])[::-1] // dims
ndim_stride = np.multiply.accumulate(new_dims[::-1])[::-1] // new_dims
# Range of possible coordinates for each subsys
coos = (tuple(range(dim)) for dim in dims)
# Complete basis using coordinates for current and new dimensions
basis = np.asarray(tuple(itertools.product(*coos, repeat=1)))
oinds = np.sum(odim_stride * basis, axis=1)
ninds = np.sum(ndim_stride * basis[:, perm], axis=1)
# Construct permutation matrix and apply it to state
perm_mat = sp.coo_matrix((np.ones(a.shape[0]), (ninds, oinds))).tocsr()
if isop(a):
return dot(dot(perm_mat, a), dag(perm_mat))
return dot(perm_mat, a)
def permute(p, dims, perm):
"""Permute the subsytems of state or opeator.
Parameters
----------
p : vector or operator
State or operator to permute.
dims : tuple of int
Internal dimensions of the system.
perm : tuple of int
New order of indexes ``range(len(dims))``.
Returns
-------
pp : vector or operator
Permuted state or operator.
See Also
--------
pkron
Examples
--------
>>> IX = speye(2) & pauli('X', sparse=True)
>>> XI = permute(IX, dims=[2, 2], perm=[1, 0])
>>> np.allclose(XI.A, pauli('X') & eye(2))
True
"""
if issparse(p):
return _permute_sparse(p, dims, perm)
return _permute_dense(p, dims, perm)
def pkron(op, dims, inds, **ikron_opts):
# TODO: multiple ops
# TODO: coo map, coo compress
# TODO: sparse, stype, coo_build?
"""Advanced, padded tensor product.
Construct an operator such that ``op`` acts on ``dims[inds]``, and allow it
to be arbitrarily split and reversed etc., in other words, permute and then
tensor it into a larger space.
Parameters
----------
ops : matrix-like or tuple of matrix-like
Operator to place into the tensor space.
dims : tuple of int
Dimensions of tensor space.
inds : tuple of int
Indices of the dimensions to place operators on. If multiple
operators are specified, ``inds[1]`` corresponds to ``ops[1]`` and
so on.
sparse : bool, optional
Whether to construct the new operator in sparse form.
stype : str, optional
If sparse, which format to use for the output.
coo_build : bool, optional
Whether to build the intermediary matrices using the ``'coo'``
format - can be faster to build sparse in this way, then
convert to chosen format, including dense.
Returns
-------
operator
Operator such that ops act on ``dims[inds]``.
See Also
--------
ikron, permute
Examples
--------
Here we take an operator that acts on spins 0 and 1 with X and Z, and
transform it to act on spins 2 and 0 -- i.e. reverse it and sandwich an
identity between the two sites it acts on.
>>> XZ = pauli('X') & pauli('Z')
>>> ZIX = pkron(XZ, dims=[2, 3, 2], inds=[2, 0])
>>> np.allclose(ZIX, pauli('Z') & eye(3) & pauli('X'))
True
"""
dims, inds = np.asarray(dims), np.asarray(inds)
# total number of subsystems and total size
n = len(dims)
sz = prod(dims)
# dimensions of space where op should be placed, and its total size
dims_in = dims[inds]
sz_in = prod(dims_in)
# construct pre-permuted full operator
b = ikron(op, [sz_in, sz // sz_in], 0, **ikron_opts)
# inverse of inds
if len(dims) == len(inds):
inds_out, dims_out = (), ()
else:
inds_out, dims_out = zip(
*((i, x) for i, x in enumerate(dims) if i not in inds))
# current order and dimensions of system
p = [*inds, *inds_out]
dims_cur = (*dims_in, *dims_out)
# find inverse permutation
ip = np.empty(n, dtype=int)  # note: `np.int` has been removed in recent numpy
ip[p] = np.arange(n)
return permute(b, dims_cur, ip)
def ind_complement(inds, n):
"""Return the indices below ``n`` not contained in ``inds``.
"""
return tuple(i for i in range(n) if i not in inds)
def itrace(a, axes=(0, 1)):
"""General tensor trace, i.e. multiple contractions, for a dense array.
Parameters
----------
a : numpy.ndarray
Tensor to trace.
axes : (2,) int or (2,) array of int
- (2,) int: Perform trace on the two indices listed.
- (2,) array of int: Trace out first sequence of indices with second
sequence indices.
Returns
-------
numpy.ndarray
The tensor remaining after tracing out the specified axes.
See Also
--------
trace, partial_trace
Examples
--------
Trace out a single pair of dimensions:
>>> a = randn(2, 3, 4, 2, 3, 4)
>>> itrace(a, axes=(0, 3)).shape
(3, 4, 3, 4)
Trace out multiple dimensions:
>>> itrace(a, axes=([1, 2], [4, 5])).shape
(2, 2)
"""
# Single index pair to trace out
if isinstance(axes[0], Integral):
return np.trace(a, axis1=axes[0], axis2=axes[1])
elif len(axes[0]) == 1:
return np.trace(a, axis1=axes[0][0], axis2=axes[1][0])
# Multiple index pairs to trace out
gone = set()
for axis1, axis2 in zip(*axes):
# Modify indices to adjust for traced out dimensions
mod1 = sum(x < axis1 for x in gone)
mod2 = sum(x < axis2 for x in gone)
gone |= {axis1, axis2}
a = np.trace(a, axis1=axis1 - mod1, axis2=axis2 - mod2)
return a
@ensure_qarray
def _partial_trace_dense(p, dims, keep):
"""Perform partial trace of a dense matrix.
"""
if isinstance(keep, Integral):
keep = (keep,)
if isvec(p): # p = psi
p = np.asarray(p).reshape(dims)
lose = ind_complement(keep, len(dims))
p = np.tensordot(p, p.conj(), (lose, lose))
d = int(p.size**0.5)
return p.reshape((d, d))
else:
p = np.asarray(p).reshape((*dims, *dims))
total_dims = len(dims)
lose = ind_complement(keep, total_dims)
lose2 = tuple(ind + total_dims for ind in lose)
p = itrace(p, (lose, lose2))
d = int(p.size**0.5)
return p.reshape((d, d))
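# Standalone numpy sketch of the vector branch above: tracing out subsystem 1
# of a Bell state leaves the maximally mixed single-qubit state.
#
#     >>> psi = np.array([1.0, 0.0, 0.0, 1.0]) / 2**0.5
#     >>> rho0 = np.tensordot(psi.reshape(2, 2), psi.reshape(2, 2).conj(), ((1,), (1,)))
#     >>> np.allclose(rho0, np.eye(2) / 2)
#     True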
def _trace_lose(p, dims, lose):
"""Simple partial trace where the single subsytem at ``lose``
is traced out.
"""
p = p if isop(p) else dot(p, dag(p))
dims = np.asarray(dims)
e = dims[lose]
a = prod(dims[:lose])
b = prod(dims[lose + 1:])
rhos = zeros(shape=(a * b, a * b), dtype=np.complex128)
for i in range(a * b):
for j in range(i, a * b):
i_i = e * b * (i // b) + (i % b)
i_f = e * b * (i // b) + (i % b) + (e - 1) * b + 1
j_i = e * b * (j // b) + (j % b)
j_f = e * b * (j // b) + (j % b) + (e - 1) * b + 1
is directed"""
g = self.g
if nx.is_directed(g):
dict_ = {}
for i in g.nodes():
dict_[i] = g.out_degree(i)
self.metrics_dict['outdegree'] = dict_
return dict_
else:
dict_ = {}
for i in g.nodes():
dict_[i] = 0
self.metrics_dict['outdegree'] = dict_
return dict_
def degree(self):
"""Devuelve un diccionario con el valor degree para cada nodo"""
g = self.g
dict_ = {}
for i in g.nodes():
dict_[i] = g.degree(i)
self.metrics_dict['degree'] = dict_
return dict_
#CENTRALITY:
def eccentricity(self):
"""Devuelve un diccionario con el valor eccentricity para cada nodo, si el grafo es conectado"""
try:
g = self.g
self.metrics_dict['eccentricity'] = nx.eccentricity(g)
return self.metrics_dict['eccentricity']
except nx.exception.NetworkXError:
self.logging_message("Excentricity: Graph is not connected.")
return self.empty_dict()
def harmonic_centrality(self):
"""Devuelve un diccionario con el valor harmonic_centrality para cada nodo"""
try:
g = self.g
self.metrics_dict['harmonic'] = nx.harmonic_centrality(g)
return self.metrics_dict['harmonic']
except nx.exception.PowerIterationFailedConvergence:
self.logging_message("Harmonic: Power iteration failed.")
return self.empty_dict()
def closeness_centrality(self):
"""Devuelve un diccionario con el valor closeness_centrality para cada nodo"""
try:
g = self.g
self.metrics_dict['closeness'] = nx.closeness_centrality(g)
return self.metrics_dict['closeness']
except nx.exception.PowerIterationFailedConvergence:
self.logging_message("Closeness: Power iteration failed.")
return self.empty_dict()
def eigenvector_centrality(self):
"""Devuelve un diccionario con el valor eigenvector_centrality para cada nodo"""
try:
g = self.g
self.metrics_dict['eigenvector'] = nx.eigenvector_centrality(g)
return self.metrics_dict['eigenvector']
except nx.exception.PowerIterationFailedConvergence:
self.logging_message("Eigenvector: Power iteration failed.")
return self.empty_dict()
def pagerank(self, alpha=0.85, epsilon=1e-3):
"""Devuelve un diccionario con el valor pagerank para cada nodo"""
try:
g = self.g
self.metrics_dict['pagerank'] = nx.pagerank(g, alpha=alpha, tol=epsilon)
return self.metrics_dict['pagerank']
except nx.exception.PowerIterationFailedConvergence:
self.logging_message("PageRank: Power iteration failed.")
return self.empty_dict()
def betweenness(self, normalized=True):
"""Devuelve un diccionario con el valor betweenness para cada nodo"""
g = self.g
self.metrics_dict['betweenness'] = nx.betweenness_centrality(g, normalized=normalized)
return self.metrics_dict['betweenness']
#STRUCTURAL HOLES:
def constraint(self):
"""Devuelve un diccionario con el valor constraint para cada nodo"""
try:
g = self.g
self.metrics_dict['constraint'] = constraint(g)
return self.metrics_dict['constraint']
except nx.exception.PowerIterationFailedConvergence:
self.logging_message("Constraint:Power iteration failed.")
return self.empty_dict()
def effective_size(self):
"""Devuelve un diccionario con el valor effective_size para cada nodo"""
try:
g = self.g
self.metrics_dict['effective_size'] = effective_size(g)
return self.metrics_dict['effective_size']
except nx.exception.PowerIterationFailedConvergence:
self.logging_message("Effective size:Power iteration failed.")
return self.empty_dict()
#CLUSTERING:
#V0.0.4
def average_clustering(self):
g = self.g
return nx.average_clustering(g)
def clustering(self):
"""Devuelve un diccionario con el valor clustering para cada nodo"""
try:
g = self.g
self.metrics_dict['clustering'] = nx.clustering(g)
return self.metrics_dict['clustering']
except nx.exception.PowerIterationFailedConvergence:
self.logging_message("Clustering:Power iteration failed.")
return self.empty_dict()
def square_clustering(self):
"""Devuelve un diccionario con el valor square_clustering para cada nodo"""
try:
g = self.g
self.metrics_dict['square-clustering'] = nx.square_clustering(g)
return self.metrics_dict['square-clustering']
except nx.exception.PowerIterationFailedConvergence:
self.logging_message("Clustering:Power iteration failed.")
return self.empty_dict()
def triangles(self):
"""Devuelve un diccionario con el valor triangles para cada nodo"""
try:
g = self.g
self.metrics_dict['triangles'] = nx.triangles(g)
return self.metrics_dict['triangles']
except nx.exception.PowerIterationFailedConvergence:
self.logging_message("Triangles:Power iteration failed.")
return self.empty_dict()
except nx.exception.NetworkXNotImplemented:
self.logging_message("Triangles:No implemented for directed graph in networkx.")
g = self.get_undirected()
self.logging_message("Returning results getting from undirected equivalent graph.")
self.metrics_dict['triangles'] = nx.triangles(g)
return self.metrics_dict['triangles']
#TRANSITIVITY V0.0.4
#V0.0.4
def global_transitivity(self):
g = self.g.to_undirected()
return nx.transitivity(g)
#V0.0.4
def node_transitivity(self, node):
"""Devuelve la transitividad del subgrafo que contiene en cada vértice al nodo"""
if self.g.is_directed():
g = self.g.to_undirected()
else:
g = self.g
node_as_trg = [u for u, v in g.edges() if v == node]
node_as_src = [v for u, v in g.edges() if u == node]
nodes_subgraph = [*node_as_trg, *node_as_src, *[node]]
nodes_subgraph = sorted(list(set(nodes_subgraph)))
sub_graph = g.subgraph(nodes_subgraph)
return nx.transitivity(sub_graph)
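# Equivalent standalone sketch using networkx's ``ego_graph`` (illustration
# only, on a built-in example graph):
#
#     >>> g = nx.karate_club_graph()
#     >>> nx.transitivity(nx.ego_graph(g, 0, radius=1))  # same node-plus-neighbors subgraph as above, for node 0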
#V0.0.4
def local_transitivity(self):
"""Devuelve un diccionario con el valor local transitivity para cada nodo"""
g = self.g
local_transitivity = {}
for i in g.nodes():
local_transitivity[i] = self.node_transitivity(i)
self.metrics_dict['local_transitivity'] = local_transitivity
return self.metrics_dict['local_transitivity']
#COMUNITIES: (In construction)
def louvain_communities(self, **kwargs):
"""Devuelve un diccionario con el valor louvain_communities para cada nodo"""
g = nx.Graph(self.g)
communities = cm.best_partition(g, **kwargs)
return communities
def greedy_modularity_communities(self,**kwargs):
"""Devuelve un diccionario con el valor greedy_communities para cada nodo"""
g = nx.Graph(self.g)
c = list(comm.greedy_modularity_communities(g, **kwargs))
communities = {}
for i in range(len(c)):
for j in c[i]:
communities[j] = i
return communities
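# Standalone sketch of the frozenset-to-dict mapping performed above
# (networkx only, on a built-in example graph):
#
#     >>> g = nx.karate_club_graph()
#     >>> parts = list(comm.greedy_modularity_communities(g))
#     >>> node_to_comm = {node: i for i, part in enumerate(parts) for node in part}
#     >>> len(node_to_comm) == g.number_of_nodes()
#     True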
def k_clique_communities(self, k=2, **kwargs):
"""Devuelve un diccionario con el valor k_clique_communities para cada nodo"""
if k < 2:
k = 2
g = nx.Graph(self.g)
c = list(comm.k_clique_communities(g, k, **kwargs))
communities = {}
for i in range(len(c)):
for j in c[i]:
communities[j] = i
return communities
def asyn_fluidc(self, k=10, **kwargs):
"""Devuelve un diccionario con el valor asyn_fluidc_communities para cada nodo, k = el más tamaño
más pequeño"""
g = nx.Graph(self.g)
communities = comm.asyn_fluidc(g, k, **kwargs)
return self.get_dict(communities)
def girvan_newman_modularity_communities(self, **kwargs):
"""Devuelve un diccionario con el valor girvan_newman_communities para cada nodo"""
g = nx.Graph(self.g)
communities = []
comp = comm.girvan_newman(g, **kwargs)
communities = sorted(map(sorted, next(comp)))
dict_ = {}
for i in range(len(communities)):
for j in communities[i]:
dict_[j] = i
return dict_
def kernighan_lin_bisection(self,**kwargs):
"""Devuelve un diccionario con el valor kernighan_communities para cada nodo"""
g = nx.Graph(self.g)
communities = comm.kernighan_lin_bisection(g, **kwargs)
return self.get_dict(communities)
def asyn_lpa_communities(self, **kwargs):
"""Devuelve un diccionario con el valor asyn_communities para cada nodo"""
g = nx.Graph(self.g)
communities = comm.asyn_lpa_communities(g, **kwargs)
return self.get_dict(communities)
def label_propagation_communities(self):
"""Devuelve un diccionario con el valor label_propagation_communities para cada nodo"""
g = nx.Graph(self.g)
communities = comm.label_propagation_communities(g)
return self.get_dict(communities)
def get_dict(self, object_):
"""Crea un diccionario para un objeto, función interna"""
css = [i for i in object_]
dict_ = {}
for i, lg in enumerate(css):
for node in lg:
dict_[node] = i
return dict_
def get_dc(self, list_):
"""Crea un diccionario desde una lista, función interna"""
dict_ = {}
for i in self.g.nodes:
for k in list_:
if i in k:
dict_[i] = list_.index(k)
return dict_
def dict_of_communities(self, algorithm='asyn_fluidc', k=4, **kwargs):
"""Devuelve un diccionario con las comunidades calculadas según el algoritmo específico"""
"""Default = asyn_fluidc, recomendado en networkx"""
g = self.g
dict_={}
if algorithm == 'greedy':
dict_ = self.greedy_modularity_communities(**kwargs)
elif algorithm == 'girvan_newman':
dict_ = self.girvan_newman_modularity_communities(**kwargs)
#elif algorithm == 'k_clique':
# dict_ = self.get_dict(self.k_clique_communities(k=k, **kwargs))
elif algorithm == 'asyn_fluidc':
dict_ = self.asyn_fluidc(k=k, **kwargs)
elif algorithm == 'kernighan_bisection':
dict_ = self.kernighan_lin_bisection(**kwargs)
elif algorithm == 'lpa':
dict_ = self.label_propagation_communities(**kwargs)
elif algorithm == 'asyn_lpa':
dict_ = self.asyn_lpa_communities(**kwargs)
elif algorithm == 'louvain':
dict_ = self.louvain_communities(**kwargs)
self.metrics_dict['communities'] = dict_
return dict_
def get_partition(self, dict_of_communities):
"""Devuelve la partición en comunidades de un diccionario de comunidades, uso interno"""
d = dict_of_communities
partition = []
for i in list(set(d.values())):
sub_part = []
for j in d.keys():
if d[j] == i:
sub_part.append(j)
partition.append(sub_part)
return partition
def modularity(self, algorithm = 'asyn_fluidc', **kwargs):
"""Devuelve la modularidad según la lista de comunidades"""
g = self.g
print("Returning modularity communities with algorithm %s of networkx."%algorithm)
dict_ = self.dict_of_communities(algorithm=algorithm, **kwargs)
return comm.modularity(g, self.get_partition(dict_))
def metrics_df(self, metrics='all', pr_alpha_epsilon=(0.85,1e-3), algorithm='asyn_fluidc', k=10):
"""Devuelve un dataframe de métricas seleccionadas por lista"""
"""Parámetro principal:
metrics = [nombre de la metrica], all devuelve todas las métricas programadas
"""
g = self.g
nodes = [i for i in g.nodes]
labels = [self.vertex_labels[i] for i in g.nodes]
# Build the dataframe:
cols = ['node']
df = pd.DataFrame(nodes, columns=cols)
df['labels'] = labels
if metrics == 'all':
if g.is_directed():
metrics = ['degree', 'indegree', 'outdegree', 'eccentricity', 'pagerank',
'eigenvector', 'betweenness', 'harmonic', 'closeness',
'communities', 'constraint', 'effective_size', 'local_transitivity',
'clustering', 'triangles', 'square_clustering']
else:
metrics = ['degree', 'eccentricity', 'pagerank', 'eigenvector', 'constraint',
'effective_size','harmonic', 'closeness', 'betweenness', 'communities',
'clustering', 'triangles', 'local_transitivity', 'square_clustering']
for i in metrics:
if i in self.metrics_dict.keys():
df[i] = self.metrics_dict[i].values()
else:
if i == 'degree':
df['degree'] = self.degree().values()
if i == 'indegree':
df['indegree'] = self.indegree().values()
if i == 'outdegree':
df['outdegree'] = self.outdegree().values()
if i == 'eccentricity':
df['eccentricity'] = self.eccentricity().values()
if i == 'pagerank':
df['pagerank'] = self.pagerank(alpha=pr_alpha_epsilon[0], epsilon=pr_alpha_epsilon[1]).values()
if i == 'eigenvector':
df['eigenvector'] = self.eigenvector_centrality().values()
if i == 'harmonic':
df['harmonic'] = self.harmonic_centrality().values()
if i == 'closeness':
df['closeness'] = self.closeness_centrality().values()
if i == 'betweenness':
df['betweenness'] = self.betweenness().values()
if i == 'constraint':
df['constraint'] = self.constraint().values()
if i == 'effective_size':
df['effective_size'] = self.effective_size().values()
if i == 'clustering':
df['clustering'] = self.clustering().values()
if i == 'triangles':
df['triangles'] = self.triangles().values()
if i == 'square_clustering':
df['square_clustering'] = self.square_clustering().values()
if i == 'local_transitivity':
df['local_transitivity'] = self.local_transitivity().values()
if i == 'communities':
df['communities_%s'%algorithm] = self.dict_of_communities(algorithm=algorithm, k=k).values()
return df
def metrics_csv(self, save_as, metrics='all', index=False, **kwargs):
"""Devuelve las metricas networkx seleccionadas en lista, a un csv"""
df = self.metrics_df(metrics=metrics, **kwargs)
df.to_csv(save_as, index=index)
#V.0.0.4
def max_min_normalization(self, datalist):
"""Calcula las métricas como normalización max min entre [0 1], min = 0, max = 1"""
max_value = max(datalist)
min_value = min(datalist)
denominator = float(max_value - min_value)
max_min_list = [float(i - min_value) / denominator for i in datalist]
return max_min_list
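# Standalone sketch of the same max-min normalization (illustration only):
#
#     >>> data = [2, 4, 10]
#     >>> lo, hi = min(data), max(data)
#     >>> [float(x - lo) / (hi - lo) for x in data]
#     [0.0, 0.25, 1.0]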
Optional[AttachmentTitle] = None
imageUrl: Optional[AttachmentUrl] = None
buttons: Optional[ButtonsList] = None
class ImportFilterName(Enum):
ImportResourceType = 'ImportResourceType'
class ImportSortBy(ExportSortBy):
"""
Provides information for sorting a list of imports.
"""
pass
class ImportSummary(BaseModel):
"""
Provides summary information about an import in an import list.
"""
importId: Optional[Id] = None
importedResourceId: Optional[ImportedResourceId] = None
importedResourceName: Optional[Name] = None
importStatus: Optional[ExportStatus] = None
mergeStrategy: Optional[MergeStrategy] = None
creationDateTime: Optional[Timestamp] = None
lastUpdatedDateTime: Optional[Timestamp] = None
class ImportSummaryList(BaseModel):
__root__: List[ImportSummary]
class IntentFilterName(Enum):
IntentName = 'IntentName'
class IntentSortBy(BaseModel):
"""
Specifies attributes for sorting a list of intents.
"""
attribute: IntentSortAttribute
order: SortOrder
class KmsKeyArn(BaseModel):
__root__: Annotated[
str,
Field(
max_length=2048,
min_length=20,
regex='^arn:[\\w\\-]+:kms:[\\w\\-]+:[\\d]{12}:(?:key\\/[\\w\\-]+|alias\\/[a-zA-Z0-9:\\/_\\-]{1,256})$',
),
]
class LambdaARN(BaseModel):
__root__: Annotated[
str,
Field(
max_length=2048,
min_length=20,
regex='arn:aws:lambda:[a-z]+-[a-z]+-[0-9]:[0-9]{12}:function:[a-zA-Z0-9-_]+(/[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})?(:[a-zA-Z0-9-_]+)?',
),
]
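# Validation sketch for these constrained-string root models (assuming
# pydantic v1 ``__root__`` semantics; the ARN below is a made-up placeholder):
#
# KmsKeyArn(__root__='arn:aws:kms:us-east-1:123456789012:key/1234abcd-12ab-34cd-56ef-1234567890ab')
# KmsKeyArn(__root__='not-an-arn')  # raises pydantic.ValidationError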
class MaxResults(BaseModel):
__root__: Annotated[int, Field(ge=1.0, le=1000.0)]
class NextToken(VoiceId):
pass
class ListBotAliasesRequest(BaseModel):
maxResults: Optional[MaxResults] = None
nextToken: Optional[NextToken] = None
class ListBotVersionsRequest(BaseModel):
sortBy: Optional[BotVersionSortBy] = None
maxResults: Optional[MaxResults] = None
nextToken: Optional[NextToken] = None
class ListBuiltInIntentsRequest(BaseModel):
sortBy: Optional[BuiltInIntentSortBy] = None
maxResults: Optional[BuiltInsMaxResults] = None
nextToken: Optional[NextToken] = None
class ListBuiltInSlotTypesRequest(BaseModel):
sortBy: Optional[BuiltInSlotTypeSortBy] = None
maxResults: Optional[BuiltInsMaxResults] = None
nextToken: Optional[NextToken] = None
class SlotTypeSortBy(BaseModel):
"""
Specifies attributes for sorting a list of slot types.
"""
attribute: SlotTypeSortAttribute
order: SortOrder
class SlotSortBy(BaseModel):
"""
Specifies attributes for sorting a list of bots.
"""
attribute: SlotSortAttribute
order: SortOrder
class ListTagsForResourceRequest(BaseModel):
pass
class PlainTextMessageValue(CustomPayloadValue):
pass
class ServicePrincipal(BaseModel):
__root__: Annotated[
str, Field(max_length=1024, min_length=15, regex='^[0-9a-zA-Z_.]+$')
]
class PrincipalArn(BaseModel):
__root__: Annotated[
str,
Field(
max_length=1024,
min_length=30,
regex='^arn:aws:iam::[0-9]{12}:(root|(user|role)/.*)$',
),
]
class PriorityValue(BaseModel):
__root__: Annotated[int, Field(ge=0.0, le=100.0)]
class PromptMaxRetries(BaseModel):
__root__: Annotated[int, Field(ge=0.0, le=5.0)]
class RegexPattern(BaseModel):
__root__: Annotated[str, Field(max_length=100, min_length=1)]
class S3BucketArn(BaseModel):
__root__: Annotated[
str,
Field(
max_length=2048,
min_length=1,
regex='^arn:[\\w\\-]+:s3:::[a-z0-9][\\.\\-a-z0-9]{1,61}[a-z0-9]$',
),
]
class SSMLMessageValue(CustomPayloadValue):
pass
class Utterance(VoiceId):
pass
class Value4(BaseModel):
__root__: Annotated[str, Field(max_length=140, min_length=1)]
class SampleValue(BaseModel):
"""
Defines one of the values for a slot type.
"""
value: Value4
class SlotDefaultValueString(BaseModel):
__root__: Annotated[str, Field(max_length=202, min_length=1)]
class SlotDefaultValue(BaseModel):
"""
Specifies the default value to use when a user doesn't provide a value for a slot.
"""
defaultValue: SlotDefaultValueString
class SlotDefaultValueList(BaseModel):
__root__: Annotated[List[SlotDefaultValue], Field(max_items=10, min_items=0)]
class SlotFilterName(Enum):
SlotName = 'SlotName'
class SlotTypeFilterName(Enum):
SlotTypeName = 'SlotTypeName'
class SlotTypeSummary(BaseModel):
"""
Provides summary information about a slot type.
"""
slotTypeId: Optional[Id] = None
slotTypeName: Optional[Name] = None
description: Optional[Description] = None
parentSlotTypeSignature: Optional[SlotTypeSignature] = None
lastUpdatedDateTime: Optional[Timestamp] = None
class SynonymList(BaseModel):
__root__: Annotated[List[SampleValue], Field(max_items=10000, min_items=1)]
class StillWaitingResponseFrequency(BaseModel):
__root__: Annotated[int, Field(ge=1.0, le=300.0)]
class StillWaitingResponseTimeout(BaseModel):
__root__: Annotated[int, Field(ge=1.0, le=900.0)]
class TagKeyList(BaseModel):
__root__: Annotated[List[TagKey], Field(max_items=200, min_items=0)]
class TagResourceRequest(BaseModel):
tags: TagMap
class TextLogDestination(BaseModel):
"""
Defines the Amazon CloudWatch Logs destination log group for conversation text logs.
"""
cloudWatch: CloudWatchLogGroupLogDestination
class TextLogSetting(BaseModel):
"""
Defines settings to enable text conversation logs.
"""
enabled: Boolean
destination: TextLogDestination
class UntagResourceRequest(BaseModel):
pass
class UpdateBotLocaleRequest(BaseModel):
description: Optional[Description] = None
nluIntentConfidenceThreshold: ConfidenceThreshold
voiceSettings: Optional[VoiceSettings] = None
class UpdateBotRequest(BaseModel):
botName: Name
description: Optional[Description] = None
roleArn: RoleArn
dataPrivacy: DataPrivacy
idleSessionTTLInSeconds: SessionTTL
class UpdateExportRequest(BaseModel):
filePassword: Optional[ImportExportFilePassword] = None
class UpdateResourcePolicyRequest(BaseModel):
policy: Policy
class BuildBotLocaleResponse(BaseModel):
botId: Optional[Id] = None
botVersion: Optional[DraftBotVersion] = None
localeId: Optional[LocaleId] = None
botLocaleStatus: Optional[BotLocaleStatus] = None
lastBuildSubmittedDateTime: Optional[Timestamp] = None
class CreateBotResponse(BaseModel):
botId: Optional[Id] = None
botName: Optional[Name] = None
description: Optional[Description] = None
roleArn: Optional[RoleArn] = None
dataPrivacy: Optional[DataPrivacy] = None
idleSessionTTLInSeconds: Optional[SessionTTL] = None
botStatus: Optional[BotStatus] = None
creationDateTime: Optional[Timestamp] = None
botTags: Optional[TagMap] = None
testBotAliasTags: Optional[TagMap] = None
class TextLogSettingsList(BaseModel):
__root__: Annotated[List[TextLogSetting], Field(max_items=1, min_items=1)]
class CreateBotLocaleResponse(BaseModel):
botId: Optional[Id] = None
botVersion: Optional[DraftBotVersion] = None
localeName: Optional[LocaleName] = None
localeId: Optional[LocaleId] = None
description: Optional[Description] = None
nluIntentConfidenceThreshold: Optional[ConfidenceThreshold] = None
voiceSettings: Optional[VoiceSettings] = None
botLocaleStatus: Optional[BotLocaleStatus] = None
creationDateTime: Optional[Timestamp] = None
class CreateBotVersionResponse(BaseModel):
botId: Optional[Id] = None
description: Optional[Description] = None
botVersion: Optional[NumericalBotVersion] = None
botVersionLocaleSpecification: Optional[BotVersionLocaleSpecification] = None
botStatus: Optional[BotStatus] = None
creationDateTime: Optional[Timestamp] = None
class BotVersionLocaleDetails(BaseModel):
"""
The version of a bot used for a bot locale.
"""
sourceBotVersion: BotVersion
class BotExportSpecification(BaseModel):
"""
Provides the identity of the bot that was exported.
"""
botId: Id
botVersion: BotVersion
class BotLocaleExportSpecification(BaseModel):
"""
Provides the bot locale parameters required for exporting a bot locale.
"""
botId: Id
botVersion: BotVersion
localeId: LocaleId
class SampleUtterance(BaseModel):
"""
A sample utterance that invokes an intent or responds to a slot elicitation prompt.
"""
utterance: Utterance
class InputContext(BaseModel):
"""
The name of a context that must be active for an intent to be selected by Amazon Lex.
"""
name: Name
class OutputContext(BaseModel):
"""
Describes a session context that is activated when an intent is fulfilled.
"""
name: Name
timeToLiveInSeconds: ContextTimeToLiveInSeconds
turnsToLive: ContextTurnsToLive
class CreateResourcePolicyResponse(BaseModel):
resourceArn: Optional[AmazonResourceName] = None
revisionId: Optional[RevisionId] = None
class CreateResourcePolicyStatementResponse(CreateResourcePolicyResponse):
pass
class Principal(BaseModel):
"""
The IAM principal that you are allowing or denying access to an Amazon Lex action. You must provide a <code>service</code> or an <code>arn</code>, but not both in the same statement. For more information, see <a href="https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html"> AWS JSON policy elements: Principal </a>.
"""
service: Optional[ServicePrincipal] = None
arn: Optional[PrincipalArn] = None
class SlotDefaultValueSpecification(BaseModel):
"""
Defines a list of values that Amazon Lex should use as the default value for a slot.
"""
defaultValueList: SlotDefaultValueList
class SampleUtterancesList(BaseModel):
__root__: List[SampleUtterance]
class SlotTypeValue(BaseModel):
"""
Each slot type can have a set of values. Each <code>SlotTypeValue</code> represents a value that the slot type can take.
"""
sampleValue: Optional[SampleValue] = None
synonyms: Optional[SynonymList] = None
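# Construction sketch (hypothetical values, pydantic v1 nested-model style):
#
# SlotTypeValue(
#     sampleValue=SampleValue(value=Value4(__root__='small')),
#     synonyms=SynonymList(__root__=[SampleValue(value=Value4(__root__='tiny'))]),
# )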
class SlotValueRegexFilter(BaseModel):
"""
Provides a regular expression used to validate the value of a slot.
"""
pattern: RegexPattern
class CreateUploadUrlResponse(BaseModel):
importId: Optional[Id] = None
uploadUrl: Optional[PresignedS3Url] = None
class DeleteBotResponse(BaseModel):
botId: Optional[Id] = None
botStatus: Optional[BotStatus] = None
class DeleteBotAliasResponse(BaseModel):
botAliasId: Optional[BotAliasId] = None
botId: Optional[Id] = None
botAliasStatus: Optional[BotAliasStatus] = None
class DeleteBotLocaleResponse(BaseModel):
botId: Optional[Id] = None
botVersion: Optional[DraftBotVersion] = None
localeId: Optional[LocaleId] = None
botLocaleStatus: Optional[BotLocaleStatus] = None
class DeleteBotVersionResponse(BaseModel):
botId: Optional[Id] = None
botVersion: Optional[NumericalBotVersion] = None
botStatus: Optional[BotStatus] = None
class DeleteExportResponse(BaseModel):
exportId: Optional[Id] = None
exportStatus: Optional[ExportStatus] = None
class DeleteImportResponse(BaseModel):
importId: Optional[Id] = None
importStatus: Optional[ExportStatus] = None
class DeleteResourcePolicyResponse(CreateResourcePolicyResponse):
pass
class DeleteResourcePolicyStatementResponse(CreateResourcePolicyResponse):
pass
class DescribeBotResponse(BaseModel):
botId: Optional[Id] = None
botName: Optional[Name] = None
description: Optional[Description] = None
roleArn: Optional[RoleArn] = None
dataPrivacy: Optional[DataPrivacy] = None
idleSessionTTLInSeconds: Optional[SessionTTL] = None
botStatus: Optional[BotStatus] = None
creationDateTime: Optional[Timestamp] = None
lastUpdatedDateTime: Optional[Timestamp] = None
class DescribeResourcePolicyResponse(BaseModel):
resourceArn: Optional[AmazonResourceName] = None
policy: Optional[Policy] = None
revisionId: Optional[RevisionId] = None
class ListBotAliasesResponse(BaseModel):
botAliasSummaries: Optional[BotAliasSummaryList] = None
nextToken: Optional[NextToken] = None
botId: Optional[Id] = None
class ListBotLocalesResponse(BaseModel):
botId: Optional[Id] = None
botVersion: Optional[BotVersion] = None
nextToken: Optional[NextToken] = None
botLocaleSummaries: Optional[BotLocaleSummaryList] = None
class ListBotVersionsResponse(BaseModel):
botId: Optional[Id] = None
botVersionSummaries: Optional[BotVersionSummaryList] = None
nextToken: Optional[NextToken] = None
class ListBotsResponse(BaseModel):
botSummaries: Optional[BotSummaryList] = None
nextToken: Optional[NextToken] = None
class ListBuiltInIntentsResponse(BaseModel):
builtInIntentSummaries: Optional[BuiltInIntentSummaryList] = None
nextToken: Optional[NextToken] = None
localeId: Optional[LocaleId] = None
class ListBuiltInSlotTypesResponse(BaseModel):
builtInSlotTypeSummaries: Optional[BuiltInSlotTypeSummaryList] = None
nextToken: Optional[NextToken] = None
localeId: Optional[LocaleId] = None
class ListImportsResponse(BaseModel):
botId: Optional[Id] = None
botVersion: Optional[DraftBotVersion] = None
importSummaries: Optional[ImportSummaryList] = None
nextToken: Optional[NextToken] = None
class ListTagsForResourceResponse(BaseModel):
tags: Optional[TagMap] = None
class BotImportSpecification(BaseModel):
"""
Provides the bot parameters required for importing a bot.
"""
botName: Name
roleArn: RoleArn
dataPrivacy: DataPrivacy
idleSessionTTLInSeconds: Optional[SessionTTL] = None
botTags: Optional[TagMap] = None
testBotAliasTags: Optional[TagMap] = None
class BotLocaleImportSpecification(BaseModel):
"""
Provides the bot locale parameters required for importing a bot locale.
"""
botId: Id
botVersion: DraftBotVersion
localeId: LocaleId
nluIntentConfidenceThreshold: Optional[ConfidenceThreshold] = None
voiceSettings: Optional[VoiceSettings] = None
class UpdateBotResponse(DescribeBotResponse):
pass
class SlotPriority(BaseModel):
"""
Sets the priority that Amazon Lex should use when eliciting slot values from a user.
"""
priority: PriorityValue
slotId: Id
class UpdateResourcePolicyResponse(CreateResourcePolicyResponse):
pass
class S3BucketLogDestination(BaseModel):
"""
Specifies an Amazon S3 bucket for logging audio conversations
"""
kmsKeyArn: Optional[KmsKeyArn] = None
s3BucketArn: S3BucketArn
logPrefix: LogPrefix
class AudioLogDestination(BaseModel):
"""
The location of audio log files collected when conversation logging is enabled for a bot.
"""
s3Bucket: S3BucketLogDestination
class AudioLogSetting(BaseModel):
"""
Settings for logging audio of conversations between Amazon Lex and a user. You specify whether to log audio and the Amazon S3 bucket where the audio file is stored.
"""
enabled: Boolean
destination: AudioLogDestination
class FilterValues(BaseModel):
__root__: Annotated[List[FilterValue], Field(max_items=1, min_items=1)]
class LambdaCodeHook(BaseModel):
"""
Specifies a Lambda function that verifies requests to a bot or fulfills the user's request to a bot.
"""
lambdaARN: LambdaARN
codeHookInterfaceVersion: CodeHookInterfaceVersion
class ExportResourceSpecification(BaseModel):
"""
Provides information about the bot or bot locale that you want to export. You can specify the <code>botExportSpecification</code> or the <code>botLocaleExportSpecification</code>, but not both.
"""
botExportSpecification: Optional[BotExportSpecification] = None
botLocaleExportSpecification: Optional[BotLocaleExportSpecification] = None
class CreateExportRequest(BaseModel):
resourceSpecification: ExportResourceSpecification
fileFormat: ImportExportFileFormat
filePassword: Optional[ImportExportFilePassword] = None
class InputContextsList(BaseModel):
__root__: Annotated[List[InputContext], Field(max_items=5, min_items=0)]
class OutputContextsList(BaseModel):
__root__: Annotated[List[OutputContext], Field(max_items=10, min_items=0)]
from __future__ import absolute_import, print_function
from django.conf.urls import include, patterns, url
from .endpoints.accept_project_transfer import AcceptProjectTransferEndpoint
from .endpoints.relay_heartbeat import RelayHeartbeatEndpoint
from .endpoints.relay_index import RelayIndexEndpoint
from .endpoints.relay_details import RelayDetailsEndpoint
from .endpoints.relay_register import RelayRegisterChallengeEndpoint, \
RelayRegisterResponseEndpoint
from .endpoints.api_applications import ApiApplicationsEndpoint
from .endpoints.api_application_details import ApiApplicationDetailsEndpoint
from .endpoints.api_authorizations import ApiAuthorizationsEndpoint
from .endpoints.api_tokens import ApiTokensEndpoint
from .endpoints.assistant import AssistantEndpoint
from .endpoints.auth_index import AuthIndexEndpoint
from .endpoints.authenticator_index import AuthenticatorIndexEndpoint
from .endpoints.broadcast_details import BroadcastDetailsEndpoint
from .endpoints.broadcast_index import BroadcastIndexEndpoint
from .endpoints.catchall import CatchallEndpoint
from .endpoints.chunk import ChunkUploadEndpoint
from .endpoints.event_details import EventDetailsEndpoint
from .endpoints.event_owners import EventOwnersEndpoint
from .endpoints.event_apple_crash_report import EventAppleCrashReportEndpoint
from .endpoints.group_details import GroupDetailsEndpoint
from .endpoints.group_events import GroupEventsEndpoint
from .endpoints.group_events_latest import GroupEventsLatestEndpoint
from .endpoints.group_events_oldest import GroupEventsOldestEndpoint
from .endpoints.group_hashes import GroupHashesEndpoint
from .endpoints.group_integration_details import GroupIntegrationDetailsEndpoint
from .endpoints.group_integrations import GroupIntegrationsEndpoint
from .endpoints.group_notes import GroupNotesEndpoint
from .endpoints.group_notes_details import GroupNotesDetailsEndpoint
from .endpoints.group_participants import GroupParticipantsEndpoint
from .endpoints.group_similar_issues import GroupSimilarIssuesEndpoint
from .endpoints.group_stats import GroupStatsEndpoint
from .endpoints.group_tags import GroupTagsEndpoint
from .endpoints.group_tagkey_details import GroupTagKeyDetailsEndpoint
from .endpoints.group_tagkey_values import GroupTagKeyValuesEndpoint
from .endpoints.group_tombstone_details import GroupTombstoneDetailsEndpoint
from .endpoints.group_tombstone import GroupTombstoneEndpoint
from .endpoints.group_user_reports import GroupUserReportsEndpoint
from .endpoints.index import IndexEndpoint
from .endpoints.internal_queue_tasks import InternalQueueTasksEndpoint
from .endpoints.internal_quotas import InternalQuotasEndpoint
from .endpoints.internal_stats import InternalStatsEndpoint
from .endpoints.organization_access_request_details import OrganizationAccessRequestDetailsEndpoint
from .endpoints.organization_activity import OrganizationActivityEndpoint
from .endpoints.organization_auditlogs import OrganizationAuditLogsEndpoint
from .endpoints.organization_api_key_index import OrganizationApiKeyIndexEndpoint
from .endpoints.organization_api_key_details import OrganizationApiKeyDetailsEndpoint
from .endpoints.organization_auth_providers import OrganizationAuthProvidersEndpoint
from .endpoints.organization_auth_provider_details import OrganizationAuthProviderDetailsEndpoint
from .endpoints.organization_auth_provider_send_reminders import OrganizationAuthProviderSendRemindersEndpoint
from .endpoints.organization_avatar import OrganizationAvatarEndpoint
from .endpoints.organization_details import OrganizationDetailsEndpoint
from .endpoints.organization_shortid import ShortIdLookupEndpoint
from .endpoints.organization_eventid import EventIdLookupEndpoint
from .endpoints.organization_slugs import SlugsUpdateEndpoint
from .endpoints.organization_issues_new import OrganizationIssuesNewEndpoint
from .endpoints.organization_member_details import OrganizationMemberDetailsEndpoint
from .endpoints.organization_member_index import OrganizationMemberIndexEndpoint
from .endpoints.organization_member_issues_assigned import OrganizationMemberIssuesAssignedEndpoint
from .endpoints.organization_member_issues_bookmarked import OrganizationMemberIssuesBookmarkedEndpoint
from .endpoints.organization_member_issues_viewed import OrganizationMemberIssuesViewedEndpoint
from .endpoints.organization_member_unreleased_commits import OrganizationMemberUnreleasedCommitsEndpoint
from .endpoints.organization_member_team_details import OrganizationMemberTeamDetailsEndpoint
from .endpoints.organization_onboarding_tasks import OrganizationOnboardingTaskEndpoint
from .endpoints.organization_index import OrganizationIndexEndpoint
from .endpoints.organization_projects import OrganizationProjectsEndpoint
from .endpoints.organization_releases import OrganizationReleasesEndpoint
from .endpoints.organization_release_details import OrganizationReleaseDetailsEndpoint
from .endpoints.organization_release_files import OrganizationReleaseFilesEndpoint
from .endpoints.organization_release_file_details import OrganizationReleaseFileDetailsEndpoint
from .endpoints.organization_release_commits import OrganizationReleaseCommitsEndpoint
from .endpoints.organization_repositories import OrganizationRepositoriesEndpoint
from .endpoints.organization_integration_details import OrganizationIntegrationDetailsEndpoint
from .endpoints.organization_integrations import OrganizationIntegrationsEndpoint
from .endpoints.organization_config_integrations import OrganizationConfigIntegrationsEndpoint
from .endpoints.organization_config_repositories import OrganizationConfigRepositoriesEndpoint
from .endpoints.organization_repository_commits import OrganizationRepositoryCommitsEndpoint
from .endpoints.organization_repository_details import OrganizationRepositoryDetailsEndpoint
from .endpoints.organization_stats import OrganizationStatsEndpoint
from .endpoints.organization_teams import OrganizationTeamsEndpoint
from .endpoints.organization_user_issues import OrganizationUserIssuesEndpoint
from .endpoints.organization_user_issues_search import OrganizationUserIssuesSearchEndpoint
from .endpoints.project_avatar import ProjectAvatarEndpoint
from .endpoints.project_details import ProjectDetailsEndpoint
from .endpoints.project_transfer import ProjectTransferEndpoint
from .endpoints.project_create_sample import ProjectCreateSampleEndpoint
from .endpoints.project_docs import ProjectDocsEndpoint
from .endpoints.project_docs_platform import ProjectDocsPlatformEndpoint
from .endpoints.project_environments import ProjectEnvironmentsEndpoint
from .endpoints.project_environment_details import ProjectEnvironmentDetailsEndpoint
from .endpoints.project_integration_details import ProjectIntegrationDetailsEndpoint
from .endpoints.project_platforms import ProjectPlatformsEndpoint
from .endpoints.project_events import ProjectEventsEndpoint
from .endpoints.project_event_details import ProjectEventDetailsEndpoint
from .endpoints.project_filters import ProjectFiltersEndpoint
from .endpoints.project_filter_details import ProjectFilterDetailsEndpoint
from .endpoints.project_group_index import ProjectGroupIndexEndpoint
from .endpoints.project_group_stats import ProjectGroupStatsEndpoint
from .endpoints.project_index import ProjectIndexEndpoint
from .endpoints.project_keys import ProjectKeysEndpoint
from .endpoints.project_key_details import ProjectKeyDetailsEndpoint
from .endpoints.project_key_stats import ProjectKeyStatsEndpoint
from .endpoints.project_member_index import ProjectMemberIndexEndpoint
from .endpoints.project_ownership import ProjectOwnershipEndpoint
from .endpoints.project_plugins import ProjectPluginsEndpoint
from .endpoints.project_plugin_details import ProjectPluginDetailsEndpoint
from .endpoints.project_release_details import ProjectReleaseDetailsEndpoint
from .endpoints.project_release_files import ProjectReleaseFilesEndpoint
from .endpoints.project_release_file_details import ProjectReleaseFileDetailsEndpoint
from .endpoints.project_release_commits import ProjectReleaseCommitsEndpoint
from .endpoints.project_releases import ProjectReleasesEndpoint
from .endpoints.project_releases_token import ProjectReleasesTokenEndpoint
from .endpoints.project_rules import ProjectRulesEndpoint
from .endpoints.project_rules_configuration import ProjectRulesConfigurationEndpoint
from .endpoints.project_rule_details import ProjectRuleDetailsEndpoint
from .endpoints.project_searches import ProjectSearchesEndpoint
from .endpoints.project_search_details import ProjectSearchDetailsEndpoint
from .endpoints.project_stats import ProjectStatsEndpoint
from .endpoints.project_tags import ProjectTagsEndpoint
from .endpoints.project_tagkey_details import ProjectTagKeyDetailsEndpoint
from .endpoints.project_tagkey_values import ProjectTagKeyValuesEndpoint
from .endpoints.project_team_details import ProjectTeamDetailsEndpoint
from .endpoints.project_teams import ProjectTeamsEndpoint
from .endpoints.project_processingissues import ProjectProcessingIssuesEndpoint, \
ProjectProcessingIssuesFixEndpoint, ProjectProcessingIssuesDiscardEndpoint
from .endpoints.project_reprocessing import ProjectReprocessingEndpoint
from .endpoints.project_servicehooks import ProjectServiceHooksEndpoint
from .endpoints.project_servicehook_details import ProjectServiceHookDetailsEndpoint
from .endpoints.project_servicehook_stats import ProjectServiceHookStatsEndpoint
from .endpoints.project_user_details import ProjectUserDetailsEndpoint
from .endpoints.project_user_reports import ProjectUserReportsEndpoint
from .endpoints.project_user_stats import ProjectUserStatsEndpoint
from .endpoints.project_users import ProjectUsersEndpoint
from .endpoints.filechange import CommitFileChangeEndpoint
from .endpoints.issues_resolved_in_release import IssuesResolvedInReleaseEndpoint
from .endpoints.release_deploys import ReleaseDeploysEndpoint
from .endpoints.dsym_files import DSymFilesEndpoint, \
UnknownDSymFilesEndpoint, AssociateDSymFilesEndpoint
from .endpoints.dif_files import DifAssembleEndpoint
from .endpoints.shared_group_details import SharedGroupDetailsEndpoint
from .endpoints.system_health import SystemHealthEndpoint
from .endpoints.system_options import SystemOptionsEndpoint
from .endpoints.team_avatar import TeamAvatarEndpoint
from .endpoints.team_details import TeamDetailsEndpoint
from .endpoints.team_groups_new import TeamGroupsNewEndpoint
from .endpoints.team_groups_trending import TeamGroupsTrendingEndpoint
from .endpoints.team_members import TeamMembersEndpoint
from .endpoints.team_projects import TeamProjectsEndpoint
from .endpoints.team_stats import TeamStatsEndpoint
from .endpoints.useravatar import UserAvatarEndpoint
from .endpoints.user_appearance import UserAppearanceEndpoint
from .endpoints.user_authenticator_index import UserAuthenticatorIndexEndpoint
from .endpoints.user_authenticator_enroll import UserAuthenticatorEnrollEndpoint
from .endpoints.user_authenticator_details import UserAuthenticatorDetailsEndpoint
from .endpoints.user_identity_details import UserIdentityDetailsEndpoint
from .endpoints.user_index import UserIndexEndpoint
from .endpoints.user_details import UserDetailsEndpoint
from .endpoints.user_emails import UserEmailsEndpoint
from .endpoints.user_emails_confirm import UserEmailsConfirmEndpoint
from .endpoints.user_organizations import UserOrganizationsEndpoint
from .endpoints.user_notification_details import UserNotificationDetailsEndpoint
from .endpoints.user_password import UserPasswordEndpoint
from .endpoints.user_notification_fine_tuning import UserNotificationFineTuningEndpoint
from .endpoints.user_social_identities_index import UserSocialIdentitiesIndexEndpoint
from .endpoints.user_social_identity_details import UserSocialIdentityDetailsEndpoint
from .endpoints.user_subscriptions import UserSubscriptionsEndpoint
from .endpoints.event_file_committers import EventFileCommittersEndpoint
from .endpoints.setup_wizard import SetupWizard
urlpatterns = patterns(
'',
# Relay
url(
r'^relays/$',
RelayIndexEndpoint.as_view(),
name='sentry-api-0-relays-index'
),
url(
r'^relays/register/challenge/$',
RelayRegisterChallengeEndpoint.as_view(),
name='sentry-api-0-relay-register-challenge'
),
url(
r'^relays/register/response/$',
RelayRegisterResponseEndpoint.as_view(),
name='sentry-api-0-relay-register-response'
),
url(
r'^relays/heartbeat/$',
RelayHeartbeatEndpoint.as_view(),
name='sentry-api-0-relay-heartbeat'
),
url(
r'^relays/(?P<relay_id>[^\/]+)/$',
RelayDetailsEndpoint.as_view(),
name='sentry-api-0-relays-details'
),
# Api Data
url(
r'^assistant/$',
AssistantEndpoint.as_view(),
name='sentry-api-0-assistant',
),
url(
r'^api-applications/$',
ApiApplicationsEndpoint.as_view(),
name='sentry-api-0-api-applications'
),
url(
r'^api-applications/(?P<app_id>[^\/]+)/$',
ApiApplicationDetailsEndpoint.as_view(),
name='sentry-api-0-api-application-details'
),
url(
r'^api-authorizations/$',
ApiAuthorizationsEndpoint.as_view(),
name='sentry-api-0-api-authorizations'
),
url(r'^api-tokens/$', ApiTokensEndpoint.as_view(),
name='sentry-api-0-api-tokens'),
# Auth
url(r'^auth/$', AuthIndexEndpoint.as_view(), name='sentry-api-0-auth'),
# List Authenticators
url(r'^authenticators/$',
AuthenticatorIndexEndpoint.as_view(),
name='sentry-api-0-authenticator-index'),
# Broadcasts
url(r'^broadcasts/$', BroadcastIndexEndpoint.as_view(),
name='sentry-api-0-broadcast-index'),
url(r'^broadcasts/(?P<broadcast_id>[^\/]+)/$', BroadcastDetailsEndpoint.as_view()),
# Project transfer
url(r'^accept-transfer/$', AcceptProjectTransferEndpoint.as_view(),
name='sentry-api-0-accept-project-transfer'),
# Users
url(r'^users/$', UserIndexEndpoint.as_view(), name='sentry-api-0-user-index'),
url(
r'^users/(?P<user_id>[^\/]+)/$',
UserDetailsEndpoint.as_view(),
name='sentry-api-0-user-details'
),
url(
r'^users/(?P<user_id>[^\/]+)/avatar/$',
UserAvatarEndpoint.as_view(),
name='sentry-api-0-user-avatar'
),
url(
r'^users/(?P<user_id>[^\/]+)/appearance/$',
UserAppearanceEndpoint.as_view(),
name='sentry-api-0-user-appearance'
),
url(
r'^users/(?P<user_id>[^\/]+)/authenticators/$',
UserAuthenticatorIndexEndpoint.as_view(),
name='sentry-api-0-user-authenticator-index'
),
url(
r'^users/(?P<user_id>[^\/]+)/authenticators/(?P<interface_id>[^\/]+)/enroll/$',
UserAuthenticatorEnrollEndpoint.as_view(),
name='sentry-api-0-user-authenticator-enroll'
),
url(
r'^users/(?P<user_id>[^\/]+)/authenticators/(?P<auth_id>[^\/]+)/(?P<interface_device_id>[^\/]+)/$',
UserAuthenticatorDetailsEndpoint.as_view(),
name='sentry-api-0-user-authenticator-device-details'
),
url(
r'^users/(?P<user_id>[^\/]+)/authenticators/(?P<auth_id>[^\/]+)/$',
UserAuthenticatorDetailsEndpoint.as_view(),
name='sentry-api-0-user-authenticator-details'
),
url(
r'^users/(?P<user_id>[^\/]+)/emails/$',
UserEmailsEndpoint.as_view(),
name='sentry-api-0-user-emails'
),
url(
r'^users/(?P<user_id>[^\/]+)/emails/confirm/$',
UserEmailsConfirmEndpoint.as_view(),
name='sentry-api-0-user-emails-confirm'
),
url(
r'^users/(?P<user_id>[^\/]+)/identities/(?P<identity_id>[^\/]+)/$',
UserIdentityDetailsEndpoint.as_view(),
name='sentry-api-0-user-identity-details'
),
url(
r'^users/(?P<user_id>[^\/]+)/organizations/$',
UserOrganizationsEndpoint.as_view(),
name='sentry-api-0-user-organizations'
),
url(
r'^users/(?P<user_id>[^\/]+)/notifications/$',
UserNotificationDetailsEndpoint.as_view(),
name='sentry-api-0-user-notifications'
),
url(
r'^users/(?P<user_id>[^\/]+)/password/$',
UserPasswordEndpoint.as_view(),
name='sentry-api-0-user-password'
),
url(
r'^users/(?P<user_id>[^\/]+)/notifications/(?P<notification_type>[^\/]+)/$',
UserNotificationFineTuningEndpoint.as_view(),
name='sentry-api-0-user-notifications-fine-tuning'
),
url(
r'^users/(?P<user_id>[^\/]+)/social-identities/$',
UserSocialIdentitiesIndexEndpoint.as_view(),
name='sentry-api-0-user-social-identities-index'),
url(
r'^users/(?P<user_id>[^\/]+)/social-identities/(?P<identity_id>[^\/]+)/$',
UserSocialIdentityDetailsEndpoint.as_view(),
name='sentry-api-0-user-social-identity-details'),
url(
r'^users/(?P<user_id>[^\/]+)/subscriptions/$',
UserSubscriptionsEndpoint.as_view(),
name='sentry-api-0-user-subscriptions'
),
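# Sketch: resolving one of the named user routes above (Django 1.x style,
# matching this ``patterns``-based urlconf; the mount prefix is assumed):
#
# from django.core.urlresolvers import reverse
# reverse('sentry-api-0-user-details', kwargs={'user_id': 'me'})
# # -> e.g. '/api/0/users/me/' if this urlconf is mounted under /api/0/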
# Organizations
url(
r'^organizations/(?P<organization_slug>[^\/]+)/chunk-upload/$',
ChunkUploadEndpoint.as_view(),
name='sentry-api-0-chunk-upload'
),
url(
r'^organizations/$', OrganizationIndexEndpoint.as_view(), name='sentry-api-0-organizations'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/$',
OrganizationDetailsEndpoint.as_view(),
name='sentry-api-0-organization-details'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/shortids/(?P<short_id>[^\/]+)/$',
ShortIdLookupEndpoint.as_view(),
name='sentry-api-0-short-id-lookup'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/eventids/(?P<event_id>[^\/]+)/$',
EventIdLookupEndpoint.as_view(),
name='sentry-api-0-event-id-lookup'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/slugs/$',
SlugsUpdateEndpoint.as_view(),
name='sentry-api-0-short-ids-update'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/access-requests/$',
OrganizationAccessRequestDetailsEndpoint.as_view(),
name='sentry-api-0-organization-access-requests'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/access-requests/(?P<request_id>\d+)/$',
OrganizationAccessRequestDetailsEndpoint.as_view(),
name='sentry-api-0-organization-access-request-details'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/activity/$',
OrganizationActivityEndpoint.as_view(),
name='sentry-api-0-organization-activity'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/api-keys/$',
OrganizationApiKeyIndexEndpoint.as_view(),
name='sentry-api-0-organization-api-key-index'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/api-keys/(?P<api_key_id>[^\/]+)/$',
OrganizationApiKeyDetailsEndpoint.as_view(),
name='sentry-api-0-organization-api-key-details'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/audit-logs/$',
OrganizationAuditLogsEndpoint.as_view(),
name='sentry-api-0-organization-audit-logs'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/auth-provider/$',
OrganizationAuthProviderDetailsEndpoint.as_view(),
name='sentry-api-0-organization-auth-provider'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/auth-providers/$',
OrganizationAuthProvidersEndpoint.as_view(),
name='sentry-api-0-organization-auth-providers'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/auth-provider/send-reminders/$',
OrganizationAuthProviderSendRemindersEndpoint.as_view(),
name='sentry-api-0-organization-auth-provider-send-reminders'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/avatar/$',
OrganizationAvatarEndpoint.as_view(),
name='sentry-api-0-organization-avatar'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/config/integrations/$',
OrganizationConfigIntegrationsEndpoint.as_view(),
name='sentry-api-0-organization-config-integrations'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/config/repos/$',
OrganizationConfigRepositoriesEndpoint.as_view(),
name='sentry-api-0-organization-config-repositories'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/issues/new/$',
OrganizationIssuesNewEndpoint.as_view(),
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/integrations/$',
OrganizationIntegrationsEndpoint.as_view(),
name='sentry-api-0-organization-integrations'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/integrations/(?P<integration_id>[^\/]+)/$',
OrganizationIntegrationDetailsEndpoint.as_view(),
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/members/$',
OrganizationMemberIndexEndpoint.as_view(),
name='sentry-api-0-organization-member-index'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/users/issues/$',
OrganizationUserIssuesSearchEndpoint.as_view(),
name='sentry-api-0-organization-issue-search'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/users/(?P<user_id>[^\/]+)/issues/$',
OrganizationUserIssuesEndpoint.as_view(),
name='sentry-api-0-organization-user-issues'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/members/(?P<member_id>[^\/]+)/$',
OrganizationMemberDetailsEndpoint.as_view(),
name='sentry-api-0-organization-member-details'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/members/(?P<member_id>[^\/]+)/unreleased-commits/$',
OrganizationMemberUnreleasedCommitsEndpoint.as_view(),
name='sentry-api-0-organization-member-unreleased-commits'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/members/(?P<member_id>[^\/]+)/issues/assigned/$',
OrganizationMemberIssuesAssignedEndpoint.as_view(),
name='sentry-api-0-organization-member-issues-assigned'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/members/(?P<member_id>[^\/]+)/issues/bookmarked/$',
OrganizationMemberIssuesBookmarkedEndpoint.as_view(),
name='sentry-api-0-organization-member-issues-bookmarked'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/members/(?P<member_id>[^\/]+)/issues/viewed/$',
OrganizationMemberIssuesViewedEndpoint.as_view(),
name='sentry-api-0-organization-member-issues-viewed'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/members/(?P<member_id>[^\/]+)/teams/(?P<team_slug>[^\/]+)/$',
OrganizationMemberTeamDetailsEndpoint.as_view(),
name='sentry-api-0-organization-member-team-details'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/projects/$',
OrganizationProjectsEndpoint.as_view(),
name='sentry-api-0-organization-projects'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/repos/$',
OrganizationRepositoriesEndpoint.as_view(),
name='sentry-api-0-organization-repositories'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/repos/(?P<repo_id>[^\/]+)/$',
OrganizationRepositoryDetailsEndpoint.as_view(),
name='sentry-api-0-organization-repository-details'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/repos/(?P<repo_id>[^\/]+)/commits/$',
OrganizationRepositoryCommitsEndpoint.as_view(),
name='sentry-api-0-organization-repository-commits'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/releases/$',
OrganizationReleasesEndpoint.as_view(),
name='sentry-api-0-organization-releases'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/releases/(?P<version>[^/]+)/$',
OrganizationReleaseDetailsEndpoint.as_view(),
name='sentry-api-0-organization-release-details'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/releases/(?P<version>[^/]+)/files/$',
OrganizationReleaseFilesEndpoint.as_view(),
name='sentry-api-0-organization-release-files'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/releases/(?P<version>[^/]+)/files/(?P<file_id>\d+)/$',
OrganizationReleaseFileDetailsEndpoint.as_view(),
name='sentry-api-0-organization-release-file-details'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/releases/(?P<version>[^/]+)/commitfiles/$',
CommitFileChangeEndpoint.as_view(),
name='sentry-api-0-release-commitfilechange'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/releases/(?P<version>[^/]+)/deploys/$',
ReleaseDeploysEndpoint.as_view(),
name='sentry-api-0-organization-release-deploys'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/releases/(?P<version>[^/]+)/commits/$',
OrganizationReleaseCommitsEndpoint.as_view(),
name='sentry-api-0-organization-release-commits'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/stats/$',
OrganizationStatsEndpoint.as_view(),
name='sentry-api-0-organization-stats'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/teams/$',
OrganizationTeamsEndpoint.as_view(),
name='sentry-api-0-organization-teams'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/onboarding-tasks/$',
OrganizationOnboardingTaskEndpoint.as_view(),
name='sentry-api-0-organization-onboardingtasks'
),
# Teams
url(
r'^teams/(?P<organization_slug>[^\/]+)/(?P<team_slug>[^\/]+)/$',
TeamDetailsEndpoint.as_view(),
name='sentry-api-0-team-details'
),
url(
r'^teams/(?P<organization_slug>[^\/]+)/(?P<team_slug>[^\/]+)/(?:issues|groups)/new/$',
TeamGroupsNewEndpoint.as_view(),
name='sentry-api-0-team-groups-new'
),
url(
r'^teams/(?P<organization_slug>[^\/]+)/(?P<team_slug>[^\/]+)/(?:issues|groups)/trending/$',
TeamGroupsTrendingEndpoint.as_view(),
name='sentry-api-0-team-groups-trending'
),
url(
r'^teams/(?P<organization_slug>[^\/]+)/(?P<team_slug>[^\/]+)/members/$',
TeamMembersEndpoint.as_view(),
name='sentry-api-0-team-members'
),
url(
r'^teams/(?P<organization_slug>[^\/]+)/(?P<team_slug>[^\/]+)/projects/$',
TeamProjectsEndpoint.as_view(),
name='sentry-api-0-team-project-index'
),
url(
r'^teams/(?P<organization_slug>[^\/]+)/(?P<team_slug>[^\/]+)/stats/$',
TeamStatsEndpoint.as_view(),
name='sentry-api-0-team-stats'
),
url(
r'^teams/(?P<organization_slug>[^\/]+)/(?P<team_slug>[^\/]+)/avatar/$',
TeamAvatarEndpoint.as_view(),
name='sentry-api-0-team-avatar'
),
# Projects
url(
r'^projects/$',
ProjectIndexEndpoint.as_view(),
name='sentry-api-0-projects'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/$',
ProjectDetailsEndpoint.as_view(),
name='sentry-api-0-project-details'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/avatar/$',
ProjectAvatarEndpoint.as_view(),
name='sentry-api-0-project-avatar'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/create-sample/$',
ProjectCreateSampleEndpoint.as_view(),
name='sentry-api-0-project-create-sample'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/docs/$',
ProjectDocsEndpoint.as_view(),
name='sentry-api-0-project-docs'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/docs/(?P<platform>[\w-]+)/$',
ProjectDocsPlatformEndpoint.as_view(),
name='sentry-api-0-project-docs-platform'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/environments/$',
ProjectEnvironmentsEndpoint.as_view(),
name='sentry-api-0-project-environments'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/environments/(?P<environment>[^/]+)/$',
ProjectEnvironmentDetailsEndpoint.as_view(),
name='sentry-api-0-project-environment-details'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/integrations/(?P<integration_id>[^/]+)/$',
ProjectIntegrationDetailsEndpoint.as_view(),
name='sentry-api-0-project-integration-details'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/platforms/$',
ProjectPlatformsEndpoint.as_view(),
name='sentry-api-0-project-platform-details'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/events/$',
ProjectEventsEndpoint.as_view(),
name='sentry-api-0-project-events'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/events/(?P<event_id>[\w-]+)/$',
ProjectEventDetailsEndpoint.as_view(),
name='sentry-api-0-project-event-details'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/events/(?P<event_id>[\w-]+)/committers/$',
EventFileCommittersEndpoint.as_view(),
name='sentry-api-0-event-file-committers'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/events/(?P<event_id>[\w-]+)/owners/$',
EventOwnersEndpoint.as_view(),
name='sentry-api-0-event-owners'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/files/dsyms/$',
DSymFilesEndpoint.as_view(),
name='sentry-api-0-dsym-files'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/files/difs/assemble/$',
DifAssembleEndpoint.as_view(),
name='sentry-api-0-assemble-dif-files'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/files/dsyms/unknown/$',
UnknownDSymFilesEndpoint.as_view(),
name='sentry-api-0-unknown-dsym-files'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/files/dsyms/associate/$',
AssociateDSymFilesEndpoint.as_view(),
name='sentry-api-0-associate-dsym-files'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/filters/$',
ProjectFiltersEndpoint.as_view(),
name='sentry-api-0-project-filters'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/filters/(?P<filter_id>[\w-]+)/$',
ProjectFilterDetailsEndpoint.as_view(),
name='sentry-api-0-project-filter-details'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/hooks/$',
ProjectServiceHooksEndpoint.as_view(),
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/hooks/(?P<hook_id>[^\/]+)/$',
ProjectServiceHookDetailsEndpoint.as_view(),
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/hooks/(?P<hook_id>[^\/]+)/stats/$',
ProjectServiceHookStatsEndpoint.as_view(),
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/(?:issues|groups)/$',
ProjectGroupIndexEndpoint.as_view(),
name='sentry-api-0-project-group-index'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/(?:issues|groups)/stats/$',
ProjectGroupStatsEndpoint.as_view(),
name='sentry-api-0-project-group-stats'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/keys/$',
ProjectKeysEndpoint.as_view(),
name='sentry-api-0-project-keys'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/keys/(?P<key_id>[^\/]+)/$',
ProjectKeyDetailsEndpoint.as_view(),
name='sentry-api-0-project-key-details'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/keys/(?P<key_id>[^\/]+)/stats/$',
ProjectKeyStatsEndpoint.as_view()
),
url(
r'^projects/(?P<organization_slug>[^/]+)/(?P<project_slug>[^/]+)/members/$',
ProjectMemberIndexEndpoint.as_view(),
name='sentry-api-0-project-member-index'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/releases/$',
ProjectReleasesEndpoint.as_view(),
name='sentry-api-0-project-releases'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/releases/token/$',
ProjectReleasesTokenEndpoint.as_view(),
name='sentry-api-0-project-releases-token'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/releases/(?P<version>[^/]+)/$',
ProjectReleaseDetailsEndpoint.as_view(),
name='sentry-api-0-project-release-details'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/releases/(?P<version>[^/]+)/commits/$',
ProjectReleaseCommitsEndpoint.as_view(),
name='sentry-api-0-project-release-commits'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/releases/(?P<version>[^/]+)/resolved/$',
IssuesResolvedInReleaseEndpoint.as_view(),
name='sentry-api-0-release-resolved'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/releases/(?P<version>[^/]+)/files/$',
ProjectReleaseFilesEndpoint.as_view(),
name='sentry-api-0-project-release-files'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/releases/(?P<version>[^/]+)/files/(?P<file_id>\d+)/$',
ProjectReleaseFileDetailsEndpoint.as_view(),
name='sentry-api-0-project-release-file-details'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/rules/$',
ProjectRulesEndpoint.as_view(),
name='sentry-api-0-project-rules'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/rules/configuration/$',
ProjectRulesConfigurationEndpoint.as_view(),
name='sentry-api-0-project-rules-configuration'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/rules/(?P<rule_id>[^\/]+)/$',
ProjectRuleDetailsEndpoint.as_view(),
name='sentry-api-0-project-rule-details'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/searches/$',
ProjectSearchesEndpoint.as_view(),
name='sentry-api-0-project-searches'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/searches/(?P<search_id>[^\/]+)/$',
ProjectSearchDetailsEndpoint.as_view(),
name='sentry-api-0-project-search-details'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/stats/$',
ProjectStatsEndpoint.as_view(),
name='sentry-api-0-project-stats'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/tags/$',
ProjectTagsEndpoint.as_view(),
name='sentry-api-0-project-tags'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/tags/(?P<key>[^/]+)/$',
ProjectTagKeyDetailsEndpoint.as_view(),
name='sentry-api-0-project-tagkey-details'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/tags/(?P<key>[^/]+)/values/$',
ProjectTagKeyValuesEndpoint.as_view(),
name='sentry-api-0-project-tagkey-values'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/teams/$',
ProjectTeamsEndpoint.as_view(),
name='sentry-api-0-project-teams'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/teams/(?P<team_slug>[^\/]+)/$',
ProjectTeamDetailsEndpoint.as_view(),
name='sentry-api-0-project-team-details'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/transfer/$',
ProjectTransferEndpoint.as_view(),
name='sentry-api-0-project-transfer'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/users/$',
ProjectUsersEndpoint.as_view(),
name='sentry-api-0-project-users'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/users/(?P<user_hash>[^/]+)/$',
ProjectUserDetailsEndpoint.as_view(),
name='sentry-api-0-project-user-details'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/(?:user-feedback|user-reports)/$',
ProjectUserReportsEndpoint.as_view(),
name='sentry-api-0-project-user-reports'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/user-stats/$',
ProjectUserStatsEndpoint.as_view(),
name='sentry-api-0-project-userstats'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/processingissues/$',
ProjectProcessingIssuesEndpoint.as_view(),
name='sentry-api-0-project-processing-issues'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/processingissues/fix$',
ProjectProcessingIssuesFixEndpoint.as_view(),
name='sentry-api-0-project-fix-processing-issues'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/reprocessing/$',
ProjectReprocessingEndpoint.as_view(),
name='sentry-api-0-project-reprocessing'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/processingissues/discard/$',
ProjectProcessingIssuesDiscardEndpoint.as_view(),
name='sentry-api-0-project-discard-processing-issues'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/ownership/$',
ProjectOwnershipEndpoint.as_view(),
name='sentry-api-0-project-ownership'
),
# Load plugin project urls
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/plugins/$',
ProjectPluginsEndpoint.as_view(),
name='sentry-api-0-project-plugins'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/plugins/(?P<plugin_id>[^\/]+)/$',
ProjectPluginDetailsEndpoint.as_view(),
name='sentry-api-0-project-plugin-details'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/plugins?/',
include('sentry.plugins.base.project_api_urls')
),
# Groups
url(
r'^(?:issues|groups)/(?P<issue_id>\d+)/$',
GroupDetailsEndpoint.as_view(),
name='sentry-api-0-group-details'
),
url(
r'^(?:issues|groups)/(?P<issue_id>\d+)/events/$',
GroupEventsEndpoint.as_view(),
name='sentry-api-0-group-events'
),
url(
r'^(?:issues|groups)/(?P<issue_id>\d+)/events/latest/$',
GroupEventsLatestEndpoint.as_view(),
name='sentry-api-0-group-events-latest'
),
url(
r'^(?:issues|groups)/(?P<issue_id>\d+)/events/oldest/$',
GroupEventsOldestEndpoint.as_view(),
name='sentry-api-0-group-events-oldest'
),
url(
r'^(?:issues|groups)/(?P<issue_id>\d+)/(?:notes|comments)/$',
GroupNotesEndpoint.as_view(),
name='sentry-api-0-group-notes'
),
url(
r'^(?:issues|groups)/(?P<issue_id>\d+)/(?:notes|comments)/(?P<note_id>[^\/]+)/$',
GroupNotesDetailsEndpoint.as_view(),
name='sentry-api-0-group-notes-details'
),
url(
r'^(?:issues|groups)/(?P<issue_id>\d+)/hashes/$',
GroupHashesEndpoint.as_view(),
name='sentry-api-0-group-hashes'
),
url(
r'^issues/(?P<issue_id>\d+)/participants/$',
GroupParticipantsEndpoint.as_view(),
name='sentry-api-0-group-participants'
),
| |
# file
self.certpath = certpath
cert_size = os.path.getsize(certpath)
if cert_size > MAX_CERT_SIZE:
raise Exception(error_msg)
try:
f = open(certpath)
rawcert = f.read()
f.close()
except:
raise Exception(error_msg)
else:
rawcert = certpath
if rawcert is None:
raise Exception(error_msg)
self.rawcert = rawcert
# Let's try to get file format : PEM or DER.
fmtstr = 'openssl x509 -text -inform %s -noout'
convertstr = 'openssl x509 -inform %s -outform %s'
cert_header = "-----BEGIN CERTIFICATE-----"
cert_footer = "-----END CERTIFICATE-----"
l = rawcert.split(cert_header, 1)
if len(l) == 2: # looks like PEM
tmp = l[1]
l = tmp.split(cert_footer, 1)
if len(l) == 2:
tmp = l[0]
rawcert = "%s%s%s\n" % (cert_header, tmp, cert_footer)
else:
raise Exception(error_msg)
r,w,e = popen3((fmtstr % "PEM").split(" "))
w.write(rawcert)
w.close()
textcert = r.read()
r.close()
res = e.read()
e.close()
if res == '':
self.format = "PEM"
self.pemcert = rawcert
self.textcert = textcert
cmd = (convertstr % ("PEM", "DER")).split(" ")
self.dercert = self._apply_ossl_cmd(cmd, rawcert)
else:
raise Exception(error_msg)
else: # not PEM, try DER
r,w,e = popen3((fmtstr % "DER").split(" "))
w.write(rawcert)
w.close()
textcert = r.read()
r.close()
res = e.read()
if res == '':
self.format = "DER"
self.dercert = rawcert
self.textcert = textcert
cmd = (convertstr % ("DER", "PEM")).split(" ")
self.pemcert = self._apply_ossl_cmd(cmd, rawcert)
cmd = (convertstr % ("DER", "DER")).split(" ")
self.dercert = self._apply_ossl_cmd(cmd, rawcert)
else:
raise Exception(error_msg)
self.osslcmdbase = ['openssl', 'x509', '-inform', self.format]
r,w,e = popen3('openssl asn1parse -inform DER'.split(' '))
w.write(self.dercert)
w.close()
self.asn1parsecert = r.read()
r.close()
res = e.read()
e.close()
if res != '':
raise Exception(error_msg)
# Grab _raw_ X509v3 Authority Key Identifier, if any.
tmp = self.asn1parsecert.split(":X509v3 Authority Key Identifier", 1)
self.authorityKeyID = None
if len(tmp) == 2:
tmp = tmp[1]
tmp = tmp.split("[HEX DUMP]:", 1)[1]
self.authorityKeyID = tmp.split('\n',1)[0]
# Grab _raw_ X509v3 Subject Key Identifier, if any.
tmp = self.asn1parsecert.split(":X509v3 Subject Key Identifier", 1)
self.subjectKeyID = None
if len(tmp) == 2:
tmp = tmp[1]
tmp = tmp.split("[HEX DUMP]:", 1)[1]
self.subjectKeyID = tmp.split('\n',1)[0]
# Get tbsCertificate using the worst hack. output of asn1parse
# looks like that:
#
# 0:d=0 hl=4 l=1298 cons: SEQUENCE
# 4:d=1 hl=4 l=1018 cons: SEQUENCE
# ...
#
l1,l2 = self.asn1parsecert.split('\n', 2)[:2]
hl1 = int(l1.split("hl=",1)[1].split("l=",1)[0])
rem = l2.split("hl=",1)[1]
hl2, rem = rem.split("l=",1)
hl2 = int(hl2)
l = int(rem.split("cons",1)[0])
self.tbsCertificate = self.dercert[hl1:hl1+hl2+l]
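# Worked example using the sample asn1parse output above: l1 gives hl1=4 for
# the outer SEQUENCE and l2 gives hl2=4, l=1018 for the inner one, so
# tbsCertificate = dercert[4:4+4+1018], i.e. the inner header plus its
# content, with only the outer header skipped.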
# Parse the -text output of openssl to make things available
tmp = self.textcert.split('\n', 2)[2]
l = tmp.split('\n', 1)
if len(l) != 2:
raise Exception(error_msg)
cur, tmp = l
i = 0
k = self.possible_fields[i] # Version:
cur = cur[len(k):] + '\n'
while k:
l = tmp.split('\n', 1)
if len(l) != 2: # Over
fields_dict[k] = cur
break
l, tmp = l
newkey = 0
# skip fields we have already seen, this is the purpose of 'i'
for j in range(i, self.possible_fields_count):
f = self.possible_fields[j]
if l.startswith(f):
fields_dict[k] = cur
cur = l[len(f):] + '\n'
k = f
newkey = 1
i = j+1
break
if newkey == 1:
continue
cur += l + '\n'
# version
v = fields_dict[" Version:"]
self.version = None
if v:
self.version = int(v[1:2])
if self.version is None:
raise Exception(error_msg)
# serial number
v = fields_dict[" Serial Number:"]
self.serial = None
if v:
v = v.replace('\n', '').strip()
if "0x" in v:
v = v.split("0x", 1)[1].split(')', 1)[0]
v = v.replace(':', '').upper()
if len(v) % 2:
v = '0' + v
self.serial = v
if self.serial is None:
raise Exception(error_msg)
# Signature Algorithm
v = fields_dict[" Signature Algorithm:"]
self.sigAlg = None
if v:
v = v.split('\n',1)[0]
v = v.strip()
self.sigAlg = v
if self.sigAlg is None:
raise Exception(error_msg)
# issuer
v = fields_dict[" Issuer:"]
self.issuer = None
if v:
v = v.split('\n',1)[0]
v = v.strip()
self.issuer = v
if self.issuer is None:
raise Exception(error_msg)
# not before
v = fields_dict[" Not Before:"]
self.notBefore_str = None
if v:
v = v.split('\n',1)[0]
v = v.strip()
self.notBefore_str = v
if self.notBefore_str is None:
raise Exception(error_msg)
try:
self.notBefore = time.strptime(self.notBefore_str,
"%b %d %H:%M:%S %Y %Z")
except:
self.notBefore = time.strptime(self.notBefore_str,
"%b %d %H:%M:%S %Y")
self.notBefore_str_simple = time.strftime("%x", self.notBefore)
# not after
v = fields_dict[" Not After :"]
self.notAfter_str = None
if v:
v = v.split('\n',1)[0]
v = v.strip()
self.notAfter_str = v
if self.notAfter_str is None:
raise Exception(error_msg)
try:
self.notAfter = time.strptime(self.notAfter_str,
"%b %d %H:%M:%S %Y %Z")
except:
self.notAfter = time.strptime(self.notAfter_str,
"%b %d %H:%M:%S %Y")
self.notAfter_str_simple = time.strftime("%x", self.notAfter)
# subject
v = fields_dict[" Subject:"]
self.subject = None
if v:
v = v.split('\n',1)[0]
v = v.strip()
self.subject = v
if self.subject is None:
raise Exception(error_msg)
# Public Key Algorithm
v = fields_dict[" Public Key Algorithm:"]
self.pubKeyAlg = None
if v:
v = v.split('\n',1)[0]
v = v.strip()
self.pubKeyAlg = v
if self.pubKeyAlg is None:
raise Exception(error_msg)
# Modulus
v = fields_dict[" Modulus ("]
self.modulus = None
if v:
v,t = v.split(' bit):',1)
self.modulusLen = int(v)
t = t.replace(' ', '').replace('\n', '').replace(':', '')
self.modulus_hexdump = t
self.modulus = long(t, 16)
if self.modulus is None:
raise Exception(error_msg)
# Exponent
v = fields_dict[" Exponent:"]
self.exponent = None
if v:
v = v.split('(',1)[0]
self.exponent = long(v)
if self.exponent is None:
raise Exception(error_msg)
# Public Key instance
self.key = RSA.construct((self.modulus, self.exponent, ))
# Subject Key Identifier
# Authority Key Identifier: keyid, dirname and serial
self.authorityKeyID_keyid = None
self.authorityKeyID_dirname = None
self.authorityKeyID_serial = None
if self.authorityKeyID: # (hex version already done using asn1parse)
v = fields_dict[" keyid:"]
if v:
v = v.split('\n',1)[0]
v = v.strip().replace(':', '')
self.authorityKeyID_keyid = v
v = fields_dict[" DirName:"]
if v:
v = v.split('\n',1)[0]
self.authorityKeyID_dirname = v
v = fields_dict[" serial:"]
if v:
v = v.split('\n',1)[0]
v = v.strip().replace(':', '')
self.authorityKeyID_serial = v
# Basic constraints
self.basicConstraintsCritical = False
self.basicConstraints=None
v = fields_dict[" X509v3 Basic Constraints:"]
if v:
self.basicConstraints = {}
v,t = v.split('\n',2)[:2]
if "critical" in v:
self.basicConstraintsCritical = True
if "CA:" in t:
self.basicConstraints["CA"] = t.split('CA:')[1][:4] == "TRUE"
if "pathlen:" in t:
self.basicConstraints["pathlen"] = int(t.split('pathlen:')[1])
# X509v3 Key Usage
self.keyUsage = []
v = fields_dict[" X509v3 Key Usage:"]
if v:
# man 5 x509v3_config
ku_mapping = {"Digital Signature": "digitalSignature",
"Non Repudiation": "nonRepudiation",
"Key Encipherment": "keyEncipherment",
"Data Encipherment": "dataEncipherment",
"Key Agreement": "keyAgreement",
"Certificate Sign": "keyCertSign",
"CRL Sign": "cRLSign",
"Encipher Only": "encipherOnly",
"Decipher Only": "decipherOnly"}
v = v.split('\n',2)[1]
l = map(lambda x: x.strip(), v.split(','))
while l:
c = l.pop()
if ku_mapping.has_key(c):
self.keyUsage.append(ku_mapping[c])
else:
self.keyUsage.append(c) # Add it anyway
print "Found unknown X509v3 Key Usage: '%s'" % c
print "Report it to arno (at) natisbad.org for addition"
# X509v3 Extended Key Usage
self.extKeyUsage = []
v = fields_dict[" X509v3 Extended Key Usage:"]
if v:
# man 5 x509v3_config:
eku_mapping = {"TLS Web Server Authentication": "serverAuth",
"TLS Web Client Authentication": "clientAuth",
"Code Signing": "codeSigning",
"E-mail Protection": "emailProtection",
"Time Stamping": "timeStamping",
"Microsoft Individual Code Signing": "msCodeInd",
"Microsoft Commercial Code Signing": "msCodeCom",
"Microsoft Trust List Signing": "msCTLSign",
"Microsoft Encrypted File System": "msEFS",
"Microsoft Server Gated Crypto": "msSGC",
"Netscape Server Gated Crypto": "nsSGC",
"IPSec End System": "iPsecEndSystem",
"IPSec Tunnel": "iPsecTunnel",
"IPSec User": "iPsecUser"}
v = v.split('\n',2)[1]
l = map(lambda x: x.strip(), v.split(','))
while l:
c = l.pop()
if eku_mapping.has_key(c):
self.extKeyUsage.append(eku_mapping[c])
else:
self.extKeyUsage.append(c) # Add it anyway
print "Found unknown X509v3 Extended Key Usage: '%s'" % c
print "Report it to arno (at) natisbad.org for addition"
# CRL Distribution points
self.cRLDistributionPoints = []
v = fields_dict[" X509v3 CRL Distribution Points:"]
if v:
v = v.split("\n\n", 1)[0]
v = v.split("URI:")[1:]
self.cRLDistributionPoints = map(lambda x: x.strip(), v)
# Authority Information Access: list of tuples ("method", "location")
self.authorityInfoAccess = []
v = fields_dict[" Authority Information Access:"]
if v:
v = v.split("\n\n", 1)[0]
v = v.split("\n")[1:]
for e in v:
method, location = map(lambda x: x.strip(), e.split(" - ", 1))
self.authorityInfoAccess.append((method, location))
# signature field
v = fields_dict[" Signature Algorithm:" ]
self.sig = None
if v:
v = v.split('\n',1)[1]
v = v.replace(' ', '').replace('\n', '')
self.sig = "".join(map(lambda x: chr(int(x, 16)), v.split(':')))
self.sigLen = len(self.sig)
if self.sig | |
# Copyright (c) 2021, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of Google Inc. nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
r"""Run DeepConsensus and generate a polished FASTQ.
Usage:
deepconsensus run \
--subreads_to_ccs=subreads_to_ccs.bam \
--ccs_fasta=ccs_fasta.fasta \
--output=predictions.fastq \
--cpus=4
"""
import dataclasses
import enum
import itertools
import multiprocessing
import os
import time
from typing import Any, Callable, Dict, Generator, Iterable, List, Optional, Sequence, Tuple, Union
from absl import app
from absl import flags
from absl import logging
from ml_collections.config_dict import config_dict
from ml_collections.config_flags import config_flags
import numpy as np
import pandas as pd
import tensorflow as tf
from deepconsensus.models import data_providers
from deepconsensus.models import model_utils
from deepconsensus.postprocess import stitch_utils
from deepconsensus.preprocess import utils as preprocess_utils
from deepconsensus.utils import dc_constants
from deepconsensus.utils import utils
from tensorflow.python.platform import gfile
@enum.unique
class DebugStage(enum.Enum):
"""Stage to end after for debugging and runtime testing purposes."""
DC_INPUT = 1
TF_EXAMPLES = 2
RUN_MODEL = 3
FULL = 4
FLAGS = flags.FLAGS
# Inputs:
flags.DEFINE_string('subreads_to_ccs', None,
'Input BAM containing subreads aligned to ccs.')
flags.DEFINE_string('ccs_fasta', None, 'Input FASTA containing ccs sequences.')
# Outputs:
flags.DEFINE_string(
'output', None, 'Filename of output FASTQ file. If this path '
'does not end in ".fastq", the suffix will be added.')
# Model checkpoint:
flags.DEFINE_string(
'checkpoint', None, 'Path to checkpoint directory + prefix. '
'For example: path/to/model/checkpoint-50.')
config_flags.DEFINE_config_file('params', None,
'params.json configuration file.')
# The following just need to match the training parameters.
flags.DEFINE_integer('max_passes', 20, 'Maximum subreads in each input.')
flags.DEFINE_integer('example_width', 100, 'Number of bases in each input.')
flags.DEFINE_integer(
'padding', 20, 'Number of bases of padding to add to example_width to '
'allow for insertions.')
# The following parameters are used at the end for filtering the final output.
flags.DEFINE_integer('min_length', 0, 'Minimum length for reads output.')
flags.DEFINE_integer('min_quality', 20, 'Minimum quality for reads output.')
# The following parameters affect performance of this script.
flags.DEFINE_integer(
'batch_size', 1024,
'Number of examples to batch together for TensorFlow model prediction.')
flags.DEFINE_integer(
'batch_zmws', 20, 'Number of ZMWs to process at the same time. '
'If 0, process all ZMWs in one batch.')
# The following parameters are for debugging.
flags.DEFINE_integer('limit', None, 'Only process this many ZMWs. ')
flags.DEFINE_enum_class(
'end_after_stage', 'full', DebugStage,
'For debugging and runtime measurement purposes, '
'end after this stage for each ZMW.')
flags.DEFINE_integer(
'cpus',
multiprocessing.cpu_count() - 1,
'Number of processes to use during preprocessing stage. '
'Uses CPU count - 1 by default. '
' If 0, then preprocessing will be done in the main process '
'instead of using multiple processes.')
def register_required_flags():
flags.mark_flags_as_required([
'subreads_to_ccs',
'ccs_fasta',
'checkpoint',
'output',
])
@dataclasses.dataclass
class InferenceOptions:
"""A central place to define options used across various stages of inference.
Attributes:
example_width: Number of bases for each window/example given to the model.
example_height: Height of examples, which depends on max_passes.
padding: Number of bases of padding to add to example_width to allow for
insertions.
padded_len: Length of window after padding is added. This should be equal to
example_width + padding.
max_passes: Max number of subreads to include in input shown to model.
min_quality: Quality threshold to filter final reads.
min_length: Length threshold to filter final reads.
batch_size: Number of examples passed through model at once.
cpus: Number of processes to use for multiprocessing. Must be
positive (for multiprocessing) or 0 (for serial execution).
"""
example_width: int
example_height: int
padding: int
padded_len: int
max_passes: int
min_quality: int
min_length: int
batch_size: int
cpus: int
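# A minimal sketch (not called anywhere in this module) of how an
# InferenceOptions could be assembled from the flags above; example_height is
# passed in rather than derived, since its relationship to max_passes is
# defined by the preprocessing code, not here.
def build_inference_options_sketch(example_height: int) -> InferenceOptions:
  return InferenceOptions(
      example_width=FLAGS.example_width,
      example_height=example_height,
      padding=FLAGS.padding,
      padded_len=FLAGS.example_width + FLAGS.padding,
      max_passes=FLAGS.max_passes,
      min_quality=FLAGS.min_quality,
      min_length=FLAGS.min_length,
      batch_size=FLAGS.batch_size,
      cpus=FLAGS.cpus)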
timing = []
def timelog(stage: str,
item: str,
before: float,
num_examples: Optional[int] = None,
num_subreads: Optional[int] = None,
is_batch: bool = True,
update_global_variable: bool = True) -> Dict[str, Any]:
"""Catalogue time elapsed for a given stage relative to "before"."""
after = time.time()
datum = {
'item': item,
'stage': stage,
'start_time': before,
'end_time': after,
'runtime': after - before,
'is_batch': is_batch
}
if num_examples:
datum['num_examples'] = num_examples
if num_subreads:
datum['num_subreads'] = num_subreads
if update_global_variable:
timing.append(datum)
return datum
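# Typical (hypothetical) call pattern for timelog: capture time.time() before
# a stage, run the stage, then record it, e.g.
#   before = time.time()
#   ...  # preprocess one batch of ZMWs
#   timelog(stage='preprocess', item='batch_0', before=before, num_examples=128)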
def run_model_on_examples(
feature_dict_gen_fn: Callable[[], Dict[str, Union[np.ndarray, int, bytes]]],
model: tf.keras.Model,
model_params: Union[config_dict.ConfigDict, config_dict.FrozenConfigDict],
options: InferenceOptions,
) -> List[stitch_utils.DCModelOutput]:
"""Runs the model on one example to get one predicted output sequence.
Args:
feature_dict_gen_fn: Generator fn of feature dictionaries.
model: An initialized model that will be used to make predictions.
model_params: Parameters for the model.
options: Some options that apply to various stages of the inference run.
Returns:
A list of stitch_utils.DCModelOutput objects, one per window, containing the predictions from the model.
"""
def _process_input_helper(
features: Dict[str, tf.Tensor]
) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor]:
return data_providers.process_feature_dict(
features=features, params=model_params)
dataset = tf.data.Dataset.from_generator(
feature_dict_gen_fn,
output_signature={
'subreads':
tf.TensorSpec(
shape=(options.example_height, model_params.max_length,
model_params.num_channels),
dtype=dc_constants.TF_DATA_TYPE),
'subreads/num_passes':
tf.TensorSpec(shape=(), dtype=tf.int32),
'name':
tf.TensorSpec(shape=(), dtype=tf.string),
'window_pos':
tf.TensorSpec(shape=(), dtype=tf.int32),
})
dataset = dataset.map(map_func=_process_input_helper)
dataset = dataset.batch(batch_size=options.batch_size, drop_remainder=False)
dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
predictions = []
for rows, _, _, window_pos_arr, molecule_name_arr in dataset.as_numpy_iterator():
softmax_output = model.predict(rows)
y_preds = tf.argmax(softmax_output, -1)
error_prob = 1 - np.max(softmax_output, axis=-1)
quality_scores = -10 * np.log10(error_prob)
# Round to the nearest integer and cap at max allowed value.
quality_scores = np.round(quality_scores, decimals=0)
quality_scores = np.minimum(quality_scores, dc_constants.MAX_QUAL)
quality_scores = quality_scores.astype(dtype=np.int32)
for y_pred, qs, window_pos, molecule_name in zip(y_preds, quality_scores,
window_pos_arr,
molecule_name_arr):
dc_output = stitch_utils.DCModelOutput(
window_pos=window_pos, molecule_name=molecule_name.decode('utf-8'))
y_pred_bases = ''.join(
np.vectorize(dc_constants.VOCAB.__getitem__)(y_pred))
quality_string = utils.quality_scores_to_string(qs)
dc_output.sequence = y_pred_bases
dc_output.quality_string = quality_string
predictions.append(dc_output)
return predictions
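# Illustrative sketch (not used by the pipeline): the same softmax-to-Phred
# conversion applied above, isolated for a single window. A probability of
# exactly 1.0 yields an infinite score, which the cap at dc_constants.MAX_QUAL
# then bounds, just as in the loop above.
def softmax_to_quality_sketch(softmax_output: np.ndarray) -> np.ndarray:
  error_prob = 1 - np.max(softmax_output, axis=-1)
  quality_scores = -10 * np.log10(error_prob)
  quality_scores = np.round(quality_scores, decimals=0)
  quality_scores = np.minimum(quality_scores, dc_constants.MAX_QUAL)
  return quality_scores.astype(dtype=np.int32)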
def stitch_predictions_for_one_zmw(
predictions: Iterable[stitch_utils.DCModelOutput],
zmw: str,
options: InferenceOptions,
outcome_counter=stitch_utils.OutcomeCounter) -> Optional[str]:
"""Stitches together predictions into one sequence.
Args:
predictions: Predictions from running model on examples.
zmw: Molecule name, the part that is shared among all subreads.
options: Options here are used for filtering.
outcome_counter: Keeps track of how many ZMWs end up with which outcomes.
Returns:
Fastq string for one sequence.
"""
fastq_string = stitch_utils.stitch_to_fastq(
molecule_name=zmw,
predictions=predictions,
example_width=options.example_width,
min_quality=options.min_quality,
min_length=options.min_length,
outcome_counter=outcome_counter)
return fastq_string
def stream_fasta_and_bam(
subreads_to_ccs: str, ccs_fasta: str, options: InferenceOptions
) -> Generator[Tuple[str, str, Sequence[Any]], None, None]:
"""Streams inputs from FASTA and BAM concurrently.
Args:
subreads_to_ccs: Path to input BAM file with subreads aligned to template
sequences.
ccs_fasta: Path to the input FASTA file with template sequences (e.g.
CCS or POA).
options: Inference options, used to initialize a DcConfig object.
Yields:
For every ZMW, a (ZMW name, list of subreads, DcConfig) tuple.
"""
dc_config = preprocess_utils.DcConfig(
max_passes=options.max_passes,
example_width=options.example_width,
padding=options.padding)
# Temporarily disable unused-variable.
# pylint: disable=unused-variable
proc_feeder, main_counter = preprocess_utils.create_proc_feeder(
subreads_to_ccs=subreads_to_ccs, ccs_fasta=ccs_fasta, dc_config=dc_config)
# pylint: enable=unused-variable
for input_data in proc_feeder():
subreads, zmw, dc_config, _ = input_data
yield zmw, subreads, dc_config
def initialize_model(
checkpoint_path: str, params: config_dict.ConfigDict,
options: InferenceOptions
) -> Tuple[Optional[tf.keras.Model], Optional[config_dict.ConfigDict]]:
"""Initializes the model and gathers parameters.
Args:
checkpoint_path: Path to model checkpoint.
params: Parameter object, from flags.
options: Contains a few more parameters some of which will replace those in
the params object.
Returns:
A tuple containing an initialized model and a final parameter set.
"""
if FLAGS.end_after_stage in [DebugStage.TF_EXAMPLES, DebugStage.DC_INPUT]:
return None, None
# Figure out model parameters.
if not FLAGS.params:
params = model_utils.read_params_from_json(checkpoint_path=checkpoint_path)
else:
params = FLAGS.params
with params.unlocked():
params.max_passes = options.max_passes
logging.info('Loading %s', checkpoint_path)
model = model_utils.get_model(params)
# This loads a model saved in tf.train.Checkpoint format through the custom
# training loop code.
checkpoint = tf.train.Checkpoint(model=model)
checkpoint.restore(checkpoint_path).expect_partial()
model_utils.modify_params(
params=params,
speedy=True,
max_length=options.padded_len,
is_training=False)
logging.info('Finished initialize_model.')
return model, params
def preprocess(
| |
# Copyright (C) 2016 The Regents of the University of Michigan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""
Takes CSVs of Coursera quiz scores as input (from joined Coursera quiz_submission_metadata and quiz_metadata tables) and outputs a set of quiz features.
For the SQL queries used to generate the CSV files of raw input data, see ./sql/quiz_sql_query.txt
Usage: python3 quiz_feature_extractor.py \
-i /path/to/raw_data_directory \
-d /path/to/course_date_file \
-o /path/to/output_directory \
-n course_name [must match name in coursera_course_dates.csv; ex. "introfinance"]
on JG local:
python3 quiz_feature_extractor.py -i raw_data/thermo/ -d coursera_course_dates.csv -o proc_data/thermo/ -n introthermodynamics
"""
import argparse, datetime, re, os
import pandas as pd
import numpy as np
import itertools
from extraction.extraction_utils import course_len, timestamp_week, fetch_start_end_date
MILLISECONDS_IN_SECOND = 1000
MILLISECONDS_IN_DAY = 86400000
def fetch_course_runs(input_dir):
"""
Fetch numbers of course runs; this looks for any 3-digit numbers in filenames in input_dir.
:param input_dir: input directory with CSV files of quiz data.
:return: list of strings of course run numbers as they appear in filenames.
"""
runs = [re.match(r'.*(\d{3})_quiz\.csv', f).group(1) for f in os.listdir(input_dir) if re.match(r'.*(\d{3})_quiz\.csv', f)]
return runs
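# For example, with a hypothetical input_dir containing
# 'introthermodynamics_001_quiz.csv' and 'introthermodynamics_006_quiz.csv',
# fetch_course_runs would return ['001', '006'] (order follows os.listdir).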
def read_quiz_data(dir, run):
"""
Read quiz data for a given run.
:param dir: input directory with CSV files of quiz data.
:param run: run number; must match number in filename exactly (i.e., '006' not '6').
:return: pd.DataFrame of quiz data for run.
"""
quiz_file = [x for x in os.listdir(dir) if x.endswith('{0}_quiz.csv'.format(run))][0]
try:
quiz_df = pd.read_csv(os.path.join(dir, quiz_file))
except Exception as e:
print("[ERROR] reading quiz data from file {}: {}".format(quiz_file, e))
return quiz_df
def read_quiz_metadata(dir, run):
"""
Read quiz metadata for a given run.
:param dir: input directory with CSV files of quiz data.
:param run: run number; must match number in filename exactly (i.e., '006' not '6').
:return: pd.DataFrame of quiz metadata for run.
"""
quiz_meta_file = [x for x in os.listdir(dir) if x.endswith('{0}_quiz_metadata.csv'.format(run))][0]
try:
quiz_meta_df = pd.read_csv(os.path.join(dir, quiz_meta_file))
except Exception as e:
print("[ERROR] reading quiz metadata from file {}: {}".format(quiz_file, e))
return quiz_meta_df
def get_users_and_weeks(df, dropout_fp, df_user_col = 'session_user_id', dropout_user_col = 'userID', week_col = 'assignment_week'):
"""
Helper function to fetch all unique users and weeks in a course
:param df: pd.DataFrame of course quiz data; needs columns for session_user_id and assignment_week
:param dropout_fp: path to dropout csv from clickstream_feature_extractor
:param df_user_col: name of column containing unique user IDs in df.
:param dropout_user_col: name of column containing user IDs in dropout df (user_dropout_weeks.csv, in output_dir). This will be set in xing_feature_extractor.py.
:param week_col: name of column containing weeks.
:return: series containing all unique session_user_ids in df, and zero-indexed list of all week numbers in course as integers.
"""
try:
dropout_df = pd.read_csv(dropout_fp)
except Exception as e:
print("[ERROR] reading dropout_df from {}: {}".format(dropout_fp, e))
users = dropout_df[dropout_user_col].unique()
weeks = [x for x in range(int(max(df[week_col].dropna().unique())) + 1)]
return users,weeks
def gen_user_week_df(users, weeks):
"""
Create dataframe with all unique combinations of users and weeks.
:param users: array or iterable of user ids.
:param weeks: array or iterable of weeks.
:return: pd.DataFrame with columns 'session_user_id', 'week'
"""
user_df = pd.DataFrame({'key': 1, 'session_user_id': users})
week_df = pd.DataFrame({'key': 1, 'week': weeks})
df_out = pd.merge(user_df, week_df, on='key')[['session_user_id', 'week']]
return df_out
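# For example, gen_user_week_df(['u1', 'u2'], [0, 1]) returns a 4-row frame
# containing every (session_user_id, week) combination:
# (u1, 0), (u1, 1), (u2, 0), (u2, 1).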
def merge_feat_df(df, feat_temp_df, zero_fill_cols = None, zero_fill_prefix = None):
"""
Helper function to perform joining between output dataframe and temporary feature dataframes.
:param df_out: pd.DataFrame of user-week level features
:param feat_temp_df: pd.DataFrame of new features to be appended, column-wise, to df_out.
:return: df_out with feat_temp_df merged on, using left merge to retain all user-week records even if not present in feat_temp_df.
"""
df_out = df.merge(feat_temp_df, how='left', left_on=['session_user_id', 'week'], right_on=['session_user_id', 'assignment_week']).drop('assignment_week', axis=1)
if zero_fill_cols:
for col in zero_fill_cols:
df_out[col].fillna(0, inplace = True)
if zero_fill_prefix:
for col in [x for x in df_out.columns if x.startswith(zero_fill_prefix)]:
df_out[col].fillna(0, inplace=True)
return df_out
def gen_quiz_expanding_mean(df_in, users, weeks, quiz_types = ('video', 'quiz', 'homework')):
"""
Generate columns with expanding (cumulative) mean for each user across all previous weeks for quiz_type.
:param df_in: dataframe of user-submission level quiz data and features.
:param users: iterable of unique session_user_ids to include.
:param weeks: iterable of zero-indexed week numbers in the course.
:param quiz_types: tuple of quiz types to consider, or ('AGG',) to aggregate all quiz types into a single feature; default is all Coursera Spark quiz types documented here https://wiki.illinois.edu/wiki/display/coursera/quiz_metadata.
:return: pd.DataFrame with user-week level features.
"""
# initialize dataframe with all user, week combinations
df_out = pd.DataFrame([x for x in itertools.product(users, weeks)], columns=['session_user_id', 'assignment_week'])
for qt in quiz_types: # for each quiz type, compute expanding mean for user by week and merge as new column onto df_out
new_col_name = 'prior_avg_quiz_score_{0}'.format(qt)
if qt != 'AGG':
if qt not in df_in.quiz_type.unique(): # quiz type not used in course; set column to na and continue to next quiz type
df_out[new_col_name] = np.nan
continue
df = df_in[df_in.quiz_type == qt][['session_user_id', 'assignment_week', 'raw_score']]
else: # for AGG; get a subset of columns but keep all rows
df = df_in[['session_user_id', 'assignment_week', 'raw_score']]
# get expanding sums of raw scores and counts of quizzes at user-week level
df_feat = df.groupby(['session_user_id', 'assignment_week'])['raw_score'] \
.agg(('sum', 'count')) \
.reindex(pd.MultiIndex.from_product([users, weeks], names=['session_user_id', 'assignment_week'])) \
.groupby(level=0) \
.cumsum() \
.groupby(level=0) \
.shift(1)
df_feat[new_col_name] = df_feat['sum'] / df_feat['count']
df_feat = df_feat.reset_index().drop(['count', 'sum'], axis=1)
user_id_ixs = df_feat['session_user_id']
df_feat = df_feat.groupby('session_user_id').fillna(method='ffill')
df_feat['session_user_id'] = user_id_ixs
df_feat = df_feat[['session_user_id', 'assignment_week', new_col_name]]
df_out = df_out.merge(df_feat, how = 'left')
return df_out
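# In other words: for each user and quiz type, prior_avg_quiz_score_<type> in
# week w is the mean raw score over that user's submissions in weeks strictly
# before w (cumulative sum / cumulative count, shifted down one week), then
# forward-filled across weeks with no new submissions; it stays NaN through a
# user's first submission week.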
def pct_max_weekly_submissions(quiz_df, quiz_meta_df):
"""
Helper function to compute student weekly submissions as a percentage of max # of submissions, and as a percentage of the highest number of student submissions that week.
:param quiz_df: pd.DataFrame of quiz submission data.
:param quiz_meta_df: pd.DataFrame of quiz metadata.
:return: df_out; pd.DataFrame with session_user_id, assignment_week, total_user_submissions_week, and weekly_pct_max_submissions
"""
# submissions as percentage of maximum instructor-allowed submissions that week
max_submission_df = quiz_meta_df.groupby('assignment_week')['maximum_submissions'].agg('sum').rename('max_allowed_submissions_week').reset_index()
total_submission_df = quiz_df[['session_user_id', 'assignment_week']].groupby(['session_user_id', 'assignment_week']).size().rename('total_user_submissions_week').reset_index()
df_out = total_submission_df.merge(max_submission_df)
df_out['weekly_pct_max_allowed_submissions'] = df_out['total_user_submissions_week']/df_out['max_allowed_submissions_week']
df_out.drop('max_allowed_submissions_week', axis = 1, inplace = True)
# submissions as a percentage of maximum/highest number of student submissions that week
max_student_submission_df = df_out.groupby('assignment_week')['total_user_submissions_week'].agg('max').rename('max_student_submissions_week').reset_index()
df_out = df_out.merge(max_student_submission_df)
df_out['weekly_pct_max_student_submissions'] = df_out['total_user_submissions_week']/df_out['max_student_submissions_week']
df_out.drop('max_student_submissions_week', axis = 1, inplace = True)
return df_out
def raw_points_per_submission(quiz_df):
total_submission_df = quiz_df[['session_user_id', 'assignment_week']]\
.groupby(['session_user_id', 'assignment_week'])\
.size()\
.rename('total_user_submissions_week')\
.reset_index()
total_raw_points_df = quiz_df\
.groupby(['session_user_id', 'assignment_week'])['raw_score']\
.agg('sum')\
.rename('total_raw_points_week')\
.reset_index()
df_out = total_submission_df.merge(total_raw_points_df)
df_out['raw_points_per_submission'] = df_out['total_raw_points_week'] / df_out['total_user_submissions_week']
df_out.drop('total_user_submissions_week', axis = 1, inplace = True)
return df_out
def pre_dl_submissions(quiz_df, submission_bins = [-np.inf, 0, MILLISECONDS_IN_DAY, 3*MILLISECONDS_IN_DAY, 7*MILLISECONDS_IN_DAY, np.inf], bin_labels = ['pre_dl_submission_count_late', 'pre_dl_submission_count_0_1_day', 'pre_dl_submission_count_1_3_day', 'pre_dl_submission_count_3_7_day', 'pre_dl_submission_count_greater_7_day']):
"""
Create dataframe with counts of submissions within bins defined by submission_bins by user and week.
:param quiz_df:
:param submission_bins:
:param bin_labels:
:return:
"""
# create categorical based on cut points of (1 week; 3 days; > 1 day; < 1 day)
quiz_submissions_binned = pd.cut(quiz_df['pre_dl_submission_time'], bins = submission_bins, labels = bin_labels)
temp = pd.concat([quiz_df[['session_user_id', 'assignment_week']], pd.get_dummies(quiz_submissions_binned)], axis = 1).groupby(['session_user_id', 'assignment_week']).agg('sum').reset_index()
return temp
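# With the default bins, pre_dl_submission_time (apparently milliseconds
# before the deadline, given the day-sized cut points) is counted into: late
# (<= 0), within 1 day, 1-3 days, 3-7 days, and more than 7 days early,
# producing one count column per bin at the user-week level.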
def gen_quiz_features(quiz_df, quiz_meta_df, course_start, course_end, quiz_types = ('video', 'quiz', 'homework'), dropout_fp = "/output/user_dropout_weeks.csv"):
"""
Generates derived features for quiz_df.
:param quiz_df: raw pd.DataFrame of submission-level quiz data as pd.DataFrame; this is also used to append any new columns needed for deriving complex features.
:param quiz_meta_df: pd.DataFrame of quiz-level metadata
:param course_start:
:param course_end:
:param quiz_types: list of quiz types to consider; other quiz types are excluded (quiz types are video, quiz, homework, exam, survey; see documentation here for more info on quiz types: https://wiki.illinois.edu/wiki/display/coursera/quiz_metadata)
:return: df_out, user-week level pd.DataFrame of quiz data with derived features (one entry per user per week).
"""
# add columns with submission and assignment week using timestamp and course start/end dates
# note that pre-multiplying by 1000 is necessary because the timestamp format for these submissions is different from the clickstream timestamp format
quiz_df['submission_week'] = (quiz_df['submission_time']*1000).apply(timestamp_week, args = (course_start, course_end))
quiz_df['assignment_week'] = (quiz_df['soft_close_time']*1000).apply(timestamp_week, args = | |
"category": "Status",
"pp": 10,
"priority": 0,
"flags": {"snatch", "nonsky"},
"volatileStatus": 'Substitute',
"secondary": False,
"target": "self",
"type": "Normal"
},
"suckerpunch": {
"accuracy": 100,
"basePower": 80,
"category": "Physical",
"pp": 5,
"priority": 1,
"flags": {"contact", "protect", "mirror"},
"secondary": False,
"target": "normal",
"type": "Dark"
},
"sunnyday": {
"accuracy": True,
"basePower": 0,
"category": "Status",
"pp": 5,
"priority": 0,
"flags": {},
"weather": 'sunnyday',
"secondary": False,
"target": "all",
"type": "Fire"
},
"superfang": {
"accuracy": 90,
"basePower": 0,
"category": "Physical",
"pp": 10,
"priority": 0,
"flags": {"contact", "protect", "mirror"},
"secondary": False,
"target": "normal",
"type": "Normal"
},
"superpower": {
"accuracy": 100,
"basePower": 120,
"category": "Physical",
"pp": 5,
"priority": 0,
"flags": {"contact", "protect", "mirror"},
"self": {
"boosts": {
"atk": -1,
"def": -1
}
},
"secondary": False,
"target": "normal",
"type": "Fighting"
},
"supersonic": {
"accuracy": 55,
"basePower": 0,
"category": "Status",
"pp": 20,
"priority": 0,
"flags": {"protect", "reflectable", "mirror", "sound", "authentic"},
"volatileStatus": 'confusion',
"secondary": False,
"target": "normal",
"type": "Normal"
},
"surf": {
"accuracy": 100,
"basePower": 90,
"category": "Special",
"pp": 15,
"priority": 0,
"flags": {"protect", "mirror", "nonsky"},
"secondary": False,
"target": "allAdjacent",
"type": "Water"
},
"swagger": {
"accuracy": 90,
"basePower": 0,
"category": "Status",
"pp": 15,
"priority": 0,
"flags": {"protect", "reflectable", "mirror"},
"volatileStatus": 'confusion',
"boosts": {
"atk": 2
},
"secondary": False,
"target": "normal",
"type": "Normal"
},
"swallow": {
"accuracy": True,
"basePower": 0,
"category": "Status",
"pp": 10,
"priority": 0,
"flags": {"snatch", "heal"}, "secondary": False,
"target": "self",
"type": "Normal"
},
"sweetkiss": {
"accuracy": 75,
"basePower": 0,
"category": "Status",
"pp": 10,
"priority": 0,
"flags": {"protect", "reflectable", "mirror"},
"volatileStatus": 'confusion',
"secondary": False,
"target": "normal",
"type": "Fairy"
},
"sweetscent": {
"accuracy": 100,
"basePower": 0,
"category": "Status",
"pp": 20,
"priority": 0,
"flags": {"protect", "reflectable", "mirror"},
"boosts": {
"evasion": -2
},
"secondary": False,
"target": "allAdjacentFoes",
"type": "Normal"
},
"swift": {
"accuracy": True,
"basePower": 60,
"category": "Special",
"pp": 20,
"priority": 0,
"flags": {"protect", "mirror"},
"secondary": False,
"target": "allAdjacentFoes",
"type": "Normal"
},
"switcheroo": {
"accuracy": 100,
"basePower": 0,
"category": "Status",
"pp": 10,
"priority": 0,
"flags": {"protect", "mirror"}, "secondary": False,
"target": "normal",
"type": "Dark"
},
"swordsdance": {
"accuracy": True,
"basePower": 0,
"category": "Status",
"pp": 20,
"priority": 0,
"flags": {"snatch"},
"boosts": {
"atk": 2
},
"secondary": False,
"target": "self",
"type": "Normal"
},
"synchronoise": {
"accuracy": 100,
"basePower": 120,
"category": "Special",
"pp": 10,
"priority": 0,
"flags": {"protect", "mirror"},
"secondary": False,
"target": "allAdjacent",
"type": "Psychic"
},
"synthesis": {
"accuracy": True,
"basePower": 0,
"category": "Status",
"pp": 5,
"priority": 0,
"flags": {"snatch", "heal"},
"secondary": False,
"target": "self",
"type": "Grass"
},
"tackle": {
"accuracy": 100,
"basePower": 50,
"category": "Physical",
"pp": 35,
"priority": 0,
"flags": {"contact", "protect", "mirror"},
"secondary": False,
"target": "normal",
"type": "Normal"
},
"tailglow": {
"accuracy": True,
"basePower": 0,
"category": "Status",
"pp": 20,
"priority": 0,
"flags": {"snatch"},
"boosts": {
"spa": 3
},
"secondary": False,
"target": "self",
"type": "Bug"
},
"tailslap": {
"accuracy": 85,
"basePower": 25,
"category": "Physical",
"pp": 10,
"priority": 0,
"flags": {"contact", "protect", "mirror"},
"multihit": [2, 5],
"secondary": False,
"target": "normal",
"type": "Normal"
},
"tailwhip": {
"accuracy": 100,
"basePower": 0,
"category": "Status",
"pp": 30,
"priority": 0,
"flags": {"protect", "reflectable", "mirror"},
"boosts": {
"def": -1
},
"secondary": False,
"target": "allAdjacentFoes",
"type": "Normal"
},
"tailwind": {
"accuracy": True,
"basePower": 0,
"category": "Status",
"pp": 15,
"priority": 0,
"flags": {"snatch"},
"sidecondition": 'tailwind',
"secondary": False,
"target": "allySide",
"type": "Flying"
},
"takedown": {
"accuracy": 85,
"basePower": 90,
"category": "Physical",
"pp": 20,
"priority": 0,
"flags": {"contact", "protect", "mirror"},
"recoil": [1, 4],
"secondary": False,
"target": "normal",
"type": "Normal"
},
"taunt": {
"accuracy": 100,
"basePower": 0,
"category": "Status",
"pp": 20,
"priority": 0,
"flags": {"protect", "reflectable", "mirror", "authentic"},
"volatileStatus": 'taunt',
"secondary": False,
"target": "normal",
"type": "Dark"
},
"technoblast": {
"accuracy": 100,
"basePower": 120,
"category": "Special",
"pp": 5,
"priority": 0,
"flags": {"protect", "mirror"},
"secondary": False,
"target": "normal",
"type": "Normal"
},
"teeterdance": {
"accuracy": 100,
"basePower": 0,
"category": "Status",
"pp": 20,
"priority": 0,
"flags": {"protect", "mirror"},
"volatileStatus": 'confusion',
"secondary": False,
"target": "allAdjacent",
"type": "Normal"
},
"telekinesis": {
"accuracy": True,
"basePower": 0,
"category": "Status",
"pp": 15,
"priority": 0,
"flags": {"protect", "reflectable", "mirror", "gravity"},
"volatileStatus": 'telekinesis',
"secondary": False,
"target": "normal",
"type": "Psychic"
},
"teleport": {
"accuracy": True,
"basePower": 0,
"category": "Status",
"pp": 20,
"priority": 0,
"flags": {},
"secondary": False,
"target": "self",
"type": "Psychic"
},
"thief": {
"accuracy": 100,
"basePower" : 60,
"category": "Physical",
"pp": 25,
"priority": 0,
"secondary": False,
"target": "normal",
"type": "Dark"
},
"thousandarrows": {
"accuracy": 100,
"basePower": 90,
"category": "Physical",
"pp": 10,
"priority": 0,
"volatileStatus": 'smackdown',
"ignoreImmunity": {"Ground"},
"secondary": False,
"target": "allAdjacentFoes",
"type": "Ground"
},
"thousandwaves": {
"accuracy": 100,
"basePower": 90,
"category": "Physical",
"pp": 10,
"priority": 0,
"flags": {"protect", "mirror", "nonsky"},
"isUnreleased": True,
"secondary": False,
"target": "allAdjacentFoes",
"type": "Ground"
},
"thrash": {
"accuracy": 100,
"basePower": 120,
"category": "Physical",
"pp": 10,
"priority": 0,
"flags": {"contact", "protect", "mirror"},
"self": {
"volatileStatus": 'lockedmove'
},
"secondary": False,
"target": "randomNormal",
"type": "Normal"
},
"thunder": {
"accuracy": 70,
"basePower": 110,
"category": "Special",
"pp": 10,
"priority": 0,
"flags": {"protect", "mirror"},
"secondary": {
"chance": 30,
"status": 'par'
},
"target": "normal",
"type": "Electric"
},
"thunderfang": {
"accuracy": 95,
"basePower": 65,
"category": "Physical",
"pp": 15,
"priority": 0,
"flags": {"bite", "contact", "protect", "mirror"},
"secondary": [
{
"chance": 10,
"status": 'par'
}, {
"chance": 10,
"volatileStatus": 'flinch'
}
],
"target": "normal",
"type": "Electric"
},
"thunderpunch": {
"accuracy": 100,
"basePower": 75,
"category": "Physical",
"pp": 15,
"priority": 0,
"flags": {"contact", "protect", "mirror", "punch"},
"secondary": {
"chance": 10,
"status": 'par'
},
"target": "normal",
"type": "Electric"
},
"thundershock": {
"accuracy": 100,
"basePower": 40,
"category": "Special",
"pp": 30,
"priority": 0,
"flags": {"protect", "mirror"},
"secondary": {
"chance": 10,
"status": 'par'
},
"target": "normal",
"type": "Electric"
},
"thunderwave": {
"accuracy": 100,
"basePower": 0,
"category": "Status",
"pp": 20,
"priority": 0,
"flags": {"protect", "reflectable", "mirror"},
"status": 'par',
"ignoreImmunity": False,
"secondary": False,
"target": "normal",
"type": "Electric"
},
"thunderbolt": {
"accuracy": 100,
"basePower": 90,
"category": "Special",
"pp": 15,
"priority": 0,
"flags": {"protect", "mirror"},
"secondary": {
"chance": 10,
"status": 'par'
},
"target": "normal",
"type": "Electric"
},
"tickle": {
"accuracy": 100,
"basePower": 0,
"category": "Status",
"pp": 20,
"priority": 0,
"flags": {"protect", "reflectable", "mirror"},
"boosts": {
"atk": -1,
"def": -1
},
"secondary": False,
"target": "normal",
"type": "Normal"
},
"topsyturvy": {
"accuracy": True,
"basePower": 0,
"category": "Status",
"pp": 20,
"priority": 0,
"flags": {"protect", "reflectable", "mirror"},
"secondary": False,
"target": "normal",
"type": "Dark"
},
"torment": {
"accuracy": 100,
"basePower": 0,
"category": "Status",
"pp": 15,
"priority": 0,
"flags": {"protect", "reflectable", "mirror", "authentic"},
"volatileStatus": 'torment',
"secondary": False,
"target": "normal",
"type": "Dark"
},
"toxic": {
"accuracy": 90,
"basePower": 0,
"category": "Status",
"pp": 10,
"priority": 0,
"flags": {"protect", "reflectable", "mirror"},
"status": 'tox',
"secondary": False,
"target": "normal",
"type": "Poison"
},
"toxicspikes": {
"accuracy": True,
"basePower": 0,
"category": "Status",
"pp": 20,
"priority": 0,
"flags": {"reflectable", "nonsky"},
"sidecondition": 'toxicspikes',
"secondary": False,
"target": "foeSide",
"type": "Poison"
},
"transform": {
"accuracy": True,
"basePower": 0,
"category": "Status",
"pp": 10,
"priority": 0,
"flags": {},
"secondary": False,
"target": "normal",
"type": "Normal"
},
"triattack": {
"accuracy": 100,
"basePower": 80,
"category": "Special",
"pp": 10,
"priority": 0,
"flags": {"protect", "mirror"},
"secondary": {
"chance": 20,
"status": ['brn', 'frz', 'par'],
},
"target": "normal",
"type": "Normal"
},
"trick": {
"accuracy": 100,
"basePower": 0,
"category": "Status",
"pp": 10,
"priority": 0,
"flags": {"protect", "mirror"}, "secondary": False,
"target": "normal",
"type": "Psychic"
},
"trickortreat": {
"accuracy": 100,
"basePower": 0,
"category": "Status",
"pp": 20,
"priority": 0,
"flags": {"protect", "reflectable", "mirror"},
"secondary": False,
"target": "normal",
"type": "Ghost"
},
"trickroom": {
"accuracy": True,
"basePower": 0,
"category": "Status",
"pp": 5,
"priority": -7,
"flags": {"mirror"}, "secondary": False,
"target": "all",
"type": "Psychic"
},
"triplekick": {
"accuracy": 90,
"basePower": 10,
"category": "Physical",
"pp": 10,
"priority": 0,
"flags": {"contact", "protect", "mirror"},
"multihit": [3, 3],
"secondary": False,
"target": "normal",
"type": "Fighting"
},
"trumpcard": {
"accuracy": True,
"basePower": 0,
"category": "Special",
"pp": 5,
"priority": 0,
"flags": {"contact", "protect", "mirror"},
"secondary": False,
"target": "normal",
"type": "Normal"
},
"twineedle": {
"accuracy": 100,
"basePower": 25,
"category": "Physical",
"pp": 20,
"priority": 0,
"flags": {"protect", "mirror"},
"multihit": [2, 2],
"secondary": {
"chance": 20,
"status": 'psn'
},
"target": "normal",
"type": "Bug"
},
"twister": {
"accuracy": 100,
"basePower": 40,
"category": "Special",
"pp": 20,
"priority": 0,
"flags": {"protect", "mirror"},
"secondary": {
"chance": 20,
"volatileStatus": 'flinch'
},
"target": "allAdjacentFoes",
"type": "Dragon"
},
| |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['GameServerClusterArgs', 'GameServerCluster']
@pulumi.input_type
class GameServerClusterArgs:
def __init__(__self__, *,
cluster_id: pulumi.Input[str],
connection_info: pulumi.Input['GameServerClusterConnectionInfoArgs'],
realm_id: pulumi.Input[str],
description: Optional[pulumi.Input[str]] = None,
labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
location: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a GameServerCluster resource.
:param pulumi.Input[str] cluster_id: Required. The resource name of the game server cluster
:param pulumi.Input['GameServerClusterConnectionInfoArgs'] connection_info: Game server cluster connection information. This information is used to
manage game server clusters.
Structure is documented below.
:param pulumi.Input[str] realm_id: The realm id of the game server realm.
:param pulumi.Input[str] description: Human readable description of the cluster.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: The labels associated with this game server cluster. Each label is a
key-value pair.
:param pulumi.Input[str] location: Location of the Cluster.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
"""
pulumi.set(__self__, "cluster_id", cluster_id)
pulumi.set(__self__, "connection_info", connection_info)
pulumi.set(__self__, "realm_id", realm_id)
if description is not None:
pulumi.set(__self__, "description", description)
if labels is not None:
pulumi.set(__self__, "labels", labels)
if location is not None:
pulumi.set(__self__, "location", location)
if project is not None:
pulumi.set(__self__, "project", project)
@property
@pulumi.getter(name="clusterId")
def cluster_id(self) -> pulumi.Input[str]:
"""
Required. The resource name of the game server cluster
"""
return pulumi.get(self, "cluster_id")
@cluster_id.setter
def cluster_id(self, value: pulumi.Input[str]):
pulumi.set(self, "cluster_id", value)
@property
@pulumi.getter(name="connectionInfo")
def connection_info(self) -> pulumi.Input['GameServerClusterConnectionInfoArgs']:
"""
Game server cluster connection information. This information is used to
manage game server clusters.
Structure is documented below.
"""
return pulumi.get(self, "connection_info")
@connection_info.setter
def connection_info(self, value: pulumi.Input['GameServerClusterConnectionInfoArgs']):
pulumi.set(self, "connection_info", value)
@property
@pulumi.getter(name="realmId")
def realm_id(self) -> pulumi.Input[str]:
"""
The realm id of the game server realm.
"""
return pulumi.get(self, "realm_id")
@realm_id.setter
def realm_id(self, value: pulumi.Input[str]):
pulumi.set(self, "realm_id", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
Human readable description of the cluster.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
The labels associated with this game server cluster. Each label is a
key-value pair.
"""
return pulumi.get(self, "labels")
@labels.setter
def labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "labels", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Location of the Cluster.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def project(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
"""
return pulumi.get(self, "project")
@project.setter
def project(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project", value)
@pulumi.input_type
class _GameServerClusterState:
def __init__(__self__, *,
cluster_id: Optional[pulumi.Input[str]] = None,
connection_info: Optional[pulumi.Input['GameServerClusterConnectionInfoArgs']] = None,
description: Optional[pulumi.Input[str]] = None,
labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
realm_id: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering GameServerCluster resources.
:param pulumi.Input[str] cluster_id: Required. The resource name of the game server cluster
:param pulumi.Input['GameServerClusterConnectionInfoArgs'] connection_info: Game server cluster connection information. This information is used to
manage game server clusters.
Structure is documented below.
:param pulumi.Input[str] description: Human readable description of the cluster.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: The labels associated with this game server cluster. Each label is a
key-value pair.
:param pulumi.Input[str] location: Location of the Cluster.
:param pulumi.Input[str] name: The resource id of the game server cluster, e.g.:
'projects/{project_id}/locations/{location}/realms/{realm_id}/gameServerClusters/{cluster_id}'. For example,
'projects/my-project/locations/{location}/realms/zanzibar/gameServerClusters/my-onprem-cluster'.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
:param pulumi.Input[str] realm_id: The realm id of the game server realm.
"""
if cluster_id is not None:
pulumi.set(__self__, "cluster_id", cluster_id)
if connection_info is not None:
pulumi.set(__self__, "connection_info", connection_info)
if description is not None:
pulumi.set(__self__, "description", description)
if labels is not None:
pulumi.set(__self__, "labels", labels)
if location is not None:
pulumi.set(__self__, "location", location)
if name is not None:
pulumi.set(__self__, "name", name)
if project is not None:
pulumi.set(__self__, "project", project)
if realm_id is not None:
pulumi.set(__self__, "realm_id", realm_id)
@property
@pulumi.getter(name="clusterId")
def cluster_id(self) -> Optional[pulumi.Input[str]]:
"""
Required. The resource name of the game server cluster
"""
return pulumi.get(self, "cluster_id")
@cluster_id.setter
def cluster_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cluster_id", value)
@property
@pulumi.getter(name="connectionInfo")
def connection_info(self) -> Optional[pulumi.Input['GameServerClusterConnectionInfoArgs']]:
"""
Game server cluster connection information. This information is used to
manage game server clusters.
Structure is documented below.
"""
return pulumi.get(self, "connection_info")
@connection_info.setter
def connection_info(self, value: Optional[pulumi.Input['GameServerClusterConnectionInfoArgs']]):
pulumi.set(self, "connection_info", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
Human readable description of the cluster.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
The labels associated with this game server cluster. Each label is a
key-value pair.
"""
return pulumi.get(self, "labels")
@labels.setter
def labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "labels", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Location of the Cluster.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The resource id of the game server cluster, e.g.:
'projects/{project_id}/locations/{location}/realms/{realm_id}/gameServerClusters/{cluster_id}'. For example,
'projects/my-project/locations/{location}/realms/zanzibar/gameServerClusters/my-onprem-cluster'.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def project(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
"""
return pulumi.get(self, "project")
@project.setter
def project(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project", value)
@property
@pulumi.getter(name="realmId")
def realm_id(self) -> Optional[pulumi.Input[str]]:
"""
The realm id of the game server realm.
"""
return pulumi.get(self, "realm_id")
@realm_id.setter
def realm_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "realm_id", value)
class GameServerCluster(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
cluster_id: Optional[pulumi.Input[str]] = None,
connection_info: Optional[pulumi.Input[pulumi.InputType['GameServerClusterConnectionInfoArgs']]] = None,
description: Optional[pulumi.Input[str]] = None,
labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
location: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
realm_id: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
A game server cluster resource.
To get more information about GameServerCluster, see:
* [API documentation](https://cloud.google.com/game-servers/docs/reference/rest/v1beta/projects.locations.realms.gameServerClusters)
* How-to Guides
* [Official Documentation](https://cloud.google.com/game-servers/docs)
## Example Usage
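A minimal usage sketch (the realm, location, namespace and GKE cluster path
below are illustrative placeholders, and the exact connection_info sub-fields
are assumptions that may differ between provider versions):
```python
import pulumi
import pulumi_gcp as gcp
default = gcp.gameservices.GameServerCluster("default",
    cluster_id="my-onprem-cluster",
    realm_id="my-realm",
    location="us-central1",
    description="one of the onprem clusters",
    connection_info=gcp.gameservices.GameServerClusterConnectionInfoArgs(
        namespace="default",
        gke_cluster_reference=gcp.gameservices.GameServerClusterConnectionInfoGkeClusterReferenceArgs(
            cluster="projects/my-project/locations/us-central1/clusters/my-gke-cluster",
        ),
    ))
```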
## Import
GameServerCluster can be imported using any of these accepted formats
```sh
$ pulumi import gcp:gameservices/gameServerCluster:GameServerCluster default projects/{{project}}/locations/{{location}}/realms/{{realm_id}}/gameServerClusters/{{cluster_id}}
```
```sh
$ pulumi import gcp:gameservices/gameServerCluster:GameServerCluster default {{project}}/{{location}}/{{realm_id}}/{{cluster_id}}
```
```sh
$ pulumi import gcp:gameservices/gameServerCluster:GameServerCluster default {{location}}/{{realm_id}}/{{cluster_id}}
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] cluster_id: Required. The resource name of the game server cluster
:param pulumi.Input[pulumi.InputType['GameServerClusterConnectionInfoArgs']] connection_info: Game server cluster connection information. This information is used to
manage game server clusters.
Structure is documented below.
:param pulumi.Input[str] description: Human readable description of the cluster.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: The labels associated with this game server cluster. Each label is a
key-value pair.
:param pulumi.Input[str] location: Location of the Cluster.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
:param pulumi.Input[str] realm_id: The realm id of the game server realm.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: GameServerClusterArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
A game server cluster resource.
To get more information about GameServerCluster, see:
* [API documentation](https://cloud.google.com/game-servers/docs/reference/rest/v1beta/projects.locations.realms.gameServerClusters)
* How-to Guides
* [Official Documentation](https://cloud.google.com/game-servers/docs)
## Example Usage
## Import
GameServerCluster can be imported using any of these accepted formats
```sh
$ pulumi import gcp:gameservices/gameServerCluster:GameServerCluster default projects/{{project}}/locations/{{location}}/realms/{{realm_id}}/gameServerClusters/{{cluster_id}}
```
```sh
$ pulumi import gcp:gameservices/gameServerCluster:GameServerCluster default {{project}}/{{location}}/{{realm_id}}/{{cluster_id}}
```
```sh
$ pulumi import gcp:gameservices/gameServerCluster:GameServerCluster default {{location}}/{{realm_id}}/{{cluster_id}}
```
:param str resource_name: The name of the resource.
:param GameServerClusterArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(GameServerClusterArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
cluster_id: Optional[pulumi.Input[str]] | |
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 9 15:33:47 2019
@author: Bogoclu
"""
import typing
import multiprocessing as mp
import warnings
import numpy as np
from scipy import stats
from .space import FullSpace
from duqo.proba import DS, MC, SUSE, ISPUD, FORM
from duqo.doe.lhs import make_doe
def _check_obj_wgt(obj_weights, num_obj):
""" Check obj_wgt argument passed to CondMom """
if obj_weights is None:
return None
try:
_ = obj_weights[0]
except (TypeError, IndexError):
obj_weights = np.ones(num_obj) * obj_weights
if len(obj_weights) != num_obj:
msg = f"Mismatch between the number of entries ({len(obj_weights)} in "
msg += f"obj_wgt and the number of stochastic objectives ({num_obj})."
raise ValueError(msg)
return np.array(obj_weights).ravel()
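# For example (illustrative values): a scalar weight is broadcast to every
# stochastic objective, while a list of the wrong length raises an error.
#   _check_obj_wgt(2.0, 3)        # -> array([2., 2., 2.])
#   _check_obj_wgt([1.0, 2.0], 3) # -> raises ValueError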
def _check_std_inds(use_std, num_obj):
""" Check use_std argument passed to CondMom and
convert it to a slice definition
"""
if isinstance(use_std, bool):
    use_std = [use_std] * num_obj
if len(use_std) != num_obj:
    msg = "Mismatch between the number of entries in "
    msg += "use_std and the number of stochastic objectives."
    raise ValueError(msg)
return np.array(use_std, dtype=bool)
def _find_integrator_cls(integrator):
"""
Find the Integrator class as defined by the string integrator
"""
integrator = integrator.upper()
if integrator == "DS":
IntCls = DS
elif integrator == "MC":
IntCls = MC
elif integrator == "ISPUD":
IntCls = ISPUD
elif integrator == "FORM":
IntCls = FORM
elif integrator == "SUSE":
IntCls = SUSE
else:
msg = f"Requested integrator {integrator} is not found."
raise ValueError(msg)
return IntCls
def _make_chain(methods: list):
"""Makes the chain given a list of method names"""
try:
first = methods[0]
except TypeError:
raise TypeError(f"methods must be a list of strings or classes, not {type(methods)}")
try:
_ = first.upper()
except AttributeError:
return methods
return [_find_integrator_cls(name.upper()) for name in methods]
def _n_para_chk(num_parallel: int = None):
""" Check the num_parallel argument as passed to CondProb """
n_procs = max(1, mp.cpu_count()) # could cpu_count ever be < 1?
if num_parallel is None or num_parallel > n_procs:
print(f"Number of parallel processes was set to {n_procs}")
return n_procs
return num_parallel
def _default_init(targ_prob: float, acc_max: float, num_inp: int,
num_para: int):
"""Decide the default integrator chain methods and arguments depending
on the problem
Parameters
----------
targ_prob : float
target failure probability
acc_max : float
target tolerance for the estimation
num_inp : int
number of stochastic inputs of the constraints
num_para : int
number of parallel processes to use
Returns
-------
integrators : list
Integrator classes, that are to be initiated
int_args : dict
Keyword arguments to pass to integrators
"""
if targ_prob * acc_max >= 1e-5:
if targ_prob * acc_max >= 1e-4:
integrators = ["MC"]
else:
integrators = ["SUSE", "MC"]
int_args = {"num_starts": 1, "batch_size": 1e5}
elif num_inp < 15:
integrators = ["SUSE", "DS"]
int_args = {"num_starts": 1}
else:
integrators = ["SUSE"]
int_args = {"num_starts": num_para}
print("Using", integrators, "as default chain.")
return integrators, int_args
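# Illustrative check of the selection logic above (values are examples only):
# targ_prob=1e-3 and acc_max=0.1 give a product of 1e-4, so a plain Monte
# Carlo chain is chosen.
#   integrators, int_args = _default_init(1e-3, 0.1, num_inp=10, num_para=4)
#   # -> ["MC"], {"num_starts": 1, "batch_size": 1e5}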
def _is_worker(workers, name):
""" check if name is in workers list of classes"""
for worker in workers:
wname = read_integrator_name(worker)
if name.upper() in wname.upper():
return True
return False
def read_integrator_name(worker):
""" read the name of the integrator instance worker """
name = str(worker).split(".")[-1]
return "".join([c for c in name if c.isalnum()])
class CondMom:
"""Class to estimate conditional means
full_space : FullSpace instance
The definition of the optimization and stochastic spaces
base_doe : int or np.ndarray
set if a new doe should be calculated or the same one should
be transformed during the optimization.
if array, it should have zero mean and unit variance
but the original marginal distributions and correlation.
it should have same number of columns as stochastic variables
used in the objective. If integer, a base_doe with that number of
samples will be created
doe_size : int
The size of the doe to use. If base_doe is a numpy array, this
has no effect and doesn't have to be passed.
obj_wgt : float or iterable of floats:
If not None, these weights will be used for combining the
estimated mean and the variance/std. dev. If iterable, it
must be the same length as the number of stochastic input
variables as used for the objective function.
If None, the variances are returned separately
use_std : bool or iterable of bools
Flag to use standard deviation (True) or the variance for the
estimation. If iterable, it must be the same length as the number
of stochastic input variables as used for the objective function.
"""
def __init__(self, full_space: FullSpace, base_doe: typing.Union[bool, np.ndarray] = True,
doe_size: int = 100, obj_wgt: typing.Optional[typing.Union[float, list, np.ndarray]] = None,
use_std: typing.Union[bool, list] = False):
self.full_space = full_space
num_obj = len(self.full_space.obj_inds["sto"])
self._use_std = _check_std_inds(use_std, num_obj)
self._obj_wgt = _check_obj_wgt(obj_wgt, num_obj)
self._doe_size = None
self._base_doe = None
self.doe_size = doe_size
self.base_doe = base_doe
@property
def base_doe(self):
"""Base doe to use for the moment estimation
Don't set this to an array with truncnorm and lognormal distributions
in the MultiVariate if you don't know exactly what you are doing.
"""
return self._base_doe
@base_doe.setter
def base_doe(self, new_doe):
"""Base doe to use for the moment estimation
Don't set this to an array with truncnorm and lognormal distributions
in the MultiVariate if you don't know exactly what you are doing.
"""
# Sanity checks for base_doe. Using parameters with multiple valid types
# may be an antipattern but it makes configuration easier from
# the user point of view. Tolerate this for a better user experience.
if isinstance(new_doe, np.ndarray):
if self._is_valid_base(new_doe): # raises errors
self._base_doe = new_doe.copy() # Make our copy.
return
try:
make_base_doe = bool(new_doe)
except ValueError:
return
if make_base_doe:
# Prepare doe with zero mean and unit variance
doe = self.full_space.inp_space.sto_obj_base_doe(self.doe_size)
self._base_doe = doe
return
# bool(new_doe) is False: regenerate the doe on every call, so set base_doe to None
self._base_doe = None
return
def _is_valid_base(self, new_doe):
# Assume numpy array
n_sto_obj_inps = len(self.full_space.inp_space.inds["sto_obj"])
if new_doe.shape[1] != n_sto_obj_inps:
msg = "base_doe must be one of None, bool or a 2d array "
msg += f"with shape (num_samples, num_stochastic_objective_variables={n_sto_obj_inps})."
raise TypeError(msg)
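# Loose standardization check: the largest column mean must lie within
# 0.5 of zero and the largest column standard deviation within 0.5 of one.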
if max(abs(new_doe.mean(0).max()), abs(1 - new_doe.std(0).max())) > 0.5:
msg = "base_doe must have zero mean and unit variance."
raise ValueError(msg)
return True
@property
def doe_size(self):
"""Size of the base doe to use for the moment estimation"""
return self._doe_size
@doe_size.setter
def doe_size(self, new_size):
"""Size of the base doe to use for the moment estimation"""
self._doe_size = new_size
if self.base_doe is not None:
self.base_doe = new_size
@property
def obj_wgt(self):
"""Weights for the linear combination of cond. moments"""
return self._obj_wgt
@obj_wgt.setter
def obj_wgt(self, new_obj_wgt):
"""Weights for the linear combination of cond. moments"""
n_obj = len(self.full_space.obj_inds["sto"])
self._obj_wgt = _check_obj_wgt(new_obj_wgt, n_obj)
@property
def use_std(self):
"""Indexes to use std. dev. instead of variance"""
return self._use_std
@use_std.setter
def use_std(self, new_std):
"""Indexes to use std. dev. instead of variance"""
n_obj = len(self.full_space.obj_inds["sto"])
self._use_std = _check_std_inds(new_std, n_obj)
def gen_doe(self, x_opt):
"""Get DoE for the Moment estimation for x_opt"""
if x_opt.ndim == 1:
x_opt = x_opt.reshape((1, -1))
if self.base_doe is None:
return self.full_space.inp_space.sto_obj_doe(x_opt, self._doe_size)
mean, std = self.full_space.inp_space.opt_moms(x_opt)
names = self.full_space.inp_space.mulvar.names
names = [names[i] for i in self.full_space.inp_space.mv_inds("sto_obj")]
# Translating is not sufficient for lognormal and truncated normal
inds = [i for i, x in enumerate(names) if "log" in x or "trunc" in x]
if not inds:
return self.base_doe * std + mean
# Handle lognormal and truncated-normal marginals
binds = np.ones(self.base_doe.shape[1], dtype=bool)
binds[inds] = False
base_doe = self.base_doe.copy()
base_doe[:, binds] = base_doe[:, binds] * std[binds] + mean[binds]
mean = mean[inds]
std = std[inds]
cur_mv = self.full_space.inp_space.opt_mulvar(x_opt, domain="sto_obj")
for ind in inds:
base_doe[:, ind] = cur_mv.dists[ind].marg.ppf(base_doe[:, ind])
return base_doe
def est_mom(self, x_opt):
""" Estimate conditional moments for a single optimization point x_opt
Conditional moments are E[Y | x_opt] and Var[Y | x_opt]
Parameters
----------
x_opt : numpy.ndarray
the coordinates of the optimization variables to compute
the moments
Returns
-------
mus : numpy.ndarray
Estimated means, or if obj_wgt was not None,
the combined mu + obj_wgt * sigma
sigmas : numpy.ndarray
Estimated variances or std. dev. depending on the settings.
only returned if obj_wgt is None.
"""
if x_opt.ndim == 1:
x_opt = x_opt.reshape((1, -1))
doe = self.gen_doe(x_opt)
res = self.full_space.sto_obj(doe, x_opt)
mus = np.mean(res, axis=0)
sigmas = np.zeros(mus.shape)
std_inds = | |
# Repository: spiderkiller13/elevator_gateway (GitHub stars: 1-10)
#!/usr/bin/python
import os
import sys
import time
import json
import signal
from cmd_struct import cmdStruct
from global_var.global_logger import logger
from elevator_cmd import ElevatorCmd
from global_var.global_param import table, IS_SIMULATION, IS_USING_MQTT, AMR_MQTT_NAME, IS_USING_HTTP, is_using_rss, IS_USING_XBEE
#---- weixin alarm to cell phone -----#
if is_using_rss:
from weixin_alarm import alarm
#---- XBEE globaly import -----#
if IS_USING_XBEE:
from global_var.global_xbee import xbee_obj
# os.system("pppd /dev/xbee 9600 lock nodetach noauth crtscts mtu 576 persist maxfail 0 holdoff 1 172.16.58.3:192.168.3.11")
#----- MQTT globaly import -----#
if IS_USING_MQTT:
from global_var.global_mqtt import mqtt_obj, CLIENT_NAME
#----- HTTP globaly import -----#
if IS_USING_HTTP:
import cherrypy
from cherrypy.process.plugins import SimplePlugin
class TaskManager():
'''
This class could be pulled out into its own .py file if there is ever a need for central mission control.
'''
def __init__(self):
self.req_list = []
self.is_sudo_release = False
def addTask(self, cmd):
self.req_list.append(cmd)
#self.printReq()
def delTask(self, idx):
del self.req_list[idx]
#self.printReq()
def printReq(self):
'''
Lazy function: print out req_list
'''
for i in self.req_list:
logger.info("type: " +str(i.type)+" robot_id: "+ str(i.robot_id)+" tid: " + str(i.tid) + " current_floor: " + str(i.current_floor) +" target_floor: " + str(i.target_floor))
# Task arrangement
EC = ElevatorCmd()
TM = TaskManager()
class Elevator_server(object):
'''
Check whether a Cmd is valid; if not, reject it. Put valid Cmds into TM so that elevator_cmd.py can execute them.
'''
#####################
### Utility cmd ###
#####################
def index(self):
msg = 'EV_board server start!'
logger.info(msg)
return msg
def open(self,robot_id=1, tid=0):
'''
Open elevator door (This function will keep door open until DOOR_OPEN_LIMIT_TIME is reached.)
'''
# TODO: check
cmd = cmdStruct('open', robot_id, tid, 0, 0)
TM.addTask(cmd)
return str(tid)
def release_button(self,robot_id=0, tid=0):
# TODO: check
cmd = cmdStruct('release_button', robot_id, tid, 0, 0)
TM.addTask(cmd)
return str(tid)
def close(self,robot_id=0, tid=0):
# TODO: check
cmd = cmdStruct('close', robot_id, tid, 0, 0)
TM.addTask(cmd)
return str(tid)
########################
### Often Used cmd ###
########################
def call(self, robot_id=0, tid=0, current_floor=0, target_floor=0):
'''
AMR call elevator to carry AMR from current_floor to target_floor.
'''
if current_floor in table and target_floor in table:
# Check if same cmd is working
for i in TM.req_list:
if i.tid == tid: # Already have same tid in req_list
logger.warning("[call] REJECT call. Already have same tid in req_list. tid: " + str(tid))
return str(tid) # ignore this cmd
cmd = cmdStruct('call', robot_id, tid, current_floor, target_floor)
logger.info("[call] Accpeted call. "+ str(current_floor) +"F --> "+ str(target_floor) + "F, tid: " + str(tid) + ", robot_id: " + str(robot_id))
TM.addTask(cmd)# There is no identical cmd, add it!
else:
return "Invalid floor request."
return str(tid)
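# A minimal sketch of how an AMR could issue this 'call' cmd over MQTT
# (the topic name here is an assumption; the payload keys match what
# mqtt_cmd_CB parses further below):
#   payload = json.dumps({"cmd": "call", "robot_id": 1, "tid": 42,
#                         "current_floor": 1, "target_floor": 3})
#   mqtt_obj.publish(AMR_MQTT_NAME + "/cmd", payload, qos=1, retain=False)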
def precall(self, robot_id=0, tid=0, current_floor=0, target_floor=0):
'''
Push floor button only
'''
cmd = cmdStruct('precall', robot_id, tid, current_floor, target_floor)
TM.addTask(cmd)
return str(tid)
def entering_done(self, robot_id=0, tid=0, current_floor=0, target_floor=0):
try:
if TM.req_list[0].type != 'call':
logger.error("[entering_done] REJECT entering_done. No matched cmd.")
elif TM.req_list[0].robot_id != robot_id:
logger.error("[entering_done] REJECT entering_done. robot_id not matched.")
TM.req_list[0].total_logger.append(("cmd_entering_done_reject", time.time()))
elif TM.req_list[0].tid != tid:
logger.error("[entering_done] REJECT entering_done. tid not matched.")
TM.req_list[0].total_logger.append(("cmd_entering_done_reject", time.time()))
else:
logger.info("[entering_done] Accpeted entering_done cmd. Match tid: "+ str(tid))
TM.req_list[0].total_logger.append(("cmd_entering_done_accpet", time.time()))
TM.req_list[0].is_entering_done = True
except:
logger.error("[entering_done] REJECT entering_done. No matched cmd.")
return str(tid)
def release(self, robot_id=0, tid=0, current_floor=0, target_floor=0):
'''
release current mission
'''
try:
if TM.req_list[0].type != 'call':
logger.error("[release] REJECT release. No matched cmd.")
elif TM.req_list[0].robot_id != robot_id:
logger.error("[release] REJECT release. robot_id not matched.")
TM.req_list[0].total_logger.append(("cmd_release_reject", time.time()))
elif TM.req_list[0].tid != tid:
logger.error("[release] REJECT release. tid not matched.")
TM.req_list[0].total_logger.append(("cmd_release_reject", time.time()))
else:
logger.info("[release] Accpeted release cmd. Match tid: "+ str(tid))
TM.req_list[0].total_logger.append(("cmd_release_accpet", time.time()))
TM.req_list[0].is_release = True
except:
logger.error("[release] REJECT release. No matched cmd.")
return str(tid)
##################
### Test Cmd ###
##################
def EVledWrite(self,key=0, d=0):
'''
Exposed for elevator testing and tuning; DO NOT use this in the normal process.
'''
if key in table:
if d == "high" or d == "low":
return str(EC.pb.EVledWrite(str(key), str(d)))
else:
return "invalid digital assign"
else:
return "REJECT! Can't find key. Do you type it right?"
def EVledRead(self,key=0):
'''
Exposed for elevator testing and tuning; DO NOT use this in the normal process.
'''
if key in table:
return str(EC.pb.EVledRead(str(key)))
else:
return "REJECT! Can't find key. Do you type it right?"
def reboot(self, robot_id=0, tid=0, pw=0):
'''
Reboot the Raspberry Pi and the L432KC. The L432KC power will be cut before the Raspberry Pi, to make sure the MCU gets a "deep reboot".
'''
if pw == 'elevator_server':
    cmd = cmdStruct('reboot', robot_id, tid, 0, 0)
TM.addTask(cmd)
return str(tid)
else:
return "Wrong password, permission denied."
def sudo_release(self):
'''
for Test
'''
TM.is_sudo_release = True
return "OK"
def weixin_test(self):
'''
for rss Test
'''
if is_using_rss:
alarm.sent("[elevator_server] WeiXin Test !!! ")
else:
logger.info("weixin alarm is not allow, please go to parma.yaml and switch is_using_rss to True.")
return "OK"
######################
### Cmd CallBack ###
######################
Ele_Ser = Elevator_server()
if IS_USING_XBEE:
def xbee_cmd_CB(msg):
# logger.info("Get msg from main : " + msg)
'''
This is a cmd_CB from MQTT subscribe topic
'''
logger.info("[XBEE] xbee_cmd_CB : " + str(msg))
# Parse payload and Add task to req_list[]
cmd_dict = json.loads(msg)
# ------ Utility cmd ------#
if cmd_dict['cmd'] == 'open':
Ele_Ser.open(cmd_dict['robot_id'], cmd_dict['tid'])
elif cmd_dict['cmd'] == 'close':
Ele_Ser.close(cmd_dict['robot_id'], cmd_dict['tid'])
elif cmd_dict['cmd'] == 'release_button':
Ele_Ser.release_button(cmd_dict['robot_id'], cmd_dict['tid'])
# ------ Often Used cmd ------#
elif cmd_dict['cmd'] == 'call':
Ele_Ser.call(cmd_dict['robot_id'], cmd_dict['tid'], cmd_dict['current_floor'],cmd_dict['target_floor'])
elif cmd_dict['cmd'] == 'precall':
Ele_Ser.precall(cmd_dict['robot_id'], cmd_dict['tid'], cmd_dict['current_floor'],cmd_dict['target_floor'])
elif cmd_dict['cmd'] == 'reboot':
Ele_Ser.reboot(cmd_dict['robot_id'], cmd_dict['tid'], cmd_dict['pw'])
elif cmd_dict['cmd'] == 'entering_done':
Ele_Ser.entering_done(cmd_dict['robot_id'], cmd_dict['tid'], cmd_dict['current_floor'],cmd_dict['target_floor'])
elif cmd_dict['cmd'] == 'release':
Ele_Ser.release(cmd_dict['robot_id'], cmd_dict['tid'])
#---- Test cmd -----# TODO TODO
'''
elif cmd_dict['cmd'] == 'EVledWrite':
ans = Ele_Ser.EVledWrite(cmd_dict['key'], cmd_dict['d'])
topic_list = message.topic.split("/")
mqtt_obj.publish(topic_list[0]+"/"+topic_list[1]+"/reply", ans, qos = 1, retain = False)
elif cmd_dict['cmd'] == 'EVledRead':
ans = Ele_Ser.EVledRead(cmd_dict['key'])
topic_list = message.topic.split("/")
mqtt_obj.publish(topic_list[0]+"/"+topic_list[1]+"/reply", ans, qos = 1, retain = False)
elif cmd_dict['cmd'] == 'sudo_release':
Ele_Ser.sudo_release()
elif cmd_dict['cmd'] == 'weixin_test':
Ele_Ser.weixin_test()
'''
else:
logger.error("[MQTT_cmd_CM] unknow cmd ")
if IS_USING_MQTT:
def mqtt_cmd_CB(client, userdata, message):
'''
This is a cmd_CB from MQTT subscribe topic
'''
logger.info("[MQTT] cmd_CB : " + str(message.payload) + "(Q" + str(message.qos) + ", R" + str(message.retain) + ")")
# Parse payload and Add task to req_list[]
try:
cmd_dict = json.loads(message.payload.decode())
except:
logger.error("[MQTT_cmd_CM] invalid cmd formet ")
return
# ------ Utility cmd ------#
if cmd_dict['cmd'] == 'open':
Ele_Ser.open(cmd_dict['robot_id'], cmd_dict['tid'])
elif cmd_dict['cmd'] == 'close':
Ele_Ser.close(cmd_dict['robot_id'], cmd_dict['tid'])
elif cmd_dict['cmd'] == 'release_button':
Ele_Ser.release_button(cmd_dict['robot_id'], cmd_dict['tid'])
# ------ Often Used cmd ------#
elif cmd_dict['cmd'] == 'call':
Ele_Ser.call(cmd_dict['robot_id'], cmd_dict['tid'], cmd_dict['current_floor'],cmd_dict['target_floor'])
elif cmd_dict['cmd'] == 'precall':
Ele_Ser.precall(cmd_dict['robot_id'], cmd_dict['tid'], cmd_dict['current_floor'],cmd_dict['target_floor'])
elif cmd_dict['cmd'] == 'reboot':
Ele_Ser.reboot(cmd_dict['robot_id'], cmd_dict['tid'], cmd_dict['pw'])
elif cmd_dict['cmd'] == 'entering_done':
Ele_Ser.entering_done(cmd_dict['robot_id'], cmd_dict['tid'], cmd_dict['current_floor'],cmd_dict['target_floor'])
elif cmd_dict['cmd'] == 'release':
Ele_Ser.release(cmd_dict['robot_id'], cmd_dict['tid'])
#---- Test cmd -----#
elif cmd_dict['cmd'] == 'EVledWrite':
ans = Ele_Ser.EVledWrite(cmd_dict['key'], cmd_dict['d'])
topic_list = message.topic.split("/")
mqtt_obj.publish(topic_list[0]+"/"+topic_list[1]+"/reply", ans, qos = 1, retain = False)
elif cmd_dict['cmd'] == 'EVledRead':
ans = Ele_Ser.EVledRead(cmd_dict['key'])
topic_list = message.topic.split("/")
mqtt_obj.publish(topic_list[0]+"/"+topic_list[1]+"/reply", ans, qos = 1, retain = False)
elif cmd_dict['cmd'] == 'sudo_release':
Ele_Ser.sudo_release()
elif cmd_dict['cmd'] == 'weixin_test':
Ele_Ser.weixin_test()
else:
logger.error("[MQTT_cmd_CM] good cmd forment, but don't know this cmd.")
if IS_USING_HTTP:
class cherrypy_cmd_CB(object):
@cherrypy.expose
def index(self):
msg = "[Cherrypy] EV_board server start!"
logger.info(msg)
return msg
@cherrypy.expose
def open(self,robot_id=1, tid=0):
return Ele_Ser.open(robot_id, tid)
@cherrypy.expose
def release_button(self,robot_id=0, tid=0):
return Ele_Ser.release_button(robot_id, tid)
@cherrypy.expose
def close(self,robot_id=0, tid=0):
return Ele_Ser.close(robot_id, tid)
########################
### Often Used cmd ###
########################
@cherrypy.expose
def call(self, robot_id=0, tid=0, current_floor=0, target_floor=0):
return Ele_Ser.call(robot_id, tid, current_floor, target_floor)
@cherrypy.expose
def precall(self, robot_id=0, tid=0, current_floor=0, target_floor=0):
return Ele_Ser.precall(robot_id, tid, current_floor, target_floor)
@cherrypy.expose
def entering_done(self, robot_id=0, tid=0, current_floor=0, target_floor=0):
return Ele_Ser.entering_done(robot_id, tid, current_floor, target_floor)
@cherrypy.expose
def release(self, robot_id=0, tid=0, current_floor=0, target_floor=0):
return Ele_Ser.release(robot_id, tid, current_floor, target_floor)
##################
### Test Cmd ###
##################
@cherrypy.expose
def EVledWrite(self,key=0, d=0):
return Ele_Ser.EVledWrite(key,d)
@cherrypy.expose
def EVledRead(self,key=0):
return Ele_Ser.EVledRead(key)
@cherrypy.expose
def reboot(self, robot_id=0, tid=0, pw=0):
return Ele_Ser.reboot(robot_id, tid, pw)
@cherrypy.expose
def sudo_release(self):
return Ele_Ser.sudo_release()
@cherrypy.expose
def weixin_test(self):
return Ele_Ser.weixin_test()
def main():
global EC, TM
if TM.is_sudo_release:
logger.error("[cherrpy_expose] SUDO RELEASE ACCPECT!!")
EC.release_button()
TM.is_sudo_release = False
TM.req_list = []
# Routine check DOOR OPEN TIMEOUT
if EC.door_release_checker(): # TIMEOUT
EC.release_button()
if len(TM.req_list) != 0 and TM.req_list[0].type == 'call':
TM.req_list[0].is_timeout_release = True
if len(TM.req_list) == 0: # Nothing to do
pass
else: # Keep doing cmd
if TM.req_list[0].type == 'open':
EC.open()
TM.delTask(0)
elif TM.req_list[0].type == 'close':
EC.close()
TM.delTask(0)
| |
maxz - 0.00847548246383667),
(minx + 0.014916181564331055, maxy - 0.0095299631357193, maxz - 0.00961071252822876),
(minx + 0.013780921697616577, maxy - 0.0095299631357193, maxz - 0.012351423501968384),
(minx + 0.014916181564331055, maxy - 0.0095299631357193, maxz - 0.015092134475708008),
(minx + 0.01765689253807068, maxy - 0.0095299631357193, maxz - 0.016227364540100098),
(minx + 0.02039763331413269, maxy - 0.0095299631357193, maxz - 0.015092134475708008),
(minx + 0.021532833576202393, maxy - 0.0095299631357193, maxz - 0.012351423501968384),
(minx + 0.02039763331413269, maxy - 0.0095299631357193, maxz - 0.00961071252822876),
(minx + 0.01765689253807068, maxy - 0.0095299631357193, maxz - 0.009734481573104858),
(minx + 0.0158064067363739, maxy - 0.0095299631357193, maxz - 0.010500967502593994),
(minx + 0.015039980411529541, maxy - 0.0095299631357193, maxz - 0.012351423501968384),
(minx + 0.0158064067363739, maxy - 0.0095299631357193, maxz - 0.014201879501342773),
(minx + 0.01765689253807068, maxy - 0.0095299631357193, maxz - 0.014968395233154297),
(minx + 0.01950731873512268, maxy - 0.0095299631357193, maxz - 0.014201879501342773),
(minx + 0.020273834466934204, maxy - 0.0095299631357193, maxz - 0.012351423501968384),
(minx + 0.01950731873512268, maxy - 0.0095299631357193, maxz - 0.010500967502593994),
(minx + 0.01765689253807068, maxy - 0.009312078356742859, maxz - 0.009734481573104858),
(minx + 0.0158064067363739, maxy - 0.009312078356742859, maxz - 0.010500967502593994),
(minx + 0.015039980411529541, maxy - 0.009312078356742859, maxz - 0.012351423501968384),
(minx + 0.0158064067363739, maxy - 0.009312078356742859, maxz - 0.014201879501342773),
(minx + 0.01765689253807068, maxy - 0.009312078356742859, maxz - 0.014968395233154297),
(minx + 0.01950731873512268, maxy - 0.009312078356742859, maxz - 0.014201879501342773),
(minx + 0.020273834466934204, maxy - 0.009312078356742859, maxz - 0.012351423501968384),
(minx + 0.01950731873512268, maxy - 0.009312078356742859, maxz - 0.010500967502593994),
(minx + 0.01765689253807068, maxy - 0.009312078356742859, maxz - 0.01099047064781189),
(minx + 0.01669454574584961, maxy - 0.009312078356742859, maxz - 0.011389046907424927),
(minx + 0.016295909881591797, maxy - 0.009312078356742859, maxz - 0.012351423501968384),
(minx + 0.01669454574584961, maxy - 0.009312078356742859, maxz - 0.013313770294189453),
(minx + 0.01765689253807068, maxy - 0.009312078356742859, maxz - 0.013712406158447266),
(minx + 0.01861920952796936, maxy - 0.009312078356742859, maxz - 0.013313770294189453),
(minx + 0.019017815589904785, maxy - 0.009312078356742859, maxz - 0.012351423501968384),
(minx + 0.01861920952796936, maxy - 0.009312078356742859, maxz - 0.011389046907424927),
(minx + 0.01765689253807068, maxy - 0.009312078356742859, maxz - 0.011496275663375854),
(minx + 0.01705223321914673, maxy - 0.009312078356742859, maxz - 0.011746734380722046),
(minx + 0.01680171489715576, maxy - 0.009312078356742859, maxz - 0.012351423501968384),
(minx + 0.01705223321914673, maxy - 0.009312078356742859, maxz - 0.012956112623214722),
(minx + 0.01765689253807068, maxy - 0.009312078356742859, maxz - 0.013206571340560913),
(minx + 0.018261581659317017, maxy - 0.009312078356742859, maxz - 0.012956112623214722),
(minx + 0.018512040376663208, maxy - 0.009312078356742859, maxz - 0.012351423501968384),
(minx + 0.018261581659317017, maxy - 0.009312078356742859, maxz - 0.011746734380722046),
(minx + 0.01765689253807068, maxy - 0.009564712643623352, maxz - 0.009734481573104858),
(minx + 0.0158064067363739, maxy - 0.009564712643623352, maxz - 0.010500967502593994),
(minx + 0.015039980411529541, maxy - 0.009564712643623352, maxz - 0.012351423501968384),
(minx + 0.0158064067363739, maxy - 0.009564712643623352, maxz - 0.014201879501342773),
(minx + 0.01765689253807068, maxy - 0.009564712643623352, maxz - 0.014968395233154297),
(minx + 0.01950731873512268, maxy - 0.009564712643623352, maxz - 0.014201879501342773),
(minx + 0.020273834466934204, maxy - 0.009564712643623352, maxz - 0.012351423501968384),
(minx + 0.01950731873512268, maxy - 0.009564712643623352, maxz - 0.010500967502593994),
(minx + 0.01765689253807068, maxy - 0.009564712643623352, maxz - 0.01099047064781189),
(minx + 0.01669454574584961, maxy - 0.009564712643623352, maxz - 0.011389046907424927),
(minx + 0.016295909881591797, maxy - 0.009564712643623352, maxz - 0.012351423501968384),
(minx + 0.01669454574584961, maxy - 0.009564712643623352, maxz - 0.013313770294189453),
(minx + 0.01765689253807068, maxy - 0.009564712643623352, maxz - 0.013712406158447266),
(minx + 0.01861920952796936, maxy - 0.009564712643623352, maxz - 0.013313770294189453),
(minx + 0.019017815589904785, maxy - 0.009564712643623352, maxz - 0.012351423501968384),
(minx + 0.01861920952796936, maxy - 0.009564712643623352, maxz - 0.011389046907424927),
(minx + 0.01765689253807068, maxy - 0.009564712643623352, maxz - 0.011496275663375854),
(minx + 0.01705223321914673, maxy - 0.009564712643623352, maxz - 0.011746734380722046),
(minx + 0.01680171489715576, maxy - 0.009564712643623352, maxz - 0.012351423501968384),
(minx + 0.01705223321914673, maxy - 0.009564712643623352, maxz - 0.012956112623214722),
(minx + 0.01765689253807068, maxy - 0.009564712643623352, maxz - 0.013206571340560913),
(minx + 0.018261581659317017, maxy - 0.009564712643623352, maxz - 0.012956112623214722),
(minx + 0.018512040376663208, maxy - 0.009564712643623352, maxz - 0.012351423501968384),
(minx + 0.018261581659317017, maxy - 0.009564712643623352, maxz - 0.011746734380722046),
(minx + 0.01765689253807068, maxy - 0.008991599082946777, minz + 0.017180711030960083),
(minx + 0.014916181564331055, maxy - 0.008991599082946777, minz + 0.016045480966567993),
(minx + 0.013780921697616577, maxy - 0.008991606533527374, minz + 0.01330476999282837),
(minx + 0.014916181564331055, maxy - 0.008991606533527374, minz + 0.010564059019088745),
(minx + 0.01765689253807068, maxy - 0.008991606533527374, minz + 0.009428799152374268),
(minx + 0.02039763331413269, maxy - 0.008991606533527374, minz + 0.010564059019088745),
(minx + 0.021532833576202393, maxy - 0.008991606533527374, minz + 0.01330476999282837),
(minx + 0.02039763331413269, maxy - 0.008991599082946777, minz + 0.016045480966567993),
(minx + 0.01765689253807068, maxy - 0.0095299631357193, minz + 0.017180711030960083),
(minx + 0.014916181564331055, maxy - 0.0095299631357193, minz + 0.016045480966567993),
(minx + 0.013780921697616577, maxy - 0.0095299631357193, minz + 0.01330476999282837),
(minx + 0.014916181564331055, maxy - 0.0095299631357193, minz + 0.010564059019088745),
(minx + 0.01765689253807068, maxy - 0.0095299631357193, minz + 0.009428799152374268),
(minx + 0.02039763331413269, maxy - 0.0095299631357193, minz + 0.010564059019088745),
(minx + 0.021532833576202393, maxy - 0.0095299631357193, minz + 0.01330476999282837),
(minx + 0.02039763331413269, maxy - 0.0095299631357193, minz + 0.016045480966567993),
(minx + 0.01765689253807068, maxy - 0.0095299631357193, minz + 0.015921711921691895),
(minx + 0.0158064067363739, maxy - 0.0095299631357193, minz + 0.015155225992202759),
(minx + 0.015039980411529541, maxy - 0.0095299631357193, minz + 0.01330476999282837),
(minx + 0.0158064067363739, maxy - 0.0095299631357193, minz + 0.01145431399345398),
(minx + 0.01765689253807068, maxy - 0.0095299631357193, minz + 0.010687828063964844),
(minx + 0.01950731873512268, maxy - 0.0095299631357193, minz + 0.01145431399345398),
(minx + 0.020273834466934204, maxy - 0.0095299631357193, minz + 0.01330476999282837),
(minx + 0.01950731873512268, maxy - 0.0095299631357193, minz + 0.015155225992202759),
(minx + 0.01765689253807068, maxy - 0.009312078356742859, minz + 0.015921711921691895),
(minx + 0.0158064067363739, maxy - 0.009312078356742859, minz + 0.015155225992202759),
(minx + 0.015039980411529541, maxy - 0.009312078356742859, minz + 0.01330476999282837),
(minx + 0.0158064067363739, maxy - 0.009312078356742859, minz + 0.01145431399345398),
(minx + 0.01765689253807068, maxy - 0.009312078356742859, minz + 0.010687828063964844),
(minx + 0.01950731873512268, maxy - 0.009312078356742859, minz + 0.01145431399345398),
(minx + 0.020273834466934204, maxy - 0.009312078356742859, minz + 0.01330476999282837),
(minx + 0.01950731873512268, maxy - 0.009312078356742859, minz + 0.015155225992202759),
(minx + 0.01765689253807068, maxy - 0.009312078356742859, minz + 0.014665752649307251),
(minx + 0.01669454574584961, maxy - 0.009312078356742859, minz + 0.014267116785049438),
(minx + 0.016295909881591797, maxy - 0.009312078356742859, minz + 0.01330476999282837),
(minx + 0.01669454574584961, maxy - 0.009312078356742859, minz + 0.012342393398284912),
(minx + 0.01765689253807068, maxy - 0.009312078356742859, minz + 0.011943817138671875),
(minx + 0.01861920952796936, maxy - 0.009312078356742859, minz + 0.012342393398284912),
(minx + 0.019017815589904785, maxy - 0.009312078356742859, minz + 0.01330476999282837),
(minx + 0.01861920952796936, maxy - 0.009312078356742859, minz + 0.014267116785049438),
(minx + 0.01765689253807068, maxy - 0.009312078356742859, minz + 0.014159917831420898),
(minx + 0.01705223321914673, maxy - 0.009312078356742859, minz + 0.01390942931175232),
(minx + 0.01680171489715576, maxy - 0.009312078356742859, minz + 0.01330476999282837),
(minx + 0.01705223321914673, maxy - 0.009312078356742859, minz + 0.012700080871582031),
(minx + 0.01765689253807068, maxy - 0.009312078356742859, minz + 0.012449592351913452),
(minx + 0.018261581659317017, maxy - 0.009312078356742859, minz + 0.012700080871582031),
(minx + 0.018512040376663208, maxy - 0.009312078356742859, minz + 0.01330476999282837),
(minx + 0.018261581659317017, maxy - 0.009312078356742859, minz + 0.01390942931175232),
(minx + 0.01765689253807068, maxy - 0.009564712643623352, minz + 0.015921711921691895),
(minx + 0.0158064067363739, maxy - 0.009564712643623352, minz + 0.015155225992202759),
(minx + 0.015039980411529541, maxy - 0.009564712643623352, minz + 0.01330476999282837),
(minx + 0.0158064067363739, maxy - 0.009564712643623352, minz + 0.01145431399345398),
(minx + 0.01765689253807068, maxy - 0.009564712643623352, minz + 0.010687828063964844),
(minx + 0.01950731873512268, maxy - 0.009564712643623352, minz + 0.01145431399345398),
(minx + 0.020273834466934204, maxy - 0.009564712643623352, minz + 0.01330476999282837),
(minx + 0.01950731873512268, maxy - 0.009564712643623352, minz + 0.015155225992202759),
(minx + 0.01765689253807068, maxy - 0.009564712643623352, minz + 0.014665752649307251),
(minx + 0.01669454574584961, maxy - 0.009564712643623352, minz + 0.014267116785049438),
(minx + 0.016295909881591797, maxy - 0.009564712643623352, minz + 0.01330476999282837),
(minx + 0.01669454574584961, maxy - 0.009564712643623352, minz + 0.012342393398284912),
(minx + 0.01765689253807068, maxy - 0.009564712643623352, minz + 0.011943817138671875),
(minx + 0.01861920952796936, maxy - 0.009564712643623352, minz + 0.012342393398284912),
(minx + 0.019017815589904785, maxy - 0.009564712643623352, minz + 0.01330476999282837),
(minx + 0.01861920952796936, maxy - 0.009564712643623352, minz + 0.014267116785049438),
(minx + 0.01765689253807068, maxy - 0.009564712643623352, minz + 0.014159917831420898),
(minx + 0.01705223321914673, maxy - 0.009564712643623352, minz + 0.01390942931175232),
(minx + 0.01680171489715576, maxy - 0.009564712643623352, minz + 0.01330476999282837),
(minx + 0.01705223321914673, maxy - 0.009564712643623352, minz + 0.012700080871582031),
(minx + 0.01765689253807068, maxy - 0.009564712643623352, minz + 0.012449592351913452),
(minx + 0.018261581659317017, maxy - 0.009564712643623352, minz + 0.012700080871582031),
(minx + 0.018512040376663208, maxy - 0.009564712643623352, minz + 0.01330476999282837),
(minx + 0.018261581659317017, maxy - 0.009564712643623352, minz + 0.01390942931175232)]
# Faces
myfaces = [(12, 0, 1, 13), (13, 1, 2, 14), (14, 2, 3, 15), (15, 3, 4, 16), (17, 6, 7, 18),
(18, 7, 8, 19), (19, 8, 9, 20), (20, 9, 10, 21), (21, 10, 11, 22), (22, 11, 0, 12),
(1, 0, 23, 24), (2, 1, 24, 25), (3, 2, 25, 26), (4, 3, 26, 27), (5, 4, | |
"""
%s
%s
"""
import os, sys, operator, math
# copied into this file by preprocess.py:
"""
Note:
This module stems from the days when there were three (almost) competing
Numerical Python implementations around and people wanted to be able
to switch between these implementations in their Python programs.
Nowadays, numpy is the dominating module, and the use of _numpyload and
numpytools is no longer particularly fruitful. For backward compatibility
of scitools, the two modules still exist.
Unified array computing interface
=================================
Numeric, numarray, and numpy can be viewed as three different
implementations of Numerical Python functionality. The present module
enables writing scripts that are independent of the particular choice
of Numeric, numarray, or numpy. That is, the idea is that any of these
modules can be replaced by one of the alternatives, and the script
should still work. This requires the script to only use the set of
instructions that are common to Numeric, numarray, and numpy.
One reason for wanting the flexibility is that the different
implementations may exhibit different computational efficiency in
different applications. It also makes it trivial to adopt new versions
of Numerical Python in old scripts.
Basic Usage
-----------
To achieve a script that makes transparent use of Numeric, numarray, and
numpy, one needs to do one of the following imports::
from scitools.numpytools import *
# or
import scitools.numpytools as N
Then one should never explicitly import Numeric, numarray, or numpy,
and explicitly use functions in these modules as this may cause
different array types to be mixed in the same application. Only call
the functions that were imported by the star or prefix functions by
the N symbol.
What Gets Imported?
-------------------
All symbols from either Numeric, numarray, or numpy are imported
into the global namespace of this numpytools module::
from Numeric import *
#or
from numarray import *
#or
from numpy import *
Also the modules for random arrays, linear algebra, Matlab functions,
and FFT are imported. One problem with switching between Numeric,
numarray, and numpy is that the additional modules for random arrays, etc.,
have different names in the three packages. For example::
Numeric has LinearAlgebra
numarray has numarray.linear_algebra.LinearAlgebra2
numpy has numpy.linalg
The Numeric names are always available in addition to the native names.
For example, an import numpy.linalg is associated with a::
LinearAlgebra = numpy.linalg
Note that the MA module is not imported since it redefines
the repr function (array([1,2]) becomes [1,2] as for a list) if
Numeric is used. The user must always explicitly import this package
if Numeric is used as the basic array module.
Note that the numpytools module also makes some extensions of Numerical
Python available, see the section "Functionality of this module that
extends Numerical Python" (below).
What to use: Numeric, numarray, or numpy?
-----------------------------------------
The present module defines a global variable basic_NumPy holding
either "Numeric", "numarray", or "numpy", depending on which module
that was actually imported.
To determine whether Numeric, numarray, or numpy is to be imported,
the following procedure is applied:
1. The command line arguments are checked for a --numarray,
--Numeric, or --numpy option.
2. If the user has already imported Numeric, numarray, or numpy by an::
import Numeric
#or
import numarray
#or
import numpy
statement, the present module continues to import from the same
module (module in sys.modules is used to check whether it should
be Numeric, numarray, or numpy). If the user has imported more than
one of the three module alternatives, numpy is used.
3. The environment variable NUMPYARRAY is checked.
If this variable contains "numarray", "Numeric", or "numpy" the
corresponding module is imported.
If neither 1., 2., nor 3. determines the import, i.e., the user has not
explicitly indicated what to use, the new numpy is the default choice.
Some Functions for Unified Usage
--------------------------------
Some operations, like finding the maximum and minimum values in an array,
or controlling the output format when printing arrays, have different
syntax in the different Numerical Python implementations. The functions
below attempt to provide a uniform syntax to functionality with
different names in Numeric, numarray, and numpy:
- NumPyArray:
the type used in isinstance(a,NumPyArray) for
checking if a is a NumPy array
- arrmin, arrmax:
compute maximum and minimum of all array entries
(same as amin(a,None) and amax(a,None) in scipy)
- array_output_precision(n):
print arrays with n decimals
- NumPy_type:
returns the type of an array, i.e., "Numeric", "numarray",
or "numpy"
- NumPy_dtype:
returns the type of the data in an array, i.e., 'd', 'i', etc.
- fortran_storage:
transparent transform of an array to column major (Fortran) storage
that preserves the nature (Numeric, numarray, numpy) of the array
Some frequently standard modules like sys, os, and operator are
imported into the namespace of the present module.
"""
import sys, os
# The first task to accomplish in this module is to determine
# whether to use Numeric, numarray, or numpy
basic_NumPy = None # will later hold 'Numeric', 'numarray', or 'numpy'
# check the command line (this code is similar to matplotlib.numerix):
if basic_NumPy is None:
if hasattr(sys, 'argv'): # Apache mod_python has no argv
for _a in sys.argv:
if _a in ["--Numeric", "--numeric", "--NUMERIC"]:
basic_NumPy = 'Numeric'
break
if _a in ["--Numarray", "--numarray", "--NUMARRAY"]:
basic_NumPy = 'numarray'
break
if _a in ["--NumPy", "--numpy", "--NUMPY"]:
basic_NumPy = 'numpy'
break
del _a # don't pollute the global namespace
# check if the user has already done an import Numeric, import numarray,
# or import numpy; use the module that was imported
if basic_NumPy is None:
if 'numpy' in sys.modules:
basic_NumPy = 'numpy'
elif 'numarray' in sys.modules:
basic_NumPy = 'numarray'
elif 'Numeric' in sys.modules:
basic_NumPy = 'Numeric'
# check the environment variable NUMPYARRAY:
if basic_NumPy is None:
if os.environ.has_key('NUMPYARRAY'):
if os.environ['NUMPYARRAY'] == 'numpy':
basic_NumPy = 'numpy'
elif os.environ['NUMPYARRAY'] == 'numarray':
basic_NumPy = 'numarray'
elif os.environ['NUMPYARRAY'] == 'Numeric':
basic_NumPy = 'Numeric'
if basic_NumPy is None: basic_NumPy = 'numpy' # final default choice
if basic_NumPy not in ('Numeric', 'numarray', 'numpy'):
raise ImportError('cannot decide which Numerical Python '\
'implementation to use (ended up with "%s")' % basic_NumPy)
#print 'from', basic_NumPy, 'import *'
# table of equivalent names of Numerical Python modules:
# (used to import modules under Numeric, numarray, or numpy name)
_NumPy_modules = (
('Numeric', 'numarray', 'numpy'),
# umath and Precision are included as part of Numeric, numarray, numpy
('LinearAlgebra', 'numarray.linear_algebra.LinearAlgebra2',
'numpy.linalg'),
('RandomArray', 'numarray.random_array.RandomArray2', 'numpy.random'),
('RNG', '', 'numpy.random'),
('FFT', 'numarray.fft', 'numpy.fft'),
('MLab', 'numarray.linear_algebra.mlab', 'numpy.oldnumeric.mlab'),
('MA', 'numarray.ma.MA', 'numpy.ma'),
)
if basic_NumPy == 'numpy':
try:
# fix backward compatibility with Numeric names:
import numpy
oldversion = (numpy.version.version[0] == '0')
majorversion = int(numpy.version.version[0])
minorversion = int(numpy.version.version[2])
for _Numeric_name, _dummy1, _numpy_name in _NumPy_modules[1:]:
if oldversion and (_Numeric_name in ['RNG', 'FFT']):
n, module = _numpy_name.split('.')
exec "from %s import %s as %s" %(n, module, _Numeric_name)
elif oldversion and (_Numeric_name == 'MLab'):
from numpy.lib import mlab as MLab
elif (oldversion or (majorversion == 1 and minorversion < 1)) \
and (_Numeric_name == 'MA'):
import numpy.core.ma; MA = numpy.core.ma
elif _numpy_name != '':
exec 'import %s; %s = %s' % \
(_numpy_name, _Numeric_name, _numpy_name)
del _Numeric_name, _dummy1, _numpy_name, _NumPy_modules
from numpy import *
if not oldversion:
# get the old names too (NewAxis, Float, etc.):
from numpy.oldnumeric import *
del oldversion
# define new names compatible with Numeric:
LinearAlgebra.solve_linear_equations = linalg.solve
LinearAlgebra.inverse = linalg.inv
LinearAlgebra.determinant = linalg.det
LinearAlgebra.eigenvalues = linalg.eigvals
LinearAlgebra.eigenvectors = linalg.eig
except ImportError as e:
raise ImportError('%s\nnumpy import failed!\n'\
'see doc of %s module for how to choose Numeric instead' % \
(e, __name__))
def array_output_precision(no_of_decimals):
"""Set no of decimals in printout of arrays."""
arrayprint.set_precision(no_of_decimals)
def arrmax(a):
"""Compute the maximum of all the entries in a."""
try:
return a.max()
except AttributeError:
# not a NumPy array
if operator.isSequenceType(a):
return max(a) # does not work for nested sequences
elif operator.isNumberType(a):
return a
else:
raise TypeError('arrmax of %s not supported' % type(a))
def arrmin(a):
"""Compute the minimum of all the entries in a."""
try:
return a.min()
except AttributeError:
# not a NumPy array
if operator.isSequenceType(a):
return min(a) # does not work for nested sequences
elif operator.isNumberType(a):
return a
else:
raise TypeError('arrmin of %s not supported' % type(a))
NumPyArray = ndarray
if basic_NumPy == 'numarray':
try:
for _Numeric_name, _numarray_name, _dummy1 in _NumPy_modules[1:]:
if _numarray_name:
exec 'import %s; %s = %s' % \
(_numarray_name, _Numeric_name, _numarray_name)
# RNG is not supported, make an object that gives an error message:
class __Dummy:
def __getattr__(self, name):
raise ImportError('You have chosen the
# Repository: qrefine/qrefine
from __future__ import division
import iotbx.pdb
from cctbx.array_family import flex
import mmtbx.model
import sys
import time
atom_database = {'H' : {'valence' : 1},
#
'C' : {'valence' : 4},
'N' : {'valence' : 3, 'lone pairs' : 1},
'O' : {'valence' : 2},
'F' : {'valence' : 1},
#
'P' : {'valence' : 3, 'lone pairs' : 1},
'S' : {'valence' : 2, 'lone pairs' : 2},
'Cl': {'valence' : 1},
'Ca' : {'valence' : -2, 'metal' : True},
'Cu' : {'valence' : -2,
#'charge' : 2,
'metal' : True},
'Zn' : {'valence' : -2,
#'charge' : 2,
'metal' : True},
}
# via attila
# transition metals need to have a multiplicity set
atom_database['Cu'] = {'valence': 1, 'lone pairs': 1}
def distance2(xyz1, xyz2):
sum = 0
for i in range(3): sum+=(xyz2[i]-xyz1[i])**2
return sum
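# Worked example (a sketch): distance2 returns the *squared* Euclidean distance,
# so no square root is taken:
#   >>> distance2((0.0, 0.0, 0.0), (1.0, 2.0, 2.0))
#   9.0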
class atom_property(dict):
def __init__(self):
for element, data in atom_database.items():
self[element] = data
def __repr__(self):
outl = 'atom properties\n'
for element, data in self.items():
outl += ' %-2s : %s\n' % (element, data)
return outl
def get_valence(self, element, effective=True):
assert effective
return self.get(element.strip(), {}).get('valence', None)
def get_lone_pairs(self, element):
return self.get(element.strip(), {}).get('lone pairs', 0)
def is_metal(self, element):
return self.get(element.strip().capitalize(), {}).get('metal', False)
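# Usage sketch for atom_property, based only on the atom_database table above:
#   >>> props = atom_property()
#   >>> props.get_valence('N'), props.get_lone_pairs('N'), props.is_metal('Zn')
#   (3, 1, True)
#   >>> props.get_valence('Xx') is None   # unknown elements fall back to None
#   True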
class electron_distribution(dict):
def __init__(self,
hierarchy,
grm,
alternative_location_id=None,
alternative_location_index=None,
verbose=False,
):
alternative_location_id='A'
self.properties = atom_property()
self.hierarchy = hierarchy
self.atoms = self.hierarchy.atoms()
self.grm = grm
self.verbose=verbose
if filter(None, hierarchy.get_conformer_indices()):
assert (alternative_location_id is not None or
alternative_location_index is not None)
for atom in hierarchy.atoms():
e = self.properties.get_valence(atom.element)
assert e is not None, ' element %s not found' % atom.element
self[atom.i_seq] = e
self.form_bonds()
def __repr__(self):
show_all = False
show_unpaired = True
show_empty_bonds = True
atoms = self.hierarchy.atoms()
outl = 'elec. dist.\n'
for key, electrons in self.items():
if type(key)==type(tuple([])):
if(show_empty_bonds and electrons==0) or show_all:
outl += ' %s-%s : %d\n' % (atoms[key[0]].quote(),
atoms[key[1]].quote(),
electrons,
)
else:
assert abs(electrons)<10
if(show_unpaired and electrons) or show_all:
outl += ' %s : %d\n' % (atoms[key].quote(), electrons)
return outl
def _generate_atoms(self):
for key, electrons in self.items():
if type(key)==type(tuple([])): continue
yield key
def _generate_bonds(self):
for key, electrons in self.items():
if type(key)==type(tuple([])):
yield key
def __setitem__(self, i_seq, electrons):
if electrons<-1:
if self.properties.get_lone_pairs(self.atoms[i_seq].element):
electrons+=2
dict.__setitem__(self, i_seq, electrons)
def _add_electron_to_bond(self, i_seqs):
if 0:
atoms = self.hierarchy.atoms()
print i_seqs, atoms[i_seqs[0]].quote(), atoms[i_seqs[1]].quote()
print self
self[i_seqs]+=1
self[i_seqs[0]]-=1
self[i_seqs[1]]-=1
if 0: print self
def form_bonds(self, extend_based_on_proximity=False, verbose=False):
if self.verbose or verbose: verbose=1
#verbose=1
atoms = self.hierarchy.atoms()
def _is_max_bond_valence(i_seq):
max_valence = self.properties.get_valence(atoms[i_seq].element)
lp = self.properties.get_lone_pairs(atoms[i_seq].element)
if lp: max_valence += lp*2
for bond in self._generate_bonds():
if i_seq in bond:
max_valence -= self[bond]
if max_valence==0: break
return max_valence==0
def _can_denote_electron_to_covalent_bond(i_seq, j_seq, verbose=False):
if verbose:
print 'processing %s %s' % (atoms[i_seq].quote(), atoms[j_seq].quote())
if self[i_seq]>0 and self[j_seq]>0:
if verbose:
print 'bonding %s %s' % (atoms[i_seq].quote(), atoms[j_seq].quote())
return True
elif self[i_seq]==0 and self[j_seq]==0:
return False
atom1 = atoms[i_seq]
if atom1.element_is_hydrogen() and self[i_seq]==0: return False
atom2 = atoms[j_seq]
if atom2.element_is_hydrogen() and self[j_seq]==0: return False
if _is_max_bond_valence(i_seq) or _is_max_bond_valence(j_seq):
return False
assert i_seq==atom1.i_seq
assert j_seq==atom2.i_seq
if atom1.element_is_hydrogen():
hydrogen = atom1
other = atom2
elif atom2.element_is_hydrogen():
hydrogen = atom2
other = atom1
else:
if self.properties.get_lone_pairs(atom1.element):
lone_pair = atom1
other = atom2
elif self.properties.get_lone_pairs(atom2.element):
lone_pair = atom2
other = atom1
else:
return False
if verbose:
print 'other-lp %s-%s' % (other.quote(), lone_pair.quote())
if self[other.i_seq]>0:
return True
return False
if self.properties.get_lone_pairs(other.element):
#self[other.i_seq]+=2
if verbose: print 'hydrogen-X lone pair TRUE'
return True
return None
###
def is_metal(atom1, atom2):
is_metal_count = [0,1][self.properties.is_metal(atom1.element)]
is_metal_count+= [0,1][self.properties.is_metal(atom2.element)]
if is_metal_count==2: assert 0
return is_metal_count
###
def generate_bonds_from_simple(simple,
sort_on_lone_pairs=False,
):
def _get_sum_lone_pairs(bp):
i_seq, j_seq = bp.i_seqs
lp1 = self.properties.get_lone_pairs(atoms[i_seq].element)
lp2 = self.properties.get_lone_pairs(atoms[j_seq].element)
return lp1+lp2
def _sort_lone_pairs(bp1, bp2):
slp1 = _get_sum_lone_pairs(bp1)
slp2 = _get_sum_lone_pairs(bp2)
if slp2>slp1: return -1
return 1
if sort_on_lone_pairs:
l = []
for bp in simple:
l.append(bp)
l.sort(_sort_lone_pairs)
for bp in l:
yield bp
else:
assert 0
###
xrs = self.hierarchy.extract_xray_structure(
crystal_symmetry=self.grm.crystal_symmetry)
simple, asu = self.grm.get_all_bond_proxies(sites_cart=xrs.sites_cart())
# need to filter out H-bonds
# look for metal coordination
# this needs to be integrated with GRM to get correct metal coordination
metal_coordination = []
tmp = {}
for bp in simple:
assert bp.i_seqs not in self
i_seq, j_seq = bp.i_seqs
assert i_seq in self
assert j_seq in self
atom1 = atoms[i_seq]
atom2 = atoms[j_seq]
if is_metal(atom1, atom2):
tmp[distance2(atom1.xyz, atom2.xyz)] = (atom1, atom2)
# look for single (non-metal) bonds
for bp in simple:
if is_metal(atoms[bp.i_seqs[0]], atoms[bp.i_seqs[1]]): continue
assert bp.i_seqs not in self
i_seq, j_seq = bp.i_seqs
assert i_seq in self
assert j_seq in self
mc = None
if i_seq in metal_coordination:
mc = atoms[i_seq]
other = atoms[j_seq]
elif j_seq in metal_coordination:
mc = atoms[j_seq]
other = atoms[i_seq]
if mc:
if other.element_is_hydrogen():
continue
self[bp.i_seqs]=0
if _can_denote_electron_to_covalent_bond(i_seq, j_seq):
self._add_electron_to_bond(bp.i_seqs)
if verbose: print 'single: %s-%s\n%s' % (atoms[i_seq].quote(), atoms[j_seq].quote(),self)
# look for double bonds
for bp in generate_bonds_from_simple(simple,
sort_on_lone_pairs=True,
):
if bp.i_seqs not in self: continue
i_seq, j_seq = bp.i_seqs
assert i_seq in self
assert j_seq in self
while self[i_seq]>0 and self[j_seq]>0:
self._add_electron_to_bond(bp.i_seqs)
if verbose: print 'double',self
if verbose: print 'bonding 2',atoms[i_seq].quote(), atoms[j_seq].quote()
# look for hyper-valence bonds
for bp in simple:
if bp.i_seqs not in self: continue
if verbose: print 'hyper',self
i_seq, j_seq = bp.i_seqs
assert i_seq in self
assert j_seq in self
while _can_denote_electron_to_covalent_bond(i_seq,
j_seq,
verbose=verbose):
self._add_electron_to_bond(bp.i_seqs)
# remove HG on sulfur bridge
self.check_sulfur_bridge()
def check_sulfur_bridge(self, verbose=False):
atoms = self.hierarchy.atoms()
for i_seq in self._generate_atoms():
for j_seq in self._generate_atoms():
if j_seq==i_seq: break
atom1 = atoms[i_seq]
atom2 = atoms[j_seq]
if self[i_seq]<0 and self[j_seq]<0:
bond0 = bond1 = bond2 = None
for key in self:
if type(key)==type(tuple([])):
if i_seq in key and j_seq in key:
bond0 = key
elif i_seq in key and not bond1:
other1=list(key)
other1.remove(i_seq)
if atoms[other1[0]].element_is_hydrogen():
bond1 = key
elif j_seq in key and not bond2:
other2=list(key)
other2.remove(j_seq)
if atoms[other2[0]].element_is_hydrogen():
bond2 = key
if bond0 and bond1 and bond2:
if verbose:
print '-'*80
print bond0, bond1, bond2
print atoms[bond0[0]].quote()
print atoms[bond0[1]].quote()
print atoms[bond1[0]].quote()
print atoms[bond1[1]].quote()
print atoms[bond2[0]].quote()
print atoms[bond2[1]].quote()
self[bond1]-=1
self[bond2]-=1
self[bond1[0]]+=1
self[bond1[1]]+=1
self[bond2[0]]+=1
self[bond2[1]]+=1
def extend_based_on_proximity(self):
# use available electrons and proximity
# needs more care or does not need bond proxies
if extend_based_on_proximity and 0:
rc = self.get_possible_covalent_bonds()
for i_seq, j_seq in rc:
if verbose:
print ' forming bond between %s %s' % (atoms[i_seq].quote(),
atoms[j_seq].quote())
assert (i_seq, j_seq) not in self
self[(i_seq, j_seq)] = 1
self[i_seq]-=1
self[j_seq]-=1
def get_possible_covalent_bonds(self):
rc = []
atoms = self.hierarchy.atoms()
for i_seq in self._generate_atoms():
if self[i_seq]<1: continue
for j_seq in self._generate_atoms():
if j_seq==i_seq: break
if self[j_seq]<1: continue
atom1 = atoms[i_seq]
atom2 = atoms[j_seq]
# exclude H-H
if atom1.element_is_hydrogen() and atom2.element_is_hydrogen(): continue
# terminal atoms on a single amino acid C..N
if not (atom1.element_is_hydrogen() or atom2.element_is_hydrogen()):
continue
d2 = distance2(atoms[i_seq].xyz ,atoms[j_seq].xyz)
if atom1.element_is_hydrogen() or atom2.element_is_hydrogen():
if d2<1.5:
rc.append([i_seq, j_seq])
continue
assert d2>9, ' %s-%s is %0.1f' % (atoms[i_seq].quote(),
atoms[j_seq].quote(),
d2,
)
return rc
def validate_atomic_formal_charges(self, verbose=False):
data = {'*' : {'N' : [-1,0,1],
'OXT': [1,0],
},
'LYS' : {'NZ' : [-1]},
'GLU' : {'OE2': [1]},
'ASP' : {'OD2': [1]},
}
rc = []
for i_seq in self._generate_atoms():
atom = self.atoms[i_seq]
residue_data = data.get(atom.parent().resname, {})
residue_data.update(data['*'])
if not residue_data:
if self[i_seq]:
rc.append(i_seq)
assert 0
else:
if self[i_seq] in residue_data.get(atom.name.strip(), [0]): continue
residue_data = data['*'] # needs to be only AA?
if self[i_seq] in residue_data.get(atom.name.strip(), [0]): continue
rc.append(i_seq)
if verbose:
for i_seq in rc:
print i_seq, self.atoms[i_seq].quote()
return rc
def get_total_charge(self):
total=0
for key, electrons in self.items():
if type(key)==type(tuple([])): continue
total+=electrons
return total*-1
def get_charged_atoms(self):
rc = []
atoms = self.hierarchy.atoms()
for key, electrons in self.items():
if type(key)==type(tuple([])): continue
if electrons:
rc.append([ atoms[key],electrons])
return rc
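# Reading of the charge bookkeeping above (an interpretation, not original
# documentation): each atom entry holds the valence electrons still unpaired
# after bond formation, and get_total_charge() negates their sum. For example,
# a carboxylate oxygen left with one unpaired electron contributes -1 to the
# total charge, while a protonated amine nitrogen driven to -1 contributes +1.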
def run(pdb_filename=None,
raw_records=None,
return_formal_charges=False,
verbose=False,
):
if pdb_filename:
# Read file into pdb_input class
inp = iotbx.pdb.input(file_name=pdb_filename)
elif raw_records:
inp = iotbx.pdb.input(lines=raw_records, source_info='lines from PDB')
else:
assert 0
# create a model manager
import StringIO
log = StringIO.StringIO()
default_scope = mmtbx.model.manager.get_default_pdb_interpretation_scope()
working_params = default_scope.extract()
# optional???
working_params.pdb_interpretation.automatic_linking.link_metals=True
model = mmtbx.model.manager(
model_input = inp,
log = log,
)
model.process(make_restraints=True,
pdb_interpretation_params = working_params)
# get xray structure
xrs = model.get_xray_structure()
grm = model.get_restraints_manager()
t0=time.time()
atom_valences = electron_distribution(
model.get_hierarchy(), # needs to be altloc free
model.get_restraints_manager().geometry,
verbose=verbose,
)
if verbose: print
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Face detection
"""
# ######################################################################################################################
# Import required tools
# ######################################################################################################################
import os # File system handling
import cv2 # Computer vision algorithms
import pkg_resources # Access to resources bundled inside packages
import numpy as np # Scientific computing
from datetime import datetime # Date/time handling
# Project-specific
from liberty.samples import play # Example of playing back photo/video data
# ######################################################################################################################
# Messages
# ######################################################################################################################
class Messages(play.Run):
"""Класс для сообщений"""
# ------------------------------------------------------------------------------------------------------------------
# Constructor
# ------------------------------------------------------------------------------------------------------------------
def __init__(self):
super().__init__() # Run the superclass constructor
self._description = self._('Детектирование лиц')
self._description_time = '{}{}' + self._description + ' ...{}'
self._load_faces_model_start = self._('[{}] Загрузка модели "{}" ...')
self._model_not_load = self._('[{}{}{}] Модель "{}" не загружена ...')
self._face_found_in_frame = self._('Лиц: {}')
self._face_not_found_in_frame = self._('Лица не найдены ...')
self._face_precent = '{:.2f}%'
# ######################################################################################################################
# Analysis of the facial characteristics of a person wearing a mask
# ######################################################################################################################
class Detection(Messages):
"""Анализ лицевых характеристик человека в маске"""
# ------------------------------------------------------------------------------------------------------------------
# Constructor
# ------------------------------------------------------------------------------------------------------------------
def __init__(self):
super().__init__() # Run the superclass constructor
self.title_faces_method = self._('Метод') # Method name
self.title_faces_method_en = 'Method' # Method name in English (for the application window)
# Required model file extensions
self._required_extension_models = (
'pbtxt', 'pb', # Face detection with deep learning in OpenCV (TensorFlow)
'prototxt', 'caffemodel', # Face detection with deep learning in OpenCV (Caffe)
)
self._dnn = ('tf', 'caffe') # Neural network backends
# Method metadata
self._packages_functions = {
'opencv_dnn': {
'all': self._('Детектирование лиц с помощью глубокого обучения в OpenCV'),
'en': 'Faces detection with deep learning in OpenCV'
},
}
# Model file names and their configuration files
self._path_to_files_models = {
'opencv_dnn': {
'tf': {
'path_to_model': 'opencv_face_detector_uint8.pb',
'path_to_config_model': 'opencv_face_detector.pbtxt'
},
'caffe': {
'path_to_model': 'res10_300x300_ssd_iter_140000_fp16.caffemodel',
'path_to_config_model': 'deploy.prototxt'
}
}
}
# Path to the models
self._path_to_models = pkg_resources.resource_filename(
'liberty',
os.path.join('modules', 'facesdet', 'models')
)
# Register error cases used when the configuration file is updated automatically
self._automatic_update['faces_model_not_load'] = False # Model not loaded
self._model_faces = None # Face detection model
# Fonts
# 1. For faces
self._fonts.append('face')
# ------------------------------------------------------------------------------------------------------------------
# Properties
# ------------------------------------------------------------------------------------------------------------------
# Get the method name
@property
def title_faces_method(self):
return self._title_faces_method
# Set the method name
@title_faces_method.setter
def title_faces_method(self, name):
self._title_faces_method = name
# Get the method name in English
@property
def title_faces_method_en(self):
return self._title_faces_method_en
# Set the method name in English
@title_faces_method_en.setter
def title_faces_method_en(self, name):
self._title_faces_method_en = name
# Get the method descriptions
@property
def packages_functions(self):
return self._packages_functions
# Get the model
@property
def model_faces(self):
return self._model_faces
# ------------------------------------------------------------------------------------------------------------------
# Public methods
# ------------------------------------------------------------------------------------------------------------------
# Load the model for deep learning face detection in OpenCV
def load_faces_model_opencv_dnn(self, path_to_model = None, path_to_config_model = None, dnn = 'tf', out = True):
"""
Загрузка модели для метода Виолы-Джонса в OpenCV
(str, str, str [, bool]) -> bool
Аргументы:
path_to_model - Путь к модели
path_to_config_model - Путь к конфигурационному файлу модели
dnn - Модель нейронной сети
out - Печатать процесс выполнения
Возвращает: True если модель загружена, в обратном случае False
"""
none = 'DL' # Stand-in for None
# Default path to the model
if path_to_model is None:
path_to_model = none
# Default path to the model configuration file
if path_to_config_model is None:
path_to_config_model = none
# Argument validation
if type(path_to_model) is not str or not path_to_model or type(path_to_config_model) is not str \
or not path_to_config_model or type(dnn) is not str or not dnn or type(out) is not bool:
# Print a message
if out is True:
self._inv_args(__class__.__name__, self.load_faces_model_opencv_dnn.__name__)
return False
# The requested neural network backend is not among the supported ones
if dnn not in self._dnn:
return False
# Default path to the model
if path_to_model is none:
path_to_model = \
os.path.join(self._path_to_models, self._path_to_files_models['opencv_dnn'][dnn]['path_to_model'])
# Default path to the model configuration file
if path_to_config_model is none:
path_to_config_model = os.path.join(
self._path_to_models, self._path_to_files_models['opencv_dnn'][dnn]['path_to_config_model']
)
required_extension_model = None # Required extension of the model file
required_extension_config_model = None # Required extension of the model configuration file
# 8-bit quantized version using TensorFlow
if dnn == self._dnn[0]:
# Required extension of the model file
required_extension_model = self._required_extension_models[1]
# Required extension of the model configuration file
required_extension_config_model = self._required_extension_models[0]
# FP16 version of the original Caffe implementation
if dnn == self._dnn[1]:
# Required extension of the model file
required_extension_model = self._required_extension_models[3]
# Required extension of the model configuration file
required_extension_config_model = self._required_extension_models[2]
# Model file not found
if self.search_file(path_to_model, required_extension_model, False, out) is False:
return False
# Model configuration file not found
if self.search_file(path_to_config_model, required_extension_config_model, False, out) is False:
return False
# Print a message
if out is True:
print(self._load_faces_model_start.format(
datetime.now().strftime(self._format_time),
self.packages_functions['opencv_dnn']['all']
))
try:
# 8-bit quantized version using TensorFlow
if dnn == self._dnn[0]:
# Read the neural network model in TensorFlow format
self._model_faces = cv2.dnn.readNetFromTensorflow(path_to_model, path_to_config_model)
# FP16 version of the original Caffe implementation
if dnn == self._dnn[1]:
# Read the neural network model in Caffe format
self._model_faces = cv2.dnn.readNetFromCaffe(path_to_config_model, path_to_model)
except SystemError:
# Print a message
if out is True:
print(self._model_not_load.format(self.red, datetime.now().strftime(self._format_time), self.end,
os.path.basename(path_to_model)))
return False
# Set the method name
self.title_faces_method = self.packages_functions['opencv_dnn']['all'] # Translated to the current language
self.title_faces_method_en = self.packages_functions['opencv_dnn']['en'] # English
return True
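# Usage sketch (hedged; configuration handling comes from the superclasses and
# their defaults are assumed to be sufficient):
#   det = Detection()
#   det.load_faces_model_opencv_dnn(dnn='tf')            # bundled TF model/config -> True
#   frame_out, boxes = det.opencv_faces_dnn(det.model_faces, bgr_image)
# where `bgr_image` is a numpy.ndarray, e.g. read with cv2.imread(...).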
# Face detection with deep learning in OpenCV
def opencv_faces_dnn(self, net, frame, width = 300, height = 0, conf_threshold = 0.7, draw = True,
draw_precent = True, out = True):
"""
Face detection with deep learning in OpenCV
(cv2.dnn_Net, numpy.ndarray [, int, int, float, bool, bool, bool]) -> tuple or None
Arguments:
net - Neural network
frame - Image
width - Image width for scaling
height - Image height for scaling
conf_threshold - Confidence threshold
draw - Draw the detected face regions on the image
draw_precent - Draw the confidence percentage for each detected face on the image
out - Print progress messages
Returns a tuple:
1. The processed image
2. A list of face coordinates
"""
# Argument validation
if (type(net) is not cv2.dnn_Net or type(frame) is not np.ndarray or len(frame) == 0
or type(height) is not int or height < 0 or type(width) is not int or width < 0
or type(conf_threshold) is not float or conf_threshold < 0 or conf_threshold > 1
or type(draw) is not bool or type(draw_precent) is not bool or type(out) is not bool):
# Print a message
if out is True:
self._inv_args(__class__.__name__, self.opencv_faces_dnn.__name__)
return None
# Image format (number of channels)
if frame.shape[2] == 3:
frame_clone = frame.copy() # Copy the image
elif frame.shape[2] == 4:
frame_clone = cv2.cvtColor(frame, cv2.COLOR_RGBA2RGB) # Drop the alpha channel (makes a copy)
else:
return None
frame_height = frame_clone.shape[0] # Image height
frame_width = frame_clone.shape[1] # Image width
# Width parameter not specified
if not width:
width = frame_width
# Height parameter not specified
if not height:
height = int(frame_height * width / frame_width) # Scale the height relative to the width
# Preprocess the input image
# - Subtract the mean value from each channel
# - Resize the image
blob = cv2.dnn.blobFromImage(frame_clone, 1.0, (width, height), [104, 117, 123], False, False)
net.setInput(blob) # Run the preprocessed input image through the network
detections = net.forward() # Predictions with the detected faces
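# Equivalent standalone call for the preprocessing above (a sketch): for a
# 300x300 network input the blob is built roughly as
#   blob = cv2.dnn.blobFromImage(image, scalefactor=1.0, size=(300, 300),
#                                mean=[104, 117, 123], swapRB=False, crop=False)
# i.e. per-channel means are subtracted and the image is resized, but pixel
# values are not rescaled (scalefactor=1.0).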
faces_boxes = [] # List of face coordinates
# Iterate over all face predictions
for i in range(detections.shape[2]):
confidence = detections[0, 0, i, 2] # Confidence of the current prediction
# Prediction above the confidence threshold
if confidence > conf_threshold:
x1 = int(detections[0, 0, i, 3] * frame_width) # Start coordinate along the X axis
y1 = int(detections[0, 0, i, 4] * frame_height) # Start coordinate along the Y axis
x2 = int(detections[0, 0, i, 5] * frame_width) # End coordinate along the X axis
y2 = int(detections[0, 0, i, 6] * frame_height) # End coordinate along the Y axis
# Get the face bounding rectangle
rectangle_face = self.get_rectangle_face(x1, y1, x2, y2)
# Bounding rectangle coordinates found
if rectangle_face is not None:
x1, y1, x2, y2 = rectangle_face
faces_boxes.append([x1, y1, x2, y2]) # Add the rectangular face region to the list of face coordinates
# Draw the rectangular face region on the image
if draw is True:
# Draw the confidence percentage for each detected face on the image
if draw_precent is True:
label_face = self._face_precent.format(confidence * 100) # Face confidence percentage
# Text dimensions
(width_text_top, height_text_top), (offset_x_top, offset_y_top) = \
self._font['face'].font.getsize(label_face)
# Top-left point of the rectangle
labels_base_coords = [
x1,
y1 - height_text_top - (self._args['face_padding'] * 2) - self._args['face_distance']
]
# Coordinates go out of bounds
if labels_base_coords[1] <= 0:
labels_base_coords[1] = y1
# Repeat until the text fits inside the window
while True:
if labels_base_coords[0] +
# actions/fd_devices.py
import hashlib
import logging
import os
import parted
import shlex
import shutil
import socket
import subprocess
import tempfile
import time
from config.config import *
class Devices:
def __init__(self):
self.mapping_file = MAPPING_FILE
self.usb_ports = USB_PORTS
self.usbdir = USBDIR
self.hub_rows = HUB_ROWS
self.mappings = self.get_mappings()
self.replicator = socket.gethostname()
def get_mappings(self):
'''Opens mapping file, and returns a list with the port numbers in order'''
with open(self.mapping_file) as f:
mappings = f.readlines()
mappings = list(filter(None, [m.rstrip('\n') for m in mappings]))
return mappings
def get_usb_ids(self):
'''Returns a dictionary of the disk-path: usb ids from /dev/disk/by-path. Removes the partition listings'''
try:
usb_ids = {os.path.realpath(os.path.join(self.usbdir, usb)): usb.split(':')[1].split('.') \
for usb in os.listdir(self.usbdir) if 'usb' in usb and 'part' not in usb}
except FileNotFoundError:
return {}
return usb_ids
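# Worked example of the id extraction above, assuming a platform-style by-path
# entry such as on a Raspberry Pi (PCI-style names split differently and would
# not match this indexing):
#   'platform-3f980000.usb-usb-0:1.2.3:1.0-scsi-0:0:0:0'
#       .split(':')[1]   -> '1.2.3'
#       .split('.')      -> ['1', '2', '3']   (hub '1', device address '2.3')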
def get_direct_dev(self):
'''Direct devices must be in the USB port in the config. Generally, '01' '''
dir_dev = {key: '.'.join(val) for key, val in self.get_usb_ids().items() if '.'.join(val) in self.usb_ports}
return dir_dev
def get_dev_loc_from_mapping(self, dev_addr):
'''given the device address, return the device location from the mappings'''
return self.mappings.index(dev_addr) if dev_addr in self.mappings else None
def get_dev_map_location(self, dev_addr):
'''Return the device location with the column number'''
dev_loc = self.get_dev_loc_from_mapping(dev_addr)
if dev_loc == None:
return None
if dev_loc > self.hub_rows - 1:
return (dev_loc - self.hub_rows, 1)
else:
return (dev_loc, 0)
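# Worked example (HUB_ROWS comes from the config; 7 is only illustrative):
# with self.hub_rows == 7, a device at mapping index 9 lands in column 1 at
# row 9 - 7 = 2, i.e. (2, 1), while index 3 stays in column 0 as (3, 0).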
def get_hubs_with_mappings(self, usb_ids):
'''Returns dict of hubs eg: {'1': [('1.2.3', '/dev/sdb', (0, 0)), ('1.3.5', '/dev/sdc', (0, 1))]}'''
hubs = {}
for key, val in usb_ids.items():
dev_name = key
hub = val[0]
if hub in self.usb_ports:
dev_addr = '.'.join(val[1:])
dev_map_location = self.get_dev_map_location(dev_addr)
else:
logging.info(f'Hub {hub} is not in usb port list. Something went wrong')
dev_addr = None
dev_map_location = None
if dev_map_location != None:
if hub not in hubs.keys():
hubs[hub] = [(dev_addr, dev_name, dev_map_location)]
else:
hubs[hub].append((dev_addr, dev_name, dev_map_location))
return hubs
class FdDevice:
'''Actions pertaining to particular devices'''
def __init__(self, **kwargs):
self.port = kwargs.get('port', None)
self.hub = kwargs.get('hub', None)
self.hub_coordinates = kwargs.get('hub_coordinates', None)
self.device = kwargs.get('device', None)
self.device_dir = kwargs.get('device_dir', None)
self.source_mdsums = kwargs.get('source_mdsums', None)
self.devices = Devices()
self.ignore_files = IGNORED_FILES
self.label = DEVICE_LABEL
def get_device_from_port(self):
self.device = None
if self.port:
direct_dev = self.devices.get_direct_dev()
for key, val in direct_dev.items():
if val == self.port:
self.device = key
def mount_device(self, **kwargs):
'''Mount device on a tempdir, and return the directory name '''
device = kwargs.get('device')
if not device:
device = self.device
temp_dir = tempfile.mkdtemp()
cmd = shlex.split(f'mount {device} {temp_dir}')
#print(cmd)
status = self.check_call(cmd, timeout=30)
if status != 0:
logging.info(f'Unable to mount device {device}.')
return None, 1
return temp_dir, 0
def get_file_list(self):
'''mount and list files from (source) device, then unmount'''
try:
directory, status = self.mount_device()
if status == 0:
files = os.listdir(directory)
mount_status = self.check_mountpoint(mount_dir=directory)
#TO DO: check status
return files
else:
return None
except OSError:
return 1
def create_dir(self, directory):
try:
os.makedirs(directory)
except OSError as e:
logging.info(f'Error Creating {directory}. {e}')
return 1
return 0
def create_checksums_files(self, **kwargs):
'''Creates checksums from files in source_dir, or, if none given, to the current device'''
source_dir = kwargs.get('source_dir')
if not source_dir:
source_dir = self.device_dir
pc = 100
adj = 0
else:
pc = 50
adj = 50
start = time.time()
logging.info(f'Creating checksums on {source_dir}')
mdsums = []
self.num_files = self.count_files(source_dir)
if self.num_files > 100:
chunks = self.num_files // 100
elif self.num_files > 2:
chunks = self.num_files // 2
else:
chunks = 1
num_copied = 0
try:
for dname in os.walk(source_dir):
for fname in dname[2]:
mdsums.append(fname + ' ' + hashlib.md5(
open(os.path.join(dname[0], fname), 'rb').read()).hexdigest())
num_copied += 1
if num_copied % chunks == 0:
self.calculate_and_emit(num_copied, self.num_files, pc=pc, adj=adj)
except Exception as e:
logging.error(f'Error creating checksums: {e}')
return 1
logging.info(f'Time creating checksums: {time.time()-start} seconds')
return mdsums
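# Sketch of the record format produced above: each entry is
# '<basename> <md5-hexdigest>', e.g.
#   'data.bin 9e107d9d372bb6826bd81d3542a419d6'   (digest value illustrative)
# Only basenames are stored, so files with the same name in different
# subdirectories are not distinguished by the later set comparison.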
def compare_checksums_files(self, dest_dir):
'''Compare checksums for newly copied files with the stored source checksums'''
mdsums_dest = self.create_checksums_files(source_dir=dest_dir)
if mdsums_dest == 1:
return 1
logging.info('Comparing checksums...')
# Create sets from the two list, and compare
diff = set(self.source_mdsums) ^ set(mdsums_dest)
if diff:
diffs = "\n".join(diff)
logging.info(f'These files differ:\n {diffs}')
return 1
logging.info(f'Totals: {len(self.source_mdsums)}, {len(mdsums_dest)}')
return 0
def delete_all(self, **kwargs):
directory = kwargs.get('directory')
if not directory:
directory = self.device_dir
for root, dirs, files in os.walk(directory, topdown=False):
for name in files:
try:
os.remove(os.path.join(root, name))
except OSError as e:
return 1
for name in dirs:
try:
os.rmdir(os.path.join(root, name))
except OSError as e:
print('rmdir error', e)
return 1
return 0
def delete_ignored(self, **kwargs):
'''Ignored files (listed in config) are removed before copying'''
directory = kwargs.get('directory', None)
if directory == None:
directory = self.device_dir
logging.info('removing unwanted files')
to_delete = [os.path.join(directory, f) for f in os.listdir(directory) if f in self.ignore_files]
logging.info(to_delete)
for the_file in to_delete:
if os.path.isfile(the_file):
logging.info(f'removing file {the_file}')
try:
os.remove(the_file)
except OSError as e:
return 1
elif os.path.isdir(the_file):
logging.info(f'removing dir {the_file}')
for root, dirs, files in os.walk(the_file, topdown=False):
for name in files:
logging.info(f'removing file {name}')
try:
os.remove(os.path.join(root, name))
except OSError as e:
logging.error(e)
return 1
for name in dirs:
logging.info(f'removing dir {name}')
try:
os.rmdir(os.path.join(root, name))
except OSError as e:
logging.error(e)
return 1
try:
os.rmdir(the_file)
except OSError as e:
logging.error(e)
return 1
return 0
def count_files(self, directory):
'''Return number of files in a directory and its subdirectories'''
file_count = 0
if os.path.isdir(directory):
for path, dirs, filenames in os.walk(directory):
file_count += len(filenames)
return file_count
def makedirs(self, dest):
'''Create a given directory, if it doesn't exist already'''
if not os.path.exists(dest):
try:
os.makedirs(dest)
except OSError as oe:
logging.info(f'On creation of {dest}: {oe}')
def calculate_and_emit(self, done, total, **kwargs):
pc = kwargs.get('pc', 100)
adj = kwargs.get('adj', 0)
progress = int(round( (done / float(total)) * pc)) + adj
self.emit(progress)
def copy_files(self, **kwargs):
source_dir = kwargs.get('source_dir', None)
if source_dir == None:
source_dir = self.source_dir
dest_dir = kwargs.get('dest_dir', None)
if dest_dir == None:
dest_dir = self.device_dir
self.num_files = self.count_files(source_dir)
chunks = self.num_files // 100
chunks = 1 if chunks == 0 else chunks
if self.num_files > 0:
self.makedirs(dest_dir)
num_copied = 0
for path, dirs, filenames in os.walk(source_dir):
for directory in dirs:
_dest_dir = path.replace(source_dir,dest_dir)
self.makedirs(os.path.join(_dest_dir, directory))
for sfile in filenames:
src_file = os.path.join(path, sfile)
dest_file = os.path.join(path.replace(source_dir, dest_dir), sfile)
try:
shutil.copy(src_file, dest_file)
except OSError as oe:
logging.info(f'On copy {source_dir} to {dest_dir}: {oe}')
return 1
num_copied += 1
if num_copied % chunks == 0:
pc = 50 if self.checksums else 100
self.calculate_and_emit(num_copied, self.num_files, pc=pc)
return 0
def check_call(self, cmd, **kwargs):
timeout = kwargs.get('timeout')
shell = kwargs.get('shell', False)
device = kwargs.get('device')
if not device:
device = self.device
try:
status = subprocess.check_call(cmd, timeout=timeout, shell=shell)
except subprocess.CalledProcessError as cpe:
logging.info(f'{device}: {cpe}')
status = cpe.returncode
except subprocess.TimeoutExpired as te:
logging.info(f'{device}: {te}')
status = 1
except Exception as e:
logging.info(f'{device}: {e}')
status = 1
return status
def format_device(self, **kwargs):
''' Clear the partition table and recreate a FAT filesystem on the device (shredding the whole drive would take too much time)'''
device = kwargs.get('device')
if not device:
device = self.device
'''Make sure the device is unmounted before beginning'''
self.check_mountpoint(device=device)
logging.info(f'Formatting device {device}...')
try:
dev = parted.getDevice(device)
except parted._ped.IOException as e:
logging.error(e)
return 1
except Exception as e:
logging.error(e)
return 1
''' First clobber it, removing all partitions'''
try:
status = dev.clobber()
except parted._ped.IOException as e:
logging.error(e)
return 1
except Exception as e:
logging.error(e)
return 1
if status == False:
return 1
''' Create a fresh disk'''
try:
disk = parted.freshDisk(dev, 'msdos')
except parted._ped.DiskException as e:
logging.info(f'Error formatting disk {device} {e}')
return 1
except:
logging.info(f'Error formatting disk {device}.')
return 1
try:
disk.commit()
except parted._ped.DiskException as e:
logging.info(f'Error formatting disk {device}. {e}')
return 1
except:
logging.info(f'Error formatting disk {device}.')
return 1
'''# Here it is possible to create partitions with disk using parted, but we'll just format'''
''' Make a file system'''
cmd = shlex.split(f'mkfs.vfat -I {device}')
logging.info(cmd)
status = self.check_call(cmd, timeout=30)
if status != 0:
return 1
cmd = shlex.split(f'fatlabel {device} {self.label}')
status = self.check_call(cmd, timeout=30)
return status
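# Usage sketch (destructive; the device path is an example only):
#   fd = FdDevice(device='/dev/sdX')
#   fd.format_device()   # clobber partitions, fresh msdos table, mkfs.vfat, fatlabel
# A return value of 0 follows the subprocess-style convention used in this class.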
def check_mountpoint(self, **kwargs):
mount_dir = kwargs.get('mount_dir', None)
device = kwargs.get('device', None)
if device == None:
device = self.device
if mount_dir == None:
cmd = shlex.split(f'umount {device}')
status = self.check_call(cmd, timeout=30)
else:
''' Check to see if directory
#!/usr/bin/python3
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from numpy.random import RandomState
from sklearn.metrics import average_precision_score
from torch.utils.data import DataLoader
from dataloader import TestDataset
# regularization terms options
L2 = True
L1 = False
L2_COEFF = 0.00002
PROJECT_CUBE = False
PROJECT_SPHERE = False
class KGEModel(nn.Module):
def __init__(self, model_name, nentity, nrelation, ntriples, hidden_dim, args):
super(KGEModel, self).__init__()
self.model_name = model_name
self.nentity = nentity
self.nrelation = nrelation
self.epsilon = 2.0
self.lmbda = 0.1
self.hidden_dim = hidden_dim
self.idx = 1
self.ruge_rule_penalty = 1
self.alpha = 1000
self.ranks = []
self.gamma = nn.Parameter(
torch.Tensor([args.gamma]),
requires_grad=False
)
self.embedding_range = nn.Parameter(
torch.Tensor([(self.gamma.item() + self.epsilon) / hidden_dim]),
requires_grad=False
)
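# Worked example of the range above (values illustrative): with gamma = 24.0,
# epsilon = 2.0 and hidden_dim = 500, embeddings are initialised uniformly in
# [-(24.0 + 2.0)/500, +(24.0 + 2.0)/500] = [-0.052, 0.052], the usual
# RotatE-style self-adversarial initialisation range.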
self.xi = nn.Parameter(torch.zeros(ntriples, 1))
nn.init.uniform_(
tensor=self.xi,
a= -0.1,
b= 0.1
)
self.xi_neg = nn.Parameter(torch.zeros(ntriples, 1))
nn.init.uniform_(
tensor=self.xi_neg,
a= -0.1,
b= 0.1
)
ent_dim_mult, rel_dim_mult = self.compute_multipliers()
# define entity and relation embeddings
self.entity_dim = ent_dim_mult*hidden_dim
self.relation_dim = rel_dim_mult*hidden_dim
self.entity_embedding = nn.Parameter(torch.zeros(nentity, self.entity_dim))
self.relation_embedding = nn.Parameter(torch.zeros(self.nrelation, self.relation_dim))
self.initialize(self.entity_embedding, nentity, hidden_dim)
self.initialize(self.relation_embedding, nrelation, hidden_dim)
# set up per-relation head/tail rotator embeddings (used by SpacESS)
if self.model_name in ['SpacESS']:
mult = 4 if 'Quat' in self.model_name else 1
self.rotator_head = nn.Parameter(torch.zeros(nrelation, mult*hidden_dim))
self.rotator_tail = nn.Parameter(torch.zeros(nrelation, mult*hidden_dim))
self.initialize(self.rotator_head, nrelation, hidden_dim)
self.initialize(self.rotator_tail, nrelation, hidden_dim)
# set rule info
self.ruge = args.ruge
self.epsilon_inv = .1
self.epsilon_impl = .1
self.epsilon_eq = 0
self.epsilon_sym = 0
self.inject = args.inject
self.rule_weight = {
'inverse': 2.0,
'implication': 1.0,
'symmetry': .01,
'equality': .01,
'ruge': 1
}
if model_name == 'pRotatE':
self.modulus = nn.Parameter(torch.Tensor([[0.5 * self.embedding_range.item()]]))
self.gamma1 = 50; self.gamma2 = 70
#Do not forget to modify this line when you add a new model in the "forward" function
if model_name not in ['TransE', 'DistMult', 'ComplEx', 'RotatE', 'pRotatE', 'SpacESS', 'QuatE', 'TransComplEx']:
raise ValueError('model %s not supported' % model_name)
def compute_multipliers(self):
if self.model_name == 'RotatE':
return 2, 1
if self.model_name in ['SpacESS', 'ComplEx', 'DistMult']:
return 2, 2
if self.model_name in ['QuatE']:
return 4, 4
return 1, 1
def initialize(self, tensor, in_features, out_features):
if 'Quat' not in self.model_name:
nn.init.uniform_(
tensor=tensor,
a=-self.embedding_range.item(),
b=self.embedding_range.item()
)
else: # use quaternion initialization
fan_in = in_features
fan_out = out_features
s = 1. / np.sqrt(2 * in_features)
rng = torch.random.manual_seed(42)
# Generating randoms and purely imaginary quaternions :
kernel_shape = (in_features, out_features)
nweigths = in_features * out_features
wi = torch.FloatTensor(nweigths).uniform_()
wj = torch.FloatTensor(nweigths).uniform_()
wk = torch.FloatTensor(nweigths).uniform_()
# Purely imaginary quaternions unitary
norms = torch.sqrt(wi**2 + wj**2 + wk**2) + 0.0001
wi /= norms
wj /= norms
wk /= norms
wi = wi.reshape(kernel_shape)
wj = wj.reshape(kernel_shape)
wk = wk.reshape(kernel_shape)
modulus = torch.zeros(kernel_shape).uniform_(-s, s)
phase = torch.zeros(kernel_shape).uniform_(-np.pi, np.pi)
weight_r = modulus * torch.cos(phase)
weight_i = modulus * wi * torch.sin(phase)
weight_j = modulus * wj * torch.sin(phase)
weight_k = modulus * wk * torch.sin(phase)
tensor.data = torch.cat((weight_r, weight_i, weight_j, weight_k), dim = 1)
def set_loss(self, loss_name):
self.loss_name = loss_name
loss_fnc = {
'rotate': self.rotate_loss,
'custom': self.custom_loss,
'slide': self.slide_loss,
'quate': self.quate_loss,
'ruge': self.ruge_loss,
'bce': self.bce_logits_loss, # ruge loss; to test models with and without ruge rule addition
'uncertain_loss': self.uncertain_loss,
'limit_loss':self.Limit_Loss
}
if loss_name == 'quate':
self.criterion = nn.Softplus()
if loss_name == 'limit_loss':
self.lda = 0.01
print(self.lda)
if loss_name == 'slide':
self.margin = nn.Parameter(torch.FloatTensor([24.0]), requires_grad = True)
self.lambda1 = 1.
self.sigma = 1.01000100
if loss_name == 'ruge':
self.criterion = nn.BCEWithLogitsLoss(reduction = 'sum')
self.ruge_rule_penalty = .01
if loss_name not in loss_fnc:
raise ValueError('model %s not supported' % loss_name)
self.Loss = loss_fnc[loss_name]
def select_relations(self, indices):
relation_dict = {}
if self.model_name in ['SpacESS']:
relation_head = torch.index_select(
self.rotator_head,
dim=0,
index=indices
).unsqueeze(1)
relation_tail = torch.index_select(
self.rotator_tail,
dim=0,
index=indices
).unsqueeze(1)
relation_dict = {'rotator_head': relation_head, 'rotator_tail': relation_tail}
if self.model_name != 'biRotatE':
relation = torch.index_select(
self.relation_embedding,
dim=0,
index=indices
).unsqueeze(1)
relation_dict['translation'] = relation
return relation_dict
def entities_select(self, indices_heads, indices_tails):
head = torch.index_select(
self.entity_embedding,
dim=0,
index=indices_heads)
tail = torch.index_select(
self.entity_embedding,
dim=0,
index=indices_tails)
return head, tail
def forward(self, idx, sample, mode='single'):
'''
Forward function that calculates the score of a batch of triples.
In 'single' mode, sample is a batch of triples.
In 'head-batch' or 'tail-batch' mode, sample consists of two parts:
the first part is the positive sample, and the second part holds the
candidate entities for the negative samples. This works because negative
and positive samples share two elements of their triple
((head, relation) or (relation, tail)).
'''
self.idx = idx
relation_list = []
if mode == 'single':
batch_size, negative_sample_size = sample.size(0), 1
head, tail = self.entities_select(sample[:, 0], sample[:, 2])
head = head.unsqueeze(1); tail = tail.unsqueeze(1)
relation_dict = self.select_relations(sample[:, 1])
elif mode == 'head-batch':
tail_part, head_part = sample
batch_size, negative_sample_size = head_part.size(0), head_part.size(1)
head, tail = self.entities_select(head_part.view(-1), tail_part[:, 2])
head = head.view(batch_size, negative_sample_size, -1)
tail = tail.unsqueeze(1)
relation_dict = self.select_relations(tail_part[:, 1])
elif mode == 'tail-batch':
head_part, tail_part = sample
batch_size, negative_sample_size = tail_part.size(0), tail_part.size(1)
head, tail = self.entities_select(head_part[:, 0], tail_part.view(-1))
head = head.unsqueeze(1)
tail = tail.view(batch_size, negative_sample_size, -1)
relation_dict = self.select_relations(head_part[:, 1])
else:
raise ValueError('mode %s not supported' % mode)
arg_dict = {
'head': head,
**relation_dict,
'tail': tail
}
if self.model_name not in ['SpacESS']:
arg_dict['mode'] = mode
score = self.compute_score(arg_dict)
return score
def compute_score(self, arg_dict):
model_func = {
'TransE': self.TransE,
'DistMult': self.DistMult,
'ComplEx': self.ComplEx,
'RotatE': self.RotatE,
'pRotatE': self.pRotatE,
'SpacESS': self.SpacESS,
'TransComplEx': self.TransComplEx,
'QuatE': self.QuatE
}
score = model_func[self.model_name](**arg_dict)
return score
def TransE(self, head, translation, tail, mode):
if mode == 'head-batch':
score = head + (translation - tail)
else:
score = (head + translation) - tail
score = torch.norm(score, p = 1, dim = 2)
return score
#return self.gamma.item() - score
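# Worked example for TransE (shapes follow forward(): [batch, n_sample, dim]):
#   h = [0, 0], r = [1, 2], t = [1, 1]  ->  ||h + r - t||_1 = |0| + |1| = 1
# As written this returns the raw L1 distance; the margin form
# gamma - distance is left commented out above, so here smaller is better.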
def DistMult(self, head, translation, tail, mode):
if mode == 'head-batch':
score = head * (translation * tail)
else:
score = (head * translation) * tail
score = score.sum(dim = 2)
return score
def ComplEx(self, head, translation, tail, mode):
re_head, im_head = torch.chunk(head, 2, dim=2)
re_relation, im_relation = torch.chunk(translation, 2, dim=2)
re_tail, im_tail = torch.chunk(tail, 2, dim=2)
if mode == 'head-batch':
re_score = re_relation * re_tail + im_relation * im_tail
im_score = re_relation * im_tail - im_relation * re_tail
score = re_head * re_score + im_head * im_score
else:
re_score = re_head * re_relation - im_head * im_relation
im_score = re_head * im_relation + im_head * re_relation
score = re_score * re_tail + im_score * im_tail
score = score.sum(dim = 2)
return score
def RotatE(self, head, translation, tail, mode):
pi = 3.14159265358979323846
re_head, im_head = torch.chunk(head, 2, dim=2)
re_tail, im_tail = torch.chunk(tail, 2, dim=2)
#Make phases of relations uniformly distributed in [-pi, pi]
'''phase_relation = relation/(self.embedding_range.item()/pi)
re_relation = torch.cos(phase_relation)
im_relation = torch.sin(phase_relation)
'''
re_relation, im_relation = self.extract_relations(translation)
if mode == 'head-batch':
re_score = re_relation * re_tail + im_relation * im_tail
im_score = re_relation * im_tail - im_relation * re_tail
re_score = re_score - re_head
im_score = im_score - im_head
else:
re_score = re_head * re_relation - im_head * im_relation
im_score = re_head * im_relation + im_head * re_relation
re_score = re_score - re_tail
im_score = im_score - im_tail
score = torch.stack([re_score, im_score], dim = 0)
score = score.norm(dim = 0)
score = score.sum(dim = 2)
return score
def extract_relations(self, *args):
pi = 3.14159265358979323846
split_relations = []
for relation in args:
phase_relation = relation/(self.embedding_range.item()/pi)
re_relation = torch.cos(phase_relation)
im_relation = torch.sin(phase_relation)
split_relations.extend([re_relation, im_relation])
return split_relations
def TransComplEx(self, head, translation, tail, mode):
re_head, im_head = torch.chunk(head, 2, dim=2)
re_relation, im_relation = torch.chunk(translation, 2, dim=2)
re_tail, im_tail = torch.chunk(tail, 2, dim=2)
if mode == 'head-batch':
re_score = re_head + re_relation - re_tail
im_score = im_head + im_relation + im_tail
else:
re_score = re_head + re_relation - re_tail
im_score = im_head + im_relation + im_tail
normscore_re = torch.norm(re_score, p=1, dim=2)
normscore_im = torch.norm(im_score, p=1, dim=2)
norm_score = normscore_re + normscore_im
score = norm_score
return score
def SpacESS(self, head, rotator_head, rotator_tail, translation, tail):
re_head, im_head = torch.chunk(head, 2, dim=2)
re_tail, im_tail = torch.chunk(tail, 2, dim=2)
# multipliers
re_relation_head, im_relation_head, re_relation_tail, im_relation_tail = self.extract_relations(
rotator_head, rotator_tail)
re_translation, im_translation = torch.chunk(translation, 2, dim = 2)
re_score_head = re_relation_head * re_head - im_relation_head * im_head
im_score_head = re_relation_head * im_head + im_relation_head * re_head
re_score_tail = re_relation_tail * re_tail + im_relation_tail
default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.portals_id_designs_nk_tags_post_with_http_info(id, nk, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Portal id (required)
:param str nk: Foreign key for designs. (required)
:param Tag data:
:return: Tag
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'nk', 'data']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method portals_id_designs_nk_tags_post" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `portals_id_designs_nk_tags_post`")
# verify the required parameter 'nk' is set
if ('nk' not in params) or (params['nk'] is None):
raise ValueError("Missing the required parameter `nk` when calling `portals_id_designs_nk_tags_post`")
collection_formats = {}
resource_path = '/Portals/{id}/designs/{nk}/tags'.replace('{format}', 'json')
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
if 'nk' in params:
path_params['nk'] = params['nk']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'data' in params:
body_params = params['data']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
# Authentication setting
auth_settings = ['access_token']
return self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Tag',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
collection_formats=collection_formats)
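# Usage sketch (ids and the Tag payload are placeholders):
#   tag = api.portals_id_designs_nk_tags_post_with_http_info('portal-id',
#                                                            'design-id',
#                                                            data=Tag(...))
# Passing callback=... instead makes the call asynchronous and returns the
# request thread, as described in the docstrings above.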
def portals_id_designs_nk_tags_rel_fk_delete(self, id, nk, fk, **kwargs):
"""
Remove the tags relation to an item by id.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.portals_id_designs_nk_tags_rel_fk_delete(id, nk, fk, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Portal id (required)
:param str nk: Foreign key for designs. (required)
:param str fk: Foreign key for tags (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.portals_id_designs_nk_tags_rel_fk_delete_with_http_info(id, nk, fk, **kwargs)
else:
(data) = self.portals_id_designs_nk_tags_rel_fk_delete_with_http_info(id, nk, fk, **kwargs)
return data
def portals_id_designs_nk_tags_rel_fk_delete_with_http_info(self, id, nk, fk, **kwargs):
"""
Remove the tags relation to an item by id.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.portals_id_designs_nk_tags_rel_fk_delete_with_http_info(id, nk, fk, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Portal id (required)
:param str nk: Foreign key for designs. (required)
:param str fk: Foreign key for tags (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'nk', 'fk']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method portals_id_designs_nk_tags_rel_fk_delete" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `portals_id_designs_nk_tags_rel_fk_delete`")
# verify the required parameter 'nk' is set
if ('nk' not in params) or (params['nk'] is None):
raise ValueError("Missing the required parameter `nk` when calling `portals_id_designs_nk_tags_rel_fk_delete`")
# verify the required parameter 'fk' is set
if ('fk' not in params) or (params['fk'] is None):
raise ValueError("Missing the required parameter `fk` when calling `portals_id_designs_nk_tags_rel_fk_delete`")
collection_formats = {}
resource_path = '/Portals/{id}/designs/{nk}/tags/rel/{fk}'.replace('{format}', 'json')
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
if 'nk' in params:
path_params['nk'] = params['nk']
if 'fk' in params:
path_params['fk'] = params['fk']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
# Authentication setting
auth_settings = ['access_token']
return self.api_client.call_api(resource_path, 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
collection_formats=collection_formats)
def portals_id_designs_nk_tags_rel_fk_head(self, id, nk, fk, **kwargs):
"""
Check the existence of tags relation to an item by id.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.portals_id_designs_nk_tags_rel_fk_head(id, nk, fk, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Portal id (required)
:param str nk: Foreign key for designs. (required)
:param str fk: Foreign key for tags (required)
:return: bool
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.portals_id_designs_nk_tags_rel_fk_head_with_http_info(id, nk, fk, **kwargs)
else:
(data) = self.portals_id_designs_nk_tags_rel_fk_head_with_http_info(id, nk, fk, **kwargs)
return data
def portals_id_designs_nk_tags_rel_fk_head_with_http_info(self, id, nk, fk, **kwargs):
"""
Check the existence of tags relation to an item by id.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.portals_id_designs_nk_tags_rel_fk_head_with_http_info(id, nk, fk, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Portal id (required)
:param str nk: Foreign key for designs. (required)
:param str fk: Foreign key for tags (required)
:return: bool
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'nk', 'fk']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method portals_id_designs_nk_tags_rel_fk_head" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `portals_id_designs_nk_tags_rel_fk_head`")
# verify the required parameter 'nk' is set
if ('nk' not in params) or (params['nk'] is None):
raise ValueError("Missing the required parameter `nk` when calling `portals_id_designs_nk_tags_rel_fk_head`")
# verify the required parameter 'fk' is set
if ('fk' not in params) or (params['fk'] is None):
raise ValueError("Missing the required parameter `fk` when calling `portals_id_designs_nk_tags_rel_fk_head`")
collection_formats = {}
resource_path = '/Portals/{id}/designs/{nk}/tags/rel/{fk}'.replace('{format}', 'json')
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
if 'nk' in params:
path_params['nk'] = params['nk']
if 'fk' in params:
path_params['fk'] = params['fk']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
# Authentication setting
auth_settings = ['access_token']
return self.api_client.call_api(resource_path, 'HEAD',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='bool',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
collection_formats=collection_formats)
def portals_id_designs_nk_tags_rel_fk_put(self, id, nk, fk, **kwargs):
"""
Add a related item by id for tags.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.portals_id_designs_nk_tags_rel_fk_put(id, nk, fk, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Portal id (required)
:param str nk: Foreign key for designs. (required)
:param str fk: Foreign key for tags (required)
:param DesignTag data:
:return: DesignTag
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.portals_id_designs_nk_tags_rel_fk_put_with_http_info(id, nk, fk, **kwargs)
else:
(data) = self.portals_id_designs_nk_tags_rel_fk_put_with_http_info(id, nk, fk, **kwargs)
return data
def portals_id_designs_nk_tags_rel_fk_put_with_http_info(self, id, nk, fk, **kwargs):
"""
Add a related item by id for tags.
This method makes a synchronous HTTP request by default.
# Repository: bwalker1/spatial-constrained-clustering-and-pseudotime
# -*- coding: utf-8 -*-
import os, math, shutil
import warnings
# from python_codes.train.train import train
from python_codes.train.clustering import clustering
from python_codes.train.pseudotime import pseudotime
from python_codes.util.util import load_seqfish_mouse_data, preprocessing_data, save_preprocessed_data, load_preprocessed_data, save_features
from scipy.spatial import distance_matrix
warnings.filterwarnings("ignore")
from scipy.sparse import csr_matrix
from python_codes.util.util import *
# explicit imports for names used below (pd, np, sc, anndata); the wildcard import above may also provide them
import numpy as np
import pandas as pd
import scanpy as sc
import anndata
from matplotlib.colors import to_hex
from matplotlib import rcParams
rcParams['font.family'] = 'sans-serif'
rcParams['font.sans-serif'] = ['Arial','Roboto']
rcParams['savefig.dpi'] = 300
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable, inset_locator
from python_codes.util.exchangeable_loom import write_exchangeable_loom
title_sz = 16
####################################
#----------Get Annotations---------#
####################################
def get_clusters(args, dataset, sample_name, method="leiden"):
original_spatial = args.spatial
args.spatial = True
output_dir = f'{args.output_dir}/{get_target_fp(args, dataset, sample_name)}'
pred_clusters = pd.read_csv(f"{output_dir}/{method}.tsv", header=None).values.flatten().astype(str)
args.spatial = original_spatial
return pred_clusters
####################################
#-------------Plotting-------------#
####################################
def plt_setting():
SMALL_SIZE = 10
MEDIUM_SIZE = 12
BIGGER_SIZE = 30
plt.rc('font', size=MEDIUM_SIZE, weight="bold") # controls default text sizes
plt.rc('axes', titlesize=MEDIUM_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
def figure(nrow, ncol, rsz=3., csz=3., wspace=.4, hspace=.5, left=None, right=None):
fig, axs = plt.subplots(nrow, ncol, figsize=(ncol * csz, nrow * rsz))
plt_setting()
plt.subplots_adjust(wspace=wspace, hspace=hspace, left=left, right=right)
return fig, axs
def set_ax_for_expr_plotting(ax):
ax.get_xaxis().set_ticks([])
ax.get_yaxis().set_ticks([])
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * .9, box.height])
ax.invert_yaxis()
return ax
def plot_annotation(args, adata, nrow = 1, scale = 0.045, ncol=4, rsz=2.5, csz=2.8, wspace=.4, hspace=.5, scatter_sz=1.):
fig, axs = figure(nrow, ncol, rsz=rsz, csz=csz, wspace=wspace, hspace=hspace)
if nrow == 1:
for ax in axs:
ax.axis('off')
ax = axs[0]
x, y = adata.obsm["spatial"][:, 0]*scale, adata.obsm["spatial"][:, 1]*scale
prefix = "celltype_mapped_refined"
annotated_cell_types = adata.obs[prefix]
cell_type_strs = annotated_cell_types.cat.categories.astype(str)
cell_type_ints = annotated_cell_types.values.codes
cell_type_colors = list(adata.uns[f'{prefix}_colors'].astype(str))
# colors = np.array([cell_type_colors[item] for item in cell_type_ints])
cm = plt.get_cmap("tab20")
n_cluster = len(cell_type_colors)
for cid in range(n_cluster):
cit = cell_type_ints == cid
color = cm((cid * (n_cluster / (n_cluster - 1.0))) / n_cluster)
ax.scatter(x[cit], y[cit], s=scatter_sz, color=color, label=cell_type_strs[cid], marker=".")
ax.set_facecolor("none")
ax.set_title("Annotation", fontsize=title_sz)
xlim, ylim = None, None
ax.invert_yaxis()
return fig, axs, x, y, xlim, ylim
def plot_clustering(args, adata, sample_name, method="leiden", dataset="seqfish_mouse", cm = plt.get_cmap("tab20"), scale = .62, scatter_sz=1., nrow= 1):
original_spatial = args.spatial
fig, axs, x, y, xlim, ylim = plot_annotation(args, adata, scale=scale, nrow=nrow, ncol=3, rsz=5, csz=5.5, wspace=.3, hspace=.4)
spatials = [False, True]
for sid, spatial in enumerate(spatials):
ax = axs[sid + 1]
args.spatial = spatial
output_dir = f'{args.output_dir}/{get_target_fp(args, dataset, sample_name)}'
pred_clusters = pd.read_csv(f"{output_dir}/{method}.tsv", header=None).values.flatten().astype(int)
uniq_pred = np.unique(pred_clusters)
n_cluster = len(uniq_pred)
for cid, cluster in enumerate(uniq_pred):
color = cm((cid * (n_cluster / (n_cluster - 1.0))) / n_cluster)
ind = pred_clusters == cluster
ax.scatter(x[ind], y[ind], s=scatter_sz, color=color, label=cluster, marker=".")
ax.set_facecolor("none")
title = args.arch if not spatial else "%s + SP" % args.arch
ax.set_title(title, fontsize=title_sz, pad=-30)
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.invert_yaxis()
# box = ax.get_position()
# height_ratio = 1.0
# ax.set_position([box.x0, box.y0, box.width * 0.8, box.height * height_ratio])
# lgnd = ax.legend(loc='center left', fontsize=8, bbox_to_anchor=(1, 0.5), scatterpoints=1, handletextpad=0.1,
# borderaxespad=.1)
# for handle in lgnd.legendHandles:
# handle._sizes = [8]
fig_fp = f"{output_dir}/{method}.pdf"
plt.savefig(fig_fp, dpi=300)
plt.close('all')
args.spatial = original_spatial
def plot_pseudotime(args, adata, sample_name, dataset="seqfish_mouse", cm = plt.get_cmap("gist_rainbow"), scale = 0.62, scatter_sz=1.3, nrow = 1):
original_spatial = args.spatial
fig, axs, x, y, _, _ = plot_annotation(args, adata, scale=scale, nrow=nrow, ncol=3, rsz=5, csz=5.5, wspace=.3, hspace=.4)
spatials = [False, True]
for sid, spatial in enumerate(spatials):
ax = axs[sid + 1]
args.spatial = spatial
output_dir = f'{args.output_dir}/{get_target_fp(args, dataset, sample_name)}'
pseudotimes = pd.read_csv(f"{output_dir}/pseudotime.tsv", header=None).values.flatten().astype(float)
st = ax.scatter(x, y, s=scatter_sz, c=pseudotimes, cmap=cm, marker=".")
ax.invert_yaxis()
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
clb = fig.colorbar(st, cax=cax)
clb.ax.set_ylabel("pseudotime", labelpad=10, rotation=270, fontsize=10, weight='bold')
title = args.arch if not spatial else "%s + SP" % args.arch
ax.set_title(title, fontsize=title_sz)
ax.set_facecolor("none")
fig_fp = f"{output_dir}/pseudotime.pdf"
plt.savefig(fig_fp, dpi=300)
plt.close('all')
args.spatial = original_spatial
def plot_expr_in_ST(args, adata, genes, sample_name, dataset, scatter_sz= 1., cm = plt.get_cmap("RdPu"), n_cols = 4, max_expr_threshold=.0):
args.spatial = True
output_dir = f'{args.output_dir}/{get_target_fp(args, dataset, sample_name)}'
mkdir(output_dir)
n_genes = len(genes)
n_rows = int(math.ceil(n_genes/n_cols))
fig, axs = figure(n_rows, n_cols, rsz=5.5, csz=5.2, wspace=.1, hspace=.3, left=.05, right=.95)
exprs = np.array(adata.X.todense()).astype(float)
all_genes = np.array(adata.var_names)
x, y = adata.obsm["spatial"][:, 0], adata.obsm["spatial"][:, 1]
for gid, gene in enumerate(genes):
row = gid // n_cols
col = gid % n_cols
ax = axs[row][col] if n_rows > 1 else axs[col]
expr = exprs[:, all_genes == gene]
expr = (expr - expr.mean())/expr.std()
ax = set_ax_for_expr_plotting(ax)
st = ax.scatter(x, y, s=scatter_sz, c=expr, cmap=cm, vmin=0, vmax=6)
# if gid == len(genes) - 1:
# divider = make_axes_locatable(ax)
# cax = divider.append_axes("right", size="5%", pad=0.05)
# clb = fig.colorbar(st, cax=cax)
# clb.ax.set_ylabel("Expr.", labelpad=10, rotation=270, fontsize=10, weight='bold')
ax.set_title(gene, fontsize=28, pad=20)
fig_fp = f"{output_dir}/ST_expression.pdf"
plt.savefig(fig_fp, dpi=300)
plt.close('all')
def plot_umap_comparison_with_coord_alpha(args, sample_name, dataset, n_neighbors=15):
methods = ["scanpy", "Seurat", "DGI", "DGI_SP"]
files = ["PCA.tsv", "seurat.PCs.tsv", "features.tsv", "features.tsv"]
nrow, ncol = 1, len(methods)
data_root = f'{args.dataset_dir}/{dataset}/{dataset}/preprocessed'
if os.path.exists(f"{data_root}/adata.h5ad"):
adata_filtered, spatial_graph = load_preprocessed_data(args, dataset, dataset)
else:
adata = load_stereo_seq_data(args)
adata_filtered, spatial_graph = preprocessing_data(args, adata)
save_preprocessed_data(args, dataset, dataset, adata_filtered, spatial_graph)
coord = adata_filtered.obsm['spatial'].astype(float)
x, y = coord[:, 0], coord[:, 1]
normed_x = (x - np.min(x))/(np.max(x) - np.min(x))
normed_y = (y - np.min(y))/(np.max(y) - np.min(y))
normed_c = np.sqrt(normed_x**2 + normed_y**2)
normed_c = (normed_c - np.min(normed_c))/(np.max(normed_c) - np.min(normed_c))
data_root = f'{args.dataset_dir}/{dataset}/{sample_name}'
fig, axs = figure(nrow, ncol, rsz=5.5, csz=6., wspace=.1, hspace=.1, left=.05, right=.95)
for ax in axs:
ax.axis('off')
for mid, method in enumerate(methods):
print(f"Processing {sample_name} {method}")
col = mid % ncol
ax = axs[col]
output_dir = f'{args.output_dir}/{dataset}/{sample_name}/{method}'
umap_positions_fp = f"{output_dir}/umap_positions.tsv"
if not os.path.exists(umap_positions_fp):
file_name = files[mid]
feature_fp = f'{output_dir}/{file_name}'
adata = sc.read_csv(feature_fp, delimiter="\t")
sc.pp.neighbors(adata, n_neighbors=n_neighbors, use_rep='X')
sc.tl.umap(adata)
umap_positions = adata.obsm["X_umap"]
np.savetxt(umap_positions_fp, umap_positions, fmt='%.5f\t%.5f', header='', footer='', comments='')
else:
umap_positions = pd.read_csv(umap_positions_fp, header=None, sep="\t").values.astype(float)
if method != "Seurat":
pred_clusters = pd.read_csv(f"{output_dir}/leiden.tsv", header=None).values.flatten().astype(int)
else:
pred_clusters = pd.read_csv(f"{output_dir}/metadata.tsv", sep="\t")["seurat_clusters"].values.flatten().astype(int)
cluster_names = list(np.unique(pred_clusters))
n_cluster = len(cluster_names)
cm = plt.get_cmap("tab20")
for cid, cluster in enumerate(cluster_names):
ind = pred_clusters == cluster
umap_sub = umap_positions[ind]
alphas = normed_c[ind]
color = to_hex(cm((cid * (n_cluster / (n_cluster - 1.0))) / n_cluster))
color_gradients = linear_gradient(color, n=6)["hex"]
n = umap_sub.shape[0]
colors = np.array([color_gradients[int(alphas[i] // 0.2) + 1] for i in range(n)])
ax.scatter(umap_sub[:, 0], umap_sub[:, 1], s=1, color=colors, label=cluster)
if mid == len(methods) - 1:
box = ax.get_position()
height_ratio = 1.0
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height * height_ratio])
ax.legend(loc='center left', fontsize='x-small', bbox_to_anchor=(1, 0.5), scatterpoints=1, handletextpad=0.05,
borderaxespad=.1)
ax.set_title(method.replace("_", " + "), fontsize=title_sz)
fig_fp = f"{output_dir}/umap_comparison-calpha.pdf"
plt.savefig(fig_fp, dpi=300)
plt.close('all')
def plot_rank_marker_genes_group(args, dataset, sample_name, adata_filtered, method="cluster", top_n_genes=3):
original_spatial = args.spatial
args.spatial = True
pred_clusters = get_clusters(args, dataset, sample_name)
output_dir = f'{args.output_dir}/{get_target_fp(args, dataset, sample_name)}'
adata_filtered.obs[method] = pd.Categorical(pred_clusters)
sc.tl.rank_genes_groups(adata_filtered, method, method='wilcoxon')
# sc.pl.rank_genes_groups(adata_filtered, n_genes=25, ncols=5, fontsize=10, sharey=False, save=f"{sample_name}_ranks_gby_{method}.pdf")
# sc.pl.rank_genes_groups_heatmap(adata_filtered, n_genes=top_n_genes, standard_scale='var', show_gene_labels=True, save=f"{sample_name}_heatmap_gby_{method}.pdf")
sc.pl.rank_genes_groups_dotplot(adata_filtered, n_genes=top_n_genes, standard_scale='var', cmap='bwr', save=f"{sample_name}_mean_expr_gby_{method}.pdf")
# sc.pl.rank_genes_groups_dotplot(adata_filtered, n_genes=top_n_genes, values_to_plot="logfoldchanges", cmap='bwr', vmin=-4, vmax=4, min_logfoldchange=1.5, colorbar_title='log fold change', save=f"{sample_name}_dot_lfc_gby_{method}.pdf")
# sc.pl.rank_genes_groups_matrixplot(adata_filtered, n_genes=top_n_genes, values_to_plot="logfoldchanges", cmap='bwr', vmin=-4, vmax=4, min_logfoldchange=1.5, colorbar_title='log fold change', save=f"{sample_name}_matrix_lfc_gby_{method}.pdf")
# sc.pl.rank_genes_groups_matrixplot(adata_filtered, n_genes=top_n_genes, cmap='bwr', colorbar_title='Mean Expr.', save=f"{sample_name}_matrix_mean_expr_gby_{method}.pdf")
files = [
# f"rank_genes_groups_cluster{sample_name}_ranks_gby_{method}.pdf",
# f"heatmap{sample_name}_heatmap_gby_{method}.pdf",
f"dotplot_{sample_name}_mean_expr_gby_{method}.pdf"#,
# f"dotplot_{sample_name}_dot_lfc_gby_{method}.pdf",
# f"matrixplot_{sample_name}_matrix_lfc_gby_{method}.pdf",
# f"matrixplot_{sample_name}_matrix_mean_expr_gby_{method}.pdf"
]
for file in files:
src_fp = f"./figures/{file}"
target_fp = f"{output_dir}/{file}"
shutil.move(src_fp, target_fp)
args.spatial = original_spatial
cluster_marker_genes_fp = f'{output_dir}/marker_genes_pval_gby_{method}.tsv'
mkdir(os.path.dirname(cluster_marker_genes_fp))
result = adata_filtered.uns['rank_genes_groups']
groups = result['names'].dtype.names
df = pd.DataFrame(
{group + '_' + key[:1]: result[key][group]
for group in groups for key in ['names', 'pvals']})
df.to_csv(cluster_marker_genes_fp, sep="\t", index=False)
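# Hedged driver sketch (illustrative only; the real entry point for these plotting helpers
# is not part of this file). It assumes an `args` namespace exposing the attributes used
# above (output_dir, dataset_dir, spatial, arch) and an already preprocessed AnnData object.
# from types import SimpleNamespace
# args = SimpleNamespace(output_dir='../output', dataset_dir='../data', spatial=True, arch='DGI')
# dataset = sample_name = 'seqfish_mouse'
# adata, spatial_graph = load_preprocessed_data(args, dataset, sample_name)
# plot_clustering(args, adata, sample_name, method='leiden', dataset=dataset)
# plot_pseudotime(args, adata, sample_name, dataset=dataset)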
def plot_pseudotime_comparison(args, adata, sample_name, dataset="seqfish_mouse", cm = plt.get_cmap("gist_rainbow"), scale = 0.045, n_neighbors=50, root_cell_type = None, cell_types=None):
methods = ["Seurat", "monocle", "slingshot", "DGI_SP"]#, "stLearn", "DGI"
files = ["seurat.PCs.tsv", None, None, "features.tsv"]#, "PCs.tsv", "features.tsv"
nrow, ncol = 1, len(methods)
coord = adata.obsm['spatial'].astype(float) * scale
x, y = coord[:, 0], coord[:, 1]
fig, axs = figure(nrow, ncol, rsz=2.4, csz=2.8, wspace=.35, hspace=.3)
for ax in axs:
ax.axis('off')
ax.invert_yaxis()
for mid, method in enumerate(methods):
print(f"Processing {sample_name} {method}")
col = mid % ncol
ax = axs[col]
output_dir = f'{args.output_dir}/{dataset}/{sample_name}/{method}'
pseudotime_fp = f"{output_dir}/pseudotime.tsv"
if not os.path.exists(pseudotime_fp):
file_name = files[mid]
feature_fp = f'{output_dir}/{file_name}'
if file_name.endswith("npz"):
obj = np.load(feature_fp)
adata = anndata.AnnData(obj.f.sedr_feat)
else:
adata = sc.read_csv(feature_fp, delimiter="\t")
sc.pp.neighbors(adata, n_neighbors=n_neighbors, use_rep='X')
sc.tl.umap(adata)
sc.tl.leiden(adata, resolution=.8)
sc.tl.paga(adata)
distances = distance_matrix(adata.X, adata.X)
sum_dists = distances.sum(axis=1)
adata.uns['iroot'] = np.argmax(sum_dists)
if root_cell_type:
descend_dist_inds = sorted(range(len(sum_dists)), key=lambda k: sum_dists[k], reverse=True)
for root_idx in descend_dist_inds:
if cell_types[root_idx] == root_cell_type:
adata.uns['iroot'] = root_idx
break
sc.tl.diffmap(adata)
sc.tl.dpt(adata)
pseudotimes = adata.obs['dpt_pseudotime'].to_numpy()
np.savetxt(pseudotime_fp, pseudotimes, fmt='%.5f', header='', footer='', comments='')
#!/usr/bin/env python3
from termcolor import colored
env = dict()
"""
Set software environmental configuration, not used for now
"""
def set_env(key="", val=""):
global env
env[key] = val
def disas(pos=0, cnt=-1):
ins_list = list()
limit = len(env['data'])
while 1:
byte = hex(env['data'][pos])[2:].rjust(2, '0')
op = opcodes[byte.upper()]
size = int(op[0])
if (pos + size) > limit:
print(colored("Error: can't disassemble opcode", 'red'))
break
s = colored('0x'+hex(pos)[2:].rjust(8, '0'), 'yellow') + ':\t\t'
if int(byte, 16) != 0xff:
for i in list(range(size)):
s += hex(env['data'][pos+i])[2:].rjust(2, '0') + ' '
if size == 3:
s += ' '*4
elif size == 2:
s += ' '*(4+3)
else:
s += ' '*(4+3*2)
else :
s += colored(byte, 'red') + ' '*(11)
if 'j' in op[1].lower() or 'call' in op[1].lower() or 'ret' in op[1].lower():
s += colored(op[1], 'cyan') + ' '
if 'ret' in op[1].lower():
s += '\n'
else:
s += colored(op[1], 'green') + ' '
for i in list(range(size-1)):
s += colored(hex(env['data'][pos+i+1])[2:].rjust(2, '0'), 'green')
if len(op) == 3 and size == 1:
s += colored(op[2], 'green')
elif len(op) == 3:
s += ' (' + colored(op[2], 'green') + ')'
print(s)
ins_list.append(op + [env['data'][pos:pos+size]])
pos += size
cnt -= 1
if cnt == 0 or (cnt < 0 and 'ret' in op[1].lower()) or pos > limit:
break
return ins_list
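# Hedged usage sketch: the helpers in this module assume the raw firmware image has been
# loaded into env['data'] as a bytes object first; the file name below is an assumption.
# with open('firmware.bin', 'rb') as f:
#     set_env('data', f.read())
# disas(0, 20)  # disassemble 20 instructions starting at the reset vector 0x0000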
def dump(unit=1, where=0, cnt=64):
limit = len(env['data'])
num = 0
while cnt != 0:
if num == 4 or num == 0:
print('\n' + colored('0x' + hex(where)[2:].rjust(4, '0'), 'yellow') + ': ', end="")
num = 0
diff = limit - where
if diff <= 0:
break
elif diff != 0 and diff < 3:
s = ''
for i in list(range(diff)):
x = env['data'][where+i]
if x == 0xff:
s += colored(hex(x)[2:], 'red')
else:
s += hex(x)[2:].rjust(2, '0')
break
else:
print(' ', end="")
s = ''
for i in list(range(unit)):
x = env['data'][where+i]
if x == 0xff:
s += colored(hex(x)[2:], 'red')
else:
s += hex(x)[2:].rjust(2, '0')
print(s, end="")
num += 1
where += unit
cnt -= 1
print('\n\n')
def strings(where=0, cnt=1):
limit = len(env['data'])
while cnt != 0:
s = ""
# check bounds before indexing to avoid reading past the end of the data
while where < limit and env['data'][where] != 0:
s += chr(env['data'][where])
where += 1
if where >= limit:
print(s)
return
print(hex(where) + ': ' + s)
cnt -= 1
def finds(s=''):
s = bytes(s, 'ascii')
occur = list()
pos = env['data'].find(s)
while pos != -1:
occur.append(pos)
pos = env['data'].find(s, pos+1)
return occur
def find(seq):
seq = bytes([int(seq[i:i+2],16) for i in range(0, len(seq), 2)])
occur = list()
pos = env['data'].find(seq)
while pos != -1:
occur.append(pos)
pos = env['data'].find(seq, pos+1)
return occur
"""
may produce false positives because there is no alignment constraint on instructions
-> also takes into account jmp + 1 style tricks
"""
def xref(where=0):
xrefs = list()
byte1 = where >> 8
byte2 = where & 0xff
for i in list(range(len(env['data']) - 1)):
if env['data'][i] == byte1 and env['data'][i+1] == byte2:
xrefs.append(i)
# because jmp/call #addr11 is tricky we will parse all such opcodes
# not optimized but should do it for such small firmware
for pos in list(range(len(env['data']) - 1)):
byte = hex(env['data'][pos])[2:].rjust(2, '0')
op = opcodes[byte.upper()]
if 'AJMP' in op[1] or 'ACALL' in op[1]:
jmp_addr = ( ( (((pos + 2) >> 3) << 3 ) | (env['data'][pos] >> 5) ) << 8 ) | env['data'][pos+1]
if where == jmp_addr:
xrefs.append(pos)
for x in xrefs:
disas(x, 3)
print("-------------------")
return xrefs
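# Hedged example combining the search helpers: find every occurrence of an ASCII string
# (the literal is an assumption) and list code that references its address.
# for pos in finds('login'):
#     xref(pos)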
opcodes = {
'00' : ['1', 'NOP'],
'01' : ['2', 'AJMP', 'addr11'],
'02' : ['3', 'LJMP', 'addr16'],
'03' : ['1', 'RR', 'A'],
'04' : ['1', 'INC', 'A'],
'05' : ['2', 'INC', 'direct'],
'06' : ['1', 'INC', '@R0'],
'07' : ['1', 'INC', '@R1'],
'08' : ['1', 'INC', 'R0'],
'09' : ['1', 'INC', 'R1'],
'0A' : ['1', 'INC', 'R2'],
'0B' : ['1', 'INC', 'R3'],
'0C' : ['1', 'INC', 'R4'],
'0D' : ['1', 'INC', 'R5'],
'0E' : ['1', 'INC', 'R6'],
'0F' : ['1', 'INC', 'R7'],
'10' : ['3', 'JBC', 'bit,offset'],
'11' : ['2', 'ACALL', 'addr11'],
'12' : ['3', 'LCALL', 'addr16'],
'13' : ['1', 'RRC', 'A'],
'14' : ['1', 'DEC', 'A'],
'15' : ['2', 'DEC', 'direct'],
'16' : ['1', 'DEC', '@R0'],
'17' : ['1', 'DEC', '@R1'],
'18' : ['1', 'DEC', 'R0'],
'19' : ['1', 'DEC', 'R1'],
'1A' : ['1', 'DEC', 'R2'],
'1B' : ['1', 'DEC', 'R3'],
'1C' : ['1', 'DEC', 'R4'],
'1D' : ['1', 'DEC', 'R5'],
'1E' : ['1', 'DEC', 'R6'],
'1F' : ['1', 'DEC', 'R7'],
'20' : ['3', 'JB', 'bit,offset'],
'21' : ['2', 'AJMP', 'addr11'],
'22' : ['1', 'RET'],
'23' : ['1', 'RL', 'A'],
'24' : ['2', 'ADD', 'A,#immed'],
'25' : ['2', 'ADD', 'A,direct'],
'26' : ['1', 'ADD', 'A,@R0'],
'27' : ['1', 'ADD', 'A,@R1'],
'28' : ['1', 'ADD', 'A,R0'],
'29' : ['1', 'ADD', 'A,R1'],
'2A' : ['1', 'ADD', 'A,R2'],
'2B' : ['1', 'ADD', 'A,R3'],
'2C' : ['1', 'ADD', 'A,R4'],
'2D' : ['1', 'ADD', 'A,R5'],
'2E' : ['1', 'ADD', 'A,R6'],
'2F' : ['1', 'ADD', 'A,R7'],
'30' : ['3', 'JNB', 'bit,offset'],
'31' : ['2', 'ACALL', 'addr11'],
'32' : ['1', 'RETI'],
'33' : ['1', 'RLC', 'A'],
'34' : ['2', 'ADDC', 'A,#immed'],
'35' : ['2', 'ADDC', 'A,direct'],
'36' : ['1', 'ADDC', 'A,@R0'],
'37' : ['1', 'ADDC', 'A,@R1'],
'38' : ['1', 'ADDC', 'A,R0'],
'39' : ['1', 'ADDC', 'A,R1'],
'3A' : ['1', 'ADDC', 'A,R2'],
'3B' : ['1', 'ADDC', 'A,R3'],
'3C' : ['1', 'ADDC', 'A,R4'],
'3D' : ['1', 'ADDC', 'A,R5'],
'3E' : ['1', 'ADDC', 'A,R6'],
'3F' : ['1', 'ADDC', 'A,R7'],
'40' : ['2', 'JC', 'offset'],
'41' : ['2', 'AJMP', 'addr11'],
'42' : ['2', 'ORL', 'direct,A'],
'43' : ['3', 'ORL', 'direct,#immed'],
'44' : ['2', 'ORL', 'A,#immed'],
'45' : ['2', 'ORL', 'A,direct'],
'46' : ['1', 'ORL', 'A,@R0'],
'47' : ['1', 'ORL', 'A,@R1'],
'48' : ['1', 'ORL', 'A,R0'],
'49' : ['1', 'ORL', 'A,R1'],
'4A' : ['1', 'ORL', 'A,R2'],
'4B' : ['1', 'ORL', 'A,R3'],
'4C' : ['1', 'ORL', 'A,R4'],
'4D' : ['1', 'ORL', 'A,R5'],
'4E' : ['1', 'ORL', 'A,R6'],
'4F' : ['1', 'ORL', 'A,R7'],
'50' : ['2', 'JNC', 'offset'],
'51' : ['2', 'ACALL', 'addr11'],
'52' : ['2', 'ANL', 'direct,A'],
'53' : ['3', 'ANL', 'direct,#immed'],
'54' : ['2', 'ANL', 'A,#immed'],
'55' : ['2', 'ANL', 'A,direct'],
'56' : ['1', 'ANL', 'A,@R0'],
'57' : ['1', 'ANL', 'A,@R1'],
'58' : ['1', 'ANL', 'A,R0'],
'59' : ['1', 'ANL', 'A,R1'],
'5A' : ['1', 'ANL', 'A,R2'],
'5B' : ['1', 'ANL', 'A,R3'],
'5C' : ['1', 'ANL', 'A,R4'],
'5D' : ['1', 'ANL', 'A,R5'],
'5E' : ['1', 'ANL', 'A,R6'],
'5F' : ['1', 'ANL', 'A,R7'],
'60' : ['2', 'JZ', 'offset'],
'61' : ['2', 'AJMP', 'addr11'],
'62' : ['2', 'XRL', 'direct,A'],
'63' : ['3', 'XRL', 'direct,#immed'],
'64' : ['2', 'XRL', 'A,#immed'],
'65' : ['2', 'XRL', 'A,direct'],
'66' : ['1', 'XRL', 'A,@R0'],
'67' : ['1', 'XRL', 'A,@R1'],
'68' : ['1', 'XRL', 'A,R0'],
'69' : ['1', 'XRL', 'A,R1'],
'6A' : ['1', 'XRL', 'A,R2'],
'6B' : ['1', 'XRL', 'A,R3'],
'6C' : ['1', 'XRL', 'A,R4'],
'6D' : ['1', 'XRL', 'A,R5'],
'6E' : ['1', 'XRL', 'A,R6'],
'6F' : ['1', 'XRL', 'A,R7'],
'70' : ['2', 'JNZ', 'offset'],
'71' : ['2', 'ACALL', 'addr11'],
'72' : ['2', 'ORL', 'C,bit'],
'73' : ['1', 'JMP', '@A+DPTR'],
'74' : ['2', 'MOV', 'A,#immed'],
'75' : ['3', 'MOV', 'direct,#immed'],
'76' : ['2', 'MOV', '@R0,#immed'],
'77' : ['2', 'MOV', '@R1,#immed'],
'78' : ['2', 'MOV', 'R0,#immed'],
'79' : ['2', 'MOV', 'R1,#immed'],
'7A' : ['2', 'MOV', 'R2,#immed'],
'7B' : ['2', 'MOV', 'R3,#immed'],
'7C' : ['2', 'MOV', 'R4,#immed'],
'7D' : ['2', 'MOV', 'R5,#immed'],
'7E' : ['2', 'MOV', 'R6,#immed'],
'7F' : ['2', 'MOV', 'R7,#immed'],
'80' : ['2', 'SJMP', 'offset'],
'81' : ['2', 'AJMP', 'addr11'],
'82' : ['2', 'ANL', 'C,bit'],
'83' : ['1', 'MOVC', 'A,@A+PC'],
'84' : ['1', 'DIV', 'AB'],
'85' : ['3', 'MOV', 'direct,direct'],
'86' : ['2', 'MOV', 'direct,@R0'],
'87' : ['2', 'MOV', 'direct,@R1'],
'88' : ['2', 'MOV', 'direct,R0'],
'89' : ['2', 'MOV', 'direct,R1'],
'8A' : ['2', 'MOV', 'direct,R2'],
'8B' : ['2', 'MOV', 'direct,R3'],
'8C' : ['2', 'MOV', 'direct,R4'],
'8D' : ['2', 'MOV', 'direct,R5'],
'8E' : ['2', 'MOV', 'direct,R6'],
'8F' : ['2', 'MOV', 'direct,R7'],
'90' : ['3', 'MOV', 'DPTR,#immed'],
'91' : ['2', 'ACALL', 'addr11'],
'92' : ['2', 'MOV', 'bit,C'],
'93' : ['1', 'MOVC', 'A,@A+DPTR'],
'94' : ['2', 'SUBB', 'A,#immed'],
'95' : ['2', 'SUBB', 'A,direct'],
'96' : ['1', 'SUBB', 'A,@R0'],
'97' : ['1', 'SUBB', 'A,@R1'],
'98' : ['1', 'SUBB', 'A,R0'],
'99' : ['1', 'SUBB', 'A,R1'],
'9A' : ['1', 'SUBB', 'A,R2'],
'9B' : ['1', 'SUBB', 'A,R3'],
'9C' : ['1', 'SUBB', 'A,R4'],
'9D' : ['1', 'SUBB', 'A,R5'],
'9E' : ['1', 'SUBB', 'A,R6'],
'9F' : ['1', 'SUBB', 'A,R7'],
'A0' : ['2', 'ORL', 'C,/bit'],
'A1' : ['2', 'AJMP', 'addr11'],
'A2' : ['2', 'MOV', 'C,bit'],
'A3' : ['1', 'INC', 'DPTR'],
'A4' : ['1', 'MUL', 'AB'],
'A5' : ['1', 'DB', '0xA5 (undefined)'],  # reserved opcode; treated as a 1-byte data byte so disas() does not crash on int('')
'A6' : ['2', 'MOV', '@R0,direct'],
'A7' : ['2', 'MOV', '@R1,direct'],
'A8' : ['2', 'MOV', 'R0,direct'],
'A9' : ['2', 'MOV', 'R1,direct'],
'AA' : ['2', 'MOV', 'R2,direct'],
'AB' : ['2', 'MOV', 'R3,direct'],
'AC' : ['2', 'MOV', 'R4,direct'],
'AD' : ['2', 'MOV', 'R5,direct'],
'AE' : ['2', 'MOV', 'R6,direct'],
'AF' : ['2', 'MOV', 'R7,direct'],
'B0' : ['2', 'ANL', 'C,/bit'],
'B1' : ['2', 'ACALL', 'addr11'],
'B2' : ['2', 'CPL', 'bit'],
'B3' : ['1', 'CPL', 'C'],
'B4' : ['3', 'CJNE', 'A,#immed,offset'],
'B5' : ['3', 'CJNE', 'A,direct,offset'],
'B6' : ['3', 'CJNE', '@R0,#immed,offset'],
'B7' : ['3', 'CJNE', '@R1,#immed,offset'],
'B8' : ['3', 'CJNE', 'R0,#immed,offset'],
'B9' : ['3', 'CJNE', 'R1,#immed,offset'],
'BA' : ['3', 'CJNE', 'R2,#immed,offset'],
'BB' : ['3', 'CJNE', 'R3,#immed,offset'],
'BC' : ['3', 'CJNE', 'R4,#immed,offset'],
'BD' : ['3', 'CJNE', 'R5,#immed,offset'],
'BE' : ['3', 'CJNE', 'R6,#immed,offset'],
'BF' : ['3', 'CJNE', 'R7,#immed,offset'],
'C0' : ['2', 'PUSH', 'direct'],
'C1' : ['2', 'AJMP', 'addr11'],
'C2' : ['2', 'CLR', 'bit'],
'C3' : ['1', 'CLR', 'C'],
'C4' : ['1', 'SWAP', 'A'],
'C5' : ['2', 'XCH', 'A,direct'],
'C6' : ['1', 'XCH', 'A,@R0'],
'C7' : ['1', 'XCH', 'A,@R1'],
'C8' : ['1', 'XCH', 'A,R0'],
'C9' : ['1', 'XCH', 'A,R1'],
'CA' : ['1', 'XCH', 'A,R2'],
'CB' : ['1', 'XCH', 'A,R3'],
'CC' : ['1', 'XCH', 'A,R4'],
'CD' : ['1', 'XCH', 'A,R5'],
'CE' : ['1', 'XCH', 'A,R6'],
'CF' : ['1', 'XCH', 'A,R7'],
'D0' : ['2', 'POP', 'direct'],
'D1' : ['2', 'ACALL', 'addr11'],
'D2' : ['2', 'SETB', 'bit'],
'D3' : ['1', 'SETB', 'C'],
'D4' : ['1', 'DA', 'A'],
'D5' : ['3', 'DJNZ', 'direct,offset'],
'D6' : ['1', 'XCHD', 'A,@R0'],
'D7' : ['1', 'XCHD', 'A,@R1'],
'D8' : ['2',
@staticmethod
def drop_self_comparisons_static(stat_df, h1s, h2s, h3s):
dup_sample_indices = [(h3, h1, h2) for h3 in h3s for h1 in h1s for h2 in h2s if h3==h1 or h3==h2 or h1==h2]
df = stat_df.drop(dup_sample_indices)
return df
def drop_self_comparisons(self):
df = self.drop_self_comparisons_static(self.stat_df, self.h1s, self.h2s, self.h3s)
self.stat_df_drop = df
return self.stat_df_drop
@staticmethod
def get_stat_df_static(f3s, stat_name, do_drop_self_comparisons, h1s, h2s, h3s):
df = pd.DataFrame(f3s.flatten(),
index=pd.MultiIndex.from_tuples([(h3,h1,h2) for h3 in h3s for h1 in h1s for h2 in h2s]),
columns=[stat_name])
df.index.names = ['h3','h1','h2']
if do_drop_self_comparisons:
df = F3test.drop_self_comparisons_static(df, h1s, h2s, h3s)
return df
def get_stat_df(self, stat, zscores):
return self.get_stat_df_static(stat, self.ftype, self.do_drop_self_comparisons, self.h1s, self.h2s, self.h3s)
class Dtest(Ftest):
"""
Parameters:
h1s : list of sample names to use as h1
h2s : list of sample names to use as h2
h3s : list of sample names to use as h3
h4s : list of sample names to use as h4
vcf_filename : vcf.gz filename. Should be tabix indexed
for random access.
ind_to_pop : dictionary that maps individuals to populations;
if each individual is its own population this can also
be a list of individuals
reduce_dim : If true, remove dimensions with length 1 (not implemented).
jackknife_levels : 'chrom' ... weighted block-jackknife across whole chromosomes
'chunk' ... block-jackknife across chunks of chunksize snps
This can be very memory intensive.
"""
ftype = 'D'
def __init__(self, vcf_filename, ind_to_pop, h1s, h2s, h3s, h4s, **kwa):
self.h1s = h1s
self.h2s = h2s
self.h3s = h3s
self.h4s = h4s
Ftest.__init__(self, vcf_filename, ind_to_pop, **kwa)
self.calc_params = (self.ind_to_pop, self.h1s, self.h2s, self.h3s, self.h4s)
@staticmethod
def fly_reduce_fun(chunk_res, result=None):
"""
Function that reduces on the fly by summing
and also implements a counter of chunks
so that weighted jackknifing can be performed.
"""
if result is None:
return (chunk_res[0], chunk_res[1], 1)
else:
return (result[0] + chunk_res[0], result[1] + chunk_res[1], result[2] + 1)
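# Minimal illustration (not part of the class API) of how fly_reduce_fun accumulates
# per-chunk (numerator, denominator) pairs plus a chunk counter for the weighted jackknife:
# acc = None
# for chunk_res in [(1.0, 2.0), (3.0, 4.0)]:  # toy scalars in place of real arrays
#     acc = Dtest.fly_reduce_fun(chunk_res, result=acc)
# # acc is now (4.0, 6.0, 2)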
@staticmethod
def calc_stat_static(chunk1, chunk2, ind_to_pop, h1s, h2s, h3s, h4s):
hap_df = Dtest.get_hap_df(chunk1, chunk2)
af = Dtest.get_af(hap_df, ind_to_pop)
if len(af):
return calc.d(af[h1s], af[h2s], af[h3s], af[h4s])
else:
return np.zeros((len(h1s),len(h2s),len(h3s),len(h4s))), np.zeros((len(h1s),len(h2s),len(h3s),len(h4s)))
@staticmethod
def get_calc_stat(*args):
def calc_stat(chunk1, chunk2):
return Dtest.calc_stat_static(chunk1, chunk2, *args)
return calc_stat
@staticmethod
def jackknife(res, i):
return np.sum(res[np.arange(len(res))!=i, 0], axis=0)/np.sum(res[np.arange(len(res))!=i, 1], axis=0)
@staticmethod
def get_stat(res):
d = np.sum(res[:,0], axis=0)*1./np.sum(res[:,1], axis=0)
return d
@staticmethod
def get_zscores(res, d, weights=None):
jackknife_estimates = [Dtest.jackknife(res, i) for i in np.arange(len(res))]
if weights is None:
weights = res[:,2]*1./np.sum(res[:,2])
average = np.average(jackknife_estimates, axis=0, weights=weights)
variance = np.average(1.*(jackknife_estimates - average)**2, axis=0, weights=weights).astype(float)
try:
zscores = d * 1. / ( np.sqrt(variance) * np.sqrt(len(jackknife_estimates)-1) )
except AttributeError as e:
# debug output left in from development; note that the early return below
# makes the re-raise unreachable
print(variance.shape)
print(np.sqrt(5.))
print(variance.max())
print(variance.min())
#print(np.sqrt(variance))
return variance
raise e
return zscores
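# Hedged toy example of the block-jackknife machinery above, using made-up per-block
# (numerator, denominator, n_chunks) rows instead of real genotype data:
# res = np.array([[0.2, 1.0, 1], [0.1, 0.9, 1], [0.3, 1.1, 1]])
# d = Dtest.get_stat(res)          # pooled D = sum(numerators) / sum(denominators)
# z = Dtest.get_zscores(res, d)    # Z-score from leave-one-block-out estimates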
@staticmethod
def get_stat_df_static(stat, zscores, h1s, h2s, h3s, h4s, stat_name):
stat_s = pd.Series(stat.flatten(),
index=pd.MultiIndex.from_tuples([(h1,h2,h3,h4) for \
h1 in h1s for h2 in h2s for h3 in h3s for h4 in h4s]))
stat_s.name = stat_name
z_s = pd.Series(zscores.flatten(),
index=pd.MultiIndex.from_tuples([(h1,h2,h3,h4) for \
h1 in h1s for h2 in h2s for h3 in h3s for h4 in h4s]))
z_s.name = 'Z'
stat_df = pd.concat([stat_s, z_s], axis=1)
stat_df.sort_values(stat_name, ascending=False, inplace=True)
return stat_df
def get_stat_df(self,stat, zscores):
return self.get_stat_df_static(stat, zscores, self.h1s, self.h2s, self.h3s, self.h4s, self.ftype)
@staticmethod
def drop_self_comparisons_static(stat_df, h1s, h2s, h3s, h4s):
dup_sample_indices = [(h1, h2, h3, h4) for h1 in h1s for h2 in h2s for h3 in h3s for h4 in h4s \
if h3==h1 or h3==h2 or h1==h2 or h3==h4 or h1==h4 or h2==h4]
df = stat_df.drop(dup_sample_indices)
return df
def drop_self_comparisons(self):
df = self.drop_self_comparisons_static(self.stat_df, self.h1s, self.h2s, self.h3s, self.h4s)
self.stat_df_drop = df
return self.stat_df_drop
def get_consistent_with_tree(self, ete_tree):
"""
Get a data frame with the subset
of tuples that are consistent with a
given ete tree.
Parameters:
ete_tree : ete3 tree object of all samples.
Needs to be rooted and include
all outgroups..
"""
self.stat_df_consist = treetools.get_consistent_df(self.stat_df, ete_tree)
return self.stat_df_consist
class F4ratio(Dtest):
"""
Parameters:
h1s : list of sample names to use as h1
h2s : list of sample names to use as h2
h3s : list of sample names to use as h3
h4s : list of sample names to use as h4
vcf_filename : vcf.gz filename. Should be tabix indexed
for random access.
ind_to_pop : dictionary that maps individuals to populations;
if each individual is its own population this can also
be a list of individuals
reduce_dim : If true, remove dimensions with length 1 (not implemented).
jackknife_levels : 'chrom' ... weighted block-jackknife across whole chromosomes
'chunk' ... block-jackknife across chunks of chunksize snps
This can be very memory intensive.
"""
ftype = 'F4ratio'
def __init__(self, vcf_filename, ind_to_pop, h1s, h2s, h3s, h4s, subsampling_method='per_chunk_replace', **kwa):
Dtest.__init__(self, vcf_filename, ind_to_pop, h1s, h2s, h3s, h4s, **kwa)
pop_to_hap = {pop:[] for pop in set(self.ind_to_pop.values())}
for s, pop in self.ind_to_pop.items():
pop_to_hap[pop].append((s, 0))
pop_to_hap[pop].append((s, 1))
self.pop_to_hap = pop_to_hap
self.subsampling_method = subsampling_method
self.calc_params = (self.ind_to_pop, self.pop_to_hap, self.h1s, self.h2s,
self.h3s, self.h4s, self.subsampling_method)
@staticmethod
def get_af_hap(hap_df, hap_to_pop):
if len(hap_df):
af = hap_df.groupby(hap_to_pop, axis=1).mean()
else:
af = pd.DataFrame(columns=set(hap_to_pop.values()))
return af
@staticmethod
def calc_stat_static(chunk1, chunk2, ind_to_pop, pop_to_hap, h1s, h2s, h3s, h4s, subsampling_method):
hap_df = F4ratio.get_hap_df(chunk1, chunk2)
af = F4ratio.get_af(hap_df, ind_to_pop)
#do the random subsets for each chunk independently
if subsampling_method == 'per_chunk_noreplace' or \
subsampling_method == 'per_chunk_replace':
#r00 = os.urandom(3)
#r0 = int(r00.encode('hex'), 16)
#r1 = int(np.ceil(hap_df.sum().sum()/1111.))
#np.random.seed(int(r0*r1))
hap_to_pop_a = {}
hap_to_pop_b = {}
for h3 in h3s:
samples = pop_to_hap[h3]
sample_idx = np.arange(len(samples))
#try:
# ixa = np.random.choice(sample_idx, len(samples)/2, replace=False)
#except ValueError, e:
# raise e
ixa = np.random.choice(sample_idx, len(samples) // 2, replace=False)
if subsampling_method == 'per_chunk_noreplace':
ixb = [i for i in sample_idx if i not in ixa]
else:
ixb = np.random.choice(sample_idx, len(samples) // 2, replace=False)
#ixb = np.random.choice(sample_idx, len(samples)/2, replace=False)
hap_to_pop_a.update({samples[i]: h3 for i in ixa})
hap_to_pop_b.update({samples[i]: h3 for i in ixb})
af3_a = F4ratio.get_af_hap(hap_df, hap_to_pop_a)[h3s]
af3_b = F4ratio.get_af_hap(hap_df, hap_to_pop_b)[h3s]
#hap_df[samples_a].mean(axis=1)
#af3_b = hap_df[samples_b].mean(axis=1)
#af_sub = F4ratio.get_af_hap(hap_df, hap_to_pop_ab)
elif subsampling_method == 'no_subsampling':
#this is equivalent to f_hom from Martin, <NAME>
af3_a = af[h3s]
af3_b = af[h3s]
if len(af):
#here we remove all SNP sites that contain
#nans in any population. This is unfortunate,
#because it looses info (for the comparisons without nan_)
#but np.einsum cannot handle any nans.
def nn(df):
return df.notnull().all(axis=1)
nnl = nn(af[h1s])&nn(af[h2s])&nn(af[h3s])&nn(af3_a)&nn(af3_b)&nn(af[h4s])
return calc.f4ratio(af[nnl][h1s], af[nnl][h2s], af[nnl][h3s], af3_a[nnl], af3_b[nnl], af[nnl][h4s])
else:
return np.zeros((len(h1s),len(h2s),len(h3s),len(h4s))), np.zeros((len(h1s),len(h2s),len(h3s),len(h4s)))
@staticmethod
def get_calc_stat(*args):
def calc_stat(chunk1, chunk2):
return F4ratio.calc_stat_static(chunk1, chunk2, *args)
return calc_stat
class F4ratioH3derived(Dtest):
"""
An implementation of the f4ratio where
only sites are considered where h3 is derived.
"""
pass
class calc:
"""
This is a container for
the basic functions that do the
calculations.
Not to be instantiated.
ATTENTION:
All the functions that use
einsum produce nans for any product
with missing data.
Missing data should be removed beforehand.
"""
@staticmethod
def pwd(af1, af2):
"""
ATTENTION pi needs to be corrected for resampling
similar to f3!!!!!!
Calculate pairwise differences (pi and dxy).
Input can be np.ndarray or pd.DataFrame.
Rows are allele frequencies or haplotypes for variants.
Columns are individuals or populations.
Rows containing np.nan should be removed beforehand.
Result:
Diagonal entries are pi = 2p(1-p).
Off-diagonal entries are dxy = pq
"""
return np.einsum('ij,ik->jk',af1, 1-af2) + np.einsum('ij,ik->jk',1-af1, af2)
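# Small hedged illustration of the einsum above: two variants (rows) and two populations
# (columns) of allele frequencies.
# af = np.array([[0.5, 0.0],
#                [1.0, 0.5]])
# calc.pwd(af, af)
# # diagonal entries: pi = 2p(1-p) summed over sites -> 0.5 for both populations
# # off-diagonal entries: p1(1-p2) + (1-p1)p2 summed over sites -> 1.0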
# @staticmethod
# def pwd0(af_df):
# """
# Calculate pairwise differences (pi and dxy).
#
# Input can be np.ndarray or pd.DataFrame.
# Rows are allele frequencies or haplotypes for variants.
# Columns are individuals or populations.
#
# Rows containing np.nan should be removed beforehand.
#
# Result:
# Diagonal entries are pi = 2p(1-p).
# Off-diagonal entries are dxy = pq
#
#
# """
# pw_af = np.einsum('ij,ik->jk',af_df, 1-af_df)
#
# try:
# pw_af = pd.DataFrame(pw_af, index = af_df.columns, columns = af_df.columns)
# except AttributeError:
# pass
#
# return pw_af + pw_af.T
@staticmethod
def divergence(groups):
"""
!!! careful the ordering of groups can be surprising.
use [n for n,_ in groups] to get the axis labels of result
Calculate pairwise differences (pi and dxy).
This function returns an unbiased estimate
for pi.
Rows containing np.nan should be removed beforehand.
# Repository: tcwilliams90/kaggle
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 7 20:15:33 2017
@author: tcwilliams
"""
# Import libraries
import os, math
import numpy as np
from random import random, randint
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
import seaborn as sns
# Modeling Algorithms
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC, LinearSVC
from sklearn.ensemble import RandomForestClassifier , GradientBoostingClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import cross_val_score
# Modeling Helpers
from sklearn.preprocessing import Imputer , Normalizer , scale
from sklearn.model_selection import train_test_split , StratifiedKFold
from sklearn.feature_selection import RFECV
from sklearn.feature_selection import SelectFromModel
# Configure visualizations
%matplotlib inline
mpl.style.use( 'ggplot' )
sns.set_style( 'white' )
pylab.rcParams[ 'figure.figsize' ] = 8 , 6
# used guidance from https://www.kaggle.com/helgejo/an-interactive-data-science-tutorial
# Helge's helper functions
def plot_histograms( df , variables , n_rows , n_cols ):
fig = plt.figure( figsize = ( 16 , 12 ) )
for i, var_name in enumerate( variables ):
ax=fig.add_subplot( n_rows , n_cols , i+1 )
df[ var_name ].hist( bins=10 , ax=ax )
ax.set_title( 'Skew: ' + str( round( float( df[ var_name ].skew() ) , ) ) ) # + ' ' + var_name ) #var_name+" Distribution")
ax.set_xticklabels( [] , visible=False )
ax.set_yticklabels( [] , visible=False )
fig.tight_layout() # Improves appearance a bit.
plt.show()
def plot_distribution( df , var , target , **kwargs ):
row = kwargs.get( 'row' , None )
col = kwargs.get( 'col' , None )
facet = sns.FacetGrid( df , hue=target , aspect=4 , row = row , col = col )
facet.map( sns.kdeplot , var , shade= True )
facet.set( xlim=( 0 , df[ var ].max() ) )
facet.add_legend()
def plot_categories( df , cat , target , **kwargs ):
row = kwargs.get( 'row' , None )
col = kwargs.get( 'col' , None )
facet = sns.FacetGrid( df , row = row , col = col )
facet.map( sns.barplot , cat , target )
facet.add_legend()
def plot_correlation_map( df ):
corr = df.corr()
_ , ax = plt.subplots( figsize =( 12 , 10 ) )
cmap = sns.diverging_palette( 220 , 10 , as_cmap = True )
_ = sns.heatmap(
corr,
cmap = cmap,
square=True,
cbar_kws={ 'shrink' : .9 },
ax=ax,
annot = True,
annot_kws = { 'fontsize' : 12 }
)
def describe_more( df ):
var = [] ; l = [] ; t = []
for x in df:
var.append( x )
l.append( len( pd.value_counts( df[ x ] ) ) )
t.append( df[ x ].dtypes )
levels = pd.DataFrame( { 'Variable' : var , 'Levels' : l , 'Datatype' : t } )
levels.sort_values( by = 'Levels' , inplace = True )
return levels
def plot_variable_importance( X , y ):
tree = DecisionTreeClassifier( random_state = 99 )
tree.fit( X , y )
plot_model_var_imp( tree , X , y )
def plot_model_var_imp( model , X , y ):
imp = pd.DataFrame(
model.feature_importances_ ,
columns = [ 'Importance' ] ,
index = X.columns
)
imp = imp.sort_values( [ 'Importance' ] , ascending = True )
imp[ : 10 ].plot( kind = 'barh' )
print (model.score( X , y ))
# Function to replace missing ages from Ahmed BESBES
def process_age():
global full_set
# a function that fills the missing values of the Age variable
def fillAges_TW(row, grouped_median):
return grouped_median.loc[row['Sex'], row['Pclass'], row['Title']]['Age']
full_set.loc[0:890, 'Age'] = full_set.head(891).apply(lambda r : fillAges_TW(r, grouped_median_train) if np.isnan(r['Age'])
else r['Age'], axis=1)
full_set.loc[891:, 'Age'] = full_set.iloc[891:].apply(lambda r : fillAges_TW(r, grouped_median_test) if np.isnan(r['Age'])
else r['Age'], axis=1)
def bin_age(df):
bins = (-1, 0, 6, 12, 18, 25, 35, 60, 120)
group_names = ['Unknown', 'Baby', 'Child', 'Teenager', 'Young Adult', 'Adult', 'Middle Age', 'Senior']
categories = pd.cut(df.Age, bins, labels=group_names)
df.Age = categories
return df
def process_fare():
global full_set
# a function that fills the missing values of the Age variable
def fillFares(row, grouped_fare_median):
return grouped_fare_median.loc[row['Pclass']]['Fare']
full_set.loc[0:890, 'Fare'] = full_set.head(891).apply(lambda r : fillFares(r, grouped_fare_median_train) if np.isnan(r['Fare']) or r['Fare'] == 0
else r['Fare'], axis=1)
full_set.loc[891:, 'Fare'] = full_set.iloc[891:].apply(lambda r : fillFares(r, grouped_fare_median_test) if np.isnan(r['Fare']) or r['Fare'] == 0
else r['Fare'], axis=1)
# full_set.loc[0:890, 'Fare'].fillna(full_set.head(891).Fare.mean(),inplace=True)
# full_set.loc[891:, 'Fare'].fillna(full_set.iloc[891:].Fare.mean(),inplace=True)
def process_multi_cabin_fares():
global full_set
# a function that fills the missing values of the Age variable
def fixMultiCabinFares(row):
fare = row['Fare']
cabins = row['Cabin'].split(" ")
if len(cabins) > 1:
fare = fare / len(cabins)
return fare
full_set.loc[0:890, 'Fare'] = full_set.head(891).apply(fixMultiCabinFares, axis=1)
full_set.loc[891:, 'Fare'] = full_set.iloc[891:].apply(fixMultiCabinFares, axis=1)
def bin_fare(df):
bins = (-1, 0, 8, 15, 31, 1000)
group_names = ['Unknown', 'fare_qrt_1', 'fare_qrt_2', 'fare_qrt_3', 'fare_qrt_4']
categories = pd.cut(df.Fare, bins, labels=group_names)
df.Fare = categories
return df
def get_ticket_prefix_and_number(ticket):
split = ticket.rsplit(" ", 1)
if len(split) > 1:
tp = split[0]
tn = split[1]
else:
if str.isnumeric(split[0]):
tp = "N/A"
tn = split[0]
else: # no ticket number, probably employee
tp = split[0]
tn = 0
return [tp, tn]
def get_ticket_prefix(tkt):
tkt = tkt.replace('.', '')
tkt = tkt.replace('/', '')
tkt = tkt.split()
tkt = map(lambda t: t.strip(), tkt)
tkt = filter(lambda t: not t.isdigit(), tkt)
l_tkt = list(tkt) # filter in Python 3 returns iterator, not list as in Py 2
if len(l_tkt) > 0:
return l_tkt[0]
else:
return 'XXX'
def recover_original_data(trn_file):
orig_train = pd.read_csv(trn_file)
targets = orig_train.Survived
train = orig_train.head(891)
test = orig_train.iloc[891:]
return train, test, targets
def compute_score(cl, X, y, scoring='accuracy'):
xv = cross_val_score(cl, X, y, cv=5, scoring=scoring)
return np.mean(xv)
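# Hedged usage sketch for the helper above; the model choice and the train_X / train_y
# names are assumptions, not variables defined in this script.
# model = RandomForestClassifier(n_estimators=100, random_state=0)
# print(compute_score(model, train_X, train_y))  # mean 5-fold cross-validated accuracy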
def dummy_age_and_fare(full_set):
age_dummies = pd.get_dummies(full_set.Age, prefix = 'Age')
full_set = pd.concat([full_set, age_dummies], axis=1)
full_set.drop('Age',axis=1,inplace=True)
fare_dummies = pd.get_dummies(full_set.Fare, prefix = 'Fare')
full_set = pd.concat([full_set, fare_dummies], axis=1)
full_set.drop('Fare',axis=1,inplace=True)
return full_set # rebinding the parameter inside the function does not affect the caller, so return the new frame
# get titanic data
skydrive_path = "C:/Users/tcwilliams/SkyDrive/Documents"
onedrive_path = "C:/Users/tcwilliams/OneDrive/Documents"
if os.path.isdir(skydrive_path):
file_path = skydrive_path + "/tcw/datasci/kaggle/titanic/"
else:
file_path = onedrive_path + "/tcw/datasci/kaggle/titanic/"
train_file = file_path + "train.csv"
test_file = file_path + "test.csv"
output_file = file_path + "results.csv"
train = pd.read_csv(train_file)
test = pd.read_csv(test_file)
titanic = train
titanic.head()
titanic.describe()
plot_correlation_map(titanic)
plot_distribution(titanic, var = 'Age', target = 'Survived', row = 'Sex')
plot_distribution(titanic, var = 'Fare', target = 'Survived', row = 'Sex')
plot_categories(titanic, cat = 'Sex', target = 'Survived')
plot_categories(titanic, cat = 'Pclass', target = 'Survived')
plot_categories(titanic, cat = 'SibSp', target = 'Survived')
plot_categories(titanic, cat = 'Parch', target = 'Survived')
# Create combined data set for feature extraction. Remove the Survived values from the training data before joining
targets = train.Survived # save the 'y' values from the training set
train.drop('Survived', axis=1, inplace = True)
full_set = train.append(test, ignore_index = True)
full_set.describe()
print('datasets:', 'full_set:', full_set.shape, 'titanic:', titanic.shape)
full_set['Title'] = full_set['Name'].map(lambda name: name.split( ',')[1].split('.')[0].strip())
Title_Dictionary = {
"Capt": "Official",
"Col": "Official",
"Major": "Official",
"Jonkheer": "Royalty",
"Don": "Royalty",
"Sir" : "Royalty",
"Dr": "Official",
"Rev": "Official",
"the Countess":"Royalty",
"Dona": "Royalty",
"Mme": "Mrs",
"Mlle": "Miss",
"Ms": "Mrs",
"Mr" : "Mr",
"Mrs" : "Mrs",
"Miss" : "Miss",
"Master" : "Master",
"Lady" : "Royalty"
}
# fix Titles
full_set['Title'] = full_set.Title.map(Title_Dictionary)
# Separate ticket numbers from any prefixes
#ticket_split = pd.DataFrame()
#ticket_split = full_set['Ticket'].map(get_ticket_prefix_and_number )
#full_set['TicketPrefix'] = ticket_split.map(lambda t: str.strip(t[0]))
full_set['TicketPrefix'] = full_set['Ticket'].map(get_ticket_prefix)
tp_dummies = pd.get_dummies(full_set.TicketPrefix, prefix = 'TktPrfx')
full_set = pd.concat([full_set, tp_dummies], axis=1)
full_set.drop('TicketPrefix',axis=1,inplace=True)
#full_set['TicketNumber'] = ticket_split.map(lambda t: t[1])
# Replace missing ages per Ahmed BESBES
grouped_train = full_set.head(891).groupby(['Sex', 'Pclass', 'Title'])
grouped_median_train = grouped_train.median()
grouped_test = full_set.iloc[891:].groupby(['Sex', 'Pclass', 'Title'])
grouped_median_test= grouped_test.median()
grouped_median_train
grouped_median_test
process_age()
bin_age(full_set)
# set up age groups
# age categorization doesn't seem to add any accuracy 10/12
#full_set.loc[full_set['Age'] <= 16, 'Age'] = 0
#full_set.loc[(full_set['Age'] > 16) & (full_set['Age'] <= 32), 'Age'] = 1
#full_set.loc[(full_set['Age'] > 32) & (full_set['Age'] <= 48), 'Age'] = 2
#full_set.loc[(full_set['Age'] > 48) & (full_set['Age'] <= 64), 'Age'] = 3
#full_set.loc[full_set['Age'] > 64, 'Age'] = 4
# Replace missing fares with median using Ahmed BESBES process
grouped_fare_train = full_set.head(891).groupby(['Pclass'])
grouped_fare_median_train = grouped_fare_train.median()  # use the median, matching the variable name and the test-set computation below
grouped_fare_test = full_set.iloc[891:].groupby(['Pclass'])
grouped_fare_median_test= grouped_fare_test.median()
grouped_fare_median_train
grouped_fare_median_test
process_fare()
bin_fare(full_set)
# Dummy variables for Age and Fare
#dummy_age_and_fare(full_set)
# Add dummy variables for Titles
title_dummies = pd.get_dummies(full_set.Title, prefix="Title")
title_dummies.head()
full_set = pd.concat([full_set, title_dummies], axis=1)
full_set.drop('Title',axis=1,inplace=True)
# map Sex to numbers
full_set['Sex'] = full_set['Sex'].map({'male': 1, 'female': 0})
#sex_dummies = pd.get_dummies(sex, prefix = 'Sex')
# replace missing Embarked values
# fillna on a .loc slice with inplace=True acts on a copy; assign to the column directly
full_set['Embarked'] = full_set['Embarked'].fillna('S')
# add dummies for Embarked
embarked_dummies = pd.get_dummies(full_set.Embarked, prefix = 'Embarked')
full_set = pd.concat([full_set, embarked_dummies], axis=1)
full_set.drop('Embarked',axis=1,inplace=True)
#full_set.describe()
# Fill missing values of Fare with average (mean) fare
# Note: some fares were paid for multiple cabins. Need to adjust for this
# full_set[ 'Fare' ] = full_set.Fare.fillna(full_set.Fare.mean())
deck = pd.DataFrame()
cabin = pd.DataFrame()
# Helge's feature extraction erroneously referred to deck as 'Cabin'
full_set['Cabin'] = full_set.Cabin.fillna('U')
cabin['Cabin'] = full_set['Cabin']
deck['Deck'] = cabin['Cabin'].map(lambda c : c[0])
# process_multi_cabin_fares()
#cabin_dummies = pd.get_dummies(cabin['Cabin'], prefix = 'Cabin')
full_set.drop('Cabin', axis=1, inplace=True)
# Initial set of models to try
deck_dummies
# File: vizier/core/io/base.py
# Copyright (C) 2017-2019 New York University,
# University at Buffalo,
# Illinois Institute of Technology.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Definition and implementation for different object stores that read and
write resources as Json objects.
"""
from abc import abstractmethod
from typing import Dict, Any, Optional, Callable, Union, List
import json
import os
import shutil
import yaml
from vizier.core.util import get_short_identifier, get_unique_identifier
"""Maximum number of attempts to generate a unique identifier before an exception
is raised.
"""
MAX_ATTEMPS = 100
"""Elements for parameters when instantiating the default object store from a
dictionary.
"""
PARA_KEEP_DELETED = 'keepDeletedFiles'
PARA_LONG_IDENTIFIER = 'useLongIdentifier'
class ObjectStore(object):
"""Abstract object store class that defines the interface methods to read
and write objects and to maintain folders.
"""
@abstractmethod
def create_folder(self,
parent_folder: str,
identifier: Optional[str] = None
) -> str:
"""Create a new folder in the given parent folder. The folder name is
either given as the identifier argument or a new unique identifier is
created if the argument is None. Returns the identifier for the created
folder.
Parameters
----------
parent_folder: string
Path to parent folder
identifier: string, optional
Folder identifier
Returns
-------
string
"""
raise NotImplementedError()
@abstractmethod
def create_object(self,
parent_folder: str,
identifier: Optional[str] = None,
content: Union[List[Any], Dict[str,Any], None] = None
) -> str:
"""Create a new object in the given parent folder. The object path is
either given as the identifier argument or a new unique identifier is
created if the argument is None. Returns the path for the created
object.
Parameters
----------
parent_folder: string
Path to parent folder
identifier: string, optional
Folder identifier
content: list or dict, optional
Default content for the new resource
Returns
-------
string
"""
raise NotImplementedError()
@abstractmethod
def delete_folder(self,
folder_path: str,
force_delete: bool = False
) -> None:
"""Delete the folder with the given path and all of its files and
subfolders.
Parameters
----------
folder_path: string
Path to the folder that is being deleted
force_delete: bool, optional
Force deletion of the resource
"""
raise NotImplementedError()
@abstractmethod
def delete_object(self, object_path, force_delete=False):
"""Delete the object with the given path.
Parameters
----------
object_path: string
Path to the object that is being deleted
force_delete: bool, optional
Force deletion of the resource
"""
raise NotImplementedError()
@abstractmethod
def exists(self, resource_path: str) -> bool:
"""Returns True if a resource at the given path exists.
Parameters
----------
resource_path: string
Path to resource
Returns
-------
bool
"""
raise NotImplementedError()
@abstractmethod
def join(self, parent_folder: str, identifier: str) -> str:
"""Concatenate the identifier for a given folder and a folder resource.
Parameters
----------
parent_folder: string
Path to the parent folder
identifier: string
Identifier for resource in the parent folder
Returns
-------
string
"""
raise NotImplementedError()
@abstractmethod
def list_folders(self, folder_path, create=True):
"""Get a list of all subfolders in the given folder. If the folder does
not exist it is created if the create flag is True.
Parameters
----------
folder_path: string
Path to the parent folder
create: bool, optional
Flag indicating that the parent folder should be created if it does
not exist
Returns
-------
list(string)
"""
raise NotImplementedError()
@abstractmethod
def list_objects(self, folder_path: str) -> List[str]:
"""Get a list of all objects in the given folder. Returns a list of
resource names.
Parameters
----------
folder_path: string
Path to the resource folder
Returns
-------
list(string)
"""
raise NotImplementedError()
@abstractmethod
def read_object(self,
object_path: str
) -> Union[Dict[str, Any], List[Dict[str, Any]], None]:
"""Read Json document from given path.
Raises ValueError if no object with given path exists.
"""
raise NotImplementedError()
@abstractmethod
def write_object(self,
object_path: str,
content: Union[Dict[str, Any], List[Dict[str, Any]], List[str], None]
) -> None:
"""Write content as Json document to given path.
Parameters
----------
object_path: string
Path identifier for a resource object
content: dict or list
Json object or array
"""
raise NotImplementedError()
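# Hedged usage sketch of the interface above; DefaultObjectStore below is the
# file-system backed implementation. The paths are illustrative assumptions.
# store = DefaultObjectStore()
# run_id = store.create_folder('/tmp/vizier')
# obj_id = store.create_object('/tmp/vizier/' + run_id, content={'state': 'PENDING'})
# doc = store.read_object(store.join('/tmp/vizier/' + run_id, obj_id))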
class DefaultObjectStore(ObjectStore):
"""Default implementation of the object store. If the keep_deleted_files
flag is set to True none of the delete methods will have any effect. The
flag allows to switch between scenarios where we want to keep the full
history of any resource that was ever created.
"""
def __init__(self,
properties: Optional[Dict[str, Any]] = None,
identifier_factory: Optional[Callable[[], str]] = None,
keep_deleted_files: bool = False
):
"""Initialize the identifier_factory and keep_deleted_files flag. By
default the get_unique_identifier function is used to generate new
folder and resource identifier.
Parameters
----------
properties: dict
Dictionary for object properties. Overwrites the default values.
identifier_factory: func, optional
Function to create a new unique identifier
keep_deleted_files: bool, optional
Flag indicating whether files and folder are actually deleted or not
"""
# Initialize the default values. Override them if respective properties
# are given.
self.identifier_factory = identifier_factory if identifier_factory is not None else get_unique_identifier
self.keep_deleted_files = keep_deleted_files
if properties is not None:
if PARA_KEEP_DELETED in properties:
self.keep_deleted_files = properties[PARA_KEEP_DELETED]
if PARA_LONG_IDENTIFIER in properties and not properties[PARA_LONG_IDENTIFIER]:
self.identifier_factory = get_short_identifier
def create_folder(self,
parent_folder: str,
identifier: Optional[str] = None
) -> str:
"""Create a new folder in the given parent folder. The folder name is
either given as the identifier argument or a new unique identifier is
created if the argument is None. Returns the identifier for the created
folder.
Parameters
----------
parent_folder: string
Path to parent folder
identifier: string, optional
Folder identifier
Returns
-------
string
"""
count = 0
while identifier is None:
# Allow repeated calls to the identifier factory until an identifier
# is returned that does not reference an existing folder. The max.
# attempts counter is used to avoid an endless loop.
candidate = self.identifier_factory()
if not os.path.exists(os.path.join(parent_folder, candidate)):
identifier = candidate
else:
count += 1
if count >= MAX_ATTEMPS:
raise RuntimeError('could not generate unique identifier')
# Create the new folder and return the identifier
os.makedirs(os.path.join(parent_folder, identifier))
return identifier
def create_object(self, parent_folder, identifier=None, content=None):
"""Create a new object in the given parent folder. The object path is
either given as the identifier argument or a new unique identifier is
created if the argument is None. Returns the path for the created
object.
Parameters
----------
parent_folder: string
Path to parent folder
identifier: string, optional
Folder identifier
content: list or dict, optional
Default content for the new resource
Returns
-------
string
"""
count = 0
filename = identifier
while identifier is None:
# Allow repeated calls to the identifier factory until an identifier
# is returned that does not reference an existing object. The max.
# attempts counter is used to avoid an endless loop.
candidate = self.identifier_factory()
filename = candidate
if not os.path.exists(os.path.join(parent_folder, filename)):
identifier = candidate
else:
count += 1
if count >= MAX_ATTEMPS:
raise RuntimeError('could not generate unique identifier')
# Create an empty file
file_path = os.path.join(parent_folder, filename)
if content is not None:
self.write_object(object_path=file_path, content=content)
else:
with open(file_path, 'w'):
pass
return identifier
def delete_folder(self,
folder_path: str,
force_delete: bool = False
) -> None:
"""Delete the folder with the given path and all of its files and
subfolders.
Parameters
----------
folder_path: string
Path to the folder that is being deleted
force_delete: bool, optional
Force deletion of the resource
"""
if force_delete or not self.keep_deleted_files:
shutil.rmtree(folder_path)
def delete_object(self,
object_path: str,
force_delete: bool = False
):
"""Delete the object with the given path.
Parameters
----------
object_path: string
Path to the object that is being deleted
force_delete: bool, optional
Force deletion of the resource
"""
if force_delete or not self.keep_deleted_files:
os.remove(object_path)
def exists(self,
resource_path: str
) -> bool:
"""Returns True if a resource at the given path exists.
Parameters
----------
resource_path: string
Path to resource
Returns
-------
bool
"""
return os.path.exists(resource_path)
def | |
<filename>meta_infomax/trainers/fomaml_trainer.py
import torch
from copy import deepcopy
import logging
import torch
import torch.optim as optim
from transformers import get_linear_schedule_with_warmup, AdamW
from typing import Dict
from meta_infomax.datasets.fudan_reviews import MultiTaskDataset, MultiTaskCollator
from meta_infomax.trainers.super_trainer import BaseTrainer
from meta_infomax.datasets.utils import sample_domains
from meta_infomax.trainers.PMIScorer import PMIScorer
from torch.utils.data import Dataset, DataLoader
from random import shuffle, choice
class FOMAMLTrainer(BaseTrainer):
"""Train to classify sentiment across different domains/tasks"""
def __init__(self, config: Dict):
"""Initialize the trainer with data, models and optimizers
Parameters
---
config:
{
'exp_name': "multitask_test",
'epochs': 10,
'batch_size': 64,
'valid_freq': 50,
'save_freq': 100,
'device': 'cpu',
'data_dir': 'data/mtl-dataset/',
'transformer_name': "bert-base-uncased",
'domains': ['apparel', 'baby', 'books', 'camera_photo', 'electronics',
'health_personal_care', 'imdb', 'kitchen_housewares', 'magazines',
'music', 'software', 'sports_outdoors', 'toys_games', 'video'],
'train_domains': ['apparel', 'baby', 'books', 'camera_photo', 'health_personal_care',
'imdb', 'kitchen_housewares', 'magazines', 'sports_outdoors', 'toys_games'],
'valid_domains': ['software', 'electronics'],
'test_domains': ['music', 'video'],
}
"""
super().__init__(config)
# for now, we say that the training data is the train split of every train domain
# we could eventually also include the test split of the train_domain
train_data = MultiTaskDataset(tokenizer=self.tokenizer, data_dir=config['data_dir'], split='train',
keep_datasets=config['train_domains'], random_state=config['random_state'],
validation_size=0, const_len=True)
val_data = MultiTaskDataset(tokenizer=self.tokenizer, data_dir=config['data_dir'], split='train',
keep_datasets=config['val_domains'], random_state=config['random_state'],
validation_size=0, const_len=True)
test_data = MultiTaskDataset(tokenizer=self.tokenizer, data_dir=config['data_dir'], split='train',
keep_datasets=config['test_domains'], random_state=config['random_state'],
validation_size=0, const_len=True)
# loaders are now dicts mapping from domains to individual loaders
### k-shot is defined per class (pos/negative), so here we multiply by 2, as we just sample the whole data
train_loader = train_data.episodic_dataloaders(batch_size=config['k_shot'],
collate_fn=train_data.collator, shuffle=True)
val_loader = val_data.episodic_dataloaders(batch_size=config['k_shot'],
collate_fn=val_data.collator, shuffle=True)
test_loader = test_data.episodic_dataloaders(batch_size=config['k_shot'],
collate_fn=test_data.collator, shuffle=True)
### concatenating pos and neg to create balanced batches
self.train_loader = self.prepare_balanced_batches(train_loader)
self.val_loader = self.prepare_balanced_batches(val_loader)
self.test_loader = self.prepare_balanced_batches(test_loader)
## concatenate pos and neg batches to create balanced batches
## define iterator for train (other use list for indexing)
self.train_loader_iterator = {domain: iter(domain_loader) for domain, domain_loader in self.train_loader.items()}
self.train_examples_per_episode = config['k_shot']*4 * config['n_domains']
self.current_episode = 0
self.ffn_opt = optim.Adam(self.model.head.parameters(), lr=self.config['meta_lr'])
self.bert_opt = AdamW(self.model.encoder.parameters(), lr=config['meta_lr'], correct_bias=False,
weight_decay=config['weight_decay']) # use transformers AdamW
self.bert_scheduler = get_linear_schedule_with_warmup(self.bert_opt,
num_warmup_steps=config['warmup_steps'],
num_training_steps=len(self.train_loader) *
config['epochs'])
def train(self):
"""Main training loop."""
assert self.config['collapse_domains'] == False, 'only implemented for collapse_domains=False'
logging.info("***** Running training - FoMAML *****")
logging.info(" Num examples = %d", len(self.train_loader))
logging.info(" Num Episodes = %d", self.config['episodes'])
logging.info(" K-shot = %d", self.config['k_shot'])
logging.info(" N-way = %d", self.config['n_domains'])
###adding lists for keeping track of the performance through training
self.train_log = {
'train_accs' : [],
'val_accs' : [],
'test_accs' : [],
}
for episode in range(self.current_episode, self.config['episodes']):
self.current_episode = episode
### break if too many iterators are exhausted
if len(self.train_loader.keys()) < self.config['n_domains']:
logging.info("Breaking training: Not enough training data remaining")
break
episode_domains = sample_domains(self.train_loader, n_samples=self.config['n_domains'],
strategy=self.config['domain_sampling_strategy'])
results = self.outer_loop(episode_domains, mode='training')
### none returned if there is no more data
if not results:
break
self.train_log['train_accs'].append(results['accuracy'])
self.writer.add_scalar('Query_Accuracy/Train', results['accuracy'], self.current_episode)
self.writer.add_scalar('Meta_Loss/Train', results['loss'], self.current_episode)
logging.info(f"EPSIODE:{episode} Query_Accuracy: {results['accuracy']:.3f} Meta_Loss: {results['loss']:.3f}")
if self.current_episode % self.config['valid_freq'] == 0:
self.train_log['val_accs'].append(self.fine_tune(mode = 'validate'))
self.train_log['test_accs'].append(self.fine_tune(mode = 'test'))
## break if number of examples exceed the threshold
if (self.config['num_examples'] != 'all' and episode * self.train_examples_per_episode > self.config['num_examples']):
logging.info("Breaking training: num examples threshold exceeded")
break
### final validation and test before finishing
self.train_log['val_accs'].append(self.fine_tune(mode = 'validate'))
self.train_log['test_accs'].append(self.fine_tune(mode = 'test'))
logging.info("Training finished with performance:")
logging.info(self.train_log)
def test(self):
self.reshuffle_test_loaders(self.config['sort_test_by_pmi'])
res = self.fine_tune(mode = 'test')
return res
def reshuffle_test_loaders(self, sort_pmi):
sorted_domains_separated = []
for test_domain, domain_loader in self.test_loader.items():
if sort_pmi:
scorer = PMIScorer(self.tokenizer, [test_domain])
sorted_ds = scorer.sort_datasets()
collator = MultiTaskCollator(self.tokenizer, const_len=True)
sorted_domains_separated.append([DataLoader(sentiment_data, batch_size=self.config['k_shot'],
collate_fn=collator, shuffle=False) for domain, sentiment_data in sorted_ds.items()])
else:
shuffle(domain_loader)
if sort_pmi:
self.test_loader = self.prepare_balanced_batches(sorted_domains_separated)
def fine_tune(self, mode):
""" Main validation loop """
if mode == 'validate':
logging.info("***** Running evaluation *****")
domains = self.config['val_domains']
episodes = range(self.config['val_episodes'])
elif mode == 'test':
logging.info("***** Running test *****")
domains = self.config['test_domains']
episodes = range(self.config['test_episodes'])
acc_across_domains = 0
loss_across_domains = 0
total_episodes = 0
for fine_tune_domain in domains:
acc_total = 0
loss_total = 0
logging.info("Fine tuning on domain: " + str(fine_tune_domain) + " num episodes: " + str(episodes))
for episode in episodes:
results = self.outer_loop([fine_tune_domain], mode=mode, episode=episode)
acc_total += results['accuracy']
loss_total += results['loss']
mean_accuracy = acc_total / (episode + 1)
mean_loss = loss_total / (episode + 1)
report = (f"[" + mode + "]\t"
f"Query_Accuracy: {mean_accuracy:.3f} "
f"Total Meta_Loss: {mean_loss:.3f}")
logging.info("Domain " + fine_tune_domain + " performance")
logging.info(report)
acc_across_domains += acc_total
loss_across_domains += loss_total
total_episodes += episode+1
### averaging over fine tune domains
acc_across_domains /= total_episodes
loss_across_domains /= total_episodes
if acc_across_domains > self.best_accuracy and mode == 'validate':
self.best_accuracy = acc_across_domains
self.save_checkpoint("unfrozen_bert:"+ str(self.config['unfreeze_layers']) + "_num_examples:" + str(self.config['num_examples']) + "_seed:" + str(self.config['seed']) + "_" + self.BEST_MODEL_FNAME)
self.writer.add_scalar('Avg_FineTune_Accuracy/' + mode, acc_across_domains, self.current_episode)
self.writer.add_scalar('Avg_FineTune_Loss/' + mode, loss_across_domains, self.current_episode)
report = (f"[" + mode + "]\t"
f"Query_Accuracy: {acc_across_domains:.3f} "
f"Total Meta_Loss: {loss_across_domains:.3f}")
logging.info("Average fine tune performance across domains")
logging.info(report)
return acc_across_domains
def outer_loop(self, domains, mode: str, episode = None):
""" Iterate over one batch """
meta_loss = 0
meta_acc = 0
if mode == 'training':
loader = self.train_loader_iterator
elif mode == 'validate':
loader = self.val_loader
elif mode == 'test':
loader = self.test_loader
domain_grads_head = []
domain_grads_bert = []
for domain in domains:
batch_iterator = loader[domain]
grads_head, grads_bert, results = self.inner_loop(batch_iterator, mode = mode, episode=episode)
## return if the train iterator is exhausted
if results == 'exhausted':
### remove domain from selectables
del loader[domain]
del self.train_loader[domain]
### select random replacement from remaining ones
remaining_domains = list(set(loader.keys()) - set(domains))
if len(remaining_domains) == 0:
logging.info("No more populated domains remain, breaking train")
return
logging.info("domain " + domain + " exhausted, appending new one")
new_dom = choice(remaining_domains)
domains.append(new_dom)
continue
domain_grads_head.append(grads_head)
domain_grads_bert.append(grads_bert)
meta_loss += results["loss"]
meta_acc += results["accuracy"]
if mode != 'training':
### reshuffling list, so next fine tuning is random
shuffle(loader[domain])
### updating main parameters - head
if mode == "training":
## summing domain grads
sum_grads_head = domain_grads_head[0]
for grad_ind in range(1, len(domain_grads_head)):
for layer_ind in range(len(domain_grads_head[grad_ind])):
sum_grads_head[layer_ind] += domain_grads_head[grad_ind][layer_ind]
bert_grad_keys = domain_grads_bert[0].keys()
sum_grads_bert = domain_grads_bert[0]
for grad_ind in range(1, len(domain_grads_bert)):
for layer_key in bert_grad_keys:
sum_grads_bert[layer_key] += domain_grads_bert[grad_ind][layer_key]
### putting grads into the parameters
self.model.update_head_grads(sum_grads_head)
self.model.update_bert_grads(sum_grads_bert)
## calling the update
self.ffn_opt.step()
self.bert_opt.step()
self.bert_scheduler.step()
self.ffn_opt.zero_grad()
self.bert_opt.zero_grad()
meta_results = {"loss": meta_loss, "accuracy": meta_acc / len(domains)}
return meta_results
def inner_loop(self, batch_iterator, mode = 'training', episode = None):
# episode number is only used in val/test, to select batches one by one
if mode == 'training':
### checking if the iterator is exhausted
try:
support_batch = next(batch_iterator)
query_batch = [next(batch_iterator)]
query_chunks = 1
except StopIteration:
print("dataset was exhausted, returning")
return None, None, 'exhausted'
else:
### for test/valid, we draw a batch in each episode and test on all the rest
support_batch = batch_iterator[episode]
query_chunks = self.config['valid_chunks']
query_batch = self.concatenate_remaining_batches_and_chunk(batch_iterator,episode, query_chunks)
##rewriting with actual number of chunks
query_chunks = len(query_batch)
support_x, support_masks, support_labels, support_domains = support_batch['x'], support_batch['masks'], support_batch[
'labels'], support_batch['domains']
support_x = support_x.to(self.config['device'])
support_masks = support_masks.to(self.config['device'])
support_labels = support_labels.to(self.config['device'])
### initial update based on the net's weights
##self.bert_opt.zero_grad()
fast_weight_net = deepcopy(self.model)
self.ffn_opt_inner = optim.Adam(fast_weight_net.parameters(), lr=self.config['fast_weight_lr'])
for grad_step in range(0, self.config['inner_gd_steps'] - 1):
output = fast_weight_net(x=support_x, masks=support_masks, labels=support_labels, domains=support_domains)
logits = output['logits']
loss = output['loss']
loss.backward()
torch.nn.utils.clip_grad_norm_(fast_weight_net.parameters(), self.config['clip_grad_norm'])
self.ffn_opt_inner.step()
self.ffn_opt_inner.zero_grad()
### FOMAML - we'll use last step's gradients for update
###looping through the query chunks
loss = 0
query_acc = 0
for chunkInd in range(query_chunks):
##no grad calc if validation/test
if mode != 'training':
torch.set_grad_enabled(False)
query_x, query_masks, query_labels, query_domains = query_batch[chunkInd]['x'], query_batch[chunkInd]['masks'], query_batch[chunkInd]['labels'], \
query_batch[chunkInd]['domains']
query_x = query_x.to(self.config['device'])
query_masks = query_masks.to(self.config['device'])
query_labels = query_labels.to(self.config['device'])
output = fast_weight_net(x=query_x, masks=query_masks, labels=query_labels,
domains=query_domains) # domains is ignored for now
logits = output['logits']
if mode == "training":
loss += output['loss']
else:
#### not saving comp graph if not training
loss += output['loss'].item()
query_acc += output['acc']
query_acc /= query_chunks
if mode == "training":
loss.backward()
torch.nn.utils.clip_grad_norm_(fast_weight_net.parameters(), self.config['clip_grad_norm'])
grad_head = fast_weight_net.get_head_grads()
grad_bert = fast_weight_net.get_bert_grads()
loss = loss.item()
elif mode == "validate" or mode == "test":
grad_head, grad_bert = None, None
results = {'accuracy': query_acc, 'loss': loss}
##resetting require grad
torch.set_grad_enabled(True)
return grad_head, grad_bert, results
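# --- Hedged sketch (illustrative only, not part of the trainer) ---
# The first-order MAML meta-update performed in outer_loop, reduced to its
# core. `task_grads` stands for the per-domain query gradients collected from
# the adapted fast-weight copies (grad_head / grad_bert above); `meta_model`
# and `meta_opt` are the shared model and its optimizer. All names here are
# assumptions for illustration.
def _fomaml_meta_step(meta_model, task_grads, meta_opt):
    meta_opt.zero_grad()
    for grads in task_grads:
        # Accumulate each task's query gradient directly into the shared
        # parameters (first-order approximation: no second derivatives).
        for p, g in zip(meta_model.parameters(), grads):
            p.grad = g.detach().clone() if p.grad is None else p.grad + g.detach()
    meta_opt.step()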
def concatenate_remaining_batches_and_chunk(self, iterator, index, num_chunks):
x = torch.chunk(torch.cat([batch["x"] for batch in iterator[0:index] + iterator[index+1:]]), num_chunks)
masks = torch.chunk(torch.cat([batch["masks"] for batch in iterator[0:index] + iterator[index+1:]]), num_chunks)
labels = torch.chunk(torch.cat([batch["labels"] for batch in iterator[0:index]+ iterator[index+1:]]), num_chunks)
### domains not used in the script
domains = None
batches | |
('ipv4', {'ptype': 'inet:ipv4', 'ro': 1,
'doc': 'The IPv4 address used in the URL (e.g., http://1.2.3.4/page.html).'}),
('fqdn', {'ptype': 'inet:fqdn', 'ro': 1,
'doc': 'The fqdn used in the URL (e.g., http://www.woot.com/page.html).'}),
('port', {'ptype': 'inet:port', 'ro': 1,
'doc': 'The port of the URL. URLs prefixed with http will be set to port 80 and '
'URLs prefixed with https will be set to port 443 unless otherwise specified.'}),
('user', {'ptype': 'inet:user', 'ro': 1,
'doc': 'The optional username used to access the URL.'}),
('passwd', {'ptype': 'inet:passwd', 'ro': 1,
'doc': 'The optional password used to access the URL.'}),
]),
('inet:urlredir', {}, [
('src', {'ptype': 'inet:url', 'ro': 1, 'req': 1,
'doc': 'The original/source URL before redirect'}),
('src:fqdn', {'ptype': 'inet:fqdn',
'doc': 'The FQDN within the src URL (if present)'}),
('dst', {'ptype': 'inet:url', 'ro': 1, 'req': 1,
'doc': 'The redirected/destination URL'}),
('dst:fqdn', {'ptype': 'inet:fqdn',
'doc': 'The FQDN within the dst URL (if present)'}),
('seen:min', {'ptype': 'time:min',
'doc': 'The earliest known time the URL redirect was active.'}),
('seen:max', {'ptype': 'time:max',
'doc': 'The last known time the URL redirect was active.'}),
]),
('inet:urlfile', {'ptype': 'inet:urlfile'}, [
('url', {'ptype': 'inet:url', 'ro': 1, 'req': 1,
'doc': 'The URL where the file was hosted.'}),
('file', {'ptype': 'file:bytes', 'ro': 1, 'req': 1,
'doc': 'The file that was hosted at the URL.'}),
('seen:min', {'ptype': 'time:min',
'doc': 'The earliest known time the file was hosted at the URL.'}),
('seen:max', {'ptype': 'time:max',
'doc': 'The most recent known time the file was hosted at the URL.'}),
]),
('inet:asn', {'ptype': 'inet:asn'}, (
('name', {'ptype': 'str:lwr', 'defval': '??',
'doc': 'The name of the organization currently responsible for the ASN.'}),
('owner', {'ptype': 'ou:org',
'doc': 'The guid of the organization currently responsible for the ASN.'}),
)),
('inet:asnet4', {'ptype': 'inet:asnet4'}, [
('asn', {'ptype': 'inet:asn', 'ro': 1,
'doc': 'The Autonomous System Number (ASN) of the netblock.'}),
('net4', {'ptype': 'inet:net4', 'ro': 1,
'doc': 'The IPv4 address range assigned to the ASN.'}),
('net4:min', {'ptype': 'inet:ipv4', 'ro': 1,
'doc': 'The first IPv4 in the range assigned to the ASN.'}),
('net4:max', {'ptype': 'inet:ipv4', 'ro': 1,
'doc': 'The last IPv4 in the range assigned to the ASN.'}),
]),
('inet:user', {'ptype': 'inet:user'}, []),
('inet:group', {'ptype': 'inet:group'}, []),
('inet:passwd', {'ptype': 'inet:passwd'}, [
('md5', {'ptype': 'hash:md5', 'ro': 1,
'doc': 'The computed MD5 hash of the password.'}),
('sha1', {'ptype': 'hash:sha1', 'ro': 1,
'doc': 'The computed SHA1 hash of the password.'}),
('sha256', {'ptype': 'hash:sha256', 'ro': 1,
'doc': 'The computed SHA256 hash of the password.'}),
]),
('inet:mac', {'ptype': 'inet:mac'}, [
('vendor', {'ptype': 'str', 'defval': '??',
'doc': 'The vendor associated with the 24-bit prefix of a MAC address.'}),
]),
('inet:fqdn', {'ptype': 'inet:fqdn'}, [
('sfx', {'ptype': 'bool', 'defval': 0,
'doc': 'Set to 1 if the fqdn is considered a "suffix".'}),
('zone', {'ptype': 'bool', 'defval': 0,
'doc': 'Set to 1 if the fqdn is a logical zone (under a suffix).'}),
('domain', {'ptype': 'inet:fqdn', 'ro': 1,
'doc': 'The parent fqdn of the fqdn.'}),
('host', {'ptype': 'str', 'ro': 1,
'doc': 'The host portion of the fqdn.'}),
('created', {'ptype': 'time:min',
'doc': 'The earliest known registration (creation) date for the fqdn.'}),
('updated', {'ptype': 'time:max',
'doc': 'The last known updated date for the fqdn.'}),
('expires', {'ptype': 'time:max',
'doc': 'The current expiration date for the fqdn.'}),
]),
('inet:email', {'ptype': 'inet:email'}, [
('fqdn', {'ptype': 'inet:fqdn', 'ro': 1,
'doc': 'The domain of the email address.'}),
('user', {'ptype': 'inet:user', 'ro': 1,
'doc': 'The username of the email address.'}),
]),
('inet:tcp4', {'ptype': 'inet:srv4'}, [
('ipv4', {'ptype': 'inet:ipv4', 'ro': 1,
'doc': 'The IPv4 address of the TCP server.'}),
('port', {'ptype': 'inet:port', 'ro': 1,
'doc': 'The port of the IPv4 TCP server.'}),
]),
('inet:udp4', {'ptype': 'inet:srv4'}, [
('ipv4', {'ptype': 'inet:ipv4', 'ro': 1,
'doc': 'The IPv4 address of the UDP server.'}),
('port', {'ptype': 'inet:port', 'ro': 1,
'doc': 'The port of the IPv4 UDP server.'}),
]),
('inet:tcp6', {'ptype': 'inet:srv6'}, [
('ipv6', {'ptype': 'inet:ipv6', 'ro': 1,
'doc': 'The IPv6 address of the TCP server.'}),
('port', {'ptype': 'inet:port', 'ro': 1,
'doc': 'The port of the IPv6 TCP server.'}),
]),
('inet:udp6', {'ptype': 'inet:srv6'}, [
('ipv6', {'ptype': 'inet:ipv6', 'ro': 1,
'doc': 'The IPv6 address of the UDP server.'}),
('port', {'ptype': 'inet:port', 'ro': 1,
'doc': 'The port of the IPv6 UDP server.'}),
]),
('inet:flow', {}, (
('time', {'ptype': 'time',
'doc': 'The time the network connection was initiated.'}),
('duration', {'ptype': 'int',
'doc': 'The duration of the flow in seconds.'}),
('dst:host', {'ptype': 'it:host',
'doc': 'The guid of the destination host.'}),
('dst:proc', {'ptype': 'it:exec:proc',
'doc': 'The guid of the destination process.'}),
('dst:exe', {'ptype': 'file:bytes',
'doc': 'The file (executable) that received the connection.'}),
('dst:txbytes', {'ptype': 'int',
'doc': 'The number of bytes sent by the destination host / process / file.'}),
('dst:tcp4', {'ptype': 'inet:tcp4',
'doc': 'The destination IPv4 address / port for an IPv4 TCP connection.'}),
('dst:tcp4:ipv4', {'ptype': 'inet:ipv4', 'ro': 1,
'doc': 'The destination IPv4 address.'}),
('dst:tcp4:port', {'ptype': 'inet:port', 'ro': 1,
'doc': 'The destination IPv4 port.'}),
('dst:udp4', {'ptype': 'inet:udp4',
'doc': 'The destination IPv4 address / port for an IPv4 UDP connection.'}),
('dst:udp4:ipv4', {'ptype': 'inet:ipv4', 'ro': 1,
'doc': 'The destination IPv4 address.'}),
('dst:udp4:port', {'ptype': 'inet:port', 'ro': 1,
'doc': 'The destination IPv4 port.'}),
('dst:tcp6', {'ptype': 'inet:tcp6',
'doc': 'The destination IPv6 address / port for an IPv6 TCP connection.'}),
('dst:tcp6:ipv6', {'ptype': 'inet:ipv6', 'ro': 1,
'doc': 'The destination IPv6 address.'}),
('dst:tcp6:port', {'ptype': 'inet:port', 'ro': 1,
'doc': 'The destination IPv6 port.'}),
('dst:udp6', {'ptype': 'inet:udp6',
'doc': 'The destination IPv6 address / port for an IPv6 UDP connection.'}),
('dst:udp6:ipv6', {'ptype': 'inet:ipv6', 'ro': 1,
'doc': 'The destination IPv6 address.'}),
('dst:udp6:port', {'ptype': 'inet:port', 'ro': 1,
'doc': 'The destination IPv6 port.'}),
('src:host', {'ptype': 'it:host',
'doc': 'The guid of the source host.'}),
('src:proc', {'ptype': 'it:exec:proc',
'doc': 'The guid of the source process.'}),
('src:exe', {'ptype': 'file:bytes',
'doc': 'The file (executable) that created the connection.'}),
('src:txbytes', {'ptype': 'int',
'doc': 'The number of bytes sent by the source host / process / file.'}),
('src:tcp4', {'ptype': 'inet:tcp4',
'doc': 'The source IPv4 address / port for an IPv4 TCP connection.'}),
('src:tcp4:ipv4', {'ptype': 'inet:ipv4', 'ro': 1,
'doc': 'The source IPv4 address.'}),
('src:tcp4:port', {'ptype': 'inet:port', 'ro': 1,
'doc': 'The source IPv4 port.'}),
('src:udp4', {'ptype': 'inet:udp4',
'doc': 'The source IPv4 address / port for an IPv4 UDP connection.'}),
('src:udp4:ipv4', {'ptype': 'inet:ipv4', 'ro': 1,
'doc': 'The source IPv4 address.'}),
('src:udp4:port', {'ptype': 'inet:port', 'ro': 1,
'doc': 'The source IPv4 port.'}),
('src:tcp6', {'ptype': 'inet:tcp6',
'doc': 'The source IPv6 address / port for an IPv6 TCP connection.'}),
('src:tcp6:ipv6', {'ptype': 'inet:ipv6', 'ro': 1,
'doc': 'The source IPv6 address.'}),
('src:tcp6:port', {'ptype': 'inet:port', 'ro': 1,
'doc': 'The source IPv6 port.'}),
('src:udp6', {'ptype': 'inet:udp6',
'doc': 'The source IPv6 address / port for an IPv6 UDP connection.'}),
('src:udp6:ipv6', {'ptype': 'inet:ipv6', 'ro': 1,
'doc': 'The source IPv6 address.'}),
('src:udp6:port', {'ptype': 'inet:port', 'ro': 1,
'doc': 'The source IPv6 port.'}),
('from', {'ptype': 'guid',
'doc': 'The ingest source file/iden. Used for reparsing.'}),
)),
('inet:iface', {}, (
('latlong', {'ptype': 'geo:latlong',
'doc': 'The last known latitude/longitude for the node'}),
('host', {'ptype': 'it:host',
'doc': 'The guid of the host the interface is associated with.'}),
('type', {'ptype': 'str:lwr',
'doc': 'The free-form interface type'}),
('mac', {'ptype': 'inet:mac',
'doc': 'The ethernet (MAC) address of the interface.'}),
('ipv4', {'ptype': 'inet:ipv4',
'doc': 'The IPv4 address of the interface.'}),
('ipv6', {'ptype': 'inet:ipv6',
'doc': 'The IPv6 address of the interface.'}),
('phone', {'ptype': 'tel:phone',
'doc': 'The telephone number of the interface.'}),
('wifi:ssid', {'ptype': 'inet:wifi:ssid',
'doc': 'The wifi SSID of the interface.'}),
('wifi:bssid', {'ptype': 'inet:mac',
'doc': 'The wifi BSSID of the interface.'}),
('mob:imei', {'ptype': 'tel:mob:imei',
'doc': 'The IMEI of the interface.'}),
('mob:imsi', {'ptype': 'tel:mob:imsi',
'doc': 'The IMSI of the interface.'}),
)),
('inet:wifi:ap', {}, [
('ssid', {'ptype': 'inet:wifi:ssid',
'doc': 'The SSID for the wireless access point.'}),
('bssid', {'ptype': 'inet:wifi:ssid',
'doc': 'The BSSID for the wireless access point.'}),
]),
('inet:wifi:ssid', {}, []),
('inet:web:acct', {'ptype': 'inet:web:acct'}, [
('site', {'ptype': 'inet:fqdn', 'ro': 1,
'doc': 'The site or service associated with the account.'}),
('user', {'ptype': 'inet:user', 'ro': 1,
'doc': 'The unique identifier for the account (may be different from the common '
'name or display name).'}),
('url', {'ptype': 'inet:url',
'doc': 'The service provider URL where the account is hosted.'}),
('name', {'ptype': | |
<reponame>Webbah/sec-for-reinforcement-learning
import platform
import time
import gym_electric_motor as gem
import gym
import matplotlib.pyplot as plt
import numpy as np
import torch as th
from stable_baselines3 import DDPG
# imports net to define reward and executes script to register experiment
from stable_baselines3.common.noise import OrnsteinUhlenbeckActionNoise
from GEM.env.env_wrapper_GEM import FeatureWrapper, FeatureWrapper_pastVals, BaseWrapper
# from experiments.GEM.env.GEM_env import AppendLastActionWrapper
from GEM.util.config import cfg
from GEM.util.recorder_GEM import Recorder
from gym.wrappers import FlattenObservation
import gym_electric_motor as gem
from gym_electric_motor.reference_generators import MultipleReferenceGenerator, ConstReferenceGenerator, \
WienerProcessReferenceGenerator
from gym_electric_motor.visualization import MotorDashboard
from gym_electric_motor.visualization.motor_dashboard_plots import MeanEpisodeRewardPlot
from gym_electric_motor.physical_systems.mechanical_loads import ConstantSpeedLoad
from gym.core import Wrapper
from gym.spaces import Box, Tuple
from gym_electric_motor.constraints import SquaredConstraint
test_length = 10000
folder_name = cfg['STUDY_NAME']
node = platform.uname().node
# mongo_recorder = Recorder(database_name=folder_name)
mongo_recorder = Recorder(node=node,
database_name=folder_name) # store to port 12001 for ssh data to cyberdyne or locally as json to cfg[meas_data_folder]
Ki_ddpg_combi = 182
class AppendLastActionWrapper(Wrapper):
"""
The following environment considers the dead time in the real-world motor control systems.
The real-world system changes its state, while the agent simultaneously calculates the next action based on a
previously measured observation.
Therefore, for the agents it seems as if the applied action affects the environment with one step delay
(with a dead time of one time step).
As a measure of feature engineering we append the last selected action to the observation of each time step,
because this action will be the one that is active while the agent has to make the next decision.
"""
def __init__(self, environment):
super().__init__(environment)
# append the action space dimensions to the observation space dimensions
self.observation_space = Tuple((Box(
np.concatenate((environment.observation_space[0].low, environment.action_space.low)),
np.concatenate((environment.observation_space[0].high, environment.action_space.high))
), environment.observation_space[1]))
self.v_d_mess = []
self.v_q_mess = []
def step(self, action):
(state, ref), rew, term, info = self.env.step(action)
self.v_d_mess.append(np.float64(state[2]))
self.v_q_mess.append(np.float64(state[3]))
state = np.delete(state, [2, 3])
# extend the output state by the selected action
# state = np.concatenate((state, action))
return (state, ref), rew, term, info
def reset(self, **kwargs):
# extend the output state by zeros after reset
# no action can be appended yet, but the dimension must fit
# state = np.concatenate((state, np.zeros(self.env.action_space.shape)))
self.v_d_mess = []
self.v_q_mess = []
# set random reference values
self.env.reference_generator._sub_generators[0]._reference_value = np.random.uniform(-1, 0)
self.env.reference_generator._sub_generators[1]._reference_value = np.random.uniform(-1, 1)
state, ref = self.env.reset()
self.v_d_mess.append(np.float64(state[2]))
self.v_q_mess.append(np.float64(state[3]))
state = np.delete(state, [2, 3]) # remove vdq from state
return state, ref
class AppendLastActionWrapper_testsetting(AppendLastActionWrapper):
def __init__(self, environment, new_ref_d, new_ref_q, ref_change):
"""
new_ref_d/q must be lists of length test_steps/1000!
"""
super().__init__(environment)
self.step_number = 0
self.ref_count = 0
self.new_ref_d = new_ref_d
self.new_ref_q = new_ref_q
self.ref_change = ref_change
def step(self, action):
self.step_number += 1
if self.step_number % self.ref_change == 0:
self.ref_count += 1
self.env.reference_generator._sub_generators[0]._reference_value = self.new_ref_d[
self.ref_count] # np.random.uniform(-1, 0)
self.env.reference_generator._sub_generators[1]._reference_value = self.new_ref_q[
self.ref_count] # np.random.uniform(-1, 1)
(state, ref), rew, term, info = super().step(action)
return (state, ref), rew, term, info
def reset(self, **kwargs):
self.v_d_mess = []
self.v_q_mess = []
self.env.reference_generator._sub_generators[0]._reference_value = self.new_ref_d[
self.ref_count] # np.random.uniform(-1, 0)
self.env.reference_generator._sub_generators[1]._reference_value = self.new_ref_q[
self.ref_count] # np.random.uniform(-1, 1)
state, ref = self.env.reset()
self.v_d_mess.append(np.float64(state[2]))
self.v_q_mess.append(np.float64(state[3]))
state = np.delete(state, [2, 3]) # remove vdq from state
return state, ref
def experiment_fit_DDPG(learning_rate, gamma, use_gamma_in_rew, weight_scale, bias_scale, alpha_relu_actor,
batch_size,
actor_hidden_size, actor_number_layers, critic_hidden_size, critic_number_layers,
alpha_relu_critic,
noise_var, noise_theta, noise_var_min, noise_steps_annealing, error_exponent,
training_episode_length, buffer_size, # learning_starts,
tau, number_learning_steps, integrator_weight, antiwindup_weight,
penalty_I_weight, penalty_P_weight,
train_freq_type, train_freq, t_start_penalty_I, t_start_penalty_P, optimizer, n_trail,
number_past_vals=0):
if node not in cfg['lea_vpn_nodes']:
# assume we are on pc2
log_path = f'/scratch/hpc-prf-reinfl/weber/OMG/{folder_name}/{n_trail}/'
else:
log_path = f'{folder_name}/{n_trail}/'
####################################################################################################################
# GEM
# Define reference generators for both currents of the flux oriented dq frame
# d current reference is chosen to be constantly at zero to simplify this showcase scenario
d_generator = ConstReferenceGenerator('i_sd', 0)
# q current changes dynamically
q_generator = ConstReferenceGenerator('i_sq', 0)
# The MultipleReferenceGenerator allows to apply these references simultaneously
rg = MultipleReferenceGenerator([d_generator, q_generator])
# Set the electric parameters of the motor
motor_parameter = dict(
r_s=15e-3, l_d=0.37e-3, l_q=1.2e-3, psi_p=65.6e-3, p=3, j_rotor=0.06
)
# Change the motor operational limits (important when limit violations can terminate and reset the environment)
limit_values = dict(
i=160 * 1.41,
omega=12000 * np.pi / 30,
u=450
)
# Change the motor nominal values
nominal_values = {key: 0.7 * limit for key, limit in limit_values.items()}
# Create the environment
env_row = gem.make(
# Choose the permanent magnet synchronous motor with continuous-control-set
'DqCont-CC-PMSM-v0',
# Pass a class with extra parameters
visualization=MotorDashboard(
state_plots=['i_sq', 'i_sd'],
action_plots='all',
reward_plot=True,
additional_plots=[MeanEpisodeRewardPlot()]
),
# Set the mechanical load to have constant speed
load=ConstantSpeedLoad(omega_fixed=1000 * np.pi / 30),
# Define which numerical solver is to be used for the simulation
ode_solver='scipy.solve_ivp',
# Pass the previously defined reference generator
reference_generator=rg,
reward_function=dict(
# Set weighting of different addends of the reward function
reward_weights={'i_sq': 1, 'i_sd': 1},
# Exponent of the reward function
# Here we use a square root function
reward_power=0.5,
),
# Define which state variables are to be monitored concerning limit violations
# Here, only overcurrent will lead to termination
constraints=(),
# Consider converter dead time within the simulation
# This means that a given action will show effect only with one step delay
# This is realistic behavior of drive applications
converter=dict(
dead_time=True,
),
# Set the DC-link supply voltage
supply=dict(
u_nominal=400
),
motor=dict(
# Pass the previously defined motor parameters
motor_parameter=motor_parameter,
# Pass the updated motor limits and nominal values
limit_values=limit_values,
nominal_values=nominal_values,
),
# Define which states will be shown in the state observation (what we can "measure")
state_filter=['i_sd', 'i_sq', 'u_sd', 'u_sq'], # , 'epsilon'],
)
# Now we apply the wrapper defined at the beginning of this script
env_train = AppendLastActionWrapper(env_row)
# We flatten the observation (append the reference vector to the state vector such that
# the environment will output just a single vector with both information)
# This is necessary for compatibility with kerasRL2
env_train = FlattenObservation(env_train)
####################################################################################################################
if cfg['env_wrapper'] == 'past':
env = FeatureWrapper_pastVals(env_train, number_of_features=4 + number_past_vals * 2,
training_episode_length=training_episode_length,
recorder=mongo_recorder, n_trail=n_trail, integrator_weight=integrator_weight,
antiwindup_weight=antiwindup_weight, gamma=gamma,
penalty_I_weight=penalty_I_weight, penalty_P_weight=penalty_P_weight,
t_start_penalty_I=t_start_penalty_I, t_start_penalty_P=t_start_penalty_P,
number_learing_steps=number_learning_steps, number_past_vals=number_past_vals)
elif cfg['env_wrapper'] == 'no-I-term':
env = BaseWrapper(env_train, number_of_features=2 + number_past_vals * 2,
training_episode_length=training_episode_length,
recorder=mongo_recorder, n_trail=n_trail, gamma=gamma,
number_learing_steps=number_learning_steps, number_past_vals=number_past_vals)
else:
env = FeatureWrapper(env_train, number_of_features=11, training_episode_length=training_episode_length,
recorder=mongo_recorder, n_trail=n_trail, integrator_weight=integrator_weight,
antiwindup_weight=antiwindup_weight, gamma=gamma,
penalty_I_weight=penalty_I_weight, penalty_P_weight=penalty_P_weight,
t_start_penalty_I=t_start_penalty_I, t_start_penalty_P=t_start_penalty_P,
number_learing_steps=number_learning_steps) # , use_past_vals=True, number_past_vals=30)
if cfg['env_wrapper'] not in ['no-I-term', 'I-controller']:
env.action_space = gym.spaces.Box(low=np.full(4, -1), high=np.full(4, 1))
n_actions = env.action_space.shape[-1]
noise_var = noise_var # 20#0.2
noise_theta = noise_theta # 50 # stiffness of OU
action_noise = OrnsteinUhlenbeckActionNoise(mean=np.zeros(n_actions), theta=noise_theta * np.ones(n_actions),
sigma=noise_var * np.ones(n_actions), dt=1e-4)
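# --- Hedged note (illustrative, not from the original script) ---
# A discretized Ornstein-Uhlenbeck process like the action noise above follows
#   x_{t+1} = x_t + theta * (mu - x_t) * dt + sigma * sqrt(dt) * N(0, 1),
# so noise_theta is the pull-back strength toward the mean (its "stiffness")
# and noise_var sets the diffusion scale sigma.
def _ou_step(x, mu, theta, sigma, dt=1e-4):
    return x + theta * (mu - x) * dt + sigma * np.sqrt(dt) * np.random.normal(size=np.shape(x))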
print(optimizer)
if optimizer == 'SGD':
used_optimzer = th.optim.SGD
elif optimizer == 'RMSprop':
used_optimzer = th.optim.RMSprop
# elif optimizer == 'LBFGS':
# needs in step additional argument
# used_optimzer = th.optim.LBFGS
else:
used_optimzer = th.optim.Adam
policy_kwargs = dict(activation_fn=th.nn.LeakyReLU, net_arch=dict(pi=[actor_hidden_size] * actor_number_layers
, qf=[critic_hidden_size] * critic_number_layers),
optimizer_class=used_optimzer)
model = DDPG('MlpPolicy', env, verbose=1, tensorboard_log=log_path,
# model = myDDPG('MlpPolicy', env, verbose=1, tensorboard_log=f'{folder_name}/{n_trail}/',
policy_kwargs=policy_kwargs,
learning_rate=learning_rate, buffer_size=buffer_size,
# learning_starts=int(learning_starts * training_episode_length),
batch_size=batch_size, tau=tau, gamma=gamma, action_noise=action_noise,
train_freq=(train_freq, train_freq_type), gradient_steps=- 1,
optimize_memory_usage=False,
create_eval_env=False, seed=None, device='auto', _init_setup_model=True)
# Adjust network -> maybe change to Costume net like https://stable-baselines3.readthedocs.io/en/master/guide/custom_policy.html
# adn scale weights and biases
count = 0
for kk in range(actor_number_layers + 1):
model.actor.mu._modules[str(count)].weight.data = model.actor.mu._modules[str(count)].weight.data * weight_scale
model.actor_target.mu._modules[str(count)].weight.data = model.actor_target.mu._modules[
str(count)].weight.data * weight_scale
model.actor.mu._modules[str(count)].bias.data = model.actor.mu._modules[str(count)].bias.data * bias_scale
model.actor_target.mu._modules[str(count)].bias.data = model.actor.mu._modules[
str(count)].bias.data * bias_scale
if kk < actor_number_layers:
model.actor.mu._modules[str(count + 1)].negative_slope = alpha_relu_actor
model.actor_target.mu._modules[str(count + 1)].negative_slope = alpha_relu_actor
count = count + 2
count = 0
for kk in range(critic_number_layers + 1):
if kk < critic_number_layers:
model.critic.qf0._modules[str(count + 1)].negative_slope = alpha_relu_critic
model.critic_target.qf0._modules[str(count + 1)].negative_slope = alpha_relu_critic
count = count + 2
if cfg['env_wrapper'] not in ['no-I-term', 'I-controller']:
env.action_space = gym.spaces.Box(low=np.full(2, -1), high=np.full(2, 1))
# start training
model.learn(total_timesteps=number_learning_steps)
# Log Train-info data
train_data = {"Name": "After_Training",
"Mean_eps_reward": env.reward_episode_mean,
"Trial number": n_trail,
"Database name": folder_name,
"Sum_eps_reward": env.get_episode_rewards()
}
mongo_recorder.save_to_json('Trial_number_' + n_trail, train_data)
model.save(log_path + f'model.zip')
####### Run Test #########
return_sum = 0.0
limit_exceeded_in_test = False
limit_exceeded_penalty = 0
# Refs created with https://github.com/max-schenke/DESSCA
i_d_refs = [-0.5718831392706399, -0.11155989917458595, -0.8444233463864655, -0.19260596846844558,
-0.48986342384598824,
-0.08540375784816023, -0.6983532259844449, -0.3409346664209051, -0.9852563901175903,
-0.019589794863040133,
-0.3057052318511703, -0.010759738176742362, -0.7264074671265837, -0.7003086456948622,
-0.5205127876117279,
-0.0035883351279332454, -0.24656126983332566, -0.7385108721382044, -0.8711444379999949,
-0.5322348905850738,
-0.16443631057073907, -0.26335305001172343, -0.8339056052207534, -0.9840272325710973,
-0.00099042967089491,
-0.4276376345373605, -0.4392085789117308, -0.29885945214798054, -0.3526213053117569,
-0.15544590095444902,
-0.38133627476871246, -0.0007362814213280888, -0.13766159578201825, -0.6998437778149555,
-0.02941718441323049,
-0.14911600490992516, -0.8711008909873345, -0.5803207691231205, -0.3908087722441505,
-0.30424273624679143,
-0.6032911651567467, -0.6097285170523984, -0.23000688296189783, -0.009050042083058152,
-0.13450601442490417,
-0.8117883556545268, -0.7542685229940803, -0.4627233964160423, -0.23713451030767801, -0.580302276033946]
i_q_refs | |
from functools import singledispatchmethod
from contextlib import contextmanager
from itertools import islice
import ir
import type_resolution as tr
# https://docs.python.org/3/reference/expressions.html#operator-precedence
binop_ordering = {"**": 1, "*": 3, "@": 3, "/": 3, "//": 3, "%": 3, "+": 4, "-": 4, "<<": 5, ">>": 5, "&": 6,
"^": 7, "|": 8, "in": 9, "not in": 9, "<": 9, "<=": 9, ">": 9, ">=": 9, "!=": 9,
"==": 9}
# Todo: Given the boolean refactoring, not should probably derive from BoolOp, similar to TRUTH.
# Todo: Pretty printer should provide most of the infrastructure for C code gen. For plain C, most of the statement
# structure used is the same, so this should be handled as much as possible via different expression visitors.
# I'll also need to check for differences in operator precedence.
# Note, python docs don't specify truth precedence, but it should match logical "not"
scalar_pretty_types = {tr.Int32: "numpy.int32",
tr.Int64: "numpy.int64",
tr.Float32: "numpy.float32",
tr.Float64: "numpy.float64",
tr.Predicate32: "32_bit_mask",
tr.Predicate64: "64_bit_mask",
tr.BoolType: "numpy.bool_"}
def get_pretty_scalar_type(t):
scalar_type = scalar_pretty_types.get(t)
return scalar_type
def get_pretty_type(t):
if isinstance(t, ir.ArrayType):
scalar_type = get_pretty_scalar_type(t.dtype)
assert scalar_type is not None
pt = f"numpy.ndarray[{scalar_type}]"
else:
pt = get_pretty_scalar_type(t)
return pt
class pretty_formatter:
"""
The pretty printer is intended as a way to show the state of the IR in a way that resembles a
typical source representation.
Note: This will parenthesize some expressions that are unsupported yet accepted by plain Python.
It's designed this way, because the alternative is more confusing.
"""
def __call__(self, node):
expr = self.visit(node)
return expr
def parenthesized(self, expr):
expr = self.visit(expr)
return f"({expr})"
@singledispatchmethod
def visit(self, node):
msg = f"No method to format node: {node}."
raise NotImplementedError(msg)
@visit.register
def _(self, node: ir.SingleDimRef):
expr = self.visit(node.base)
if node.dim == ir.Zero:
return f"len({expr})"
else:
dim = self.visit(node.dim)
return f"{expr}.shape[{dim}]"
@visit.register
def _(self, node: ir.MaxReduction):
args = ", ".join(self.visit(arg) for arg in node.subexprs)
return f"max({args})"
@visit.register
def _(self, node: ir.Max):
args = ", ".join(self.visit(arg) for arg in node.subexprs)
return f"max({args})"
@visit.register
def _(self, node: ir.MinReduction):
args = ", ".join(self.visit(arg) for arg in node.subexprs)
return f"min({args})"
@visit.register
def _(self, node: ir.Min):
args = ", ".join(self.visit(arg) for arg in node.subexprs)
return f"min({args})"
@visit.register
def _(self, node: ir.Select):
(predicate, on_true, on_false) = (self.parenthesized(term)
if isinstance(term, (ir.Select, ir.Tuple)) else self.visit(term)
for term in (node.predicate, node.on_true, node.on_false))
expr = f"{on_true} if {predicate} else {on_false}"
return expr
@visit.register
def _(self, node: ir.BoolConst):
return str(node.value)
@visit.register
def _(self, node: ir.IntConst):
return str(node.value)
@visit.register
def _(self, node: ir.FloatConst):
return str(node.value)
@visit.register
def _(self, node: ir.StringConst):
return f"\"{node.value}\""
@visit.register
def _(self, node: ir.BinOp):
op = node.op
if node.in_place:
left = self.visit(node.left)
right = self.visit(node.right)
else:
op_ordering = binop_ordering[op]
terms = []
for term in (node.left, node.right):
if isinstance(term, ir.BinOp):
if op_ordering < binop_ordering[term.op]:
term = self.parenthesized(term)
else:
term = self.visit(term)
elif isinstance(term, ir.UnaryOp):
if op == "**":
term = self.parenthesized(term)
elif isinstance(term, (ir.BoolOp, ir.CompareOp, ir.Select, ir.Tuple)):
term = self.parenthesized(term)
else:
term = self.visit(term)
terms.append(term)
left, right = terms
expr = f"{left} {op} {right}"
return expr
@visit.register
def _(self, node: ir.CompareOp):
terms = []
for term in (node.left, node.right):
if isinstance(term, (ir.BoolOp, ir.CompareOp, ir.Select, ir.Tuple)):
term = self.parenthesized(term)
else:
term = self.visit(term)
terms.append(term)
left, right = terms
expr = f"{left} {node.op} {right}"
return expr
@visit.register
def _(self, node: ir.AND):
# Todo: move rebalancing to external pass
groups = []
start = 0
count = len(node.operands)
assert count > 1
while start < count:
# group things like a < b < c
# whether they arose from ir changes or input source
first = node.operands[start]
if isinstance(first, ir.CompareOp):
cmp_op = first.op
group = [first.left, first.right]
prev_rhs = first.right
for operand in islice(node.operands, start+1, None):
if not (isinstance(operand, ir.CompareOp) and cmp_op == operand.op and operand.left == prev_rhs):
break
group.append(operand.right)
prev_rhs = operand.right
groups.append((group, cmp_op))
start += len(group) - 1
else:
# something else, anded
cmp_op = None
groups.append((first, cmp_op))
start += 1
operands = []
expr = None
for group, cmp_op in groups:
if cmp_op is not None:
op = f" {cmp_op} "
chain = op.join(self.visit(suboperand) for suboperand in group)
operands.append(chain)
else:
# single expression
assert isinstance(group, ir.ValueRef)
if isinstance(group, (ir.AND, ir.OR, ir.Select, ir.Tuple)):
formatted = self.parenthesized(group)
else:
formatted = self.visit(group)
operands.append(formatted)
expr = "and ".join(operand for operand in operands)
assert expr is not None
return expr
@visit.register
def _(self, node: ir.OR):
operands = []
for operand in node.operands:
if isinstance(operand, (ir.Select, ir.Tuple)):
formatted = self.parenthesized(operand)
else:
formatted = self.visit(operand)
operands.append(formatted)
expr = " or ".join(operand for operand in operands)
return expr
@visit.register
def _(self, node: ir.NOT):
formatted = self.visit(node.operand)
if isinstance(node.operand, (ir.AND, ir.OR, ir.Select)):
formatted = self.parenthesized(formatted)
expr = f"not {formatted}"
return expr
@visit.register
def _(self, node: ir.TRUTH):
formatted = self.visit(node.operand)
if node.constant:
if not isinstance(node.operand, ir.BoolConst):
# We don't distinguish between bools and predicates here in
# truth testing, since Python doesn't have any notion of
# predicate types.
formatted = f"bool({formatted})"
return formatted
@visit.register
def _(self, node: ir.NameRef):
expr = node.name
return expr
@visit.register
def _(self, node: ir.Call):
func_name = self.visit(node.func)
args = ", ".join(self.visit(arg) for arg in node.args)
func = f"{func_name}({args})"
return func
@visit.register
def _(self, node: ir.Reversed):
return f"reversed({self.visit(node.iterable)})"
@visit.register
def _(self, node: ir.Subscript):
s = f"{self.visit(node.value)}[{self.visit(node.slice)}]"
return s
@visit.register
def _(self, node: ir.AffineSeq):
# Initial input source may not be easily discernible,
# print as range
start = self.visit(node.start)
stop = self.visit(node.stop) if node.stop is not None else "None"
step = self.visit(node.step)
return f"range({start}, {stop}, {step})"
@visit.register
def _(self, node: ir.Tuple):
elements = []
for e in node.elements:
# parenthesize nested tuples, leave everything else
if isinstance(e, ir.Tuple):
expr = self.parenthesized(e)
else:
expr = self.visit(e)
elements.append(expr)
s = ", ".join(e for e in elements)
return s
@visit.register
def _(self, node: ir.UnaryOp):
op = node.op
if isinstance(node.operand, ir.BinOp) and not node.operand.in_place:
if node.operand.op != "**":
operand = self.parenthesized(node.operand)
else:
operand = self.visit(node.operand)
elif isinstance(node.operand, (ir.UnaryOp, ir.BoolOp, ir.Select)):
# if we have an unfolded double unary expression such as --,
# '--expr' would be correct but it's visually jarring. Adding
# unnecessary parentheses makes it '-(-expr)'.
operand = self.parenthesized(node.operand)
else:
operand = self.visit(node.operand)
expr = f"{op}({operand})"
return expr
@visit.register
def _(self, node: ir.Enumerate):
iterable = self.visit(node.iterable)
if node.start == ir.Zero:
expr = f"enumerate({iterable})"
else:
start = self.visit(node.start)
expr = f"enumerate({iterable}, {start})"
return expr
@visit.register
def _(self, node: ir.Zip):
exprs = []
for elem in node.elements:
formatted = self.visit(elem)
if isinstance(elem, ir.Tuple):
# This nesting is unsupported elsewhere, but this
# would be a confusing place to throw an error.
formatted = f"({formatted})"
exprs.append(formatted)
# handle case of enumerate
expr = ", ".join(e for e in exprs)
expr = f"zip({expr})"
return expr
class pretty_printer:
"""
Pretty prints tree.
Inserts pass on empty if statements or for/while loops.
"""
def __init__(self, single_indent=" ", print_annotations=True):
self.indent = ""
self._increment = len(single_indent)
self._single_indent = single_indent
self.print_annotations = print_annotations
self.format = pretty_formatter()
self.symbols = None
def __call__(self, tree, symbols):
assert self.indent == ""
with(self.symbols_loaded(symbols)):
self.visit(tree)
@contextmanager
def symbols_loaded(self, symbols):
assert self.symbols is None
self.symbols = symbols
yield
self.symbols = None
@contextmanager
def indented(self):
self.indent = f"{self.indent}{self._single_indent}"
yield
self.indent = self.indent[:-self._increment]
def print_line(self, as_str):
line = f"{self.indent}{as_str}"
print(line)
def make_elif(self, node: ir.IfElse):
assert isinstance(node, ir.IfElse)
test = self.format(node.test)
on_true = f"elif {test}:"
self.print_line(on_true)
with self.indented():
if node.if_branch:
self.visit(node.if_branch)
else:
self.print_line("pass")
if node.else_branch:
# Make another elif if all conditions are met
if len(node.else_branch) == 1:
first, = node.else_branch
if isinstance(first, ir.IfElse):
self.make_elif(first)
return
self.print_line("else:")
with self.indented():
self.visit(node.else_branch)
@singledispatchmethod
def visit(self, node):
msg = f"No method to pretty print node {node}."
raise NotImplementedError(msg)
@visit.register
def _(self, node: ir.ModImport):
module = self.format(node.module)
module_alias = self.format(node.as_name)
if module == module_alias:
as_str = f"import {module}"
else:
as_str = f"import {module} as {module_alias}"
self.print_line(as_str)
@visit.register
def _(self, node: ir.NameImport):
module = self.format(node.module)
imported_name = self.format(node.name)
import_alias = self.format(node.as_name)
if imported_name == import_alias:
as_str = f"from {module} import {imported_name}"
else:
as_str = f"from {module} import {imported_name} as {import_alias}"
self.print_line(as_str)
@visit.register
def _(self, node: ir.Return):
if node.value is None:
self.print_line("return")
else:
expr = | |
= not runlevel[level]
elif (args[0] == 'return' and runlevel[level] == True):
return(result)
elif (args[0] == "endif"):
ifcount = ifcount - 1
if (ifcount < level):
level = level - 1
if (level < 0):
print("Macro: Unmatched if/endif pairs.")
return ''
else:
if (runlevel[level] == True):
if (result == ""):
result = subvars(line,_vars)
else:
result = result + "\n" + subvars(line,_vars)
return(result)
def subvars(script,_vars):
if (_vars == None): return script
remainder = script
result = ""
done = False
while done == False:
bv = remainder.find("{")
if (bv == -1):
done = True
continue
ev = remainder.find("}")
if (ev == -1):
done = True
continue
result = result + remainder[:bv]
vvar = remainder[bv+1:ev]
remainder = remainder[ev+1:]
upper = False
allvars = False
if (vvar[0] == "^"):
upper = True
vvar = vvar[1:]
elif (vvar[0] == "*"):
vvar = vvar[1:]
allvars = True
else:
pass
if (vvar in _vars):
if (upper == True):
items = _vars[vvar].upper()
elif (allvars == True):
try:
iVar = int(vvar)
except:
return(script)
items = ""
sVar = str(iVar)
while sVar in _vars:
if (items == ""):
items = _vars[sVar]
else:
items = items + " " + _vars[sVar]
iVar = iVar + 1
sVar = str(iVar)
else:
items = _vars[vvar]
else:
if (allvars == True):
items = ""
else:
items = "null"
result = result + items
if (remainder != ""):
result = result + remainder
return(result)
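# --- Hedged usage sketch (illustrative only, not part of the original module) ---
# Based on reading subvars above: {name} is replaced from _vars, and {^name}
# upper-cases the substituted value.
def _demo_subvars():
    result = subvars("hello {name} / {^name}", {"name": "world"})
    # Expected (per the logic above): "hello world / WORLD"
    return result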
def sqlTimer(hdbc, runtime, inSQL):
count = 0
t_end = time.time() + runtime
while time.time() < t_end:
try:
stmt = ibm_db.exec_immediate(hdbc,inSQL)
if (stmt == False):
db2_error(flag(["-q","-quiet"]))
return(-1)
ibm_db.free_result(stmt)
except Exception as err:
db2_error(False)
return(-1)
count = count + 1
return(count)
def splitargs(arguments):
import types
# Strip the string and remove the ( and ) characters if they are at the beginning and end of the string
results = []
step1 = arguments.strip()
if (len(step1) == 0): return(results) # Not much to do here - no args found
if (step1[0] == '('):
if (step1[-1:] == ')'):
step2 = step1[1:-1]
step2 = step2.strip()
else:
step2 = step1
else:
step2 = step1
# Now we have a string without brackets. Start scanning for commas
quoteCH = ""
pos = 0
arg = ""
args = []
while pos < len(step2):
ch = step2[pos]
if (quoteCH == ""): # Are we in a quote?
if (ch in ('"',"'")): # Check to see if we are starting a quote
quoteCH = ch
arg = arg + ch
pos += 1
elif (ch == ","): # Are we at the end of a parameter?
arg = arg.strip()
args.append(arg)
arg = ""
inarg = False
pos += 1
else: # Continue collecting the string
arg = arg + ch
pos += 1
else:
if (ch == quoteCH): # Are we at the end of a quote?
arg = arg + ch # Add the quote to the string
pos += 1 # Increment past the quote
quoteCH = "" # Stop quote checking (maybe!)
else:
pos += 1
arg = arg + ch
if (quoteCH != ""): # So we didn't end our string
arg = arg.strip()
args.append(arg)
elif (arg != ""): # Something left over as an argument
arg = arg.strip()
args.append(arg)
else:
pass
results = []
for arg in args:
result = []
if (len(arg) > 0):
if (arg[0] in ('"',"'")):
value = arg[1:-1]
isString = True
isNumber = False
else:
isString = False
isNumber = False
try:
value = eval(arg)
if (type(value) == int):
isNumber = True
elif (isinstance(value,float) == True):
isNumber = True
else:
value = arg
except:
value = arg
else:
value = ""
isString = False
isNumber = False
result = [value,isString,isNumber]
results.append(result)
return results
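# --- Hedged usage sketch (illustrative only, not part of the original module) ---
# splitargs parses a parenthesized, comma-separated argument string into
# [value, isString, isNumber] triples.
def _demo_splitargs():
    parsed = splitargs('(1, "two", 3.0)')
    # Expected (per the logic above):
    # [[1, False, True], ['two', True, False], [3.0, False, True]]
    return parsed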
def sqlParser(sqlin,local_ns):
sql_cmd = ""
encoded_sql = sqlin
firstCommand = "(?:^\s*)([a-zA-Z]+)(?:\s+.*|$)"
findFirst = re.match(firstCommand,sqlin)
if (findFirst == None): # We did not find a match so we just return the empty string
return sql_cmd, encoded_sql
cmd = findFirst.group(1)
sql_cmd = cmd.upper()
#
# Scan the input string looking for variables in the format :var. If no : is found just return.
# Var must be alpha+number+_ to be valid
#
if (':' not in sqlin): # A quick check to see if parameters are in here, but not fool-proof!
return sql_cmd, encoded_sql
inVar = False
inQuote = ""
varName = ""
encoded_sql = ""
STRING = 0
NUMBER = 1
LIST = 2
RAW = 3
for ch in sqlin:
if (inVar == True): # We are collecting the name of a variable
if (ch.upper() in "@_ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789[]"):
varName = varName + ch
continue
else:
if (varName == ""):
encoded_sql = encoded_sql + ":"
elif (varName[0] in ('[',']')):
encoded_sql = encoded_sql + ":" + varName
else:
if (ch == '.'): # If the variable name is stopped by a period, assume no quotes are used
flag_quotes = False
else:
flag_quotes = True
varValue, varType = getContents(varName,flag_quotes,local_ns)
if (varValue == None):
encoded_sql = encoded_sql + ":" + varName
else:
if (varType == STRING):
encoded_sql = encoded_sql + varValue
elif (varType == NUMBER):
encoded_sql = encoded_sql + str(varValue)
elif (varType == RAW):
encoded_sql = encoded_sql + varValue
elif (varType == LIST):
start = True
for v in varValue:
if (start == False):
encoded_sql = encoded_sql + ","
if (isinstance(v,int) == True): # Integer value
encoded_sql = encoded_sql + str(v)
elif (isinstance(v,float) == True):
encoded_sql = encoded_sql + str(v)
else:
flag_quotes = True
try:
if (v.find('0x') == 0): # Just guessing this is a hex value at beginning
encoded_sql = encoded_sql + v
else:
encoded_sql = encoded_sql + addquotes(v,flag_quotes) # String
except:
encoded_sql = encoded_sql + addquotes(str(v),flag_quotes)
start = False
encoded_sql = encoded_sql + ch
varName = ""
inVar = False
elif (inQuote != ""):
encoded_sql = encoded_sql + ch
if (ch == inQuote): inQuote = ""
elif (ch in ("'",'"')):
encoded_sql = encoded_sql + ch
inQuote = ch
elif (ch == ":"): # This might be a variable
varName = ""
inVar = True
else:
encoded_sql = encoded_sql + ch
if (inVar == True):
varValue, varType = getContents(varName,True,local_ns) # We assume the end of a line is quoted
if (varValue == None):
encoded_sql = encoded_sql + ":" + varName
else:
if (varType == STRING):
encoded_sql = encoded_sql + varValue
elif (varType == NUMBER):
encoded_sql = encoded_sql + str(varValue)
elif (varType == LIST):
flag_quotes = True
start = True
for v in varValue:
if (start == False):
encoded_sql = encoded_sql + ","
if (isinstance(v,int) == True): # Integer value
encoded_sql = encoded_sql + str(v)
elif (isinstance(v,float) == True):
encoded_sql = encoded_sql + str(v)
else:
try:
if (v.find('0x') == 0): # Just guessing this is a hex value
encoded_sql = encoded_sql + v
else:
encoded_sql = encoded_sql + addquotes(v,flag_quotes) # String
except:
encoded_sql = encoded_sql + addquotes(str(v),flag_quotes)
start = False
return sql_cmd, encoded_sql
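# --- Hedged usage sketch (illustrative only, not part of the original module) ---
# sqlParser returns the upper-cased leading keyword and the statement with
# :variables substituted from the supplied namespace.
def _demo_sqlParser():
    cmd, encoded = sqlParser("select * from employee where empno = :empno", {"empno": 10})
    # Expected (per the logic above):
    # cmd == "SELECT", encoded == "select * from employee where empno = 10"
    return cmd, encoded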
def getContents(varName,flag_quotes,local_ns):
#
# Get the contents of the variable name that is passed to the routine. Only simple
# variables are checked, i.e. arrays and lists are not parsed
#
STRING = 0
NUMBER = 1
LIST = 2
RAW = 3
DICT = 4
try:
value = eval(varName,None,local_ns) # globals()[varName] # eval(varName)
except:
return(None,STRING)
if (isinstance(value,dict) == True): # Check to see if this is JSON dictionary
return(addquotes(value,flag_quotes),STRING)
elif(isinstance(value,list) == True): # List - tricky
return(value,LIST)
elif (isinstance(value,int) == True): # Integer value
return(value,NUMBER)
elif (isinstance(value,float) == True): # Float value
return(value,NUMBER)
else:
try:
# The pattern needs to be in the first position (0 in Python terms)
if (value.find('0x') == | |
in deployed_releases and
r.namespace in deployed_releases[r.release] and
r.version != deployed_releases[r.release][r.namespace]):
self._dbapi.kube_app_chart_release_update(
app.id, r.release, r.namespace,
{'version': deployed_releases[r.release][r.namespace]})
except Exception as e:
LOG.exception(e)
raise exception.SysinvException(_(
"Failed to update/record application %s releases' versions." % str(e)))
def _create_app_releases_version(self, app_name, app_charts):
"""Create application helm releases records
This method creates/initializes the helm releases objects for the application.
:param app_name: the name of the application
:param app_charts: the charts of the application
"""
kube_app = self._dbapi.kube_app_get(app_name)
app_releases = self._dbapi.kube_app_chart_release_get_all(kube_app.id)
if app_releases:
return
for chart in app_charts:
values = {
'release': chart.release,
'version': 0,
'namespace': chart.namespace,
'app_id': kube_app.id
}
try:
self._dbapi.kube_app_chart_release_create(values)
except Exception as e:
LOG.exception(e)
def _get_metadata_value(self, app, key_or_keys, default=None,
enforce_type=False):
"""
Get application metadata value from nested dictionary.
If a default value is specified and enforce_type is True, the value
returned is required to be of the same type as the default.
:param app: application object
:param key_or_keys: single key string, or list of keys
:param default: default value (and type)
:param enforce_type: enforce type check between return value and default
:return: The value from nested dictionary D[key1][key2][...] = value
assuming all keys are present, otherwise default.
"""
value = default
if isinstance(key_or_keys, list):
keys = key_or_keys
else:
keys = [key_or_keys]
metadata_file = os.path.join(app.inst_path,
constants.APP_METADATA_FILE)
if os.path.exists(metadata_file) and os.path.getsize(metadata_file) > 0:
with io.open(metadata_file, 'r', encoding='utf-8') as f:
try:
metadata = yaml.safe_load(f) or {}
value = cutils.deep_get(metadata, keys, default=default)
# TODO(jgauld): There is inconsistent treatment of YAML
# boolean between the module ruamel.yaml and module yaml
# in utils.py, health.py, and kube_app.py. Until these
# usage variants are unified, leave the following check
# as optional.
if enforce_type and default is not None and value is not None:
default_type = type(default)
if type(value) != default_type:
raise exception.SysinvException(_(
"Invalid {}: {} {!r} expected value is {}."
"".format(metadata_file, '.'.join(keys),
value, default_type)))
except KeyError:
# metadata file does not have the key
pass
LOG.debug('_get_metadata_value: metadata_file=%s, keys=%s, default=%r, value=%r',
metadata_file, keys, default, value)
return value
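# Illustrative sketch (hypothetical metadata file): for an application metadata.yaml containing
#   upgrades:
#     auto_update: true
# _get_metadata_value(app, ['upgrades', 'auto_update'], default=False) would return True via
# cutils.deep_get, while a missing key path simply falls back to the supplied default.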
def _preserve_user_overrides(self, from_app, to_app):
"""Dump user overrides
In the scenario of updating application to a new version, this
method is used to copy the user overrides from the old version
to the new version.
:param from_app: application object that application updating from
:param to_app: application object that application updating to
"""
to_db_app = self._dbapi.kube_app_get(to_app.name)
from_db_app = self._dbapi.kube_app_get_inactive_by_name_version(
from_app.name, version=from_app.version)
from_app_db_charts = self._dbapi.helm_override_get_all(from_db_app.id)
from_app_charts = {}
for chart in from_app_db_charts:
from_app_charts.setdefault(chart.name, {}).update(
{chart.namespace: chart.user_overrides})
for chart in to_app.charts:
if (chart.name in from_app_charts and
chart.namespace in from_app_charts[chart.name] and
from_app_charts[chart.name][chart.namespace]):
user_overrides = {'user_overrides': from_app_charts[chart.name][chart.namespace]}
try:
self._dbapi.helm_override_update(
app_id=to_db_app.id, name=chart.name,
namespace=chart.namespace, values=user_overrides)
except exception.HelmOverrideNotFound:
# Unexpected
values = {
'name': chart.name,
'namespace': chart.namespace,
'app_id': to_db_app.id
}
values.update(user_overrides)
self._dbapi.helm_override_create(values=values)
LOG.info("Application %s (%s) will apply the user overrides for"
"Chart %s from version %s" % (to_app.name, to_app.version,
chart.name, from_app.version))
@retry(retry_on_exception=lambda x: isinstance(x, exception.ApplicationApplyFailure),
stop_max_attempt_number=5, wait_fixed=30 * 1000)
def _make_armada_request_with_monitor(self, app, request, overrides_str=None):
"""Initiate armada request with monitoring
This method delegates the armada request to docker helper and starts
a monitoring thread to persist status and progress along the way.
:param app: application data object
:param request: type of request (apply or delete)
:param overrides_str: list of overrides in string format to be applied
"""
def _get_armada_log_stats(pattern, logfile):
"""
TODO(tngo): In the absence of an Armada API that provides the current
status of an apply/delete manifest operation, the progress is derived
from specific log entries extracted from the execution logs. This
inner method is to be replaced with an official API call when
it becomes available.
"""
if pattern == ROLLBACK_SEARCH_PATTERN:
print_chart = '{print $10}'
else:
print_chart = '{print $NF}'
p1 = subprocess.Popen(['grep', pattern, logfile],
stdout=subprocess.PIPE)
p2 = subprocess.Popen(['awk', print_chart], stdin=p1.stdout,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True)
p1.stdout.close()
result, err = p2.communicate()
if result:
# Scrape information from command output, example 'validate' log:
# 2020-03-26 09:47:58.594 1105 INFO armada.cli [-] Successfully validated:\
# ('/tmp/manifests/oidc-auth-apps/1.0-0/oidc-auth-apps-manifest.yaml',)
# Strip out ANSI color code that might be in the text stream
r = re.compile("\x1b\[[0-9;]*m")
result = r.sub('', result).replace(',', '')
matches = result.split()
num_chart_processed = len(matches)
last_chart_processed = matches[num_chart_processed - 1]
if '=' in last_chart_processed:
last_chart_processed = last_chart_processed.split('=')[1]
return last_chart_processed, num_chart_processed
return None, None
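# Illustrative parse (hypothetical log content): for a matched apply-log line whose last
# whitespace-separated field is "chart=oidc-client", awk's '{print $NF}' emits that field,
# the '=' split keeps "oidc-client" as last_chart_processed, and num_chart_processed counts
# one entry per matched line after ANSI colour codes and commas are stripped.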
def _check_progress(monitor_flag, app, pattern, logfile):
""" Progress monitoring task, to be run in a separate thread """
LOG.info("Starting progress monitoring thread for app %s" % app.name)
try:
adjust = self._get_metadata_value(app,
constants.APP_METADATA_APPLY_PROGRESS_ADJUST,
constants.APP_METADATA_APPLY_PROGRESS_ADJUST_DEFAULT_VALUE)
with Timeout(INSTALLATION_TIMEOUT,
exception.KubeAppProgressMonitorTimeout()):
charts_count = len(app.charts)
while True:
try:
monitor_flag.get_nowait()
LOG.debug("Received monitor stop signal for %s" % app.name)
monitor_flag.task_done()
break
except queue.Empty:
last, num = _get_armada_log_stats(pattern, logfile)
if last:
if charts_count == 0:
percent = 100
else:
tadjust = 0
if app.system_app:
tadjust = adjust
if tadjust >= charts_count:
LOG.error("Application metadata key '{}'"
"has an invalid value {} (too few charts)".
format(constants.APP_METADATA_APPLY_PROGRESS_ADJUST,
adjust))
tadjust = 0
percent = round((float(num) / # pylint: disable=W1619
(charts_count - tadjust)) * 100)
progress_str = "processing chart: {}, overall completion: {}%".\
format(last, percent)
if app.progress != progress_str:
LOG.info("%s" % progress_str)
self._update_app_status(app, new_progress=progress_str)
greenthread.sleep(1)
except Exception as e:
# timeout or subprocess error
LOG.exception(e)
finally:
LOG.info("Exiting progress monitoring thread for app %s" % app.name)
def _cleanup_armada_log(location, app_name, request):
"""Cleanup the oldest armada log if reach the maximum"""
list_of_logs = [os.path.join(location, f) for f in os.listdir(location)
if re.match(r'{}-{}.*.log'.format(app_name, request), f)]
try:
if len(list_of_logs) > ARMADA_LOG_MAX:
oldest_logfile = min(list_of_logs, key=os.path.getctime)
os.remove(oldest_logfile)
except OSError:
pass
# Body of the outer method
# This check is for cases where an abort is issued while
# this function waits between retries. In such cases, it
# should just return False
if AppOperator.is_app_aborted(app.name):
return False
# TODO(dvoicule): Maybe pass a hook from outside to this function
# need to change perform_app_recover/rollback/update to support this.
# All the other hooks store the operation of the app itself (apply,
# remove, delete, upload, update) yet this hook stores the armada
# operation in the operation field. This is inconsistent behavior and
# should be changed the moment a hook from outside is passed here.
lifecycle_hook_info = LifecycleHookInfo()
lifecycle_hook_info.operation = request
lifecycle_hook_info.relative_timing = constants.APP_LIFECYCLE_TIMING_PRE
lifecycle_hook_info.lifecycle_type = constants.APP_LIFECYCLE_TYPE_ARMADA_REQUEST
self.app_lifecycle_actions(None, None, app._kube_app, lifecycle_hook_info)
mqueue = queue.Queue()
rc = True
logname = time.strftime(app.name + '-' + request + '_%Y-%m-%d-%H-%M-%S.log')
logfile = ARMADA_HOST_LOG_LOCATION + '/' + logname
if request == constants.APP_APPLY_OP:
pattern = APPLY_SEARCH_PATTERN
elif request == constants.APP_DELETE_OP:
pattern = DELETE_SEARCH_PATTERN
else:
pattern = ROLLBACK_SEARCH_PATTERN
monitor = greenthread.spawn_after(1, _check_progress, mqueue, app,
pattern, logfile)
rc = self._armada.make_armada_request(request, app.armada_service_mfile,
overrides_str, app.releases, logfile)
_cleanup_armada_log(ARMADA_HOST_LOG_LOCATION, app.name, request)
mqueue.put('done')
monitor.kill()
# Here a manifest retry can be performed by throwing ApplicationApplyFailure
lifecycle_hook_info.relative_timing = constants.APP_LIFECYCLE_TIMING_POST
lifecycle_hook_info.lifecycle_type = constants.APP_LIFECYCLE_TYPE_ARMADA_REQUEST
lifecycle_hook_info[LifecycleConstants.EXTRA][LifecycleConstants.RETURN_CODE] = rc
self.app_lifecycle_actions(None, None, app._kube_app, lifecycle_hook_info)
return rc
def _record_auto_update_failed_versions(self, from_app, to_app):
"""Record the new application version in the old application
metadata when the new application fails to be updated"""
new_metadata = copy.deepcopy(from_app.app_metadata)
try:
failed_versions = new_metadata[constants.APP_METADATA_UPGRADES][
constants.APP_METADATA_FAILED_VERSIONS]
if to_app.version not in failed_versions:
failed_versions.append(to_app.version)
except KeyError:
new_metadata.setdefault(constants.APP_METADATA_UPGRADES, {}).update(
{constants.APP_METADATA_FAILED_VERSIONS: [to_app.version]})
with self._lock:
from_app.update_app_metadata(new_metadata)
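# Illustrative sketch (hypothetical metadata keys): if from_app.app_metadata is
#   {'upgrades': {'failed_versions': ['1.0-1']}}
# and to_app.version is '1.0-2', the recorded metadata becomes
#   {'upgrades': {'failed_versions': ['1.0-1', '1.0-2']}}
# (assuming APP_METADATA_UPGRADES == 'upgrades' and APP_METADATA_FAILED_VERSIONS == 'failed_versions').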
def _perform_app_recover(self, old_app, new_app, armada_process_required=True):
"""Perform application recover
This recover method is triggered when application update failed, it cleans
up the files/data for the new application and recover helm charts for the
old application. If the armada process is required, armada apply is invoked
to recover the application releases for the old version.
The app status will be populated to "apply-failed" if recover fails so that
the user can re-apply app.
:param old_app: the application object that application recovering to
:param new_app: the application object that application recovering from
:param armada_process_required: boolean, whether armada operation is needed
"""
def _activate_old_app_plugins(old_app):
# Enable the old app plugins.
self._plugins.activate_plugins(old_app)
LOG.info("Starting recover Application %s from version: %s to version: %s" %
(old_app.name, new_app.version, old_app.version))
# Ensure that the failed app plugins are disabled prior to cleanup
self._plugins.deactivate_plugins(new_app)
self._update_app_status(
old_app, constants.APP_RECOVER_IN_PROGRESS,
constants.APP_PROGRESS_UPDATE_ABORTED.format(old_app.version, new_app.version) +
constants.APP_PROGRESS_RECOVER_IN_PROGRESS.format(old_app.version))
# Set the status for the new app to inactive
self._update_app_status(new_app, constants.APP_INACTIVE_STATE)
try:
self._cleanup(new_app, app_dir=False)
self._utils._patch_report_app_dependencies(
new_app.name + '-' + new_app.version)
self._dbapi.kube_app_destroy(new_app.name,
version=new_app.version,
inactive=True)
LOG.info("Recovering helm charts for Application %s (%s)..."
% (old_app.name, old_app.version))
self._update_app_status(old_app,
new_progress=constants.APP_PROGRESS_RECOVER_CHARTS)
with self._lock:
self._upload_helm_charts(old_app)
rc = True
if | |
# -*- coding: utf-8 -*-
import datetime
import math
import os.path
import re
import time
import urllib
import urlparse
import uuid as uuid_
import bleach
from django.contrib.humanize.templatetags import humanize
from django.template import defaultfilters
from django.utils.html import escape, strip_tags
from django.utils.safestring import mark_safe
from jinja2 import Markup, contextfunction
from apps.activity.redis_models import DailyFreeStickersActivity
from apps.activity.jinja_tags import activity_stream_item
from apps.canvas_auth.models import User
from apps.tags.models import get_tracked_tags
from canvas import stickers, knobs, util, economy, template
from canvas.cache_patterns import CachedCall
from canvas.details_models import CommentDetails, ContentDetails
from canvas.models import Category, Comment, Visibility, SpecialCategory, CommentFlag
from canvas.templatetags.jinja_base import (global_tag, filter_tag, render_jinja_to_string,
jinja_context_tag, update_context)
from canvas.templatetags.helpers import TemplateComment
from canvas.view_helpers import top_timeperiod_urls
from services import Services
from django.conf import settings
register = template.Library()
DEFAULT_AVATAR_COUNT = 6
@filter_tag
def pluralize(condition, argstr=",s"):
one, many = argstr.split(',')
try:
condition = len(condition)
except TypeError:
pass
return one if condition == 1 else many
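# Illustrative usage: "comment" + pluralize(n) renders "comment" when n == 1 and "comments"
# otherwise; pluralize(items, "y,ies") covers irregular endings, and sequences are measured by len().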
@global_tag
def comment_is_pinned(comment_details, viewer):
"""
`viewer` should be request.user - the user viewing the comment.
Returns "pinned" if pinned, else None.
"""
if not isinstance(comment_details, CommentDetails):
comment_details = CommentDetails(comment_details)
return 'pinned' if comment_details.is_pinned(viewer) else None
@global_tag
def timestamp():
return Markup(str(time.time()))
##############################
# Used for content dimensions.
def _wh(content_data, ratio):
return dict((dimension, int(content_data[dimension] / ratio),)
for dimension in ['width', 'height'])
def _fit_inside(fit_w, fit_h, content_data):
rw = float(content_data['width']) / fit_w
rh = float(content_data['height']) / fit_h
ratio = max(1, rw, rh)
return _wh(content_data, ratio)
def _fit_height(fit_h, content_data):
ratio = float(content_data['height']) / fit_h
return _wh(content_data, ratio)
def _fit_width(fit_w, content_data):
ratio = float(content_data['width']) / fit_w
return _wh(content_data, ratio)
def _size_attrs(content_data, fit_w=None, fit_h=None):
if fit_w is None and fit_h is None:
# Do nothing
return {'width': content_data['width'], 'height': content_data['height']}
elif fit_w is None:
return _fit_height(fit_h, content_data)
elif fit_h is None:
return _fit_width(fit_w, content_data)
else:
return _fit_inside(fit_w, fit_h, content_data)
###############################
# For sizing content assuming
# a container to catch overflow
# Must supply fit_w and fit_h
###############################
def _size_attrs_overflow(content_data, fit_w, fit_h):
rw = float(content_data['width']) / fit_w
rh = float(content_data['height']) / fit_h
ratio = min(rw, rh)
return _wh(content_data, ratio)
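# Worked example (illustrative): for data = {'width': 800, 'height': 600},
#   _size_attrs(data, fit_w=400)            -> {'width': 400, 'height': 300}  (scale down by 2.0)
#   _size_attrs(data, fit_w=400, fit_h=400) -> {'width': 400, 'height': 300}  (limiting ratio is max(1, 2.0, 1.5))
#   _size_attrs_overflow(data, 400, 400)    -> {'width': 533, 'height': 400}  (ratio min(2.0, 1.5); overflow is clipped by the container)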
@global_tag
def content_fit(comment_or_content, fit_w, fit_h, *image_types, **kwargs):
"""
Renders an html snippet (includes an img tag) for `content_details`.
`image_types`:
Could be "column", "stream" for example. Image types are tried in order,
and the first one that exists for an image is used.
"""
lazy = kwargs.pop('lazy', False)
if hasattr(comment_or_content, 'reply_content'):
details = comment_or_content.reply_content
visibility = comment_or_content.visibility
else:
details = comment_or_content
visibility = True
if not isinstance(details, ContentDetails):
details = ContentDetails(details)
for image_type in image_types:
try:
data = details[image_type]
url = details.get_absolute_url_for_image_type(image_type)
wh = _size_attrs(data, fit_w, fit_h)
except (AttributeError, KeyError, IndexError,):
# For now, we just show blank images when the content is missing, until we fix the thumbnailer.
url = str(type)  # NOTE: effectively a broken src; renders as a blank image until the thumbnailer is fixed
wh = {'width': 100, 'height': 100}
else:
break
attribs = {
'url': url,
'width': wh['width'],
'height': wh['height'],
# @TODO: Note the @comment here is a dict. The current version of the details does not store
# is_visible. Hence, we're duplicating the is_visible code here.
#TODO just wrap with CommentDetails and call is_visible on it.
'alt': '',
'classes': '',
'extra': '',
}
if lazy:
attribs.update({
'url': '/static/img/0.gif',
'extra': 'data-original="{}"'.format(url),
'classes': 'lazy',
})
tag = u'<img class="ugc_img {classes}" src="{url}" width="{width}" height="{height}" alt="{alt}" {extra}>'.format(**attribs)
return Markup(tag)
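# Illustrative behaviour: with lazy=True the rendered <img> points at the static placeholder
# /static/img/0.gif, carries the real URL in a data-original attribute and gains the "lazy"
# class, so a client-side lazy-loading script can swap the real image in later.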
@global_tag
def content(comment_or_content, *image_types, **kwargs):
"""
Renders an html snippet (includes an img tag) for `content_details`.
`image_types`:
Could be "column", "stream" for example. Image types are tried in order,
and the first one that exists for an image is used.
"""
return content_fit(comment_or_content, None, None, *image_types, **kwargs)
def _avatar(url, size, fit_w, fit_h, username):
wh = _size_attrs_overflow(size, fit_w, fit_h)
return Markup("""
<img class="user_avatar ugc_img" src="%(url)s" width="%(width)s" height="%(height)s" alt="%(alt)s">
""" % {
'url': url,
'size_width': fit_w,
'size_height': fit_h,
'width': wh['width'],
'height': wh['height'],
'alt': username + "'s profile image",
})
def _default_avatar_url(key):
idx = int(key) % DEFAULT_AVATAR_COUNT
return '/static/img/default_avatar_{}.png'.format(idx)
@global_tag
def avatar_url(user):
""" DO NOT CALL THIS FOR ANONYMOUS POSTS. """
key = 'column'
avatar, = CachedCall.multicall([
User.avatar_by_username(user.username),
])
if key in avatar:
url = avatar[key]['name']
else:
key = user.id if user.is_authenticated() else 0
url = _default_avatar_url(key)
return url
@global_tag
def header_avatar(username):
return _square_avatar(username, 'tiny_square', 22, 22)
@global_tag
def tiny_avatar(username):
return _square_avatar(username, 'tiny_square', 30, 30)
@global_tag
def small_avatar(username):
return _square_avatar(username, 'small_square', 50, 50)
@global_tag
def big_avatar(username):
return _square_avatar(username, 'medium_square', 150, 150)
def _square_avatar(username, image_type, width, height):
avatar, = CachedCall.multicall([
User.avatar_by_username(username),
])
if image_type in avatar:
url = avatar[image_type]['name']
size = avatar[image_type]
else:
key = reduce(lambda acc, x: ord(x) + acc, username, 0)
url = _default_avatar_url(key)
size = {'width':width, 'height':height}
return _avatar(url, size, width, height, username)
@global_tag
def content_size(content_details, image_type):
data = content_details[image_type]
try:
wh = _size_attrs(data)
except KeyError:
wh = {'width': 0, 'height': 0}
return wh
@global_tag
def mobile_tile(comment):
return Markup(render_jinja_to_string("mobile/tile.html", {'comment': comment}))
def _relative_timestamp(timestamp):
""" Only returns the humanized time delta, without the HTML which relative_timestamp adds. """
now = Services.time.time()
delta_s = now - timestamp
units = ((60, 'a moment ago'),
(60*60, 'minute'),
(60*60*24, 'hour'),
(60*60*24*7, 'day'),
(60*60*24*30, 'week'),
(60*60*24*365, 'month'),
('infinity', 'year'),)
for index, t in enumerate(units):
cutoff, unit = t
if cutoff == 'infinity' or delta_s < cutoff:
if index == 0:
return unit
val = int(delta_s // units[index - 1][0]) # Divide by the previous cutoff.
return str(val) + ' ' + unit + pluralize(val) + ' ago'
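# Illustrative outputs (assuming Services.time.time() is "now"):
#   30 seconds ago  -> 'a moment ago'
#   90 seconds ago  -> '1 minute ago'
#   3 days ago      -> '3 days ago'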
@global_tag
def relative_timestamp(timestamp):
human_time = _relative_timestamp(timestamp)
return Markup(u'<span class="rel-timestamp" data-timestamp="{0}">{1}</span>'.format(timestamp, human_time))
_GROUP_LINK_PATTERN = re.compile(r'((?:^|\s))\#(\w{3,})')
_SAFE_URI_CHARS = '~@#$&()*!+=:;,.?/\'' # Equivalent to javascript's encodeURI
@global_tag
def ugc_text(text, max_length=knobs.POST_TEXT_TRUNCATION_LENGTH,
should_oembed=False, linkify=True, truncation_markup=u'…'):
# When using this, you can't specify keyword arguments (until Django 1.3), use them as positional args.
# They serve only to provide defaults.
def _linkify(text, href):
safe_href = urllib.quote(unicode(href).encode('utf-8'), safe=_SAFE_URI_CHARS)
attrs = {
'href': safe_href,
'title': safe_href,
'target': '_blank',
}
return u'<a {0}>{1}</a>'.format(u' '.join(u'{0}="{1}"'.format(key, val) for key,val in attrs.iteritems()),
text)
def linkify_group(match):
#TODO make group 404s show a page asking if you want to create the group.
group = match.group(2)
return match.group(1) + _linkify(u'#' + group, u'/x/' + group)
# Remove <tag> <shenanigans/>
text = strip_tags(text)
# Escape any HTML entities.
text = escape(text)
if len(text) > max_length:
#TODO split on a word.
text = text[:max_length] + truncation_markup
# Convert newlines into <br> tags.
text = text.replace('\n', '<br>\n')
# Linkify links.
if linkify:
text = bleach.linkify(text, nofollow=True, target='_blank')
# Replace all #foobar forms with http://example.com/x/foobar,
# but not '#', '#1', '#1-ocle', et cetera.
text = _GROUP_LINK_PATTERN.sub(linkify_group, text)
#TODO linkify @names
# Escape Django template tokens for jinja_adapter funkiness. Nasty. Delete once we move all over to Jinja.
text = text.replace('{{', '{' * 2)
text = text.replace('}}', '}' * 2)
text = text.replace('{%', '{%')
text = text.replace('%}', '%}')
text = text.replace('{#', '{#')
text = text.replace('#}', '#}')
ugc_text_id = 'ugc_text_' + uuid_.uuid4().hex
span_classes = 'ugc_text'
return Markup(u'<span id="{0}" class="{1}">{2}</span>'.format(ugc_text_id, span_classes, text))
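# Illustrative behaviour: in a post containing "check out #drawing", the "#drawing" token is
# rewritten to an anchor pointing at /x/drawing (tags shorter than 3 word characters, e.g. "#1",
# are left untouched), bare URLs are linkified by bleach with nofollow and target="_blank",
# and text longer than max_length is truncated with the supplied truncation_markup.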
@filter_tag
@global_tag
def to_json(things):
"""
If the model/object defines a "to_client" then call it first.
This way objects can implement the "to_client" interface to return a dictionary representation of themselves to
be serialized as json.
"""
return util.js_safety(util.client_dumps(things), django=False)
@global_tag
def uuid():
return uuid_.uuid4().hex
@filter_tag
def to_escaped_json(things):
return util.js_safety(util.client_dumps(things), django=False, escape_html=True)
@filter_tag
def to_escaped_comment_details_json(comment_details):
""" Escapes Django template language tokens too. """
ret = to_escaped_json(comment_details)
return ret
def image_tile(context, tile, render_options, nav_category, template='comment/explore_tile.html'):
request = context['request']
user = request.user
if not hasattr(tile, 'comment'):
raise TypeError("A tile should be an instance of TileDetails or LastReplyTileDetails; something that "
"has a .comment. Received a %s" % type(tile))
comment = tile.comment
new_activity = False
if render_options.get('show_activity') and render_options.get('show_pins') and comment.last_reply_time:
pinned_lastviewed = user.kv.pinned_lastviewed.get() or 1303426306. # When we launched pinned.
new_activity = pinned_lastviewed < float(comment.last_reply_time)
def nav_aware(url):
default_category = Category.get_default(request.user).details()
# If we don't have a nav_category (user / about page) it should be the default for this user.
_nav_category = nav_category or default_category
if _nav_category['name'] != default_category['name']:
url += "?nav=%s" % _nav_category['name']
return url
#TODO probably change TemplateComment to TemplateTile instead.
# This is weird - we pass tile to the template too, but tile.comment != comment now.
comment = TemplateComment(tile.comment, request_context=context)
float_sticker = False
sticky_text = getattr(tile, 'text', None)
return render_jinja_to_string(template, locals())
@register.context_tag
def render_jinja(context, jinja_template_name):
return render_jinja_to_string(jinja_template_name, context)
@jinja_context_tag
def disposition_tile(context, tile_renderer, tile, render_options={}, nav_category={}):
from apps.monster.jinja_tags import monster_image_tile
tile_renderer = {
'monster_image_tiles' : monster_image_tile,
'explore_tiles' : explore_tile,
}[tile_renderer]
render_options['image_type'] = 'explore_column'
return tile_renderer(context, tile, render_options, nav_category)
@register.context_tag
def explore_tiles(context, tiles, render_options, nav_category={}):
return Markup(mark_safe(u''.join(explore_tile(context, tile) for tile in tiles)))
@jinja_context_tag
def explore_tile(context, tile, render_options={}, nav_category={}):
comment = TemplateComment(tile.comment, request_context=context)
sticky_text = getattr(tile, 'text', None)
viewer_sticker = tile.viewer_sticker
viewer = context['request'].user
remixer_count = comment.thread.author_count
return Markup(render_jinja_to_string('/comment/explore_tile.html', locals()))
@jinja_context_tag
def jinja_thread_reply(context, comment_details, | |
-5): (0, 1),
(8, 31, 3, -4): (0, 1),
(8, 31, 3, -3): (0, 1),
(8, 31, 3, -2): (0, 0),
(8, 31, 3, -1): (-1, -1),
(8, 31, 3, 0): (0, 1),
(8, 31, 3, 1): (0, 1),
(8, 31, 3, 2): (0, 1),
(8, 31, 3, 3): (0, 1),
(8, 31, 3, 4): (0, 1),
(8, 31, 3, 5): (0, 1),
(8, 31, 4, -5): (0, 1),
(8, 31, 4, -4): (0, 1),
(8, 31, 4, -3): (0, 1),
(8, 31, 4, -2): (0, 0),
(8, 31, 4, -1): (-1, -1),
(8, 31, 4, 0): (0, 1),
(8, 31, 4, 1): (0, 1),
(8, 31, 4, 2): (0, 1),
(8, 31, 4, 3): (0, 1),
(8, 31, 4, 4): (0, 1),
(8, 31, 4, 5): (0, 1),
(8, 31, 5, -5): (0, 1),
(8, 31, 5, -4): (0, 1),
(8, 31, 5, -3): (0, 1),
(8, 31, 5, -2): (0, 0),
(8, 31, 5, -1): (-1, -1),
(8, 31, 5, 0): (0, 1),
(8, 31, 5, 1): (0, 1),
(8, 31, 5, 2): (0, 1),
(8, 31, 5, 3): (0, 1),
(8, 31, 5, 4): (0, 1),
(8, 31, 5, 5): (0, 1),
(8, 32, -5, -5): (0, 1),
(8, 32, -5, -4): (0, 1),
(8, 32, -5, -3): (0, 1),
(8, 32, -5, -2): (0, 1),
(8, 32, -5, -1): (0, 1),
(8, 32, -5, 0): (0, 1),
(8, 32, -5, 1): (0, 0),
(8, 32, -5, 2): (-1, -1),
(8, 32, -5, 3): (-1, -1),
(8, 32, -5, 4): (-1, -1),
(8, 32, -5, 5): (0, 1),
(8, 32, -4, -5): (0, 1),
(8, 32, -4, -4): (0, 1),
(8, 32, -4, -3): (-1, 1),
(8, 32, -4, -2): (-1, 1),
(8, 32, -4, -1): (0, 1),
(8, 32, -4, 0): (0, 1),
(8, 32, -4, 1): (0, 0),
(8, 32, -4, 2): (-1, -1),
(8, 32, -4, 3): (-1, -1),
(8, 32, -4, 4): (-1, -1),
(8, 32, -4, 5): (-1, 1),
(8, 32, -3, -5): (0, 1),
(8, 32, -3, -4): (0, 1),
(8, 32, -3, -3): (0, 1),
(8, 32, -3, -2): (-1, 1),
(8, 32, -3, -1): (-1, 1),
(8, 32, -3, 0): (-1, 1),
(8, 32, -3, 1): (-1, 0),
(8, 32, -3, 2): (-1, -1),
(8, 32, -3, 3): (-1, -1),
(8, 32, -3, 4): (-1, -1),
(8, 32, -3, 5): (-1, 1),
(8, 32, -2, -5): (-1, 1),
(8, 32, -2, -4): (-1, 1),
(8, 32, -2, -3): (-1, 1),
(8, 32, -2, -2): (-1, 1),
(8, 32, -2, -1): (-1, 1),
(8, 32, -2, 0): (-1, 1),
(8, 32, -2, 1): (-1, 0),
(8, 32, -2, 2): (-1, -1),
(8, 32, -2, 3): (-1, -1),
(8, 32, -2, 4): (-1, -1),
(8, 32, -2, 5): (-1, 1),
(8, 32, -1, -5): (-1, 1),
(8, 32, -1, -4): (-1, 1),
(8, 32, -1, -3): (-1, 1),
(8, 32, -1, -2): (-1, 1),
(8, 32, -1, -1): (-1, 1),
(8, 32, -1, 0): (-1, 0),
(8, 32, -1, 1): (-1, -1),
(8, 32, -1, 2): (-1, -1),
(8, 32, -1, 3): (-1, -1),
(8, 32, -1, 4): (-1, -1),
(8, 32, -1, 5): (-1, 1),
(8, 32, 0, -5): (1, 1),
(8, 32, 0, -4): (1, 1),
(8, 32, 0, -3): (1, 1),
(8, 32, 0, -2): (-1, 1),
(8, 32, 0, -1): (-1, 1),
(8, 32, 0, 0): (-1, 0),
(8, 32, 0, 1): (-1, -1),
(8, 32, 0, 2): (-1, -1),
(8, 32, 0, 3): (-1, -1),
(8, 32, 0, 4): (-1, 1),
(8, 32, 0, 5): (-1, 1),
(8, 32, 1, -5): (1, 1),
(8, 32, 1, -4): (1, 1),
(8, 32, 1, -3): (1, 0),
(8, 32, 1, -2): (-1, 1),
(8, 32, 1, -1): (-1, 1),
(8, 32, 1, 0): (-1, 1),
(8, 32, 1, 1): (-1, 0),
(8, 32, 1, 2): (-1, -1),
(8, 32, 1, 3): (-1, 1),
(8, 32, 1, 4): (-1, 1),
(8, 32, 1, 5): (-1, 1),
(8, 32, 2, -5): (0, 1),
(8, 32, 2, -4): (0, 1),
(8, 32, 2, -3): (0, 0),
(8, 32, 2, -2): (0, -1),
(8, 32, 2, -1): (0, 1),
(8, 32, 2, 0): (0, 1),
(8, 32, 2, 1): (0, 1),
(8, 32, 2, 2): (0, 1),
(8, 32, 2, 3): (0, 1),
(8, 32, 2, 4): (0, 1),
(8, 32, 2, 5): (0, 1),
(8, 32, 3, -5): (0, 1),
(8, 32, 3, -4): (0, 1),
(8, 32, 3, -3): (0, 0),
(8, 32, 3, -2): (-1, -1),
(8, 32, 3, -1): (0, 1),
(8, 32, 3, 0): (0, 1),
(8, 32, 3, 1): (0, 1),
(8, 32, 3, 2): (0, 1),
(8, 32, 3, 3): (0, 1),
(8, 32, 3, 4): (0, 1),
(8, 32, 3, 5): (0, 1),
(8, 32, 4, -5): (0, 1),
(8, 32, 4, -4): (0, 1),
(8, 32, 4, -3): (0, 0),
(8, 32, 4, -2): (-1, -1),
(8, 32, 4, -1): (0, 1),
(8, 32, 4, 0): (0, 1),
(8, 32, 4, 1): (0, 1),
(8, 32, 4, 2): (0, 1),
(8, 32, 4, 3): (0, 1),
(8, 32, 4, 4): (0, 1),
(8, 32, 4, 5): (0, 1),
(8, 32, 5, -5): (0, 1),
(8, 32, 5, -4): (0, 1),
(8, 32, 5, -3): (0, 0),
(8, 32, 5, -2): (-1, -1),
(8, 32, 5, -1): (0, 1),
(8, 32, 5, 0): (0, 1),
(8, 32, 5, 1): (0, 1),
(8, 32, 5, 2): (0, 1),
(8, 32, 5, 3): (0, 1),
(8, 32, 5, 4): (0, 1),
(8, 32, 5, 5): (0, 1),
(8, 33, -5, -5): (0, 1),
(8, 33, -5, -4): (0, 1),
(8, 33, -5, -3): (0, 1),
(8, 33, -5, -2): (0, 1),
(8, 33, -5, -1): (0, 1),
(8, 33, -5, 0): (0, 1),
(8, 33, -5, 1): (0, 0),
(8, 33, -5, 2): (-1, -1),
(8, 33, -5, 3): (-1, -1),
(8, 33, -5, 4): (0, 1),
(8, 33, -5, 5): (0, 1),
(8, 33, -4, -5): (0, 1),
(8, 33, -4, -4): (-1, 1),
(8, 33, -4, -3): (-1, 1),
(8, 33, -4, -2): (0, 1),
(8, 33, -4, -1): (0, 1),
(8, 33, -4, 0): (0, 1),
(8, 33, -4, 1): (0, 0),
(8, 33, -4, 2): (-1, -1),
(8, 33, -4, 3): (-1, -1),
(8, 33, -4, 4): (-1, 1),
(8, 33, -4, 5): (-1, 1),
(8, 33, -3, -5): (0, 1),
(8, 33, -3, -4): (0, 1),
(8, 33, -3, -3): (-1, 1),
(8, 33, -3, -2): (0, 1),
(8, 33, -3, -1): (-1, 1),
(8, 33, -3, 0): (-1, 1),
(8, 33, -3, 1): (-1, 0),
(8, 33, -3, 2): (-1, -1),
(8, 33, -3, 3): (-1, -1),
(8, 33, -3, 4): (-1, 1),
(8, 33, -3, 5): (-1, 1),
(8, 33, -2, -5): (-1, 1),
(8, 33, -2, -4): (-1, 1),
(8, 33, -2, -3): (-1, 1),
(8, 33, -2, -2): (-1, 1),
(8, 33, -2, -1): (-1, 1),
(8, 33, -2, 0): (-1, 0),
(8, 33, -2, 1): (-1, -1),
(8, 33, -2, 2): (-1, -1),
(8, 33, -2, 3): (-1, -1),
(8, 33, -2, 4): (-1, 1),
(8, 33, -2, 5): (-1, 1),
(8, 33, -1, -5): (-1, 1),
(8, 33, -1, -4): (-1, 1),
(8, 33, -1, -3): (-1, 1),
(8, 33, -1, -2): (-1, 1),
(8, 33, -1, -1): (-1, 1),
(8, 33, -1, 0): (-1, 0),
(8, 33, -1, 1): (-1, -1),
(8, 33, -1, 2): (-1, -1),
(8, 33, -1, 3): (-1, -1),
(8, 33, -1, 4): (-1, 1),
(8, 33, -1, 5): (-1, 1),
(8, 33, 0, -5): (1, 1),
(8, 33, 0, -4): (1, 1),
(8, 33, 0, -3): (1, 1),
(8, 33, 0, -2): (-1, 1),
(8, 33, 0, -1): (-1, | |
self._parameters
elif isinstance(result, TrajDataFrame) and not result._is_trajdataframe():
result.__class__ = pd.DataFrame
return result
def settings_from(self, trajdataframe):
"""
Copy the attributes from another TrajDataFrame.
Parameters
----------
trajdataframe : TrajDataFrame
the TrajDataFrame from which to copy the attributes.
Examples
--------
>>> import skmob
>>> import pandas as pd
>>> # read the trajectory data (GeoLife, Beijing, China)
>>> url = 'https://raw.githubusercontent.com/scikit-mobility/scikit-mobility/master/tutorial/data/geolife_sample.txt.gz'
>>> df = pd.read_csv(url, sep=',', compression='gzip')
>>> tdf1 = skmob.TrajDataFrame(df, latitude='lat', longitude='lon', user_id='user', datetime='datetime')
>>> tdf2 = skmob.TrajDataFrame(df, latitude='lat', longitude='lon', user_id='user', datetime='datetime')
>>> print(tdf1.parameters)
{}
>>> tdf2.parameters['hasProperty'] = True
>>> print(tdf2.parameters)
{'hasProperty': True}
>>> tdf1.settings_from(tdf2)
>>> print(tdf1.parameters)
{'hasProperty': True}
"""
for k in trajdataframe.metadata:
value = getattr(trajdataframe, k)
setattr(self, k, value)
@classmethod
def from_file(cls, filename, latitude=constants.LATITUDE, longitude=constants.LONGITUDE, datetime=constants.DATETIME,
user_id=constants.UID, trajectory_id=constants.TID, encoding=None,
usecols=None, header='infer', timestamp=False, crs={"init": "epsg:4326"}, sep=",", parameters=None):
df = pd.read_csv(filename, sep=sep, header=header, usecols=usecols, encoding=encoding)
if parameters is None:
# Init prop dictionary
parameters = {'from_file': filename}
return cls(df, latitude=latitude, longitude=longitude, datetime=datetime, user_id=user_id,
trajectory_id=trajectory_id, parameters=parameters, crs=crs, timestamp=timestamp)
@property
def lat(self):
if constants.LATITUDE not in self:
raise AttributeError("The TrajDataFrame does not contain the column '%s.'" % constants.LATITUDE)
return self[constants.LATITUDE]
@property
def lng(self):
if constants.LONGITUDE not in self:
raise AttributeError("The TrajDataFrame does not contain the column '%s.'"%constants.LONGITUDE)
return self[constants.LONGITUDE]
@property
def datetime(self):
if constants.DATETIME not in self:
raise AttributeError("The TrajDataFrame does not contain the column '%s.'"%constants.DATETIME)
return self[constants.DATETIME]
@property
def _constructor(self):
return TrajDataFrame
@property
def _constructor_sliced(self):
return TrajSeries
@property
def _constructor_expanddim(self):
return TrajDataFrame
@property
def metadata(self):
md = ['crs', 'parameters'] # Add here all the metadata that are accessible from the object
return md
def __finalize__(self, other, method=None, **kwargs):
"""propagate metadata from other to self """
# merge operation: using metadata of the left object
if method == 'merge':
for name in self._metadata:
object.__setattr__(self, name, getattr(other.left, name, None))
# concat operation: using metadata of the first object
elif method == 'concat':
for name in self._metadata:
object.__setattr__(self, name, getattr(other.objs[0], name, None))
else:
for name in self._metadata:
object.__setattr__(self, name, getattr(other, name, None))
return self
def set_parameter(self, key, param):
self._parameters[key] = param
@property
def crs(self):
return self._crs
@crs.setter
def crs(self, crs):
self._crs = crs
@property
def parameters(self):
return self._parameters
@parameters.setter
def parameters(self, parameters):
self._parameters = dict(parameters)
def __operate_on(self):
"""
Check which optional fields are present and return a list of them, plus the mandatory fields, to which
built-in pandas functions such as sort_values or groupby can be applied.
:return: list
"""
cols = []
if constants.UID in self:
cols.append(constants.UID)
if constants.TID in self:
cols.append(constants.TID)
cols.append(constants.DATETIME)
return cols
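# Illustrative result (assuming the default column names 'uid', 'tid' and 'datetime'):
# a TrajDataFrame with a 'uid' column but no 'tid' column yields ['uid', 'datetime'].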
# Sorting
def sort_by_uid_and_datetime(self):
if constants.UID in self.columns:
return self.sort_values(by=[constants.UID, constants.DATETIME], ascending=[True, True])
else:
return self.sort_values(by=[constants.DATETIME], ascending=[True])
# Plot methods
def plot_trajectory(self, map_f=None, max_users=10, max_points=1000, style_function=plot.traj_style_function,
tiles='cartodbpositron', zoom=12, hex_color=-1, weight=2, opacity=0.75, start_end_markers=True):
"""
Plot the trajectories on a Folium map.
Parameters
----------
map_f : folium.Map, optional
a `folium.Map` object where the trajectory will be plotted. If `None`, a new map will be created. The default is `None`.
max_users : int, optional
maximum number of users whose trajectories should be plotted. The default is `10`.
max_points : int, optional
maximum number of points per individual to plot. The default is `1000`. If necessary, an individual's trajectory will be down-sampled to have at most `max_points` points.
style_function : lambda function, optional
function specifying the style (weight, color, opacity) of the GeoJson object. The default is `plot.traj_style_function`.
tiles : str, optional
folium's `tiles` parameter. The default is 'cartodbpositron'.
zoom : int, optional
the initial zoom on the map. The default is `12`.
hex_color : str or int, optional
hex color of the trajectory line. If `-1` a random color will be generated for each trajectory. The default is `-1`.
weight : float, optional
thickness of the trajectory line. The default is `2`.
opacity : float, optional
opacity (alpha level) of the trajectory line. The default is `0.75`.
start_end_markers: boolean, optional
if `True`, add markers on the start and end points of the trajectory. The default is `True`.
Returns
-------
folium.Map
a `folium.Map` object with the plotted trajectories.
Examples
--------
>>> import skmob
>>> import pandas as pd
>>> # read the trajectory data (GeoLife, Beijing, China)
>>> url = 'https://raw.githubusercontent.com/scikit-mobility/scikit-mobility/master/tutorial/data/geolife_sample.txt.gz'
>>> df = pd.read_csv(url, sep=',', compression='gzip')
>>> tdf = skmob.TrajDataFrame(df, latitude='lat', longitude='lon', user_id='user', datetime='datetime')
>>> print(tdf.head())
lat lng datetime uid
0 39.984094 116.319236 2008-10-23 05:53:05 1
1 39.984198 116.319322 2008-10-23 05:53:06 1
2 39.984224 116.319402 2008-10-23 05:53:11 1
3 39.984211 116.319389 2008-10-23 05:53:16 1
4 39.984217 116.319422 2008-10-23 05:53:21 1
>>> m = tdf.plot_trajectory(zoom=12, weight=3, opacity=0.9, tiles='Stamen Toner')
>>> m
.. image:: https://raw.githubusercontent.com/scikit-mobility/scikit-mobility/master/examples/plot_trajectory_example.png
"""
return plot.plot_trajectory(self, map_f=map_f, max_users=max_users, max_points=max_points,
style_function=style_function, tiles=tiles, zoom=zoom, hex_color=hex_color,
weight=weight, opacity=opacity, start_end_markers=start_end_markers)
def plot_stops(self, map_f=None, max_users=10, tiles='cartodbpositron', zoom=12, hex_color=-1, opacity=0.3,
radius=12, popup=True):
"""
Plot the stops in the TrajDataFrame on a Folium map. This function requires a TrajDataFrame with stops or clusters, output of `preprocessing.detection.stops` or `preprocessing.clustering.cluster` functions. The column `constants.LEAVING_DATETIME` must be present.
Parameters
----------
map_f : folium.Map
a `folium.Map` object where the trajectory will be plotted. If `None`, a new map will be created. The default is `None`.
max_users : int, optional
maximum number of users whose trajectories should be plotted. The default is `10`.
tiles : str, optional
folium's `tiles` parameter. The default is 'cartodbpositron'.
zoom : int, optional
the initial zoom on the map. The default is `12`.
hex_color : str or int, optional
hex color of the stop markers. If `-1` a random color will be generated for each user. The default is `-1`.
opacity : float, optional
opacity (alpha level) of the stop markers. The default is `0.3`.
radius : float, optional
size of the markers. The default is `12`.
popup : boolean, optional
if `True`, when clicking on a marker a popup window displaying information on the stop will appear. The default is `True`.
Returns
-------
folium.Map
a `folium.Map` object with the plotted stops.
Examples
--------
>>> import skmob
>>> from skmob.preprocessing import detection
>>> import pandas as pd
>>> # read the trajectory data (GeoLife, Beijing, China)
>>> url = 'https://raw.githubusercontent.com/scikit-mobility/scikit-mobility/master/tutorial/data/geolife_sample.txt.gz'
>>> df = pd.read_csv(url, sep=',', compression='gzip')
>>> tdf = skmob.TrajDataFrame(df, latitude='lat', longitude='lon', user_id='user', datetime='datetime')
>>> print(tdf.head())
lat lng datetime uid
0 39.984094 116.319236 2008-10-23 05:53:05 1
1 39.984198 116.319322 2008-10-23 05:53:06 1
2 39.984224 116.319402 2008-10-23 05:53:11 1
3 39.984211 116.319389 2008-10-23 05:53:16 1
4 39.984217 116.319422 2008-10-23 05:53:21 1
>>> stdf = detection.stops(tdf, stop_radius_factor=0.5, minutes_for_a_stop=20.0, spatial_radius_km=0.2, leaving_time=True)
>>> print(stdf.head())
lat lng datetime uid leaving_datetime
0 39.978030 116.327481 2008-10-23 06:01:37 1 2008-10-23 10:32:53
1 40.013820 116.306532 2008-10-23 11:10:19 1 2008-10-23 23:45:27
2 39.978419 116.326870 2008-10-24 00:21:52 1 2008-10-24 01:47:30
3 39.981166 116.308475 2008-10-24 02:02:31 1 2008-10-24 02:30:29
4 39.981431 116.309902 2008-10-24 02:30:29 1 2008-10-24 03:16:35
>>> map_f = tdf.plot_trajectory(max_points=1000, hex_color=-1, start_end_markers=False)
>>> stdf.plot_stops(map_f=map_f, hex_color=-1)
.. image:: https://raw.githubusercontent.com/scikit-mobility/scikit-mobility/master/examples/plot_stops_example.png
"""
return plot.plot_stops(self, map_f=map_f, max_users=max_users, tiles=tiles, zoom=zoom,
hex_color=hex_color, opacity=opacity, radius=radius, popup=popup)
def plot_diary(self, user, start_datetime=None, end_datetime=None, ax=None, legend=False):
"""
Plot a mobility diary of an individual in a TrajDataFrame. It requires a TrajDataFrame with clusters, output of `preprocessing.clustering.cluster`. The column `constants.CLUSTER` must be present.
Parameters
----------
user : str or int
user identifier whose diary should be plotted.
start_datetime : datetime.datetime, optional
only stops made after this date will be plotted. If `None` the datetime of the oldest stop will be selected. The default is `None`.
end_datetime : datetime.datetime, optional
only stops made before this date will be plotted. If `None` the datetime of the newest stop will be selected. The default is `None`.
ax : matplotlib.axes, optional
axes where the diary will be plotted. If `None` a new ax is created. The default is `None`.
legend : bool, optional
If `True`, legend with cluster IDs is shown. The default is `False`.
Returns
-------
matplotlib.axes
the `matplotlib.axes` object of the plotted diary.
Examples
--------
>>> import skmob
>>> from skmob.preprocessing import detection, clustering
>>> import pandas as pd
>>> # | |
((-1 + mckin**2/mbkin**2)**2 - (2*(mbkin**2 + mckin**2)*q_cut)/mbkin**4 +
q_cut**2/mbkin**4)*((3780*mckin**2*muG**2)/mbkin**2 -
(23760*mckin**4*muG**2)/mbkin**4 - (39348*mckin**6*muG**2)/mbkin**6 +
(673560*mckin**8*muG**2)/mbkin**8 - (1433376*mckin**10*muG**2)/
mbkin**10 + (4650048*mckin**12*muG**2)/mbkin**12 +
(2922696*mckin**14*muG**2)/mbkin**14 - (16610832*mckin**16*muG**2)/
mbkin**16 + (7950420*mckin**18*muG**2)/mbkin**18 +
(3388176*mckin**20*muG**2)/mbkin**20 - (1472148*mckin**22*muG**2)/
mbkin**22 - (39816*mckin**24*muG**2)/mbkin**24 +
(30600*mckin**26*muG**2)/mbkin**26 + (1260*mckin**2*muG*mupi)/
mbkin**2 - (15840*mckin**4*muG*mupi)/mbkin**4 -
(41148*mckin**6*muG*mupi)/mbkin**6 + (855864*mckin**8*muG*mupi)/
mbkin**8 - (2008800*mckin**10*muG*mupi)/mbkin**10 -
(4847904*mckin**12*muG*mupi)/mbkin**12 + (5623128*mckin**14*muG*
mupi)/mbkin**14 + (7036848*mckin**16*muG*mupi)/mbkin**16 -
(5620644*mckin**18*muG*mupi)/mbkin**18 - (1584000*mckin**20*muG*
mupi)/mbkin**20 + (577764*mckin**22*muG*mupi)/mbkin**22 +
(29592*mckin**24*muG*mupi)/mbkin**24 - (6120*mckin**26*muG*mupi)/
mbkin**26 - (5040*mckin**2*muG**2*q_cut)/mbkin**4 +
(13320*mckin**4*muG**2*q_cut)/mbkin**6 - (7776*mckin**6*muG**2*q_cut)/
mbkin**8 + (1056600*mckin**8*muG**2*q_cut)/mbkin**10 -
(2260800*mckin**10*muG**2*q_cut)/mbkin**12 - (9882000*mckin**12*muG**2*
q_cut)/mbkin**14 - (20960928*mckin**14*muG**2*q_cut)/mbkin**16 -
(25217712*mckin**16*muG**2*q_cut)/mbkin**18 +
(700272*mckin**18*muG**2*q_cut)/mbkin**20 + (4524552*mckin**20*muG**2*q_cut)/
mbkin**22 - (91008*mckin**22*muG**2*q_cut)/mbkin**24 -
(124200*mckin**24*muG**2*q_cut)/mbkin**26 - (5040*mckin**2*muG*mupi*q_cut)/
mbkin**4 + (47160*mckin**4*muG*mupi*q_cut)/mbkin**6 +
(197856*mckin**6*muG*mupi*q_cut)/mbkin**8 - (2047608*mckin**8*muG*mupi*
q_cut)/mbkin**10 - (561024*mckin**10*muG*mupi*q_cut)/mbkin**12 +
(15349392*mckin**12*muG*mupi*q_cut)/mbkin**14 +
(25799328*mckin**14*muG*mupi*q_cut)/mbkin**16 +
(16085232*mckin**16*muG*mupi*q_cut)/mbkin**18 -
(815184*mckin**18*muG*mupi*q_cut)/mbkin**20 -
(1783368*mckin**20*muG*mupi*q_cut)/mbkin**22 -
(36864*mckin**22*muG*mupi*q_cut)/mbkin**24 + (24840*mckin**24*muG*mupi*
q_cut)/mbkin**26 - (5040*mckin**2*muG**2*q_cut**2)/mbkin**6 +
(40320*mckin**4*muG**2*q_cut**2)/mbkin**8 + (177840*mckin**6*muG**2*q_cut**2)/
mbkin**10 - (877248*mckin**8*muG**2*q_cut**2)/mbkin**12 -
(3926304*mckin**10*muG**2*q_cut**2)/mbkin**14 +
(735264*mckin**12*muG**2*q_cut**2)/mbkin**16 +
(4323744*mckin**14*muG**2*q_cut**2)/mbkin**18 -
(2464704*mckin**16*muG**2*q_cut**2)/mbkin**20 -
(2724912*mckin**18*muG**2*q_cut**2)/mbkin**22 +
(240480*mckin**20*muG**2*q_cut**2)/mbkin**24 +
(126000*mckin**22*muG**2*q_cut**2)/mbkin**26 +
(5040*mckin**2*muG*mupi*q_cut**2)/mbkin**6 - (30240*mckin**4*muG*mupi*
q_cut**2)/mbkin**8 - (169200*mckin**6*muG*mupi*q_cut**2)/mbkin**10 +
(720000*mckin**8*muG*mupi*q_cut**2)/mbkin**12 +
(2313504*mckin**10*muG*mupi*q_cut**2)/mbkin**14 -
(372384*mckin**12*muG*mupi*q_cut**2)/mbkin**16 -
(815904*mckin**14*muG*mupi*q_cut**2)/mbkin**18 +
(1880064*mckin**16*muG*mupi*q_cut**2)/mbkin**20 +
(886320*mckin**18*muG*mupi*q_cut**2)/mbkin**22 -
(37440*mckin**20*muG*mupi*q_cut**2)/mbkin**24 -
(25200*mckin**22*muG*mupi*q_cut**2)/mbkin**26 -
(5040*mckin**2*muG**2*q_cut**3)/mbkin**8 - (11880*mckin**4*muG**2*q_cut**3)/
mbkin**10 + (407808*mckin**6*muG**2*q_cut**3)/mbkin**12 -
(2325168*mckin**8*muG**2*q_cut**3)/mbkin**14 - (12438432*mckin**10*muG**2*
q_cut**3)/mbkin**16 - (17994816*mckin**12*muG**2*q_cut**3)/mbkin**18 -
(13595904*mckin**14*muG**2*q_cut**3)/mbkin**20 -
(2472912*mckin**16*muG**2*q_cut**3)/mbkin**22 +
(702288*mckin**18*muG**2*q_cut**3)/mbkin**24 +
(124200*mckin**20*muG**2*q_cut**3)/mbkin**26 +
(5040*mckin**2*muG*mupi*q_cut**3)/mbkin**8 - (39960*mckin**4*muG*mupi*
q_cut**3)/mbkin**10 - (266976*mckin**6*muG*mupi*q_cut**3)/mbkin**12 +
(1733904*mckin**8*muG*mupi*q_cut**3)/mbkin**14 +
(6554304*mckin**10*muG*mupi*q_cut**3)/mbkin**16 +
(8608032*mckin**12*muG*mupi*q_cut**3)/mbkin**18 +
(6371424*mckin**14*muG*mupi*q_cut**3)/mbkin**20 +
(1519344*mckin**16*muG*mupi*q_cut**3)/mbkin**22 -
(74736*mckin**18*muG*mupi*q_cut**3)/mbkin**24 -
(24840*mckin**20*muG*mupi*q_cut**3)/mbkin**26 +
(32760*mckin**2*muG**2*q_cut**4)/mbkin**10 - (81000*mckin**4*muG**2*q_cut**4)/
mbkin**12 - (1250064*mckin**6*muG**2*q_cut**4)/mbkin**14 +
(1785384*mckin**8*muG**2*q_cut**4)/mbkin**16 + (13305744*mckin**10*muG**2*
q_cut**4)/mbkin**18 + (14415336*mckin**12*muG**2*q_cut**4)/mbkin**20 +
(2808000*mckin**14*muG**2*q_cut**4)/mbkin**22 -
(1709064*mckin**16*muG**2*q_cut**4)/mbkin**24 -
(307800*mckin**18*muG**2*q_cut**4)/mbkin**26 -
(12600*mckin**2*muG*mupi*q_cut**4)/mbkin**10 +
(66600*mckin**4*muG*mupi*q_cut**4)/mbkin**12 +
(499248*mckin**6*muG*mupi*q_cut**4)/mbkin**14 -
(1575720*mckin**8*muG*mupi*q_cut**4)/mbkin**16 -
(6865776*mckin**10*muG*mupi*q_cut**4)/mbkin**18 -
(6544296*mckin**12*muG*mupi*q_cut**4)/mbkin**20 -
(1386720*mckin**14*muG*mupi*q_cut**4)/mbkin**22 +
(305928*mckin**16*muG*mupi*q_cut**4)/mbkin**24 +
(61560*mckin**18*muG*mupi*q_cut**4)/mbkin**26 -
(15120*mckin**2*muG**2*q_cut**5)/mbkin**12 + (68040*mckin**4*muG**2*q_cut**5)/
mbkin**14 + (291168*mckin**6*muG**2*q_cut**5)/mbkin**16 -
(2070936*mckin**8*muG**2*q_cut**5)/mbkin**18 -
(5521680*mckin**10*muG**2*q_cut**5)/mbkin**20 -
(2596392*mckin**12*muG**2*q_cut**5)/mbkin**22 +
(455616*mckin**14*muG**2*q_cut**5)/mbkin**24 +
(113400*mckin**16*muG**2*q_cut**5)/mbkin**26 +
(5040*mckin**2*muG*mupi*q_cut**5)/mbkin**12 -
(27720*mckin**4*muG*mupi*q_cut**5)/mbkin**14 -
(120672*mckin**6*muG*mupi*q_cut**5)/mbkin**16 +
(514296*mckin**8*muG*mupi*q_cut**5)/mbkin**18 +
(1383984*mckin**10*muG*mupi*q_cut**5)/mbkin**20 +
(487656*mckin**12*muG*mupi*q_cut**5)/mbkin**22 -
(104832*mckin**14*muG*mupi*q_cut**5)/mbkin**24 -
(22680*mckin**16*muG*mupi*q_cut**5)/mbkin**26 -
(25200*mckin**2*muG**2*q_cut**6)/mbkin**14 + (68040*mckin**4*muG**2*q_cut**6)/
mbkin**16 + (1398456*mckin**6*muG**2*q_cut**6)/mbkin**18 +
(3709584*mckin**8*muG**2*q_cut**6)/mbkin**20 +
(3680928*mckin**10*muG**2*q_cut**6)/mbkin**22 +
(1273608*mckin**12*muG**2*q_cut**6)/mbkin**24 +
(113400*mckin**14*muG**2*q_cut**6)/mbkin**26 +
(5040*mckin**2*muG*mupi*q_cut**6)/mbkin**14 -
(7560*mckin**4*muG*mupi*q_cut**6)/mbkin**16 - (302616*mckin**6*muG*mupi*
q_cut**6)/mbkin**18 - (713232*mckin**8*muG*mupi*q_cut**6)/mbkin**20 -
(725472*mckin**10*muG*mupi*q_cut**6)/mbkin**22 -
(261576*mckin**12*muG*mupi*q_cut**6)/mbkin**24 -
(22680*mckin**14*muG*mupi*q_cut**6)/mbkin**26 +
(25200*mckin**2*muG**2*q_cut**7)/mbkin**16 - (94680*mckin**4*muG**2*q_cut**7)/
mbkin**18 - (1141920*mckin**6*muG**2*q_cut**7)/mbkin**20 -
(2010528*mckin**8*muG**2*q_cut**7)/mbkin**22 -
(1093680*mckin**10*muG**2*q_cut**7)/mbkin**24 -
(81000*mckin**12*muG**2*q_cut**7)/mbkin**26 - (5040*mckin**2*muG*mupi*
q_cut**7)/mbkin**16 + (8280*mckin**4*muG*mupi*q_cut**7)/mbkin**18 +
(264960*mckin**6*muG*mupi*q_cut**7)/mbkin**20 +
(486720*mckin**8*muG*mupi*q_cut**7)/mbkin**22 +
(234000*mckin**10*muG*mupi*q_cut**7)/mbkin**24 +
(16200*mckin**12*muG*mupi*q_cut**7)/mbkin**26 -
(6300*mckin**2*muG**2*q_cut**8)/mbkin**18 + (27000*mckin**4*muG**2*q_cut**8)/
mbkin**20 + (288612*mckin**6*muG**2*q_cut**8)/mbkin**22 +
(305712*mckin**8*muG**2*q_cut**8)/mbkin**24 + (10800*mckin**10*muG**2*
q_cut**8)/mbkin**26 + (1260*mckin**2*muG*mupi*q_cut**8)/mbkin**18 -
(1800*mckin**4*muG*mupi*q_cut**8)/mbkin**20 -
(67284*mckin**6*muG*mupi*q_cut**8)/mbkin**22 -
(59904*mckin**8*muG*mupi*q_cut**8)/mbkin**24 -
(2160*mckin**10*muG*mupi*q_cut**8)/mbkin**26 -
(25200*mckin**4*muG**2*q_cut**9)/mbkin**22 - (71136*mckin**6*muG**2*q_cut**9)/
mbkin**24 - (25200*mckin**8*muG**2*q_cut**9)/mbkin**26 +
(5040*mckin**4*muG*mupi*q_cut**9)/mbkin**22 +
(11232*mckin**6*muG*mupi*q_cut**9)/mbkin**24 +
(5040*mckin**8*muG*mupi*q_cut**9)/mbkin**26 +
(27000*mckin**4*muG**2*q_cut**10)/mbkin**24 + (27000*mckin**6*muG**2*
q_cut**10)/mbkin**26 - (5400*mckin**4*muG*mupi*q_cut**10)/mbkin**24 -
(5400*mckin**6*muG*mupi*q_cut**10)/mbkin**26 -
(7200*mckin**4*muG**2*q_cut**11)/mbkin**26 + (1440*mckin**4*muG*mupi*
q_cut**11)/mbkin**26 - 72*mckin**2*muG*((-1 + mckin**2/mbkin**2)**2*(
-35 + (370*mckin**2)/mbkin**2 + (1918*mckin**4)/mbkin**4 -
(20308*mckin**6)/mbkin**6 + (13266*mckin**8)/mbkin**8 +
(181504*mckin**10)/mbkin**10 + (193544*mckin**12)/mbkin**12 +
(10116*mckin**14)/mbkin**14 - (17183*mckin**16)/mbkin**16 -
(482*mckin**18)/mbkin**18 + (170*mckin**20)/mbkin**20) -
(2*(-70 + (655*mckin**2)/mbkin**2 + (2748*mckin**4)/mbkin**4 -
(28439*mckin**6)/mbkin**6 - (7792*mckin**8)/mbkin**8 +
(213186*mckin**10)/mbkin**10 + (358324*mckin**12)/mbkin**12 +
(223406*mckin**14)/mbkin**14 - (11322*mckin**16)/mbkin**16 -
(24769*mckin**18)/mbkin**18 - (512*mckin**20)/mbkin**20 +
(345*mckin**22)/mbkin**22)*q_cut)/mbkin**2 +
(4*(-35 + (210*mckin**2)/mbkin**2 + (1175*mckin**4)/mbkin**4 -
(5000*mckin**6)/mbkin**6 - (16066*mckin**8)/mbkin**8 +
(2586*mckin**10)/mbkin**10 + (5666*mckin**12)/mbkin**12 -
(13056*mckin**14)/mbkin**14 - (6155*mckin**16)/mbkin**16 +
(260*mckin**18)/mbkin**18 + (175*mckin**20)/mbkin**20)*q_cut**2)/
mbkin**4 + (2*(-70 + (555*mckin**2)/mbkin**2 + (3708*mckin**4)/
mbkin**4 - (24082*mckin**6)/mbkin**6 - (91032*mckin**8)/
mbkin**8 - (119556*mckin**10)/mbkin**10 - (88492*mckin**12)/
mbkin**12 - (21102*mckin**14)/mbkin**14 + (1038*mckin**16)/
mbkin**16 + (345*mckin**18)/mbkin**18)*q_cut**3)/mbkin**6 -
(2*(-175 + (925*mckin**2)/mbkin**2 + (6934*mckin**4)/mbkin**4 -
(21885*mckin**6)/mbkin**6 - (95358*mckin**8)/mbkin**8 -
(90893*mckin**10)/mbkin**10 - (19260*mckin**12)/mbkin**12 +
(4249*mckin**14)/mbkin**14 + (855*mckin**16)/mbkin**16)*q_cut**4)/
mbkin**8 + (2*(-70 + (385*mckin**2)/mbkin**2 + (1676*mckin**4)/
mbkin**4 - (7143*mckin**6)/mbkin**6 - (19222*mckin**8)/
mbkin**8 - (6773*mckin**10)/mbkin**10 + (1456*mckin**12)/
mbkin**12 + (315*mckin**14)/mbkin**14)*q_cut**5)/mbkin**10 +
(2*(-70 + (105*mckin**2)/mbkin**2 + (4203*mckin**4)/mbkin**4 +
(9906*mckin**6)/mbkin**6 + (10076*mckin**8)/mbkin**8 +
(3633*mckin**10)/mbkin**10 + (315*mckin**12)/mbkin**12)*q_cut**6)/
mbkin**12 - (10*(-14 + (23*mckin**2)/mbkin**2 + (736*mckin**4)/
mbkin**4 + (1352*mckin**6)/mbkin**6 + (650*mckin**8)/mbkin**8 +
(45*mckin**10)/mbkin**10)*q_cut**7)/mbkin**14 +
((-35 + (50*mckin**2)/mbkin**2 + (1869*mckin**4)/mbkin**4 +
(1664*mckin**6)/mbkin**6 + (60*mckin**8)/mbkin**8)*q_cut**8)/mbkin**
16 - (4*mckin**2*(35 + (78*mckin**2)/mbkin**2 + (35*mckin**4)/
mbkin**4)*q_cut**9)/mbkin**20 + (150*mckin**2*(mbkin**2 + mckin**2)*
q_cut**10)/mbkin**24 - (40*mckin**2*q_cut**11)/mbkin**24) -
4*(-((-1 + mckin**2/mbkin**2)**2*(-575 + (1630*mckin**2)/mbkin**2 +
(85106*mckin**4)/mbkin**4 - (282442*mckin**6)/mbkin**6 -
(1378372*mckin**8)/mbkin**8 + (1535906*mckin**10)/mbkin**10 +
(4125290*mckin**12)/mbkin**12 + (1796594*mckin**14)/mbkin**14 -
(249373*mckin**16)/mbkin**16 - (188768*mckin**18)/mbkin**18 -
(3836*mckin**20)/mbkin**20 + (2040*mckin**22)/mbkin**22)) +
(4*(-585 + (285*mckin**2)/mbkin**2 + (64813*mckin**4)/mbkin**4 -
(59927*mckin**6)/mbkin**6 - (921308*mckin**8)/mbkin**8 +
(129898*mckin**10)/mbkin**10 + (2676538*mckin**12)/mbkin**12 +
(3044602*mckin**14)/mbkin**14 + (904567*mckin**16)/mbkin**16 -
(261887*mckin**18)/mbkin**18 - (134543*mckin**20)/mbkin**20 -
(1323*mckin**22)/mbkin**22 + (2070*mckin**24)/mbkin**24)*q_cut)/
mbkin**2 - (4*(-595 - (2310*mckin**2)/mbkin**2 + (33975*mckin**4)/
mbkin**4 + (77820*mckin**6)/mbkin**6 - (95082*mckin**8)/
mbkin**8 - (7416*mckin**10)/mbkin**10 + (117360*mckin**12)/
mbkin**12 - (247224*mckin**14)/mbkin**14 - (273243*mckin**16)/
mbkin**16 - (64490*mckin**18)/mbkin**18 + (5505*mckin**20)/
mbkin**20 + (2100*mckin**22)/mbkin**22)*q_cut**2)/mbkin**4 -
(4*(-585 - (2055*mckin**2)/mbkin**2 + (59033*mckin**4)/mbkin**4 +
(170055*mckin**6)/mbkin**6 - (69365*mckin**8)/mbkin**8 -
(373829*mckin**10)/mbkin**10 - (644871*mckin**12)/mbkin**12 -
(469615*mckin**14)/mbkin**14 - (106890*mckin**16)/mbkin**16 +
(4692*mckin**18)/mbkin**18 + (2070*mckin**20)/mbkin**20)*q_cut**3)/
mbkin**6 + (2*(-2895 - (21000*mckin**2)/mbkin**2 +
(119453*mckin**4)/mbkin**4 + (428252*mckin**6)/mbkin**6 -
(312001*mckin**8)/mbkin**8 - (1414660*mckin**10)/mbkin**10 -
(1024579*mckin**12)/mbkin**12 - (162412*mckin**14)/mbkin**14 +
(50862*mckin**16)/mbkin**16 + (10260*mckin**18)/mbkin**18)*q_cut**4)/
mbkin**8 - (4*(-525 - (4515*mckin**2)/mbkin**2 + (17451*mckin**4)/
mbkin**4 + (35671*mckin**6)/mbkin**6 - (71799*mckin**8)/
mbkin**8 - (98271*mckin**10)/mbkin**10 - (16201*mckin**12)/
mbkin**12 + (10059*mckin**14)/mbkin**14 + (1890*mckin**16)/
mbkin**16)*q_cut**5)/mbkin**10 - (4*(-525 - (7560*mckin**2)/
mbkin**2 - (15337*mckin**4)/mbkin**4 + (10568*mckin**6)/
mbkin**6 + (44285*mckin**8)/mbkin**8 + (50502*mckin**10)/
mbkin**10 + (20097*mckin**12)/mbkin**12 + (1890*mckin**14)/
mbkin**14)*q_cut**6)/mbkin**12 + (20*(-69 - (1443*mckin**2)/
mbkin**2 - (1933*mckin**4)/mbkin**4 + (4085*mckin**6)/mbkin**6 +
(7964*mckin**8)/mbkin**8 + (3702*mckin**10)/mbkin**10 +
(270*mckin**12)/mbkin**12)*q_cut**7)/mbkin**14 +
((135 + (7260*mckin**2)/mbkin**2 + (2877*mckin**4)/mbkin**4 -
(24560*mckin**6)/mbkin**6 - (18312*mckin**8)/mbkin**8 -
(720*mckin**10)/mbkin**10)*q_cut**8)/mbkin**16 +
(16*(-35 - (70*mckin**2)/mbkin**2 + (29*mckin**4)/mbkin**4 +
(181*mckin**6)/mbkin**6 + (105*mckin**8)/mbkin**8)*q_cut**9)/mbkin**
18 - (120*(-5 - (5*mckin**2)/mbkin**2 + (13*mckin**4)/mbkin**4 +
(15*mckin**6)/mbkin**6)*q_cut**10)/mbkin**20 -
(160*(mbkin**4 - 3*mckin**4)*q_cut**11)/mbkin**26)*rG +
24*mbkin*(-((-1 + mckin**2/mbkin**2)**2*(65 - (810*mckin**2)/
mbkin**2 - (516*mckin**4)/mbkin**4 + (110224*mckin**6)/
mbkin**6 - (410120*mckin**8)/mbkin**8 - (2939462*mckin**10)/
mbkin**10 - (3104774*mckin**12)/mbkin**12 - (475058*mckin**14)/
mbkin**14 + (163447*mckin**16)/mbkin**16 - (52384*mckin**18)/
mbkin**18 - (4742*mckin**20)/mbkin**20 + (850*mckin**22)/
mbkin**22)) + (2*(135 - (1420*mckin**2)/mbkin**2 -
(3196*mckin**4)/mbkin**4 + (162656*mckin**6)/mbkin**6 -
(138305*mckin**8)/mbkin**8 - (3423984*mckin**10)/mbkin**10 -
(6202792*mckin**12)/mbkin**12 - (3867608*mckin**14)/mbkin**14 -
(95475*mckin**16)/mbkin**16 + (226796*mckin**18)/mbkin**18 -
(77740*mckin**20)/mbkin**20 - (7352*mckin**22)/mbkin**22 +
(1725*mckin**24)/mbkin**24)*q_cut)/mbkin**2 -
(4*(70 - (455*mckin**2)/mbkin**2 - (3470*mckin**4)/mbkin**4 +
(37665*mckin**6)/mbkin**6 + (138922*mckin**8)/mbkin**8 +
(83452*mckin**10)/mbkin**10 + (121324*mckin**12)/mbkin**12 +
(190868*mckin**14)/mbkin**14 + (16424*mckin**16)/mbkin**16 -
(25445*mckin**18)/mbkin**18 - (790*mckin**20)/mbkin**20 +
(875*mckin**22)/mbkin**22)*q_cut**2)/mbkin**4 -
(2*(135 - (880*mckin**2)/mbkin**2 - (7621*mckin**4)/mbkin**4 +
(133182*mckin**6)/mbkin**6 + (543892*mckin**8)/mbkin**8 +
(658370*mckin**10)/mbkin**10 + (525528*mckin**12)/mbkin**12 +
(55122*mckin**14)/mbkin**14 - (76843*mckin**16)/mbkin**16 -
(2082*mckin**18)/mbkin**18 + (1725*mckin**20)/mbkin**20)*q_cut**3)/
mbkin**6 + (2*(330 - (1015*mckin**2)/mbkin**2 - (21973*mckin**4)/
mbkin**4 + (116514*mckin**6)/mbkin**6 + (621711*mckin**8)/
mbkin**8 + (591448*mckin**10)/mbkin**10 - (14853*mckin**12)/
mbkin**12 - (96558*mckin**14)/mbkin**14 + (6769*mckin**16)/
mbkin**16 + (4275*mckin**18)/mbkin**18)*q_cut**4)/mbkin**8 -
(2*(105 - (280*mckin**2)/mbkin**2 - (8822*mckin**4)/mbkin**4 +
(25052*mckin**6)/mbkin**6 + (79356*mckin**8)/mbkin**8 -
(7796*mckin**10)/mbkin**10 - (25990*mckin**12)/mbkin**12 +
(3136*mckin**14)/mbkin**14 + (1575*mckin**16)/mbkin**16)*q_cut**5)/
mbkin**10 - (2*(105 + (455*mckin**2)/mbkin**2 - (8521*mckin**4)/
mbkin**4 - (25699*mckin**6)/mbkin**6 - (20961*mckin**8)/
mbkin**8 + (4285*mckin**10)/mbkin**10 + (11193*mckin**12)/
mbkin**12 + (1575*mckin**14)/mbkin**14)*q_cut**6)/mbkin**12 +
(10*(3 + (88*mckin**2)/mbkin**2 - (1667*mckin**4)/mbkin**4 -
(3298*mckin**6)/mbkin**6 + (671*mckin**8)/mbkin**8 +
(2090*mckin**10)/mbkin**10 + (225*mckin**12)/mbkin**12)*q_cut**7)/
mbkin**14 + ((45 - (310*mckin**2)/mbkin**2 + (4303*mckin**4)/
mbkin**4 + (694*mckin**6)/mbkin**6 - (5204*mckin**8)/mbkin**8 -
(300*mckin**10)/mbkin**10)*q_cut**8)/mbkin**16 +
(4*(35 + (70*mckin**2)/mbkin**2 + (94*mckin**4)/mbkin**4 +
(218*mckin**6)/mbkin**6 + (175*mckin**8)/mbkin**8)*q_cut**9)/mbkin**
18 - (30*(5 + (5*mckin**2)/mbkin**2 + (17*mckin**4)/mbkin**4 +
(25*mckin**6)/mbkin**6)*q_cut**10)/mbkin**20 +
(40*(mbkin**4 + 5*mckin**4)*q_cut**11)/mbkin**26)*rhoD + 1260*sB +
(9240*mckin**2*sB)/mbkin**2 - (444348*mckin**4*sB)/mbkin**4 +
(379944*mckin**6*sB)/mbkin**6 + (15149640*mckin**8*sB)/mbkin**8 +
(2327616*mckin**10*sB)/mbkin**10 - (26470152*mckin**12*sB)/
mbkin**12 - (6409872*mckin**14*sB)/mbkin**14 +
(7311276*mckin**16*sB)/mbkin**16 + (7117560*mckin**18*sB)/mbkin**18 +
(2594244*mckin**20*sB)/mbkin**20 - (1478808*mckin**22*sB)/mbkin**22 -
(108000*mckin**24*sB)/mbkin**24 + (20400*mckin**26*sB)/mbkin**26 -
(5040*q_cut*sB)/mbkin**2 - (62160*mckin**2*q_cut*sB)/mbkin**4 +
(1228416*mckin**4*q_cut*sB)/mbkin**6 + (4170192*mckin**6*q_cut*sB)/
mbkin**8 - (27720144*mckin**8*q_cut*sB)/mbkin**10 -
(98929248*mckin**10*q_cut*sB)/mbkin**12 - (111450432*mckin**12*q_cut*sB)/
mbkin**14 - (55878048*mckin**14*q_cut*sB)/mbkin**16 -
(16085904*mckin**16*q_cut*sB)/mbkin**18 + (3898032*mckin**18*q_cut*sB)/
mbkin**20 + (4635456*mckin**20*q_cut*sB)/mbkin**22 +
(171600*mckin**22*q_cut*sB)/mbkin**24 - (82800*mckin**24*q_cut*sB)/
mbkin**26 + (5040*q_cut**2*sB)/mbkin**4 + (87360*mckin**2*q_cut**2*sB)/
# onesk/aoc2017
data = """
0 <-> 1543
1 <-> 66, 1682
2 <-> 1525
3 <-> 958
4 <-> 593, 1542
5 <-> 484
6 <-> 297
7 <-> 372, 743, 1965
8 <-> 934
9 <-> 1224, 1489
10 <-> 10, 129, 147, 1394
11 <-> 1244
12 <-> 12, 994, 1954
13 <-> 1027
14 <-> 875, 1211
15 <-> 405
16 <-> 1437, 1476
17 <-> 996
18 <-> 216, 777
19 <-> 404, 1524, 1539, 1941
20 <-> 1365
21 <-> 21, 460, 1431, 1624
22 <-> 313, 530
23 <-> 942, 1125
24 <-> 180, 338
25 <-> 771, 1547, 1561
26 <-> 463, 1012, 1276, 1760
27 <-> 738
28 <-> 311, 1023, 1461, 1739, 1853, 1900
29 <-> 1075
30 <-> 355, 467, 1605
31 <-> 1137, 1740
32 <-> 1125, 1756
33 <-> 658, 1149, 1790
34 <-> 344, 899
35 <-> 337, 716, 1416, 1780
36 <-> 1641
37 <-> 812
38 <-> 86, 1195
39 <-> 1967
40 <-> 1125, 1550
41 <-> 1881
42 <-> 151
43 <-> 222
44 <-> 73, 1075
45 <-> 1865
46 <-> 634, 837, 1143
47 <-> 647
48 <-> 695, 901
49 <-> 49
50 <-> 1598, 1619
51 <-> 125, 550, 1161
52 <-> 241, 387, 1951
53 <-> 286, 1414
54 <-> 1231, 1926
55 <-> 437, 1292, 1919
56 <-> 1108, 1597
57 <-> 1073, 1205, 1565
58 <-> 555
59 <-> 59, 1410
60 <-> 1735
61 <-> 61
62 <-> 149, 525, 1341
63 <-> 356, 1251
64 <-> 146, 1187
65 <-> 1582
66 <-> 1
67 <-> 126, 303, 1942
68 <-> 1135, 1742
69 <-> 1904
70 <-> 70, 371
71 <-> 513
72 <-> 1877
73 <-> 44
74 <-> 1617
75 <-> 1495
76 <-> 326, 867
77 <-> 420, 961, 1425
78 <-> 1517
79 <-> 1233
80 <-> 415, 1075, 1354
81 <-> 958, 1089
82 <-> 1677
83 <-> 403
84 <-> 85, 1877
85 <-> 84, 260
86 <-> 38, 191, 442
87 <-> 969, 1993
88 <-> 610, 1507
89 <-> 151, 758, 1081, 1521, 1596
90 <-> 474, 1958
91 <-> 950, 1554, 1949
92 <-> 117, 169
93 <-> 1899
94 <-> 819
95 <-> 1886
96 <-> 360, 1626
97 <-> 916, 1146
98 <-> 675, 1502, 1566
99 <-> 384
100 <-> 1905
101 <-> 403, 1387, 1717
102 <-> 196, 1281
103 <-> 947
104 <-> 104
105 <-> 975
106 <-> 303, 546, 750
107 <-> 380, 1929
108 <-> 108
109 <-> 999, 1059
110 <-> 1617
111 <-> 340
112 <-> 112
113 <-> 1063, 1281, 1758
114 <-> 114
115 <-> 631, 1067, 1904
116 <-> 1036
117 <-> 92, 318
118 <-> 1269
119 <-> 898
120 <-> 1052
121 <-> 121
122 <-> 1046
123 <-> 1127
124 <-> 717, 1018
125 <-> 51, 535, 639, 709
126 <-> 67, 557, 1060, 1098, 1640
127 <-> 1286
128 <-> 128
129 <-> 10, 1467
130 <-> 492, 1961
131 <-> 1103, 1433
132 <-> 1191
133 <-> 309, 1049
134 <-> 1361, 1390
135 <-> 683, 1575
136 <-> 702, 1188
137 <-> 290, 302, 1527
138 <-> 656, 975, 1279
139 <-> 882, 1537, 1542, 1930
140 <-> 1200
141 <-> 562, 680, 1865
142 <-> 1233
143 <-> 653, 1076
144 <-> 1334
145 <-> 145, 1797
146 <-> 64
147 <-> 10, 863, 1090
148 <-> 1138
149 <-> 62, 463
150 <-> 1223, 1228, 1239
151 <-> 42, 89, 404, 694
152 <-> 1389, 1672
153 <-> 1596
154 <-> 1193
155 <-> 980
156 <-> 164
157 <-> 157, 1270
158 <-> 272, 1461
159 <-> 777, 1153
160 <-> 595, 1209
161 <-> 365, 686
162 <-> 516, 987
163 <-> 546, 1004, 1056
164 <-> 156, 164
165 <-> 165, 1696
166 <-> 310, 370, 397
167 <-> 1446
168 <-> 1900
169 <-> 92, 1680
170 <-> 240, 619, 1088, 1509
171 <-> 186, 1610
172 <-> 1622, 1698
173 <-> 447, 542, 912
174 <-> 669, 1687
175 <-> 689
176 <-> 176, 1816
177 <-> 518, 1422, 1493
178 <-> 479, 731, 1615, 1718
179 <-> 483
180 <-> 24, 180
181 <-> 329, 1941
182 <-> 405, 1175, 1685
183 <-> 291, 466, 558, 891
184 <-> 527
185 <-> 185, 868, 1136
186 <-> 171, 867
187 <-> 1622, 1634
188 <-> 211
189 <-> 797
190 <-> 1307, 1504
191 <-> 86, 996
192 <-> 1810
193 <-> 315
194 <-> 194, 1198
195 <-> 1401, 1581, 1904
196 <-> 102
197 <-> 539
198 <-> 1996
199 <-> 1601
200 <-> 1617, 1776
201 <-> 294, 390
202 <-> 839, 986
203 <-> 1683
204 <-> 546
205 <-> 1673, 1894
206 <-> 1825
207 <-> 207, 222
208 <-> 210, 1679
209 <-> 384, 421, 1249
210 <-> 208, 210, 1721
211 <-> 188, 211, 388, 394, 440, 1205
212 <-> 834, 1857
213 <-> 1102
214 <-> 1803
215 <-> 1033, 1831
216 <-> 18, 1039
217 <-> 1168, 1983
218 <-> 1273, 1944
219 <-> 845, 1271
220 <-> 321, 640
221 <-> 629
222 <-> 43, 207, 285, 1486, 1508
223 <-> 809, 1371
224 <-> 480
225 <-> 904, 1190, 1378
226 <-> 226
227 <-> 1044, 1294
228 <-> 793, 911
229 <-> 1450, 1940
230 <-> 822
231 <-> 321, 352
232 <-> 232
233 <-> 491, 543
234 <-> 1880
235 <-> 861, 1349
236 <-> 1738, 1977
237 <-> 590, 1246, 1805
238 <-> 238, 665
239 <-> 577, 818, 877
240 <-> 170
241 <-> 52, 241, 946, 1439, 1441
242 <-> 262, 1226, 1647, 1661
243 <-> 316, 1117, 1831
244 <-> 345
245 <-> 527, 1392, 1526
246 <-> 1335
247 <-> 1754, 1842, 1905
248 <-> 1572
249 <-> 1066, 1185, 1593
250 <-> 934, 1775, 1821
251 <-> 726, 749
252 <-> 252
253 <-> 253
254 <-> 1765
255 <-> 753, 1337
256 <-> 411, 1190
257 <-> 1664, 1731
258 <-> 846
259 <-> 450, 1062, 1975
260 <-> 85, 1097, 1620
261 <-> 662, 1667
262 <-> 242, 1285, 1641
263 <-> 263
264 <-> 1481
265 <-> 470, 1671
266 <-> 971
267 <-> 1002, 1893
268 <-> 393, 1435
269 <-> 413, 806, 1287, 1525
270 <-> 1182
271 <-> 1377
272 <-> 158, 867
273 <-> 273, 499, 568, 845, 1293
274 <-> 800
275 <-> 936
276 <-> 925
277 <-> 1295
278 <-> 1085, 1140
279 <-> 873, 883, 989
280 <-> 280
281 <-> 1211
282 <-> 445, 674, 1234
283 <-> 771
284 <-> 1874
285 <-> 222, 647, 1092
286 <-> 53, 1191, 1678
287 <-> 595, 1928
288 <-> 947
289 <-> 822, 1836, 1962
290 <-> 137, 1034, 1735
291 <-> 183
292 <-> 331, 1038
293 <-> 1025
294 <-> 201, 630, 1421
295 <-> 1083, 1366
296 <-> 701, 1187, 1618
297 <-> 6, 990
298 <-> 1093
299 <-> 299
300 <-> 846, 1990
301 <-> 1306
302 <-> 137, 1011
303 <-> 67, 106, 1779
304 <-> 1202
305 <-> 439
306 <-> 622, 1858
307 <-> 1379
308 <-> 688, 1631, 1700
309 <-> 133
310 <-> 166, 362
311 <-> 28, 933
312 <-> 881
313 <-> 22, 549, 678, 1145
314 <-> 1439
315 <-> 193, 650, 1572
316 <-> 243
317 <-> 317, 990, 1638
318 <-> 117, 499
319 <-> 1300, 1309, 1614
320 <-> 633, 1693
321 <-> 220, 231
322 <-> 977, 1722
323 <-> 730, 1372, 1996
324 <-> 755, 1184
325 <-> 1155, 1857
326 <-> 76
327 <-> 1072, 1814, 1985
328 <-> 1125, 1279
329 <-> 181
330 <-> 936, 1241
331 <-> 292, 1172
332 <-> 1859, 1882
333 <-> 940
334 <-> 468
335 <-> 697, 802
336 <-> 437
337 <-> 35, 737
338 <-> 24, 1540
339 <-> 493
340 <-> 111, 422, 525
341 <-> 1790
342 <-> 496, 1007
343 <-> 343, 1264
344 <-> 34, 344
345 <-> 244, 709
346 <-> 553, 1616
347 <-> 909
348 <-> 521, 1660
349 <-> 363, 1294
350 <-> 719, 1782, 1974
351 <-> 405, 915
352 <-> 231, 1694
353 <-> 1140
354 <-> 363, 1339
355 <-> 30
356 <-> 63, 771, 1110
357 <-> 1299, 1347
358 <-> 635
359 <-> 1541
360 <-> 96, 360, 1741
361 <-> 361
362 <-> 310
363 <-> 349, 354
364 <-> 1827
365 <-> 161, 1734
366 <-> 900
367 <-> 1139, 1545
368 <-> 535
369 <-> 1622
370 <-> 166, 1463
371 <-> 70
372 <-> 7, 452, 810, 1283
373 <-> 997, 1658
374 <-> 467, 1774
375 <-> 716, 1841
376 <-> 638, 1079, 1262
377 <-> 606
378 <-> 993
379 <-> 379, 791
380 <-> 107
381 <-> 475, 1510
382 <-> 780
383 <-> 383
384 <-> 99, 209, 1590
385 <-> 1388
386 <-> 1829
387 <-> 52, 1532, 1874
388 <-> 211, 576, 1281
389 <-> 1309
390 <-> 201, 1183
391 <-> 959, 1944
392 <-> 688, 1062, 1299
393 <-> 268
394 <-> 211, 828, 1701
395 <-> 1587
396 <-> 1082
397 <-> 166
398 <-> 1454, 1508
399 <-> 1007
400 <-> 1531
401 <-> 511, 1140
402 <-> 652, 1065
403 <-> 83, 101, 1585
404 <-> 19, 151
405 <-> 15, 182, 351
406 <-> 769
407 <-> 1275, 1578, 1752
408 <-> 1173
409 <-> 409, 432
410 <-> 487, 1334
411 <-> 256
412 <-> 412, 590
413 <-> 269
414 <-> 1371
415 <-> 80, 1832
416 <-> 939, 1644
417 <-> 562, 1510
418 <-> 536
419 <-> 473
420 <-> 77, 1059, 1535, 1863
421 <-> 209
422 <-> 340, 913, 989
423 <-> 854, 951
424 <-> 512
425 <-> 1087
426 <-> 773
427 <-> 1121, 1574
428 <-> 745
429 <-> 1669
430 <-> 1018
431 <-> 1377
432 <-> 409
433 <-> 1641, 1999
434 <-> 1605
435 <-> 1412, 1500
436 <-> 879, 1704
437 <-> 55, 336, 548, 1839, 1987
438 <-> 980, 1399
439 <-> 305, 439, 627,
from typing import Union
from qtpy.QtCore import Qt, QObject, Signal, QTimer
from qtpy.QtWidgets import (
QWidget,
QLabel,
QComboBox,
QSpinBox,
QDoubleSpinBox,
QLineEdit,
QPushButton
)
from superqt import QLabeledSlider
from qtpy.QtWidgets import QFormLayout, QGridLayout, QGroupBox
from abc import ABC, abstractmethod
from dataclasses import replace
from napari_live_recording.common import ROI
from enum import Enum
class Timer(QTimer):
pass
class WidgetEnum(Enum):
ComboBox = 0
SpinBox = 1
DoubleSpinBox = 2
LabeledSlider = 3
LineEdit = 4
class LocalWidget(ABC):
def __init__(self, internalWidget : QWidget, name: str, unit: str = "", orientation: str = "left") -> None:
"""Common widget constructor.
Args:
internalWidget (QWidget): widget to construct the form layout.
name (str): parameter label description.
unit (str, optional): parameter unit measure. Defaults to "".
orientation (str, optional): label orientation on the layout. Defaults to "left".
"""
self.__name = name
self.__unit = unit
labelStr = (self.__name + " (" + self.__unit + ")" if self.__unit != "" else self.__name)
self.label = QLabel(labelStr)
self.label.setAlignment(Qt.AlignmentFlag.AlignCenter)
self.widget = internalWidget
@property
def isEnabled(self) -> bool:
"""Widget is enabled for editing (True) or not (False).
"""
return self.widget.isEnabled()
@isEnabled.setter
def isEnabled(self, enable : bool) -> None:
"""Sets widget enabled for editing (True) or not (False).
"""
self.widget.setEnabled(enable)
@abstractmethod
def changeWidgetSettings(self, newParam) -> None:
"""Common widget update parameter abstract method.
"""
pass
@property
@abstractmethod
def value(self) -> None:
"""Widget current value.
"""
pass
@value.setter
@abstractmethod
def value(self, value: Union[str, int, float]) -> None:
"""Widget value setter.
"""
pass
@property
@abstractmethod
def signals(self) -> dict[str, Signal]:
"""Common widget method to expose signals to the device.
"""
pass
class ComboBox(LocalWidget):
def __init__(self, param : list[str], name : str, unit : str = "", orientation: str = "left") -> None:
"""ComboBox widget.
Args:
param (list[str]): list of parameters added to the ComboBox.
name (str): parameter label description.
unit (str, optional): parameter unit measure. Defaults to "".
orientation (str, optional): label orientation on the layout. Defaults to "left".
"""
self.combobox = QComboBox()
self.combobox.addItems(param)
super().__init__(self.combobox, name, unit, orientation)
def changeWidgetSettings(self, newParam: list[str]) -> None:
"""ComboBox update widget parameter method. Old list of items is deleted.
Args:
newParam (list[str]): new list of parameters to add to the ComboBox.
"""
self.combobox.clear()
self.combobox.addItems(newParam)
@property
def value(self) -> tuple[str, int]:
"""Returns a tuple containing the ComboBox current text and index.
"""
return (self.combobox.currentText(), self.combobox.currentIndex())
@value.setter
def value(self, value: int) -> None:
"""Sets the ComboBox current showed value (based on elements indeces).
Args:
value (int): index of value to show on the ComboBox.
"""
self.combobox.setCurrentIndex(value)
@property
def signals(self) -> dict[str, Signal]:
"""Returns a dictionary of signals available for the ComboBox widget.
Exposed signals are:
- currentIndexChanged,
- currentTextChanged
Returns:
dict: dict of signals (key: function name, value: function objects).
"""
return {
"currentIndexChanged" : self.combobox.currentIndexChanged,
"currentTextChanged" : self.combobox.currentTextChanged
}
class SpinBox(LocalWidget):
def __init__(self, param: tuple[int, int, int], name: str, unit: str = "", orientation: str = "left") -> None:
"""SpinBox widget.
Args:
param (tuple[int, int, int]): parameters for SpinBox settings: (<minimum_value>, <maximum_value>, <starting_value>)
name (str): parameter label description.
unit (str, optional): parameter unit measure. Defaults to "".
orientation (str, optional): label orientation on the layout. Defaults to "left".
"""
self.spinbox = QSpinBox()
self.spinbox.lineEdit().setAlignment(Qt.AlignmentFlag.AlignCenter)
self.spinbox.setRange(param[0], param[1])
self.spinbox.setValue(param[2])
super().__init__(self.spinbox, name, unit, orientation)
def changeWidgetSettings(self, newParam : tuple[int, int, int]) -> None:
"""SpinBox update widget parameter method.
Args:
newParam (tuple(int, int, int)): new parameters for SpinBox settings: (<minimum_value>, <maximum_value>, <starting_value>)
"""
self.spinbox.setRange(newParam[0], newParam[1])
self.spinbox.setValue(newParam[2])
@property
def value(self) -> int:
"""Returns the SpinBox current value.
"""
return self.spinbox.value()
@value.setter
def value(self, value: int) -> None:
"""Sets the SpinBox current value to show on the widget.
Args:
value (int): value to set.
"""
self.spinbox.setValue(value)
@property
def signals(self) -> dict[str, Signal]:
"""Returns a dictionary of signals available for the SpinBox widget.
Exposed signals are:
- valueChanged,
- textChanged
Returns:
dict: dict of signals (key: function name, value: function objects).
"""
return {
"valueChanged" : self.spinbox.valueChanged,
"textChanged" : self.spinbox.textChanged
}
class DoubleSpinBox(LocalWidget):
def __init__(self, param: tuple[float, float, float], name: str, unit: str = "", orientation: str = "left") -> None:
"""DoubleSpinBox widget.
Args:
param (tuple[float, float, float]): parameters for spinbox settings: (<minimum_value>, <maximum_value>, <starting_value>)
name (str): parameter label description.
unit (str, optional): parameter unit measure. Defaults to "".
orientation (str, optional): label orientation on the layout. Defaults to "left".
"""
self.__spinbox = QDoubleSpinBox()
self.__spinbox.setRange(param[0], param[1])
self.__spinbox.setValue(param[2])
super().__init__(self.__spinbox, name, unit, orientation)
def changeWidgetSettings(self, newParam : tuple[float, float, float]) -> None:
"""DoubleSpinBox update widget parameter method.
Args:
newParam (tuple[float, float, float]): new parameters for SpinBox settings: (<minimum_value>, <maximum_value>, <starting_value>)
"""
self.__spinbox.setRange(newParam[0], newParam[1])
self.__spinbox.setValue(newParam[2])
@property
def value(self) -> float:
"""Returns the DoubleSpinBox current value.
"""
return self.__spinbox.value()
@value.setter
def value(self, value: float) -> None:
"""Sets the DoubleSpinBox current value to show on the widget.
Args:
value (float): value to set.
"""
self.__spinbox.setValue(value)
@property
def signals(self) -> dict[str, Signal]:
"""Returns a dictionary of signals available for the SpinBox widget.
Exposed signals are:
- valueChanged,
- textChanged
Returns:
dict: dict of signals (key: function name, value: function objects).
"""
return {
"valueChanged" : self.__spinbox.valueChanged,
"textChanged" : self.__spinbox.textChanged
}
class LabeledSlider(LocalWidget):
def __init__(self, param: tuple[int, int, int], name: str, unit: str = "", orientation: str = "left") -> None:
"""Slider widget.
Args:
param (tuple[int, int, int])): parameters for spinbox settings: (<minimum_value>, <maximum_value>, <starting_value>)
name (str): parameter label description.
unit (str, optional): parameter unit measure. Defaults to "".
orientation (str, optional): label orientation on the layout. Defaults to "left".
"""
self.__slider = QLabeledSlider(Qt.Horizontal)
self.__slider.setRange(param[0], param[1])
self.__slider.setValue(param[2])
super().__init__(self.__slider, name, unit, orientation)
def changeWidgetSettings(self, newParam : tuple[int, int, int]) -> None:
"""Slider update widget parameter method.
Args:
newParam (tuple[int, int, int]): new parameters for SpinBox settings: (<minimum_value>, <maximum_value>, <starting_value>)
"""
self.__slider.setRange(newParam[0], newParam[1])
self.__slider.setValue(newParam[2])
@property
def value(self) -> int:
"""Returns the Slider current value.
"""
return self.__slider.value()
@value.setter
def value(self, value: int) -> None:
"""Sets the DoubleSpinBox current value to show on the widget.
Args:
value (float): value to set.
"""
self.__slider.setValue(value)
@property
def signals(self) -> dict[str, Signal]:
"""Returns a dictionary of signals available for the SpinBox widget.
Exposed signals are:
- valueChanged
Returns:
dict: dict of signals (key: function name, value: function objects).
"""
return {
"valueChanged" : self.__slider.valueChanged
}
class LineEdit(LocalWidget):
def __init__(self, param: str, name: str, unit: str = "", orientation: str = "left") -> None:
"""LineEdit widget.
Args:
param (str): line edit contents
name (str): parameter label description.
unit (str, optional): parameter unit measure. Defaults to "".
orientation (str, optional): label orientation on the layout. Defaults to "left".
"""
self.__lineEdit = QLineEdit(param)
super().__init__(self.__lineEdit, name, unit, orientation)
def changeWidgetSettings(self, newParam : str) -> None:
"""Updates LineEdit text contents.
Args:
newParam (str): new string for LineEdit.
"""
self.__lineEdit.setText(newParam)
@property
def value(self) -> str:
"""Returns the LineEdit current text.
"""
return self.__lineEdit.text()
@value.setter
def value(self, value: str) -> None:
"""Sets the LineEdit current text to show on the widget.
Args:
value (str): string to set.
"""
self.__lineEdit.setText(value)
@property
def signals(self) -> dict[str, Signal]:
"""Returns a dictionary of signals available for the LineEdit widget.
Exposed signals are:
- textChanged,
- textEdited
Returns:
dict: dict of signals (key: function name, value: function objects).
"""
return {
"textChanged" : self.__lineEdit.textChanged,
"textEdited" : self.__lineEdit.textEdited
}
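# --- Usage sketch (illustrative, not part of the original module) ---
# A minimal, hypothetical example of how a LocalWidget subclass such as
# ComboBox is composed into a form layout and how its value property and
# exposed signals are used. The exposure values below are made up.
if __name__ == "__main__":
    from qtpy.QtWidgets import QApplication

    app = QApplication([])
    container = QWidget()
    layout = QFormLayout()

    exposure = ComboBox(["10 ms", "20 ms", "50 ms"], name="Exposure time", unit="ms")
    # react to user selection through the exposed signal dictionary
    exposure.signals["currentTextChanged"].connect(lambda text: print("selected:", text))
    layout.addRow(exposure.label, exposure.widget)
    container.setLayout(layout)
    container.show()

    exposure.value = 1       # programmatically select "20 ms"
    print(exposure.value)    # -> ("20 ms", 1)
    # app.exec_() would start the Qt event loop in a real application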
class CameraSelection(QObject):
newCameraRequested = Signal(str, str, str)
def __init__(self) -> None:
"""Camera selection widget. It includes the following widgets:
- a ComboBox for camera selection based on strings to identify each camera type;
- a LineEdit for the camera name and another for the camera ID or serial number;
- a QPushButton to add the camera.
Widget grid layout:
|(0,0-1) ComboBox |(0,2) QPushButton|
|(1,0) LineEdit |(1,1) LineEdit |(1,2) |
The QPushButton remains disabled as long as no camera is selected (i.e. while the first index is highlighted).
"""
super(CameraSelection, self).__init__()
self.group = QGroupBox()
self.camerasComboBox = ComboBox([], "Interface")
self.nameLineEdit = LineEdit(param="MyCamera", name="Camera name")
self.idLineEdit = LineEdit(param="0", name="Camera ID/SN", orientation="right")
self.addButton = QPushButton("Add camera")
self.addButton.setEnabled(False)
from __future__ import with_statement
import optparse
import os
import logging
import platform
import signal
import sys
import threading
import time
import traceback
from w3testrunner.webapp import WebApp
from w3testrunner import teststores
from w3testrunner.browsers.browser import BrowserInfo, BrowserException
from w3testrunner.browsers.manager import browsers_manager
log = logging.getLogger(__name__)
# Keep this in sync with the statuses in
# w3testrunner/resources/testrunner/testrunner.js, order matters.
STATUSES = "INITIALIZING RUNNING FINISHED STOPPED ERROR"
STATUS_TO_NAME = {}
for value, name in enumerate(STATUSES.split()):
locals()[name] = value
STATUS_TO_NAME[value] = name
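# After this loop the module defines INITIALIZING = 0, RUNNING = 1,
# FINISHED = 2, STOPPED = 3 and ERROR = 4 as module-level constants, and
# STATUS_TO_NAME maps each value back to its name for log messages.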
# from http://code.activestate.com/recipes/465057/
def synchronized(lock):
""" Synchronization decorator. """
def wrap(f):
def newFunction(*args, **kw):
lock.acquire()
try:
return f(*args, **kw)
finally:
lock.release()
return newFunction
return wrap
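# Illustrative use (hypothetical names): any callable decorated with
# @synchronized(some_lock) acquires the lock before the call and releases it
# afterwards, e.g.:
#
#   _demo_lock = threading.RLock()
#
#   @synchronized(_demo_lock)
#   def _update_shared_state():
#       ...  # runs with _demo_lock held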
runner_lock = threading.RLock()
class Runner(object):
def __init__(self, options, start_loop=True):
self.options = options
self.running = False
self.last_test_store = None
self.running_test = None
self.hang_timer = None
self.last_hung_testid = None
self.start_loop = start_loop
self.batch = False
# TODO: rename to browsers when implementing multiple browser support.
self.browser = None
self.active_browser = None
self.tests_finished_event = threading.Event()
self.end_event = threading.Event()
self.reset()
self.webapp = WebApp(self)
# Guard in an exception handler so that the webapp can shutdown if
# there's an exception raised in _post_init().
try:
self._post_init()
except:
traceback.print_exc()
def _post_init(self):
if self.options.browser:
# Batch mode is active if there's a browser to control.
self.batch = True
name = path = None
if not os.path.dirname(self.options.browser):
name = self.options.browser
else:
path = self.options.browser
browser_info = BrowserInfo(name=name, path=path)
self.browser = browsers_manager.find_browser(browser_info)
log.info("Using browser: %s", self.browser)
store_info = self._options_to_store_info(self.options)
if self.batch:
if not store_info:
raise Exception("No tests to load. You should specify options "
"for test loading such as --tests-path or "
"--username and --token")
self.test_store = self._create_store(store_info)
threading.Thread(target=self._main_loop).start()
else:
if store_info:
self.load_tests(store_info)
log.info("The runner is started. You should now point your "
"browser to http://localhost:8888/")
self.status = STOPPED
self.running = True
if not self.start_loop:
return
try:
while self.running:
time.sleep(1)
except KeyboardInterrupt:
# If not killed here, the other thread would keep the process running
if hasattr(os, "kill"):
os.kill(os.getpid(), signal.SIGKILL)
sys.exit()
log.info("End of main()")
self.end_event.set()
def _get_ua_string(self):
if self.batch:
if not self.active_browser:
return None
return self.active_browser.ua_string
return self._ua_string
def _set_ua_string(self, value):
if self.batch:
if not self.active_browser:
assert not value
return
assert self.active_browser
self.active_browser.ua_string = value
return
self._ua_string = value
ua_string = property(_get_ua_string, _set_ua_string)
def _options_to_store_info(self, options):
store_infos = []
for store_class in teststores.STORES:
store_info = store_class.options_to_store_info(self.options)
if store_info:
store_infos.append(store_info)
if len(store_infos) > 1:
raise Exception("More than one store found for the specified "
"command line options.\n"
"There may be some conflicting parameters.")
if len(store_infos) == 1:
return store_infos[0]
return None
def _create_store(self, store_info):
name = store_info["name"]
store_classes = [sc for sc in teststores.STORES if sc.name == name]
if len(store_classes) == 0:
raise Exception("Can't find a store for name %s" % name)
assert len(store_classes) == 1, "Duplicate stores?"
return store_classes[0](self, store_info)
def _main_loop(self):
log.debug("in main_loop %s", self)
try:
# TODO: remove once multiple browser support is implemented.
self.active_browser = self.browser
# The browser is launched before the tests are loaded from the
# store, so that we can get the useragent string and give it to
# the store.
self.browser.terminate()
while True:
log.info("Loading tests...")
self.set_status(INITIALIZING, "Initializing browser.")
self.active_browser.launch()
with runner_lock:
found_tests = self._do_load_tests()
self.set_status(RUNNING, "Running tests.", True)
if not found_tests:
# TODO: wait a moment and try again to find tests?
log.info("No tests found, terminating")
break
log.debug("Waiting for tests to finish...")
self.tests_finished_event.wait()
self.tests_finished_event.clear()
log.debug("...Finished waiting for end of tests")
self._do_save_tests()
if self.status == ERROR:
break
self.reset()
if self.test_store.load_once:
break
except Exception, e:
self.set_status(ERROR, "Exception in _main_loop: %s" % e)
if self.tests:
self._do_save_tests()
if self.status == ERROR:
log.error("\n\nError encountered while running tests, "
"terminating.\n Status message: %s\n\n",
self.status_message)
self.browser.cleanup()
self.running = False
self.webapp.running = False
self.end_event.set()
def _set_tests(self, tests):
self.tests = tests
self.testid_to_test = dict([(test["id"], test) for test in self.tests])
self.finished_tests_count = 0
self.status = STOPPED
def _get_metadata(self):
metadata = {}
if self.active_browser:
metadata["browser_info.platform"] = \
self.active_browser.browser_info.platform
metadata["browser_info.name"] = \
self.active_browser.browser_info.name
metadata["browser_info.path"] = \
self.active_browser.browser_info.path
metadata["ua_string"] = self.active_browser.ua_string
metadata["system"] = platform.system()
if platform.system() == "Windows":
metadata["win32_ver"] = platform.win32_ver()
elif platform.system() == "Darwin":
metadata["mac_ver"] = platform.mac_ver()
elif platform.system() == "Linux":
metadata["linux_distribution"] = platform.linux_distribution()
return metadata
def _do_load_tests(self):
tests = self.test_store.load(self._get_metadata())
self.last_test_store = self.test_store
self._set_tests(tests)
return len(tests) > 0
@synchronized(runner_lock)
def _do_save_tests(self):
self.test_store.save(self._get_metadata())
def _stop_hang_timer(self):
if not self.hang_timer:
return
self.hang_timer.cancel()
self.hang_timer = None
self.last_hung_testid = None
def _ensure_status(self, *allowed_statuses):
if self.status in allowed_statuses:
return
raise Exception("Unexpected status %r. Allowed statuses are: %r" % (
STATUS_TO_NAME[self.status],
[STATUS_TO_NAME[s] for s in allowed_statuses]))
@synchronized(runner_lock)
def reset(self):
self._stop_hang_timer()
self.status = STOPPED
self.status_message = ""
self._ua_string = None
self.testid_to_test = {}
if self.last_test_store:
self.last_test_store.cleanup()
self.last_test_store = None
self.tests = []
@synchronized(runner_lock)
def clear_results(self):
self._ensure_status(STOPPED, RUNNING, FINISHED)
for test in self.tests:
if "result" in test:
del test["result"]
@synchronized(runner_lock)
def get_state(self):
"""Return a JSONifiable object representing the Runner state."""
state = {
"status": self.status,
"status_message": self.status_message,
"ua_string": self.ua_string,
"batch": self.batch,
"timeout": self.options.timeout,
}
state["tests"] = self.tests
return state
@synchronized(runner_lock)
def load_tests(self, store_info):
log.info("Loading tests using store_info: %s", store_info)
self._ensure_status(STOPPED, FINISHED)
self.reset()
self.test_store = self._create_store(store_info)
if not self.test_store:
raise Exception("Can't find a store for store_info %s", store_info)
self._do_load_tests()
def _do_hang_timer_callback(self):
self.hang_timer = None
self._ensure_status(RUNNING)
# XXX investigate how this sometimes happens.
if not self.running_test:
log.error("No running test when hang timer fired. "
"How did that happen?")
return
log.info("Detected hang while running test %s",
self.running_test["id"])
self.last_hung_testid = self.running_test["id"]
status = "timeout"
status_message = "Timeout detected from server side"
if self.browser and not self.browser.is_alive():
log.debug("Detected browser crash")
status = "crash"
status_message = "Browser crash detected from server side"
self.set_result(self.running_test["id"], {
"status": status,
"status_message": status_message,
}, True)
self.running_test = None
if self.tests_finished_event.is_set():
return
if self.active_browser:
self.set_status(INITIALIZING, "Initializing browser.")
try:
self.active_browser.launch()
self.set_status(RUNNING, "Running after browser restart.", True)
except BrowserException, e:
self.set_status(ERROR, "Exception while restarting the "
"browser: %s" % e)
def _hang_timer_callback(self):
try:
self._do_hang_timer_callback()
except Exception, e:
self.set_status(ERROR, "Error in _hang_timer_callback: %s" % e)
@synchronized(runner_lock)
def set_status(self, status, message, allow_leaving_initializing=False):
if (not allow_leaving_initializing and
self.status == INITIALIZING and
status != INITIALIZING and
status != ERROR):
raise Exception("Not allowed to leave INITIALIZING state.")
if self.status == ERROR and status == ERROR:
log.warn("Setting ERROR state twice. The message is ignored")
return
if self.status == ERROR:
log.warn("Can't override ERROR status. reset() or load_tests() "
"should be called")
return
self.status = status
self.status_message = message
if status == ERROR:
self._stop_hang_timer()
self.tests_finished_event.set()
def _get_test(self, testid):
if not testid in self.testid_to_test:
raise Exception("Test with identifier %s not found" % testid)
return self.testid_to_test[testid]
@synchronized(runner_lock)
def test_started(self, testid):
log.info("Test %s started", testid)
self._ensure_status(RUNNING)
test = self._get_test(testid)
if "result" in test:
raise Exception("Starting a test which already has a result "
"(test id: %s, existing result: %s)" % (
testid, test["result"]))
self.running_test = test
if self.options.timeout <= 0:
return
# How much to multiply the timeout duration to get the server-side
# waiting time. The intention is to have a server timeout larger than
# the timeout used in the client-side harness, so that the client can
# catch hangs first.
SERVER_HANG_TIMER_RATIO = 1.2
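# For instance, a 30 second timeout option arms the server-side hang
# timer for 30 * 1.2 = 36 seconds.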
self._stop_hang_timer()
self.hang_timer = threading.Timer(self.options.timeout *
SERVER_HANG_TIMER_RATIO,
self._hang_timer_callback)
self.hang_timer.start()
def _ensure_running_test(self, testid):
if not self.running_test:
raise Exception("test_started wasn't called")
if testid != self.running_test["id"]:
raise Exception("test_started was called with a different "
"test id (old: %s, new: %s)" %
(self.running_test["id"], testid))
@synchronized(runner_lock)
def suspend_timer(self, testid, suspended):
log.debug("suspend_timer testid: %s, suspended: %s", testid, suspended)
self._ensure_status(RUNNING, STOPPED)
test = self._get_test(testid)
self._ensure_running_test(testid)
if suspended:
self._stop_hang_timer()
else:
self.test_started(testid)
@synchronized(runner_lock)
def set_result(self, testid, result, did_start_notify):
log.info("Saving result for testid: %s", testid)
self._ensure_status(RUNNING, STOPPED, FINISHED)
self._stop_hang_timer()
test = self._get_test(testid)
if did_start_notify:
# The last_hung_testid instance variable and this check are
# used to ignore the rare case when a test finishes after a hang
# was detected. It could happen if the tests used an alert() which
# would freeze the client side timeout.
if (not self.running_test and self.last_hung_testid and
self.last_hung_testid == testid):
log.info("Detecting a test which completed after a timout"
"was detected on the server side, ignoring the result")
self.last_hung_testid = None
return
self._ensure_running_test(testid)
self.running_test = None
if not result:
if not "result" in test:
raise Exception("Test with id %s has no result to clear" %
testid)
del test["result"]
self.finished_tests_count -= 1
else:
if "result" in test:
raise Exception("Overwriting an existing result | |
else:
# encoder_output_for_decoder = encoder_outputs_tensor
# encoder_outputs = (encoder_output_for_decoder, ) + encoder_outputs[1:]
# Expand input ids if num_beams > 1 or num_return_sequences > 1
if num_return_sequences > 1 or num_beams > 1:
input_ids_len = input_ids.shape[-1]
input_ids = input_ids.unsqueeze(1).expand(batch_size, effective_batch_mult * num_beams, input_ids_len)
attention_mask = attention_mask.unsqueeze(1).expand(
batch_size, effective_batch_mult * num_beams, input_ids_len
)
input_ids = input_ids.contiguous().view(
effective_batch_size * num_beams, input_ids_len
) # shape: (batch_size * num_return_sequences * num_beams, cur_len)
attention_mask = attention_mask.contiguous().view(
effective_batch_size * num_beams, input_ids_len
) # shape: (batch_size * num_return_sequences * num_beams, cur_len)
if self.config.is_encoder_decoder:
# create empty decoder_input_ids
input_ids = torch.full(
(effective_batch_size * num_beams, 1),
decoder_start_token_id,
dtype=torch.long,
device=next(self.parameters()).device,
)
cur_len = 1
assert (
batch_size == encoder_outputs[0].shape[0]
), f"expected encoder_outputs[0] to have 1st dimension bs={batch_size}, got {encoder_outputs[0].shape[0]} "
# expand batch_idx to assign correct encoder output for expanded input_ids (due to num_beams > 1 and num_return_sequences > 1)
expanded_batch_idxs = (
torch.arange(batch_size)
.view(-1, 1)
.repeat(1, num_beams * effective_batch_mult)
.view(-1)
.to(input_ids.device)
)
# expand encoder_outputs
encoder_outputs = (encoder_outputs[0].index_select(0, expanded_batch_idxs), *encoder_outputs[1:])
else:
encoder_outputs = None
cur_len = input_ids.shape[-1]
if num_beams > 1:
output = self._generate_beam_search(
input_ids,
column_spans=column_spans,
copy_span=copy_span,
cur_len=cur_len,
max_length=max_length,
min_length=min_length,
do_sample=do_sample,
early_stopping=early_stopping,
temperature=temperature,
top_k=top_k,
top_p=top_p,
repetition_penalty=repetition_penalty,
no_repeat_ngram_size=no_repeat_ngram_size,
bad_words_ids=bad_words_ids,
bos_token_id=bos_token_id,
pad_token_id=pad_token_id,
decoder_start_token_id=decoder_start_token_id,
eos_token_id=eos_token_id,
batch_size=effective_batch_size,
num_return_sequences=num_return_sequences,
length_penalty=length_penalty,
num_beams=num_beams,
vocab_size=vocab_size,
encoder_outputs=encoder_outputs,
attention_mask=attention_mask,
use_cache=use_cache,
model_specific_kwargs=model_specific_kwargs,
)
else:
output = self._generate_no_beam_search(
input_ids,
column_spans=column_spans,
copy_span=copy_span,
cur_len=cur_len,
max_length=max_length,
min_length=min_length,
do_sample=do_sample,
temperature=temperature,
top_k=top_k,
top_p=top_p,
repetition_penalty=repetition_penalty,
no_repeat_ngram_size=no_repeat_ngram_size,
bad_words_ids=bad_words_ids,
bos_token_id=bos_token_id,
pad_token_id=pad_token_id,
decoder_start_token_id=decoder_start_token_id,
eos_token_id=eos_token_id,
batch_size=effective_batch_size,
encoder_outputs=encoder_outputs,
attention_mask=attention_mask,
use_cache=use_cache,
model_specific_kwargs=model_specific_kwargs,
)
return output
def _generate_no_beam_search(
self,
input_ids,
column_spans,
copy_span,
cur_len,
max_length,
min_length,
do_sample,
temperature,
top_k,
top_p,
repetition_penalty,
no_repeat_ngram_size,
bad_words_ids,
bos_token_id,
pad_token_id,
eos_token_id,
decoder_start_token_id,
batch_size,
encoder_outputs,
attention_mask,
use_cache,
model_specific_kwargs,
):
""" Generate sequences for each example without beam search (num_beams == 1).
All returned sequences are generated independently.
"""
# length of generated sentences / unfinished sentences
unfinished_sents = input_ids.new(batch_size).fill_(1)
sent_lengths = input_ids.new(batch_size).fill_(max_length)
past = encoder_outputs # defined for encoder-decoder models, None for decoder-only models
logits_history = []
while cur_len < max_length:
model_inputs = self.prepare_inputs_for_generation(
input_ids, column_spans=column_spans, copy_span=copy_span, past=past, attention_mask=attention_mask, use_cache=use_cache, **model_specific_kwargs
)
outputs = self(**model_inputs)
next_token_logits = outputs[0][:, -1, :]
# if model has past, then set the past variable to speed up decoding
if self._use_cache(outputs, use_cache):
past = outputs[1]
# repetition penalty from CTRL paper (https://arxiv.org/abs/1909.05858)
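# (in the CTRL formulation, the logit of every token already present in
# input_ids is divided by the penalty when positive and multiplied by it
# when negative, making already-generated tokens less likely to reappear)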
if repetition_penalty != 1.0:
self.enforce_repetition_penalty_(next_token_logits, batch_size, 1, input_ids, repetition_penalty)
if no_repeat_ngram_size > 0:
# calculate a list of banned tokens to prevent repetitively generating the same ngrams
# from fairseq: https://github.com/pytorch/fairseq/blob/a07cb6f40480928c9e0548b737aadd36ee66ac76/fairseq/sequence_generator.py#L345
banned_tokens = calc_banned_ngram_tokens(input_ids, batch_size, no_repeat_ngram_size, cur_len)
for batch_idx in range(batch_size):
next_token_logits[batch_idx, banned_tokens[batch_idx]] = -float("inf")
if bad_words_ids is not None:
# calculate a list of banned tokens according to bad words
banned_tokens = calc_banned_bad_words_ids(input_ids, bad_words_ids)
for batch_idx in range(batch_size):
next_token_logits[batch_idx, banned_tokens[batch_idx]] = -float("inf")
# set eos token prob to zero if min_length is not reached
if eos_token_id is not None and cur_len < min_length:
next_token_logits[:, eos_token_id] = -float("inf")
if do_sample:
# Temperature (higher temperature => more likely to sample low probability tokens)
if temperature != 1.0:
next_token_logits = next_token_logits / temperature
# Top-p/top-k filtering
next_token_logits = top_k_top_p_filtering(next_token_logits, top_k=top_k, top_p=top_p)
# Sample
probs = F.softmax(next_token_logits, dim=-1)
next_token = torch.multinomial(probs, num_samples=1).squeeze(1)
else:
# Greedy decoding
next_token = torch.argmax(next_token_logits, dim=-1)
# update generations and finished sentences
if eos_token_id is not None:
# pad finished sentences if eos_token_id exist
tokens_to_add = next_token * unfinished_sents + (pad_token_id) * (1 - unfinished_sents)
else:
tokens_to_add = next_token
input_ids = torch.cat([input_ids, tokens_to_add.unsqueeze(-1)], dim=-1)
if eos_token_id is not None:
eos_in_sents = tokens_to_add == eos_token_id
# if sentence is unfinished and the token to add is eos, sent_lengths is filled with current length
is_sents_unfinished_and_token_to_add_is_eos = unfinished_sents.mul(eos_in_sents.long()).bool()
sent_lengths.masked_fill_(is_sents_unfinished_and_token_to_add_is_eos, cur_len + 1)
# unfinished_sents is set to zero if eos in sentence
unfinished_sents.mul_((~eos_in_sents).long())
# stop when there is a </s> in each sentence, or if we exceed the maximum length
if unfinished_sents.max() == 0:
break
# extend attention_mask for new generated input if only decoder
if self.config.is_encoder_decoder is False:
attention_mask = torch.cat(
[attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1
)
cur_len = cur_len + 1
# if there are different sentence lengths in the batch, some sentences have to be padded
if sent_lengths.min().item() != sent_lengths.max().item():
assert pad_token_id is not None, "`Pad_token_id` has to be defined if batches have different lengths"
# finished sents are filled with pad_token
decoded = input_ids.new(batch_size, sent_lengths.max().item()).fill_(pad_token_id)
else:
decoded = input_ids
for hypo_idx, hypo in enumerate(input_ids):
decoded[hypo_idx, : sent_lengths[hypo_idx]] = hypo[: sent_lengths[hypo_idx]]
return decoded
def _generate_beam_search(
self,
input_ids,
column_spans,
copy_span,
cur_len,
max_length,
min_length,
do_sample,
early_stopping,
temperature,
top_k,
top_p,
repetition_penalty,
no_repeat_ngram_size,
bad_words_ids,
bos_token_id,
pad_token_id,
eos_token_id,
decoder_start_token_id,
batch_size,
num_return_sequences,
length_penalty,
num_beams,
vocab_size,
encoder_outputs,
attention_mask,
use_cache,
model_specific_kwargs,
):
""" Generate sequences for each example with beam search.
"""
# generated hypotheses
generated_hyps = [
BeamHypotheses(num_beams, max_length, length_penalty, early_stopping=early_stopping)
for _ in range(batch_size)
]
# scores for each sentence in the beam
beam_scores = torch.zeros((batch_size, num_beams), dtype=torch.float, device=input_ids.device)
# for greedy decoding it is made sure that only tokens of the first beam are considered to avoid sampling the exact same tokens three times
if do_sample is False:
beam_scores[:, 1:] = -1e9
beam_scores = beam_scores.view(-1) # shape (batch_size * num_beams,)
# cache compute states
past = encoder_outputs # defined for encoder-decoder models, None for decoder-only models
# done sentences
done = [False for _ in range(batch_size)]
while cur_len < max_length:
model_inputs = self.prepare_inputs_for_generation(
input_ids, column_spans=column_spans, copy_span=copy_span, past=past, attention_mask=attention_mask, use_cache=use_cache, **model_specific_kwargs
)
outputs = self(**model_inputs) # (batch_size * num_beams, cur_len, vocab_size)
next_token_logits = outputs[0][:, -1, :] # (batch_size * num_beams, vocab_size)
# if model has past, then set the past variable to speed up decoding
if self._use_cache(outputs, use_cache):
past = outputs[1]
# repetition penalty (from CTRL paper https://arxiv.org/abs/1909.05858)
if repetition_penalty != 1.0:
self.enforce_repetition_penalty_(
next_token_logits, batch_size, num_beams, input_ids, repetition_penalty,
)
if temperature != 1.0:
next_token_logits = next_token_logits / temperature
if self.config.is_encoder_decoder and do_sample is False:
# TODO (PVP) still a bit hacky here - there might be a better solution
next_token_logits = self.prepare_logits_for_generation(
next_token_logits, cur_len=cur_len, max_length=max_length
)
scores = F.log_softmax(next_token_logits, dim=-1) # (batch_size * num_beams, vocab_size)
# set eos token prob to zero if min_length is not reached
if eos_token_id is not None and cur_len < min_length:
scores[:, eos_token_id] = -float("inf")
if no_repeat_ngram_size > 0:
# calculate a list of banned tokens to prevent repetitively generating the same ngrams
num_batch_hypotheses = batch_size * num_beams
# from fairseq: https://github.com/pytorch/fairseq/blob/a07cb6f40480928c9e0548b737aadd36ee66ac76/fairseq/sequence_generator.py#L345
banned_batch_tokens = calc_banned_ngram_tokens(
input_ids, num_batch_hypotheses, no_repeat_ngram_size, cur_len
)
for i, banned_tokens in enumerate(banned_batch_tokens):
scores[i, banned_tokens] = -float("inf")
if bad_words_ids is not None:
# calculate a list of banned tokens according to bad words
banned_tokens = calc_banned_bad_words_ids(input_ids, bad_words_ids)
for i, banned_tokens in enumerate(banned_tokens):
scores[i, banned_tokens] = -float("inf")
assert scores.shape == (batch_size * num_beams, vocab_size), "Shapes of scores: {} != {}".format(
scores.shape, (batch_size * num_beams, vocab_size)
)
if do_sample:
_scores = scores + beam_scores[:, None].expand_as(scores) # (batch_size * num_beams, vocab_size)
# Top-p/top-k filtering
_scores = top_k_top_p_filtering(
_scores, top_k=top_k, top_p=top_p, min_tokens_to_keep=2
) # (batch_size * num_beams, vocab_size)
# re-organize to group the beam together to sample from all beam_idxs
_scores = _scores.contiguous().view(
batch_size, num_beams * vocab_size
) # (batch_size, num_beams * vocab_size)
# Sample 2 next tokens for each beam (so we have some spare tokens and match output of greedy beam search)
probs = F.softmax(_scores, dim=-1)
next_tokens = torch.multinomial(probs, num_samples=2 * num_beams) # (batch_size, num_beams * 2)
# Compute next scores
next_scores = torch.gather(_scores, -1, next_tokens) # (batch_size, num_beams * 2)
# sort the sampled vector to make sure that the first num_beams samples are the best
next_scores, next_scores_indices = torch.sort(next_scores, descending=True, dim=1)
next_tokens = torch.gather(next_tokens, -1, next_scores_indices) # (batch_size, num_beams * 2)
else:
next_scores = scores + beam_scores[:, None].expand_as(scores) # (batch_size * num_beams, vocab_size)
# re-organize to group the beams together (we keep the top hypotheses across beams)
next_scores = next_scores.view(
batch_size, num_beams * vocab_size
) # (batch_size, num_beams * vocab_size)
next_scores, next_tokens = torch.topk(next_scores, 2 * num_beams, dim=1, largest=True, sorted=True)
assert next_scores.size() == next_tokens.size() == (batch_size, 2 * num_beams)
# next batch beam content
next_batch_beam = []
# for each sentence
for batch_idx in range(batch_size):
and Bboard.b1d !='P' and Bboard.b1e !='P' and Bboard.b1f !='P' and Bboard.b1g !='P' and Bboard.b1h !='P' and Bboard.b1i !='P'):
moves = 'P*1g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Bboard.L>0:
moves = 'L*1g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Bboard.N>0:
moves = 'N*1g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Bboard.S>0:
moves = 'S*1g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Bboard.G>0:
moves = 'G*1g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Bboard.B>0:
moves = 'B*1g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Bboard.R>0:
moves = 'R*1g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if board.s1h =='':
if Bboard.P>0 and (Bboard.b1b !='P' and Bboard.b1c !='P' and Bboard.b1d !='P' and Bboard.b1e !='P' and Bboard.b1f !='P' and Bboard.b1g !='P' and Bboard.b1h !='P' and Bboard.b1i !='P'):
moves = 'P*1h'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Bboard.L>0:
moves = 'L*1h'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Bboard.N>0:
moves = 'N*1h'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Bboard.S>0:
moves = 'S*1h'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Bboard.G>0:
moves = 'G*1h'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Bboard.B>0:
moves = 'B*1h'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Bboard.R>0:
moves = 'R*1h'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if board.s1i =='':
if Bboard.P>0 and (Bboard.b1b !='P' and Bboard.b1c !='P' and Bboard.b1d !='P' and Bboard.b1e !='P' and Bboard.b1f !='P' and Bboard.b1g !='P' and Bboard.b1h !='P' and Bboard.b1i !='P'):
moves = 'P*1i'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Bboard.L>0:
moves = 'L*1i'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Bboard.N>0:
moves = 'N*1i'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Bboard.S>0:
moves = 'S*1i'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Bboard.G>0:
moves = 'G*1i'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Bboard.B>0:
moves = 'B*1i'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Bboard.R>0:
moves = 'R*1i'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if board.s2a =='':
if Bboard.S>0:
moves = 'S*2a'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Bboard.G>0:
moves = 'G*2a'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Bboard.B>0:
moves = 'B*2a'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Bboard.R>0:
moves = 'R*2a'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if board.s2b =='':
if Bboard.P>0 and (Bboard.b2b !='P' and Bboard.b2c !='P' and Bboard.b2d !='P' and Bboard.b2e !='P' and Bboard.b2f !='P' and Bboard.b2g !='P' and Bboard.b2h !='P' and Bboard.b2i !='P'):
moves = 'P*2b'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Bboard.L>0:
moves = 'L*2b'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Bboard.S>0:
moves = 'S*2b'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Bboard.G>0:
moves = 'G*2b'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Bboard.B>0:
moves = 'B*2b'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Bboard.R>0:
moves = 'R*2b'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if board.s2c =='':
if Bboard.P>0 and (Bboard.b2b !='P' and Bboard.b2c !='P' and Bboard.b2d !='P' and Bboard.b2e !='P' and Bboard.b2f !='P' and Bboard.b2g !='P' and Bboard.b2h !='P' and Bboard.b2i !='P'):
moves = 'P*2c'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Bboard.L>0:
moves = 'L*2c'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Bboard.N>0:
moves = 'N*2c'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Bboard.S>0:
moves = 'S*2c'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Bboard.G>0:
moves = 'G*2c'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Bboard.B>0:
moves = 'B*2c'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Bboard.R>0:
moves = 'R*2c'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if board.s2d =='':
if Bboard.P>0 and (Bboard.b2b !='P' and Bboard.b2c !='P' and Bboard.b2d !='P' and Bboard.b2e !='P' and Bboard.b2f !='P' and Bboard.b2g !='P' and Bboard.b2h !='P' and Bboard.b2i !='P'):
moves = 'P*2d'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Bboard.L>0:
moves = 'L*2d'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Bboard.N>0:
moves = 'N*2d'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Bboard.S>0:
moves = 'S*2d'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Bboard.G>0:
moves = 'G*2d'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Bboard.B>0:
moves = 'B*2d'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Bboard.R>0:
moves = 'R*2d'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if board.s2e =='':
if Bboard.P>0 and (Bboard.b2b !='P' and Bboard.b2c !='P' and Bboard.b2d !='P' and Bboard.b2e !='P' and Bboard.b2f !='P' and Bboard.b2g !='P' and Bboard.b2h !='P' and Bboard.b2i !='P'):
moves = 'P*2e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Bboard.L>0:
moves = 'L*2e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Bboard.N>0:
moves = 'N*2e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Bboard.S>0:
moves = 'S*2e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Bboard.G>0:
moves = 'G*2e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Bboard.B>0:
moves = 'B*2e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Bboard.R>0:
moves = 'R*2e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if board.s2f =='':
if Bboard.P>0 and (Bboard.b2b !='P' and Bboard.b2c !='P' and Bboard.b2d !='P' and Bboard.b2e !='P' and Bboard.b2f !='P' and Bboard.b2g !='P' and Bboard.b2h !='P' and Bboard.b2i !='P'):
moves = 'P*2f'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Bboard.L>0:
moves = 'L*2f'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Bboard.N>0:
moves = 'N*2f'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Bboard.S>0:
moves = 'S*2f'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Bboard.G>0:
moves = 'G*2f'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Bboard.B>0:
moves = 'B*2f'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Bboard.R>0:
moves = 'R*2f'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if board.s2g =='':
if Bboard.P>0 and (Bboard.b2b !='P' and Bboard.b2c !='P' and Bboard.b2d !='P' and Bboard.b2e !='P' and Bboard.b2f !='P' and Bboard.b2g !='P' and Bboard.b2h !='P' and Bboard.b2i !='P'):
moves = 'P*2g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Bboard.L>0:
moves = 'L*2g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Bboard.N>0:
moves = 'N*2g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Bboard.S>0:
moves = 'S*2g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Bboard.G>0:
moves = 'G*2g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Bboard.B>0:
moves = 'B*2g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Bboard.R>0:
moves = 'R*2g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if board.s2h =='':
if Bboard.P>0 and (Bboard.b2b !='P' and Bboard.b2c !='P' and Bboard.b2d !='P' and Bboard.b2e !='P' and Bboard.b2f !='P' and Bboard.b2g !='P' and Bboard.b2h !='P' and Bboard.b2i !='P'):
moves = 'P*2h'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Bboard.L>0:
moves = 'L*2h'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Bboard.N>0:
moves = 'N*2h'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Bboard.S>0:
moves = 'S*2h'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Bboard.G>0:
moves = 'G*2h'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Bboard.B>0:
moves = 'B*2h'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Bboard.R>0:
moves = 'R*2h'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if board.s2i =='':
if Bboard.P>0 and (Bboard.b2b !='P' and Bboard.b2c !='P' and Bboard.b2d !='P' and Bboard.b2e !='P' and Bboard.b2f !='P' and Bboard.b2g !='P' and Bboard.b2h !='P' and Bboard.b2i !='P'):
moves = 'P*2i'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Bboard.L>0:
moves = 'L*2i'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Bboard.N>0:
moves = 'N*2i'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Bboard.S>0:
moves = 'S*2i'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Bboard.G>0:
moves = 'G*2i'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Bboard.B>0:
moves = 'B*2i'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Bboard.R>0:
moves = 'R*2i'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if board.s3a =='':
if Bboard.S>0:
moves = 'S*3a'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Bboard.G>0:
moves = 'G*3a'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Bboard.B>0:
moves = 'B*3a'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Bboard.R>0:
moves = 'R*3a'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if board.s3b =='':
if Bboard.P>0 and (Bboard.b3b !='P' and Bboard.b3c !='P' and Bboard.b3d !='P' and Bboard.b3e !='P' and Bboard.b3f !='P' and Bboard.b3g !='P' and Bboard.b3h !='P' and Bboard.b3i !='P'):
moves = 'P*3b'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Bboard.L>0:
moves = 'L*3b'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Bboard.S>0:
moves = 'S*3b'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Bboard.G>0:
moves = 'G*3b'
1
# Determining the overlap between the human label and the automated
# label
intersection = human_arr * bot_arr
# Determining the union between the human label and the automated
# label
union = human_arr + bot_arr
union[union == 2] = 1
# Determining how much of the human label and the automated label
# overlap with respect to time
intersection_count = np.count_nonzero(
intersection == 1) / SAMPLE_RATE
# Determining the span of the human label and the automated label
# with respect to time.
union_count = np.count_nonzero(union == 1) / SAMPLE_RATE
# Placing the Intersection over Union Percentage into its
# respective position in the array.
IoU_Matrix[row, column] = round(
intersection_count / union_count, 4)
# Resetting the automated label to zero
bot_arr[bot_arr == 1] = 0
# Resetting the human label to zero
human_arr[human_arr == 1] = 0
return IoU_Matrix
def matrix_IoU_Scores(IoU_Matrix, manual_df, threshold):
"""
Function that takes in the IoU Matrix from the clip_IoU function and outputs
the number of true positives and false positives, as well as calculating
the precision, recall, and f1 metrics.
Args:
IoU_Matrix (arr)
- (human label count) x (automated label count) matrix where each
row contains the IoU of each automated annotation with respect
to a human label.
manual_df (Dataframe)
- Dataframe of human labels for an audio clip.
threshold (float)
- IoU threshold for determining true positives, false
positives, and false negatives.
Returns:
Dataframe of clip statistics such as True Positive, False Negative,
False Positive, Precision, Recall, and F1 values for an audio clip.
"""
clip_class = manual_df["MANUAL ID"][0]
audio_dir = manual_df["FOLDER"][0]
filename = manual_df["IN FILE"][0]
# TODO: make sure that all of these calculations are correct. It is
# confusing to me that the Precision and Recall scores have a positive
# correlation.
# Determining which automated label has the highest IoU across
# each human label.
automated_label_best_fits = np.max(IoU_Matrix, axis=1)
# human_label_count = automated_label_best_fits.shape[0]
# Calculating the number of true positives based off of the passed in
# thresholds.
tp_count = automated_label_best_fits[automated_label_best_fits >=
threshold].shape[0]
# Calculating the number of false negatives from the number of human
# labels and true positives
fn_count = automated_label_best_fits[automated_label_best_fits <
threshold].shape[0]
# Calculating the false positives
max_val_per_column = np.max(IoU_Matrix, axis=0)
fp_count = max_val_per_column[max_val_per_column < threshold].shape[0]
# Calculating the necessary statistics
try:
recall = round(tp_count / (tp_count + fn_count), 4)
precision = round(tp_count / (tp_count + fp_count), 4)
f1 = round(2 * (recall * precision) / (recall + precision), 4)
except ZeroDivisionError:
print(
"Division by zero setting precision, recall, and f1 to zero on " +
filename)
recall = 0
precision = 0
f1 = 0
entry = {'FOLDER': audio_dir,
'IN FILE': filename,
'MANUAL ID': clip_class,
'TRUE POSITIVE': tp_count,
'FALSE NEGATIVE': fn_count,
'FALSE POSITIVE': fp_count,
'PRECISION': precision,
'RECALL': recall,
'F1': f1}
return pd.DataFrame.from_dict([entry])
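# Worked example (hypothetical counts): with 8 true positives, 2 false negatives and
# 1 false positive, recall = 8 / (8 + 2) = 0.8, precision = 8 / (8 + 1) = 0.8889 and
# f1 = 2 * (0.8889 * 0.8) / (0.8889 + 0.8) = 0.8421 (all rounded to 4 decimal places).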
def clip_catch(automated_df, manual_df):
"""
Function that determines how much of each human label is covered (caught) by
the automated labels within an audio clip.
Args:
automated_df (Dataframe)
- Dataframe of automated labels for an audio clip.
manual_df (Dataframe)
- Dataframe of human labels for an audio clip.
Returns:
Numpy Array of statistics regarding the amount of overlap between the
manual and automated labels relative to the number of samples.
"""
# resetting the indices to make this function work
automated_df.reset_index(inplace=True, drop=True)
manual_df.reset_index(inplace=True, drop=True)
# figuring out how many automated labels and human labels exist
manual_row_count = manual_df.shape[0]
automated_row_count = automated_df.shape[0]
# finding the length of the clip as well as the sampling frequency.
duration = automated_df["CLIP LENGTH"].to_list()[0]
SAMPLE_RATE = automated_df["SAMPLE RATE"].to_list()[0]
# initializing the output array, as well as the two arrays used to
# calculate catch scores
catch_matrix = np.zeros(manual_row_count)
bot_arr = np.zeros((int(duration * SAMPLE_RATE)))
human_arr = np.zeros((int(duration * SAMPLE_RATE)))
# Determining the automated labelled regions with respect to samples
# Looping through each automated label
for row in automated_df.index:
# converting each label into a "pulse" on an array that represents the
# labels as 0's and 1's on bot array.
minval = int(round(automated_df["OFFSET"][row] * SAMPLE_RATE, 0))
maxval = int(
round(
(automated_df["OFFSET"][row] +
automated_df["DURATION"][row]) *
SAMPLE_RATE,
0))
bot_arr[minval:maxval] = 1
# Looping through each human label and computing catch =
# (#intersections)/(#samples in label)
for row in manual_df.index:
# Determining the beginning of a human label
minval = int(round(manual_df["OFFSET"][row] * SAMPLE_RATE, 0))
# Determining the end of a human label
maxval = int(
round(
(manual_df["OFFSET"][row] +
manual_df["DURATION"][row]) *
SAMPLE_RATE,
0))
# Placing the label relative to the clip
human_arr[minval:maxval] = 1
# Determining the length of a label with respect to samples
samples_in_label = maxval - minval
# Finding where the human label and all of the annotated labels overlap
intersection = human_arr * bot_arr
# Determining how many samples overlap.
intersection_count = np.count_nonzero(intersection == 1)
# Intersection/length of label
catch_matrix[row] = round(intersection_count / samples_in_label, 4)
# resetting the human label
human_arr[human_arr == 1] = 0
return catch_matrix
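# Illustrative example (hypothetical values): a human label covering 800 samples that is
# overlapped by automated labels for 600 of those samples receives a catch score of
# round(600 / 800, 4) = 0.75.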
# def dataset_IoU(automated_df,manual_df):
# """
# Function that takes in two Pandas dataframes that represent human labels
# and automated labels.
# It then runs the clip_IoU function across each clip and appends the best
# fit IoU score to each labels on the manual dataframe as its output.
#
# Args:
# automated_df (Dataframe) - Dataframe of automated labels for multiple
# audio clips.
# manual_df (Dataframe) - Dataframe of human labels for multiple audio
# clips.
#
# Returns:
# Dataframe of manual labels with the best fit IoU score as a column.
# """
# # Getting a list of clips
# clips = automated_df["IN FILE"].to_list()
# # Removing duplicates
# clips = list(dict.fromkeys(clips))
# # Initializing the output dataframe
# manual_df_with_IoU = pd.DataFrame()
# for clip in clips:
# print(clip)
# # Isolating a clip from the human and automated dataframes
# clip_automated_df = automated_df[automated_df["IN FILE"] == clip]
# clip_manual_df = manual_df[manual_df["IN FILE"] == clip]
# # Calculating the IoU scores of each human label.
# IoU_Matrix = clip_IoU(clip_automated_df,clip_manual_df)
# # Finding the best automated IoU score with respect to each label
# automated_label_best_fits = np.max(IoU_Matrix,axis=1)
# clip_manual_df["IoU"] = automated_label_best_fits
# # Appending on the best fit IoU score to each human label
# if manual_df_with_IoU.empty == True:
# manual_df_with_IoU = clip_manual_df
# else:
# manual_df_with_IoU = manual_df_with_IoU.append(clip_manual_df)
# # Adjusting the indices.
# manual_df_with_IoU.reset_index(inplace = True, drop = True)
# return manual_df_with_IoU
# def class_IoU_Statistics(automated_df,manual_df,threshold = 0.5):
# """
# Wrapper function that takes matrix_IoU_Scores across multiple clips from a
# class. Allows user to modify the threshold that determines whether or not
# a label is a true positive.
# Args:
# automated_df (Dataframe)
# - Dataframe of automated labels for multiple
# audio clips.
# manual_df (Dataframe)
# - Dataframe of human labels for multiple audio clips.
# threshold (float)
# - IoU threshold for determining true positives, false positives,
# and false negatives.
# Returns:
# Dataframe of IoU statistics for multiple audio clips.
# """
# # isolating the names of the clips that have been labelled into an array.
# clips = automated_df["IN FILE"].to_list()
# clips = list(dict.fromkeys(clips))
# # initializing the output Pandas dataframe
# IoU_Statistics = pd.DataFrame()
# # Looping through all of the clips
# for clip in clips:
# print(clip)
# clip_automated_df = automated_df[automated_df["IN FILE"] == clip]
# clip_manual_df = manual_df[manual_df["IN FILE"] == clip]
# # Computing the IoU Matrix across a specific clip
# IoU_Matrix = clip_IoU(clip_automated_df,clip_manual_df)
# # Calculating the best fit IoU to each label for the clip
# clip_stats_df = matrix_IoU_Scores(IoU_Matrix,clip_manual_df,threshold)
# # adding onto the output array.
# if IoU_Statistics.empty == True:
# IoU_Statistics = clip_stats_df
# else:
# IoU_Statistics = IoU_Statistics.append(clip_stats_df)
# IoU_Statistics.reset_index(inplace = True, drop = True)
# return IoU_Statistics
def global_IoU_Statistics(statistics_df):
"""
Function that takes the output of dataset_IoU_Statistics and outputs a
global count of true positives and false positives, as well as computing
the precision, recall, and f1 metrics across the dataset.
Args:
statistics_df (Dataframe)
- Dataframe of matrix IoU scores for multiple clips.
Returns:
Dataframe of global IoU statistics which include the number of true
positives, false positives, and false negatives. Contains Precision,
Recall, and F1 metrics as well.
"""
data_class = statistics_df["MANUAL ID"][0]
# taking the sum of the number of true positives and false positives.
tp_sum = statistics_df["TRUE POSITIVE"].sum()
fn_sum = statistics_df["FALSE NEGATIVE"].sum()
fp_sum = statistics_df["FALSE POSITIVE"].sum()
# calculating the precision, recall, and f1 metrics
return False
def verify_selected_by_id(self, element_id):
"""
Checks if element is selected.
:param element_id:
"""
is_selected = self.driver.execute_script(("return document.getElementById('%s').checked") % element_id)
return is_selected
def wait_for_enabled_by_id(self, element_id):
"""
Waits for an element to be present, visible and enabled such that you can click it.
:param element_id:
"""
print "Executing wait_for_clickable_by_id('{0}')".format(element_id)
try:
for i in range(1, 20):
element=self.driver.find_element_by_id(element_id)
if element.is_enabled():
print "Element by id = " + element_id + " is enabled "
time.sleep(1)
break
else:
print "Waiting for element by id = " + element_id + " to become enabled "
except TimeoutException:
print "Element by id = '{0}' not enabled.".format(element_id)
return False
def verify_enabled_by_css(self, css):
"""
Waits for an element to become enabled.
:param css:
"""
print "Checking if element by_css ('{0}') is enabled".format(css)
try:
for i in range(1, 30):
element = self.driver.find_element_by_css_selector(css)
if element.is_enabled():
print "Element by css = " + css + " is enabled "
time.sleep(1)
break
else:
print "Waiting for element by css = " + css + " to become enabled "
except TimeoutException:
print "Element by icss = '{0}' not enabled.".format(css)
return False
def wait_for_clickable_by_id(self, element_id):
"""
Waits for an element to be present, visible and enabled such that you can click it.
:param element_id:
"""
print "Executing wait_for_clickable_by_id('{0}')".format(element_id)
try:
self.set_implicit_wait(0)
WebDriverWait(self.driver, self.timeout_to_locate_element_in_seconds).until(
EC.element_to_be_clickable((By.ID, element_id)))
self.set_implicit_wait(self.implicit_wait_default_in_seconds)
print "Found clickable element by id = '{0}'".format(element_id)
except TimeoutException, tout:
print "ERROR: Did not find clickable element by id = '{0}'".format(element_id)
print "Checking whether element by id = '{0}' present in the DOM.".format(element_id)
try:
self.driver.find_element_by_id(element_id)
print "Element by id = '{0}' is present in the DOM but not clickable.".format(element_id)
except NoSuchElementException:
print "ERROR: Element by id = '{0}' not found in the DOM.".format(element_id)
def wait_for_clickable_by_css(self, css):
"""
Waits for an element to be present, visible and enabled such that you can click it.
:param css:
"""
print "Executing wait_for_clickable_by_css('{0}')".format(css)
try:
self.set_implicit_wait(0)
WebDriverWait(self.driver, self.timeout_to_locate_element_in_seconds).until(
EC.element_to_be_clickable((By.CSS_SELECTOR, css)))
self.set_implicit_wait(self.implicit_wait_default_in_seconds)
print "Found clickable element by css = '{0}'".format(css)
except TimeoutException, tout:
print "ERROR: Did not find clickable element by css = '{0}'".format(css)
print "Checking whether element by css = '{0}' present in the DOM.".format(css)
try:
self.driver.find_element_by_css_selector(css)
print "Element by css = '{0}' is present in the DOM but not clickable.".format(css)
except NoSuchElementException:
print "ERROR: Element by css = '{0}' not found in the DOM.".format(css)
def wait_for_clickable_by_xpath(self, xpath):
"""
Waits for an element to be present, visible and enabled such that you can click it.
:param xpath:
"""
print "Executing wait_for_clickable_by_xpath('{0}')".format(xpath)
try:
WebDriverWait(self.driver, self.timeout_to_locate_element_in_seconds).until(
EC.element_to_be_clickable((By.XPATH, xpath)))
print "Found clickable element by xpath = '{0}'".format(xpath)
except TimeoutException, tout:
print "ERROR: Did not find clickable element by xpath = '{0}'".format(xpath)
print "Checking whether element by xpath = '{0}' present in the DOM.".format(xpath)
try:
self.driver.find_element_by_xpath(xpath)
print "Element by css = '{0}' is present in the DOM but not clickable.".format(xpath)
except NoSuchElementException:
print "ERROR: Element by css = '{0}' not found in the DOM.".format(xpath)
def click_element_by_id(self, element_id):
"""
Waits for an element to be present and visible such that you can click it.
Clicks the element.
:param element_id:
"""
self.wait_for_clickable_by_id(element_id)
print "Executing click_element_by_id('{0}')".format(element_id)
try:
time.sleep(0.6)
self.driver.find_element_by_id(element_id).click()
print "Clicking on element by id = ('{0}')".format(element_id)
except Exception, e:
print "ERROR: Could not perform click on element by id = ('{0}')".format(element_id)
self.close_browser()
raise
def click_element_by_css(self, css, wait=True):
"""
Waits for an element to be present and visible such that you can click it.
Clicks the element.
:param css:
"""
if wait:
self.wait_for_clickable_by_css(css)
print "Executing click_element_by_css('{0}')".format(css)
try:
time.sleep(0.6)
self.driver.find_element_by_css_selector(css).click()
print "Clicking on element by css = ('{0}')".format(css)
except Exception, e:
print "ERROR: Could not perform click on element by css = ('{0}')".format(css)
self.close_browser()
raise
def click_element_by_id_robust(self, element_id, element_id_on_next_page):
"""
Waits for an element to be enabled such that you can click it.
Clicks the element, checks if element is still visible, hits enter on element if visible up to 5 times.
:param element_id_on_next_page:
:param element_id:
"""
print "Executing click_element_by_id_robust ('{0}')".format(element_id)
self.wait_for_clickable_by_id(element_id)
time.sleep(2)
self.click_element_by_id(element_id)
time.sleep(2)
is_visible = self.check_visibility_by_id(element_id_on_next_page)
k = 1
while not is_visible and (k < 6):
try:
time.sleep(2)
print "Hitting enter. Executing attempt " + str(k)
self.send_keys_by_id(element_id, "\n", clear_field=False)
except Exception, e:
print str(k) + "-th attempt to hit enter unsuccessful."
is_visible = self.check_visibility_by_id(element_id_on_next_page)
k = k + 1
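# Illustrative usage (hypothetical ids): page.click_element_by_id_robust('save-button', 'confirmation-dialog')
# clicks 'save-button' and, if 'confirmation-dialog' does not become visible, re-sends Enter
# to 'save-button' up to five times.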
def click_element_by_id_css_robust(self, element_id, element_css_on_next_page):
"""
Waits for an element to be enabled such that you can click it.
Clicks the element, checks if element is still visible, hits enter on element if visible up to 5 times.
:param element_css_on_next_page:
:param element_id:
"""
print "Executing click_element_by_id_css_robust ('{0}')".format(element_id)
self.wait_for_clickable_by_id(element_id)
time.sleep(2)
self.click_element_by_id(element_id)
time.sleep(2)
is_visible = self.check_visibility_by_css(element_css_on_next_page)
k = 1
while not is_visible and (k < 6):
try:
time.sleep(2)
print "Hitting enter. Executing attempt " + str(k)
self.send_keys_by_id(element_id, "\n", clear_field=False)
except Exception, e:
print str(k) + "-th attempt to hit enter unsuccessful."
is_visible = self.check_visibility_by_css(element_css_on_next_page)
k = k + 1
def click_element_by_id_text_by_id_robust(self, element_id, element_id_for_text_on_next_page, text):
"""
Waits for an element to be enabled such that you can click it.
Clicks the element, checks whether the expected text is visible, and hits enter on the element up to 5 times until the text matches.
:param element_id_for_text_on_next_page:
:param element_id:
:param text:
"""
print "Executing click_element_by_id_text_by_id_robust ('{0}')".format(element_id)
self.wait_for_clickable_by_id(element_id)
self.click_element_by_id(element_id)
is_visible = self.check_visibility_by_id(element_id_for_text_on_next_page)
print "Checked visibility on {0}".format(element_id_for_text_on_next_page) + "Visibility status: " + str(
is_visible)
k = 1
no_match = True
while no_match and (k < 6):
if is_visible:
title = self.store_text_by_id(element_id_for_text_on_next_page)
print "Stored text at the locator {0} ".format(element_id_for_text_on_next_page) + title
try:
if title == text:
no_match = False
else:
time.sleep(1)
print "Hitting enter. Executing attempt " + str(k)
try:
self.send_keys_by_id(element_id, "\n", clear_field=False)
except Exception, e:
print str(k) + "-th attempt to hit enter unsuccessful."
is_visible = self.check_visibility_by_id(element_id_for_text_on_next_page)
print "Checked visibility on {0}".format(
element_id_for_text_on_next_page) + "Visibility status: " + str(is_visible)
k = k + 1
except:
pass
else:
time.sleep(1)
print "Hitting enter. Executing attempt " + str(k)
try:
self.send_keys_by_id(element_id, "\n", clear_field=False)
except Exception, e:
print str(k) + "-th attempt to hit enter unsuccessful."
is_visible = self.check_visibility_by_id(element_id_for_text_on_next_page)
print "Checked visibility on {0}".format(
element_id_for_text_on_next_page) + "Visibility status: " + str(is_visible)
k = k + 1
def click_element_by_css_robust(self, css, element_css_on_next_page):
"""
Waits for an element to be enabled such that you can click it.
Clicks the element, checks if element is still visible, hits enter on element if visible up to 5 times.
:param css:
:param element_css_on_next_page:
"""
print "Executing click_element_by_css_robust ('{0}')".format(css)
self.wait_for_clickable_by_css(css)
time.sleep(1)
self.click_element_by_css(css)
time.sleep(1)
is_visible = self.check_visibility_by_css(element_css_on_next_page)
print "Checked visibility on {0}".format(element_css_on_next_page) + "Visibility status: " + str(is_visible)
k = 1
while not is_visible and (k < 6):
print "Executing Attempt {0}".format(k)
print""
try:
time.sleep(2)
is_visible = self.check_visibility_by_css(css)
if is_visible:
break
time.sleep(1)
print "Hitting enter. Executing attempt " + str(k)
self.send_keys_by_css(css, "\n", clear_field=False)
except NoSuchElementException:
print "Element by css = {0} not found".format(css)
except ElementNotVisibleException:
print "Element by css = {0} not visible".format(css)
except Exception, e:
print str(k) + "-th attempt to hit enter unsuccessful."
raise
is_visible = self.check_visibility_by_css(element_css_on_next_page)
print "Checked visibility on {0}".format(element_css_on_next_page) + "Visibility status: " + str(is_visible)
k = k + 1
while not is_visible and (k < 8):
try:
time.sleep(2)
is_visible = self.check_visibility_by_css(css)
if is_visible:
break
print "Hitting enter. Executing attempt " + str(k)
self.send_keys_by_css(css, "\n", clear_field=False)
except Exception, e:
print str(k) + "-th attempt to hit enter unsuccessful."
self.close_browser()
raise
is_visible = self.check_visibility_by_css(element_css_on_next_page)
k = k + 1
try:
is_visible
except Exception, e:
print "ERROR: click_robust_by_css on element by css={0} has failed.".format(css)
self.close_browser()
raise
def click_element_by_id_resilient(self, element_id, element_to_disappear_id):
"""
Method will verify that element is enabled and try performing a click and hit enter until given element disappears.
"""
print "Executing click_element_by_id_resilient ('{0}')".format(element_id)
#self.verify_enabled_by_id(element_id)
self.wait_for_clickable_by_id(element_id)
element = self.driver.find_element_by_id(element_id)
element.click()
is_visible = self.check_visibility_by_id(element_to_disappear_id)
k = 1
while is_visible and (k < 4):
print "Repeated click. Executing attempt " + str(k)
try:
element.click()
except Exception, e:
print str(k) + "-th attempt to click unsuccessful."
self.close_browser()
raise
time.sleep(1)
is_visible = self.check_visibility_by_id(element_to_disappear_id)
k = k + 1
while is_visible and (k < 7):
print "Hitting enter. Executing attempt " + str(k)
try:
self.send_keys_by_id(element_id, "\n", clear_field=False)
except Exception, | |
import datetime
from django.conf import settings
from django.contrib.auth.models import AbstractUser
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models
from model_clone import CloneMixin
from django.contrib.postgres.fields import ArrayField
from dataprocessing.models import Items
'''
class FieldOfStudyWorkProgram(models.Model):
# Model linking a field of study and a work program
field_of_study = models.ForeignKey('FieldOfStudy', on_delete=models.CASCADE,
verbose_name='Образовательная программа')
work_program = models.ForeignKey('WorkProgram', on_delete=models.CASCADE, verbose_name='Рабочая программа')
# competence = models.ForeignKey('Competence',null=True, on_delete=models.CASCADE, verbose_name = 'Компетенции')
# class Meta:
# unique_together = ('work_program', 'field_of_study')
'''
def current_year():
return datetime.date.today().year
def max_value_current_year(value):
return MaxValueValidator(current_year())(value)
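# Illustrative usage (assumption, mirroring the year_of_recruitment field on
# EducationalProgram below): these two helpers are combined on a year field, e.g.
#     models.PositiveIntegerField(default=current_year(),
#         validators=[MinValueValidator(1984), max_value_current_year])
# so the stored year is bounded between 1984 and the current calendar year.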
class WorkProgram(CloneMixin, models.Model):
'''
Model for a work program
'''
PRIMARY_VOCATIONAL_EDUCATION = 'primary_vocational_education'
SECONADARY_VOCATIONAL_EDUCATION = 'secondary_vocational_education'
BACHELOR = 'bachelor'
SPECIALIST = 'specialist'
MASTER = 'master'
QUALIFICATION_CHOICES = (
(PRIMARY_VOCATIONAL_EDUCATION, 'Primary vocational education'),
(SECONADARY_VOCATIONAL_EDUCATION, 'Secondary vocational education'),
(BACHELOR, 'Bachelor'),
(SPECIALIST, 'Specialist'),
(MASTER, 'Master')
)
status_choise = (
('w', 'inwork'),
('a', 'archive'),
)
extra_points_choise = (
('0', '0'),
('3', '3'),
)
approval_date = models.DateTimeField(editable=True, auto_now_add=True, blank=True, null=True)
discipline_code = models.CharField(max_length=1024, blank=True, null=True)
subject_code = models.CharField(max_length=1024, blank=True, null=True)
authors = models.CharField(max_length=1024, blank=True, null=True)
qualification = models.CharField(choices=QUALIFICATION_CHOICES, max_length=1024, verbose_name='Квалификация',
blank=True, null=True)
prerequisites = models.ManyToManyField(Items, related_name='WorkProgramPrerequisites',
through='PrerequisitesOfWorkProgram', blank=True, null=True,
verbose_name="Пререквизиты")
outcomes = models.ManyToManyField(Items, related_name='WorkProgramOutcomes', through='OutcomesOfWorkProgram',
verbose_name="Постреквизиты")
title = models.CharField(max_length=1024, verbose_name="Название")
hoursFirstSemester = models.IntegerField(blank=True, null=True, verbose_name="Количество часов в 1 семестре")
hoursSecondSemester = models.IntegerField(blank=True, null=True, verbose_name="Количество часов в 2 семестре")
# goals = models.CharField(max_length=1024, verbose_name = "Цели освоения" )
# result_goals = models.CharField(max_length=1024, verbose_name = "Результаты освоения" )
#field_of_studies = models.ManyToManyField('FieldOfStudy', through=FieldOfStudyWorkProgram,
# verbose_name="Предметная область",
# related_name='workprograms_in_fieldofstudy')
bibliographic_reference = models.ManyToManyField('BibliographicReference', verbose_name='Библиогравическая_ссылка',
related_name='bibrefs')
# evaluation_tool = models.ManyToManyField('EvaluationTool', verbose_name='Оценочное средство')
description = models.CharField(max_length=5000, blank=True, null=True)
video = models.CharField(max_length=1024, blank=True, null=True)
credit_units = models.CharField(max_length=1024, blank=True, null=True)
semester_hour = models.CharField(max_length=1024, blank=True, null=True)
owner = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, blank=True, null=True)
work_status = models.CharField(max_length=1, choices=status_choise, verbose_name='Архив', default = 'w')
hours = models.IntegerField(blank=True, null=True, verbose_name="Сумма часов по разделам")
extra_points = models.CharField(choices=extra_points_choise, max_length=1, verbose_name='Квалификация',
blank=True, null=True)
_clone_many_to_many_fields = ['prerequisites', 'field_of_studies', 'bibliographic_reference']
# list_of_references = models.TextField(blank=True, null=True)
# guidelines = models.TextField(blank=True, null=True)
def __str__(self):
return (self.discipline_code + self.title)
def new_relations(old_descipline_code, new_descipline_code):
old_work_program = WorkProgram.objects.get(id = old_descipline_code)
print ('old', old_work_program, old_work_program.id)
new_work_program = WorkProgram.objects.get(id = new_descipline_code)
print ('new', new_work_program, new_work_program.id)
for wp_in_fs in WorkProgramInFieldOfStudy.objects.filter(work_program = old_work_program):
wp_in_fs.work_program = new_work_program
wp_in_fs.save()
print ('замена прошла')
old_work_program.delete()
def clone_programm(programm_id):
program = WorkProgram.objects.get(pk=programm_id)
clone_program = program.make_clone()
discipline = DisciplineSection.objects.filter(work_program_id=programm_id)
disp_clone_list = []
eva_clone_list = []
for disp in discipline:
clone_discipline = disp.make_clone(attrs={'work_program': clone_program})
topic = Topic.objects.filter(discipline_section=disp)
for top in topic:
top.make_clone(attrs={'discipline_section': clone_discipline})
clone_dict={'id':disp.id, 'clone_id':clone_discipline.id}
disp_clone_list.append(clone_dict)
for eva in EvaluationTool.objects.filter():
evaluation_disciplines = eva.evaluation_tools.all().filter(work_program_id=programm_id)
if (evaluation_disciplines):
clone_eva = eva.make_clone()
for disp in evaluation_disciplines:
for elem in disp_clone_list:
if (disp.id==elem['id']):
DisciplineSection.objects.get(pk=elem['clone_id']).evaluation_tools.add(clone_eva)
clone_dict={'id':eva.id, 'clone_id':clone_eva.id}
eva_clone_list.append(clone_dict)
for out in OutcomesOfWorkProgram.objects.filter(workprogram=program):
clone_outcomes = out.make_clone(attrs={'workprogram': clone_program})
for eva in out.evaluation_tool.all():
for elem in eva_clone_list:
if(eva.id==elem['id']):
clone_outcomes.evaluation_tool.add(EvaluationTool.objects.get(pk=elem['clone_id']))
return clone_program
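# Hypothetical usage sketch: clone = WorkProgram.clone_programm(programm_id=42) copies the
# work program with id 42 together with its discipline sections, topics, evaluation tools
# and outcomes, re-linking the cloned evaluation tools to the cloned sections and outcomes.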
class PrerequisitesOfWorkProgram(models.Model):
'''
Model for work program prerequisites
'''
# class Meta:
# auto_created = True
item = models.ForeignKey(Items, on_delete=models.CASCADE, verbose_name="Пререквизит")
workprogram = models.ForeignKey(WorkProgram, on_delete=models.CASCADE, verbose_name="Рабочая программа")
MasterylevelChoices = [
('1', 'low'),
('2', 'average'),
('3', 'high'),
]
masterylevel = models.CharField(
max_length=1,
choices=MasterylevelChoices,
default=1, verbose_name="Уровень"
)
# def __str__(self):
# return self.item
class OutcomesOfWorkProgram(CloneMixin, models.Model):
'''
Model for work program learning outcomes
'''
# class Meta:
# auto_created = True
item = models.ForeignKey(Items, on_delete=models.CASCADE, verbose_name="Постреквизит")
workprogram = models.ForeignKey(WorkProgram, on_delete=models.CASCADE, verbose_name="Рабочая программа")
MasterylevelChoices = [
('1', 'low'),
('2', 'average'),
('3', 'high'),
]
masterylevel = models.CharField(
max_length=1,
choices=MasterylevelChoices,
default=1, verbose_name="Уровень"
)
evaluation_tool = models.ManyToManyField('EvaluationTool', verbose_name='Оценочные средства',
related_name='evaluation_tool_of_outcomes', blank=True, null=True)
# _clone_many_to_many_fields =['evaluation_tool']
# def __str__(self):
# return str(self.item) + str(self.workprogram)
#
# class User(AbstractUser):
# '''
# Model for users
# '''
# first_name = models.CharField(max_length=1024)
# last_name = models.CharField(max_length=1024)
# patronymic = models.CharField(max_length=1024)
# isu_number = models.CharField(max_length=1024)
#
# def __str__(self):
# return self.first_name + ' ' + self.last_name
class FieldOfStudy(models.Model):
'''
Model for fields of study
'''
PRIMARY_VOCATIONAL_EDUCATION = 'primary_vocational_education'
SECONADARY_VOCATIONAL_EDUCATION = 'secondary_vocational_education'
BACHELOR = 'bachelor'
SPECIALIST = 'specialist'
MASTER = 'master'
QUALIFICATION_CHOICES = (
(PRIMARY_VOCATIONAL_EDUCATION, 'Primary vocational education'),
(SECONADARY_VOCATIONAL_EDUCATION, 'Secondary vocational education'),
(BACHELOR, 'Bachelor'),
(SPECIALIST, 'Specialist'),
(MASTER, 'Master')
)
INTERNAL = 'internal'
EXTRAMURAL = 'extramural'
EDUCATION_FORM_CHOICES = (
(INTERNAL, 'Internal'),
(EXTRAMURAL, 'Extramural'),
)
number = models.CharField(max_length=1024, verbose_name = 'Шифр ОП')
title = models.CharField(max_length=1024, verbose_name = 'Название ОП', blank = True, null = True)
qualification = models.CharField(choices=QUALIFICATION_CHOICES, max_length=1024, verbose_name = 'Квалификация', blank = True, null = True)
educational_profile = models.CharField(max_length=1024, verbose_name = 'Профиль ОП', blank = True, null = True)
faculty = models.CharField(max_length=150, verbose_name = 'Факультет (Структурное подразделение)', null=True)
education_form = models.CharField(choices=EDUCATION_FORM_CHOICES, max_length=1024, verbose_name = 'Форма обучения', blank = True, null = True)
def __str__(self):
return self.number
# class CompetenceIndicator(models.Model):
# '''
# Model linking competences and indicators
# '''
# competence = models.ForeignKey('Competence', on_delete=models.CASCADE)
# indicator = models.ForeignKey('Indicator', on_delete=models.CASCADE)
# #field_of_study = models.ForeignKey('FieldOfStudy', on_delete=models.CASCADE)
#
# class Meta:
# unique_together = ('competence', 'indicator')
def current_year():
return datetime.date.today().year
def max_value_current_year(value):
return MaxValueValidator(current_year())(value)
class AcademicPlan(models.Model):
'''
Model for an academic plan
'''
PRIMARY_VOCATIONAL_EDUCATION = 'primary_vocational_education'
SECONADARY_VOCATIONAL_EDUCATION = 'secondary_vocational_education'
BACHELOR = 'bachelor'
SPECIALIST = 'specialist'
MASTER = 'master'
QUALIFICATION_CHOICES = (
(PRIMARY_VOCATIONAL_EDUCATION, 'Primary vocational education'),
(SECONADARY_VOCATIONAL_EDUCATION, 'Secondary vocational education'),
(BACHELOR, 'Bachelor'),
(SPECIALIST, 'Specialist'),
(MASTER, 'Master')
)
INTERNAL = 'internal'
EXTRAMURAL = 'extramural'
EDUCATION_FORM_CHOICES = (
(INTERNAL, 'Internal'),
(EXTRAMURAL, 'Extramural'),
)
qualification = models.CharField(choices=QUALIFICATION_CHOICES, max_length=1024, verbose_name = 'Квалификация', blank = True, null = True)
educational_profile = models.CharField(max_length=1024, verbose_name = 'Профиль ОП', blank = True, null = True)
number = models.CharField(unique=True, max_length=1024, verbose_name = 'Номер учебного плана', blank = True, null = True)
field_of_study = models.ManyToManyField('FieldOfStudy', through='ImplementationAcademicPlan', related_name="block_in_academic_plan", blank = True, null = True)
approval_date = models.DateTimeField(editable=True, auto_now_add=True, blank=True, null=True)
year = models.CharField(max_length=1024, blank = True, null = True)
education_form = models.CharField(choices=EDUCATION_FORM_CHOICES, max_length=1024, verbose_name = 'Форма обучения', blank = True, null = True)
author=models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name='Автор учебного плана', on_delete = models.CASCADE, related_name = 'academic_plan_author', blank = True, null = True)
# TODO: Add the year of recruitment
def __str__(self):
return (self.educational_profile)
def clone_descipline_blocks(id, siap):
DisciplineBlocks = DisciplineBlock.objects.filter(academic_plan__educational_profile='Экспертный профиль')
for Block in DisciplineBlocks:
block_clone = Block.make_clone(attrs={'academic_plan_id': siap.data.get("id")})
print(Block.modules_in_discipline_block.all())
for Module in Block.modules_in_discipline_block.all():
module_clone = Module.make_clone(attrs={'descipline_block_id': block_clone.id})
def new_descipline_blocks(iap, siap):
blocks = ['Блок 1', 'Блок 2', 'Блок 3']
print(siap.data.get("id"))
for block in blocks:
db = DisciplineBlock()
db.name = block
db.academic_plan_id = siap.data.get("id")
db.save()
print(db.id)
DisciplineBlock.new_descipline_block_modules(db.id)
class EducationalProgram(models.Model):
'''
Model describing an educational program
'''
PRIMARY_VOCATIONAL_EDUCATION = 'primary_vocational_education'
SECONADARY_VOCATIONAL_EDUCATION = 'secondary_vocational_education'
BACHELOR = 'bachelor'
SPECIALIST = 'specialist'
MASTER = 'master'
QUALIFICATION_CHOICES = (
(PRIMARY_VOCATIONAL_EDUCATION, 'Primary vocational education'),
(SECONADARY_VOCATIONAL_EDUCATION, 'Secondary vocational education'),
(BACHELOR, 'Bachelor'),
(SPECIALIST, 'Specialist'),
(MASTER, 'Master')
)
INTERNAL = 'internal'
EXTRAMURAL = 'extramural'
EDUCATION_FORM_CHOICES = (
(INTERNAL, 'Internal'),
(EXTRAMURAL, 'Extramural'),
)
qualification = models.CharField(choices=QUALIFICATION_CHOICES, max_length=1024, verbose_name = 'Квалификация', blank = True, null = True)
#academic_plan = models.ForeignKey('ImplementationAcademicPlan', on_delete=models.CASCADE, verbose_name = 'Учебный план', related_name="academic_plan_in_educational_program")
year_of_recruitment = models.PositiveIntegerField(
default=current_year(), validators=[MinValueValidator(1984), max_value_current_year])
manager = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
academic_plan_for_ep = models.ForeignKey('ImplementationAcademicPlan', on_delete=models.SET_NULL, verbose_name = 'Учебный план_1', related_name="academic_plan_in_educational_program", blank = True, null = True)
class GeneralCharacteristics(models.Model):
'''
Model describing the general characteristics of an educational program
'''
educational_program = models.ForeignKey('EducationalProgram', on_delete=models.SET_NULL, verbose_name = 'Образовательная программа', related_name="general_characteristics_in_educational_program", blank = True, null = True)
area_of_activity = models.ManyToManyField('ProfessionalAreaOfGeneralCharacteristics', verbose_name = 'Область профессиональной деятельности')
objects_of_activity = models.CharField(max_length=512, verbose_name="Объекты профессиональной деятельности", blank=True, null=True)
kinds_of_activity = models.CharField(max_length=512, verbose_name="Сферы профессиональной деятельности, к которому (которым) готовятся выпускники", blank=True, null=True)
tasks_of_activity = models.CharField(max_length=512, verbose_name="Задачи профессиональной деятельности ", blank=True, null=True)
type_of_activity = models.CharField(max_length=512, verbose_name="Тип основной профессиональной образовательной программы", blank=True, null=True)
ok_competences = models.ManyToManyField('Competence', verbose_name="ОБЩЕКУЛЬТУРНЫЕ КОМПЕТЕНЦИИ", related_name="ok_competences_in_gh", blank=True)
kc_competences = models.ManyToManyField('Competence', verbose_name="Ключевые компетенции", related_name="kc_competences_in_gh", blank=True)
pk_competences = models.ManyToManyField('Competence', verbose_name="ПРОФЕССИОНАЛЬНЫЕ КОМПЕТЕНЦИИ", through = 'PkCompetencesInGeneralCharacteristics', related_name="pk_competences_in_gh", blank=True)
np_competences = models.ManyToManyField('Competence', verbose_name="Надпрофессиональные компетенции", related_name="np_competences_in_gh", blank=True,)
#pps = ArrayField(models.CharField(max_length=512, verbose_name="Сведения о профессорско-преподавательском составе, необходимом для реализации основной профессиональной образовательной программы"), blank=True, null=True)
pps = models.TextField(max_length=55512, verbose_name="Сведения о профессорско-преподавательском составе, необходимом для реализации ", blank=True, null=True)
annotation = models.TextField(max_length=55512, verbose_name="Аннотация основной профессиональной образовательной программы", blank=True, null=True)
developers = models.ManyToManyField(settings.AUTH_USER_MODEL, verbose_name="Сотрудники Университета ИТМО", related_name="ok_competences_in_gh", blank=True)
employers_representatives = models.ManyToManyField(settings.AUTH_USER_MODEL, verbose_name="Представители работодателей", related_name="employers_representatives_in_gh", blank=True)
director_of_megafaculty = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, verbose_name="Директор мегафакультета", related_name="director_of_megafaculty_in_gh", blank=True, null=True)
dean_of_the_faculty = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, verbose_name="<NAME>", related_name="dean_of_the_faculty_in_gh", blank=True, null=True)
scientific_supervisor_of_the_educational_program = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, verbose_name="Научный руководитель образовательной программы",
related_name="scientific_supervisor_of_the_educational_program_in_gh", blank=True, null=True)
def __str__(self):
return str(self.educational_program) + str(self.director_of_megafaculty) + str(self.scientific_supervisor_of_the_educational_program)
class PkCompetencesInGeneralCharacteristics(models.Model):
"""
Professional competences within the general characteristics
"""
labor_functions = models.CharField(max_length=512, verbose_name="трудовая функция")
general_characteristic = models.ForeignKey('GeneralCharacteristics', on_delete=models.CASCADE, verbose_name="Декан", blank=True, null=True)
competence = models.ForeignKey('Competence', on_delete=models.CASCADE, verbose_name="Декан", blank=True, null=True)
professional_standard = models.ManyToManyField('ProfessionalStandard', verbose_name="Профессиональный стандарт", blank=True)
def __str__(self):
return str(self.labor_functions) + str(self.general_characteristic) + str(self.competence)
class Department(models.Model):
"""
Faculty
"""
title = models.CharField(max_length=512, verbose_name="Название факультета")
mini_titile = models.CharField(max_length=512, verbose_name="Краткое название факультета", blank=True, null=True)
dean = |